@firebase/ai 2.6.1-canary.9cf4b7e35 → 2.6.1-canary.b2827448b
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-public.d.ts +104 -11
- package/dist/ai.d.ts +105 -12
- package/dist/esm/index.esm.js +157 -77
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/constants.d.ts +1 -1
- package/dist/esm/src/methods/chat-session.d.ts +7 -3
- package/dist/esm/src/methods/count-tokens.d.ts +2 -2
- package/dist/esm/src/methods/generate-content.d.ts +5 -5
- package/dist/esm/src/models/generative-model.d.ts +4 -4
- package/dist/esm/src/models/imagen-model.d.ts +3 -3
- package/dist/esm/src/models/template-generative-model.d.ts +3 -3
- package/dist/esm/src/models/template-imagen-model.d.ts +2 -2
- package/dist/esm/src/requests/request.d.ts +4 -2
- package/dist/esm/src/requests/stream-reader.d.ts +1 -3
- package/dist/esm/src/types/enums.d.ts +21 -0
- package/dist/esm/src/types/imagen/internal.d.ts +1 -1
- package/dist/esm/src/types/requests.d.ts +68 -3
- package/dist/index.cjs.js +157 -76
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +157 -76
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +157 -77
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/constants.d.ts +1 -1
- package/dist/src/methods/chat-session.d.ts +7 -3
- package/dist/src/methods/count-tokens.d.ts +2 -2
- package/dist/src/methods/generate-content.d.ts +5 -5
- package/dist/src/models/generative-model.d.ts +4 -4
- package/dist/src/models/imagen-model.d.ts +3 -3
- package/dist/src/models/template-generative-model.d.ts +3 -3
- package/dist/src/models/template-imagen-model.d.ts +2 -2
- package/dist/src/requests/request.d.ts +4 -2
- package/dist/src/requests/stream-reader.d.ts +1 -3
- package/dist/src/types/enums.d.ts +21 -0
- package/dist/src/types/imagen/internal.d.ts +1 -1
- package/dist/src/types/requests.d.ts +68 -3
- package/package.json +8 -8
package/dist/esm/index.esm.js
CHANGED
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
 import { Logger } from '@firebase/logger';
 
 var name = "@firebase/ai";
-var version = "2.6.1-canary.9cf4b7e35";
+var version = "2.6.1-canary.b2827448b";
 
 /**
  * @license
@@ -32,7 +32,7 @@ const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;
 /**
  * Defines the name of the default in-cloud model to use for hybrid inference.
  */
-const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.
+const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.5-flash-lite';
 
 /**
  * @license
@@ -408,6 +408,19 @@ const Language = {
     UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
     PYTHON: 'PYTHON'
 };
+/**
+ * A preset that controls the model's "thinking" process. Use
+ * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
+ * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
+ *
+ * @public
+ */
+const ThinkingLevel = {
+    MINIMAL: 'MINIMAL',
+    LOW: 'LOW',
+    MEDIUM: 'MEDIUM',
+    HIGH: 'HIGH'
+};
 
 /**
  * @license
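The new `ThinkingLevel` export pairs with the `thinkingConfig.thinkingLevel` field that `validateGenerationConfig` checks later in this diff. A minimal usage sketch, assuming the standard `firebase/app` entry point (the Firebase options object is a placeholder):

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, ThinkingLevel } from '@firebase/ai';

// Placeholder options; substitute your project's real Firebase config.
const app = initializeApp({ projectId: 'my-project' });

const model = getGenerativeModel(getAI(app), {
  model: 'gemini-2.5-flash-lite',
  generationConfig: {
    // A named preset instead of a numeric thinkingBudget.
    thinkingConfig: { thinkingLevel: ThinkingLevel.HIGH }
  }
});
```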
@@ -1441,6 +1454,8 @@ class AIModel {
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+const TIMEOUT_EXPIRED_MESSAGE = 'Timeout has expired.';
+const ABORT_ERROR_NAME = 'AbortError';
 class RequestURL {
     constructor(params) {
         this.params = params;
@@ -1463,7 +1478,7 @@ class RequestURL {
         }
     }
     get baseUrl() {
-        return this.params.requestOptions?.baseUrl ?? `https://${DEFAULT_DOMAIN}`;
+        return (this.params.singleRequestOptions?.baseUrl ?? `https://${DEFAULT_DOMAIN}`);
     }
     get queryParams() {
         const params = new URLSearchParams();
@@ -1531,21 +1546,32 @@ async function getHeaders(url) {
 async function makeRequest(requestUrlParams, body) {
     const url = new RequestURL(requestUrlParams);
     let response;
-    let fetchTimeoutId;
+    const externalSignal = requestUrlParams.singleRequestOptions?.signal;
+    const timeoutMillis = requestUrlParams.singleRequestOptions?.timeout != null &&
+        requestUrlParams.singleRequestOptions.timeout >= 0
+        ? requestUrlParams.singleRequestOptions.timeout
+        : DEFAULT_FETCH_TIMEOUT_MS;
+    const internalAbortController = new AbortController();
+    const fetchTimeoutId = setTimeout(() => {
+        internalAbortController.abort(new DOMException(TIMEOUT_EXPIRED_MESSAGE, ABORT_ERROR_NAME));
+        logger.debug(`Aborting request to ${url} due to timeout (${timeoutMillis}ms)`);
+    }, timeoutMillis);
+    // Used to abort the fetch if either the user-defined `externalSignal` is aborted, or if the
+    // internal signal (triggered by timeouts) is aborted.
+    const combinedSignal = AbortSignal.any(externalSignal
+        ? [externalSignal, internalAbortController.signal]
+        : [internalAbortController.signal]);
+    if (externalSignal && externalSignal.aborted) {
+        clearTimeout(fetchTimeoutId);
+        throw new DOMException(externalSignal.reason ?? 'Aborted externally before fetch', ABORT_ERROR_NAME);
+    }
     try {
         const fetchOptions = {
             method: 'POST',
             headers: await getHeaders(url),
+            signal: combinedSignal,
             body
         };
-        // Timeout is 180s by default.
-        const timeoutMillis = requestUrlParams.requestOptions?.timeout != null &&
-            requestUrlParams.requestOptions.timeout >= 0
-            ? requestUrlParams.requestOptions.timeout
-            : DEFAULT_FETCH_TIMEOUT_MS;
-        const abortController = new AbortController();
-        fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis);
-        fetchOptions.signal = abortController.signal;
         response = await fetch(url.toString(), fetchOptions);
         if (!response.ok) {
             let message = '';
@@ -1588,16 +1614,18 @@ async function makeRequest(requestUrlParams, body) {
         let err = e;
         if (e.code !== AIErrorCode.FETCH_ERROR &&
             e.code !== AIErrorCode.API_NOT_ENABLED &&
-            e instanceof Error) {
+            e instanceof Error &&
+            e.name !== ABORT_ERROR_NAME) {
             err = new AIError(AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}`);
             err.stack = e.stack;
         }
         throw err;
     }
     finally {
-        if (fetchTimeoutId) {
-            clearTimeout(fetchTimeoutId);
-        }
+        // When doing streaming requests, this will clear the timeout once the stream begins.
+        // If a timeout is 3000ms, and the stream starts after 300ms and ends after 5000ms, the
+        // timeout will be cleared after 300ms, so it won't abort the request.
+        clearTimeout(fetchTimeoutId);
     }
     return response;
 }
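Together, the two hunks above let a caller bound or cancel any single request: a per-call `timeout` arms the internal `AbortController`, `AbortSignal.any` merges that with an optional caller-supplied `signal`, and the `finally` block disarms the timer once response headers arrive. A sketch of the caller side, assuming a `GenerativeModel` like the one above and the `SingleRequestOptions` argument the model classes gain later in this diff:

```ts
const controller = new AbortController();

// Either the 10s timeout or an explicit abort() cancels the underlying
// fetch with an AbortError, which makeRequest now rethrows unwrapped.
const pending = model.generateContent('Summarize the attached report.', {
  timeout: 10_000,
  signal: controller.signal
});

// For example, cancel when the user navigates away:
controller.abort(new DOMException('User cancelled', 'AbortError'));
```

Note that `AbortSignal.any` is comparatively recent (Chrome 116+, Node 20.3+), which effectively raises the runtime floor for this code path.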
@@ -2035,6 +2063,8 @@ const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;
 function processStream(response, apiSettings, inferenceSource) {
     const inputStream = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true }));
     const responseStream = getResponseStream(inputStream);
+    // We split the stream so the user can iterate over partial results (stream1)
+    // while we aggregate the full result for history/final response (stream2).
     const [stream1, stream2] = responseStream.tee();
     return {
         stream: generateResponseSequence(stream1, apiSettings, inferenceSource),
@@ -2071,7 +2101,6 @@ async function* generateResponseSequence(stream, apiSettings, inferenceSource) {
             enhancedResponse = createEnhancedContentResponse(value, inferenceSource);
         }
         const firstCandidate = enhancedResponse.candidates?.[0];
-        // Don't yield a response with no useful data for the developer.
         if (!firstCandidate?.content?.parts &&
             !firstCandidate?.finishReason &&
             !firstCandidate?.citationMetadata &&
|
|
|
2082
2111
|
}
|
|
2083
2112
|
}
|
|
2084
2113
|
/**
|
|
2085
|
-
* Reads a raw stream
|
|
2086
|
-
* chunks, returning a new stream that provides a single complete
|
|
2087
|
-
* GenerateContentResponse in each iteration.
|
|
2114
|
+
* Reads a raw string stream, buffers incomplete chunks, and yields parsed JSON objects.
|
|
2088
2115
|
*/
|
|
2089
2116
|
function getResponseStream(inputStream) {
|
|
2090
2117
|
const reader = inputStream.getReader();
|
|
@@ -2103,6 +2130,8 @@ function getResponseStream(inputStream) {
                 return;
             }
             currentText += value;
+            // SSE events may span chunk boundaries, so we buffer until we match
+            // the full "data: {json}\n\n" pattern.
             let match = currentText.match(responseLineRE);
             let parsedResponse;
             while (match) {
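The buffering described by the new comment is easy to see in isolation. A self-contained sketch (the chunk split points are invented) showing that a `data:` event straddling two network chunks only parses once its terminator arrives:

```ts
// Same pattern as responseLineRE above.
const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;

let currentText = '';
for (const chunk of ['data: {"a"', ':1}\n\n', 'data: {"b":2}\n\n']) {
  currentText += chunk; // buffer across chunk boundaries
  let match = currentText.match(responseLineRE);
  while (match) {
    console.log(JSON.parse(match[1])); // logs {"a":1}, then {"b":2}
    currentText = currentText.substring(match[0].length);
    match = currentText.match(responseLineRE);
  }
}
```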
@@ -2136,8 +2165,7 @@ function aggregateResponses(responses) {
     for (const response of responses) {
         if (response.candidates) {
             for (const candidate of response.candidates) {
-                //
-                // See: https://github.com/firebase/firebase-js-sdk/issues/8566
+                // Use 0 if index is undefined (protobuf default value omission).
                 const i = candidate.index || 0;
                 if (!aggregatedResponse.candidates) {
                     aggregatedResponse.candidates = [];
@@ -2147,7 +2175,7 @@ function aggregateResponses(responses) {
                         index: candidate.index
                     };
                 }
-                //
+                // Overwrite with the latest metadata
                 aggregatedResponse.candidates[i].citationMetadata =
                     candidate.citationMetadata;
                 aggregatedResponse.candidates[i].finishReason = candidate.finishReason;
@@ -2168,12 +2196,7 @@ function aggregateResponses(responses) {
                     aggregatedResponse.candidates[i].urlContextMetadata =
                         urlContextMetadata;
                 }
-                /**
-                 * Candidates should always have content and parts, but this handles
-                 * possible malformed responses.
-                 */
                 if (candidate.content) {
-                    // Skip a candidate without parts.
                     if (!candidate.content.parts) {
                         continue;
                     }
@@ -2305,7 +2328,7 @@ async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) {
+async function generateContentStreamOnCloud(apiSettings, model, params, singleRequestOptions) {
     if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
         params = mapGenerateContentRequest(params);
     }
@@ -2314,14 +2337,14 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) {
         model,
         apiSettings,
         stream: true,
-        requestOptions
+        singleRequestOptions
     }, JSON.stringify(params));
 }
-async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
-    const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
+async function generateContentStream(apiSettings, model, params, chromeAdapter, singleRequestOptions) {
+    const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, singleRequestOptions));
     return processStream(callResult.response, apiSettings, callResult.inferenceSource);
 }
-async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
+async function generateContentOnCloud(apiSettings, model, params, singleRequestOptions) {
     if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
         params = mapGenerateContentRequest(params);
     }
@@ -2330,16 +2353,16 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
         task: "generateContent" /* Task.GENERATE_CONTENT */,
         apiSettings,
         stream: false,
-        requestOptions
+        singleRequestOptions
     }, JSON.stringify(params));
 }
-async function templateGenerateContent(apiSettings, templateId, templateParams, requestOptions) {
+async function templateGenerateContent(apiSettings, templateId, templateParams, singleRequestOptions) {
     const response = await makeRequest({
         task: "templateGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_GENERATE_CONTENT */,
         templateId,
         apiSettings,
         stream: false,
-        requestOptions
+        singleRequestOptions
     }, JSON.stringify(templateParams));
     const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
     const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
@@ -2347,18 +2370,18 @@ async function templateGenerateContent(apiSettings, templateId, templateParams, requestOptions) {
         response: enhancedResponse
     };
 }
-async function templateGenerateContentStream(apiSettings, templateId, templateParams, requestOptions) {
+async function templateGenerateContentStream(apiSettings, templateId, templateParams, singleRequestOptions) {
     const response = await makeRequest({
         task: "templateStreamGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_STREAM_GENERATE_CONTENT */,
         templateId,
         apiSettings,
         stream: true,
-        requestOptions
+        singleRequestOptions
     }, JSON.stringify(templateParams));
     return processStream(response, apiSettings);
 }
-async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
-    const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
+async function generateContent(apiSettings, model, params, chromeAdapter, singleRequestOptions) {
+    const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, singleRequestOptions));
     const generateContentResponse = await processGenerateContentResponse(callResult.response, apiSettings);
     const enhancedResponse = createEnhancedContentResponse(generateContentResponse, callResult.inferenceSource);
     return {
@@ -2612,7 +2635,8 @@ function validateChatHistory(history) {
  * limitations under the License.
  */
 /**
- *
+ * Used to break the internal promise chain when an error is already handled
+ * by the user, preventing duplicate console logs.
  */
 const SILENT_ERROR = 'SILENT_ERROR';
 /**
@@ -2628,6 +2652,10 @@ class ChatSession {
         this.params = params;
         this.requestOptions = requestOptions;
         this._history = [];
+        /**
+         * Ensures sequential execution of chat messages to maintain history order.
+         * Each call waits for the previous one to settle before proceeding.
+         */
         this._sendPromise = Promise.resolve();
         this._apiSettings = apiSettings;
         if (params?.history) {
@@ -2648,7 +2676,7 @@ class ChatSession {
      * Sends a chat message and receives a non-streaming
      * {@link GenerateContentResult}
      */
-    async sendMessage(request) {
+    async sendMessage(request, singleRequestOptions) {
         await this._sendPromise;
         const newContent = formatNewContent(request);
         const generateContentRequest = {
@@ -2660,16 +2688,20 @@ class ChatSession {
             contents: [...this._history, newContent]
         };
         let finalResult = {};
-        // Add onto the chain.
         this._sendPromise = this._sendPromise
-            .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions))
+            .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, {
+            ...this.requestOptions,
+            ...singleRequestOptions
+        }))
             .then(result => {
+            // TODO: Make this update atomic. If creating `responseContent` throws,
+            // history will contain the user message but not the response, causing
+            // validation errors on the next request.
             if (result.response.candidates &&
                 result.response.candidates.length > 0) {
                 this._history.push(newContent);
                 const responseContent = {
                     parts: result.response.candidates?.[0].content.parts || [],
-                    // Response seems to come back without a role set.
                     role: result.response.candidates?.[0].content.role || 'model'
                 };
                 this._history.push(responseContent);
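Because the merge spreads `singleRequestOptions` last, per-call values override the model-level `requestOptions` key by key. A sketch of what that enables on a chat turn (prompts are placeholders; `model` is a `GenerativeModel` as in the earlier sketch):

```ts
const chat = model.startChat();

await chat.sendMessage('Hello!'); // uses the model-level requestOptions
await chat.sendMessage('Answer quickly.', { timeout: 5_000 }); // this turn only
```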
@@ -2690,7 +2722,7 @@ class ChatSession {
      * {@link GenerateContentStreamResult} containing an iterable stream
      * and a response promise.
      */
-    async sendMessageStream(request) {
+    async sendMessageStream(request, singleRequestOptions) {
         await this._sendPromise;
         const newContent = formatNewContent(request);
         const generateContentRequest = {
@@ -2701,21 +2733,29 @@ class ChatSession {
             systemInstruction: this.params?.systemInstruction,
             contents: [...this._history, newContent]
         };
-        const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions);
-
+        const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, {
+            ...this.requestOptions,
+            ...singleRequestOptions
+        });
+        // We hook into the chain to update history, but we don't block the
+        // return of `streamPromise` to the user.
         this._sendPromise = this._sendPromise
             .then(() => streamPromise)
-            // This must be handled to avoid unhandled rejection, but jump
-            // to the final catch block with a label to not log this error.
             .catch(_ignored => {
+            // If the initial fetch fails, the user's `streamPromise` rejects.
+            // We swallow the error here to prevent double logging in the final catch.
             throw new Error(SILENT_ERROR);
         })
             .then(streamResult => streamResult.response)
             .then(response => {
+            // This runs after the stream completes. Runtime errors here cannot be
+            // caught by the user because their promise has likely already resolved.
+            // TODO: Move response validation logic upstream to `stream-reader` so
+            // errors propagate to the user's `result.response` promise.
             if (response.candidates && response.candidates.length > 0) {
                 this._history.push(newContent);
+                // TODO: Validate that `response.candidates[0].content` is not null.
                 const responseContent = { ...response.candidates[0].content };
-                // Response seems to come back without a role set.
                 if (!responseContent.role) {
                     responseContent.role = 'model';
                 }
@@ -2729,12 +2769,8 @@ class ChatSession {
             }
         })
             .catch(e => {
-            //
-
-            // Avoid duplicating the error message in logs.
-            if (e.message !== SILENT_ERROR) {
-                // Users do not have access to _sendPromise to catch errors
-                // downstream from streamPromise, so they should not throw.
+            // Filter out errors already handled by the user or initiated by them.
+            if (e.message !== SILENT_ERROR && e.name !== 'AbortError') {
                 logger.error(e);
             }
         });
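The added `e.name !== 'AbortError'` check matters for the new cancellation path: when the caller aborts a streaming turn, the internal history-update chain rejects with that same `AbortError`, and logging it would report a deliberate cancellation as an SDK failure. A sketch of the caller side, reusing `chat` from the sketch above (prompt and cutoff point are invented):

```ts
const controller = new AbortController();
const { stream } = await chat.sendMessageStream('Tell me a long story.', {
  signal: controller.signal
});

try {
  for await (const chunk of stream) {
    console.log(chunk.text());
    controller.abort(); // stop after the first chunk
  }
} catch (e) {
  // Our own abort surfaces here; anything else is a real failure.
  if ((e as Error).name !== 'AbortError') {
    throw e;
  }
}
```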
@@ -2758,7 +2794,7 @@ class ChatSession {
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
+async function countTokensOnCloud(apiSettings, model, params, singleRequestOptions) {
     let body = '';
     if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
         const mappedParams = mapCountTokensRequest(params, model);
@@ -2772,7 +2808,7 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
         task: "countTokens" /* Task.COUNT_TOKENS */,
         apiSettings,
         stream: false,
-        requestOptions
+        singleRequestOptions
     }, body);
     return response.json();
 }
@@ -2808,6 +2844,7 @@ class GenerativeModel extends AIModel {
         super(ai, modelParams.model);
         this.chromeAdapter = chromeAdapter;
         this.generationConfig = modelParams.generationConfig || {};
+        validateGenerationConfig(this.generationConfig);
         this.safetySettings = modelParams.safetySettings || [];
         this.tools = modelParams.tools;
         this.toolConfig = modelParams.toolConfig;
@@ -2818,7 +2855,7 @@ class GenerativeModel extends AIModel {
      * Makes a single non-streaming call to the model
      * and returns an object containing a single {@link GenerateContentResponse}.
      */
-    async generateContent(request) {
+    async generateContent(request, singleRequestOptions) {
         const formattedParams = formatGenerateContentInput(request);
         return generateContent(this._apiSettings, this.model, {
             generationConfig: this.generationConfig,
@@ -2827,7 +2864,12 @@ class GenerativeModel extends AIModel {
             toolConfig: this.toolConfig,
             systemInstruction: this.systemInstruction,
             ...formattedParams
-        }, this.chromeAdapter, this.requestOptions);
+        }, this.chromeAdapter,
+        // Merge request options
+        {
+            ...this.requestOptions,
+            ...singleRequestOptions
+        });
     }
     /**
      * Makes a single streaming call to the model
@@ -2835,7 +2877,7 @@ class GenerativeModel extends AIModel {
      * over all chunks in the streaming response as well as
      * a promise that returns the final aggregated response.
      */
-    async generateContentStream(request) {
+    async generateContentStream(request, singleRequestOptions) {
         const formattedParams = formatGenerateContentInput(request);
         return generateContentStream(this._apiSettings, this.model, {
             generationConfig: this.generationConfig,
@@ -2844,7 +2886,12 @@ class GenerativeModel extends AIModel {
             toolConfig: this.toolConfig,
             systemInstruction: this.systemInstruction,
             ...formattedParams
-        }, this.chromeAdapter, this.requestOptions);
+        }, this.chromeAdapter,
+        // Merge request options
+        {
+            ...this.requestOptions,
+            ...singleRequestOptions
+        });
     }
     /**
      * Gets a new {@link ChatSession} instance which can be used for
@@ -2868,9 +2915,26 @@ class GenerativeModel extends AIModel {
     /**
      * Counts the tokens in the provided request.
      */
-    async countTokens(request) {
+    async countTokens(request, singleRequestOptions) {
         const formattedParams = formatGenerateContentInput(request);
-        return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter);
+        return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter,
+        // Merge request options
+        {
+            ...this.requestOptions,
+            ...singleRequestOptions
+        });
+    }
+}
+/**
+ * Client-side validation of some common `GenerationConfig` pitfalls, in order
+ * to save the developer a wasted request.
+ */
+function validateGenerationConfig(generationConfig) {
+    if (
+    // != allows for null and undefined. 0 is considered "set" by the model
+    generationConfig.thinkingConfig?.thinkingBudget != null &&
+        generationConfig.thinkingConfig?.thinkingLevel) {
+        throw new AIError(AIErrorCode.UNSUPPORTED, `Cannot set both thinkingBudget and thinkingLevel in a config.`);
     }
 }
 
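Since `validateGenerationConfig` runs in the `GenerativeModel` constructor, the conflict is caught before any request is sent. A sketch of the failing case, reusing the setup from the `ThinkingLevel` sketch earlier (the budget value is arbitrary):

```ts
// Throws AIError with code UNSUPPORTED at construction time:
// "Cannot set both thinkingBudget and thinkingLevel in a config."
getGenerativeModel(getAI(app), {
  model: 'gemini-2.5-flash-lite',
  generationConfig: {
    thinkingConfig: {
      thinkingBudget: 1024,            // explicit token budget
      thinkingLevel: ThinkingLevel.LOW // cannot be combined with a preset
    }
  }
});
```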
@@ -3321,7 +3385,7 @@ class ImagenModel extends AIModel {
      *
      * @public
      */
-    async generateImages(prompt) {
+    async generateImages(prompt, singleRequestOptions) {
         const body = createPredictRequestBody(prompt, {
             ...this.generationConfig,
             ...this.safetySettings
@@ -3331,7 +3395,11 @@ class ImagenModel extends AIModel {
             model: this.model,
             apiSettings: this._apiSettings,
             stream: false,
-            requestOptions: this.requestOptions
+            // Merge request options. Single request options overwrite the model's request options.
+            singleRequestOptions: {
+                ...this.requestOptions,
+                ...singleRequestOptions
+            }
         }, JSON.stringify(body));
         return handlePredictResponse(response);
     }
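The Imagen methods route the merged options through the request's `singleRequestOptions` field, so per-call overrides also reach `RequestURL.baseUrl` from the earlier hunk. A sketch assuming `getImagenModel` from this package (the model name and override URL are placeholders):

```ts
import { getImagenModel } from '@firebase/ai';

const imagen = getImagenModel(getAI(app), { model: 'imagen-3.0-generate-002' });

const { images } = await imagen.generateImages('A watercolor fox', {
  baseUrl: 'https://my-proxy.example.com', // hypothetical endpoint override
  timeout: 30_000
});
```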
@@ -3354,7 +3422,7 @@ class ImagenModel extends AIModel {
      * returned object will have a `filteredReason` property.
      * If all images are filtered, the `images` array will be empty.
      */
-    async generateImagesGCS(prompt, gcsURI) {
+    async generateImagesGCS(prompt, gcsURI, singleRequestOptions) {
         const body = createPredictRequestBody(prompt, {
             gcsURI,
             ...this.generationConfig,
@@ -3365,7 +3433,11 @@ class ImagenModel extends AIModel {
             model: this.model,
             apiSettings: this._apiSettings,
             stream: false,
-            requestOptions: this.requestOptions
+            // Merge request options. Single request options overwrite the model's request options.
+            singleRequestOptions: {
+                ...this.requestOptions,
+                ...singleRequestOptions
+            }
         }, JSON.stringify(body));
         return handlePredictResponse(response);
     }
@@ -3559,9 +3631,11 @@ class TemplateGenerativeModel {
      *
      * @beta
      */
-    async generateContent(templateId, templateVariables) {
-
-
+    async generateContent(templateId, templateVariables, singleRequestOptions) {
+        return templateGenerateContent(this._apiSettings, templateId, { inputs: templateVariables }, {
+            ...this.requestOptions,
+            ...singleRequestOptions
+        });
     }
     /**
      * Makes a single streaming call to the model and returns an object
@@ -3575,8 +3649,11 @@ class TemplateGenerativeModel {
      *
      * @beta
      */
-    async generateContentStream(templateId, templateVariables) {
-        return templateGenerateContentStream(this._apiSettings, templateId, { inputs: templateVariables }, this.requestOptions);
+    async generateContentStream(templateId, templateVariables, singleRequestOptions) {
+        return templateGenerateContentStream(this._apiSettings, templateId, { inputs: templateVariables }, {
+            ...this.requestOptions,
+            ...singleRequestOptions
+        });
     }
 }
 
@@ -3621,13 +3698,16 @@ class TemplateImagenModel {
      *
      * @beta
      */
-    async generateImages(templateId, templateVariables) {
+    async generateImages(templateId, templateVariables, singleRequestOptions) {
         const response = await makeRequest({
             task: "templatePredict" /* ServerPromptTemplateTask.TEMPLATE_PREDICT */,
             templateId,
             apiSettings: this._apiSettings,
             stream: false,
-            requestOptions: this.requestOptions
+            singleRequestOptions: {
+                ...this.requestOptions,
+                ...singleRequestOptions
+            }
         }, JSON.stringify({ inputs: templateVariables }));
         return handlePredictResponse(response);
     }
@@ -4462,5 +4542,5 @@ function registerAI() {
 }
 registerAI();
 
-export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, TemplateGenerativeModel, TemplateImagenModel, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, getTemplateGenerativeModel, getTemplateImagenModel, startAudioConversation };
+export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, TemplateGenerativeModel, TemplateImagenModel, ThinkingLevel, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, getTemplateGenerativeModel, getTemplateImagenModel, startAudioConversation };
 //# sourceMappingURL=index.esm.js.map