@providerprotocol/ai 0.0.11 → 0.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.d.ts +51 -15
- package/dist/anthropic/index.js +54 -19
- package/dist/anthropic/index.js.map +1 -1
- package/dist/{chunk-SUNYWHTH.js → chunk-MOU4U3PO.js} +55 -3
- package/dist/chunk-MOU4U3PO.js.map +1 -0
- package/dist/{chunk-Y6Q7JCNP.js → chunk-MSR5P65T.js} +1 -1
- package/dist/chunk-MSR5P65T.js.map +1 -0
- package/dist/{chunk-W4BB4BG2.js → chunk-SVYROCLD.js} +31 -11
- package/dist/chunk-SVYROCLD.js.map +1 -0
- package/dist/chunk-U4JJC2YX.js +234 -0
- package/dist/chunk-U4JJC2YX.js.map +1 -0
- package/dist/{chunk-X5G4EHL7.js → chunk-Z7RBRCRN.js} +1 -1
- package/dist/chunk-Z7RBRCRN.js.map +1 -0
- package/dist/google/index.d.ts +376 -7
- package/dist/google/index.js +127 -15
- package/dist/google/index.js.map +1 -1
- package/dist/http/index.d.ts +222 -25
- package/dist/http/index.js +3 -3
- package/dist/index.d.ts +1482 -198
- package/dist/index.js +233 -49
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.d.ts +92 -20
- package/dist/ollama/index.js +17 -7
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +340 -61
- package/dist/openai/index.js +57 -15
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +107 -51
- package/dist/openrouter/index.js +36 -8
- package/dist/openrouter/index.js.map +1 -1
- package/dist/provider-mKkz7Q9U.d.ts +488 -0
- package/dist/retry-Dh70lgr0.d.ts +508 -0
- package/dist/xai/index.d.ts +97 -22
- package/dist/xai/index.js +55 -19
- package/dist/xai/index.js.map +1 -1
- package/package.json +8 -12
- package/dist/chunk-CUCRF5W6.js +0 -136
- package/dist/chunk-CUCRF5W6.js.map +0 -1
- package/dist/chunk-SUNYWHTH.js.map +0 -1
- package/dist/chunk-W4BB4BG2.js.map +0 -1
- package/dist/chunk-X5G4EHL7.js.map +0 -1
- package/dist/chunk-Y6Q7JCNP.js.map +0 -1
- package/dist/provider-CUJWjgNl.d.ts +0 -192
- package/dist/retry-I2661_rv.d.ts +0 -118
- package/src/anthropic/index.ts +0 -3
- package/src/core/image.ts +0 -188
- package/src/core/llm.ts +0 -650
- package/src/core/provider.ts +0 -92
- package/src/google/index.ts +0 -3
- package/src/http/errors.ts +0 -112
- package/src/http/fetch.ts +0 -210
- package/src/http/index.ts +0 -31
- package/src/http/keys.ts +0 -136
- package/src/http/retry.ts +0 -205
- package/src/http/sse.ts +0 -136
- package/src/index.ts +0 -32
- package/src/ollama/index.ts +0 -3
- package/src/openai/index.ts +0 -39
- package/src/openrouter/index.ts +0 -11
- package/src/providers/anthropic/index.ts +0 -17
- package/src/providers/anthropic/llm.ts +0 -196
- package/src/providers/anthropic/transform.ts +0 -434
- package/src/providers/anthropic/types.ts +0 -213
- package/src/providers/google/index.ts +0 -17
- package/src/providers/google/llm.ts +0 -203
- package/src/providers/google/transform.ts +0 -447
- package/src/providers/google/types.ts +0 -214
- package/src/providers/ollama/index.ts +0 -43
- package/src/providers/ollama/llm.ts +0 -272
- package/src/providers/ollama/transform.ts +0 -434
- package/src/providers/ollama/types.ts +0 -260
- package/src/providers/openai/index.ts +0 -186
- package/src/providers/openai/llm.completions.ts +0 -201
- package/src/providers/openai/llm.responses.ts +0 -211
- package/src/providers/openai/transform.completions.ts +0 -561
- package/src/providers/openai/transform.responses.ts +0 -708
- package/src/providers/openai/types.ts +0 -1249
- package/src/providers/openrouter/index.ts +0 -177
- package/src/providers/openrouter/llm.completions.ts +0 -201
- package/src/providers/openrouter/llm.responses.ts +0 -211
- package/src/providers/openrouter/transform.completions.ts +0 -538
- package/src/providers/openrouter/transform.responses.ts +0 -742
- package/src/providers/openrouter/types.ts +0 -717
- package/src/providers/xai/index.ts +0 -223
- package/src/providers/xai/llm.completions.ts +0 -201
- package/src/providers/xai/llm.messages.ts +0 -195
- package/src/providers/xai/llm.responses.ts +0 -211
- package/src/providers/xai/transform.completions.ts +0 -565
- package/src/providers/xai/transform.messages.ts +0 -448
- package/src/providers/xai/transform.responses.ts +0 -678
- package/src/providers/xai/types.ts +0 -938
- package/src/types/content.ts +0 -133
- package/src/types/errors.ts +0 -85
- package/src/types/index.ts +0 -105
- package/src/types/llm.ts +0 -211
- package/src/types/messages.ts +0 -205
- package/src/types/provider.ts +0 -195
- package/src/types/schema.ts +0 -58
- package/src/types/stream.ts +0 -188
- package/src/types/thread.ts +0 -226
- package/src/types/tool.ts +0 -88
- package/src/types/turn.ts +0 -118
- package/src/utils/id.ts +0 -28
- package/src/xai/index.ts +0 -41
package/dist/google/index.d.ts
CHANGED
```diff
@@ -1,7 +1,29 @@
-import { b as Provider } from '../provider-
+import { b as Provider } from '../provider-mKkz7Q9U.js';
 
 /**
- * Google Gemini
+ * Provider-specific parameters for Google Gemini API requests.
+ *
+ * These parameters are passed through to the Google `generationConfig` field
+ * and control model behavior such as output length, randomness, and sampling
+ * strategies. All fields are optional and will use Google's defaults if omitted.
+ *
+ * @example
+ * ```typescript
+ * const params: GoogleLLMParams = {
+ *   maxOutputTokens: 2048,
+ *   temperature: 0.7,
+ *   topP: 0.9,
+ *   stopSequences: ['\n\n'],
+ * };
+ *
+ * const response = await model.complete({
+ *   messages: [...],
+ *   config: { apiKey: '...' },
+ *   params,
+ * });
+ * ```
+ *
+ * @see {@link https://ai.google.dev/api/rest/v1beta/GenerationConfig Google GenerationConfig docs}
  */
 interface GoogleLLMParams {
     /** Maximum number of tokens to generate */
@@ -51,19 +73,366 @@ interface GoogleLLMParams {
      * Thinking/reasoning configuration for Gemini 3+ models
      */
     thinkingConfig?: GoogleThinkingConfig;
+    /**
+     * Cached content name to use for this request.
+     * Format: "cachedContents/{id}" as returned from cache creation.
+     * When set, the cached content is prepended to the request.
+     */
+    cachedContent?: string;
 }
 /**
- *
+ * Configuration for extended thinking/reasoning in Gemini 3+ models.
+ *
+ * Enables models to spend additional compute on reasoning before
+ * generating a response, improving quality for complex tasks.
  */
 interface GoogleThinkingConfig {
-    /**
+    /** Token budget allocated for model thinking/reasoning before response generation. */
     thinkingBudget?: number;
 }
+/**
+ * A single content turn in the Google conversation format.
+ *
+ * Represents either a user message or model response, containing
+ * one or more parts that can be text, images, or function calls/responses.
+ */
+interface GoogleContent {
+    /** Role indicating message source: 'user' for user input, 'model' for assistant responses. */
+    role: 'user' | 'model';
+    /** Array of content parts within this message turn. */
+    parts: GooglePart[];
+}
+/**
+ * Union type for all possible content part types in Google messages.
+ *
+ * Parts can contain text, inline images, function calls (from model),
+ * or function responses (from user providing tool results).
+ */
+type GooglePart = GoogleTextPart | GoogleImagePart | GoogleFunctionCallPart | GoogleFunctionResponsePart;
+/**
+ * Text content part.
+ */
+interface GoogleTextPart {
+    /** The text content. */
+    text: string;
+}
+/**
+ * Inline image content part with base64-encoded data.
+ */
+interface GoogleImagePart {
+    /** Inline image data container. */
+    inlineData: {
+        /** MIME type of the image (e.g., 'image/png', 'image/jpeg'). */
+        mimeType: string;
+        /** Base64-encoded image data. */
+        data: string;
+    };
+}
+/**
+ * Function call part generated by the model.
+ *
+ * Represents the model's request to invoke a declared function with
+ * specific arguments.
+ */
+interface GoogleFunctionCallPart {
+    /** Function call details. */
+    functionCall: {
+        /** Name of the function to call. */
+        name: string;
+        /** Arguments to pass to the function. */
+        args: Record<string, unknown>;
+    };
+    /** Thought signature for Gemini 3+ models to maintain context across multi-turn tool calls. */
+    thoughtSignature?: string;
+}
+/**
+ * Function response part provided by the user.
+ *
+ * Contains the result of executing a function call, sent back to
+ * the model to continue the conversation.
+ */
+interface GoogleFunctionResponsePart {
+    /** Function response details. */
+    functionResponse: {
+        /** Name of the function that was called. */
+        name: string;
+        /** Response data from the function execution. */
+        response: Record<string, unknown>;
+    };
+}
+/**
+ * Tool definition containing function declarations.
+ *
+ * Google groups function declarations within a tools array, where each
+ * tool object contains an array of function declarations.
+ */
+interface GoogleTool {
+    /** Array of function declarations available for the model to call. */
+    functionDeclarations: GoogleFunctionDeclaration[];
+}
+/**
+ * Declaration of a callable function/tool for the model.
+ *
+ * Describes the function signature including its name, purpose,
+ * and expected parameters in JSON Schema format.
+ */
+interface GoogleFunctionDeclaration {
+    /** Unique name of the function. */
+    name: string;
+    /** Human-readable description of what the function does. */
+    description: string;
+    /** JSON Schema describing the function parameters. */
+    parameters: {
+        /** Schema type, always 'object' for function parameters. */
+        type: 'object';
+        /** Map of parameter names to their JSON Schema definitions. */
+        properties: Record<string, unknown>;
+        /** Array of required parameter names. */
+        required?: string[];
+    };
+}
+/**
+ * Response from creating or retrieving a cached content entry.
+ */
+interface GoogleCacheResponse {
+    /** Cache identifier in format "cachedContents/{id}" - use this in requests */
+    name: string;
+    /** Model this cache is associated with */
+    model: string;
+    /** Display name for the cache */
+    displayName?: string;
+    /** When the cache was created (RFC 3339 format) */
+    createTime: string;
+    /** When the cache was last updated (RFC 3339 format) */
+    updateTime: string;
+    /** When the cache expires (RFC 3339 format) */
+    expireTime: string;
+    /** Token usage metadata */
+    usageMetadata?: {
+        /** Total tokens in the cached content */
+        totalTokenCount: number;
+    };
+}
+/**
+ * Request body for updating a cached content entry.
+ * Only expiration can be updated; all other fields are immutable.
+ */
+interface GoogleCacheUpdateRequest {
+    /** New absolute expiration time (RFC 3339 format, mutually exclusive with ttl) */
+    expireTime?: string;
+    /** New time-to-live duration (e.g., "3600s", mutually exclusive with expireTime) */
+    ttl?: string;
+}
+/**
+ * Response from listing cached content entries.
+ */
+interface GoogleCacheListResponse {
+    /** Array of cached content entries */
+    cachedContents?: GoogleCacheResponse[];
+    /** Token for fetching the next page of results */
+    nextPageToken?: string;
+}
+
+/**
+ * @fileoverview Google Gemini caching utilities.
+ *
+ * Provides functions for creating and managing cached content entries
+ * that can be reused across multiple Gemini API requests to reduce
+ * costs and latency for repeated context.
+ *
+ * @see {@link https://ai.google.dev/api/caching Google Caching API docs}
+ * @module providers/google/cache
+ */
+
+/**
+ * Options for creating a cached content entry.
+ */
+interface CacheCreateOptions {
+    /** API key for authentication */
+    apiKey: string;
+    /** Model to associate with this cache (e.g., "gemini-1.5-flash-001") */
+    model: string;
+    /** Optional display name for the cache (max 128 chars) */
+    displayName?: string;
+    /** Content messages to cache */
+    contents?: GoogleContent[];
+    /** System instruction text to cache */
+    systemInstruction?: string;
+    /** Tool declarations to cache */
+    tools?: GoogleTool[];
+    /** Time-to-live duration (e.g., "3600s" for 1 hour) */
+    ttl?: string;
+    /** Absolute expiration time (RFC 3339 format, alternative to ttl) */
+    expireTime?: string;
+}
+/**
+ * Options for listing cached content entries.
+ */
+interface CacheListOptions {
+    /** API key for authentication */
+    apiKey: string;
+    /** Maximum number of caches to return per page */
+    pageSize?: number;
+    /** Token for fetching the next page of results */
+    pageToken?: string;
+}
+/**
+ * Creates a new cached content entry.
+ *
+ * Caches can contain system instructions, conversation content, and tool
+ * declarations that are reused across multiple requests. This reduces
+ * token costs and processing time for repeated context.
+ *
+ * @param options - Cache creation options
+ * @returns The created cache entry with its name/ID for use in requests
+ *
+ * @example
+ * ```typescript
+ * import { google } from '@anthropic/provider-protocol';
+ *
+ * // Create a cache with system instruction and large context
+ * const cache = await google.cache.create({
+ *   apiKey: process.env.GOOGLE_API_KEY,
+ *   model: 'gemini-1.5-flash-001',
+ *   displayName: 'Code Review Context',
+ *   systemInstruction: 'You are an expert code reviewer...',
+ *   contents: [
+ *     { role: 'user', parts: [{ text: largeCodebaseContent }] }
+ *   ],
+ *   ttl: '3600s', // 1 hour
+ * });
+ *
+ * // Use the cache in subsequent requests
+ * const response = await model.complete({
+ *   messages: [userMessage('Review this function')],
+ *   params: { cachedContent: cache.name },
+ * });
+ * ```
+ */
+declare function create(options: CacheCreateOptions): Promise<GoogleCacheResponse>;
+/**
+ * Retrieves a cached content entry by name.
+ *
+ * @param name - The cache name (format: "cachedContents/{id}")
+ * @param apiKey - API key for authentication
+ * @returns The cache entry details
+ *
+ * @example
+ * ```typescript
+ * const cache = await google.cache.get('cachedContents/abc123', apiKey);
+ * console.log(`Cache expires at: ${cache.expireTime}`);
+ * ```
+ */
+declare function get(name: string, apiKey: string): Promise<GoogleCacheResponse>;
+/**
+ * Lists all cached content entries.
+ *
+ * @param options - List options including API key and pagination
+ * @returns Array of cache entries and optional next page token
+ *
+ * @example
+ * ```typescript
+ * const { cachedContents, nextPageToken } = await google.cache.list({
+ *   apiKey: process.env.GOOGLE_API_KEY,
+ *   pageSize: 10,
+ * });
+ *
+ * for (const cache of cachedContents ?? []) {
+ *   console.log(`${cache.displayName}: ${cache.name}`);
+ * }
+ * ```
+ */
+declare function list(options: CacheListOptions): Promise<GoogleCacheListResponse>;
+/**
+ * Updates a cached content entry's expiration time.
+ *
+ * Only the expiration time can be updated; all other fields
+ * (contents, systemInstruction, tools) are immutable after creation.
+ *
+ * @param name - The cache name (format: "cachedContents/{id}")
+ * @param update - The update to apply (ttl or expireTime)
+ * @param apiKey - API key for authentication
+ * @returns The updated cache entry
+ *
+ * @example
+ * ```typescript
+ * // Extend cache expiration by 2 hours
+ * const updated = await google.cache.update(
+ *   'cachedContents/abc123',
+ *   { ttl: '7200s' },
+ *   apiKey
+ * );
+ * ```
+ */
+declare function update(name: string, updateRequest: GoogleCacheUpdateRequest, apiKey: string): Promise<GoogleCacheResponse>;
 
 /**
- * Google Gemini provider
- *
+ * Google Gemini provider for the Unified Provider Protocol (UPP).
+ *
+ * Provides access to Google's Gemini family of large language models through
+ * a standardized interface. Supports text generation, multimodal inputs
+ * (images, video, audio), tool/function calling, and structured output.
+ *
+ * @example
+ * ```typescript
+ * import { google } from './providers/google';
+ *
+ * // Create a model instance
+ * const gemini = google.llm.bind('gemini-1.5-pro');
+ *
+ * // Simple completion
+ * const response = await gemini.complete({
+ *   messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
+ *   config: { apiKey: process.env.GOOGLE_API_KEY },
+ * });
+ *
+ * // Streaming completion
+ * const stream = gemini.stream({
+ *   messages: [{ role: 'user', content: [{ type: 'text', text: 'Tell me a story' }] }],
+ *   config: { apiKey: process.env.GOOGLE_API_KEY },
+ * });
+ *
+ * for await (const event of stream) {
+ *   if (event.type === 'text_delta') {
+ *     process.stdout.write(event.delta.text);
+ *   }
+ * }
+ * ```
+ *
+ * @example Caching
+ * ```typescript
+ * // Create a cache for repeated context
+ * const cacheEntry = await google.cache.create({
+ *   apiKey: process.env.GOOGLE_API_KEY,
+ *   model: 'gemini-1.5-flash-001',
+ *   systemInstruction: 'You are an expert code reviewer...',
+ *   contents: [{ role: 'user', parts: [{ text: largeCodebase }] }],
+ *   ttl: '3600s',
+ * });
+ *
+ * // Use cache in requests
+ * const response = await gemini.complete({
+ *   messages: [userMessage('Review this function')],
+ *   config: { apiKey: process.env.GOOGLE_API_KEY },
+ *   params: { cachedContent: cacheEntry.name },
+ * });
+ *
+ * // Manage caches
+ * await google.cache.update(cacheEntry.name, { ttl: '7200s' }, apiKey);
+ * await google.cache.delete(cacheEntry.name, apiKey);
+ * ```
+ *
+ * @see {@link GoogleLLMParams} for provider-specific configuration options
+ * @see {@link cache} for caching utilities
  */
-declare const google: Provider<unknown
+declare const google: Provider<unknown> & {
+    cache: {
+        create: typeof create;
+        get: typeof get;
+        list: typeof list;
+        update: typeof update;
+        delete: (name: string, apiKey: string) => Promise<void>;
+    };
+};
 
 export { type GoogleLLMParams, google };
```
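Taken together, the new declarations add a per-request `cachedContent` parameter and a `google.cache` management namespace. Below is a minimal sketch of the combined flow; the `@providerprotocol/ai/google` import path is an assumption based on the dist layout (the `@example` blocks above import from other paths), and the message/config shapes are copied from those examples.

```typescript
// Sketch only: cache a large system prompt once, then reuse it via params.cachedContent.
import { google } from '@providerprotocol/ai/google'; // assumed subpath export

const apiKey = process.env.GOOGLE_API_KEY!;

// 1. Create a cached content entry (see CacheCreateOptions above).
const cacheEntry = await google.cache.create({
  apiKey,
  model: 'gemini-1.5-flash-001',
  systemInstruction: 'You are an expert code reviewer...',
  ttl: '3600s', // one hour
});

// 2. Reference the cache by name in a normal completion request.
const gemini = google.llm.bind('gemini-1.5-flash-001');
const response = await gemini.complete({
  messages: [{ role: 'user', content: [{ type: 'text', text: 'Review this function' }] }],
  config: { apiKey },
  params: { cachedContent: cacheEntry.name }, // "cachedContents/{id}"
});

// 3. Extend or drop the cache when finished.
await google.cache.update(cacheEntry.name, { ttl: '7200s' }, apiKey);
await google.cache.delete(cacheEntry.name, apiKey);
```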
package/dist/google/index.js
CHANGED
```diff
@@ -1,36 +1,43 @@
 import {
   createProvider
-} from "../chunk-
+} from "../chunk-MSR5P65T.js";
 import {
   AssistantMessage,
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-
+} from "../chunk-SVYROCLD.js";
 import {
   parseSSEStream
-} from "../chunk-
+} from "../chunk-Z7RBRCRN.js";
 import {
   UPPError,
   doFetch,
   doStreamFetch,
   normalizeHttpError,
   resolveApiKey
-} from "../chunk-
+} from "../chunk-MOU4U3PO.js";
 
 // src/providers/google/transform.ts
 function transformRequest(request, modelId) {
   const params = request.params ?? {};
+  const { cachedContent, ...generationParams } = params;
   const googleRequest = {
     contents: transformMessages(request.messages)
   };
   if (request.system) {
-
-
-
+    if (typeof request.system === "string") {
+      googleRequest.systemInstruction = {
+        parts: [{ text: request.system }]
+      };
+    } else {
+      googleRequest.systemInstruction = {
+        parts: request.system
+      };
+    }
   }
   const generationConfig = {
-    ...
+    ...generationParams
   };
   if (request.structure) {
     generationConfig.responseMimeType = "application/json";
@@ -46,6 +53,9 @@ function transformRequest(request, modelId) {
       }
     ];
   }
+  if (cachedContent) {
+    googleRequest.cachedContent = cachedContent;
+  }
   return googleRequest;
 }
 function filterValidContent(content) {
@@ -104,7 +114,6 @@ function transformMessages(messages) {
       parts: msg.results.map((result) => ({
         functionResponse: {
           name: result.toolCallId,
-          // Google uses the function name, but we store it in toolCallId
           response: typeof result.result === "object" ? result.result : { result: result.result }
         }
       }))
@@ -173,7 +182,6 @@ function transformResponse(data) {
       const fc = part;
       toolCalls.push({
         toolCallId: fc.functionCall.name,
-        // Google doesn't have call IDs, use name
         toolName: fc.functionCall.name,
         arguments: fc.functionCall.args
       });
@@ -192,7 +200,6 @@ function transformResponse(data) {
       google: {
         finishReason: candidate.finishReason,
         safetyRatings: candidate.safetyRatings,
-        // Store function call parts with thought signatures for multi-turn
         functionCallParts: functionCallParts.length > 0 ? functionCallParts : void 0
       }
     }
@@ -201,7 +208,9 @@ function transformResponse(data) {
   const usage = {
     inputTokens: data.usageMetadata?.promptTokenCount ?? 0,
     outputTokens: data.usageMetadata?.candidatesTokenCount ?? 0,
-    totalTokens: data.usageMetadata?.totalTokenCount ?? 0
+    totalTokens: data.usageMetadata?.totalTokenCount ?? 0,
+    cacheReadTokens: data.usageMetadata?.cachedContentTokenCount ?? 0,
+    cacheWriteTokens: 0
   };
   return {
     message,
@@ -217,6 +226,7 @@ function createStreamState() {
     finishReason: null,
     inputTokens: 0,
     outputTokens: 0,
+    cacheReadTokens: 0,
     isFirstChunk: true
   };
 }
@@ -229,6 +239,7 @@ function transformStreamChunk(chunk, state) {
   if (chunk.usageMetadata) {
     state.inputTokens = chunk.usageMetadata.promptTokenCount;
     state.outputTokens = chunk.usageMetadata.candidatesTokenCount;
+    state.cacheReadTokens = chunk.usageMetadata.cachedContentTokenCount ?? 0;
   }
   const candidate = chunk.candidates?.[0];
   if (!candidate) {
@@ -297,7 +308,6 @@ function buildResponseFromState(state) {
     metadata: {
       google: {
         finishReason: state.finishReason,
-        // Store function call parts with thought signatures for multi-turn
         functionCallParts: functionCallParts.length > 0 ? functionCallParts : void 0
       }
     }
@@ -306,7 +316,9 @@ function buildResponseFromState(state) {
   const usage = {
     inputTokens: state.inputTokens,
     outputTokens: state.outputTokens,
-    totalTokens: state.inputTokens + state.outputTokens
+    totalTokens: state.inputTokens + state.outputTokens,
+    cacheReadTokens: state.cacheReadTokens,
+    cacheWriteTokens: 0
   };
   return {
     message,
@@ -462,14 +474,114 @@ function createLLMHandler() {
   };
 }
 
+// src/providers/google/cache.ts
+var CACHE_API_BASE = "https://generativelanguage.googleapis.com/v1beta/cachedContents";
+async function create(options) {
+  const {
+    apiKey,
+    model,
+    displayName,
+    contents,
+    systemInstruction,
+    tools,
+    ttl,
+    expireTime
+  } = options;
+  const requestBody = {
+    model: model.startsWith("models/") ? model : `models/${model}`
+  };
+  if (displayName) {
+    requestBody.displayName = displayName;
+  }
+  if (contents && contents.length > 0) {
+    requestBody.contents = contents;
+  }
+  if (systemInstruction) {
+    requestBody.systemInstruction = {
+      parts: [{ text: systemInstruction }]
+    };
+  }
+  if (tools && tools.length > 0) {
+    requestBody.tools = tools;
+  }
+  if (ttl) {
+    requestBody.ttl = ttl;
+  } else if (expireTime) {
+    requestBody.expireTime = expireTime;
+  }
+  const response = await fetch(`${CACHE_API_BASE}?key=${apiKey}`, {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify(requestBody)
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Failed to create cache: ${response.status} ${error}`);
+  }
+  return response.json();
+}
+async function get(name, apiKey) {
+  const cacheName = name.startsWith("cachedContents/") ? name : `cachedContents/${name}`;
+  const url = `https://generativelanguage.googleapis.com/v1beta/${cacheName}?key=${apiKey}`;
+  const response = await fetch(url, { method: "GET" });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Failed to get cache: ${response.status} ${error}`);
+  }
+  return response.json();
+}
+async function list(options) {
+  const { apiKey, pageSize, pageToken } = options;
+  const params = new URLSearchParams({ key: apiKey });
+  if (pageSize) params.set("pageSize", String(pageSize));
+  if (pageToken) params.set("pageToken", pageToken);
+  const response = await fetch(`${CACHE_API_BASE}?${params}`, { method: "GET" });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Failed to list caches: ${response.status} ${error}`);
+  }
+  return response.json();
+}
+async function update(name, updateRequest, apiKey) {
+  const cacheName = name.startsWith("cachedContents/") ? name : `cachedContents/${name}`;
+  const url = `https://generativelanguage.googleapis.com/v1beta/${cacheName}?key=${apiKey}`;
+  const response = await fetch(url, {
+    method: "PATCH",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify(updateRequest)
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Failed to update cache: ${response.status} ${error}`);
+  }
+  return response.json();
+}
+async function deleteCache(name, apiKey) {
+  const cacheName = name.startsWith("cachedContents/") ? name : `cachedContents/${name}`;
+  const url = `https://generativelanguage.googleapis.com/v1beta/${cacheName}?key=${apiKey}`;
+  const response = await fetch(url, { method: "DELETE" });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Failed to delete cache: ${response.status} ${error}`);
+  }
+}
+var cache = {
+  create,
+  get,
+  list,
+  update,
+  delete: deleteCache
+};
+
 // src/providers/google/index.ts
-var 
+var baseProvider = createProvider({
   name: "google",
   version: "1.0.0",
   modalities: {
     llm: createLLMHandler()
   }
 });
+var google = Object.assign(baseProvider, { cache });
 export {
   google
 };
```