@omnikit-ai/sdk 2.0.8 → 2.0.9

package/dist/index.d.mts CHANGED
@@ -229,12 +229,12 @@ interface LLMMessage {
     }>;
 }
 /**
- * Available LLM models for InvokeLLM
- * - 'gemini-flash': Fast and cost-effective (default)
- * - 'gemini-pro': Smarter with extended thinking (128 token thinking budget)
- * - 'gemini-pro-3': Gemini 3 Pro Preview with low thinking effort
+ * Available LLM models:
+ * - Gemini 2.5: 'gemini-2.5-flash-lite' (fastest), 'gemini-2.5-flash', 'gemini-2.5-pro'
+ * - Gemini 3: 'gemini-3-flash' (best multimodal), 'gemini-3-pro' (advanced reasoning)
+ * - Legacy aliases: 'gemini-flash', 'gemini-pro', 'gemini-pro-3' (for backward compatibility)
  */
-type LLMModel = 'gemini-flash' | 'gemini-pro' | 'gemini-pro-3';
+type LLMModel = 'gemini-2.5-flash-lite' | 'gemini-2.5-flash' | 'gemini-2.5-pro' | 'gemini-3-flash' | 'gemini-3-pro' | 'gemini-flash' | 'gemini-pro' | 'gemini-pro-3';
 interface LLMParams {
     /** Message-based format for advanced use */
     messages?: LLMMessage[];
@@ -254,10 +254,11 @@ interface LLMParams {
         type: 'json_object';
     } | Record<string, any>;
     /**
-     * Model to use for LLM processing
-     * - 'gemini-flash': Fast and cost-effective (default)
-     * - 'gemini-pro': Smarter with extended thinking for complex reasoning
-     * - 'gemini-pro-3': Gemini 3 Pro Preview with low thinking effort
+     * Model to use for LLM processing.
+     * Defaults: 'gemini-2.5-flash-lite' for text, 'gemini-3-flash' for files/images.
+     * - 'gemini-2.5-flash-lite': Fastest, best for simple text tasks
+     * - 'gemini-3-flash': Best multimodal (images, PDFs, videos)
+     * - 'gemini-2.5-pro' / 'gemini-3-pro': Advanced reasoning
      */
     model?: LLMModel | string;
     /**
package/dist/index.d.ts CHANGED
@@ -229,12 +229,12 @@ interface LLMMessage {
     }>;
 }
 /**
- * Available LLM models for InvokeLLM
- * - 'gemini-flash': Fast and cost-effective (default)
- * - 'gemini-pro': Smarter with extended thinking (128 token thinking budget)
- * - 'gemini-pro-3': Gemini 3 Pro Preview with low thinking effort
+ * Available LLM models:
+ * - Gemini 2.5: 'gemini-2.5-flash-lite' (fastest), 'gemini-2.5-flash', 'gemini-2.5-pro'
+ * - Gemini 3: 'gemini-3-flash' (best multimodal), 'gemini-3-pro' (advanced reasoning)
+ * - Legacy aliases: 'gemini-flash', 'gemini-pro', 'gemini-pro-3' (for backward compatibility)
  */
-type LLMModel = 'gemini-flash' | 'gemini-pro' | 'gemini-pro-3';
+type LLMModel = 'gemini-2.5-flash-lite' | 'gemini-2.5-flash' | 'gemini-2.5-pro' | 'gemini-3-flash' | 'gemini-3-pro' | 'gemini-flash' | 'gemini-pro' | 'gemini-pro-3';
 interface LLMParams {
     /** Message-based format for advanced use */
     messages?: LLMMessage[];
@@ -254,10 +254,11 @@ interface LLMParams {
         type: 'json_object';
     } | Record<string, any>;
     /**
-     * Model to use for LLM processing
-     * - 'gemini-flash': Fast and cost-effective (default)
-     * - 'gemini-pro': Smarter with extended thinking for complex reasoning
-     * - 'gemini-pro-3': Gemini 3 Pro Preview with low thinking effort
+     * Model to use for LLM processing.
+     * Defaults: 'gemini-2.5-flash-lite' for text, 'gemini-3-flash' for files/images.
+     * - 'gemini-2.5-flash-lite': Fastest, best for simple text tasks
+     * - 'gemini-3-flash': Best multimodal (images, PDFs, videos)
+     * - 'gemini-2.5-pro' / 'gemini-3-pro': Advanced reasoning
      */
     model?: LLMModel | string;
     /**
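
Both declaration files ship the same change: five explicit model names join the LLMModel union while the three legacy aliases remain valid. A minimal sketch of what this means for callers; the union is mirrored locally so the snippet stands alone, and the param shape is trimmed to the two fields visible in the diff:

// Mirrors the widened union from the diff (defined locally for a self-contained sketch).
type LLMModel =
  | "gemini-2.5-flash-lite" | "gemini-2.5-flash" | "gemini-2.5-pro" // Gemini 2.5
  | "gemini-3-flash" | "gemini-3-pro"                               // Gemini 3
  | "gemini-flash" | "gemini-pro" | "gemini-pro-3";                 // legacy aliases

// Trimmed view of LLMParams: only the fields shown in the diff.
interface LLMParamsSketch {
  messages?: unknown[];
  model?: LLMModel | string; // arbitrary strings still allowed, as in the typings
}

const fast: LLMParamsSketch = { model: "gemini-2.5-flash-lite" }; // new explicit name
const legacy: LLMParamsSketch = { model: "gemini-pro" };          // alias still compiles

Because the union still includes the old names, existing callers compile unchanged; the aliases only change meaning at the mapping layer shown in index.js below.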
package/dist/index.js CHANGED
@@ -437,6 +437,14 @@ var LiveVoiceSessionImpl = class {
 
 // src/client.ts
 var LLM_MODEL_MAP = {
+  // Gemini 2.5 models
+  "gemini-2.5-flash-lite": "vertex_ai/gemini-2.5-flash-lite",
+  "gemini-2.5-flash": "vertex_ai/gemini-2.5-flash",
+  "gemini-2.5-pro": "vertex_ai/gemini-2.5-pro",
+  // Gemini 3 models
+  "gemini-3-flash": "vertex_ai/gemini-3-flash-preview",
+  "gemini-3-pro": "vertex_ai/gemini-3-pro-preview",
+  // Legacy aliases (for backward compatibility)
   "gemini-flash": "vertex_ai/gemini-2.5-flash",
   "gemini-pro": "vertex_ai/gemini-2.5-pro",
   "gemini-pro-3": "vertex_ai/gemini-3-pro-preview"
@@ -1211,6 +1219,12 @@ Example: await ${collectionName}.list({ limit: 100, sort: '-created_at' })`,
   } else if (client.userToken) {
     headers["Authorization"] = `Bearer ${client.userToken}`;
   }
+  if ((normalizedName === "UploadFile" || normalizedName === "uploadFile") && params?.file instanceof File) {
+    return client.handleFileUpload(params.file, servicePath, useServiceToken);
+  }
+  if ((normalizedName === "UploadPrivateFile" || normalizedName === "uploadPrivateFile") && params?.file instanceof File) {
+    return client.handleFileUpload(params.file, servicePath, useServiceToken);
+  }
   const fetchResponse = await fetch(
     `${client.baseUrl}/apps/${client.appId}/${servicePath}`,
     {
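
These branches intercept UploadFile and UploadPrivateFile (in either casing) before the generic JSON fetch whenever `params.file` is a `File`, delegating to `client.handleFileUpload`. Since the two branches are identical apart from the integration name, the check condenses to a single predicate; the sketch below assumes nothing beyond what the diff shows:

// Condensed sketch of the new dispatch; the two diff branches differ only in name.
const UPLOAD_INTEGRATIONS = new Set([
  "UploadFile", "uploadFile", "UploadPrivateFile", "uploadPrivateFile",
]);

interface UploadCapableClient {
  handleFileUpload(file: File, servicePath: string, useServiceToken: boolean): Promise<unknown>;
}

async function maybeHandleUpload(
  client: UploadCapableClient,
  normalizedName: string,
  servicePath: string,
  useServiceToken: boolean,
  params?: { file?: unknown },
): Promise<unknown | undefined> {
  if (UPLOAD_INTEGRATIONS.has(normalizedName) && params?.file instanceof File) {
    // Short-circuits before the fetch to `${baseUrl}/apps/${appId}/${servicePath}`.
    return client.handleFileUpload(params.file, servicePath, useServiceToken);
  }
  return undefined; // caller falls through to the generic fetch path
}

Non-File payloads (and any other integration) keep taking the original fetch path, so this is additive routing rather than a behavioral change for existing callers.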