@livekit/agents-plugin-google 1.0.30 → 1.0.32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/beta/realtime/api_proto.cjs.map +1 -1
- package/dist/beta/realtime/api_proto.d.cts +9 -2
- package/dist/beta/realtime/api_proto.d.ts +9 -2
- package/dist/beta/realtime/api_proto.d.ts.map +1 -1
- package/dist/beta/realtime/realtime_api.cjs +38 -13
- package/dist/beta/realtime/realtime_api.cjs.map +1 -1
- package/dist/beta/realtime/realtime_api.d.cts +10 -0
- package/dist/beta/realtime/realtime_api.d.ts +10 -0
- package/dist/beta/realtime/realtime_api.d.ts.map +1 -1
- package/dist/beta/realtime/realtime_api.js +38 -13
- package/dist/beta/realtime/realtime_api.js.map +1 -1
- package/dist/llm.cjs +3 -1
- package/dist/llm.cjs.map +1 -1
- package/dist/llm.d.ts.map +1 -1
- package/dist/llm.js +3 -1
- package/dist/llm.js.map +1 -1
- package/package.json +6 -6
- package/src/beta/realtime/api_proto.ts +46 -6
- package/src/beta/realtime/realtime_api.ts +55 -10
- package/src/llm.ts +2 -0
package/dist/llm.js
CHANGED
@@ -340,7 +340,9 @@ class LLMStream extends llm.LLMStream {
             llm.FunctionCall.create({
               callId: part.functionCall.id || shortuuid("function_call_"),
               name: part.functionCall.name,
-              args: JSON.stringify(part.functionCall.args)
+              args: JSON.stringify(part.functionCall.args),
+              // Preserve thought signature for Gemini 3+ thinking mode
+              thoughtSignature: part.thoughtSignature
             })
           ]
         }
package/dist/llm.js.map
CHANGED
@@ -1 +1 @@
[generated source map regenerated: the embedded sourcesContent of src/llm.ts now carries the thoughtSignature addition shown in the dist/llm.js diff above; the single-line JSON payload is omitted here]
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@livekit/agents-plugin-google",
-  "version": "1.0.30",
+  "version": "1.0.32",
   "description": "Google Gemini plugin for LiveKit Node Agents",
   "main": "dist/index.js",
   "require": "dist/index.cjs",
@@ -29,19 +29,19 @@
     "@microsoft/api-extractor": "^7.35.0",
     "tsup": "^8.3.5",
     "typescript": "^5.0.0",
-    "@livekit/agents": "1.0.30",
-    "@livekit/agents-plugin-openai": "1.0.30",
-    "@livekit/agents-plugins-test": "1.0.30"
+    "@livekit/agents": "1.0.32",
+    "@livekit/agents-plugin-openai": "1.0.32",
+    "@livekit/agents-plugins-test": "1.0.32"
   },
   "dependencies": {
-    "@google/genai": "^1.
+    "@google/genai": "^1.34.0",
     "@livekit/mutex": "^1.1.1",
     "@types/json-schema": "^7.0.15",
     "json-schema": "^0.4.0"
   },
   "peerDependencies": {
     "@livekit/rtc-node": "^0.13.22",
-    "@livekit/agents": "1.0.30"
+    "@livekit/agents": "1.0.32"
   },
   "scripts": {
     "build": "tsup --onSuccess \"pnpm build:types\"",
package/src/beta/realtime/api_proto.ts
CHANGED
@@ -5,18 +5,58 @@ import type * as types from '@google/genai';

 /**
  * Supported Google Live API models
+ *
+ * Gemini API deprecations: https://ai.google.dev/gemini-api/docs/deprecations
+ * Gemini API release notes with preview deprecations: https://ai.google.dev/gemini-api/docs/changelog
+ * Live models: https://docs.cloud.google.com/vertex-ai/generative-ai/docs/live-api
+ * VertexAI retirement: https://docs.cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions#retired-models
+ * Additional references:
+ * 1. https://github.com/kazunori279/adk-streaming-test/blob/main/test_report.md
  */
 export type LiveAPIModels =
-
-
-  | 'gemini-2.
-  | 'gemini-2.5-flash-preview-native-audio
-
+  // VertexAI models
+  | 'gemini-live-2.5-flash-native-audio' // GA https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash-live-api#live-2.5-flash
+  | 'gemini-live-2.5-flash-preview-native-audio-09-2025' // Public preview https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash-live-api#live-2.5-flash-preview
+  | 'gemini-live-2.5-flash-preview-native-audio' // still works, possibly an alias, but not mentioned in any docs or changelog
+  // Gemini API models
+  | 'gemini-2.5-flash-native-audio-preview-12-2025' // https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-live
+  | 'gemini-2.5-flash-native-audio-preview-09-2025' // https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-live
+  | 'gemini-2.0-flash-exp'; // still works in Gemini API but not VertexAI

 /**
  * Available voice options for Google Realtime API
  */
-export type Voice =
+export type Voice =
+  | 'Achernar'
+  | 'Achird'
+  | 'Algenib'
+  | 'Algieba'
+  | 'Alnilam'
+  | 'Aoede'
+  | 'Autonoe'
+  | 'Callirrhoe'
+  | 'Charon'
+  | 'Despina'
+  | 'Enceladus'
+  | 'Erinome'
+  | 'Fenrir'
+  | 'Gacrux'
+  | 'Iapetus'
+  | 'Kore'
+  | 'Laomedeia'
+  | 'Leda'
+  | 'Orus'
+  | 'Pulcherrima'
+  | 'Puck'
+  | 'Rasalgethi'
+  | 'Sadachbia'
+  | 'Sadaltager'
+  | 'Schedar'
+  | 'Sulafat'
+  | 'Umbriel'
+  | 'Vindemiatrix'
+  | 'Zephyr'
+  | 'Zubenelgenubi';

 /**
  * Union type for all possible client events
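For orientation, a minimal usage sketch of the expanded unions. This is hedged: the beta.realtime export path is assumed from this package's source layout (src/beta/realtime/...), and the voice option is assumed from the RealtimeModel options in the next file section, not shown verbatim in this diff.

import * as google from '@livekit/agents-plugin-google';

// Hypothetical sketch: pick a Live API model name from the union above.
// 'gemini-live-2.5-flash-native-audio' is the VertexAI GA model; the
// 'gemini-2.5-flash-native-audio-preview-*' names target the Gemini API.
const realtimeModel = new google.beta.realtime.RealtimeModel({
  model: 'gemini-live-2.5-flash-native-audio',
  voice: 'Charon', // any member of the Voice union, e.g. 'Puck', 'Aoede'
  vertexai: true,
});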
package/src/beta/realtime/realtime_api.ts
CHANGED
@@ -102,6 +102,7 @@ interface RealtimeOptions {
   contextWindowCompression?: ContextWindowCompressionConfig;
   apiVersion?: string;
   geminiTools?: LLMTools;
+  thinkingConfig?: types.ThinkingConfig;
 }

 /**
@@ -136,6 +137,10 @@ export class RealtimeModel extends llm.RealtimeModel {
   /** @internal */
   _options: RealtimeOptions;

+  get model(): string {
+    return this._options.model;
+  }
+
   constructor(
     options: {
       /**
@@ -273,6 +278,14 @@ export class RealtimeModel extends llm.RealtimeModel {
        * Gemini-specific tools to use for the session
        */
       geminiTools?: LLMTools;
+
+      /**
+       * Thinking configuration for native audio models.
+       * If not set, the model's default thinking behavior is used.
+       * Use `\{ thinkingBudget: 0 \}` to disable thinking.
+       * Use `\{ thinkingBudget: -1 \}` for automatic/dynamic thinking.
+       */
+      thinkingConfig?: types.ThinkingConfig;
     } = {},
   ) {
     const inputAudioTranscription =
@@ -300,7 +313,9 @@ export class RealtimeModel extends llm.RealtimeModel {
     const vertexai = options.vertexai ?? false;

     // Model selection based on API type
-    const defaultModel = vertexai
+    const defaultModel = vertexai
+      ? 'gemini-live-2.5-flash-native-audio'
+      : 'gemini-2.5-flash-native-audio-preview-12-2025';

     this._options = {
       model: options.model || defaultModel,
@@ -330,6 +345,7 @@ export class RealtimeModel extends llm.RealtimeModel {
       contextWindowCompression: options.contextWindowCompression,
       apiVersion: options.apiVersion,
       geminiTools: options.geminiTools,
+      thinkingConfig: options.thinkingConfig,
     };
   }

@@ -934,9 +950,11 @@ export class RealtimeSession extends llm.RealtimeSession {
       unlock();
     }

+    // start new generation for serverContent or for standalone toolCalls (functionChannel closed)
     if (
       (!this.currentGeneration || this.currentGeneration._done) &&
-      (response.serverContent ||
+      (response.serverContent ||
+        (response.toolCall && this.currentGeneration?.functionChannel.closed !== false))
     ) {
       this.startNewGeneration();
     }
@@ -1034,7 +1052,7 @@ export class RealtimeSession extends llm.RealtimeSession {
     return obj;
   }

-  private markCurrentGenerationDone(): void {
+  private markCurrentGenerationDone(keepFunctionChannelOpen: boolean = false): void {
     if (!this.currentGeneration || this.currentGeneration._done) {
       return;
     }
@@ -1076,7 +1094,9 @@ export class RealtimeSession extends llm.RealtimeSession {

     gen.textChannel.close();
     gen.audioChannel.close();
-    gen.functionChannel.close();
+    if (!keepFunctionChannelOpen) {
+      gen.functionChannel.close();
+    }
     gen.messageChannel.close();
     gen._done = true;
   }
@@ -1095,6 +1115,7 @@ export class RealtimeSession extends llm.RealtimeSession {
     const opts = this.options;

     const config: types.LiveConnectConfig = {
+      thinkingConfig: opts.thinkingConfig,
       responseModalities: opts.responseModalities,
       systemInstruction: opts.instructions
         ? {
@@ -1156,6 +1177,11 @@ export class RealtimeSession extends llm.RealtimeSession {
   }

   private startNewGeneration(): void {
+    // close functionChannel of previous generation if still open (no toolCall arrived)
+    if (this.currentGeneration && !this.currentGeneration.functionChannel.closed) {
+      this.currentGeneration.functionChannel.close();
+    }
+
     if (this.currentGeneration && !this.currentGeneration._done) {
       this.#logger.warn('Starting new generation while another is active. Finalizing previous.');
       this.markCurrentGenerationDone();
@@ -1196,6 +1222,7 @@ export class RealtimeSession extends llm.RealtimeSession {
       messageStream: this.currentGeneration.messageChannel.stream(),
       functionStream: this.currentGeneration.functionChannel.stream(),
       userInitiated: false,
+      responseId,
     };

     if (this.pendingGenerationFut && !this.pendingGenerationFut.done) {
@@ -1233,6 +1260,11 @@ export class RealtimeSession extends llm.RealtimeSession {
     const turn = serverContent.modelTurn;

     for (const part of turn.parts || []) {
+      // bypass reasoning/thought output
+      if (part.thought) {
+        continue;
+      }
+
       if (part.text) {
         gen.outputText += part.text;
         gen.textChannel.write(part.text);
@@ -1301,7 +1333,8 @@ export class RealtimeSession extends llm.RealtimeSession {
     }

     if (serverContent.turnComplete) {
-      this.markCurrentGenerationDone();
+      // keep functionChannel open for potential late-arriving toolCalls
+      this.markCurrentGenerationDone(true);
     }
   }

@@ -1313,14 +1346,26 @@ export class RealtimeSession extends llm.RealtimeSession {

     const gen = this.currentGeneration;

+    if (gen.functionChannel.closed) {
+      this.#logger.warn('received tool call but functionChannel is already closed.');
+      return;
+    }
+
     for (const fc of toolCall.functionCalls || []) {
-
-
-
-
-
+      if (!fc.name) {
+        this.#logger.warn('received function call without name, skipping');
+        continue;
+      }
+      gen.functionChannel.write(
+        llm.FunctionCall.create({
+          callId: fc.id || shortuuid('fnc-call-'),
+          name: fc.name,
+          args: fc.args ? JSON.stringify(fc.args) : '',
+        }),
+      );
     }

+    gen.functionChannel.close();
     this.markCurrentGenerationDone();
   }
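Taken together, the new thinkingConfig option threads from the RealtimeModel constructor into the LiveConnectConfig sent at session start. A minimal sketch of setting it, hedged: the beta.realtime export path is assumed from the source layout, and the budget semantics are taken from the TSDoc in the diff above.

import * as google from '@livekit/agents-plugin-google';

// Per the TSDoc in this diff: { thinkingBudget: 0 } disables thinking,
// { thinkingBudget: -1 } lets the model decide dynamically, and leaving
// the option unset keeps the model's default thinking behavior.
const realtimeModel = new google.beta.realtime.RealtimeModel({
  thinkingConfig: { thinkingBudget: 0 },
});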
package/src/llm.ts
CHANGED
@@ -449,6 +449,8 @@ export class LLMStream extends llm.LLMStream {
             callId: part.functionCall.id || shortuuid('function_call_'),
             name: part.functionCall.name!,
             args: JSON.stringify(part.functionCall.args!),
+            // Preserve thought signature for Gemini 3+ thinking mode
+            thoughtSignature: part.thoughtSignature,
           }),
         ],
       },
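For context on why the field is preserved: Gemini's thinking models attach a signature to function-call parts, and that signature generally has to be echoed back on the follow-up request for the model's reasoning context to carry over. A sketch of the changed branch factored into a standalone helper; the helper name is illustrative, while the FunctionCall.create fields are exactly those in the diff.

import type * as types from '@google/genai';
import { llm, shortuuid } from '@livekit/agents';

// Illustrative helper mirroring the changed #parsePart branch: copy the
// part-level thoughtSignature onto the FunctionCall so it can be sent back
// to Gemini on the next turn that carries the tool result.
function partToFunctionCall(part: types.Part): llm.FunctionCall | undefined {
  if (!part.functionCall) return undefined;
  return llm.FunctionCall.create({
    callId: part.functionCall.id || shortuuid('function_call_'),
    name: part.functionCall.name!,
    args: JSON.stringify(part.functionCall.args!),
    // Preserve thought signature for Gemini 3+ thinking mode
    thoughtSignature: part.thoughtSignature,
  });
}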