@mastra/voice-google-gemini-live 0.0.0-remove-unused-model-providers-api-20251030210744 → 0.0.0-safe-stringify-telemetry-20251205024938

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1260,7 +1260,7 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  sessionDurationTimeout;
  // Tool integration properties
  tools;
- requestContext;
+ runtimeContext;
  // Store the configuration options
  options;
  /**
@@ -1497,67 +1497,70 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  /**
  * Establish connection to the Gemini Live API
  */
- async connect({ requestContext } = {}) {
- if (this.state === "connected") {
- this.log("Already connected to Gemini Live API");
- return;
- }
- this.requestContext = requestContext;
- this.emit("session", { state: "connecting" });
- try {
- let wsUrl;
- let headers = {};
- if (this.options.vertexAI) {
- wsUrl = `wss://${this.options.location}-aiplatform.googleapis.com/ws/google.cloud.aiplatform.v1beta1.PredictionService.ServerStreamingPredict`;
- await this.authManager.initialize();
- const accessToken = await this.authManager.getAccessToken();
- headers = { headers: { Authorization: `Bearer ${accessToken}` } };
- this.log("Using Vertex AI authentication with OAuth token");
- } else {
- wsUrl = `wss://generativelanguage.googleapis.com/ws/google.ai.generativelanguage.v1alpha.GenerativeService.BidiGenerateContent`;
- headers = {
- headers: {
- "x-goog-api-key": this.options.apiKey || "",
- "Content-Type": "application/json"
- }
- };
- this.log("Using Live API authentication with API key");
- }
- this.log("Connecting to:", wsUrl);
- this.ws = new WebSocket(wsUrl, void 0, headers);
- this.connectionManager.setWebSocket(this.ws);
- this.setupEventListeners();
- await this.connectionManager.waitForOpen();
- if (this.isResuming && this.sessionHandle) {
- await this.sendSessionResumption();
- } else {
- this.sendInitialConfig();
- this.sessionStartTime = Date.now();
- this.sessionId = randomUUID();
+ async connect({ runtimeContext } = {}) {
+ return this.traced(async () => {
+ if (this.state === "connected") {
+ this.log("Already connected to Gemini Live API");
+ return;
  }
- await this.waitForSessionCreated();
- this.state = "connected";
- this.emit("session", {
- state: "connected",
- config: {
+ this.runtimeContext = runtimeContext;
+ this.emit("session", { state: "connecting" });
+ try {
+ let wsUrl;
+ let headers = {};
+ if (this.options.vertexAI) {
+ const location = this.getVertexLocation();
+ wsUrl = `wss://${location}-aiplatform.googleapis.com/ws/google.cloud.aiplatform.v1beta1.LlmBidiService/BidiGenerateContent`;
+ await this.authManager.initialize();
+ const accessToken = await this.authManager.getAccessToken();
+ headers = { headers: { Authorization: `Bearer ${accessToken}` } };
+ this.log("Using Vertex AI authentication with OAuth token");
+ } else {
+ wsUrl = `wss://generativelanguage.googleapis.com/ws/google.ai.generativelanguage.v1alpha.GenerativeService.BidiGenerateContent`;
+ headers = {
+ headers: {
+ "x-goog-api-key": this.options.apiKey || "",
+ "Content-Type": "application/json"
+ }
+ };
+ this.log("Using Live API authentication with API key");
+ }
+ this.log("Connecting to:", wsUrl);
+ this.ws = new WebSocket(wsUrl, void 0, headers);
+ this.connectionManager.setWebSocket(this.ws);
+ this.setupEventListeners();
+ await this.connectionManager.waitForOpen();
+ if (this.isResuming && this.sessionHandle) {
+ await this.sendSessionResumption();
+ } else {
+ this.sendInitialConfig();
+ this.sessionStartTime = Date.now();
+ this.sessionId = randomUUID();
+ }
+ await this.waitForSessionCreated();
+ this.state = "connected";
+ this.emit("session", {
+ state: "connected",
+ config: {
+ sessionId: this.sessionId,
+ isResuming: this.isResuming,
+ toolCount: Object.keys(this.tools || {}).length
+ }
+ });
+ this.log("Successfully connected to Gemini Live API", {
  sessionId: this.sessionId,
  isResuming: this.isResuming,
  toolCount: Object.keys(this.tools || {}).length
+ });
+ if (this.options.sessionConfig?.maxDuration) {
+ this.startSessionDurationMonitor();
  }
- });
- this.log("Successfully connected to Gemini Live API", {
- sessionId: this.sessionId,
- isResuming: this.isResuming,
- toolCount: Object.keys(this.tools || {}).length
- });
- if (this.options.sessionConfig?.maxDuration) {
- this.startSessionDurationMonitor();
+ } catch (error) {
+ this.state = "disconnected";
+ this.log("Connection failed", error);
+ throw error;
  }
- } catch (error) {
- this.state = "disconnected";
- this.log("Connection failed", error);
- throw error;
- }
+ }, "gemini-live.connect")();
  }
  /**
  * Disconnect from the Gemini Live API
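
Note: connect() now runs inside this.traced(fn, name)(), which records the call under the span name "gemini-live.connect"; the Vertex AI branch also switches to the LlmBidiService/BidiGenerateContent endpoint and resolves its region via the new getVertexLocation() helper. The wrapper's internals are not part of this diff; a minimal sketch of the assumed semantics (hypothetical implementation; the real helper is inherited from MastraVoice):

    // Hypothetical sketch: traced(fn, name) returns a function that runs fn
    // inside a telemetry span, matching call sites such as
    // this.traced(async () => { ... }, "gemini-live.connect")().
    traced(fn, name) {
      return async (...args) => {
        const span = this.telemetry?.tracer?.startSpan?.(name); // assumed telemetry hook
        try {
          return await fn(...args);
        } finally {
          span?.end?.();
        }
      };
    }
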
@@ -1595,164 +1598,172 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  * Send text to be converted to speech
  */
  async speak(input, options) {
- this.validateConnectionState();
- if (typeof input !== "string") {
- const chunks = [];
- for await (const chunk of input) {
- chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(String(chunk)));
+ return this.traced(async () => {
+ this.validateConnectionState();
+ if (typeof input !== "string") {
+ const chunks = [];
+ for await (const chunk of input) {
+ chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(String(chunk)));
+ }
+ input = Buffer.concat(chunks).toString("utf-8");
  }
- input = Buffer.concat(chunks).toString("utf-8");
- }
- if (input.trim().length === 0) {
- throw this.createAndEmitError("invalid_audio_format" /* INVALID_AUDIO_FORMAT */, "Input text is empty");
- }
- this.addToContext("user", input);
- const textMessage = {
- client_content: {
- turns: [
- {
- role: "user",
- parts: [
- {
- text: input
- }
- ]
- }
- ],
- turnComplete: true
+ if (input.trim().length === 0) {
+ throw this.createAndEmitError("invalid_audio_format" /* INVALID_AUDIO_FORMAT */, "Input text is empty");
  }
- };
- if (options && (options.speaker || options.languageCode || options.responseModalities)) {
- const updateMessage = {
- type: "session.update",
- session: {
- generation_config: {
- ...options.responseModalities ? { response_modalities: options.responseModalities } : {},
- speech_config: {
- ...options.languageCode ? { language_code: options.languageCode } : {},
- ...options.speaker ? { voice_config: { prebuilt_voice_config: { voice_name: options.speaker } } } : {}
+ this.addToContext("user", input);
+ const textMessage = {
+ client_content: {
+ turns: [
+ {
+ role: "user",
+ parts: [
+ {
+ text: input
+ }
+ ]
  }
- }
+ ],
+ turnComplete: true
  }
  };
+ if (options && (options.speaker || options.languageCode || options.responseModalities)) {
+ const updateMessage = {
+ type: "session.update",
+ session: {
+ generation_config: {
+ ...options.responseModalities ? { response_modalities: options.responseModalities } : {},
+ speech_config: {
+ ...options.languageCode ? { language_code: options.languageCode } : {},
+ ...options.speaker ? { voice_config: { prebuilt_voice_config: { voice_name: options.speaker } } } : {}
+ }
+ }
+ }
+ };
+ try {
+ this.sendEvent("session.update", updateMessage);
+ this.log("Applied per-turn runtime options", options);
+ } catch (error) {
+ this.log("Failed to apply per-turn runtime options", error);
+ }
+ }
  try {
- this.sendEvent("session.update", updateMessage);
- this.log("Applied per-turn runtime options", options);
+ this.sendEvent("client_content", textMessage);
+ this.log("Text message sent", { text: input });
  } catch (error) {
- this.log("Failed to apply per-turn runtime options", error);
+ this.log("Failed to send text message", error);
+ throw this.createAndEmitError("audio_processing_error" /* AUDIO_PROCESSING_ERROR */, "Failed to send text message", error);
  }
- }
- try {
- this.sendEvent("client_content", textMessage);
- this.log("Text message sent", { text: input });
- } catch (error) {
- this.log("Failed to send text message", error);
- throw this.createAndEmitError("audio_processing_error" /* AUDIO_PROCESSING_ERROR */, "Failed to send text message", error);
- }
+ }, "gemini-live.speak")();
  }
  /**
  * Send audio stream for processing
  */
  async send(audioData) {
- this.validateConnectionState();
- if ("readable" in audioData && typeof audioData.on === "function") {
- const stream = audioData;
- stream.on("data", (chunk) => {
- try {
- const base64Audio = this.audioStreamManager.processAudioChunk(chunk);
- const message = this.audioStreamManager.createAudioMessage(base64Audio, "realtime");
- this.sendEvent("realtime_input", message);
- } catch (error) {
- this.log("Failed to process audio chunk", error);
- this.createAndEmitError("audio_processing_error" /* AUDIO_PROCESSING_ERROR */, "Failed to process audio chunk", error);
- }
- });
- stream.on("error", (error) => {
- this.log("Audio stream error", error);
- this.createAndEmitError("audio_stream_error" /* AUDIO_STREAM_ERROR */, "Audio stream error", error);
- });
- stream.on("end", () => {
- this.log("Audio stream ended");
- });
- } else {
- const validateAudio = this.audioStreamManager.validateAndConvertAudioInput(audioData);
- const base64Audio = this.audioStreamManager.int16ArrayToBase64(validateAudio);
- const message = this.audioStreamManager.createAudioMessage(base64Audio, "realtime");
- this.sendEvent("realtime_input", message);
- }
+ return this.traced(async () => {
+ this.validateConnectionState();
+ if ("readable" in audioData && typeof audioData.on === "function") {
+ const stream = audioData;
+ stream.on("data", (chunk) => {
+ try {
+ const base64Audio = this.audioStreamManager.processAudioChunk(chunk);
+ const message = this.audioStreamManager.createAudioMessage(base64Audio, "realtime");
+ this.sendEvent("realtime_input", message);
+ } catch (error) {
+ this.log("Failed to process audio chunk", error);
+ this.createAndEmitError("audio_processing_error" /* AUDIO_PROCESSING_ERROR */, "Failed to process audio chunk", error);
+ }
+ });
+ stream.on("error", (error) => {
+ this.log("Audio stream error", error);
+ this.createAndEmitError("audio_stream_error" /* AUDIO_STREAM_ERROR */, "Audio stream error", error);
+ });
+ stream.on("end", () => {
+ this.log("Audio stream ended");
+ });
+ } else {
+ const validateAudio = this.audioStreamManager.validateAndConvertAudioInput(audioData);
+ const base64Audio = this.audioStreamManager.int16ArrayToBase64(validateAudio);
+ const message = this.audioStreamManager.createAudioMessage(base64Audio, "realtime");
+ this.sendEvent("realtime_input", message);
+ }
+ }, "gemini-live.send")();
  }
  /**
  * Process speech from audio stream (traditional STT interface)
  */
  async listen(audioStream, _options) {
- this.validateConnectionState();
- let transcriptionText = "";
- const onWriting = (data) => {
- if (data.role === "user") {
- transcriptionText += data.text;
- this.log("Received transcription text:", { text: data.text, total: transcriptionText });
- }
- };
- const onError = (error) => {
- throw new Error(`Transcription failed: ${error.message}`);
- };
- const onSession = (data) => {
- if (data.state === "disconnected") {
- throw new Error("Session disconnected during transcription");
- }
- };
- this.on("writing", onWriting);
- this.on("error", onError);
- this.on("session", onSession);
- try {
- const result = await this.audioStreamManager.handleAudioTranscription(
- audioStream,
- (base64Audio) => {
- return new Promise((resolve, reject) => {
- try {
- const message = this.audioStreamManager.createAudioMessage(base64Audio, "input");
- const cleanup = () => {
- this.off("turnComplete", onTurnComplete);
- this.off("error", onErr);
- };
- const onTurnComplete = () => {
- cleanup();
- resolve(transcriptionText.trim());
- };
- const onErr = (e) => {
- cleanup();
- reject(new Error(e.message));
- };
- this.on("turnComplete", onTurnComplete);
- this.on("error", onErr);
- this.sendEvent("client_content", message);
- this.log("Sent audio for transcription");
- } catch (err) {
- reject(err);
- }
- });
- },
- (error) => {
- this.createAndEmitError("audio_processing_error" /* AUDIO_PROCESSING_ERROR */, "Audio transcription failed", error);
+ return this.traced(async () => {
+ this.validateConnectionState();
+ let transcriptionText = "";
+ const onWriting = (data) => {
+ if (data.role === "user") {
+ transcriptionText += data.text;
+ this.log("Received transcription text:", { text: data.text, total: transcriptionText });
  }
- );
- return result;
- } finally {
- this.off("writing", onWriting);
- this.off("error", onError);
- this.off("session", onSession);
- }
+ };
+ const onError = (error) => {
+ throw new Error(`Transcription failed: ${error.message}`);
+ };
+ const onSession = (data) => {
+ if (data.state === "disconnected") {
+ throw new Error("Session disconnected during transcription");
+ }
+ };
+ this.on("writing", onWriting);
+ this.on("error", onError);
+ this.on("session", onSession);
+ try {
+ const result = await this.audioStreamManager.handleAudioTranscription(
+ audioStream,
+ (base64Audio) => {
+ return new Promise((resolve, reject) => {
+ try {
+ const message = this.audioStreamManager.createAudioMessage(base64Audio, "input");
+ const cleanup = () => {
+ this.off("turnComplete", onTurnComplete);
+ this.off("error", onErr);
+ };
+ const onTurnComplete = () => {
+ cleanup();
+ resolve(transcriptionText.trim());
+ };
+ const onErr = (e) => {
+ cleanup();
+ reject(new Error(e.message));
+ };
+ this.on("turnComplete", onTurnComplete);
+ this.on("error", onErr);
+ this.sendEvent("client_content", message);
+ this.log("Sent audio for transcription");
+ } catch (err) {
+ reject(err);
+ }
+ });
+ },
+ (error) => {
+ this.createAndEmitError("audio_processing_error" /* AUDIO_PROCESSING_ERROR */, "Audio transcription failed", error);
+ }
+ );
+ return result;
+ } finally {
+ this.off("writing", onWriting);
+ this.off("error", onError);
+ this.off("session", onSession);
+ }
+ }, "gemini-live.listen")();
  }
  /**
  * Get available speakers/voices
  */
  async getSpeakers() {
- return [
- { voiceId: "Puck", description: "Conversational, friendly" },
- { voiceId: "Charon", description: "Deep, authoritative" },
- { voiceId: "Kore", description: "Neutral, professional" },
- { voiceId: "Fenrir", description: "Warm, approachable" }
- ];
+ return this.traced(async () => {
+ return [
+ { voiceId: "Puck", description: "Conversational, friendly" },
+ { voiceId: "Charon", description: "Deep, authoritative" },
+ { voiceId: "Kore", description: "Neutral, professional" },
+ { voiceId: "Fenrir", description: "Warm, approachable" }
+ ];
+ }, "gemini-live.getSpeakers")();
  }
  /**
  * Resume a previous session using a session handle
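
Note: speak(), send(), listen(), and getSpeakers() get the same traced wrapper, so each call is reported under its own span name. The public call pattern is unchanged apart from the connect() argument rename; an illustrative usage sketch (the option values, audioStream, and the runtimeContext instance are assumptions, not from this diff):

    const voice = new GeminiLiveVoice({ apiKey: process.env.GOOGLE_API_KEY });
    await voice.connect({ runtimeContext }); // renamed from { requestContext }
    await voice.speak("Hello from Gemini Live", { speaker: "Puck" });
    const transcript = await voice.listen(audioStream); // resolves once the turn completes
    await voice.disconnect();
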
@@ -2257,6 +2268,18 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  role: "assistant"
  });
  }
+ if (part.functionCall) {
+ this.log("Found function call in serverContent.modelTurn.parts", part.functionCall);
+ const toolCallData = {
+ toolCall: {
+ name: part.functionCall.name,
+ args: part.functionCall.args || {},
+ id: part.functionCall.id || randomUUID()
+ }
+ };
+ void this.handleToolCall(toolCallData);
+ continue;
+ }
  if (part.inlineData?.mimeType?.includes("audio") && typeof part.inlineData.data === "string") {
  try {
  const audioData = part.inlineData.data;
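
Note: the server-content handler now recognizes function calls that arrive inside modelTurn.parts and forwards them to handleToolCall before audio parts are processed. An illustrative incoming payload (the name, args, and id values are made up):

    {
      serverContent: {
        modelTurn: {
          parts: [
            { functionCall: { name: "getWeather", args: { city: "Berlin" }, id: "call-1" } }
          ]
        }
      }
    }
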
@@ -2331,9 +2354,24 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  if (!data.toolCall) {
  return;
  }
- const toolName = data.toolCall.name || "";
- const toolArgs = data.toolCall.args || {};
- const toolId = data.toolCall.id || randomUUID();
+ let toolCalls = [];
+ if (data.toolCall.functionCalls && Array.isArray(data.toolCall.functionCalls)) {
+ toolCalls = data.toolCall.functionCalls;
+ } else if (data.toolCall.name) {
+ toolCalls = [{ name: data.toolCall.name, args: data.toolCall.args, id: data.toolCall.id }];
+ }
+ for (const toolCall of toolCalls) {
+ const toolName = toolCall.name || "";
+ const toolArgs = toolCall.args || {};
+ const toolId = toolCall.id || randomUUID();
+ await this.processSingleToolCall(toolName, toolArgs, toolId);
+ }
+ }
+ /**
+ * Process a single tool call
+ * @private
+ */
+ async processSingleToolCall(toolName, toolArgs, toolId) {
  this.log("Processing tool call", { toolName, toolArgs, toolId });
  this.emit("toolCall", {
  name: toolName,
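
Note: handleToolCall now accepts both the batched shape (toolCall.functionCalls as an array) and the older single-call shape (name/args/id directly on toolCall), dispatching each entry to the new processSingleToolCall helper in order. An illustrative batched payload (values made up):

    {
      toolCall: {
        functionCalls: [
          { name: "getWeather", args: { city: "Berlin" }, id: "call-1" },
          { name: "getTime", args: {}, id: "call-2" }
        ]
      }
    }
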
@@ -2354,7 +2392,7 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  if (tool.execute) {
  this.log("Executing tool", { toolName, toolArgs });
  result = await tool.execute(
- { context: toolArgs, requestContext: this.requestContext },
+ { context: toolArgs, runtimeContext: this.runtimeContext },
  {
  toolCallId: toolId,
  messages: []
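
Note: tool executors now receive runtimeContext in place of requestContext. A sketch of a tool reading it (assumes Mastra's createTool from @mastra/core/tools and a Map-like runtime context; the "userId" key is illustrative):

    import { createTool } from "@mastra/core/tools";
    import { z } from "zod";

    const greetTool = createTool({
      id: "greet",
      description: "Greet the current user",
      inputSchema: z.object({ name: z.string() }),
      execute: async ({ context, runtimeContext }) => {
        // runtimeContext is whatever was passed to voice.connect({ runtimeContext })
        const userId = runtimeContext?.get?.("userId");
        return { greeting: `Hello, ${context.name}!`, userId };
      },
    });
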
@@ -2366,23 +2404,31 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  result = { error: "Tool has no execute function" };
  }
  const toolResultMessage = {
- tool_result: {
- tool_call_id: toolId,
- result
+ toolResponse: {
+ functionResponses: [
+ {
+ id: toolId,
+ response: result
+ }
+ ]
  }
  };
- this.sendEvent("tool_result", toolResultMessage);
+ this.sendEvent("toolResponse", toolResultMessage);
  this.log("Tool result sent", { toolName, toolId, result });
  } catch (error) {
  const errorMessage = error instanceof Error ? error.message : "Unknown error";
  this.log("Tool execution failed", { toolName, error: errorMessage });
  const errorResultMessage = {
- tool_result: {
- tool_call_id: toolId,
- result: { error: errorMessage }
+ toolResponse: {
+ functionResponses: [
+ {
+ id: toolId,
+ response: { error: errorMessage }
+ }
+ ]
  }
  };
- this.sendEvent("tool_result", errorResultMessage);
+ this.sendEvent("toolResponse", errorResultMessage);
  this.createAndEmitError("tool_execution_error" /* TOOL_EXECUTION_ERROR */, `Tool execution failed: ${errorMessage}`, {
  toolName,
  toolArgs,
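
Note: tool results now go out in the Live API's toolResponse envelope instead of the old tool_result shape. The message built above serializes to the following (the id and response body are illustrative):

    {
      toolResponse: {
        functionResponses: [
          { id: "call-1", response: { temperature: 21 } }
        ]
      }
    }
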
@@ -2442,6 +2488,31 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  }
  return "text";
  }
+ /**
+ * Resolve Vertex AI location with sensible default
+ * @private
+ */
+ getVertexLocation() {
+ return this.options.location?.trim() || "us-central1";
+ }
+ /**
+ * Resolve the correct model identifier for Gemini API or Vertex AI
+ * @private
+ */
+ resolveModelIdentifier() {
+ const model = this.options.model ?? DEFAULT_MODEL;
+ if (!this.options.vertexAI) {
+ return `models/${model}`;
+ }
+ if (!this.options.project) {
+ throw this.createAndEmitError(
+ "project_id_missing" /* PROJECT_ID_MISSING */,
+ "Google Cloud project ID is required when using Vertex AI."
+ );
+ }
+ const location = this.getVertexLocation();
+ return `projects/${this.options.project}/locations/${location}/publishers/google/models/${model}`;
+ }
  /**
  * Send initial configuration to Gemini Live API
  * @private
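
Note: resolveModelIdentifier() returns the backend-specific model path that the setup message below now uses. With an illustrative model name and project (the concrete values are assumptions):

    // Gemini API (vertexAI: false)
    "models/gemini-2.0-flash-exp"

    // Vertex AI (vertexAI: true, project: "my-project", location defaulting to "us-central1")
    "projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash-exp"
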
@@ -2452,7 +2523,7 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  }
  const setupMessage = {
  setup: {
- model: `models/${this.options.model}`
+ model: this.resolveModelIdentifier()
  }
  };
  if (this.options.instructions) {
@@ -2601,6 +2672,8 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  message = data;
  } else if (type === "realtime_input" && data.realtime_input) {
  message = data;
+ } else if (type === "toolResponse" && data.toolResponse) {
+ message = data;
  } else if (type === "session.update" && data.session) {
  message = data;
  } else {
@@ -2651,7 +2724,7 @@ var GeminiLiveVoice = class _GeminiLiveVoice extends MastraVoice {
  * Get the current tools configured for this voice instance
  * @returns Object containing the current tools
  */
- listTools() {
+ getTools() {
  return this.tools;
  }
  log(message, ...args) {
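
Note: listTools() is renamed to getTools(); call sites need a one-line update:

    const tools = voice.getTools(); // was: voice.listTools()
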