telnyx-mcp 6.42.0 → 6.44.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3142,13 +3142,13 @@ const EMBEDDED_METHODS = [
3142
3142
  'tags?: string[];',
3143
3143
  "telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; };",
3144
3144
  'tool_ids?: string[];',
3145
- "tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[];",
3145
+ "tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[];",
3146
3146
  'transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; };',
3147
3147
  "voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; };",
3148
3148
  "widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; };",
3149
3149
  ],
3150
3150
  response: '{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }',
3151
- markdown: "## create\n\n`client.ai.assistants.create(instructions: string, name: string, description?: string, dynamic_variables?: object, dynamic_variables_webhook_timeout_ms?: number, dynamic_variables_webhook_url?: string, enabled_features?: 'telephony' | 'messaging'[], external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }, fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }, greeting?: string, insight_settings?: { insight_group_id?: string; }, integrations?: { integration_id: string; allowed_list?: string[]; }[], interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }, llm_api_key_ref?: string, mcp_servers?: { id: string; allowed_tools?: string[]; }[], messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }, model?: string, observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }, post_conversation_settings?: { enabled?: boolean; }, privacy_settings?: { data_retention?: boolean; }, tags?: string[], telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }, tool_ids?: string[], tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: 
object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[], transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }, voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }, widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: 
widget_settings; }`\n\n**post** `/ai/assistants`\n\nCreate a new AI Assistant.\n\n### Parameters\n\n- `instructions: string`\n System instructions for the assistant. These may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables)\n\n- `name: string`\n\n- `description?: string`\n\n- `dynamic_variables?: object`\n Map of dynamic variables and their default values\n\n- `dynamic_variables_webhook_timeout_ms?: number`\n Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables).\n\n- `dynamic_variables_webhook_url?: string`\n If `dynamic_variables_webhook_url` is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. **Gotcha:** the webhook response must wrap variables under a top-level `dynamic_variables` object, e.g. `{\"dynamic_variables\": {\"customer_name\": \"Jane\"}}`. Returning a flat object will be ignored and variables will fall back to their defaults. 
See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) for the full request/response format and timeout behavior.\n\n- `enabled_features?: 'telephony' | 'messaging'[]`\n\n- `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `base_url: string`\n Base URL for the external LLM endpoint.\n - `model: string`\n Model identifier to use with the external LLM endpoint.\n - `authentication_method?: 'token' | 'certificate'`\n Authentication method used when connecting to the external LLM endpoint.\n - `certificate_ref?: string`\n Integration secret identifier for the client certificate used with certificate authentication.\n - `forward_metadata?: boolean`\n When `true`, Telnyx forwards the assistant's dynamic variables to the external LLM endpoint as a top-level `extra_metadata` object on the chat completion request body. Defaults to `false`. Example payload sent to the external endpoint: `{\"extra_metadata\": {\"customer_name\": \"Jane\", \"account_id\": \"acct_789\", \"telnyx_agent_target\": \"+13125550100\", \"telnyx_end_user_target\": \"+13125550123\"}}`. 
Distinct from OpenAI's native `metadata` field, which has its own size and type limits.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the external LLM API key.\n - `token_retrieval_url?: string`\n URL used to retrieve an access token when certificate authentication is enabled.\n\n- `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `llm_api_key_ref?: string`\n Integration secret identifier for the fallback model API key.\n - `model?: string`\n Fallback Telnyx-hosted model to use when the primary LLM provider is unavailable.\n\n- `greeting?: string`\n Text that the assistant will use to start the conversation. This may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). Use an empty string to have the assistant wait for the user to speak first. Use the special value `<assistant-speaks-first-with-model-generated-message>` to have the assistant generate the greeting based on the system instructions.\n\n- `insight_settings?: { insight_group_id?: string; }`\n - `insight_group_id?: string`\n Reference to an Insight Group. Insights in this group will be run automatically for all the assistant's conversations.\n\n- `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n Connected integrations attached to the assistant. The catalog of available integrations is at `/ai/integrations`; the user's connected integrations are at `/ai/integrations/connections`. 
Each item references a catalog integration by `integration_id`.\n\n- `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn behavior is controlled by the transcription end-of-turn settings under `transcription.settings` (`eot_threshold`, `eot_timeout_ms`, `eager_eot_threshold`).\n - `enable?: boolean`\n Whether users can interrupt the assistant while it is speaking.\n - `start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }`\n Controls when the assistant starts speaking after the user stops. These thresholds primarily apply to non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn detection is driven by the transcription end-of-turn settings under `transcription.settings` instead.\n\n- `llm_api_key_ref?: string`\n This is only needed when using third-party inference providers selected by `model`. The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use `external_llm.llm_api_key_ref` instead. Warning: Free plans are unlikely to work with this integration.\n\n- `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n MCP servers attached to the assistant. 
Create MCP servers with `/ai/mcp_servers`, then reference them by `id` here.\n\n- `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `conversation_inactivity_minutes?: number`\n If more than this many minutes have passed since the last message, the assistant will start a new conversation instead of continuing the existing one.\n - `default_messaging_profile_id?: string`\n Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.\n - `delivery_status_webhook_url?: string`\n The URL where webhooks related to delivery statuses for assistant messages will be sent.\n\n- `model?: string`\n ID of the model to use when `external_llm` is not set. You can use the [Get models API](https://developers.telnyx.com/api-reference/chat/get-available-models) to see available models. If `external_llm` is provided, the assistant uses `external_llm` instead of this field. If neither `model` nor `external_llm` is provided, Telnyx applies the default model.\n\n- `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `host?: string`\n - `public_key_ref?: string`\n - `secret_key_ref?: string`\n - `status?: 'enabled' | 'disabled'`\n\n- `post_conversation_settings?: { enabled?: boolean; }`\n Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.\n - `enabled?: boolean`\n Whether post-conversation processing is enabled. 
When true, the assistant will be invoked after the conversation ends to perform any final tool calls. Defaults to false.\n\n- `privacy_settings?: { data_retention?: boolean; }`\n - `data_retention?: boolean`\n If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.\n\n- `tags?: string[]`\n Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.\n\n- `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `default_texml_app_id?: string`\n Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.\n - `noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'`\n The noise suppression engine to use. Use 'disabled' to turn off noise suppression.\n - `noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }`\n Configuration for noise suppression. 
Only applicable when noise_suppression is 'deepfilternet'.\n - `recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }`\n Configuration for call recording format and channel settings.\n - `supports_unauthenticated_web_calls?: boolean`\n When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.\n - `time_limit_secs?: number`\n Maximum duration in seconds for the AI assistant to participate on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `user_idle_reply_secs?: number`\n Duration in seconds of end user silence before the assistant checks in on the user. When this limit is reached the assistant will prompt the user to respond. This is distinct from user_idle_timeout_secs which stops the assistant entirely.\n - `user_idle_timeout_secs?: number`\n Maximum duration in seconds of end user silence on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: { message?: string; prompt?: string; type?: 'prompt' | 'message'; }; }; }`\n Configuration for voicemail detection (AMD - Answering Machine Detection) on outgoing calls. These settings only apply if AMD is enabled on the Dial command. See [TeXML Dial documentation](https://developers.telnyx.com/api-reference/texml-rest-commands/initiate-an-outbound-call) for enabling AMD. 
Recommended settings: MachineDetection=Enable, AsyncAmd=true, DetectionMode=Premium.\n\n- `tool_ids?: string[]`\n IDs of shared tools to attach to the assistant. New integrations should prefer `tool_ids` over inline `tools`.\n\n- `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n Deprecated for new integrations. Inline tool definitions available to the assistant. 
Prefer `tool_ids` to attach shared tools created with the AI Tools endpoints.\n\n- `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `api_key_ref?: string`\n Integration secret identifier for the transcription provider API key. Currently used for Azure transcription regions that require a customer-provided API key.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. All Deepgram models are run on-premise.\n\n- `deepgram/flux` is optimized for turn-taking with multilingual language hints.\n- `deepgram/nova-3` is multilingual with automatic language detection.\n- `deepgram/nova-2` is Deepgram's previous-generation multilingual model.\n- `azure/fast` is a multilingual Azure transcription model.\n- `assemblyai/universal-streaming` is a multilingual streaming model with configurable turn detection.\n- `xai/grok-stt` is a multilingual Grok STT model.\n - `region?: string`\n Region on third party cloud providers (currently Azure) if using one of their models. 
Some regions require `api_key_ref`.\n - `settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }`\n\n- `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `voice: string`\n The voice to be used by the voice assistant. Check the full list of [available voices](https://developers.telnyx.com/docs/tts-stt/tts-available-voices) via our voices API.\nTo use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the `api_key_ref` field. See [integration secrets documentation](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) for details. For Telnyx voices, use `Telnyx.<model_id>.<voice_id>` (e.g. Telnyx.KokoroTTS.af_heart).\nThe voice portion of the identifier supports [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) using mustache syntax (e.g. `Telnyx.Ultra.{{voice_id}}`). The variable is resolved at call time from your dynamic variables webhook, allowing you to select the voice dynamically per call.\n - `api_key_ref?: string`\n The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your ElevenLabs API key. 
Warning: Free plans are unlikely to work with this integration.\n - `background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }`\n Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.\n - `expressive_mode?: boolean`\n Enables emotionally expressive speech using SSML emotion tags. When enabled, the assistant uses audio tags like angry, excited, content, and sad to add emotional nuance. Only supported for Telnyx Ultra voices.\n - `language_boost?: string`\n Enhances recognition for specific languages and dialects during MiniMax TTS synthesis. Default is null (no boost). Set to 'auto' for automatic language detection. Only applicable when using MiniMax voices.\n - `similarity_boost?: number`\n Determines how closely the AI should adhere to the original voice when attempting to replicate it. Only applicable when using ElevenLabs.\n - `speed?: number`\n Adjusts speech velocity. 1.0 is default speed; values less than 1.0 slow speech; values greater than 1.0 accelerate it. Only applicable when using ElevenLabs.\n - `style?: number`\n Determines the style exaggeration of the voice. Amplifies speaker style but consumes additional resources when set above 0. Only applicable when using ElevenLabs.\n - `temperature?: number`\n Determines how stable the voice is and the randomness between each generation. Lower values create a broader emotional range; higher values produce more consistent, monotonous output. Only applicable when using ElevenLabs.\n - `use_speaker_boost?: boolean`\n Amplifies similarity to the original speaker voice. Increases computational load and latency slightly. Only applicable when using ElevenLabs.\n - `voice_speed?: number`\n The speed of the voice in the range [0.25, 2.0]. 1.0 is default speed. 
Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.\n\n- `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n Configuration settings for the assistant's web widget.\n - `agent_thinking_text?: string`\n Text displayed while the agent is processing.\n - `audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }`\n - `default_state?: 'expanded' | 'collapsed'`\n The default state of the widget.\n - `give_feedback_url?: string`\n URL for users to give feedback.\n - `logo_icon_url?: string`\n URL to a custom logo icon for the widget.\n - `position?: 'fixed' | 'static'`\n The positioning style for the widget.\n - `report_issue_url?: string`\n URL for users to report issues.\n - `speak_to_interrupt_text?: string`\n Text prompting users to speak to interrupt.\n - `start_call_text?: string`\n Custom text displayed on the start call button.\n - `theme?: 'light' | 'dark'`\n The visual theme for the widget.\n - `view_history_url?: string`\n URL to view conversation history.\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; 
}; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; 
version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; 
on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: 
object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: 
string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.create({ instructions: 'instructions', name: 'name' });\n\nconsole.log(inferenceEmbedding);\n```",
3151
+ markdown: "## create\n\n`client.ai.assistants.create(instructions: string, name: string, description?: string, dynamic_variables?: object, dynamic_variables_webhook_timeout_ms?: number, dynamic_variables_webhook_url?: string, enabled_features?: 'telephony' | 'messaging'[], external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }, fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }, greeting?: string, insight_settings?: { insight_group_id?: string; }, integrations?: { integration_id: string; allowed_list?: string[]; }[], interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }, llm_api_key_ref?: string, mcp_servers?: { id: string; allowed_tools?: string[]; }[], messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }, model?: string, observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }, post_conversation_settings?: { enabled?: boolean; }, privacy_settings?: { data_retention?: boolean; }, tags?: string[], telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }, tool_ids?: string[], tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: 
object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[], transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }, voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }, widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: 
widget_settings; }`\n\n**post** `/ai/assistants`\n\nCreate a new AI Assistant.\n\n### Parameters\n\n- `instructions: string`\n System instructions for the assistant. These may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables)\n\n- `name: string`\n\n- `description?: string`\n\n- `dynamic_variables?: object`\n Map of dynamic variables and their default values\n\n- `dynamic_variables_webhook_timeout_ms?: number`\n Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables).\n\n- `dynamic_variables_webhook_url?: string`\n If `dynamic_variables_webhook_url` is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. **Gotcha:** the webhook response must wrap variables under a top-level `dynamic_variables` object, e.g. `{\"dynamic_variables\": {\"customer_name\": \"Jane\"}}`. Returning a flat object will be ignored and variables will fall back to their defaults. 
See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) for the full request/response format and timeout behavior.\n\n- `enabled_features?: 'telephony' | 'messaging'[]`\n\n- `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `base_url: string`\n Base URL for the external LLM endpoint.\n - `model: string`\n Model identifier to use with the external LLM endpoint.\n - `authentication_method?: 'token' | 'certificate'`\n Authentication method used when connecting to the external LLM endpoint.\n - `certificate_ref?: string`\n Integration secret identifier for the client certificate used with certificate authentication.\n - `forward_metadata?: boolean`\n When `true`, Telnyx forwards the assistant's dynamic variables to the external LLM endpoint as a top-level `extra_metadata` object on the chat completion request body. Defaults to `false`. Example payload sent to the external endpoint: `{\"extra_metadata\": {\"customer_name\": \"Jane\", \"account_id\": \"acct_789\", \"telnyx_agent_target\": \"+13125550100\", \"telnyx_end_user_target\": \"+13125550123\"}}`. 
Distinct from OpenAI's native `metadata` field, which has its own size and type limits.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the external LLM API key.\n - `token_retrieval_url?: string`\n URL used to retrieve an access token when certificate authentication is enabled.\n\n- `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `llm_api_key_ref?: string`\n Integration secret identifier for the fallback model API key.\n - `model?: string`\n Fallback Telnyx-hosted model to use when the primary LLM provider is unavailable.\n\n- `greeting?: string`\n Text that the assistant will use to start the conversation. This may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). Use an empty string to have the assistant wait for the user to speak first. Use the special value `<assistant-speaks-first-with-model-generated-message>` to have the assistant generate the greeting based on the system instructions.\n\n- `insight_settings?: { insight_group_id?: string; }`\n - `insight_group_id?: string`\n Reference to an Insight Group. Insights in this group will be run automatically for all the assistant's conversations.\n\n- `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n Connected integrations attached to the assistant. The catalog of available integrations is at `/ai/integrations`; the user's connected integrations are at `/ai/integrations/connections`. 
Each item references a catalog integration by `integration_id`.\n\n- `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn behavior is controlled by the transcription end-of-turn settings under `transcription.settings` (`eot_threshold`, `eot_timeout_ms`, `eager_eot_threshold`).\n - `enable?: boolean`\n Whether users can interrupt the assistant while it is speaking.\n - `start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }`\n Controls when the assistant starts speaking after the user stops. These thresholds primarily apply to non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn detection is driven by the transcription end-of-turn settings under `transcription.settings` instead.\n\n- `llm_api_key_ref?: string`\n This is only needed when using third-party inference providers selected by `model`. The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use `external_llm.llm_api_key_ref` instead. Warning: Free plans are unlikely to work with this integration.\n\n- `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n MCP servers attached to the assistant. 
Create MCP servers with `/ai/mcp_servers`, then reference them by `id` here.\n\n- `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `conversation_inactivity_minutes?: number`\n If more than this many minutes have passed since the last message, the assistant will start a new conversation instead of continuing the existing one.\n - `default_messaging_profile_id?: string`\n Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.\n - `delivery_status_webhook_url?: string`\n The URL where webhooks related to delivery statuses for assistant messages will be sent.\n\n- `model?: string`\n ID of the model to use when `external_llm` is not set. You can use the [Get models API](https://developers.telnyx.com/api-reference/chat/get-available-models) to see available models. If `external_llm` is provided, the assistant uses `external_llm` instead of this field. If neither `model` nor `external_llm` is provided, Telnyx applies the default model.\n\n- `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `host?: string`\n - `public_key_ref?: string`\n - `secret_key_ref?: string`\n - `status?: 'enabled' | 'disabled'`\n\n- `post_conversation_settings?: { enabled?: boolean; }`\n Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.\n - `enabled?: boolean`\n Whether post-conversation processing is enabled. 
When true, the assistant will be invoked after the conversation ends to perform any final tool calls. Defaults to false.\n\n- `privacy_settings?: { data_retention?: boolean; }`\n - `data_retention?: boolean`\n If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.\n\n- `tags?: string[]`\n Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.\n\n- `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `default_texml_app_id?: string`\n Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.\n - `noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'`\n The noise suppression engine to use. Use 'disabled' to turn off noise suppression.\n - `noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }`\n Configuration for noise suppression. 
Only applicable when noise_suppression is 'deepfilternet'.\n - `recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }`\n Configuration for call recording format and channel settings.\n - `supports_unauthenticated_web_calls?: boolean`\n When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.\n - `time_limit_secs?: number`\n Maximum duration in seconds for the AI assistant to participate on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `user_idle_reply_secs?: number`\n Duration in seconds of end user silence before the assistant checks in on the user. When this limit is reached the assistant will prompt the user to respond. This is distinct from user_idle_timeout_secs which stops the assistant entirely.\n - `user_idle_timeout_secs?: number`\n Maximum duration in seconds of end user silence on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: { message?: string; prompt?: string; type?: 'prompt' | 'message'; }; }; }`\n Configuration for voicemail detection (AMD - Answering Machine Detection) on outgoing calls. These settings only apply if AMD is enabled on the Dial command. See [TeXML Dial documentation](https://developers.telnyx.com/api-reference/texml-rest-commands/initiate-an-outbound-call) for enabling AMD. 
Recommended settings: MachineDetection=Enable, AsyncAmd=true, DetectionMode=Premium.\n\n- `tool_ids?: string[]`\n IDs of shared tools to attach to the assistant. New integrations should prefer `tool_ids` over inline `tools`.\n\n- `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n Deprecated for new integrations. Inline tool definitions available to the assistant. 
Prefer `tool_ids` to attach shared tools created with the AI Tools endpoints.\n\n- `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `api_key_ref?: string`\n Integration secret identifier for the transcription provider API key. Currently used for Azure transcription regions that require a customer-provided API key.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. All Deepgram models are run on-premise.\n\n- `deepgram/flux` is optimized for turn-taking with multilingual language hints.\n- `deepgram/nova-3` is multilingual with automatic language detection.\n- `deepgram/nova-2` is Deepgram's previous-generation multilingual model.\n- `azure/fast` is a multilingual Azure transcription model.\n- `assemblyai/universal-streaming` is a multilingual streaming model with configurable turn detection.\n- `xai/grok-stt` is a multilingual Grok STT model.\n - `region?: string`\n Region on third party cloud providers (currently Azure) if using one of their models. 
Some regions require `api_key_ref`.\n - `settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }`\n\n- `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `voice: string`\n The voice to be used by the voice assistant. Check the full list of [available voices](https://developers.telnyx.com/docs/tts-stt/tts-available-voices) via our voices API.\nTo use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the `api_key_ref` field. See [integration secrets documentation](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) for details. For Telnyx voices, use `Telnyx.<model_id>.<voice_id>` (e.g. Telnyx.KokoroTTS.af_heart).\nThe voice portion of the identifier supports [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) using mustache syntax (e.g. `Telnyx.Ultra.{{voice_id}}`). The variable is resolved at call time from your dynamic variables webhook, allowing you to select the voice dynamically per call.\n - `api_key_ref?: string`\n The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your ElevenLabs API key. 
Warning: Free plans are unlikely to work with this integration.\n - `background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }`\n Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.\n - `expressive_mode?: boolean`\n Enables emotionally expressive speech using SSML emotion tags. When enabled, the assistant uses audio tags like angry, excited, content, and sad to add emotional nuance. Only supported for Telnyx Ultra voices.\n - `language_boost?: string`\n Enhances recognition for specific languages and dialects during MiniMax TTS synthesis. Default is null (no boost). Set to 'auto' for automatic language detection. Only applicable when using MiniMax voices.\n - `similarity_boost?: number`\n Determines how closely the AI should adhere to the original voice when attempting to replicate it. Only applicable when using ElevenLabs.\n - `speed?: number`\n Adjusts speech velocity. 1.0 is default speed; values less than 1.0 slow speech; values greater than 1.0 accelerate it. Only applicable when using ElevenLabs.\n - `style?: number`\n Determines the style exaggeration of the voice. Amplifies speaker style but consumes additional resources when set above 0. Only applicable when using ElevenLabs.\n - `temperature?: number`\n Determines how stable the voice is and the randomness between each generation. Lower values create a broader emotional range; higher values produce more consistent, monotonous output. Only applicable when using ElevenLabs.\n - `use_speaker_boost?: boolean`\n Amplifies similarity to the original speaker voice. Increases computational load and latency slightly. Only applicable when using ElevenLabs.\n - `voice_speed?: number`\n The speed of the voice in the range [0.25, 2.0]. 1.0 is default speed. 
Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.\n\n- `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n Configuration settings for the assistant's web widget.\n - `agent_thinking_text?: string`\n Text displayed while the agent is processing.\n - `audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }`\n - `default_state?: 'expanded' | 'collapsed'`\n The default state of the widget.\n - `give_feedback_url?: string`\n URL for users to give feedback.\n - `logo_icon_url?: string`\n URL to a custom logo icon for the widget.\n - `position?: 'fixed' | 'static'`\n The positioning style for the widget.\n - `report_issue_url?: string`\n URL for users to report issues.\n - `speak_to_interrupt_text?: string`\n Text prompting users to speak to interrupt.\n - `start_call_text?: string`\n Custom text displayed on the start call button.\n - `theme?: 'light' | 'dark'`\n The visual theme for the widget.\n - `view_history_url?: string`\n URL to view conversation history.\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; 
}; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; 
version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; 
on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; 
on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; 
report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.create({ instructions: 'instructions', name: 'name' });\n\nconsole.log(inferenceEmbedding);\n```",
3152
3152
  perLanguage: {
3153
3153
  typescript: {
3154
3154
  method: 'client.ai.assistants.create',
@@ -3289,7 +3289,7 @@ const EMBEDDED_METHODS = [
3289
3289
  'to?: string;',
3290
3290
  ],
3291
3291
  response: '{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }',
3292
- markdown: "## retrieve\n\n`client.ai.assistants.retrieve(assistant_id: string, call_control_id?: string, fetch_dynamic_variables_from_webhook?: boolean, from?: string, to?: string): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**get** `/ai/assistants/{assistant_id}`\n\nRetrieve an AI Assistant configuration by `assistant_id`.\n\n### Parameters\n\n- `assistant_id: string`\n\n- `call_control_id?: string`\n\n- `fetch_dynamic_variables_from_webhook?: boolean`\n\n- `from?: string`\n\n- `to?: string`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; 
authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; 
voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: 
string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 
'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### 
Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.retrieve('assistant_id');\n\nconsole.log(inferenceEmbedding);\n```",
3292
+ markdown: "## retrieve\n\n`client.ai.assistants.retrieve(assistant_id: string, call_control_id?: string, fetch_dynamic_variables_from_webhook?: boolean, from?: string, to?: string): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**get** `/ai/assistants/{assistant_id}`\n\nRetrieve an AI Assistant configuration by `assistant_id`.\n\n### Parameters\n\n- `assistant_id: string`\n\n- `call_control_id?: string`\n\n- `fetch_dynamic_variables_from_webhook?: boolean`\n\n- `from?: string`\n\n- `to?: string`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; 
authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; 
voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: 
string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; 
type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 
'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.retrieve('assistant_id');\n\nconsole.log(inferenceEmbedding);\n```",
3293
3293
  perLanguage: {
3294
3294
  typescript: {
3295
3295
  method: 'client.ai.assistants.retrieve',
@@ -3358,14 +3358,14 @@ const EMBEDDED_METHODS = [
3358
3358
  'tags?: string[];',
3359
3359
  "telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; };",
3360
3360
  'tool_ids?: string[];',
3361
- "tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[];",
3361
+ "tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[];",
3362
3362
  'transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; };',
3363
3363
  'version_name?: string;',
3364
3364
  "voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; };",
3365
3365
  "widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; };",
3366
3366
  ],
3367
3367
  response: '{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }',
3368
- markdown: "## update\n\n`client.ai.assistants.update(assistant_id: string, description?: string, dynamic_variables?: object, dynamic_variables_webhook_timeout_ms?: number, dynamic_variables_webhook_url?: string, enabled_features?: 'telephony' | 'messaging'[], external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }, fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }, greeting?: string, insight_settings?: { insight_group_id?: string; }, instructions?: string, integrations?: { integration_id: string; allowed_list?: string[]; }[], interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }, llm_api_key_ref?: string, mcp_servers?: { id: string; allowed_tools?: string[]; }[], messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }, model?: string, name?: string, observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }, post_conversation_settings?: { enabled?: boolean; }, privacy_settings?: { data_retention?: boolean; }, promote_to_main?: boolean, tags?: string[], telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }, tool_ids?: string[], tools?: object | object | { handoff: 
object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[], transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }, version_name?: string, voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }, widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; 
version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}`\n\nUpdate an AI Assistant's attributes.\n\n### Parameters\n\n- `assistant_id: string`\n\n- `description?: string`\n\n- `dynamic_variables?: object`\n Map of dynamic variables and their default values\n\n- `dynamic_variables_webhook_timeout_ms?: number`\n Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables).\n\n- `dynamic_variables_webhook_url?: string`\n If `dynamic_variables_webhook_url` is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. **Gotcha:** the webhook response must wrap variables under a top-level `dynamic_variables` object, e.g. `{\"dynamic_variables\": {\"customer_name\": \"Jane\"}}`. Returning a flat object will be ignored and variables will fall back to their defaults. 
See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) for the full request/response format and timeout behavior.\n\n- `enabled_features?: 'telephony' | 'messaging'[]`\n\n- `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `base_url: string`\n Base URL for the external LLM endpoint.\n - `model: string`\n Model identifier to use with the external LLM endpoint.\n - `authentication_method?: 'token' | 'certificate'`\n Authentication method used when connecting to the external LLM endpoint.\n - `certificate_ref?: string`\n Integration secret identifier for the client certificate used with certificate authentication.\n - `forward_metadata?: boolean`\n When `true`, Telnyx forwards the assistant's dynamic variables to the external LLM endpoint as a top-level `extra_metadata` object on the chat completion request body. Defaults to `false`. Example payload sent to the external endpoint: `{\"extra_metadata\": {\"customer_name\": \"Jane\", \"account_id\": \"acct_789\", \"telnyx_agent_target\": \"+13125550100\", \"telnyx_end_user_target\": \"+13125550123\"}}`. 
Distinct from OpenAI's native `metadata` field, which has its own size and type limits.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the external LLM API key.\n - `token_retrieval_url?: string`\n URL used to retrieve an access token when certificate authentication is enabled.\n\n- `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `llm_api_key_ref?: string`\n Integration secret identifier for the fallback model API key.\n - `model?: string`\n Fallback Telnyx-hosted model to use when the primary LLM provider is unavailable.\n\n- `greeting?: string`\n Text that the assistant will use to start the conversation. This may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). Use an empty string to have the assistant wait for the user to speak first. Use the special value `<assistant-speaks-first-with-model-generated-message>` to have the assistant generate the greeting based on the system instructions.\n\n- `insight_settings?: { insight_group_id?: string; }`\n - `insight_group_id?: string`\n Reference to an Insight Group. Insights in this group will be run automatically for all the assistant's conversations.\n\n- `instructions?: string`\n System instructions for the assistant. These may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables)\n\n- `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n Connected integrations attached to the assistant. 
The catalog of available integrations is at `/ai/integrations`; the user's connected integrations are at `/ai/integrations/connections`. Each item references a catalog integration by `integration_id`.\n\n- `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn behavior is controlled by the transcription end-of-turn settings under `transcription.settings` (`eot_threshold`, `eot_timeout_ms`, `eager_eot_threshold`).\n - `enable?: boolean`\n Whether users can interrupt the assistant while it is speaking.\n - `start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }`\n Controls when the assistant starts speaking after the user stops. These thresholds primarily apply to non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn detection is driven by the transcription end-of-turn settings under `transcription.settings` instead.\n\n- `llm_api_key_ref?: string`\n This is only needed when using third-party inference providers selected by `model`. The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use `external_llm.llm_api_key_ref` instead. Warning: Free plans are unlikely to work with this integration.\n\n- `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n MCP servers attached to the assistant. 
Create MCP servers with `/ai/mcp_servers`, then reference them by `id` here.\n\n- `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `conversation_inactivity_minutes?: number`\n If more than this many minutes have passed since the last message, the assistant will start a new conversation instead of continuing the existing one.\n - `default_messaging_profile_id?: string`\n Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.\n - `delivery_status_webhook_url?: string`\n The URL where webhooks related to delivery statused for assistant messages will be sent.\n\n- `model?: string`\n ID of the model to use when `external_llm` is not set. You can use the [Get models API](https://developers.telnyx.com/api-reference/chat/get-available-models) to see available models. If `external_llm` is provided, the assistant uses `external_llm` instead of this field. If neither `model` nor `external_llm` is provided, Telnyx applies the default model.\n\n- `name?: string`\n\n- `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `host?: string`\n - `public_key_ref?: string`\n - `secret_key_ref?: string`\n - `status?: 'enabled' | 'disabled'`\n\n- `post_conversation_settings?: { enabled?: boolean; }`\n Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.\n - `enabled?: boolean`\n Whether post-conversation processing is enabled. 
When true, the assistant will be invoked after the conversation ends to perform any final tool calls. Defaults to false.\n\n- `privacy_settings?: { data_retention?: boolean; }`\n - `data_retention?: boolean`\n If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.\n\n- `promote_to_main?: boolean`\n Indicates whether the assistant should be promoted to the main version. Defaults to true.\n\n- `tags?: string[]`\n Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.\n\n- `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `default_texml_app_id?: string`\n Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.\n - `noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'`\n The noise suppression engine to use. Use 'disabled' to turn off noise suppression.\n - `noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }`\n Configuration for noise suppression. 
Only applicable when noise_suppression is 'deepfilternet'.\n - `recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }`\n Configuration for call recording format and channel settings.\n - `supports_unauthenticated_web_calls?: boolean`\n When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.\n - `time_limit_secs?: number`\n Maximum duration in seconds for the AI assistant to participate on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `user_idle_reply_secs?: number`\n Duration in seconds of end user silence before the assistant checks in on the user. When this limit is reached the assistant will prompt the user to respond. This is distinct from user_idle_timeout_secs which stops the assistant entirely.\n - `user_idle_timeout_secs?: number`\n Maximum duration in seconds of end user silence on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: { message?: string; prompt?: string; type?: 'prompt' | 'message'; }; }; }`\n Configuration for voicemail detection (AMD - Answering Machine Detection) on outgoing calls. These settings only apply if AMD is enabled on the Dial command. See [TeXML Dial documentation](https://developers.telnyx.com/api-reference/texml-rest-commands/initiate-an-outbound-call) for enabling AMD. 
Recommended settings: MachineDetection=Enable, AsyncAmd=true, DetectionMode=Premium.\n\n- `tool_ids?: string[]`\n IDs of shared tools to attach to the assistant. New integrations should prefer `tool_ids` over inline `tools`.\n\n- `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n Deprecated for new integrations. Inline tool definitions available to the assistant. 
Prefer `tool_ids` to attach shared tools created with the AI Tools endpoints.\n\n- `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `api_key_ref?: string`\n Integration secret identifier for the transcription provider API key. Currently used for Azure transcription regions that require a customer-provided API key.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. All Deepgram models are run on-premise.\n\n- `deepgram/flux` is optimized for turn-taking with multilingual language hints.\n- `deepgram/nova-3` is multilingual with automatic language detection.\n- `deepgram/nova-2` is Deepgram's previous-generation multilingual model.\n- `azure/fast` is a multilingual Azure transcription model.\n- `assemblyai/universal-streaming` is a multilingual streaming model with configurable turn detection.\n- `xai/grok-stt` is a multilingual Grok STT model.\n - `region?: string`\n Region on third party cloud providers (currently Azure) if using one of their models. 
Some regions require `api_key_ref`.\n - `settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }`\n\n- `version_name?: string`\n Human-readable name for the assistant version.\n\n- `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `voice: string`\n The voice to be used by the voice assistant. Check the full list of [available voices](https://developers.telnyx.com/docs/tts-stt/tts-available-voices) via our voices API.\nTo use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the `api_key_ref` field. See [integration secrets documentation](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) for details. For Telnyx voices, use `Telnyx.<model_id>.<voice_id>` (e.g. Telnyx.KokoroTTS.af_heart).\nThe voice portion of the identifier supports [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) using mustache syntax (e.g. `Telnyx.Ultra.{{voice_id}}`). The variable is resolved at call time from your dynamic variables webhook, allowing you to select the voice dynamically per call.\n - `api_key_ref?: string`\n The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your ElevenLabs API key. 
Warning: Free plans are unlikely to work with this integration.\n - `background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }`\n Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.\n - `expressive_mode?: boolean`\n Enables emotionally expressive speech using SSML emotion tags. When enabled, the assistant uses audio tags like angry, excited, content, and sad to add emotional nuance. Only supported for Telnyx Ultra voices.\n - `language_boost?: string`\n Enhances recognition for specific languages and dialects during MiniMax TTS synthesis. Default is null (no boost). Set to 'auto' for automatic language detection. Only applicable when using MiniMax voices.\n - `similarity_boost?: number`\n Determines how closely the AI should adhere to the original voice when attempting to replicate it. Only applicable when using ElevenLabs.\n - `speed?: number`\n Adjusts speech velocity. 1.0 is default speed; values less than 1.0 slow speech; values greater than 1.0 accelerate it. Only applicable when using ElevenLabs.\n - `style?: number`\n Determines the style exaggeration of the voice. Amplifies speaker style but consumes additional resources when set above 0. Only applicable when using ElevenLabs.\n - `temperature?: number`\n Determines how stable the voice is and the randomness between each generation. Lower values create a broader emotional range; higher values produce more consistent, monotonous output. Only applicable when using ElevenLabs.\n - `use_speaker_boost?: boolean`\n Amplifies similarity to the original speaker voice. Increases computational load and latency slightly. Only applicable when using ElevenLabs.\n - `voice_speed?: number`\n The speed of the voice in the range [0.25, 2.0]. 1.0 is deafult speed. 
Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.\n\n- `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n Configuration settings for the assistant's web widget.\n - `agent_thinking_text?: string`\n Text displayed while the agent is processing.\n - `audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }`\n - `default_state?: 'expanded' | 'collapsed'`\n The default state of the widget.\n - `give_feedback_url?: string`\n URL for users to give feedback.\n - `logo_icon_url?: string`\n URL to a custom logo icon for the widget.\n - `position?: 'fixed' | 'static'`\n The positioning style for the widget.\n - `report_issue_url?: string`\n URL for users to report issues.\n - `speak_to_interrupt_text?: string`\n Text prompting users to speak to interrupt.\n - `start_call_text?: string`\n Custom text displayed on the start call button.\n - `theme?: 'light' | 'dark'`\n The visual theme for the widget.\n - `view_history_url?: string`\n URL to view conversation history.\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; 
}; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; 
version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; 
on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: 
object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: 
string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.update('assistant_id');\n\nconsole.log(inferenceEmbedding);\n```",
3368
+ markdown: "## update\n\n`client.ai.assistants.update(assistant_id: string, description?: string, dynamic_variables?: object, dynamic_variables_webhook_timeout_ms?: number, dynamic_variables_webhook_url?: string, enabled_features?: 'telephony' | 'messaging'[], external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }, fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }, greeting?: string, insight_settings?: { insight_group_id?: string; }, instructions?: string, integrations?: { integration_id: string; allowed_list?: string[]; }[], interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }, llm_api_key_ref?: string, mcp_servers?: { id: string; allowed_tools?: string[]; }[], messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }, model?: string, name?: string, observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }, post_conversation_settings?: { enabled?: boolean; }, privacy_settings?: { data_retention?: boolean; }, promote_to_main?: boolean, tags?: string[], telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }, tool_ids?: string[], tools?: object | object | { handoff: 
object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[], transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }, version_name?: string, voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }, widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; 
version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}`\n\nUpdate an AI Assistant's attributes.\n\n### Parameters\n\n- `assistant_id: string`\n\n- `description?: string`\n\n- `dynamic_variables?: object`\n Map of dynamic variables and their default values\n\n- `dynamic_variables_webhook_timeout_ms?: number`\n Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables).\n\n- `dynamic_variables_webhook_url?: string`\n If `dynamic_variables_webhook_url` is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. **Gotcha:** the webhook response must wrap variables under a top-level `dynamic_variables` object, e.g. `{\"dynamic_variables\": {\"customer_name\": \"Jane\"}}`. Returning a flat object will be ignored and variables will fall back to their defaults. 
See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) for the full request/response format and timeout behavior.\n\n- `enabled_features?: 'telephony' | 'messaging'[]`\n\n- `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `base_url: string`\n Base URL for the external LLM endpoint.\n - `model: string`\n Model identifier to use with the external LLM endpoint.\n - `authentication_method?: 'token' | 'certificate'`\n Authentication method used when connecting to the external LLM endpoint.\n - `certificate_ref?: string`\n Integration secret identifier for the client certificate used with certificate authentication.\n - `forward_metadata?: boolean`\n When `true`, Telnyx forwards the assistant's dynamic variables to the external LLM endpoint as a top-level `extra_metadata` object on the chat completion request body. Defaults to `false`. Example payload sent to the external endpoint: `{\"extra_metadata\": {\"customer_name\": \"Jane\", \"account_id\": \"acct_789\", \"telnyx_agent_target\": \"+13125550100\", \"telnyx_end_user_target\": \"+13125550123\"}}`. 
Distinct from OpenAI's native `metadata` field, which has its own size and type limits.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the external LLM API key.\n - `token_retrieval_url?: string`\n URL used to retrieve an access token when certificate authentication is enabled.\n\n- `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `llm_api_key_ref?: string`\n Integration secret identifier for the fallback model API key.\n - `model?: string`\n Fallback Telnyx-hosted model to use when the primary LLM provider is unavailable.\n\n- `greeting?: string`\n Text that the assistant will use to start the conversation. This may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). Use an empty string to have the assistant wait for the user to speak first. Use the special value `<assistant-speaks-first-with-model-generated-message>` to have the assistant generate the greeting based on the system instructions.\n\n- `insight_settings?: { insight_group_id?: string; }`\n - `insight_group_id?: string`\n Reference to an Insight Group. Insights in this group will be run automatically for all the assistant's conversations.\n\n- `instructions?: string`\n System instructions for the assistant. These may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables)\n\n- `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n Connected integrations attached to the assistant. 
The catalog of available integrations is at `/ai/integrations`; the user's connected integrations are at `/ai/integrations/connections`. Each item references a catalog integration by `integration_id`.\n\n- `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn behavior is controlled by the transcription end-of-turn settings under `transcription.settings` (`eot_threshold`, `eot_timeout_ms`, `eager_eot_threshold`).\n - `enable?: boolean`\n Whether users can interrupt the assistant while it is speaking.\n - `start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }`\n Controls when the assistant starts speaking after the user stops. These thresholds primarily apply to non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn detection is driven by the transcription end-of-turn settings under `transcription.settings` instead.\n\n- `llm_api_key_ref?: string`\n This is only needed when using third-party inference providers selected by `model`. The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use `external_llm.llm_api_key_ref` instead. Warning: Free plans are unlikely to work with this integration.\n\n- `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n MCP servers attached to the assistant. 
Create MCP servers with `/ai/mcp_servers`, then reference them by `id` here.\n\n- `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `conversation_inactivity_minutes?: number`\n If more than this many minutes have passed since the last message, the assistant will start a new conversation instead of continuing the existing one.\n - `default_messaging_profile_id?: string`\n Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.\n - `delivery_status_webhook_url?: string`\n The URL where webhooks related to delivery statused for assistant messages will be sent.\n\n- `model?: string`\n ID of the model to use when `external_llm` is not set. You can use the [Get models API](https://developers.telnyx.com/api-reference/chat/get-available-models) to see available models. If `external_llm` is provided, the assistant uses `external_llm` instead of this field. If neither `model` nor `external_llm` is provided, Telnyx applies the default model.\n\n- `name?: string`\n\n- `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `host?: string`\n - `public_key_ref?: string`\n - `secret_key_ref?: string`\n - `status?: 'enabled' | 'disabled'`\n\n- `post_conversation_settings?: { enabled?: boolean; }`\n Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.\n - `enabled?: boolean`\n Whether post-conversation processing is enabled. 
When true, the assistant will be invoked after the conversation ends to perform any final tool calls. Defaults to false.\n\n- `privacy_settings?: { data_retention?: boolean; }`\n - `data_retention?: boolean`\n If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.\n\n- `promote_to_main?: boolean`\n Indicates whether the assistant should be promoted to the main version. Defaults to true.\n\n- `tags?: string[]`\n Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.\n\n- `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `default_texml_app_id?: string`\n Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.\n - `noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'`\n The noise suppression engine to use. Use 'disabled' to turn off noise suppression.\n - `noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }`\n Configuration for noise suppression. 
Only applicable when noise_suppression is 'deepfilternet'.\n - `recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }`\n Configuration for call recording format and channel settings.\n - `supports_unauthenticated_web_calls?: boolean`\n When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.\n - `time_limit_secs?: number`\n Maximum duration in seconds for the AI assistant to participate on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `user_idle_reply_secs?: number`\n Duration in seconds of end user silence before the assistant checks in on the user. When this limit is reached the assistant will prompt the user to respond. This is distinct from user_idle_timeout_secs which stops the assistant entirely.\n - `user_idle_timeout_secs?: number`\n Maximum duration in seconds of end user silence on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: { message?: string; prompt?: string; type?: 'prompt' | 'message'; }; }; }`\n Configuration for voicemail detection (AMD - Answering Machine Detection) on outgoing calls. These settings only apply if AMD is enabled on the Dial command. See [TeXML Dial documentation](https://developers.telnyx.com/api-reference/texml-rest-commands/initiate-an-outbound-call) for enabling AMD. 
Recommended settings: MachineDetection=Enable, AsyncAmd=true, DetectionMode=Premium.\n\n- `tool_ids?: string[]`\n IDs of shared tools to attach to the assistant. New integrations should prefer `tool_ids` over inline `tools`.\n\n- `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n Deprecated for new integrations. Inline tool definitions available to the assistant. 
Prefer `tool_ids` to attach shared tools created with the AI Tools endpoints.\n\n- `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `api_key_ref?: string`\n Integration secret identifier for the transcription provider API key. Currently used for Azure transcription regions that require a customer-provided API key.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. All Deepgram models are run on-premise.\n\n- `deepgram/flux` is optimized for turn-taking with multilingual language hints.\n- `deepgram/nova-3` is multilingual with automatic language detection.\n- `deepgram/nova-2` is Deepgram's previous-generation multilingual model.\n- `azure/fast` is a multilingual Azure transcription model.\n- `assemblyai/universal-streaming` is a multilingual streaming model with configurable turn detection.\n- `xai/grok-stt` is a multilingual Grok STT model.\n - `region?: string`\n Region on third party cloud providers (currently Azure) if using one of their models. 
Some regions require `api_key_ref`.\n - `settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }`\n\n- `version_name?: string`\n Human-readable name for the assistant version.\n\n- `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `voice: string`\n The voice to be used by the voice assistant. Check the full list of [available voices](https://developers.telnyx.com/docs/tts-stt/tts-available-voices) via our voices API.\nTo use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the `api_key_ref` field. See [integration secrets documentation](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) for details. For Telnyx voices, use `Telnyx.<model_id>.<voice_id>` (e.g. Telnyx.KokoroTTS.af_heart).\nThe voice portion of the identifier supports [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) using mustache syntax (e.g. `Telnyx.Ultra.{{voice_id}}`). The variable is resolved at call time from your dynamic variables webhook, allowing you to select the voice dynamically per call.\n - `api_key_ref?: string`\n The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your ElevenLabs API key. 
Warning: Free plans are unlikely to work with this integration.\n - `background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }`\n Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.\n - `expressive_mode?: boolean`\n Enables emotionally expressive speech using SSML emotion tags. When enabled, the assistant uses audio tags like angry, excited, content, and sad to add emotional nuance. Only supported for Telnyx Ultra voices.\n - `language_boost?: string`\n Enhances recognition for specific languages and dialects during MiniMax TTS synthesis. Default is null (no boost). Set to 'auto' for automatic language detection. Only applicable when using MiniMax voices.\n - `similarity_boost?: number`\n Determines how closely the AI should adhere to the original voice when attempting to replicate it. Only applicable when using ElevenLabs.\n - `speed?: number`\n Adjusts speech velocity. 1.0 is default speed; values less than 1.0 slow speech; values greater than 1.0 accelerate it. Only applicable when using ElevenLabs.\n - `style?: number`\n Determines the style exaggeration of the voice. Amplifies speaker style but consumes additional resources when set above 0. Only applicable when using ElevenLabs.\n - `temperature?: number`\n Determines how stable the voice is and the randomness between each generation. Lower values create a broader emotional range; higher values produce more consistent, monotonous output. Only applicable when using ElevenLabs.\n - `use_speaker_boost?: boolean`\n Amplifies similarity to the original speaker voice. Increases computational load and latency slightly. Only applicable when using ElevenLabs.\n - `voice_speed?: number`\n The speed of the voice in the range [0.25, 2.0]. 1.0 is deafult speed. 
Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.\n\n- `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n Configuration settings for the assistant's web widget.\n - `agent_thinking_text?: string`\n Text displayed while the agent is processing.\n - `audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }`\n - `default_state?: 'expanded' | 'collapsed'`\n The default state of the widget.\n - `give_feedback_url?: string`\n URL for users to give feedback.\n - `logo_icon_url?: string`\n URL to a custom logo icon for the widget.\n - `position?: 'fixed' | 'static'`\n The positioning style for the widget.\n - `report_issue_url?: string`\n URL for users to report issues.\n - `speak_to_interrupt_text?: string`\n Text prompting users to speak to interrupt.\n - `start_call_text?: string`\n Custom text displayed on the start call button.\n - `theme?: 'light' | 'dark'`\n The visual theme for the widget.\n - `view_history_url?: string`\n URL to view conversation history.\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; 
}; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; 
version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; 
on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; 
on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; 
report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.update('assistant_id');\n\nconsole.log(inferenceEmbedding);\n```",
3369
3369
  perLanguage: {
3370
3370
  typescript: {
3371
3371
  method: 'client.ai.assistants.update',
@@ -3455,7 +3455,7 @@ const EMBEDDED_METHODS = [
3455
3455
  qualified: 'client.ai.assistants.clone',
3456
3456
  params: ['assistant_id: string;'],
3457
3457
  response: '{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }',
3458
- markdown: "## clone\n\n`client.ai.assistants.clone(assistant_id: string): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}/clone`\n\nClone an existing assistant, excluding telephony and messaging settings.\n\n### Parameters\n\n- `assistant_id: string`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; 
greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; 
temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: 
string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 
'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.clone('assistant_id');\n\nconsole.log(inferenceEmbedding);\n```",
3458
+ markdown: "## clone\n\n`client.ai.assistants.clone(assistant_id: string): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}/clone`\n\nClone an existing assistant, excluding telephony and messaging settings.\n\n### Parameters\n\n- `assistant_id: string`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; 
greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; 
temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: 
string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 
'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await 
client.ai.assistants.clone('assistant_id');\n\nconsole.log(inferenceEmbedding);\n```",
3459
3459
  perLanguage: {
3460
3460
  typescript: {
3461
3461
  method: 'client.ai.assistants.clone',
@@ -4724,7 +4724,7 @@ const EMBEDDED_METHODS = [
4724
4724
  qualified: 'client.ai.assistants.versions.retrieve',
4725
4725
  params: ['assistant_id: string;', 'version_id: string;', 'include_mcp_servers?: boolean;'],
4726
4726
  response: '{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }',
4727
- markdown: "## retrieve\n\n`client.ai.assistants.versions.retrieve(assistant_id: string, version_id: string, include_mcp_servers?: boolean): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**get** `/ai/assistants/{assistant_id}/versions/{version_id}`\n\nRetrieves a specific version of an assistant by assistant_id and version_id\n\n### Parameters\n\n- `assistant_id: string`\n\n- `version_id: string`\n\n- `include_mcp_servers?: boolean`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: 
string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object 
| object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - 
`messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: 
string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new 
Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.versions.retrieve('version_id', { assistant_id: 'assistant_id' });\n\nconsole.log(inferenceEmbedding);\n```",
4727
+ markdown: "## retrieve\n\n`client.ai.assistants.versions.retrieve(assistant_id: string, version_id: string, include_mcp_servers?: boolean): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**get** `/ai/assistants/{assistant_id}/versions/{version_id}`\n\nRetrieves a specific version of an assistant by assistant_id and version_id\n\n### Parameters\n\n- `assistant_id: string`\n\n- `version_id: string`\n\n- `include_mcp_servers?: boolean`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: 
string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object 
| object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - 
`messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; 
value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport 
Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.versions.retrieve('version_id', { assistant_id: 'assistant_id' });\n\nconsole.log(inferenceEmbedding);\n```",
4728
4728
  perLanguage: {
4729
4729
  typescript: {
4730
4730
  method: 'client.ai.assistants.versions.retrieve',
@@ -4793,14 +4793,14 @@ const EMBEDDED_METHODS = [
4793
4793
  'tags?: string[];',
4794
4794
  "telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; };",
4795
4795
  'tool_ids?: string[];',
4796
- "tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[];",
4796
+ "tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[];",
4797
4797
  'transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; };',
4798
4798
  'version_name?: string;',
4799
4799
  "voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; };",
4800
4800
  "widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; };",
4801
4801
  ],
4802
4802
  response: '{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }',
4803
- markdown: "## update\n\n`client.ai.assistants.versions.update(assistant_id: string, version_id: string, description?: string, dynamic_variables?: object, dynamic_variables_webhook_timeout_ms?: number, dynamic_variables_webhook_url?: string, enabled_features?: 'telephony' | 'messaging'[], external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }, fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }, greeting?: string, insight_settings?: { insight_group_id?: string; }, instructions?: string, integrations?: { integration_id: string; allowed_list?: string[]; }[], interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }, llm_api_key_ref?: string, mcp_servers?: { id: string; allowed_tools?: string[]; }[], messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }, model?: string, name?: string, observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }, post_conversation_settings?: { enabled?: boolean; }, privacy_settings?: { data_retention?: boolean; }, tags?: string[], telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }, tool_ids?: string[], tools?: object | object | { handoff: 
object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[], transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }, version_name?: string, voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }, widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; 
version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}/versions/{version_id}`\n\nUpdates the configuration of a specific assistant version. Can not update main version\n\n### Parameters\n\n- `assistant_id: string`\n\n- `version_id: string`\n\n- `description?: string`\n\n- `dynamic_variables?: object`\n Map of dynamic variables and their default values\n\n- `dynamic_variables_webhook_timeout_ms?: number`\n Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables).\n\n- `dynamic_variables_webhook_url?: string`\n If `dynamic_variables_webhook_url` is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. **Gotcha:** the webhook response must wrap variables under a top-level `dynamic_variables` object, e.g. `{\"dynamic_variables\": {\"customer_name\": \"Jane\"}}`. Returning a flat object will be ignored and variables will fall back to their defaults. 
See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) for the full request/response format and timeout behavior.\n\n- `enabled_features?: 'telephony' | 'messaging'[]`\n\n- `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `base_url: string`\n Base URL for the external LLM endpoint.\n - `model: string`\n Model identifier to use with the external LLM endpoint.\n - `authentication_method?: 'token' | 'certificate'`\n Authentication method used when connecting to the external LLM endpoint.\n - `certificate_ref?: string`\n Integration secret identifier for the client certificate used with certificate authentication.\n - `forward_metadata?: boolean`\n When `true`, Telnyx forwards the assistant's dynamic variables to the external LLM endpoint as a top-level `extra_metadata` object on the chat completion request body. Defaults to `false`. Example payload sent to the external endpoint: `{\"extra_metadata\": {\"customer_name\": \"Jane\", \"account_id\": \"acct_789\", \"telnyx_agent_target\": \"+13125550100\", \"telnyx_end_user_target\": \"+13125550123\"}}`. 
Distinct from OpenAI's native `metadata` field, which has its own size and type limits.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the external LLM API key.\n - `token_retrieval_url?: string`\n URL used to retrieve an access token when certificate authentication is enabled.\n\n- `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `llm_api_key_ref?: string`\n Integration secret identifier for the fallback model API key.\n - `model?: string`\n Fallback Telnyx-hosted model to use when the primary LLM provider is unavailable.\n\n- `greeting?: string`\n Text that the assistant will use to start the conversation. This may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). Use an empty string to have the assistant wait for the user to speak first. Use the special value `<assistant-speaks-first-with-model-generated-message>` to have the assistant generate the greeting based on the system instructions.\n\n- `insight_settings?: { insight_group_id?: string; }`\n - `insight_group_id?: string`\n Reference to an Insight Group. Insights in this group will be run automatically for all the assistant's conversations.\n\n- `instructions?: string`\n System instructions for the assistant. These may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables)\n\n- `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n Connected integrations attached to the assistant. 
The catalog of available integrations is at `/ai/integrations`; the user's connected integrations are at `/ai/integrations/connections`. Each item references a catalog integration by `integration_id`.\n\n- `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn behavior is controlled by the transcription end-of-turn settings under `transcription.settings` (`eot_threshold`, `eot_timeout_ms`, `eager_eot_threshold`).\n - `enable?: boolean`\n Whether users can interrupt the assistant while it is speaking.\n - `start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }`\n Controls when the assistant starts speaking after the user stops. These thresholds primarily apply to non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn detection is driven by the transcription end-of-turn settings under `transcription.settings` instead.\n\n- `llm_api_key_ref?: string`\n This is only needed when using third-party inference providers selected by `model`. The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use `external_llm.llm_api_key_ref` instead. Warning: Free plans are unlikely to work with this integration.\n\n- `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n MCP servers attached to the assistant. 
Create MCP servers with `/ai/mcp_servers`, then reference them by `id` here.\n\n- `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `conversation_inactivity_minutes?: number`\n If more than this many minutes have passed since the last message, the assistant will start a new conversation instead of continuing the existing one.\n - `default_messaging_profile_id?: string`\n Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.\n - `delivery_status_webhook_url?: string`\n The URL where webhooks related to delivery statused for assistant messages will be sent.\n\n- `model?: string`\n ID of the model to use when `external_llm` is not set. You can use the [Get models API](https://developers.telnyx.com/api-reference/chat/get-available-models) to see available models. If `external_llm` is provided, the assistant uses `external_llm` instead of this field. If neither `model` nor `external_llm` is provided, Telnyx applies the default model.\n\n- `name?: string`\n\n- `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `host?: string`\n - `public_key_ref?: string`\n - `secret_key_ref?: string`\n - `status?: 'enabled' | 'disabled'`\n\n- `post_conversation_settings?: { enabled?: boolean; }`\n Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.\n - `enabled?: boolean`\n Whether post-conversation processing is enabled. 
When true, the assistant will be invoked after the conversation ends to perform any final tool calls. Defaults to false.\n\n- `privacy_settings?: { data_retention?: boolean; }`\n - `data_retention?: boolean`\n If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.\n\n- `tags?: string[]`\n Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.\n\n- `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `default_texml_app_id?: string`\n Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.\n - `noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'`\n The noise suppression engine to use. Use 'disabled' to turn off noise suppression.\n - `noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }`\n Configuration for noise suppression. 
Only applicable when noise_suppression is 'deepfilternet'.\n - `recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }`\n Configuration for call recording format and channel settings.\n - `supports_unauthenticated_web_calls?: boolean`\n When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.\n - `time_limit_secs?: number`\n Maximum duration in seconds for the AI assistant to participate on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `user_idle_reply_secs?: number`\n Duration in seconds of end user silence before the assistant checks in on the user. When this limit is reached the assistant will prompt the user to respond. This is distinct from user_idle_timeout_secs which stops the assistant entirely.\n - `user_idle_timeout_secs?: number`\n Maximum duration in seconds of end user silence on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: { message?: string; prompt?: string; type?: 'prompt' | 'message'; }; }; }`\n Configuration for voicemail detection (AMD - Answering Machine Detection) on outgoing calls. These settings only apply if AMD is enabled on the Dial command. See [TeXML Dial documentation](https://developers.telnyx.com/api-reference/texml-rest-commands/initiate-an-outbound-call) for enabling AMD. 
Recommended settings: MachineDetection=Enable, AsyncAmd=true, DetectionMode=Premium.\n\n- `tool_ids?: string[]`\n IDs of shared tools to attach to the assistant. New integrations should prefer `tool_ids` over inline `tools`.\n\n- `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n Deprecated for new integrations. Inline tool definitions available to the assistant. 
Prefer `tool_ids` to attach shared tools created with the AI Tools endpoints.\n\n- `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `api_key_ref?: string`\n Integration secret identifier for the transcription provider API key. Currently used for Azure transcription regions that require a customer-provided API key.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. All Deepgram models are run on-premise.\n\n- `deepgram/flux` is optimized for turn-taking with multilingual language hints.\n- `deepgram/nova-3` is multilingual with automatic language detection.\n- `deepgram/nova-2` is Deepgram's previous-generation multilingual model.\n- `azure/fast` is a multilingual Azure transcription model.\n- `assemblyai/universal-streaming` is a multilingual streaming model with configurable turn detection.\n- `xai/grok-stt` is a multilingual Grok STT model.\n - `region?: string`\n Region on third party cloud providers (currently Azure) if using one of their models. 
Some regions require `api_key_ref`.\n - `settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }`\n\n- `version_name?: string`\n Human-readable name for the assistant version.\n\n- `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `voice: string`\n The voice to be used by the voice assistant. Check the full list of [available voices](https://developers.telnyx.com/docs/tts-stt/tts-available-voices) via our voices API.\nTo use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the `api_key_ref` field. See [integration secrets documentation](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) for details. For Telnyx voices, use `Telnyx.<model_id>.<voice_id>` (e.g. Telnyx.KokoroTTS.af_heart).\nThe voice portion of the identifier supports [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) using mustache syntax (e.g. `Telnyx.Ultra.{{voice_id}}`). The variable is resolved at call time from your dynamic variables webhook, allowing you to select the voice dynamically per call.\n - `api_key_ref?: string`\n The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your ElevenLabs API key. 
Warning: Free plans are unlikely to work with this integration.\n - `background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }`\n Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.\n - `expressive_mode?: boolean`\n Enables emotionally expressive speech using SSML emotion tags. When enabled, the assistant uses audio tags like angry, excited, content, and sad to add emotional nuance. Only supported for Telnyx Ultra voices.\n - `language_boost?: string`\n Enhances recognition for specific languages and dialects during MiniMax TTS synthesis. Default is null (no boost). Set to 'auto' for automatic language detection. Only applicable when using MiniMax voices.\n - `similarity_boost?: number`\n Determines how closely the AI should adhere to the original voice when attempting to replicate it. Only applicable when using ElevenLabs.\n - `speed?: number`\n Adjusts speech velocity. 1.0 is default speed; values less than 1.0 slow speech; values greater than 1.0 accelerate it. Only applicable when using ElevenLabs.\n - `style?: number`\n Determines the style exaggeration of the voice. Amplifies speaker style but consumes additional resources when set above 0. Only applicable when using ElevenLabs.\n - `temperature?: number`\n Determines how stable the voice is and the randomness between each generation. Lower values create a broader emotional range; higher values produce more consistent, monotonous output. Only applicable when using ElevenLabs.\n - `use_speaker_boost?: boolean`\n Amplifies similarity to the original speaker voice. Increases computational load and latency slightly. Only applicable when using ElevenLabs.\n - `voice_speed?: number`\n The speed of the voice in the range [0.25, 2.0]. 1.0 is deafult speed. 
Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.\n\n- `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n Configuration settings for the assistant's web widget.\n - `agent_thinking_text?: string`\n Text displayed while the agent is processing.\n - `audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }`\n - `default_state?: 'expanded' | 'collapsed'`\n The default state of the widget.\n - `give_feedback_url?: string`\n URL for users to give feedback.\n - `logo_icon_url?: string`\n URL to a custom logo icon for the widget.\n - `position?: 'fixed' | 'static'`\n The positioning style for the widget.\n - `report_issue_url?: string`\n URL for users to report issues.\n - `speak_to_interrupt_text?: string`\n Text prompting users to speak to interrupt.\n - `start_call_text?: string`\n Custom text displayed on the start call button.\n - `theme?: 'light' | 'dark'`\n The visual theme for the widget.\n - `view_history_url?: string`\n URL to view conversation history.\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; 
}; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; 
version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; 
on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: 
object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: 
string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.versions.update('version_id', { assistant_id: 'assistant_id' });\n\nconsole.log(inferenceEmbedding);\n```",
4803
+ markdown: "## update\n\n`client.ai.assistants.versions.update(assistant_id: string, version_id: string, description?: string, dynamic_variables?: object, dynamic_variables_webhook_timeout_ms?: number, dynamic_variables_webhook_url?: string, enabled_features?: 'telephony' | 'messaging'[], external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }, fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }, greeting?: string, insight_settings?: { insight_group_id?: string; }, instructions?: string, integrations?: { integration_id: string; allowed_list?: string[]; }[], interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }, llm_api_key_ref?: string, mcp_servers?: { id: string; allowed_tools?: string[]; }[], messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }, model?: string, name?: string, observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }, post_conversation_settings?: { enabled?: boolean; }, privacy_settings?: { data_retention?: boolean; }, tags?: string[], telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }, tool_ids?: string[], tools?: object | object | { handoff: 
object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[], transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }, version_name?: string, voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }, widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; 
version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}/versions/{version_id}`\n\nUpdates the configuration of a specific assistant version. Can not update main version\n\n### Parameters\n\n- `assistant_id: string`\n\n- `version_id: string`\n\n- `description?: string`\n\n- `dynamic_variables?: object`\n Map of dynamic variables and their default values\n\n- `dynamic_variables_webhook_timeout_ms?: number`\n Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables).\n\n- `dynamic_variables_webhook_url?: string`\n If `dynamic_variables_webhook_url` is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. **Gotcha:** the webhook response must wrap variables under a top-level `dynamic_variables` object, e.g. `{\"dynamic_variables\": {\"customer_name\": \"Jane\"}}`. Returning a flat object will be ignored and variables will fall back to their defaults. 
See the [dynamic variables guide](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) for the full request/response format and timeout behavior.\n\n- `enabled_features?: 'telephony' | 'messaging'[]`\n\n- `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `base_url: string`\n Base URL for the external LLM endpoint.\n - `model: string`\n Model identifier to use with the external LLM endpoint.\n - `authentication_method?: 'token' | 'certificate'`\n Authentication method used when connecting to the external LLM endpoint.\n - `certificate_ref?: string`\n Integration secret identifier for the client certificate used with certificate authentication.\n - `forward_metadata?: boolean`\n When `true`, Telnyx forwards the assistant's dynamic variables to the external LLM endpoint as a top-level `extra_metadata` object on the chat completion request body. Defaults to `false`. Example payload sent to the external endpoint: `{\"extra_metadata\": {\"customer_name\": \"Jane\", \"account_id\": \"acct_789\", \"telnyx_agent_target\": \"+13125550100\", \"telnyx_end_user_target\": \"+13125550123\"}}`. 
Distinct from OpenAI's native `metadata` field, which has its own size and type limits.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the external LLM API key.\n - `token_retrieval_url?: string`\n URL used to retrieve an access token when certificate authentication is enabled.\n\n- `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `llm_api_key_ref?: string`\n Integration secret identifier for the fallback model API key.\n - `model?: string`\n Fallback Telnyx-hosted model to use when the primary LLM provider is unavailable.\n\n- `greeting?: string`\n Text that the assistant will use to start the conversation. This may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). Use an empty string to have the assistant wait for the user to speak first. Use the special value `<assistant-speaks-first-with-model-generated-message>` to have the assistant generate the greeting based on the system instructions.\n\n- `insight_settings?: { insight_group_id?: string; }`\n - `insight_group_id?: string`\n Reference to an Insight Group. Insights in this group will be run automatically for all the assistant's conversations.\n\n- `instructions?: string`\n System instructions for the assistant. These may be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables)\n\n- `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n Connected integrations attached to the assistant. 
The catalog of available integrations is at `/ai/integrations`; the user's connected integrations are at `/ai/integrations/connections`. Each item references a catalog integration by `integration_id`.\n\n- `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn behavior is controlled by the transcription end-of-turn settings under `transcription.settings` (`eot_threshold`, `eot_timeout_ms`, `eager_eot_threshold`).\n - `enable?: boolean`\n Whether users can interrupt the assistant while it is speaking.\n - `start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }`\n Controls when the assistant starts speaking after the user stops. These thresholds primarily apply to non turn-taking transcription models. For turn-taking models like `deepgram/flux`, end-of-turn detection is driven by the transcription end-of-turn settings under `transcription.settings` instead.\n\n- `llm_api_key_ref?: string`\n This is only needed when using third-party inference providers selected by `model`. The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use `external_llm.llm_api_key_ref` instead. Warning: Free plans are unlikely to work with this integration.\n\n- `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n MCP servers attached to the assistant. 
Create MCP servers with `/ai/mcp_servers`, then reference them by `id` here.\n\n- `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `conversation_inactivity_minutes?: number`\n If more than this many minutes have passed since the last message, the assistant will start a new conversation instead of continuing the existing one.\n - `default_messaging_profile_id?: string`\n Default Messaging Profile used for messaging exchanges with your assistant. This will be created automatically on assistant creation.\n - `delivery_status_webhook_url?: string`\n The URL where webhooks related to delivery statused for assistant messages will be sent.\n\n- `model?: string`\n ID of the model to use when `external_llm` is not set. You can use the [Get models API](https://developers.telnyx.com/api-reference/chat/get-available-models) to see available models. If `external_llm` is provided, the assistant uses `external_llm` instead of this field. If neither `model` nor `external_llm` is provided, Telnyx applies the default model.\n\n- `name?: string`\n\n- `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `host?: string`\n - `public_key_ref?: string`\n - `secret_key_ref?: string`\n - `status?: 'enabled' | 'disabled'`\n\n- `post_conversation_settings?: { enabled?: boolean; }`\n Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.\n - `enabled?: boolean`\n Whether post-conversation processing is enabled. 
When true, the assistant will be invoked after the conversation ends to perform any final tool calls. Defaults to false.\n\n- `privacy_settings?: { data_retention?: boolean; }`\n - `data_retention?: boolean`\n If true, conversation history and insights will be stored. If false, they will not be stored. This in‑tool toggle governs solely the retention of conversation history and insights via the AI assistant. It has no effect on any separate recording, transcription, or storage configuration that you have set at the account, number, or application level. All such external settings remain in force regardless of your selection here.\n\n- `tags?: string[]`\n Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.\n\n- `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `default_texml_app_id?: string`\n Default Texml App used for voice calls with your assistant. This will be created automatically on assistant creation.\n - `noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'`\n The noise suppression engine to use. Use 'disabled' to turn off noise suppression.\n - `noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }`\n Configuration for noise suppression. 
Only applicable when noise_suppression is 'deepfilternet'.\n - `recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }`\n Configuration for call recording format and channel settings.\n - `supports_unauthenticated_web_calls?: boolean`\n When enabled, allows users to interact with your AI assistant directly from your website without requiring authentication. This is required for FE widgets that work with assistants that have telephony enabled.\n - `time_limit_secs?: number`\n Maximum duration in seconds for the AI assistant to participate on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `user_idle_reply_secs?: number`\n Duration in seconds of end user silence before the assistant checks in on the user. When this limit is reached the assistant will prompt the user to respond. This is distinct from user_idle_timeout_secs which stops the assistant entirely.\n - `user_idle_timeout_secs?: number`\n Maximum duration in seconds of end user silence on the call. When this limit is reached the assistant will be stopped. This limit does not apply to portions of a call without an active assistant (for instance, a call transferred to a human representative).\n - `voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: { message?: string; prompt?: string; type?: 'prompt' | 'message'; }; }; }`\n Configuration for voicemail detection (AMD - Answering Machine Detection) on outgoing calls. These settings only apply if AMD is enabled on the Dial command. See [TeXML Dial documentation](https://developers.telnyx.com/api-reference/texml-rest-commands/initiate-an-outbound-call) for enabling AMD. 
Recommended settings: MachineDetection=Enable, AsyncAmd=true, DetectionMode=Premium.\n\n- `tool_ids?: string[]`\n IDs of shared tools to attach to the assistant. New integrations should prefer `tool_ids` over inline `tools`.\n\n- `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n Deprecated for new integrations. Inline tool definitions available to the assistant. 
Prefer `tool_ids` to attach shared tools created with the AI Tools endpoints.\n\n- `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `api_key_ref?: string`\n Integration secret identifier for the transcription provider API key. Currently used for Azure transcription regions that require a customer-provided API key.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. All Deepgram models are run on-premise.\n\n- `deepgram/flux` is optimized for turn-taking with multilingual language hints.\n- `deepgram/nova-3` is multilingual with automatic language detection.\n- `deepgram/nova-2` is Deepgram's previous-generation multilingual model.\n- `azure/fast` is a multilingual Azure transcription model.\n- `assemblyai/universal-streaming` is a multilingual streaming model with configurable turn detection.\n- `xai/grok-stt` is a multilingual Grok STT model.\n - `region?: string`\n Region on third party cloud providers (currently Azure) if using one of their models. 
Some regions require `api_key_ref`.\n - `settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }`\n\n- `version_name?: string`\n Human-readable name for the assistant version.\n\n- `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `voice: string`\n The voice to be used by the voice assistant. Check the full list of [available voices](https://developers.telnyx.com/docs/tts-stt/tts-available-voices) via our voices API.\nTo use ElevenLabs, you must reference your ElevenLabs API key as an integration secret under the `api_key_ref` field. See [integration secrets documentation](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) for details. For Telnyx voices, use `Telnyx.<model_id>.<voice_id>` (e.g. Telnyx.KokoroTTS.af_heart).\nThe voice portion of the identifier supports [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables) using mustache syntax (e.g. `Telnyx.Ultra.{{voice_id}}`). The variable is resolved at call time from your dynamic variables webhook, allowing you to select the voice dynamically per call.\n - `api_key_ref?: string`\n The `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api-reference/integration-secrets/create-a-secret) that refers to your ElevenLabs API key. 
Warning: Free plans are unlikely to work with this integration.\n - `background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }`\n Optional background audio to play on the call. Use a predefined media bed, or supply a looped MP3 URL. If a media URL is chosen in the portal, customers can preview it before saving.\n - `expressive_mode?: boolean`\n Enables emotionally expressive speech using SSML emotion tags. When enabled, the assistant uses audio tags like angry, excited, content, and sad to add emotional nuance. Only supported for Telnyx Ultra voices.\n - `language_boost?: string`\n Enhances recognition for specific languages and dialects during MiniMax TTS synthesis. Default is null (no boost). Set to 'auto' for automatic language detection. Only applicable when using MiniMax voices.\n - `similarity_boost?: number`\n Determines how closely the AI should adhere to the original voice when attempting to replicate it. Only applicable when using ElevenLabs.\n - `speed?: number`\n Adjusts speech velocity. 1.0 is default speed; values less than 1.0 slow speech; values greater than 1.0 accelerate it. Only applicable when using ElevenLabs.\n - `style?: number`\n Determines the style exaggeration of the voice. Amplifies speaker style but consumes additional resources when set above 0. Only applicable when using ElevenLabs.\n - `temperature?: number`\n Determines how stable the voice is and the randomness between each generation. Lower values create a broader emotional range; higher values produce more consistent, monotonous output. Only applicable when using ElevenLabs.\n - `use_speaker_boost?: boolean`\n Amplifies similarity to the original speaker voice. Increases computational load and latency slightly. Only applicable when using ElevenLabs.\n - `voice_speed?: number`\n The speed of the voice in the range [0.25, 2.0]. 1.0 is deafult speed. 
Larger numbers make the voice faster, smaller numbers make it slower. This is only applicable for Telnyx Natural voices.\n\n- `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n Configuration settings for the assistant's web widget.\n - `agent_thinking_text?: string`\n Text displayed while the agent is processing.\n - `audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }`\n - `default_state?: 'expanded' | 'collapsed'`\n The default state of the widget.\n - `give_feedback_url?: string`\n URL for users to give feedback.\n - `logo_icon_url?: string`\n URL to a custom logo icon for the widget.\n - `position?: 'fixed' | 'static'`\n The positioning style for the widget.\n - `report_issue_url?: string`\n URL for users to report issues.\n - `speak_to_interrupt_text?: string`\n Text prompting users to speak to interrupt.\n - `start_call_text?: string`\n Custom text displayed on the start call button.\n - `theme?: 'light' | 'dark'`\n The visual theme for the widget.\n - `view_history_url?: string`\n URL to view conversation history.\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; 
}; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; 
version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; 
on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; 
on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; 
report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.versions.update('version_id', { assistant_id: 'assistant_id' });\n\nconsole.log(inferenceEmbedding);\n```",
4804
4804
  perLanguage: {
4805
4805
  typescript: {
4806
4806
  method: 'client.ai.assistants.versions.update',
@@ -4845,7 +4845,7 @@ const EMBEDDED_METHODS = [
4845
4845
  qualified: 'client.ai.assistants.versions.promote',
4846
4846
  params: ['assistant_id: string;', 'version_id: string;'],
4847
4847
  response: '{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }',
4848
- markdown: "## promote\n\n`client.ai.assistants.versions.promote(assistant_id: string, version_id: string): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}/versions/{version_id}/promote`\n\nPromotes a specific version to be the main/current version of the assistant. 
This will delete any existing canary deploy configuration and send all live production traffic to this version.\n\n### Parameters\n\n- `assistant_id: string`\n\n- `version_id: string`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; 
recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: 
string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; 
body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; background_audio?: { type: 'predefined_media'; value: 
'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.versions.promote('version_id', { assistant_id: 'assistant_id' });\n\nconsole.log(inferenceEmbedding);\n```",
4848
+ markdown: "## promote\n\n`client.ai.assistants.versions.promote(assistant_id: string, version_id: string): { id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: enabled_features[]; external_llm?: object; fallback_config?: object; greeting?: string; import_metadata?: import_metadata; insight_settings?: insight_settings; integrations?: object[]; interruption_settings?: object; llm_api_key_ref?: string; mcp_servers?: object[]; messaging_settings?: messaging_settings; observability_settings?: observability; post_conversation_settings?: object; privacy_settings?: privacy_settings; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: telephony_settings; tools?: assistant_tool[]; transcription?: transcription_settings; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: voice_settings; widget_settings?: widget_settings; }`\n\n**post** `/ai/assistants/{assistant_id}/versions/{version_id}/promote`\n\nPromotes a specific version to be the main/current version of the assistant. 
This will delete any existing canary deploy configuration and send all live production traffic to this version.\n\n### Parameters\n\n- `assistant_id: string`\n\n- `version_id: string`\n\n### Returns\n\n- `{ id: string; created_at: string; instructions: string; model: string; name: string; description?: string; dynamic_variables?: object; dynamic_variables_webhook_timeout_ms?: number; dynamic_variables_webhook_url?: string; enabled_features?: 'telephony' | 'messaging'[]; external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }; insight_settings?: { insight_group_id?: string; }; integrations?: { integration_id: string; allowed_list?: string[]; }[]; interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: object; wait_seconds?: number; }; }; llm_api_key_ref?: string; mcp_servers?: { id: string; allowed_tools?: string[]; }[]; messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }; observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }; post_conversation_settings?: { enabled?: boolean; }; privacy_settings?: { data_retention?: boolean; }; related_mission_ids?: string[]; tags?: string[]; telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: object; 
recording_settings?: object; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: object; }; tools?: object | object | { handoff: object; type: 'handoff'; } | object | { transfer: object; type: 'transfer'; } | { invite_config: object; type: 'invite'; } | { refer: object; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: object; type: 'send_message'; } | { skip_turn: object; type: 'skip_turn'; }[]; transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: transcription_settings_config; }; version_created_at?: string; version_id?: string; version_name?: string; voice_settings?: { voice: string; api_key_ref?: string; background_audio?: object | object | object; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }; widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: audio_visualizer_config; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }; }`\n\n - `id: string`\n - `created_at: string`\n - `instructions: string`\n - `model: string`\n - `name: string`\n - `description?: string`\n - `dynamic_variables?: object`\n - `dynamic_variables_webhook_timeout_ms?: number`\n - `dynamic_variables_webhook_url?: string`\n - `enabled_features?: 'telephony' | 'messaging'[]`\n - `external_llm?: { base_url: string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }`\n - `fallback_config?: { external_llm?: { base_url: 
string; model: string; authentication_method?: 'token' | 'certificate'; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n - `greeting?: string`\n - `import_metadata?: { import_id?: string; import_provider?: 'elevenlabs' | 'vapi' | 'retell'; }`\n - `insight_settings?: { insight_group_id?: string; }`\n - `integrations?: { integration_id: string; allowed_list?: string[]; }[]`\n - `interruption_settings?: { enable?: boolean; start_speaking_plan?: { transcription_endpointing_plan?: { on_no_punctuation_seconds?: number; on_number_seconds?: number; on_punctuation_seconds?: number; }; wait_seconds?: number; }; }`\n - `llm_api_key_ref?: string`\n - `mcp_servers?: { id: string; allowed_tools?: string[]; }[]`\n - `messaging_settings?: { conversation_inactivity_minutes?: number; default_messaging_profile_id?: string; delivery_status_webhook_url?: string; }`\n - `observability_settings?: { host?: string; public_key_ref?: string; secret_key_ref?: string; status?: 'enabled' | 'disabled'; }`\n - `post_conversation_settings?: { enabled?: boolean; }`\n - `privacy_settings?: { data_retention?: boolean; }`\n - `related_mission_ids?: string[]`\n - `tags?: string[]`\n - `telephony_settings?: { default_texml_app_id?: string; noise_suppression?: 'krisp' | 'deepfilternet' | 'disabled'; noise_suppression_config?: { attenuation_limit?: number; mode?: 'advanced'; }; recording_settings?: { channels?: 'single' | 'dual'; enabled?: boolean; format?: 'wav' | 'mp3'; }; supports_unauthenticated_web_calls?: boolean; time_limit_secs?: number; user_idle_reply_secs?: number; user_idle_timeout_secs?: number; voicemail_detection?: { on_voicemail_detected?: { action?: 'stop_assistant' | 'leave_message_and_stop_assistant' | 'continue_assistant'; voicemail_message?: object; }; }; }`\n - `tools?: { type: 'webhook'; webhook: { description: string; name: string; url: string; async?: boolean; 
body_parameters?: object; headers?: object[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: object; query_parameters?: object; store_fields_as_variables?: object[]; timeout_ms?: number; }; } | { retrieval: object; type: 'retrieval'; } | { handoff: { ai_assistants: { id: string; name: string; }[]; voice_mode?: 'unified' | 'distinct'; }; type: 'handoff'; } | { hangup: object; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; custom_headers?: { name?: string; value?: string; }[]; voicemail_detection?: { detection_config?: object; detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; warm_message_delay_ms?: number; warm_transfer_instructions?: string; }; type: 'transfer'; } | { invite_config: { custom_headers?: { name?: string; value?: string; }[]; from?: string; targets?: { to: string; name?: string; }[] | string; voicemail_detection?: { detection_mode?: 'disabled' | 'premium'; on_voicemail_detected?: object; }; }; type: 'invite'; } | { refer: { targets: { name: string; sip_address: string; sip_auth_password?: string; sip_auth_username?: string; }[]; custom_headers?: { name?: string; value?: string; }[]; sip_headers?: { name?: 'User-to-User' | 'Diversion'; value?: string; }[]; }; type: 'refer'; } | { send_dtmf: object; type: 'send_dtmf'; } | { send_message: { message_template?: string; }; type: 'send_message'; } | { skip_turn: { description?: string; }; type: 'skip_turn'; }[]`\n - `transcription?: { api_key_ref?: string; language?: string; model?: string; region?: string; settings?: { eager_eot_threshold?: number; end_of_turn_confidence_threshold?: number; eot_threshold?: number; eot_timeout_ms?: number; keyterm?: string; max_turn_silence?: number; min_turn_silence?: number; numerals?: boolean; smart_format?: boolean; }; }`\n - `version_created_at?: string`\n - `version_id?: string`\n - `version_name?: string`\n - `voice_settings?: { voice: string; api_key_ref?: string; 
background_audio?: { type: 'predefined_media'; value: 'silence' | 'office'; } | { type: 'media_url'; value: string; } | { type: 'media_name'; value: string; }; expressive_mode?: boolean; language_boost?: string; similarity_boost?: number; speed?: number; style?: number; temperature?: number; use_speaker_boost?: boolean; voice_speed?: number; }`\n - `widget_settings?: { agent_thinking_text?: string; audio_visualizer_config?: { color?: 'verdant' | 'twilight' | 'bloom' | 'mystic' | 'flare' | 'glacier'; preset?: string; }; default_state?: 'expanded' | 'collapsed'; give_feedback_url?: string; logo_icon_url?: string; position?: 'fixed' | 'static'; report_issue_url?: string; speak_to_interrupt_text?: string; start_call_text?: string; theme?: 'light' | 'dark'; view_history_url?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst inferenceEmbedding = await client.ai.assistants.versions.promote('version_id', { assistant_id: 'assistant_id' });\n\nconsole.log(inferenceEmbedding);\n```",
4849
4849
  perLanguage: {
4850
4850
  typescript: {
4851
4851
  method: 'client.ai.assistants.versions.promote',
@@ -11038,7 +11038,7 @@ const EMBEDDED_METHODS = [
11038
11038
  "webhook_urls_method?: 'POST' | 'GET';",
11039
11039
  ],
11040
11040
  response: "{ data?: { call_control_id: string; call_leg_id: string; call_session_id: string; is_alive: boolean; record_type: 'call'; call_duration?: number; client_state?: string; end_time?: string; recording_id?: string; start_time?: string; }; }",
11041
- markdown: "## dial\n\n`client.calls.dial(connection_id: string, from: string, to: string | string[], answering_machine_detection?: 'premium' | 'detect' | 'detect_beep' | 'detect_words' | 'greeting_end' | 'disabled', answering_machine_detection_config?: { after_greeting_silence_millis?: number; between_words_silence_millis?: number; greeting_duration_millis?: number; greeting_silence_duration_millis?: number; greeting_total_analysis_time_millis?: number; initial_silence_millis?: number; maximum_number_of_words?: number; maximum_word_length_millis?: number; silence_threshold?: number; total_analysis_time_millis?: number; }, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, audio_url?: string, billing_group_id?: string, bridge_intent?: boolean, bridge_on_answer?: boolean, client_state?: string, command_id?: string, conference_config?: { id?: string; beep_enabled?: 'always' | 'never' | 'on_enter' | 'on_exit'; conference_name?: string; early_media?: boolean; end_conference_on_exit?: boolean; hold?: boolean; hold_audio_url?: string; hold_media_name?: string; mute?: boolean; soft_end_conference_on_exit?: boolean; start_conference_on_create?: boolean; start_conference_on_enter?: boolean; supervisor_role?: 'barge' | 'monitor' | 'none' | 'whisper'; whisper_call_control_ids?: string[]; }, custom_headers?: { name: string; value: string; }[], deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }, dialogflow_config?: { analyze_sentiment?: boolean; partial_automated_agent_reply?: boolean; }, enable_dialogflow?: boolean, from_display_name?: string, link_to?: string, 
media_encryption?: 'disabled' | 'SRTP' | 'DTLS', media_name?: string, park_after_unbridge?: string, preferred_codecs?: string, prevent_double_bridge?: boolean, privacy?: 'id' | 'none', record?: 'record-from-answer', record_channels?: 'single' | 'dual', record_custom_file_name?: string, record_format?: 'wav' | 'mp3', record_max_length?: number, record_timeout_secs?: number, record_track?: 'both' | 'inbound' | 'outbound', record_trim?: 'trim-silence', send_silence_when_idle?: boolean, sip_auth_password?: string, sip_auth_username?: string, sip_headers?: { name: 'User-to-User'; value: string; }[], sip_region?: 'US' | 'Europe' | 'Canada' | 'Australia' | 'Middle East', sip_transport_protocol?: 'UDP' | 'TCP' | 'TLS', sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }, stream_auth_token?: string, stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16', stream_bidirectional_mode?: 'mp3' | 'rtp', stream_bidirectional_sampling_rate?: 8000 | 16000 | 22050 | 24000 | 48000, stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite', stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default', stream_establish_before_call_originate?: boolean, stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks', stream_url?: string, supervise_call_control_id?: string, supervisor_role?: 'barge' | 'whisper' | 'monitor', time_limit_secs?: number, timeout_secs?: number, transcription?: boolean, transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: transcription_engine_google_config | transcription_engine_telnyx_config | transcription_engine_azure_config | object | object | transcription_engine_a_config | transcription_engine_b_config | deepgram_nova2_config | deepgram_nova3_config; transcription_tracks?: string; }, webhook_retries_policies?: object, 
webhook_url?: string, webhook_url_method?: 'POST' | 'GET', webhook_urls?: object, webhook_urls_method?: 'POST' | 'GET'): { data?: object; }`\n\n**post** `/calls`\n\nDial a number or SIP URI from a given connection. A successful response will include a `call_leg_id` which can be used to correlate the command with subsequent webhooks.\n\n**Expected Webhooks:**\n\n- `call.initiated`\n- `call.answered` or `call.hangup`\n- `call.hold` and `call.unhold` if the call is held/unheld\n- `call.machine.detection.ended` if `answering_machine_detection` was requested\n- `call.machine.greeting.ended` if `answering_machine_detection` was requested to detect the end of machine greeting\n- `call.machine.premium.detection.ended` if `answering_machine_detection=premium` was requested\n- `call.machine.premium.greeting.ended` if `answering_machine_detection=premium` was requested and a beep was detected\n- `call.deepfake_detection.result` if `deepfake_detection` was enabled\n- `call.deepfake_detection.error` if `deepfake_detection` was enabled and an error occurred\n- `streaming.started`, `streaming.stopped` or `streaming.failed` if `stream_url` was set\n\nWhen the `record` parameter is set to `record-from-answer`, the response will include a `recording_id` field.\n\n\n### Parameters\n\n- `connection_id: string`\n The ID of the Call Control App (formerly ID of the connection) to be used when dialing the destination.\n\n- `from: string`\n The `from` number to be used as the caller id presented to the destination (`to` number). The number should be in +E164 format.\n\n- `to: string | string[]`\n The DID or SIP URI to dial out to. Multiple DID or SIP URIs can be provided using an array of strings\n\n- `answering_machine_detection?: 'premium' | 'detect' | 'detect_beep' | 'detect_words' | 'greeting_end' | 'disabled'`\n Enables Answering Machine Detection. Telnyx offers Premium and Standard detections. 
With Premium detection, when a call is answered, Telnyx runs real-time detection and sends a `call.machine.premium.detection.ended` webhook with one of the following results: `human_residence`, `human_business`, `machine`, `silence` or `fax_detected`. If we detect a beep, we also send a `call.machine.premium.greeting.ended` webhook with the result of `beep_detected`. If we detect a beep before `call.machine.premium.detection.ended` we only send `call.machine.premium.greeting.ended`, and if we detect a beep after `call.machine.premium.detection.ended`, we send both webhooks. With Standard detection, when a call is answered, Telnyx runs real-time detection to determine if it was picked up by a human or a machine and sends an `call.machine.detection.ended` webhook with the analysis result. If `greeting_end` or `detect_words` is used and a `machine` is detected, you will receive another `call.machine.greeting.ended` webhook when the answering machine greeting ends with a beep or silence. If `detect_beep` is used, you will only receive `call.machine.greeting.ended` if a beep is detected.\n\n- `answering_machine_detection_config?: { after_greeting_silence_millis?: number; between_words_silence_millis?: number; greeting_duration_millis?: number; greeting_silence_duration_millis?: number; greeting_total_analysis_time_millis?: number; initial_silence_millis?: number; maximum_number_of_words?: number; maximum_word_length_millis?: number; silence_threshold?: number; total_analysis_time_millis?: number; }`\n Optional configuration parameters to modify 'answering_machine_detection' performance. 
Only `total_analysis_time_millis` and `greeting_duration_millis` parameters are applicable when `premium` is selected as answering_machine_detection.\n - `after_greeting_silence_millis?: number`\n Silence duration threshold after a greeting message or voice for it be considered human.\n - `between_words_silence_millis?: number`\n Maximum threshold for silence between words.\n - `greeting_duration_millis?: number`\n Maximum threshold of a human greeting. If greeting longer than this value, considered machine.\n - `greeting_silence_duration_millis?: number`\n If machine already detected, maximum threshold for silence between words. If exceeded, the greeting is considered ended.\n - `greeting_total_analysis_time_millis?: number`\n If machine already detected, maximum timeout threshold to determine the end of the machine greeting.\n - `initial_silence_millis?: number`\n If initial silence duration is greater than this value, consider it a machine.\n - `maximum_number_of_words?: number`\n If number of detected words is greater than this value, consder it a machine.\n - `maximum_word_length_millis?: number`\n If a single word lasts longer than this threshold, consider it a machine.\n - `silence_threshold?: number`\n Minimum noise threshold for any analysis.\n - `total_analysis_time_millis?: number`\n Maximum timeout threshold for overall detection.\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: 
object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. 
There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. 
This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant (webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `audio_url?: string`\n The URL of a file to be played back to the callee when the call is answered. The URL can point to either a WAV or MP3 file. media_name and audio_url cannot be used together in one request.\n\n- `billing_group_id?: string`\n Use this field to set the Billing Group ID for the call. Must be a valid and existing Billing Group ID.\n\n- `bridge_intent?: boolean`\n Indicates the intent to bridge this call with the call specified in link_to. When bridge_intent is true, link_to becomes required and the from number will be overwritten by the from number from the linked call.\n\n- `bridge_on_answer?: boolean`\n Whether to automatically bridge answered call to the call specified in link_to. 
When bridge_on_answer is true, link_to becomes required.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore others Dial commands with the same `command_id`.\n\n- `conference_config?: { id?: string; beep_enabled?: 'always' | 'never' | 'on_enter' | 'on_exit'; conference_name?: string; early_media?: boolean; end_conference_on_exit?: boolean; hold?: boolean; hold_audio_url?: string; hold_media_name?: string; mute?: boolean; soft_end_conference_on_exit?: boolean; start_conference_on_create?: boolean; start_conference_on_enter?: boolean; supervisor_role?: 'barge' | 'monitor' | 'none' | 'whisper'; whisper_call_control_ids?: string[]; }`\n Optional configuration parameters to dial new participant into a conference.\n - `id?: string`\n Conference ID to be joined\n - `beep_enabled?: 'always' | 'never' | 'on_enter' | 'on_exit'`\n Whether a beep sound should be played when the participant joins and/or leaves the conference. Can be used to override the conference-level setting.\n - `conference_name?: string`\n Conference name to be joined\n - `early_media?: boolean`\n Controls the moment when dialled call is joined into conference. If set to `true` user will be joined as soon as media is available (ringback). If `false` user will be joined when call is answered. Defaults to `true`\n - `end_conference_on_exit?: boolean`\n Whether the conference should end and all remaining participants be hung up after the participant leaves the conference. Defaults to \"false\".\n - `hold?: boolean`\n Whether the participant should be put on hold immediately after joining the conference. Defaults to \"false\".\n - `hold_audio_url?: string`\n The URL of a file to be played to the participant when they are put on hold after joining the conference. hold_media_name and hold_audio_url cannot be used together in one request. 
Takes effect only when \"start_conference_on_create\" is set to \"false\". This property takes effect only if \"hold\" is set to \"true\".\n - `hold_media_name?: string`\n The media_name of a file to be played to the participant when they are put on hold after joining the conference. The media_name must point to a file previously uploaded to api.telnyx.com/v2/media by the same user/organization. The file must either be a WAV or MP3 file. Takes effect only when \"start_conference_on_create\" is set to \"false\". This property takes effect only if \"hold\" is set to \"true\".\n - `mute?: boolean`\n Whether the participant should be muted immediately after joining the conference. Defaults to \"false\".\n - `soft_end_conference_on_exit?: boolean`\n Whether the conference should end after the participant leaves the conference. NOTE this doesn't hang up the other participants. Defaults to \"false\".\n - `start_conference_on_create?: boolean`\n Whether the conference should be started on creation. If the conference isn't started all participants that join are automatically put on hold. Defaults to \"true\".\n - `start_conference_on_enter?: boolean`\n Whether the conference should be started after the participant joins the conference. Defaults to \"false\".\n - `supervisor_role?: 'barge' | 'monitor' | 'none' | 'whisper'`\n Sets the joining participant as a supervisor for the conference. A conference can have multiple supervisors. \"barge\" means the supervisor enters the conference as a normal participant. This is the same as \"none\". \"monitor\" means the supervisor is muted but can hear all participants. \"whisper\" means that only the specified \"whisper_call_control_ids\" can hear the supervisor. Defaults to \"none\".\n - `whisper_call_control_ids?: string[]`\n Array of unique call_control_ids the joining supervisor can whisper to. 
If none provided, the supervisor will join the conference as a monitoring participant only.\n\n- `custom_headers?: { name: string; value: string; }[]`\n Custom headers to be added to the SIP INVITE.\n\n- `deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }`\n Enables deepfake detection on the call. When enabled, audio from the remote party is streamed to a detection service that analyzes whether the voice is AI-generated. Results are delivered via the `call.deepfake_detection.result` webhook.\n - `enabled: boolean`\n Whether deepfake detection is enabled.\n - `rtp_timeout?: number`\n Maximum time in seconds to wait for RTP audio before timing out. If no audio is received within this window, detection stops with an error.\n - `timeout?: number`\n Maximum time in seconds to wait for a detection result before timing out.\n\n- `dialogflow_config?: { analyze_sentiment?: boolean; partial_automated_agent_reply?: boolean; }`\n - `analyze_sentiment?: boolean`\n Enable sentiment analysis from Dialogflow.\n - `partial_automated_agent_reply?: boolean`\n Enable partial automated agent reply from Dialogflow.\n\n- `enable_dialogflow?: boolean`\n Enables Dialogflow for the current call. The default value is false.\n\n- `from_display_name?: string`\n The `from_display_name` string to be used as the caller id name (SIP From Display Name) presented to the destination (`to` number). The string should have a maximum of 128 characters, containing only letters, numbers, spaces, and -_~!.+ special characters. If ommited, the display name will be the same as the number in the `from` field.\n\n- `link_to?: string`\n Use another call's control id for sharing the same call session id\n\n- `media_encryption?: 'disabled' | 'SRTP' | 'DTLS'`\n Defines whether media should be encrypted on the call.\n\n- `media_name?: string`\n The media_name of a file to be played back to the callee when the call is answered. 
The media_name must point to a file previously uploaded to api.telnyx.com/v2/media by the same user/organization. The file must either be a WAV or MP3 file.\n\n- `park_after_unbridge?: string`\n If supplied with the value `self`, the current leg will be parked after unbridge. If not set, the default behavior is to hang up the leg. When park_after_unbridge is set, link_to becomes required.\n\n- `preferred_codecs?: string`\n The list of comma-separated codecs in a preferred order for the forked media to be received.\n\n- `prevent_double_bridge?: boolean`\n Prevents bridging and hangs up the call if the target is already bridged. Disabled by default.\n\n- `privacy?: 'id' | 'none'`\n Indicates the privacy level to be used for the call. When set to `id`, caller ID information (name and number) will be hidden from the called party. When set to `none` or omitted, caller ID will be shown normally.\n\n- `record?: 'record-from-answer'`\n Start recording automatically after an event. Disabled by default.\n\n- `record_channels?: 'single' | 'dual'`\n Defines which channel should be recorded ('single' or 'dual') when `record` is specified.\n\n- `record_custom_file_name?: string`\n The custom recording file name to be used instead of the default `call_leg_id`. Telnyx will still add a Unix timestamp suffix.\n\n- `record_format?: 'wav' | 'mp3'`\n Defines the format of the recording ('wav' or 'mp3') when `record` is specified.\n\n- `record_max_length?: number`\n Defines the maximum length for the recording in seconds when `record` is specified. The minimum value is 0. The maximum value is 43200. The default value is 0 (infinite).\n\n- `record_timeout_secs?: number`\n The number of seconds that Telnyx will wait for the recording to be stopped if silence is detected when `record` is specified. The timer only starts when the speech is detected. Please note that call transcription is used to detect silence and the related charge will be applied. The minimum value is 0. 
The default value is 0 (infinite).\n\n- `record_track?: 'both' | 'inbound' | 'outbound'`\n The audio track to be recorded. Can be either `both`, `inbound` or `outbound`. If only single track is specified (`inbound`, `outbound`), `channels` configuration is ignored and it will be recorded as mono (single channel).\n\n- `record_trim?: 'trim-silence'`\n When set to `trim-silence`, silence will be removed from the beginning and end of the recording.\n\n- `send_silence_when_idle?: boolean`\n Generate silence RTP packets when no transmission available.\n\n- `sip_auth_password?: string`\n SIP Authentication password used for SIP challenges.\n\n- `sip_auth_username?: string`\n SIP Authentication username used for SIP challenges.\n\n- `sip_headers?: { name: 'User-to-User'; value: string; }[]`\n SIP headers to be added to the SIP INVITE request. Currently only User-to-User header is supported.\n\n- `sip_region?: 'US' | 'Europe' | 'Canada' | 'Australia' | 'Middle East'`\n Defines the SIP region to be used for the call.\n\n- `sip_transport_protocol?: 'UDP' | 'TCP' | 'TLS'`\n Defines SIP transport protocol to be used on the call.\n\n- `sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }`\n Use this field to modify sound effects, for example adjust the pitch.\n - `octaves?: number`\n Adjust the pitch in octaves, values should be between -1 and 1, default 0\n - `pitch?: number`\n Set the pitch directly, value should be > 0, default 1 (lower = lower tone)\n - `semitone?: number`\n Adjust the pitch in semitones, values should be between -14 and 14, default 0\n - `track?: string`\n The track to which the sound modifications will be applied. Accepted values are `inbound` or `outbound`\n\n- `stream_auth_token?: string`\n An authentication token to be sent as part of the WebSocket connection when using streaming. 
Maximum length is 4000 characters.\n\n- `stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16'`\n Indicates codec for bidirectional streaming RTP payloads. Used only with stream_bidirectional_mode=rtp. Case sensitive.\n\n- `stream_bidirectional_mode?: 'mp3' | 'rtp'`\n Configures method of bidirectional streaming (mp3, rtp).\n\n- `stream_bidirectional_sampling_rate?: 8000 | 16000 | 22050 | 24000 | 48000`\n Audio sampling rate.\n\n- `stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite'`\n Specifies which call legs should receive the bidirectional stream audio.\n\n- `stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default'`\n Specifies the codec to be used for the streamed audio. When set to 'default' or when transcoding is not possible, the codec from the call will be used.\n\n- `stream_establish_before_call_originate?: boolean`\n Establish websocket connection before dialing the destination. This is useful for cases where the websocket connection takes a long time to establish.\n\n- `stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks'`\n Specifies which track should be streamed.\n\n- `stream_url?: string`\n The destination WebSocket address where the stream is going to be delivered.\n\n- `supervise_call_control_id?: string`\n The call leg which will be supervised by the new call.\n\n- `supervisor_role?: 'barge' | 'whisper' | 'monitor'`\n The role of the supervisor call. 'barge' means that supervisor call hears and is being heard by both ends of the call (caller & callee). 'whisper' means that only supervised_call_control_id hears supervisor but supervisor can hear everything. 'monitor' means that nobody can hear supervisor call, but supervisor can hear everything on the call.\n\n- `time_limit_secs?: number`\n Sets the maximum duration of a Call Control Leg in seconds. 
If the time limit is reached, the call will hangup and a `call.hangup` webhook with a `hangup_cause` of `time_limit` will be sent. For example, by setting a time limit of 120 seconds, a Call Leg will be automatically terminated two minutes after being answered. The default time limit is 14400 seconds or 4 hours and this is also the maximum allowed call length.\n\n- `timeout_secs?: number`\n The number of seconds that Telnyx will wait for the call to be answered by the destination to which it is being called. If the timeout is reached before an answer is received, the call will hangup and a `call.hangup` webhook with a `hangup_cause` of `timeout` will be sent. Minimum value is 5 seconds. Maximum value is 600 seconds.\n\n- `transcription?: boolean`\n Enable transcription upon call answer. The default value is false.\n\n- `transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; 
interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }; transcription_tracks?: string; }`\n - `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n - `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n - `transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'`\n Engine to use for speech recognition. 
Legacy values `A` - `Google`, `B` - `Telnyx` are supported for backward compatibility.\n - `transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }`\n - `transcription_tracks?: string`\n Indicates which leg of the call will be transcribed. 
Use `inbound` for the leg that requested the transcription, `outbound` for the other leg, and `both` for both legs of the call. Will default to `inbound`.\n\n- `webhook_retries_policies?: object`\n A map of event types to retry policies. Each retry policy contains an array of `retries_ms` specifying the delays between retry attempts in milliseconds. Maximum 5 retries, total delay cannot exceed 60 seconds.\n\n- `webhook_url?: string`\n Use this field to override the URL for which Telnyx will send subsequent webhooks to for this call.\n\n- `webhook_url_method?: 'POST' | 'GET'`\n HTTP request type used for `webhook_url`.\n\n- `webhook_urls?: object`\n A map of event types to webhook URLs. When an event of the specified type occurs, the webhook URL associated with that event type will be called instead of the default webhook URL. Events not mapped here will use the default webhook URL.\n\n- `webhook_urls_method?: 'POST' | 'GET'`\n HTTP request method to invoke `webhook_urls`.\n\n### Returns\n\n- `{ data?: { call_control_id: string; call_leg_id: string; call_session_id: string; is_alive: boolean; record_type: 'call'; call_duration?: number; client_state?: string; end_time?: string; recording_id?: string; start_time?: string; }; }`\n\n - `data?: { call_control_id: string; call_leg_id: string; call_session_id: string; is_alive: boolean; record_type: 'call'; call_duration?: number; client_state?: string; end_time?: string; recording_id?: string; start_time?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.dial({\n connection_id: '7267xxxxxxxxxxxxxx',\n from: '+18005550101',\n to: '+18005550100 or sip:username@sip.telnyx.com',\n});\n\nconsole.log(response);\n```",
11041
+ markdown: "## dial\n\n`client.calls.dial(connection_id: string, from: string, to: string | string[], answering_machine_detection?: 'premium' | 'detect' | 'detect_beep' | 'detect_words' | 'greeting_end' | 'disabled', answering_machine_detection_config?: { after_greeting_silence_millis?: number; between_words_silence_millis?: number; greeting_duration_millis?: number; greeting_silence_duration_millis?: number; greeting_total_analysis_time_millis?: number; initial_silence_millis?: number; maximum_number_of_words?: number; maximum_word_length_millis?: number; silence_threshold?: number; total_analysis_time_millis?: number; }, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, audio_url?: string, billing_group_id?: string, bridge_intent?: boolean, bridge_on_answer?: boolean, client_state?: string, command_id?: string, conference_config?: { id?: string; beep_enabled?: 'always' | 'never' | 'on_enter' | 'on_exit'; conference_name?: string; early_media?: boolean; end_conference_on_exit?: boolean; hold?: boolean; hold_audio_url?: string; hold_media_name?: string; mute?: boolean; soft_end_conference_on_exit?: boolean; start_conference_on_create?: boolean; start_conference_on_enter?: boolean; supervisor_role?: 'barge' | 'monitor' | 'none' | 'whisper'; whisper_call_control_ids?: string[]; }, custom_headers?: { name: string; value: string; }[], deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }, dialogflow_config?: { analyze_sentiment?: boolean; partial_automated_agent_reply?: boolean; }, enable_dialogflow?: boolean, from_display_name?: string, link_to?: string, 
media_encryption?: 'disabled' | 'SRTP' | 'DTLS', media_name?: string, park_after_unbridge?: string, preferred_codecs?: string, prevent_double_bridge?: boolean, privacy?: 'id' | 'none', record?: 'record-from-answer', record_channels?: 'single' | 'dual', record_custom_file_name?: string, record_format?: 'wav' | 'mp3', record_max_length?: number, record_timeout_secs?: number, record_track?: 'both' | 'inbound' | 'outbound', record_trim?: 'trim-silence', send_silence_when_idle?: boolean, sip_auth_password?: string, sip_auth_username?: string, sip_headers?: { name: 'User-to-User'; value: string; }[], sip_region?: 'US' | 'Europe' | 'Canada' | 'Australia' | 'Middle East', sip_transport_protocol?: 'UDP' | 'TCP' | 'TLS', sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }, stream_auth_token?: string, stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16', stream_bidirectional_mode?: 'mp3' | 'rtp', stream_bidirectional_sampling_rate?: 8000 | 16000 | 22050 | 24000 | 48000, stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite', stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default', stream_establish_before_call_originate?: boolean, stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks', stream_url?: string, supervise_call_control_id?: string, supervisor_role?: 'barge' | 'whisper' | 'monitor', time_limit_secs?: number, timeout_secs?: number, transcription?: boolean, transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: transcription_engine_google_config | transcription_engine_telnyx_config | transcription_engine_azure_config | object | object | transcription_engine_a_config | transcription_engine_b_config | deepgram_nova2_config | deepgram_nova3_config; transcription_tracks?: string; }, webhook_retries_policies?: object, 
webhook_url?: string, webhook_url_method?: 'POST' | 'GET', webhook_urls?: object, webhook_urls_method?: 'POST' | 'GET'): { data?: object; }`\n\n**post** `/calls`\n\nDial a number or SIP URI from a given connection. A successful response will include a `call_leg_id` which can be used to correlate the command with subsequent webhooks.\n\n**Expected Webhooks:**\n\n- `call.initiated`\n- `call.answered` or `call.hangup`\n- `call.hold` and `call.unhold` if the call is held/unheld\n- `call.machine.detection.ended` if `answering_machine_detection` was requested\n- `call.machine.greeting.ended` if `answering_machine_detection` was requested to detect the end of machine greeting\n- `call.machine.premium.detection.ended` if `answering_machine_detection=premium` was requested\n- `call.machine.premium.greeting.ended` if `answering_machine_detection=premium` was requested and a beep was detected\n- `call.deepfake_detection.result` if `deepfake_detection` was enabled\n- `call.deepfake_detection.error` if `deepfake_detection` was enabled and an error occurred\n- `streaming.started`, `streaming.stopped` or `streaming.failed` if `stream_url` was set\n\nWhen the `record` parameter is set to `record-from-answer`, the response will include a `recording_id` field.\n\n\n### Parameters\n\n- `connection_id: string`\n The ID of the Call Control App (formerly ID of the connection) to be used when dialing the destination.\n\n- `from: string`\n The `from` number to be used as the caller id presented to the destination (`to` number). The number should be in +E164 format.\n\n- `to: string | string[]`\n The DID or SIP URI to dial out to. Multiple DID or SIP URIs can be provided using an array of strings\n\n- `answering_machine_detection?: 'premium' | 'detect' | 'detect_beep' | 'detect_words' | 'greeting_end' | 'disabled'`\n Enables Answering Machine Detection. Telnyx offers Premium and Standard detections. 
With Premium detection, when a call is answered, Telnyx runs real-time detection and sends a `call.machine.premium.detection.ended` webhook with one of the following results: `human_residence`, `human_business`, `machine`, `silence` or `fax_detected`. If we detect a beep, we also send a `call.machine.premium.greeting.ended` webhook with the result of `beep_detected`. If we detect a beep before `call.machine.premium.detection.ended` we only send `call.machine.premium.greeting.ended`, and if we detect a beep after `call.machine.premium.detection.ended`, we send both webhooks. With Standard detection, when a call is answered, Telnyx runs real-time detection to determine if it was picked up by a human or a machine and sends an `call.machine.detection.ended` webhook with the analysis result. If `greeting_end` or `detect_words` is used and a `machine` is detected, you will receive another `call.machine.greeting.ended` webhook when the answering machine greeting ends with a beep or silence. If `detect_beep` is used, you will only receive `call.machine.greeting.ended` if a beep is detected.\n\n- `answering_machine_detection_config?: { after_greeting_silence_millis?: number; between_words_silence_millis?: number; greeting_duration_millis?: number; greeting_silence_duration_millis?: number; greeting_total_analysis_time_millis?: number; initial_silence_millis?: number; maximum_number_of_words?: number; maximum_word_length_millis?: number; silence_threshold?: number; total_analysis_time_millis?: number; }`\n Optional configuration parameters to modify 'answering_machine_detection' performance. 
Only `total_analysis_time_millis` and `greeting_duration_millis` parameters are applicable when `premium` is selected as answering_machine_detection.\n - `after_greeting_silence_millis?: number`\n Silence duration threshold after a greeting message or voice for it to be considered human.\n - `between_words_silence_millis?: number`\n Maximum threshold for silence between words.\n - `greeting_duration_millis?: number`\n Maximum threshold of a human greeting. If greeting longer than this value, considered machine.\n - `greeting_silence_duration_millis?: number`\n If machine already detected, maximum threshold for silence between words. If exceeded, the greeting is considered ended.\n - `greeting_total_analysis_time_millis?: number`\n If machine already detected, maximum timeout threshold to determine the end of the machine greeting.\n - `initial_silence_millis?: number`\n If initial silence duration is greater than this value, consider it a machine.\n - `maximum_number_of_words?: number`\n If number of detected words is greater than this value, consider it a machine.\n - `maximum_word_length_millis?: number`\n If a single word lasts longer than this threshold, consider it a machine.\n - `silence_threshold?: number`\n Minimum noise threshold for any analysis.\n - `total_analysis_time_millis?: number`\n Maximum timeout threshold for overall detection.\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: 
object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. 
There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. 
This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant (webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `audio_url?: string`\n The URL of a file to be played back to the callee when the call is answered. The URL can point to either a WAV or MP3 file. media_name and audio_url cannot be used together in one request.\n\n- `billing_group_id?: string`\n Use this field to set the Billing Group ID for the call. Must be a valid and existing Billing Group ID.\n\n- `bridge_intent?: boolean`\n Indicates the intent to bridge this call with the call specified in link_to. When bridge_intent is true, link_to becomes required and the from number will be overwritten by the from number from the linked call.\n\n- `bridge_on_answer?: boolean`\n Whether to automatically bridge answered call to the call specified in link_to. 
When bridge_on_answer is true, link_to becomes required.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore others Dial commands with the same `command_id`.\n\n- `conference_config?: { id?: string; beep_enabled?: 'always' | 'never' | 'on_enter' | 'on_exit'; conference_name?: string; early_media?: boolean; end_conference_on_exit?: boolean; hold?: boolean; hold_audio_url?: string; hold_media_name?: string; mute?: boolean; soft_end_conference_on_exit?: boolean; start_conference_on_create?: boolean; start_conference_on_enter?: boolean; supervisor_role?: 'barge' | 'monitor' | 'none' | 'whisper'; whisper_call_control_ids?: string[]; }`\n Optional configuration parameters to dial new participant into a conference.\n - `id?: string`\n Conference ID to be joined\n - `beep_enabled?: 'always' | 'never' | 'on_enter' | 'on_exit'`\n Whether a beep sound should be played when the participant joins and/or leaves the conference. Can be used to override the conference-level setting.\n - `conference_name?: string`\n Conference name to be joined\n - `early_media?: boolean`\n Controls the moment when dialled call is joined into conference. If set to `true` user will be joined as soon as media is available (ringback). If `false` user will be joined when call is answered. Defaults to `true`\n - `end_conference_on_exit?: boolean`\n Whether the conference should end and all remaining participants be hung up after the participant leaves the conference. Defaults to \"false\".\n - `hold?: boolean`\n Whether the participant should be put on hold immediately after joining the conference. Defaults to \"false\".\n - `hold_audio_url?: string`\n The URL of a file to be played to the participant when they are put on hold after joining the conference. hold_media_name and hold_audio_url cannot be used together in one request. 
Takes effect only when \"start_conference_on_create\" is set to \"false\". This property takes effect only if \"hold\" is set to \"true\".\n - `hold_media_name?: string`\n The media_name of a file to be played to the participant when they are put on hold after joining the conference. The media_name must point to a file previously uploaded to api.telnyx.com/v2/media by the same user/organization. The file must either be a WAV or MP3 file. Takes effect only when \"start_conference_on_create\" is set to \"false\". This property takes effect only if \"hold\" is set to \"true\".\n - `mute?: boolean`\n Whether the participant should be muted immediately after joining the conference. Defaults to \"false\".\n - `soft_end_conference_on_exit?: boolean`\n Whether the conference should end after the participant leaves the conference. NOTE this doesn't hang up the other participants. Defaults to \"false\".\n - `start_conference_on_create?: boolean`\n Whether the conference should be started on creation. If the conference isn't started all participants that join are automatically put on hold. Defaults to \"true\".\n - `start_conference_on_enter?: boolean`\n Whether the conference should be started after the participant joins the conference. Defaults to \"false\".\n - `supervisor_role?: 'barge' | 'monitor' | 'none' | 'whisper'`\n Sets the joining participant as a supervisor for the conference. A conference can have multiple supervisors. \"barge\" means the supervisor enters the conference as a normal participant. This is the same as \"none\". \"monitor\" means the supervisor is muted but can hear all participants. \"whisper\" means that only the specified \"whisper_call_control_ids\" can hear the supervisor. Defaults to \"none\".\n - `whisper_call_control_ids?: string[]`\n Array of unique call_control_ids the joining supervisor can whisper to. 
If none provided, the supervisor will join the conference as a monitoring participant only.\n\n- `custom_headers?: { name: string; value: string; }[]`\n Custom headers to be added to the SIP INVITE.\n\n- `deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }`\n Enables deepfake detection on the call. When enabled, audio from the remote party is streamed to a detection service that analyzes whether the voice is AI-generated. Results are delivered via the `call.deepfake_detection.result` webhook.\n - `enabled: boolean`\n Whether deepfake detection is enabled.\n - `rtp_timeout?: number`\n Maximum time in seconds to wait for RTP audio before timing out. If no audio is received within this window, detection stops with an error.\n - `timeout?: number`\n Maximum time in seconds to wait for a detection result before timing out.\n\n- `dialogflow_config?: { analyze_sentiment?: boolean; partial_automated_agent_reply?: boolean; }`\n - `analyze_sentiment?: boolean`\n Enable sentiment analysis from Dialogflow.\n - `partial_automated_agent_reply?: boolean`\n Enable partial automated agent reply from Dialogflow.\n\n- `enable_dialogflow?: boolean`\n Enables Dialogflow for the current call. The default value is false.\n\n- `from_display_name?: string`\n The `from_display_name` string to be used as the caller id name (SIP From Display Name) presented to the destination (`to` number). The string should have a maximum of 128 characters, containing only letters, numbers, spaces, and -_~!.+ special characters. If omitted, the display name will be the same as the number in the `from` field.\n\n- `link_to?: string`\n Use another call's control id for sharing the same call session id\n\n- `media_encryption?: 'disabled' | 'SRTP' | 'DTLS'`\n Defines whether media should be encrypted on the call.\n\n- `media_name?: string`\n The media_name of a file to be played back to the callee when the call is answered. 
The media_name must point to a file previously uploaded to api.telnyx.com/v2/media by the same user/organization. The file must either be a WAV or MP3 file.\n\n- `park_after_unbridge?: string`\n If supplied with the value `self`, the current leg will be parked after unbridge. If not set, the default behavior is to hang up the leg. When park_after_unbridge is set, link_to becomes required.\n\n- `preferred_codecs?: string`\n The list of comma-separated codecs in a preferred order for the forked media to be received.\n\n- `prevent_double_bridge?: boolean`\n Prevents bridging and hangs up the call if the target is already bridged. Disabled by default.\n\n- `privacy?: 'id' | 'none'`\n Indicates the privacy level to be used for the call. When set to `id`, caller ID information (name and number) will be hidden from the called party. When set to `none` or omitted, caller ID will be shown normally.\n\n- `record?: 'record-from-answer'`\n Start recording automatically after an event. Disabled by default.\n\n- `record_channels?: 'single' | 'dual'`\n Defines which channel should be recorded ('single' or 'dual') when `record` is specified.\n\n- `record_custom_file_name?: string`\n The custom recording file name to be used instead of the default `call_leg_id`. Telnyx will still add a Unix timestamp suffix.\n\n- `record_format?: 'wav' | 'mp3'`\n Defines the format of the recording ('wav' or 'mp3') when `record` is specified.\n\n- `record_max_length?: number`\n Defines the maximum length for the recording in seconds when `record` is specified. The minimum value is 0. The maximum value is 43200. The default value is 0 (infinite).\n\n- `record_timeout_secs?: number`\n The number of seconds that Telnyx will wait for the recording to be stopped if silence is detected when `record` is specified. The timer only starts when the speech is detected. Please note that call transcription is used to detect silence and the related charge will be applied. The minimum value is 0. 
The default value is 0 (infinite).\n\n- `record_track?: 'both' | 'inbound' | 'outbound'`\n The audio track to be recorded. Can be either `both`, `inbound` or `outbound`. If only single track is specified (`inbound`, `outbound`), `channels` configuration is ignored and it will be recorded as mono (single channel).\n\n- `record_trim?: 'trim-silence'`\n When set to `trim-silence`, silence will be removed from the beginning and end of the recording.\n\n- `send_silence_when_idle?: boolean`\n Generate silence RTP packets when no transmission available.\n\n- `sip_auth_password?: string`\n SIP Authentication password used for SIP challenges.\n\n- `sip_auth_username?: string`\n SIP Authentication username used for SIP challenges.\n\n- `sip_headers?: { name: 'User-to-User'; value: string; }[]`\n SIP headers to be added to the SIP INVITE request. Currently only User-to-User header is supported.\n\n- `sip_region?: 'US' | 'Europe' | 'Canada' | 'Australia' | 'Middle East'`\n Defines the SIP region to be used for the call.\n\n- `sip_transport_protocol?: 'UDP' | 'TCP' | 'TLS'`\n Defines SIP transport protocol to be used on the call.\n\n- `sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }`\n Use this field to modify sound effects, for example adjust the pitch.\n - `octaves?: number`\n Adjust the pitch in octaves, values should be between -1 and 1, default 0\n - `pitch?: number`\n Set the pitch directly, value should be > 0, default 1 (lower = lower tone)\n - `semitone?: number`\n Adjust the pitch in semitones, values should be between -14 and 14, default 0\n - `track?: string`\n The track to which the sound modifications will be applied. Accepted values are `inbound` or `outbound`\n\n- `stream_auth_token?: string`\n An authentication token to be sent as part of the WebSocket connection when using streaming. 
Maximum length is 4000 characters.\n\n- `stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16'`\n Indicates codec for bidirectional streaming RTP payloads. Used only with stream_bidirectional_mode=rtp. Case sensitive.\n\n- `stream_bidirectional_mode?: 'mp3' | 'rtp'`\n Configures method of bidirectional streaming (mp3, rtp).\n\n- `stream_bidirectional_sampling_rate?: 8000 | 16000 | 22050 | 24000 | 48000`\n Audio sampling rate.\n\n- `stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite'`\n Specifies which call legs should receive the bidirectional stream audio.\n\n- `stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default'`\n Specifies the codec to be used for the streamed audio. When set to 'default' or when transcoding is not possible, the codec from the call will be used.\n\n- `stream_establish_before_call_originate?: boolean`\n Establish websocket connection before dialing the destination. This is useful for cases where the websocket connection takes a long time to establish.\n\n- `stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks'`\n Specifies which track should be streamed.\n\n- `stream_url?: string`\n The destination WebSocket address where the stream is going to be delivered.\n\n- `supervise_call_control_id?: string`\n The call leg which will be supervised by the new call.\n\n- `supervisor_role?: 'barge' | 'whisper' | 'monitor'`\n The role of the supervisor call. 'barge' means that supervisor call hears and is being heard by both ends of the call (caller & callee). 'whisper' means that only supervised_call_control_id hears supervisor but supervisor can hear everything. 'monitor' means that nobody can hear supervisor call, but supervisor can hear everything on the call.\n\n- `time_limit_secs?: number`\n Sets the maximum duration of a Call Control Leg in seconds. 
If the time limit is reached, the call will hangup and a `call.hangup` webhook with a `hangup_cause` of `time_limit` will be sent. For example, by setting a time limit of 120 seconds, a Call Leg will be automatically terminated two minutes after being answered. The default time limit is 14400 seconds or 4 hours and this is also the maximum allowed call length.\n\n- `timeout_secs?: number`\n The number of seconds that Telnyx will wait for the call to be answered by the destination to which it is being called. If the timeout is reached before an answer is received, the call will hangup and a `call.hangup` webhook with a `hangup_cause` of `timeout` will be sent. Minimum value is 5 seconds. Maximum value is 600 seconds.\n\n- `transcription?: boolean`\n Enable transcription upon call answer. The default value is false.\n\n- `transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; 
interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }; transcription_tracks?: string; }`\n - `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n - `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n - `transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'`\n Engine to use for speech recognition. 
Legacy values `A` - `Google`, `B` - `Telnyx` are supported for backward compatibility.\n - `transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }`\n - `transcription_tracks?: string`\n Indicates which leg of the call will be transcribed. 
Use `inbound` for the leg that requested the transcription, `outbound` for the other leg, and `both` for both legs of the call. Will default to `inbound`.\n\n- `webhook_retries_policies?: object`\n A map of event types to retry policies. Each retry policy contains an array of `retries_ms` specifying the delays between retry attempts in milliseconds. Maximum 5 retries, total delay cannot exceed 60 seconds.\n\n- `webhook_url?: string`\n Use this field to override the URL for which Telnyx will send subsequent webhooks to for this call.\n\n- `webhook_url_method?: 'POST' | 'GET'`\n HTTP request type used for `webhook_url`.\n\n- `webhook_urls?: object`\n A map of event types to webhook URLs. When an event of the specified type occurs, the webhook URL associated with that event type will be called instead of the default webhook URL. Events not mapped here will use the default webhook URL.\n\n- `webhook_urls_method?: 'POST' | 'GET'`\n HTTP request method to invoke `webhook_urls`.\n\n### Returns\n\n- `{ data?: { call_control_id: string; call_leg_id: string; call_session_id: string; is_alive: boolean; record_type: 'call'; call_duration?: number; client_state?: string; end_time?: string; recording_id?: string; start_time?: string; }; }`\n\n - `data?: { call_control_id: string; call_leg_id: string; call_session_id: string; is_alive: boolean; record_type: 'call'; call_duration?: number; client_state?: string; end_time?: string; recording_id?: string; start_time?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.dial({\n connection_id: '7267xxxxxxxxxxxxxx',\n from: '+18005550101',\n to: '+18005550100 or sip:username@sip.telnyx.com',\n});\n\nconsole.log(response);\n```",
11042
11042
  perLanguage: {
11043
11043
  typescript: {
11044
11044
  method: 'client.calls.dial',
@@ -11141,7 +11141,7 @@ const EMBEDDED_METHODS = [
11141
11141
  "voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; };",
11142
11142
  ],
11143
11143
  response: '{ data?: { conversation_id?: string; result?: string; }; }',
11144
- markdown: "## start_ai_assistant\n\n`client.calls.actions.startAIAssistant(call_control_id: string, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: object; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[], participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[], send_message_history_updates?: boolean, transcription?: { model?: string; }, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/ai_assistant_start`\n\nStart an AI assistant on the call.\n\n**Expected Webhooks:**\n\n- `call.conversation.ended`\n- 
`call.conversation_insights.generated`\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. 
Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. 
If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant (webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. 
Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `greeting?: string`\n Text that will be played when the assistant starts, if none then nothing will be played when the assistant starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: { name: string; }; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[]`\n A list of messages to seed the conversation history before the assistant starts. Follows the same message format as the `ai_assistant_add_messages` command.\n\n- `participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[]`\n A list of participants to add to the conversation when it starts.\n\n- `send_message_history_updates?: boolean`\n When `true`, a webhook is sent each time the conversation message history is updated.\n\n- `transcription?: { model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. 
`fixie-ai/ultravox-v0_4`) will ignore this field.\n - `model?: string`\n The speech to text model to be used by the voice assistant.\n\n- `distil-whisper/distil-large-v2` is lower latency but English-only.\n- `openai/whisper-large-v3-turbo` is multi-lingual with automatic language detection but slightly higher latency.\n- `google` is a multi-lingual option, please describe the language in the `language` field.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). 
Supported models: `Mini`, `Max`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.startAIAssistant('call_control_id');\n\nconsole.log(response);\n```",
11144
+ markdown: "## start_ai_assistant\n\n`client.calls.actions.startAIAssistant(call_control_id: string, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: object; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[], participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[], send_message_history_updates?: boolean, transcription?: { model?: string; }, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/ai_assistant_start`\n\nStart an AI assistant on the call.\n\n**Expected Webhooks:**\n\n- `call.conversation.ended`\n- 
`call.conversation_insights.generated`\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. 
Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. 
If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant (webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. 
Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `greeting?: string`\n Text that will be played when the assistant starts, if none then nothing will be played when the assistant starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: { name: string; }; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[]`\n A list of messages to seed the conversation history before the assistant starts. Follows the same message format as the `ai_assistant_add_messages` command.\n\n- `participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[]`\n A list of participants to add to the conversation when it starts.\n\n- `send_message_history_updates?: boolean`\n When `true`, a webhook is sent each time the conversation message history is updated.\n\n- `transcription?: { model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. 
`fixie-ai/ultravox-v0_4`) will ignore this field.\n - `model?: string`\n The speech to text model to be used by the voice assistant.\n\n- `distil-whisper/distil-large-v2` is lower latency but English-only.\n- `openai/whisper-large-v3-turbo` is multi-lingual with automatic language detection but slightly higher latency.\n- `google` is a multi-lingual option, please describe the language in the `language` field.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). 
Supported models: `Mini`, `Max`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.startAIAssistant('call_control_id');\n\nconsole.log(response);\n```",
11145
11145
  perLanguage: {
11146
11146
  typescript: {
11147
11147
  method: 'client.calls.actions.startAIAssistant',
@@ -11264,7 +11264,7 @@ const EMBEDDED_METHODS = [
11264
11264
  "webhook_urls_method?: 'POST' | 'GET';",
11265
11265
  ],
11266
11266
  response: '{ data?: { recording_id?: string; result?: string; }; }',
11267
- markdown: "## answer\n\n`client.calls.actions.answer(call_control_id: string, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, billing_group_id?: string, client_state?: string, command_id?: string, custom_headers?: { name: string; value: string; }[], deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }, preferred_codecs?: 'G722,PCMU,PCMA,G729,OPUS,VP8,H264', record?: 'record-from-answer', record_channels?: 'single' | 'dual', record_custom_file_name?: string, record_format?: 'wav' | 'mp3', record_max_length?: number, record_timeout_secs?: number, record_track?: 'both' | 'inbound' | 'outbound', record_trim?: 'trim-silence', send_silence_when_idle?: boolean, sip_headers?: { name: 'User-to-User'; value: string; }[], sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }, stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16', stream_bidirectional_mode?: 'mp3' | 'rtp', stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite', stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default', stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks', stream_url?: string, transcription?: boolean, transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: transcription_engine_google_config | transcription_engine_telnyx_config | transcription_engine_azure_config | object | object | transcription_engine_a_config | 
transcription_engine_b_config | deepgram_nova2_config | deepgram_nova3_config; transcription_tracks?: string; }, webhook_retries_policies?: object, webhook_url?: string, webhook_url_method?: 'POST' | 'GET', webhook_urls?: object, webhook_urls_method?: 'POST' | 'GET'): { data?: object; }`\n\n**post** `/calls/{call_control_id}/actions/answer`\n\nAnswer an incoming call. You must issue this command before executing subsequent commands on an incoming call.\n\n**Expected Webhooks:**\n\n- `call.answered`\n- `call.hold` and `call.unhold` if the call is held/unheld\n- `call.deepfake_detection.result` if `deepfake_detection` was enabled\n- `call.deepfake_detection.error` if `deepfake_detection` was enabled and an error occurred\n- `streaming.started`, `streaming.stopped` or `streaming.failed` if `stream_url` was set\n\nWhen the `record` parameter is set to `record-from-answer`, the response will include a `recording_id` field.\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: 
object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. 
Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant 
(webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `billing_group_id?: string`\n Use this field to set the Billing Group ID for the call. Must be a valid and existing Billing Group ID.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `custom_headers?: { name: string; value: string; }[]`\n Custom headers to be added to the SIP INVITE response.\n\n- `deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }`\n Enables deepfake detection on the call. When enabled, audio from the remote party is streamed to a detection service that analyzes whether the voice is AI-generated. Results are delivered via the `call.deepfake_detection.result` webhook.\n - `enabled: boolean`\n Whether deepfake detection is enabled.\n - `rtp_timeout?: number`\n Maximum time in seconds to wait for RTP audio before timing out. If no audio is received within this window, detection stops with an error.\n - `timeout?: number`\n Maximum time in seconds to wait for a detection result before timing out.\n\n- `preferred_codecs?: 'G722,PCMU,PCMA,G729,OPUS,VP8,H264'`\n The list of comma-separated codecs in a preferred order for the forked media to be received.\n\n- `record?: 'record-from-answer'`\n Start recording automatically after an event. Disabled by default.\n\n- `record_channels?: 'single' | 'dual'`\n Defines which channel should be recorded ('single' or 'dual') when `record` is specified.\n\n- `record_custom_file_name?: string`\n The custom recording file name to be used instead of the default `call_leg_id`. 
Telnyx will still add a Unix timestamp suffix.\n\n- `record_format?: 'wav' | 'mp3'`\n Defines the format of the recording ('wav' or 'mp3') when `record` is specified.\n\n- `record_max_length?: number`\n Defines the maximum length for the recording in seconds when `record` is specified. The minimum value is 0. The maximum value is 43200. The default value is 0 (infinite).\n\n- `record_timeout_secs?: number`\n The number of seconds that Telnyx will wait for the recording to be stopped if silence is detected when `record` is specified. The timer only starts when the speech is detected. Please note that call transcription is used to detect silence and the related charge will be applied. The minimum value is 0. The default value is 0 (infinite).\n\n- `record_track?: 'both' | 'inbound' | 'outbound'`\n The audio track to be recorded. Can be either `both`, `inbound` or `outbound`. If only single track is specified (`inbound`, `outbound`), `channels` configuration is ignored and it will be recorded as mono (single channel).\n\n- `record_trim?: 'trim-silence'`\n When set to `trim-silence`, silence will be removed from the beginning and end of the recording.\n\n- `send_silence_when_idle?: boolean`\n Generate silence RTP packets when no transmission available.\n\n- `sip_headers?: { name: 'User-to-User'; value: string; }[]`\n SIP headers to be added to the SIP INVITE response. Currently only User-to-User header is supported.\n\n- `sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }`\n Use this field to modify sound effects, for example adjust the pitch.\n - `octaves?: number`\n Adjust the pitch in octaves, values should be between -1 and 1, default 0\n - `pitch?: number`\n Set the pitch directly, value should be > 0, default 1 (lower = lower tone)\n - `semitone?: number`\n Adjust the pitch in semitones, values should be between -14 and 14, default 0\n - `track?: string`\n The track to which the sound modifications will be applied. 
Accepted values are `inbound` or `outbound`\n\n- `stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16'`\n Indicates codec for bidirectional streaming RTP payloads. Used only with stream_bidirectional_mode=rtp. Case sensitive.\n\n- `stream_bidirectional_mode?: 'mp3' | 'rtp'`\n Configures method of bidirectional streaming (mp3, rtp).\n\n- `stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite'`\n Specifies which call legs should receive the bidirectional stream audio.\n\n- `stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default'`\n Specifies the codec to be used for the streamed audio. When set to 'default' or when transcoding is not possible, the codec from the call will be used.\n\n- `stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks'`\n Specifies which track should be streamed.\n\n- `stream_url?: string`\n The destination WebSocket address where the stream is going to be delivered.\n\n- `transcription?: boolean`\n Enable transcription upon call answer. 
The default value is false.\n\n- `transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }; transcription_tracks?: 
string; }`\n - `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n - `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n - `transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'`\n Engine to use for speech recognition. Legacy values `A` - `Google`, `B` - `Telnyx` are supported for backward compatibility.\n - `transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; 
interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }`\n - `transcription_tracks?: string`\n Indicates which leg of the call will be transcribed. Use `inbound` for the leg that requested the transcription, `outbound` for the other leg, and `both` for both legs of the call. Will default to `inbound`.\n\n- `webhook_retries_policies?: object`\n A map of event types to retry policies. Each retry policy contains an array of `retries_ms` specifying the delays between retry attempts in milliseconds. Maximum 5 retries, total delay cannot exceed 60 seconds.\n\n- `webhook_url?: string`\n Use this field to override the URL for which Telnyx will send subsequent webhooks to for this call.\n\n- `webhook_url_method?: 'POST' | 'GET'`\n HTTP request type used for `webhook_url`.\n\n- `webhook_urls?: object`\n A map of event types to webhook URLs. When an event of the specified type occurs, the webhook URL associated with that event type will be called instead of `webhook_url`. Events not mapped here will use the default `webhook_url`.\n\n- `webhook_urls_method?: 'POST' | 'GET'`\n HTTP request method to invoke `webhook_urls`.\n\n### Returns\n\n- `{ data?: { recording_id?: string; result?: string; }; }`\n\n - `data?: { recording_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.answer('call_control_id');\n\nconsole.log(response);\n```",
11267
+ markdown: "## answer\n\n`client.calls.actions.answer(call_control_id: string, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, billing_group_id?: string, client_state?: string, command_id?: string, custom_headers?: { name: string; value: string; }[], deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }, preferred_codecs?: 'G722,PCMU,PCMA,G729,OPUS,VP8,H264', record?: 'record-from-answer', record_channels?: 'single' | 'dual', record_custom_file_name?: string, record_format?: 'wav' | 'mp3', record_max_length?: number, record_timeout_secs?: number, record_track?: 'both' | 'inbound' | 'outbound', record_trim?: 'trim-silence', send_silence_when_idle?: boolean, sip_headers?: { name: 'User-to-User'; value: string; }[], sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }, stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16', stream_bidirectional_mode?: 'mp3' | 'rtp', stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite', stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default', stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks', stream_url?: string, transcription?: boolean, transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: transcription_engine_google_config | transcription_engine_telnyx_config | transcription_engine_azure_config | object | object | transcription_engine_a_config | 
transcription_engine_b_config | deepgram_nova2_config | deepgram_nova3_config; transcription_tracks?: string; }, webhook_retries_policies?: object, webhook_url?: string, webhook_url_method?: 'POST' | 'GET', webhook_urls?: object, webhook_urls_method?: 'POST' | 'GET'): { data?: object; }`\n\n**post** `/calls/{call_control_id}/actions/answer`\n\nAnswer an incoming call. You must issue this command before executing subsequent commands on an incoming call.\n\n**Expected Webhooks:**\n\n- `call.answered`\n- `call.hold` and `call.unhold` if the call is held/unheld\n- `call.deepfake_detection.result` if `deepfake_detection` was enabled\n- `call.deepfake_detection.error` if `deepfake_detection` was enabled and an error occurred\n- `streaming.started`, `streaming.stopped` or `streaming.failed` if `stream_url` was set\n\nWhen the `record` parameter is set to `record-from-answer`, the response will include a `recording_id` field.\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: 
object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. 
Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant 
(webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `billing_group_id?: string`\n Use this field to set the Billing Group ID for the call. Must be a valid and existing Billing Group ID.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `custom_headers?: { name: string; value: string; }[]`\n Custom headers to be added to the SIP INVITE response.\n\n- `deepfake_detection?: { enabled: boolean; rtp_timeout?: number; timeout?: number; }`\n Enables deepfake detection on the call. When enabled, audio from the remote party is streamed to a detection service that analyzes whether the voice is AI-generated. Results are delivered via the `call.deepfake_detection.result` webhook.\n - `enabled: boolean`\n Whether deepfake detection is enabled.\n - `rtp_timeout?: number`\n Maximum time in seconds to wait for RTP audio before timing out. If no audio is received within this window, detection stops with an error.\n - `timeout?: number`\n Maximum time in seconds to wait for a detection result before timing out.\n\n- `preferred_codecs?: 'G722,PCMU,PCMA,G729,OPUS,VP8,H264'`\n The list of comma-separated codecs in a preferred order for the forked media to be received.\n\n- `record?: 'record-from-answer'`\n Start recording automatically after an event. Disabled by default.\n\n- `record_channels?: 'single' | 'dual'`\n Defines which channel should be recorded ('single' or 'dual') when `record` is specified.\n\n- `record_custom_file_name?: string`\n The custom recording file name to be used instead of the default `call_leg_id`. 
Telnyx will still add a Unix timestamp suffix.\n\n- `record_format?: 'wav' | 'mp3'`\n Defines the format of the recording ('wav' or 'mp3') when `record` is specified.\n\n- `record_max_length?: number`\n Defines the maximum length for the recording in seconds when `record` is specified. The minimum value is 0. The maximum value is 43200. The default value is 0 (infinite).\n\n- `record_timeout_secs?: number`\n The number of seconds that Telnyx will wait for the recording to be stopped if silence is detected when `record` is specified. The timer only starts when the speech is detected. Please note that call transcription is used to detect silence and the related charge will be applied. The minimum value is 0. The default value is 0 (infinite).\n\n- `record_track?: 'both' | 'inbound' | 'outbound'`\n The audio track to be recorded. Can be either `both`, `inbound` or `outbound`. If only single track is specified (`inbound`, `outbound`), `channels` configuration is ignored and it will be recorded as mono (single channel).\n\n- `record_trim?: 'trim-silence'`\n When set to `trim-silence`, silence will be removed from the beginning and end of the recording.\n\n- `send_silence_when_idle?: boolean`\n Generate silence RTP packets when no transmission available.\n\n- `sip_headers?: { name: 'User-to-User'; value: string; }[]`\n SIP headers to be added to the SIP INVITE response. Currently only User-to-User header is supported.\n\n- `sound_modifications?: { octaves?: number; pitch?: number; semitone?: number; track?: string; }`\n Use this field to modify sound effects, for example adjust the pitch.\n - `octaves?: number`\n Adjust the pitch in octaves, values should be between -1 and 1, default 0\n - `pitch?: number`\n Set the pitch directly, value should be > 0, default 1 (lower = lower tone)\n - `semitone?: number`\n Adjust the pitch in semitones, values should be between -14 and 14, default 0\n - `track?: string`\n The track to which the sound modifications will be applied. 
Accepted values are `inbound` or `outbound`\n\n- `stream_bidirectional_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16'`\n Indicates codec for bidirectional streaming RTP payloads. Used only with stream_bidirectional_mode=rtp. Case sensitive.\n\n- `stream_bidirectional_mode?: 'mp3' | 'rtp'`\n Configures method of bidirectional streaming (mp3, rtp).\n\n- `stream_bidirectional_target_legs?: 'both' | 'self' | 'opposite'`\n Specifies which call legs should receive the bidirectional stream audio.\n\n- `stream_codec?: 'PCMU' | 'PCMA' | 'G722' | 'OPUS' | 'AMR-WB' | 'L16' | 'default'`\n Specifies the codec to be used for the streamed audio. When set to 'default' or when transcoding is not possible, the codec from the call will be used.\n\n- `stream_track?: 'inbound_track' | 'outbound_track' | 'both_tracks'`\n Specifies which track should be streamed.\n\n- `stream_url?: string`\n The destination WebSocket address where the stream is going to be delivered.\n\n- `transcription?: boolean`\n Enable transcription upon call answer. 
The default value is false.\n\n- `transcription_config?: { client_state?: string; command_id?: string; transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'; transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: google_transcription_language; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: object[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: telnyx_transcription_language; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }; transcription_tracks?: 
string; }`\n - `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n - `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n - `transcription_engine?: 'Google' | 'Telnyx' | 'Deepgram' | 'Azure' | 'xAI' | 'AssemblyAI' | 'A' | 'B'`\n Engine to use for speech recognition. Legacy values `A` - `Google`, `B` - `Telnyx` are supported for backward compatibility.\n - `transcription_engine_config?: { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'Google'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'Telnyx'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { region: 'australiaeast' | 'centralindia' | 'eastus' | 'northcentralus' | 'westeurope' | 'westus2'; transcription_engine: 'Azure'; api_key_ref?: string; language?: string; } | { interim_results?: boolean; language?: string; transcription_engine?: 'xAI'; transcription_model?: 'xai/grok-stt'; } | { interim_results?: boolean; transcription_engine?: 'AssemblyAI'; transcription_model?: 'assemblyai/universal-streaming'; } | { enable_speaker_diarization?: boolean; hints?: string[]; interim_results?: boolean; language?: string; max_speaker_count?: number; min_speaker_count?: number; model?: string; profanity_filter?: boolean; speech_context?: { boost?: number; phrases?: string[]; }[]; transcription_engine?: 'A'; use_enhanced?: boolean; } | { language?: string; transcription_engine?: 'B'; transcription_model?: 'openai/whisper-tiny' | 'openai/whisper-large-v3-turbo'; } | { transcription_engine: 'deepgram/nova-2'; transcription_model: 'deepgram/nova-2'; 
interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; } | { transcription_engine: 'deepgram/nova-3'; transcription_model: 'deepgram/nova-3'; interim_results?: boolean; keywords_boosting?: object; language?: string; utterance_end_ms?: number; }`\n - `transcription_tracks?: string`\n Indicates which leg of the call will be transcribed. Use `inbound` for the leg that requested the transcription, `outbound` for the other leg, and `both` for both legs of the call. Will default to `inbound`.\n\n- `webhook_retries_policies?: object`\n A map of event types to retry policies. Each retry policy contains an array of `retries_ms` specifying the delays between retry attempts in milliseconds. Maximum 5 retries, total delay cannot exceed 60 seconds.\n\n- `webhook_url?: string`\n Use this field to override the URL for which Telnyx will send subsequent webhooks to for this call.\n\n- `webhook_url_method?: 'POST' | 'GET'`\n HTTP request type used for `webhook_url`.\n\n- `webhook_urls?: object`\n A map of event types to webhook URLs. When an event of the specified type occurs, the webhook URL associated with that event type will be called instead of `webhook_url`. Events not mapped here will use the default `webhook_url`.\n\n- `webhook_urls_method?: 'POST' | 'GET'`\n HTTP request method to invoke `webhook_urls`.\n\n### Returns\n\n- `{ data?: { recording_id?: string; result?: string; }; }`\n\n - `data?: { recording_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.answer('call_control_id');\n\nconsole.log(response);\n```",
11268
11268
  perLanguage: {
11269
11269
  typescript: {
11270
11270
  method: 'client.calls.actions.answer',
@@ -11695,7 +11695,7 @@ const EMBEDDED_METHODS = [
11695
11695
  "voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; };",
11696
11696
  ],
11697
11697
  response: '{ data?: { conversation_id?: string; result?: string; }; }',
11698
- markdown: "## gather_using_ai\n\n`client.calls.actions.gatherUsingAI(call_control_id: string, parameters: object, assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, gather_ended_speech?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, language?: string, message_history?: { content?: string; role?: 'assistant' | 'user'; }[], send_message_history_updates?: boolean, send_partial_results?: boolean, transcription?: { model?: string; }, user_response_timeout_ms?: number, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/gather_using_ai`\n\nGather parameters defined in the request payload using a voice assistant.\n\n You can pass parameters described as a JSON Schema object and the voice assistant will attempt to gather these informations. \n\n**Expected Webhooks:**\n\n- `call.ai_gather.ended`\n- `call.conversation.ended`\n- `call.ai_gather.partial_results` (if `send_partial_results` is set to `true`)\n- `call.ai_gather.message_history_updated` (if `send_message_history_updates` is set to `true`)\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `parameters: object`\n The parameters described as a JSON Schema object that needs to be gathered by the voice assistant. 
See the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format\n\n- `assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n Assistant configuration including choice of LLM, custom instructions, and tools.\n - `instructions?: string`\n The system instructions that the voice assistant uses during the gather command\n - `model?: string`\n The model to be used by the voice assistant.\n - `openai_api_key_ref?: string`\n This is necessary only if the model selected is from OpenAI. You would pass the `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) that refers to your OpenAI API Key. 
Warning: Free plans are unlikely to work with this integration.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { name?: string; to?: string; }[]; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n The tools that the voice assistant can use.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `gather_ended_speech?: string`\n Text that will be played when the gathering has finished. There is a 3,000 character limit.\n\n- `greeting?: string`\n Text that will be played when the gathering starts, if none then nothing will be played when the gathering starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. 
There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `language?: string`\n Language to use for speech recognition\n\n- `message_history?: { content?: string; role?: 'assistant' | 'user'; }[]`\n The message history you want the voice assistant to be aware of, this can be useful to keep the context of the conversation, or to pass additional information to the voice assistant.\n\n- `send_message_history_updates?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send updates to the message history via the `call.ai_gather.message_history_updated` callback in real time as the message history is updated.\n\n- `send_partial_results?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send partial results via the `call.ai_gather.partial_results` callback in real time as individual fields are gathered. If set to `false`, the voice assistant will only send the final result via the `call.ai_gather.ended` callback.\n\n- `transcription?: { model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. 
`fixie-ai/ultravox-v0_4`) will ignore this field.\n - `model?: string`\n The speech to text model to be used by the voice assistant.\n\n- `distil-whisper/distil-large-v2` is lower latency but English-only.\n- `openai/whisper-large-v3-turbo` is multi-lingual with automatic language detection but slightly higher latency.\n- `google` is a multi-lingual option, please describe the language in the `language` field.\n\n- `user_response_timeout_ms?: number`\n The maximum time in milliseconds to wait for user response before timing out.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). 
Supported models: `Mini`, `Max`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.gatherUsingAI('call_control_id', { parameters: {\n properties: 'bar',\n required: 'bar',\n type: 'bar',\n} });\n\nconsole.log(response);\n```",
11698
+ markdown: "## gather_using_ai\n\n`client.calls.actions.gatherUsingAI(call_control_id: string, parameters: object, assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, gather_ended_speech?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, language?: string, message_history?: { content?: string; role?: 'assistant' | 'user'; }[], send_message_history_updates?: boolean, send_partial_results?: boolean, transcription?: { model?: string; }, user_response_timeout_ms?: number, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/gather_using_ai`\n\nGather parameters defined in the request payload using a voice assistant.\n\n You can pass parameters described as a JSON Schema object and the voice assistant will attempt to gather these informations. \n\n**Expected Webhooks:**\n\n- `call.ai_gather.ended`\n- `call.conversation.ended`\n- `call.ai_gather.partial_results` (if `send_partial_results` is set to `true`)\n- `call.ai_gather.message_history_updated` (if `send_message_history_updates` is set to `true`)\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `parameters: object`\n The parameters described as a JSON Schema object that needs to be gathered by the voice assistant. 
See the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format\n\n- `assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n Assistant configuration including choice of LLM, custom instructions, and tools.\n - `instructions?: string`\n The system instructions that the voice assistant uses during the gather command\n - `model?: string`\n The model to be used by the voice assistant.\n - `openai_api_key_ref?: string`\n This is necessary only if the model selected is from OpenAI. You would pass the `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) that refers to your OpenAI API Key. 
Warning: Free plans are unlikely to work with this integration.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n The tools that the voice assistant can use.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `gather_ended_speech?: string`\n Text that will be played when the gathering has finished. There is a 3,000 character limit.\n\n- `greeting?: string`\n Text that will be played when the gathering starts, if none then nothing will be played when the gathering starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. 
There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `language?: string`\n Language to use for speech recognition\n\n- `message_history?: { content?: string; role?: 'assistant' | 'user'; }[]`\n The message history you want the voice assistant to be aware of, this can be useful to keep the context of the conversation, or to pass additional information to the voice assistant.\n\n- `send_message_history_updates?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send updates to the message history via the `call.ai_gather.message_history_updated` callback in real time as the message history is updated.\n\n- `send_partial_results?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send partial results via the `call.ai_gather.partial_results` callback in real time as individual fields are gathered. If set to `false`, the voice assistant will only send the final result via the `call.ai_gather.ended` callback.\n\n- `transcription?: { model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. 
`fixie-ai/ultravox-v0_4`) will ignore this field.\n - `model?: string`\n The speech to text model to be used by the voice assistant.\n\n- `distil-whisper/distil-large-v2` is lower latency but English-only.\n- `openai/whisper-large-v3-turbo` is multi-lingual with automatic language detection but slightly higher latency.\n- `google` is a multi-lingual option, please describe the language in the `language` field.\n\n- `user_response_timeout_ms?: number`\n The maximum time in milliseconds to wait for user response before timing out.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). 
Supported models: `Mini`, `Max`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.gatherUsingAI('call_control_id', { parameters: {\n properties: 'bar',\n required: 'bar',\n type: 'bar',\n} });\n\nconsole.log(response);\n```",
11699
11699
  perLanguage: {
11700
11700
  typescript: {
11701
11701
  method: 'client.calls.actions.gatherUsingAI',