telnyx-mcp 6.50.0 → 6.52.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/local-docs-search.d.mts.map +1 -1
- package/local-docs-search.d.ts.map +1 -1
- package/local-docs-search.js +8 -8
- package/local-docs-search.js.map +1 -1
- package/local-docs-search.mjs +8 -8
- package/local-docs-search.mjs.map +1 -1
- package/package.json +2 -2
- package/server.js +1 -1
- package/server.mjs +1 -1
- package/src/local-docs-search.ts +9 -8
- package/src/server.ts +1 -1

package/local-docs-search.d.mts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"local-docs-search.d.mts","sourceRoot":"","sources":["src/local-docs-search.ts"],"names":[],"mappings":"AA+CA,KAAK,YAAY,GAAG;IAClB,OAAO,EAAE,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,EAAE,CAAC;CAC/C,CAAC;"}
+{"version":3,"file":"local-docs-search.d.mts","sourceRoot":"","sources":["src/local-docs-search.ts"],"names":[],"mappings":"AA+CA,KAAK,YAAY,GAAG;IAClB,OAAO,EAAE,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,EAAE,CAAC;CAC/C,CAAC;AA6i1DF;;;;GAIG;AACH,qBAAa,eAAe;IAC1B,OAAO,CAAC,WAAW,CAAiC;IACpD,OAAO,CAAC,UAAU,CAAiC;IAEnD,OAAO;WAKM,MAAM,CAAC,IAAI,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAA;KAAE,GAAG,OAAO,CAAC,eAAe,CAAC;IAY1E,MAAM,CAAC,KAAK,EAAE;QACZ,KAAK,EAAE,MAAM,CAAC;QACd,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB,SAAS,CAAC,EAAE,MAAM,CAAC;KACpB,GAAG,YAAY;IA2EhB,OAAO,CAAC,YAAY;YAiBN,iBAAiB;IA4C/B,OAAO,CAAC,UAAU;CAgBnB"}

package/local-docs-search.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"local-docs-search.d.ts","sourceRoot":"","sources":["src/local-docs-search.ts"],"names":[],"mappings":"AA+CA,KAAK,YAAY,GAAG;IAClB,OAAO,EAAE,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,EAAE,CAAC;CAC/C,CAAC;"}
+{"version":3,"file":"local-docs-search.d.ts","sourceRoot":"","sources":["src/local-docs-search.ts"],"names":[],"mappings":"AA+CA,KAAK,YAAY,GAAG;IAClB,OAAO,EAAE,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,EAAE,CAAC;CAC/C,CAAC;AA6i1DF;;;;GAIG;AACH,qBAAa,eAAe;IAC1B,OAAO,CAAC,WAAW,CAAiC;IACpD,OAAO,CAAC,UAAU,CAAiC;IAEnD,OAAO;WAKM,MAAM,CAAC,IAAI,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAA;KAAE,GAAG,OAAO,CAAC,eAAe,CAAC;IAY1E,MAAM,CAAC,KAAK,EAAE;QACZ,KAAK,EAAE,MAAM,CAAC;QACd,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB,SAAS,CAAC,EAAE,MAAM,CAAC;KACpB,GAAG,YAAY;IA2EhB,OAAO,CAAC,YAAY;YAiBN,iBAAiB;IA4C/B,OAAO,CAAC,UAAU;CAgBnB"}

package/local-docs-search.js
CHANGED
@@ -11260,12 +11260,12 @@ const EMBEDDED_METHODS = [
"message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: { name: string; }; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[];",
"participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[];",
'send_message_history_updates?: boolean;',
-'transcription?: { model?: string; };',
+'transcription?: { language?: string; model?: string; };',
'voice?: string;',
"voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; };",
],
response: '{ data?: { conversation_id?: string; result?: string; }; }',
-markdown: "## start_ai_assistant\n\n`client.calls.actions.startAIAssistant(call_control_id: string, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: object; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[], participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[], send_message_history_updates?: boolean, transcription?: { model?: string; }, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/ai_assistant_start`\n\nStart an AI assistant on the call.\n\n**Expected Webhooks:**\n\n- `call.conversation.ended`\n- `call.conversation_insights.generated`\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant (webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `greeting?: string`\n Text that will be played when the assistant starts, if none then nothing will be played when the assistant starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: { name: string; }; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[]`\n A list of messages to seed the conversation history before the assistant starts. Follows the same message format as the `ai_assistant_add_messages` command.\n\n- `participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[]`\n A list of participants to add to the conversation when it starts.\n\n- `send_message_history_updates?: boolean`\n When `true`, a webhook is sent each time the conversation message history is updated.\n\n- `transcription?: { model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. `fixie-ai/ultravox-v0_4`) will ignore this field.\n - `model?: string`\n The speech to text model to be used by the voice assistant.\n\n- `distil-whisper/distil-large-v2` is lower latency but English-only.\n- `openai/whisper-large-v3-turbo` is multi-lingual with automatic language detection but slightly higher latency.\n- `google` is a multi-lingual option, please describe the language in the `language` field.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). Supported models: `Mini`, `Max`.\n- **xAI:** Use `xAI.<VoiceId>` (e.g., `xAI.eve`). Available voices: `eve`, `ara`, `rex`, `sal`, `leo`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.startAIAssistant('call_control_id');\n\nconsole.log(response);\n```",
+markdown: "## start_ai_assistant\n\n`client.calls.actions.startAIAssistant(call_control_id: string, assistant?: { id: string; dynamic_variables?: object; external_llm?: object; fallback_config?: object; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: object; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[], participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[], send_message_history_updates?: boolean, transcription?: { language?: string; model?: string; }, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/ai_assistant_start`\n\nStart an AI assistant on the call.\n\n**Expected Webhooks:**\n\n- `call.conversation.ended`\n- `call.conversation_insights.generated`\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `assistant?: { id: string; dynamic_variables?: object; external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }; greeting?: string; instructions?: string; llm_api_key_ref?: string; mcp_servers?: object[]; model?: string; name?: string; observability_settings?: object; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n AI Assistant configuration. All fields except `id` are optional — the assistant's stored configuration will be used as fallback for any omitted fields.\n - `id: string`\n The identifier of the AI assistant to use.\n - `dynamic_variables?: object`\n Map of dynamic variables and their default values. Dynamic variables can be referenced in instructions, greeting, and tool definitions using the `{{variable_name}}` syntax. Call-control-agent automatically merges in `telnyx_call_*` variables (telnyx_call_to, telnyx_call_from, telnyx_conversation_channel, telnyx_agent_target, telnyx_end_user_target, telnyx_call_caller_id_name) and custom header variables.\n - `external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }`\n External LLM configuration for bringing your own LLM endpoint.\n - `fallback_config?: { external_llm?: { authentication_method?: 'token' | 'certificate'; base_url?: string; certificate_ref?: string; forward_metadata?: boolean; llm_api_key_ref?: string; model?: string; token_retrieval_url?: string; }; llm_api_key_ref?: string; model?: string; }`\n Fallback LLM configuration used when the primary LLM provider is unavailable.\n - `greeting?: string`\n Initial greeting text spoken when the assistant starts. Can be plain text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n - `instructions?: string`\n System instructions for the voice assistant. Can be templated with [dynamic variables](https://developers.telnyx.com/docs/inference/ai-assistants/dynamic-variables). This will overwrite the instructions set in the assistant configuration.\n - `llm_api_key_ref?: string`\n Integration secret identifier for the LLM provider API key. Use this field to reference an [integration secret](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) containing your LLM provider API key. Supports any LLM provider (OpenAI, Anthropic, etc.).\n - `mcp_servers?: object[]`\n MCP (Model Context Protocol) server configurations for extending the assistant's capabilities with external tools and data sources.\n - `model?: string`\n LLM model override for this call. If omitted, the assistant's configured model is used.\n - `name?: string`\n Assistant name override for this call.\n - `observability_settings?: object`\n Observability configuration for the assistant session, including Langfuse integration for tracing and monitoring.\n - `openai_api_key_ref?: string`\n Deprecated — use `llm_api_key_ref` instead. Integration secret identifier for the OpenAI API key. This field is maintained for backward compatibility; `llm_api_key_ref` is the canonical field name and supports all LLM providers.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n Inline tool definitions available to the assistant (webhook, retrieval, transfer, hangup, etc.). Overrides the assistant's stored tools if provided.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `greeting?: string`\n Text that will be played when the assistant starts, if none then nothing will be played when the assistant starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `message_history?: { content: string; role: 'user'; metadata?: object; } | { role: 'assistant'; content?: string; metadata?: object; tool_calls?: { id: string; function: { name: string; }; type: 'function'; }[]; } | { content: string; role: 'tool'; tool_call_id: string; metadata?: object; } | { content: string; role: 'system'; metadata?: object; } | { content: string; role: 'developer'; metadata?: object; }[]`\n A list of messages to seed the conversation history before the assistant starts. Follows the same message format as the `ai_assistant_add_messages` command.\n\n- `participants?: { id: string; role: 'user'; name?: string; on_hangup?: 'continue_conversation' | 'end_conversation'; }[]`\n A list of participants to add to the conversation when it starts.\n\n- `send_message_history_updates?: boolean`\n When `true`, a webhook is sent each time the conversation message history is updated.\n\n- `transcription?: { language?: string; model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. `fixie-ai/ultravox-v0_4`) will ignore this field.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. Supported and meaningful values depend on the selected transcription `model`. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. Supported models include:\n\n- `deepgram/flux` (or `flux`) for live streaming turn-taking.\n- `deepgram/nova-3` and `deepgram/nova-2` for live streaming transcription.\n- `speechmatics/standard` and `speechmatics/enhanced` for live streaming transcription.\n- `assemblyai/universal-streaming` for live streaming transcription.\n- `xai/grok-stt` for live streaming transcription.\n- `azure/fast` and `azure/realtime`; Azure models require `region`, and unsupported regions require `api_key_ref`.\n- `google/latest_long` for non-streaming multilingual transcription.\n- `distil-whisper/distil-large-v2` for lower-latency English-only non-streaming transcription.\n- `openai/whisper-large-v3-turbo` for multilingual non-streaming transcription with automatic language detection.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). Supported models: `Mini`, `Max`.\n- **xAI:** Use `xAI.<VoiceId>` (e.g., `xAI.eve`). Available voices: `eve`, `ara`, `rex`, `sal`, `leo`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.startAIAssistant('call_control_id');\n\nconsole.log(response);\n```",
perLanguage: {
typescript: {
method: 'client.calls.actions.startAIAssistant',
@@ -11293,7 +11293,7 @@ const EMBEDDED_METHODS = [
},
php: {
method: 'calls->actions->startAIAssistant',
-example: "<?php\n\nrequire_once dirname(__DIR__) . '/vendor/autoload.php';\n\n$client = new Client(apiKey: 'My API Key');\n\n$response = $client->calls->actions->startAIAssistant(\n 'call_control_id',\n assistant: [\n 'id' => 'id',\n 'dynamicVariables' => [\n 'customer_name' => 'John', 'account_id' => 'ACC-12345'\n ],\n 'externalLlm' => [\n 'authenticationMethod' => 'token',\n 'baseURL' => 'base_url',\n 'certificateRef' => 'certificate_ref',\n 'forwardMetadata' => true,\n 'llmAPIKeyRef' => 'llm_api_key_ref',\n 'model' => 'model',\n 'tokenRetrievalURL' => 'token_retrieval_url',\n ],\n 'fallbackConfig' => [\n 'externalLlm' => [\n 'authenticationMethod' => 'token',\n 'baseURL' => 'base_url',\n 'certificateRef' => 'certificate_ref',\n 'forwardMetadata' => true,\n 'llmAPIKeyRef' => 'llm_api_key_ref',\n 'model' => 'model',\n 'tokenRetrievalURL' => 'token_retrieval_url',\n ],\n 'llmAPIKeyRef' => 'llm_api_key_ref',\n 'model' => 'model',\n ],\n 'greeting' => 'greeting',\n 'instructions' => 'You are a friendly voice assistant.',\n 'llmAPIKeyRef' => 'my_llm_api_key',\n 'mcpServers' => [['foo' => 'bar']],\n 'model' => 'gpt-4o',\n 'name' => 'name',\n 'observabilitySettings' => ['foo' => 'bar'],\n 'openaiAPIKeyRef' => 'my_openai_api_key',\n 'tools' => [\n [\n 'bookAppointment' => [\n 'apiKeyRef' => 'my_calcom_api_key',\n 'eventTypeID' => 0,\n 'attendeeName' => 'attendee_name',\n 'attendeeTimezone' => 'attendee_timezone',\n ],\n 'type' => 'book_appointment',\n ],\n ],\n ],\n clientState: 'aGF2ZSBhIG5pY2UgZGF5ID1d',\n commandID: '891510ac-f3e4-11e8-af5b-de00688a4901',\n greeting: 'Hello, can you tell me your age and where you live?',\n interruptionSettings: ['enable' => true],\n messageHistory: [\n [\n 'content' => 'Hello, I would like some help.',\n 'role' => 'user',\n 'metadata' => ['foo' => 'bar'],\n ],\n ],\n participants: [\n [\n 'id' => 'v3:abc123def456',\n 'role' => 'user',\n 'name' => 'John Doe',\n 'onHangup' => 'continue_conversation',\n ],\n ],\n sendMessageHistoryUpdates: true,\n transcription: ['model' => 'distil-whisper/distil-large-v2'],\n voice: 'Telnyx.KokoroTTS.af',\n voiceSettings: [\n 'type' => 'elevenlabs', 'apiKeyRef' => 'my_elevenlabs_api_key'\n ],\n);\n\nvar_dump($response);",
+example: "<?php\n\nrequire_once dirname(__DIR__) . '/vendor/autoload.php';\n\n$client = new Client(apiKey: 'My API Key');\n\n$response = $client->calls->actions->startAIAssistant(\n 'call_control_id',\n assistant: [\n 'id' => 'id',\n 'dynamicVariables' => [\n 'customer_name' => 'John', 'account_id' => 'ACC-12345'\n ],\n 'externalLlm' => [\n 'authenticationMethod' => 'token',\n 'baseURL' => 'base_url',\n 'certificateRef' => 'certificate_ref',\n 'forwardMetadata' => true,\n 'llmAPIKeyRef' => 'llm_api_key_ref',\n 'model' => 'model',\n 'tokenRetrievalURL' => 'token_retrieval_url',\n ],\n 'fallbackConfig' => [\n 'externalLlm' => [\n 'authenticationMethod' => 'token',\n 'baseURL' => 'base_url',\n 'certificateRef' => 'certificate_ref',\n 'forwardMetadata' => true,\n 'llmAPIKeyRef' => 'llm_api_key_ref',\n 'model' => 'model',\n 'tokenRetrievalURL' => 'token_retrieval_url',\n ],\n 'llmAPIKeyRef' => 'llm_api_key_ref',\n 'model' => 'model',\n ],\n 'greeting' => 'greeting',\n 'instructions' => 'You are a friendly voice assistant.',\n 'llmAPIKeyRef' => 'my_llm_api_key',\n 'mcpServers' => [['foo' => 'bar']],\n 'model' => 'gpt-4o',\n 'name' => 'name',\n 'observabilitySettings' => ['foo' => 'bar'],\n 'openaiAPIKeyRef' => 'my_openai_api_key',\n 'tools' => [\n [\n 'bookAppointment' => [\n 'apiKeyRef' => 'my_calcom_api_key',\n 'eventTypeID' => 0,\n 'attendeeName' => 'attendee_name',\n 'attendeeTimezone' => 'attendee_timezone',\n ],\n 'type' => 'book_appointment',\n ],\n ],\n ],\n clientState: 'aGF2ZSBhIG5pY2UgZGF5ID1d',\n commandID: '891510ac-f3e4-11e8-af5b-de00688a4901',\n greeting: 'Hello, can you tell me your age and where you live?',\n interruptionSettings: ['enable' => true],\n messageHistory: [\n [\n 'content' => 'Hello, I would like some help.',\n 'role' => 'user',\n 'metadata' => ['foo' => 'bar'],\n ],\n ],\n participants: [\n [\n 'id' => 'v3:abc123def456',\n 'role' => 'user',\n 'name' => 'John Doe',\n 'onHangup' => 'continue_conversation',\n ],\n ],\n sendMessageHistoryUpdates: true,\n transcription: [\n 'language' => 'auto', 'model' => 'distil-whisper/distil-large-v2'\n ],\n voice: 'Telnyx.KokoroTTS.af',\n voiceSettings: [\n 'type' => 'elevenlabs', 'apiKeyRef' => 'my_elevenlabs_api_key'\n ],\n);\n\nvar_dump($response);",
},
http: {
example: 'curl https://api.telnyx.com/v2/calls/$CALL_CONTROL_ID/actions/ai_assistant_start \\\n -H \'Content-Type: application/json\' \\\n -H "Authorization: Bearer $TELNYX_API_KEY" \\\n -d \'{\n "client_state": "aGF2ZSBhIG5pY2UgZGF5ID1d",\n "command_id": "891510ac-f3e4-11e8-af5b-de00688a4901",\n "greeting": "Hello, can you tell me your age and where you live?",\n "voice": "Telnyx.KokoroTTS.af"\n }\'',
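
The net effect of these hunks: `transcription` on `ai_assistant_start` now accepts a `language` hint alongside `model`. A minimal TypeScript sketch of the new call shape, assuming the request body goes in the second argument (as in the embedded gather_using_ai example below) and `TELNYX_API_KEY` is set in the environment; identifiers are placeholders:

```typescript
import Telnyx from 'telnyx';

const client = new Telnyx();

// Start an AI assistant with an explicit transcription language hint.
// 'deepgram/flux' and the 'en' hint come from the embedded docs above;
// use 'auto' or 'multi' to let supported models detect the language.
const response = await client.calls.actions.startAIAssistant('call_control_id', {
  assistant: { id: 'assistant-id' },
  transcription: { language: 'en', model: 'deepgram/flux' },
});

console.log(response.data?.conversation_id);
```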
@@ -11813,13 +11813,13 @@ const EMBEDDED_METHODS = [
"message_history?: { content?: string; role?: 'assistant' | 'user'; }[];",
'send_message_history_updates?: boolean;',
'send_partial_results?: boolean;',
-'transcription?: { model?: string; };',
+'transcription?: { language?: string; model?: string; };',
'user_response_timeout_ms?: number;',
'voice?: string;',
"voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; };",
],
response: '{ data?: { conversation_id?: string; result?: string; }; }',
-markdown: "## gather_using_ai\n\n`client.calls.actions.gatherUsingAI(call_control_id: string, parameters: object, assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, gather_ended_speech?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, language?: string, message_history?: { content?: string; role?: 'assistant' | 'user'; }[], send_message_history_updates?: boolean, send_partial_results?: boolean, transcription?: { model?: string; }, user_response_timeout_ms?: number, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/gather_using_ai`\n\nGather parameters defined in the request payload using a voice assistant.\n\n You can pass parameters described as a JSON Schema object and the voice assistant will attempt to gather these informations. \n\n**Expected Webhooks:**\n\n- `call.ai_gather.ended`\n- `call.conversation.ended`\n- `call.ai_gather.partial_results` (if `send_partial_results` is set to `true`)\n- `call.ai_gather.message_history_updated` (if `send_message_history_updates` is set to `true`)\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `parameters: object`\n The parameters described as a JSON Schema object that needs to be gathered by the voice assistant. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format\n\n- `assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n Assistant configuration including choice of LLM, custom instructions, and tools.\n - `instructions?: string`\n The system instructions that the voice assistant uses during the gather command\n - `model?: string`\n The model to be used by the voice assistant.\n - `openai_api_key_ref?: string`\n This is necessary only if the model selected is from OpenAI. You would pass the `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) that refers to your OpenAI API Key. Warning: Free plans are unlikely to work with this integration.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n The tools that the voice assistant can use.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `gather_ended_speech?: string`\n Text that will be played when the gathering has finished. There is a 3,000 character limit.\n\n- `greeting?: string`\n Text that will be played when the gathering starts, if none then nothing will be played when the gathering starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `language?: string`\n Language to use for speech recognition\n\n- `message_history?: { content?: string; role?: 'assistant' | 'user'; }[]`\n The message history you want the voice assistant to be aware of, this can be useful to keep the context of the conversation, or to pass additional information to the voice assistant.\n\n- `send_message_history_updates?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send updates to the message history via the `call.ai_gather.message_history_updated` callback in real time as the message history is updated.\n\n- `send_partial_results?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send partial results via the `call.ai_gather.partial_results` callback in real time as individual fields are gathered. If set to `false`, the voice assistant will only send the final result via the `call.ai_gather.ended` callback.\n\n- `transcription?: { model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. `fixie-ai/ultravox-v0_4`) will ignore this field.\n - `model?: string`\n The speech to text model to be used by the voice assistant.\n\n- `distil-whisper/distil-large-v2` is lower latency but English-only.\n- `openai/whisper-large-v3-turbo` is multi-lingual with automatic language detection but slightly higher latency.\n- `google` is a multi-lingual option, please describe the language in the `language` field.\n\n- `user_response_timeout_ms?: number`\n The maximum time in milliseconds to wait for user response before timing out.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). Supported models: `Mini`, `Max`.\n- **xAI:** Use `xAI.<VoiceId>` (e.g., `xAI.eve`). Available voices: `eve`, `ara`, `rex`, `sal`, `leo`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.gatherUsingAI('call_control_id', { parameters: {\n properties: 'bar',\n required: 'bar',\n type: 'bar',\n} });\n\nconsole.log(response);\n```",
+markdown: "## gather_using_ai\n\n`client.calls.actions.gatherUsingAI(call_control_id: string, parameters: object, assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: book_appointment_tool | check_availability_tool | webhook_tool | hangup_tool | transfer_tool | call_control_retrieval_tool[]; }, client_state?: string, command_id?: string, gather_ended_speech?: string, greeting?: string, interruption_settings?: { enable?: boolean; }, language?: string, message_history?: { content?: string; role?: 'assistant' | 'user'; }[], send_message_history_updates?: boolean, send_partial_results?: boolean, transcription?: { language?: string; model?: string; }, user_response_timeout_ms?: number, voice?: string, voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }): { data?: call_control_command_result_with_conversation_id; }`\n\n**post** `/calls/{call_control_id}/actions/gather_using_ai`\n\nGather parameters defined in the request payload using a voice assistant.\n\n You can pass parameters described as a JSON Schema object and the voice assistant will attempt to gather these informations. \n\n**Expected Webhooks:**\n\n- `call.ai_gather.ended`\n- `call.conversation.ended`\n- `call.ai_gather.partial_results` (if `send_partial_results` is set to `true`)\n- `call.ai_gather.message_history_updated` (if `send_message_history_updates` is set to `true`)\n\n\n### Parameters\n\n- `call_control_id: string`\n\n- `parameters: object`\n The parameters described as a JSON Schema object that needs to be gathered by the voice assistant. See the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format\n\n- `assistant?: { instructions?: string; model?: string; openai_api_key_ref?: string; tools?: { book_appointment: book_appointment_tool_params; type: 'book_appointment'; } | { check_availability: check_availability_tool_params; type: 'check_availability'; } | { type: 'webhook'; webhook: object; } | { hangup: hangup_tool_params; type: 'hangup'; } | { transfer: object; type: 'transfer'; } | { retrieval: call_control_bucket_ids; type: 'retrieval'; }[]; }`\n Assistant configuration including choice of LLM, custom instructions, and tools.\n - `instructions?: string`\n The system instructions that the voice assistant uses during the gather command\n - `model?: string`\n The model to be used by the voice assistant.\n - `openai_api_key_ref?: string`\n This is necessary only if the model selected is from OpenAI. You would pass the `identifier` for an integration secret [/v2/integration_secrets](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) that refers to your OpenAI API Key. Warning: Free plans are unlikely to work with this integration.\n - `tools?: { book_appointment: { api_key_ref: string; event_type_id: number; attendee_name?: string; attendee_timezone?: string; }; type: 'book_appointment'; } | { check_availability: { api_key_ref: string; event_type_id: number; }; type: 'check_availability'; } | { type: 'webhook'; webhook: { description: string; name: string; url: string; body_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; headers?: { name?: string; value?: string; }[]; method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'; path_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; query_parameters?: { properties?: object; required?: string[]; type?: 'object'; }; }; } | { hangup: { description?: string; }; type: 'hangup'; } | { transfer: { from: string; targets: { to: string; name?: string; }[] | string; }; type: 'transfer'; } | { retrieval: { bucket_ids: string[]; max_num_results?: number; }; type: 'retrieval'; }[]`\n The tools that the voice assistant can use.\n\n- `client_state?: string`\n Use this field to add state to every subsequent webhook. It must be a valid Base-64 encoded string.\n\n- `command_id?: string`\n Use this field to avoid duplicate commands. Telnyx will ignore any command with the same `command_id` for the same `call_control_id`.\n\n- `gather_ended_speech?: string`\n Text that will be played when the gathering has finished. There is a 3,000 character limit.\n\n- `greeting?: string`\n Text that will be played when the gathering starts, if none then nothing will be played when the gathering starts. The greeting can be text for any voice or SSML for `AWS.Polly.<voice_id>` voices. There is a 3,000 character limit.\n\n- `interruption_settings?: { enable?: boolean; }`\n Settings for handling user interruptions during assistant speech\n - `enable?: boolean`\n When true, allows users to interrupt the assistant while speaking\n\n- `language?: string`\n Language to use for speech recognition\n\n- `message_history?: { content?: string; role?: 'assistant' | 'user'; }[]`\n The message history you want the voice assistant to be aware of, this can be useful to keep the context of the conversation, or to pass additional information to the voice assistant.\n\n- `send_message_history_updates?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send updates to the message history via the `call.ai_gather.message_history_updated` callback in real time as the message history is updated.\n\n- `send_partial_results?: boolean`\n Default is `false`. If set to `true`, the voice assistant will send partial results via the `call.ai_gather.partial_results` callback in real time as individual fields are gathered. If set to `false`, the voice assistant will only send the final result via the `call.ai_gather.ended` callback.\n\n- `transcription?: { language?: string; model?: string; }`\n The settings associated with speech to text for the voice assistant. This is only relevant if the assistant uses a text-to-text language model. Any assistant using a model with native audio support (e.g. `fixie-ai/ultravox-v0_4`) will ignore this field.\n - `language?: string`\n The language of the audio to be transcribed. If not set, or if set to `auto`, supported models will automatically detect the language. Supported and meaningful values depend on the selected transcription `model`. For `deepgram/flux`, supported values are: `auto` (Telnyx language detection controls the language hint), `multi` (no language hint), and language-specific hints `en`, `es`, `fr`, `de`, `hi`, `ru`, `pt`, `ja`, `it`, and `nl`.\n - `model?: string`\n The speech to text model to be used by the voice assistant. Supported models include:\n\n- `deepgram/flux` (or `flux`) for live streaming turn-taking.\n- `deepgram/nova-3` and `deepgram/nova-2` for live streaming transcription.\n- `speechmatics/standard` and `speechmatics/enhanced` for live streaming transcription.\n- `assemblyai/universal-streaming` for live streaming transcription.\n- `xai/grok-stt` for live streaming transcription.\n- `azure/fast` and `azure/realtime`; Azure models require `region`, and unsupported regions require `api_key_ref`.\n- `google/latest_long` for non-streaming multilingual transcription.\n- `distil-whisper/distil-large-v2` for lower-latency English-only non-streaming transcription.\n- `openai/whisper-large-v3-turbo` for multilingual non-streaming transcription with automatic language detection.\n\n- `user_response_timeout_ms?: number`\n The maximum time in milliseconds to wait for user response before timing out.\n\n- `voice?: string`\n The voice to be used by the voice assistant. Currently we support ElevenLabs, Telnyx and AWS voices.\n\n **Supported Providers:**\n- **AWS:** Use `AWS.Polly.<VoiceId>` (e.g., `AWS.Polly.Joanna`). For neural voices, which provide more realistic, human-like speech, append `-Neural` to the `VoiceId` (e.g., `AWS.Polly.Joanna-Neural`). Check the [available voices](https://docs.aws.amazon.com/polly/latest/dg/available-voices.html) for compatibility.\n- **Azure:** Use `Azure.<VoiceId>. (e.g. Azure.en-CA-ClaraNeural, Azure.en-CA-LiamNeural, Azure.en-US-BrianMultilingualNeural, Azure.en-US-Ava:DragonHDLatestNeural. For a complete list of voices, go to [Azure Voice Gallery](https://speech.microsoft.com/portal/voicegallery).)\n- **ElevenLabs:** Use `ElevenLabs.<ModelId>.<VoiceId>` (e.g., `ElevenLabs.BaseModel.John`). The `ModelId` part is optional. To use ElevenLabs, you must provide your ElevenLabs API key as an integration secret under `\"voice_settings\": {\"api_key_ref\": \"<secret_id>\"}`. See [integration secrets documentation](https://developers.telnyx.com/api/secrets-manager/integration-secrets/create-integration-secret) for details. Check [available voices](https://elevenlabs.io/docs/api-reference/get-voices).\n - **Telnyx:** Use `Telnyx.<model_id>.<voice_id>`\n- **Inworld:** Use `Inworld.<ModelId>.<VoiceId>` (e.g., `Inworld.Mini.Loretta`, `Inworld.Max.Oliver`). Supported models: `Mini`, `Max`.\n- **xAI:** Use `xAI.<VoiceId>` (e.g., `xAI.eve`). Available voices: `eve`, `ara`, `rex`, `sal`, `leo`.\n\n- `voice_settings?: { type: 'elevenlabs'; api_key_ref?: string; } | { type: 'telnyx'; voice_speed?: number; } | { type: 'aws'; } | { type: 'azure'; api_key_ref?: string; deployment_id?: string; effect?: 'eq_car' | 'eq_telecomhp8k'; gender?: 'Male' | 'Female'; region?: string; } | { type: 'rime'; voice_speed?: number; } | { type: 'resemble'; format?: 'wav' | 'mp3'; precision?: 'PCM_16' | 'PCM_24' | 'PCM_32' | 'MULAW'; sample_rate?: '8000' | '16000' | '22050' | '32000' | '44100' | '48000'; } | { type: 'xai'; language?: string; }`\n The settings associated with the voice selected\n\n### Returns\n\n- `{ data?: { conversation_id?: string; result?: string; }; }`\n\n - `data?: { conversation_id?: string; result?: string; }`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.calls.actions.gatherUsingAI('call_control_id', { parameters: {\n properties: 'bar',\n required: 'bar',\n type: 'bar',\n} });\n\nconsole.log(response);\n```",
perLanguage: {
typescript: {
method: 'client.calls.actions.gatherUsingAI',
@@ -11847,7 +11847,7 @@ const EMBEDDED_METHODS = [
},
php: {
method: 'calls->actions->gatherUsingAI',
-example: "<?php\n\nrequire_once dirname(__DIR__) . '/vendor/autoload.php';\n\n$client = new Client(apiKey: 'My API Key');\n\n$response = $client->calls->actions->gatherUsingAI(\n 'call_control_id',\n parameters: ['properties' => 'bar', 'required' => 'bar', 'type' => 'bar'],\n assistant: [\n 'instructions' => 'You are a friendly voice assistant.',\n 'model' => 'Qwen/Qwen3-235B-A22B',\n 'openaiAPIKeyRef' => 'my_openai_api_key',\n 'tools' => [\n [\n 'bookAppointment' => [\n 'apiKeyRef' => 'my_calcom_api_key',\n 'eventTypeID' => 0,\n 'attendeeName' => 'attendee_name',\n 'attendeeTimezone' => 'attendee_timezone',\n ],\n 'type' => 'book_appointment',\n ],\n ],\n ],\n clientState: 'aGF2ZSBhIG5pY2UgZGF5ID1d',\n commandID: '891510ac-f3e4-11e8-af5b-de00688a4901',\n gatherEndedSpeech: 'Thank you for providing the information.',\n greeting: 'Hello, can you tell me your age and where you live?',\n interruptionSettings: ['enable' => true],\n language: GoogleTranscriptionLanguage::EN,\n messageHistory: [\n ['content' => 'Hello, what\\'s your name?', 'role' => 'assistant'],\n ['content' => 'Hello, I\\'m John.', 'role' => 'user'],\n ],\n sendMessageHistoryUpdates: true,\n sendPartialResults: true,\n transcription: ['model' => 'distil-whisper/distil-large-v2'],\n userResponseTimeoutMs: 5000,\n voice: 'Telnyx.KokoroTTS.af',\n voiceSettings: [\n 'type' => 'elevenlabs', 'apiKeyRef' => 'my_elevenlabs_api_key'\n ],\n);\n\nvar_dump($response);",
+example: "<?php\n\nrequire_once dirname(__DIR__) . '/vendor/autoload.php';\n\n$client = new Client(apiKey: 'My API Key');\n\n$response = $client->calls->actions->gatherUsingAI(\n 'call_control_id',\n parameters: ['properties' => 'bar', 'required' => 'bar', 'type' => 'bar'],\n assistant: [\n 'instructions' => 'You are a friendly voice assistant.',\n 'model' => 'Qwen/Qwen3-235B-A22B',\n 'openaiAPIKeyRef' => 'my_openai_api_key',\n 'tools' => [\n [\n 'bookAppointment' => [\n 'apiKeyRef' => 'my_calcom_api_key',\n 'eventTypeID' => 0,\n 'attendeeName' => 'attendee_name',\n 'attendeeTimezone' => 'attendee_timezone',\n ],\n 'type' => 'book_appointment',\n ],\n ],\n ],\n clientState: 'aGF2ZSBhIG5pY2UgZGF5ID1d',\n commandID: '891510ac-f3e4-11e8-af5b-de00688a4901',\n gatherEndedSpeech: 'Thank you for providing the information.',\n greeting: 'Hello, can you tell me your age and where you live?',\n interruptionSettings: ['enable' => true],\n language: GoogleTranscriptionLanguage::EN,\n messageHistory: [\n ['content' => 'Hello, what\\'s your name?', 'role' => 'assistant'],\n ['content' => 'Hello, I\\'m John.', 'role' => 'user'],\n ],\n sendMessageHistoryUpdates: true,\n sendPartialResults: true,\n transcription: [\n 'language' => 'auto', 'model' => 'distil-whisper/distil-large-v2'\n ],\n userResponseTimeoutMs: 5000,\n voice: 'Telnyx.KokoroTTS.af',\n voiceSettings: [\n 'type' => 'elevenlabs', 'apiKeyRef' => 'my_elevenlabs_api_key'\n ],\n);\n\nvar_dump($response);",
},
http: {
example: 'curl https://api.telnyx.com/v2/calls/$CALL_CONTROL_ID/actions/gather_using_ai \\\n -H \'Content-Type: application/json\' \\\n -H "Authorization: Bearer $TELNYX_API_KEY" \\\n -d "{\n \\"parameters\\": {\n \\"properties\\": \\"bar\\",\n \\"required\\": \\"bar\\",\n \\"type\\": \\"bar\\"\n },\n \\"client_state\\": \\"aGF2ZSBhIG5pY2UgZGF5ID1d\\",\n \\"command_id\\": \\"891510ac-f3e4-11e8-af5b-de00688a4901\\",\n \\"gather_ended_speech\\": \\"Thank you for providing the information.\\",\n \\"greeting\\": \\"Hello, can you tell me your age and where you live?\\",\n \\"message_history\\": [\n {\n \\"content\\": \\"Hello, what\'s your name?\\",\n \\"role\\": \\"assistant\\"\n },\n {\n \\"content\\": \\"Hello, I\'m John.\\",\n \\"role\\": \\"user\\"\n }\n ],\n \\"send_message_history_updates\\": true,\n \\"send_partial_results\\": true,\n \\"user_response_timeout_ms\\": 5000,\n \\"voice\\": \\"Telnyx.KokoroTTS.af\\"\n }"',
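
The same field lands on `gather_using_ai`. A hedged sketch passing the new `transcription.language` together with a JSON Schema `parameters` object (the schema itself is illustrative):

```typescript
import Telnyx from 'telnyx';

const client = new Telnyx();

// Gather two fields; 'auto' lets supported transcription models
// auto-detect the spoken language, per the updated docs above.
const response = await client.calls.actions.gatherUsingAI('call_control_id', {
  parameters: {
    type: 'object',
    properties: { age: { type: 'integer' }, city: { type: 'string' } },
    required: ['age', 'city'],
  },
  transcription: { language: 'auto', model: 'distil-whisper/distil-large-v2' },
});

console.log(response.data?.result);
```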
@@ -44590,12 +44590,12 @@ const EMBEDDED_METHODS = [
endpoint: '/10dlc/brand/{brandId}/externalVetting',
httpMethod: 'post',
summary: 'Order Brand External Vetting',
-description: 'Order new external vetting for a brand',
+description: 'Order new external vetting for a brand.\n\nDuplicate orders for the same `evpId` and `vettingClass` return `400` with code `10012` if a successful vetting exists within the last 180 days, or one is currently being processed. Failed vettings can be retried immediately.',
stainlessPath: '(resource) messaging_10dlc.brand.external_vetting > (method) order',
qualified: 'client.messaging10dlc.brand.externalVetting.order',
params: ['brandId: string;', 'evpId: string;', 'vettingClass: string;'],
response: '{ createDate?: string; evpId?: string; vettedDate?: string; vettingClass?: string; vettingId?: string; vettingScore?: number; vettingToken?: string; }',
-markdown: "## order\n\n`client.messaging10dlc.brand.externalVetting.order(brandId: string, evpId: string, vettingClass: string): { createDate?: string; evpId?: string; vettedDate?: string; vettingClass?: string; vettingId?: string; vettingScore?: number; vettingToken?: string; }`\n\n**post** `/10dlc/brand/{brandId}/externalVetting`\n\nOrder new external vetting for a brand\n\n### Parameters\n\n- `brandId: string`\n\n- `evpId: string`\n External vetting provider ID for the brand.\n\n- `vettingClass: string`\n Identifies the vetting classification.\n\n### Returns\n\n- `{ createDate?: string; evpId?: string; vettedDate?: string; vettingClass?: string; vettingId?: string; vettingScore?: number; vettingToken?: string; }`\n\n - `createDate?: string`\n - `evpId?: string`\n - `vettedDate?: string`\n - `vettingClass?: string`\n - `vettingId?: string`\n - `vettingScore?: number`\n - `vettingToken?: string`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.messaging10dlc.brand.externalVetting.order('brandId', { evpId: 'evpId', vettingClass: 'vettingClass' });\n\nconsole.log(response);\n```",
+markdown: "## order\n\n`client.messaging10dlc.brand.externalVetting.order(brandId: string, evpId: string, vettingClass: string): { createDate?: string; evpId?: string; vettedDate?: string; vettingClass?: string; vettingId?: string; vettingScore?: number; vettingToken?: string; }`\n\n**post** `/10dlc/brand/{brandId}/externalVetting`\n\nOrder new external vetting for a brand.\n\nDuplicate orders for the same `evpId` and `vettingClass` return `400` with code `10012` if a successful vetting exists within the last 180 days, or one is currently being processed. Failed vettings can be retried immediately.\n\n### Parameters\n\n- `brandId: string`\n\n- `evpId: string`\n External vetting provider ID for the brand.\n\n- `vettingClass: string`\n Identifies the vetting classification.\n\n### Returns\n\n- `{ createDate?: string; evpId?: string; vettedDate?: string; vettingClass?: string; vettingId?: string; vettingScore?: number; vettingToken?: string; }`\n\n - `createDate?: string`\n - `evpId?: string`\n - `vettedDate?: string`\n - `vettingClass?: string`\n - `vettingId?: string`\n - `vettingScore?: number`\n - `vettingToken?: string`\n\n### Example\n\n```typescript\nimport Telnyx from 'telnyx';\n\nconst client = new Telnyx();\n\nconst response = await client.messaging10dlc.brand.externalVetting.order('brandId', { evpId: 'evpId', vettingClass: 'vettingClass' });\n\nconsole.log(response);\n```",
perLanguage: {
typescript: {
method: 'client.messaging10dlc.brand.externalVetting.order',
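
The reworded description documents the duplicate-order rule (HTTP `400` with code `10012` when a successful vetting exists within the last 180 days or one is still in progress). A sketch of ordering vetting and catching that case; the `Telnyx.APIError` class and its `status`/`message` fields are assumptions based on the SDK's generated error types:

```typescript
import Telnyx from 'telnyx';

const client = new Telnyx();

try {
  const vetting = await client.messaging10dlc.brand.externalVetting.order('brandId', {
    evpId: 'evpId',
    vettingClass: 'vettingClass',
  });
  console.log(vetting.vettingScore);
} catch (err) {
  // Assumed error shape: a 400 here signals a duplicate order (code 10012).
  // Failed vettings can be retried immediately, per the new description.
  if (err instanceof Telnyx.APIError && err.status === 400) {
    console.error('Duplicate vetting order:', err.message);
  } else {
    throw err;
  }
}
```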