telnyx 5.100.0 → 5.102.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +16 -0
- data/README.md +1 -1
- data/lib/telnyx/models/ai/chat_create_completion_params.rb +36 -1
- data/lib/telnyx/models/ai/openai/chat_create_completion_params.rb +458 -0
- data/lib/telnyx/models/ai/openai/chat_create_completion_response.rb +12 -0
- data/lib/telnyx/models/ai/openai_list_models_params.rb +16 -0
- data/lib/telnyx/models/ai/openai_list_models_response.rb +52 -0
- data/lib/telnyx/resources/ai/chat.rb +9 -2
- data/lib/telnyx/resources/ai/openai/chat.rb +96 -0
- data/lib/telnyx/resources/ai/openai.rb +26 -0
- data/lib/telnyx/resources/ai.rb +7 -5
- data/lib/telnyx/resources/messaging_10dlc/brand/external_vetting.rb +5 -1
- data/lib/telnyx/version.rb +1 -1
- data/lib/telnyx.rb +5 -0
- data/rbi/telnyx/models/ai/chat_create_completion_params.rbi +58 -0
- data/rbi/telnyx/models/ai/openai/chat_create_completion_params.rbi +963 -0
- data/rbi/telnyx/models/ai/openai/chat_create_completion_response.rbi +15 -0
- data/rbi/telnyx/models/ai/openai_list_models_params.rbi +29 -0
- data/rbi/telnyx/models/ai/openai_list_models_response.rbi +96 -0
- data/rbi/telnyx/resources/ai/chat.rbi +11 -1
- data/rbi/telnyx/resources/ai/openai/chat.rbi +147 -0
- data/rbi/telnyx/resources/ai/openai.rbi +16 -0
- data/rbi/telnyx/resources/ai.rbi +5 -5
- data/rbi/telnyx/resources/messaging_10dlc/brand/external_vetting.rbi +5 -1
- data/sig/telnyx/models/ai/chat_create_completion_params.rbs +26 -0
- data/sig/telnyx/models/ai/openai/chat_create_completion_params.rbs +434 -0
- data/sig/telnyx/models/ai/openai/chat_create_completion_response.rbs +11 -0
- data/sig/telnyx/models/ai/openai_list_models_params.rbs +17 -0
- data/sig/telnyx/models/ai/openai_list_models_response.rbs +58 -0
- data/sig/telnyx/resources/ai/chat.rbs +2 -0
- data/sig/telnyx/resources/ai/openai/chat.rbs +41 -0
- data/sig/telnyx/resources/ai/openai.rbs +6 -0
- metadata +17 -2
|
@@ -5,14 +5,17 @@ module Telnyx
|
|
|
5
5
|
class AI
|
|
6
6
|
# Generate text with LLMs
|
|
7
7
|
class Chat
|
|
8
|
+
# @deprecated
|
|
9
|
+
#
|
|
8
10
|
# Some parameter documentation has been truncated, see
|
|
9
11
|
# {Telnyx::Models::AI::ChatCreateCompletionParams} for more details.
|
|
10
12
|
#
|
|
11
|
-
#
|
|
13
|
+
# **Deprecated**: Use `POST /v2/ai/openai/chat/completions` instead. Chat with a
|
|
14
|
+
# language model. This endpoint is consistent with the
|
|
12
15
|
# [OpenAI Chat Completions API](https://platform.openai.com/docs/api-reference/chat)
|
|
13
16
|
# and may be used with the OpenAI JS or Python SDK.
|
|
14
17
|
#
|
|
15
|
-
# @overload create_completion(messages:, api_key_ref: nil, best_of: nil, early_stopping: nil, enable_thinking: nil, frequency_penalty: nil, guided_choice: nil, guided_json: nil, guided_regex: nil, length_penalty: nil, logprobs: nil, max_tokens: nil, min_p: nil, model: nil, n: nil, presence_penalty: nil, response_format: nil, stream: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, use_beam_search: nil, request_options: {})
|
|
18
|
+
# @overload create_completion(messages:, api_key_ref: nil, best_of: nil, early_stopping: nil, enable_thinking: nil, frequency_penalty: nil, guided_choice: nil, guided_json: nil, guided_regex: nil, length_penalty: nil, logprobs: nil, max_tokens: nil, min_p: nil, model: nil, n: nil, presence_penalty: nil, response_format: nil, seed: nil, stop: nil, stream: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, use_beam_search: nil, request_options: {})
|
|
16
19
|
#
|
|
17
20
|
# @param messages [Array<Telnyx::Models::AI::ChatCreateCompletionParams::Message>] A list of the previous chat messages for context.
|
|
18
21
|
#
|
|
@@ -48,6 +51,10 @@ module Telnyx
|
|
|
48
51
|
#
|
|
49
52
|
# @param response_format [Telnyx::Models::AI::ChatCreateCompletionParams::ResponseFormat] Use this if you want to guarantee a JSON output without defining a schema. For c
|
|
50
53
|
#
|
|
54
|
+
# @param seed [Integer] If specified, the system will make a best effort to sample deterministically, su
|
|
55
|
+
#
|
|
56
|
+
# @param stop [String, Array<String>] Up to 4 sequences where the API will stop generating further tokens. The returne
|
|
57
|
+
#
|
|
51
58
|
# @param stream [Boolean] Whether or not to stream data-only server-sent events as they become available.
|
|
52
59
|
#
|
|
53
60
|
# @param temperature [Float] Adjusts the "creativity" of the model. Lower values make the model more determin
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Telnyx
|
|
4
|
+
module Resources
|
|
5
|
+
class AI
|
|
6
|
+
class OpenAI
|
|
7
|
+
class Chat
|
|
8
|
+
# Some parameter documentation has been truncated, see
|
|
9
|
+
# {Telnyx::Models::AI::OpenAI::ChatCreateCompletionParams} for more details.
|
|
10
|
+
#
|
|
11
|
+
# Chat with a language model. This endpoint is consistent with the
|
|
12
|
+
# [OpenAI Chat Completions API](https://platform.openai.com/docs/api-reference/chat)
|
|
13
|
+
# and may be used with the OpenAI JS or Python SDK by setting the base URL to
|
|
14
|
+
# `https://api.telnyx.com/v2/ai/openai`.
|
|
15
|
+
#
|
|
16
|
+
# @overload create_completion(messages:, api_key_ref: nil, best_of: nil, early_stopping: nil, enable_thinking: nil, frequency_penalty: nil, guided_choice: nil, guided_json: nil, guided_regex: nil, length_penalty: nil, logprobs: nil, max_tokens: nil, min_p: nil, model: nil, n: nil, presence_penalty: nil, response_format: nil, seed: nil, stop: nil, stream: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, use_beam_search: nil, request_options: {})
|
|
17
|
+
#
|
|
18
|
+
# @param messages [Array<Telnyx::Models::AI::OpenAI::ChatCreateCompletionParams::Message>] A list of the previous chat messages for context.
|
|
19
|
+
#
|
|
20
|
+
# @param api_key_ref [String] If you are using an external inference provider like xAI or OpenAI, this field a
|
|
21
|
+
#
|
|
22
|
+
# @param best_of [Integer] This is used with `use_beam_search` to determine how many candidate beams to exp
|
|
23
|
+
#
|
|
24
|
+
# @param early_stopping [Boolean] This is used with `use_beam_search`. If `true`, generation stops as soon as ther
|
|
25
|
+
#
|
|
26
|
+
# @param enable_thinking [Boolean] Whether to enable the thinking/reasoning phase for models that support it (e.g.,
|
|
27
|
+
#
|
|
28
|
+
# @param frequency_penalty [Float] Higher values will penalize the model from repeating the same output tokens.
|
|
29
|
+
#
|
|
30
|
+
# @param guided_choice [Array<String>] If specified, the output will be exactly one of the choices.
|
|
31
|
+
#
|
|
32
|
+
# @param guided_json [Hash{Symbol=>Object}] Must be a valid JSON schema. If specified, the output will follow the JSON schem
|
|
33
|
+
#
|
|
34
|
+
# @param guided_regex [String] If specified, the output will follow the regex pattern.
|
|
35
|
+
#
|
|
36
|
+
# @param length_penalty [Float] This is used with `use_beam_search` to prefer shorter or longer completions.
|
|
37
|
+
#
|
|
38
|
+
# @param logprobs [Boolean] Whether to return log probabilities of the output tokens or not. If true, return
|
|
39
|
+
#
|
|
40
|
+
# @param max_tokens [Integer] Maximum number of completion tokens the model should generate.
|
|
41
|
+
#
|
|
42
|
+
# @param min_p [Float] This is an alternative to `top_p` that [many prefer](https://github.com/huggingf
|
|
43
|
+
#
|
|
44
|
+
# @param model [String] The language model to chat with.
|
|
45
|
+
#
|
|
46
|
+
# @param n [Float] This will return multiple choices for you instead of a single chat completion.
|
|
47
|
+
#
|
|
48
|
+
# @param presence_penalty [Float] Higher values will penalize the model from repeating the same output tokens.
|
|
49
|
+
#
|
|
50
|
+
# @param response_format [Telnyx::Models::AI::OpenAI::ChatCreateCompletionParams::ResponseFormat] Use this if you want to guarantee a JSON output without defining a schema. For c
|
|
51
|
+
#
|
|
52
|
+
# @param seed [Integer] If specified, the system will make a best effort to sample deterministically, su
|
|
53
|
+
#
|
|
54
|
+
# @param stop [String, Array<String>] Up to 4 sequences where the API will stop generating further tokens. The returne
|
|
55
|
+
#
|
|
56
|
+
# @param stream [Boolean] Whether or not to stream data-only server-sent events as they become available.
|
|
57
|
+
#
|
|
58
|
+
# @param temperature [Float] Adjusts the "creativity" of the model. Lower values make the model more determin
|
|
59
|
+
#
|
|
60
|
+
# @param tool_choice [Symbol, Telnyx::Models::AI::OpenAI::ChatCreateCompletionParams::ToolChoice]
|
|
61
|
+
#
|
|
62
|
+
# @param tools [Array<Telnyx::Models::AI::OpenAI::ChatCreateCompletionParams::Tool::Function, Telnyx::Models::AI::OpenAI::ChatCreateCompletionParams::Tool::Retrieval>] The `function` tool type follows the same schema as the [OpenAI Chat Completions
|
|
63
|
+
#
|
|
64
|
+
# @param top_logprobs [Integer] This is used with `logprobs`. An integer between 0 and 20 specifying the number
|
|
65
|
+
#
|
|
66
|
+
# @param top_p [Float] An alternative or complement to `temperature`. This adjusts how many of the top
|
|
67
|
+
#
|
|
68
|
+
# @param use_beam_search [Boolean] Setting this to `true` will allow the model to [explore more completion options]
|
|
69
|
+
#
|
|
70
|
+
# @param request_options [Telnyx::RequestOptions, Hash{Symbol=>Object}, nil]
|
|
71
|
+
#
|
|
72
|
+
# @return [Hash{Symbol=>Object}]
|
|
73
|
+
#
|
|
74
|
+
# @see Telnyx::Models::AI::OpenAI::ChatCreateCompletionParams
|
|
75
|
+
def create_completion(params)
|
|
76
|
+
parsed, options = Telnyx::AI::OpenAI::ChatCreateCompletionParams.dump_request(params)
|
|
77
|
+
@client.request(
|
|
78
|
+
method: :post,
|
|
79
|
+
path: "ai/openai/chat/completions",
|
|
80
|
+
body: parsed,
|
|
81
|
+
model: Telnyx::Internal::Type::HashOf[Telnyx::Internal::Type::Unknown],
|
|
82
|
+
options: options
|
|
83
|
+
)
|
|
84
|
+
end
|
|
85
|
+
|
|
86
|
+
# @api private
|
|
87
|
+
#
|
|
88
|
+
# @param client [Telnyx::Client]
|
|
89
|
+
def initialize(client:)
|
|
90
|
+
@client = client
|
|
91
|
+
end
|
|
92
|
+
end
|
|
93
|
+
end
|
|
94
|
+
end
|
|
95
|
+
end
|
|
96
|
+
end
|
|
@@ -9,12 +9,38 @@ module Telnyx
|
|
|
9
9
|
# @return [Telnyx::Resources::AI::OpenAI::Embeddings]
|
|
10
10
|
attr_reader :embeddings
|
|
11
11
|
|
|
12
|
+
# @return [Telnyx::Resources::AI::OpenAI::Chat]
|
|
13
|
+
attr_reader :chat
|
|
14
|
+
|
|
15
|
+
# This endpoint returns a list of Open Source and OpenAI models that are available
|
|
16
|
+
# for use. <br /><br /> **Note**: Model `id`s will be in the form
|
|
17
|
+
# `{source}/{model_name}`. For example `openai/gpt-4` or
|
|
18
|
+
# `mistralai/Mistral-7B-Instruct-v0.1` consistent with HuggingFace naming
|
|
19
|
+
# conventions.
|
|
20
|
+
#
|
|
21
|
+
# @overload list_models(request_options: {})
|
|
22
|
+
#
|
|
23
|
+
# @param request_options [Telnyx::RequestOptions, Hash{Symbol=>Object}, nil]
|
|
24
|
+
#
|
|
25
|
+
# @return [Telnyx::Models::AI::OpenAIListModelsResponse]
|
|
26
|
+
#
|
|
27
|
+
# @see Telnyx::Models::AI::OpenAIListModelsParams
|
|
28
|
+
def list_models(params = {})
|
|
29
|
+
@client.request(
|
|
30
|
+
method: :get,
|
|
31
|
+
path: "ai/openai/models",
|
|
32
|
+
model: Telnyx::Models::AI::OpenAIListModelsResponse,
|
|
33
|
+
options: params[:request_options]
|
|
34
|
+
)
|
|
35
|
+
end
|
|
36
|
+
|
|
12
37
|
# @api private
|
|
13
38
|
#
|
|
14
39
|
# @param client [Telnyx::Client]
|
|
15
40
|
def initialize(client:)
|
|
16
41
|
@client = client
|
|
17
42
|
@embeddings = Telnyx::Resources::AI::OpenAI::Embeddings.new(client: client)
|
|
43
|
+
@chat = Telnyx::Resources::AI::OpenAI::Chat.new(client: client)
|
|
18
44
|
end
|
|
19
45
|
end
|
|
20
46
|
end
|
data/lib/telnyx/resources/ai.rb
CHANGED
|
@@ -46,11 +46,13 @@ module Telnyx
|
|
|
46
46
|
# @return [Telnyx::Resources::AI::Tools]
|
|
47
47
|
attr_reader :tools
|
|
48
48
|
|
|
49
|
-
#
|
|
50
|
-
#
|
|
51
|
-
#
|
|
52
|
-
#
|
|
53
|
-
#
|
|
49
|
+
# @deprecated
|
|
50
|
+
#
|
|
51
|
+
# **Deprecated**: Use `GET /v2/ai/openai/models` instead. This endpoint returns a
|
|
52
|
+
# list of Open Source and OpenAI models that are available for use. <br /><br />
|
|
53
|
+
# **Note**: Model `id`s will be in the form `{source}/{model_name}`. For example
|
|
54
|
+
# `openai/gpt-4` or `mistralai/Mistral-7B-Instruct-v0.1` consistent with
|
|
55
|
+
# HuggingFace naming conventions.
|
|
54
56
|
#
|
|
55
57
|
# @overload retrieve_models(request_options: {})
|
|
56
58
|
#
|
|
@@ -60,7 +60,11 @@ module Telnyx
|
|
|
60
60
|
)
|
|
61
61
|
end
|
|
62
62
|
|
|
63
|
-
# Order new external vetting for a brand
|
|
63
|
+
# Order new external vetting for a brand.
|
|
64
|
+
#
|
|
65
|
+
# Duplicate orders for the same `evpId` and `vettingClass` return `400` with code
|
|
66
|
+
# `10012` if a successful vetting exists within the last 180 days, or one is
|
|
67
|
+
# currently being processed. Failed vettings can be retried immediately.
|
|
64
68
|
#
|
|
65
69
|
# @overload order(brand_id, evp_id:, vetting_class:, request_options: {})
|
|
66
70
|
#
|
data/lib/telnyx/version.rb
CHANGED
data/lib/telnyx.rb
CHANGED
|
@@ -370,10 +370,14 @@ require_relative "telnyx/models/ai/mission_update_mission_params"
|
|
|
370
370
|
require_relative "telnyx/models/ai/mission_update_mission_response"
|
|
371
371
|
require_relative "telnyx/models/ai/observability"
|
|
372
372
|
require_relative "telnyx/models/ai/observability_req"
|
|
373
|
+
require_relative "telnyx/models/ai/openai/chat_create_completion_params"
|
|
374
|
+
require_relative "telnyx/models/ai/openai/chat_create_completion_response"
|
|
373
375
|
require_relative "telnyx/models/ai/openai/embedding_create_embeddings_params"
|
|
374
376
|
require_relative "telnyx/models/ai/openai/embedding_create_embeddings_response"
|
|
375
377
|
require_relative "telnyx/models/ai/openai/embedding_list_embedding_models_params"
|
|
376
378
|
require_relative "telnyx/models/ai/openai/embedding_list_embedding_models_response"
|
|
379
|
+
require_relative "telnyx/models/ai/openai_list_models_params"
|
|
380
|
+
require_relative "telnyx/models/ai/openai_list_models_response"
|
|
377
381
|
require_relative "telnyx/models/ai/post_conversation_settings"
|
|
378
382
|
require_relative "telnyx/models/ai/post_conversation_settings_req"
|
|
379
383
|
require_relative "telnyx/models/ai/privacy_settings"
|
|
@@ -2449,6 +2453,7 @@ require_relative "telnyx/resources/ai/missions/runs/plan"
|
|
|
2449
2453
|
require_relative "telnyx/resources/ai/missions/runs/telnyx_agents"
|
|
2450
2454
|
require_relative "telnyx/resources/ai/missions/tools"
|
|
2451
2455
|
require_relative "telnyx/resources/ai/openai"
|
|
2456
|
+
require_relative "telnyx/resources/ai/openai/chat"
|
|
2452
2457
|
require_relative "telnyx/resources/ai/openai/embeddings"
|
|
2453
2458
|
require_relative "telnyx/resources/ai/tools"
|
|
2454
2459
|
require_relative "telnyx/resources/alphanumeric_sender_ids"
|
|
@@ -156,6 +156,31 @@ module Telnyx
|
|
|
156
156
|
end
|
|
157
157
|
attr_writer :response_format
|
|
158
158
|
|
|
159
|
+
# If specified, the system will make a best effort to sample deterministically,
|
|
160
|
+
# such that repeated requests with the same `seed` and parameters should return
|
|
161
|
+
# the same result.
|
|
162
|
+
sig { returns(T.nilable(Integer)) }
|
|
163
|
+
attr_reader :seed
|
|
164
|
+
|
|
165
|
+
sig { params(seed: Integer).void }
|
|
166
|
+
attr_writer :seed
|
|
167
|
+
|
|
168
|
+
# Up to 4 sequences where the API will stop generating further tokens. The
|
|
169
|
+
# returned text will not contain the stop sequence.
|
|
170
|
+
sig do
|
|
171
|
+
returns(
|
|
172
|
+
T.nilable(Telnyx::AI::ChatCreateCompletionParams::Stop::Variants)
|
|
173
|
+
)
|
|
174
|
+
end
|
|
175
|
+
attr_reader :stop
|
|
176
|
+
|
|
177
|
+
sig do
|
|
178
|
+
params(
|
|
179
|
+
stop: Telnyx::AI::ChatCreateCompletionParams::Stop::Variants
|
|
180
|
+
).void
|
|
181
|
+
end
|
|
182
|
+
attr_writer :stop
|
|
183
|
+
|
|
159
184
|
# Whether or not to stream data-only server-sent events as they become available.
|
|
160
185
|
sig { returns(T.nilable(T::Boolean)) }
|
|
161
186
|
attr_reader :stream
|
|
@@ -268,6 +293,8 @@ module Telnyx
|
|
|
268
293
|
presence_penalty: Float,
|
|
269
294
|
response_format:
|
|
270
295
|
Telnyx::AI::ChatCreateCompletionParams::ResponseFormat::OrHash,
|
|
296
|
+
seed: Integer,
|
|
297
|
+
stop: Telnyx::AI::ChatCreateCompletionParams::Stop::Variants,
|
|
271
298
|
stream: T::Boolean,
|
|
272
299
|
temperature: Float,
|
|
273
300
|
tool_choice:
|
|
@@ -334,6 +361,13 @@ module Telnyx
|
|
|
334
361
|
# Use this if you want to guarantee a JSON output without defining a schema. For
|
|
335
362
|
# control over the schema, use `guided_json`.
|
|
336
363
|
response_format: nil,
|
|
364
|
+
# If specified, the system will make a best effort to sample deterministically,
|
|
365
|
+
# such that repeated requests with the same `seed` and parameters should return
|
|
366
|
+
# the same result.
|
|
367
|
+
seed: nil,
|
|
368
|
+
# Up to 4 sequences where the API will stop generating further tokens. The
|
|
369
|
+
# returned text will not contain the stop sequence.
|
|
370
|
+
stop: nil,
|
|
337
371
|
# Whether or not to stream data-only server-sent events as they become available.
|
|
338
372
|
stream: nil,
|
|
339
373
|
# Adjusts the "creativity" of the model. Lower values make the model more
|
|
@@ -384,6 +418,8 @@ module Telnyx
|
|
|
384
418
|
presence_penalty: Float,
|
|
385
419
|
response_format:
|
|
386
420
|
Telnyx::AI::ChatCreateCompletionParams::ResponseFormat,
|
|
421
|
+
seed: Integer,
|
|
422
|
+
stop: Telnyx::AI::ChatCreateCompletionParams::Stop::Variants,
|
|
387
423
|
stream: T::Boolean,
|
|
388
424
|
temperature: Float,
|
|
389
425
|
tool_choice:
|
|
@@ -689,6 +725,28 @@ module Telnyx
|
|
|
689
725
|
end
|
|
690
726
|
end
|
|
691
727
|
|
|
728
|
+
# Up to 4 sequences where the API will stop generating further tokens. The
|
|
729
|
+
# returned text will not contain the stop sequence.
|
|
730
|
+
module Stop
|
|
731
|
+
extend Telnyx::Internal::Type::Union
|
|
732
|
+
|
|
733
|
+
Variants = T.type_alias { T.any(String, T::Array[String]) }
|
|
734
|
+
|
|
735
|
+
sig do
|
|
736
|
+
override.returns(
|
|
737
|
+
T::Array[Telnyx::AI::ChatCreateCompletionParams::Stop::Variants]
|
|
738
|
+
)
|
|
739
|
+
end
|
|
740
|
+
def self.variants
|
|
741
|
+
end
|
|
742
|
+
|
|
743
|
+
StringArray =
|
|
744
|
+
T.let(
|
|
745
|
+
Telnyx::Internal::Type::ArrayOf[String],
|
|
746
|
+
Telnyx::Internal::Type::Converter
|
|
747
|
+
)
|
|
748
|
+
end
|
|
749
|
+
|
|
692
750
|
module ToolChoice
|
|
693
751
|
extend Telnyx::Internal::Type::Enum
|
|
694
752
|
|