telnyx 5.105.0 → 5.106.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 00c56398477600183f977eeff58815a2cc7aaa8c9698ff8f56b59b27590bdd35
4
- data.tar.gz: c2d9c2b3c47e12c4360a49835a7dcc46e68113a83b83d19ee2ae6ee27fc1b86e
3
+ metadata.gz: c40ebb14623acd3df51f2cd6eb3c5b1978936b42835dbf93f611e8a0a599ff7a
4
+ data.tar.gz: 936c360c1278f930e62111b8957b6f594ebd53be542f99983c12956f2ce0caf1
5
5
  SHA512:
6
- metadata.gz: fce247576924b863af47b999bc829f893d9fd60dfc2678b6589eb6ae54f61ba447c1575516d324e8cdc2ff348bbb38be6e101ec74caa5f4d170f3b678d7d4a7d
7
- data.tar.gz: 5a26d164a5e24c72e4d6d83e18ad6a0339ee98d3e5c09379021a24203130c4e5267cdf308030271005e02e3f6465e6213a3d13bdcbb39b80e2bf660ba6d3702d
6
+ metadata.gz: 4da3bea0b147d4c8084e9f86a4aa4328348e5020a8a60d4487c3b8954165fccdb661dd8d326da264852249c27b77cde02e7bba4de97dbd224531b5280cadef72
7
+ data.tar.gz: b5919a5f7b79f7fea42fc6e99f5d708dae9655d8b5838db050707464a677d4128b5be6278059a4de3060b93f001dc01fbe849f2d5b24e4d5bf2829e74ba67b13
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
1
1
  # Changelog
2
2
 
3
+ ## 5.106.0 (2026-05-08)
4
+
5
+ Full Changelog: [v5.105.0...v5.106.0](https://github.com/team-telnyx/telnyx-ruby/compare/v5.105.0...v5.106.0)
6
+
7
+ ### Features
8
+
9
+ * AI-2294: document /ai/openai/models response and refresh LLM examples ([fe820e3](https://github.com/team-telnyx/telnyx-ruby/commit/fe820e34daed754d06fb23d9667442e154fe51c9))
10
+
3
11
  ## 5.105.0 (2026-05-08)
4
12
 
5
13
  Full Changelog: [v5.104.0...v5.105.0](https://github.com/team-telnyx/telnyx-ruby/compare/v5.104.0...v5.105.0)
data/README.md CHANGED
@@ -24,7 +24,7 @@ To use this gem, install via Bundler by adding the following to your application
24
24
  <!-- x-release-please-start-version -->
25
25
 
26
26
  ```ruby
27
- gem "telnyx", "~> 5.105.0"
27
+ gem "telnyx", "~> 5.106.0"
28
28
  ```
29
29
 
30
30
  <!-- x-release-please-end -->
@@ -4,30 +4,207 @@ module Telnyx
4
4
  module Models
5
5
  class ModelMetadata < Telnyx::Internal::Type::BaseModel
6
6
  # @!attribute id
7
+ # Model identifier. For open-source models, follows the
8
+ # `{organization}/{model_name}` convention from Hugging Face (e.g.
9
+ # `moonshotai/Kimi-K2.6`).
7
10
  #
8
11
  # @return [String]
9
12
  required :id, String
10
13
 
11
- # @!attribute created
14
+ # @!attribute context_length
15
+ # Maximum total tokens (prompt + completion) supported by the model in a single
16
+ # request.
12
17
  #
13
18
  # @return [Integer]
14
- required :created, Integer
19
+ required :context_length, Integer
20
+
21
+ # @!attribute created
22
+ # Timestamp at which the model was registered on Telnyx Inference (ISO 8601).
23
+ #
24
+ # @return [Time]
25
+ required :created, Time
26
+
27
+ # @!attribute languages
28
+ # ISO language codes the model supports (e.g. `en`, `es`).
29
+ #
30
+ # @return [Array<String>]
31
+ required :languages, Telnyx::Internal::Type::ArrayOf[String]
32
+
33
+ # @!attribute license
34
+ # License the model is distributed under, e.g. `Apache 2.0`, `MIT`,
35
+ # `Llama 3 Community License`.
36
+ #
37
+ # @return [String]
38
+ required :license, String
39
+
40
+ # @!attribute organization
41
+ # Organization that originally published the model, matching the prefix of `id`
42
+ # for open-source models.
43
+ #
44
+ # @return [String]
45
+ required :organization, String
15
46
 
16
47
  # @!attribute owned_by
48
+ # Owner of the model. `Telnyx` for Telnyx-hosted open-source models, the upstream
49
+ # provider name for proxied models, or the Telnyx organization id for fine-tuned
50
+ # models.
17
51
  #
18
52
  # @return [String]
19
53
  required :owned_by, String
20
54
 
55
+ # @!attribute parameters
56
+ # Total parameter count of the model.
57
+ #
58
+ # @return [Integer]
59
+ required :parameters, Integer
60
+
61
+ # @!attribute tier
62
+ # Billing tier the model belongs to. Used together with `pricing` to determine
63
+ # cost per 1M tokens.
64
+ #
65
+ # @return [Symbol, Telnyx::Models::ModelMetadata::Tier]
66
+ required :tier, enum: -> { Telnyx::ModelMetadata::Tier }
67
+
68
+ # @!attribute base_model
69
+ # Base model the fine-tuned model was trained from. Only set for fine-tuned
70
+ # models.
71
+ #
72
+ # @return [String, nil]
73
+ optional :base_model, String, nil?: true
74
+
75
+ # @!attribute description
76
+ # Short, human-readable summary of what the model is best suited for.
77
+ #
78
+ # @return [String, nil]
79
+ optional :description, String, nil?: true
80
+
81
+ # @!attribute is_fine_tunable
82
+ # Whether the model can be used as a base for a fine-tuning job via
83
+ # `POST /v2/ai/fine_tuning/jobs`.
84
+ #
85
+ # @return [Boolean, nil]
86
+ optional :is_fine_tunable, Telnyx::Internal::Type::Boolean
87
+
88
+ # @!attribute is_vision_supported
89
+ # Whether the model accepts image inputs in chat completions (multimodal vision
90
+ # support).
91
+ #
92
+ # @return [Boolean, nil]
93
+ optional :is_vision_supported, Telnyx::Internal::Type::Boolean
94
+
95
+ # @!attribute max_completion_tokens
96
+ # Maximum number of completion (output) tokens the model will generate per
97
+ # request. `null` if unconstrained beyond `context_length`.
98
+ #
99
+ # @return [Integer, nil]
100
+ optional :max_completion_tokens, Integer, nil?: true
101
+
21
102
  # @!attribute object
103
+ # Object type. Always `model`.
22
104
  #
23
105
  # @return [String, nil]
24
106
  optional :object, String
25
107
 
26
- # @!method initialize(id:, created:, owned_by:, object: nil)
27
- # @param id [String]
28
- # @param created [Integer]
29
- # @param owned_by [String]
30
- # @param object [String]
108
+ # @!attribute parameters_str
109
+ # Human-readable parameter count, e.g. `1.0T`, `753.9B`, `8B`.
110
+ #
111
+ # @return [String, nil]
112
+ optional :parameters_str, String, nil?: true
113
+
114
+ # @!attribute pricing
115
+ # Mapping of token kind to price in USD per 1M tokens, as a string. Typical keys
116
+ # are `input` and `output`; embedding models expose `embedding`. Empty object when
117
+ # pricing is not yet published for the model.
118
+ #
119
+ # @return [Hash{Symbol=>String}, nil]
120
+ optional :pricing, Telnyx::Internal::Type::HashOf[String]
121
+
122
+ # @!attribute recommended_for_assistants
123
+ # Whether Telnyx currently recommends this model as the LLM powering a Telnyx AI
124
+ # Assistant.
125
+ #
126
+ # @return [Boolean, nil]
127
+ optional :recommended_for_assistants, Telnyx::Internal::Type::Boolean
128
+
129
+ # @!attribute regions
130
+ # Public region names where the model is currently deployed (e.g. `us-central-1`,
131
+ # `eu-central-1`).
132
+ #
133
+ # @return [Array<String>, nil]
134
+ optional :regions, Telnyx::Internal::Type::ArrayOf[String]
135
+
136
+ # @!attribute task
137
+ # Primary task the model is intended for, e.g. `text-generation`,
138
+ # `audio-text-to-text`, `feature-extraction` (embeddings).
139
+ #
140
+ # @return [String, nil]
141
+ optional :task, String
142
+
143
+ # @!method initialize(id:, context_length:, created:, languages:, license:, organization:, owned_by:, parameters:, tier:, base_model: nil, description: nil, is_fine_tunable: nil, is_vision_supported: nil, max_completion_tokens: nil, object: nil, parameters_str: nil, pricing: nil, recommended_for_assistants: nil, regions: nil, task: nil)
144
+ # Some parameter documentation has been truncated, see
145
+ # {Telnyx::Models::ModelMetadata} for more details.
146
+ #
147
+ # Metadata for a model available on Telnyx Inference. Returned by
148
+ # `GET /v2/ai/openai/models` (and the deprecated `GET /v2/ai/models`). Open-source
149
+ # models live under their Hugging Face organization (e.g. `moonshotai/Kimi-K2.6`,
150
+ # `zai-org/GLM-5.1-FP8`, `MiniMaxAI/MiniMax-M2.7`); fine-tuned models are owned by
151
+ # the Telnyx organization that trained them.
152
+ #
153
+ # @param id [String] Model identifier. For open-source models, follows the `{organization}/{model_nam
154
+ #
155
+ # @param context_length [Integer] Maximum total tokens (prompt + completion) supported by the model in a single re
156
+ #
157
+ # @param created [Time] Timestamp at which the model was registered on Telnyx Inference (ISO 8601).
158
+ #
159
+ # @param languages [Array<String>] ISO language codes the model supports (e.g. `en`, `es`).
160
+ #
161
+ # @param license [String] License the model is distributed under, e.g. `Apache 2.0`, `MIT`, `Llama 3 Commu
162
+ #
163
+ # @param organization [String] Organization that originally published the model, matching the prefix of `id` fo
164
+ #
165
+ # @param owned_by [String] Owner of the model. `Telnyx` for Telnyx-hosted open-source models, the upstream
166
+ #
167
+ # @param parameters [Integer] Total parameter count of the model.
168
+ #
169
+ # @param tier [Symbol, Telnyx::Models::ModelMetadata::Tier] Billing tier the model belongs to. Used together with `pricing` to determine cos
170
+ #
171
+ # @param base_model [String, nil] Base model the fine-tuned model was trained from. Only set for fine-tuned models
172
+ #
173
+ # @param description [String, nil] Short, human-readable summary of what the model is best suited for.
174
+ #
175
+ # @param is_fine_tunable [Boolean] Whether the model can be used as a base for a fine-tuning job via `POST /v2/ai/f
176
+ #
177
+ # @param is_vision_supported [Boolean] Whether the model accepts image inputs in chat completions (multimodal vision su
178
+ #
179
+ # @param max_completion_tokens [Integer, nil] Maximum number of completion (output) tokens the model will generate per request
180
+ #
181
+ # @param object [String] Object type. Always `model`.
182
+ #
183
+ # @param parameters_str [String, nil] Human-readable parameter count, e.g. `1.0T`, `753.9B`, `8B`.
184
+ #
185
+ # @param pricing [Hash{Symbol=>String}] Mapping of token kind to price in USD per 1M tokens, as a string. Typical keys a
186
+ #
187
+ # @param recommended_for_assistants [Boolean] Whether Telnyx currently recommends this model as the LLM powering a Telnyx AI A
188
+ #
189
+ # @param regions [Array<String>] Public region names where the model is currently deployed (e.g. `us-central-1`,
190
+ #
191
+ # @param task [String] Primary task the model is intended for, e.g. `text-generation`, `audio-text-to-t
192
+
193
+ # Billing tier the model belongs to. Used together with `pricing` to determine
194
+ # cost per 1M tokens.
195
+ #
196
+ # @see Telnyx::Models::ModelMetadata#tier
197
+ module Tier
198
+ extend Telnyx::Internal::Type::Enum
199
+
200
+ SMALL = :small
201
+ MEDIUM = :medium
202
+ LARGE = :large
203
+ UNLISTED = :unlisted
204
+
205
+ # @!method self.values
206
+ # @return [Array<Symbol>]
207
+ end
31
208
  end
32
209
  end
33
210
  end
@@ -12,11 +12,21 @@ module Telnyx
12
12
  # @return [Telnyx::Resources::AI::OpenAI::Chat]
13
13
  attr_reader :chat
14
14
 
15
- # This endpoint returns a list of Open Source and OpenAI models that are available
16
- # for use. <br /><br /> **Note**: Model `id`'s will be in the form
17
- # `{source}/{model_name}`. For example `openai/gpt-4` or
18
- # `mistralai/Mistral-7B-Instruct-v0.1` consistent with HuggingFace naming
19
- # conventions.
15
+ # Lists every model currently available to your account on Telnyx Inference,
16
+ # including SOTA open-source LLMs hosted on Telnyx GPUs (for example
17
+ # `moonshotai/Kimi-K2.6`, `zai-org/GLM-5.1-FP8`, and `MiniMaxAI/MiniMax-M2.7`),
18
+ # embedding models, and any fine-tuned models you have created.
19
+ #
20
+ # Each entry is a `ModelMetadata` object describing the model id, owner, task,
21
+ # context length, supported languages, billing tier, pricing per 1M tokens,
22
+ # deployment regions, and whether the model supports vision or fine-tuning. Use
23
+ # this endpoint to discover model ids you can pass to
24
+ # `POST /v2/ai/openai/chat/completions`.
25
+ #
26
+ # Model ids follow the `{organization}/{model_name}` convention from Hugging Face
27
+ # (for example `moonshotai/Kimi-K2.6`). This endpoint is OpenAI-compatible:
28
+ # clients pointed at `https://api.telnyx.com/v2/ai/openai` can call
29
+ # `client.models.list()` to retrieve the same payload.
20
30
  #
21
31
  # @overload list_models(request_options: {})
22
32
  #
@@ -48,11 +48,15 @@ module Telnyx
48
48
 
49
49
  # @deprecated
50
50
  #
51
- # **Deprecated**: Use `GET /v2/ai/openai/models` instead. This endpoint returns a
52
- # list of Open Source and OpenAI models that are available for use. <br /><br />
53
- # **Note**: Model `id`'s will be in the form `{source}/{model_name}`. For example
54
- # `openai/gpt-4` or `mistralai/Mistral-7B-Instruct-v0.1` consistent with
55
- # HuggingFace naming conventions.
51
+ # **Deprecated**: Use `GET /v2/ai/openai/models` instead.
52
+ #
53
+ # Returns the same `ModelsResponse` payload as the OpenAI-compatible endpoint —
54
+ # open-source LLMs hosted on Telnyx (e.g. `moonshotai/Kimi-K2.6`,
55
+ # `zai-org/GLM-5.1-FP8`, `MiniMaxAI/MiniMax-M2.7`), embedding models, and
56
+ # fine-tuned models — kept around for backwards compatibility. New integrations
57
+ # should use `/v2/ai/openai/models`.
58
+ #
59
+ # Model ids follow the `{organization}/{model_name}` convention from Hugging Face.
56
60
  #
57
61
  # @overload retrieve_models(request_options: {})
58
62
  #
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Telnyx
4
- VERSION = "5.105.0"
4
+ VERSION = "5.106.0"
5
5
  end
@@ -6,39 +6,263 @@ module Telnyx
6
6
  OrHash =
7
7
  T.type_alias { T.any(Telnyx::ModelMetadata, Telnyx::Internal::AnyHash) }
8
8
 
9
+ # Model identifier. For open-source models, follows the
10
+ # `{organization}/{model_name}` convention from Hugging Face (e.g.
11
+ # `moonshotai/Kimi-K2.6`).
9
12
  sig { returns(String) }
10
13
  attr_accessor :id
11
14
 
15
+ # Maximum total tokens (prompt + completion) supported by the model in a single
16
+ # request.
12
17
  sig { returns(Integer) }
18
+ attr_accessor :context_length
19
+
20
+ # Timestamp at which the model was registered on Telnyx Inference (ISO 8601).
21
+ sig { returns(Time) }
13
22
  attr_accessor :created
14
23
 
24
+ # ISO language codes the model supports (e.g. `en`, `es`).
25
+ sig { returns(T::Array[String]) }
26
+ attr_accessor :languages
27
+
28
+ # License the model is distributed under, e.g. `Apache 2.0`, `MIT`,
29
+ # `Llama 3 Community License`.
30
+ sig { returns(String) }
31
+ attr_accessor :license
32
+
33
+ # Organization that originally published the model, matching the prefix of `id`
34
+ # for open-source models.
35
+ sig { returns(String) }
36
+ attr_accessor :organization
37
+
38
+ # Owner of the model. `Telnyx` for Telnyx-hosted open-source models, the upstream
39
+ # provider name for proxied models, or the Telnyx organization id for fine-tuned
40
+ # models.
15
41
  sig { returns(String) }
16
42
  attr_accessor :owned_by
17
43
 
44
+ # Total parameter count of the model.
45
+ sig { returns(Integer) }
46
+ attr_accessor :parameters
47
+
48
+ # Billing tier the model belongs to. Used together with `pricing` to determine
49
+ # cost per 1M tokens.
50
+ sig { returns(Telnyx::ModelMetadata::Tier::TaggedSymbol) }
51
+ attr_accessor :tier
52
+
53
+ # Base model the fine-tuned model was trained from. Only set for fine-tuned
54
+ # models.
55
+ sig { returns(T.nilable(String)) }
56
+ attr_accessor :base_model
57
+
58
+ # Short, human-readable summary of what the model is best suited for.
59
+ sig { returns(T.nilable(String)) }
60
+ attr_accessor :description
61
+
62
+ # Whether the model can be used as a base for a fine-tuning job via
63
+ # `POST /v2/ai/fine_tuning/jobs`.
64
+ sig { returns(T.nilable(T::Boolean)) }
65
+ attr_reader :is_fine_tunable
66
+
67
+ sig { params(is_fine_tunable: T::Boolean).void }
68
+ attr_writer :is_fine_tunable
69
+
70
+ # Whether the model accepts image inputs in chat completions (multimodal vision
71
+ # support).
72
+ sig { returns(T.nilable(T::Boolean)) }
73
+ attr_reader :is_vision_supported
74
+
75
+ sig { params(is_vision_supported: T::Boolean).void }
76
+ attr_writer :is_vision_supported
77
+
78
+ # Maximum number of completion (output) tokens the model will generate per
79
+ # request. `null` if unconstrained beyond `context_length`.
80
+ sig { returns(T.nilable(Integer)) }
81
+ attr_accessor :max_completion_tokens
82
+
83
+ # Object type. Always `model`.
18
84
  sig { returns(T.nilable(String)) }
19
85
  attr_reader :object
20
86
 
21
87
  sig { params(object: String).void }
22
88
  attr_writer :object
23
89
 
90
+ # Human-readable parameter count, e.g. `1.0T`, `753.9B`, `8B`.
91
+ sig { returns(T.nilable(String)) }
92
+ attr_accessor :parameters_str
93
+
94
+ # Mapping of token kind to price in USD per 1M tokens, as a string. Typical keys
95
+ # are `input` and `output`; embedding models expose `embedding`. Empty object when
96
+ # pricing is not yet published for the model.
97
+ sig { returns(T.nilable(T::Hash[Symbol, String])) }
98
+ attr_reader :pricing
99
+
100
+ sig { params(pricing: T::Hash[Symbol, String]).void }
101
+ attr_writer :pricing
102
+
103
+ # Whether Telnyx currently recommends this model as the LLM powering a Telnyx AI
104
+ # Assistant.
105
+ sig { returns(T.nilable(T::Boolean)) }
106
+ attr_reader :recommended_for_assistants
107
+
108
+ sig { params(recommended_for_assistants: T::Boolean).void }
109
+ attr_writer :recommended_for_assistants
110
+
111
+ # Public region names where the model is currently deployed (e.g. `us-central-1`,
112
+ # `eu-central-1`).
113
+ sig { returns(T.nilable(T::Array[String])) }
114
+ attr_reader :regions
115
+
116
+ sig { params(regions: T::Array[String]).void }
117
+ attr_writer :regions
118
+
119
+ # Primary task the model is intended for, e.g. `text-generation`,
120
+ # `audio-text-to-text`, `feature-extraction` (embeddings).
121
+ sig { returns(T.nilable(String)) }
122
+ attr_reader :task
123
+
124
+ sig { params(task: String).void }
125
+ attr_writer :task
126
+
127
+ # Metadata for a model available on Telnyx Inference. Returned by
128
+ # `GET /v2/ai/openai/models` (and the deprecated `GET /v2/ai/models`). Open-source
129
+ # models live under their Hugging Face organization (e.g. `moonshotai/Kimi-K2.6`,
130
+ # `zai-org/GLM-5.1-FP8`, `MiniMaxAI/MiniMax-M2.7`); fine-tuned models are owned by
131
+ # the Telnyx organization that trained them.
24
132
  sig do
25
133
  params(
26
134
  id: String,
27
- created: Integer,
135
+ context_length: Integer,
136
+ created: Time,
137
+ languages: T::Array[String],
138
+ license: String,
139
+ organization: String,
28
140
  owned_by: String,
29
- object: String
141
+ parameters: Integer,
142
+ tier: Telnyx::ModelMetadata::Tier::OrSymbol,
143
+ base_model: T.nilable(String),
144
+ description: T.nilable(String),
145
+ is_fine_tunable: T::Boolean,
146
+ is_vision_supported: T::Boolean,
147
+ max_completion_tokens: T.nilable(Integer),
148
+ object: String,
149
+ parameters_str: T.nilable(String),
150
+ pricing: T::Hash[Symbol, String],
151
+ recommended_for_assistants: T::Boolean,
152
+ regions: T::Array[String],
153
+ task: String
30
154
  ).returns(T.attached_class)
31
155
  end
32
- def self.new(id:, created:, owned_by:, object: nil)
156
+ def self.new(
157
+ # Model identifier. For open-source models, follows the
158
+ # `{organization}/{model_name}` convention from Hugging Face (e.g.
159
+ # `moonshotai/Kimi-K2.6`).
160
+ id:,
161
+ # Maximum total tokens (prompt + completion) supported by the model in a single
162
+ # request.
163
+ context_length:,
164
+ # Timestamp at which the model was registered on Telnyx Inference (ISO 8601).
165
+ created:,
166
+ # ISO language codes the model supports (e.g. `en`, `es`).
167
+ languages:,
168
+ # License the model is distributed under, e.g. `Apache 2.0`, `MIT`,
169
+ # `Llama 3 Community License`.
170
+ license:,
171
+ # Organization that originally published the model, matching the prefix of `id`
172
+ # for open-source models.
173
+ organization:,
174
+ # Owner of the model. `Telnyx` for Telnyx-hosted open-source models, the upstream
175
+ # provider name for proxied models, or the Telnyx organization id for fine-tuned
176
+ # models.
177
+ owned_by:,
178
+ # Total parameter count of the model.
179
+ parameters:,
180
+ # Billing tier the model belongs to. Used together with `pricing` to determine
181
+ # cost per 1M tokens.
182
+ tier:,
183
+ # Base model the fine-tuned model was trained from. Only set for fine-tuned
184
+ # models.
185
+ base_model: nil,
186
+ # Short, human-readable summary of what the model is best suited for.
187
+ description: nil,
188
+ # Whether the model can be used as a base for a fine-tuning job via
189
+ # `POST /v2/ai/fine_tuning/jobs`.
190
+ is_fine_tunable: nil,
191
+ # Whether the model accepts image inputs in chat completions (multimodal vision
192
+ # support).
193
+ is_vision_supported: nil,
194
+ # Maximum number of completion (output) tokens the model will generate per
195
+ # request. `null` if unconstrained beyond `context_length`.
196
+ max_completion_tokens: nil,
197
+ # Object type. Always `model`.
198
+ object: nil,
199
+ # Human-readable parameter count, e.g. `1.0T`, `753.9B`, `8B`.
200
+ parameters_str: nil,
201
+ # Mapping of token kind to price in USD per 1M tokens, as a string. Typical keys
202
+ # are `input` and `output`; embedding models expose `embedding`. Empty object when
203
+ # pricing is not yet published for the model.
204
+ pricing: nil,
205
+ # Whether Telnyx currently recommends this model as the LLM powering a Telnyx AI
206
+ # Assistant.
207
+ recommended_for_assistants: nil,
208
+ # Public region names where the model is currently deployed (e.g. `us-central-1`,
209
+ # `eu-central-1`).
210
+ regions: nil,
211
+ # Primary task the model is intended for, e.g. `text-generation`,
212
+ # `audio-text-to-text`, `feature-extraction` (embeddings).
213
+ task: nil
214
+ )
33
215
  end
34
216
 
35
217
  sig do
36
218
  override.returns(
37
- { id: String, created: Integer, owned_by: String, object: String }
219
+ {
220
+ id: String,
221
+ context_length: Integer,
222
+ created: Time,
223
+ languages: T::Array[String],
224
+ license: String,
225
+ organization: String,
226
+ owned_by: String,
227
+ parameters: Integer,
228
+ tier: Telnyx::ModelMetadata::Tier::TaggedSymbol,
229
+ base_model: T.nilable(String),
230
+ description: T.nilable(String),
231
+ is_fine_tunable: T::Boolean,
232
+ is_vision_supported: T::Boolean,
233
+ max_completion_tokens: T.nilable(Integer),
234
+ object: String,
235
+ parameters_str: T.nilable(String),
236
+ pricing: T::Hash[Symbol, String],
237
+ recommended_for_assistants: T::Boolean,
238
+ regions: T::Array[String],
239
+ task: String
240
+ }
38
241
  )
39
242
  end
40
243
  def to_hash
41
244
  end
245
+
246
+ # Billing tier the model belongs to. Used together with `pricing` to determine
247
+ # cost per 1M tokens.
248
+ module Tier
249
+ extend Telnyx::Internal::Type::Enum
250
+
251
+ TaggedSymbol =
252
+ T.type_alias { T.all(Symbol, Telnyx::ModelMetadata::Tier) }
253
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
254
+
255
+ SMALL = T.let(:small, Telnyx::ModelMetadata::Tier::TaggedSymbol)
256
+ MEDIUM = T.let(:medium, Telnyx::ModelMetadata::Tier::TaggedSymbol)
257
+ LARGE = T.let(:large, Telnyx::ModelMetadata::Tier::TaggedSymbol)
258
+ UNLISTED = T.let(:unlisted, Telnyx::ModelMetadata::Tier::TaggedSymbol)
259
+
260
+ sig do
261
+ override.returns(T::Array[Telnyx::ModelMetadata::Tier::TaggedSymbol])
262
+ end
263
+ def self.values
264
+ end
265
+ end
42
266
  end
43
267
  end
44
268
  end
@@ -12,11 +12,21 @@ module Telnyx
12
12
  sig { returns(Telnyx::Resources::AI::OpenAI::Chat) }
13
13
  attr_reader :chat
14
14
 
15
- # This endpoint returns a list of Open Source and OpenAI models that are available
16
- # for use. <br /><br /> **Note**: Model `id`'s will be in the form
17
- # `{source}/{model_name}`. For example `openai/gpt-4` or
18
- # `mistralai/Mistral-7B-Instruct-v0.1` consistent with HuggingFace naming
19
- # conventions.
15
+ # Lists every model currently available to your account on Telnyx Inference,
16
+ # including SOTA open-source LLMs hosted on Telnyx GPUs (for example
17
+ # `moonshotai/Kimi-K2.6`, `zai-org/GLM-5.1-FP8`, and `MiniMaxAI/MiniMax-M2.7`),
18
+ # embedding models, and any fine-tuned models you have created.
19
+ #
20
+ # Each entry is a `ModelMetadata` object describing the model id, owner, task,
21
+ # context length, supported languages, billing tier, pricing per 1M tokens,
22
+ # deployment regions, and whether the model supports vision or fine-tuning. Use
23
+ # this endpoint to discover model ids you can pass to
24
+ # `POST /v2/ai/openai/chat/completions`.
25
+ #
26
+ # Model ids follow the `{organization}/{model_name}` convention from Hugging Face
27
+ # (for example `moonshotai/Kimi-K2.6`). This endpoint is OpenAI-compatible:
28
+ # clients pointed at `https://api.telnyx.com/v2/ai/openai` can call
29
+ # `client.models.list()` to retrieve the same payload.
20
30
  sig do
21
31
  params(request_options: Telnyx::RequestOptions::OrHash).returns(
22
32
  Telnyx::Models::AI::OpenAIListModelsResponse
@@ -46,11 +46,15 @@ module Telnyx
46
46
  sig { returns(Telnyx::Resources::AI::Tools) }
47
47
  attr_reader :tools
48
48
 
49
- # **Deprecated**: Use `GET /v2/ai/openai/models` instead. This endpoint returns a
50
- # list of Open Source and OpenAI models that are available for use. <br /><br />
51
- # **Note**: Model `id`'s will be in the form `{source}/{model_name}`. For example
52
- # `openai/gpt-4` or `mistralai/Mistral-7B-Instruct-v0.1` consistent with
53
- # HuggingFace naming conventions.
49
+ # **Deprecated**: Use `GET /v2/ai/openai/models` instead.
50
+ #
51
+ # Returns the same `ModelsResponse` payload as the OpenAI-compatible endpoint —
52
+ # open-source LLMs hosted on Telnyx (e.g. `moonshotai/Kimi-K2.6`,
53
+ # `zai-org/GLM-5.1-FP8`, `MiniMaxAI/MiniMax-M2.7`), embedding models, and
54
+ # fine-tuned models — kept around for backwards compatibility. New integrations
55
+ # should use `/v2/ai/openai/models`.
56
+ #
57
+ # Model ids follow the `{organization}/{model_name}` convention from Hugging Face.
54
58
  sig do
55
59
  params(request_options: Telnyx::RequestOptions::OrHash).returns(
56
60
  Telnyx::Models::AIRetrieveModelsResponse
@@ -1,32 +1,142 @@
1
1
  module Telnyx
2
2
  module Models
3
3
  type model_metadata =
4
- { id: String, created: Integer, owned_by: String, object: String }
4
+ {
5
+ id: String,
6
+ context_length: Integer,
7
+ created: Time,
8
+ languages: ::Array[String],
9
+ license: String,
10
+ organization: String,
11
+ owned_by: String,
12
+ parameters: Integer,
13
+ tier: Telnyx::Models::ModelMetadata::tier,
14
+ base_model: String?,
15
+ description: String?,
16
+ is_fine_tunable: bool,
17
+ is_vision_supported: bool,
18
+ max_completion_tokens: Integer?,
19
+ object: String,
20
+ parameters_str: String?,
21
+ pricing: ::Hash[Symbol, String],
22
+ recommended_for_assistants: bool,
23
+ regions: ::Array[String],
24
+ task: String
25
+ }
5
26
 
6
27
  class ModelMetadata < Telnyx::Internal::Type::BaseModel
7
28
  attr_accessor id: String
8
29
 
9
- attr_accessor created: Integer
30
+ attr_accessor context_length: Integer
31
+
32
+ attr_accessor created: Time
33
+
34
+ attr_accessor languages: ::Array[String]
35
+
36
+ attr_accessor license: String
37
+
38
+ attr_accessor organization: String
10
39
 
11
40
  attr_accessor owned_by: String
12
41
 
42
+ attr_accessor parameters: Integer
43
+
44
+ attr_accessor tier: Telnyx::Models::ModelMetadata::tier
45
+
46
+ attr_accessor base_model: String?
47
+
48
+ attr_accessor description: String?
49
+
50
+ attr_reader is_fine_tunable: bool?
51
+
52
+ def is_fine_tunable=: (bool) -> bool
53
+
54
+ attr_reader is_vision_supported: bool?
55
+
56
+ def is_vision_supported=: (bool) -> bool
57
+
58
+ attr_accessor max_completion_tokens: Integer?
59
+
13
60
  attr_reader object: String?
14
61
 
15
62
  def object=: (String) -> String
16
63
 
64
+ attr_accessor parameters_str: String?
65
+
66
+ attr_reader pricing: ::Hash[Symbol, String]?
67
+
68
+ def pricing=: (::Hash[Symbol, String]) -> ::Hash[Symbol, String]
69
+
70
+ attr_reader recommended_for_assistants: bool?
71
+
72
+ def recommended_for_assistants=: (bool) -> bool
73
+
74
+ attr_reader regions: ::Array[String]?
75
+
76
+ def regions=: (::Array[String]) -> ::Array[String]
77
+
78
+ attr_reader task: String?
79
+
80
+ def task=: (String) -> String
81
+
17
82
  def initialize: (
18
83
  id: String,
19
- created: Integer,
84
+ context_length: Integer,
85
+ created: Time,
86
+ languages: ::Array[String],
87
+ license: String,
88
+ organization: String,
20
89
  owned_by: String,
21
- ?object: String
90
+ parameters: Integer,
91
+ tier: Telnyx::Models::ModelMetadata::tier,
92
+ ?base_model: String?,
93
+ ?description: String?,
94
+ ?is_fine_tunable: bool,
95
+ ?is_vision_supported: bool,
96
+ ?max_completion_tokens: Integer?,
97
+ ?object: String,
98
+ ?parameters_str: String?,
99
+ ?pricing: ::Hash[Symbol, String],
100
+ ?recommended_for_assistants: bool,
101
+ ?regions: ::Array[String],
102
+ ?task: String
22
103
  ) -> void
23
104
 
24
105
  def to_hash: -> {
25
106
  id: String,
26
- created: Integer,
107
+ context_length: Integer,
108
+ created: Time,
109
+ languages: ::Array[String],
110
+ license: String,
111
+ organization: String,
27
112
  owned_by: String,
28
- object: String
113
+ parameters: Integer,
114
+ tier: Telnyx::Models::ModelMetadata::tier,
115
+ base_model: String?,
116
+ description: String?,
117
+ is_fine_tunable: bool,
118
+ is_vision_supported: bool,
119
+ max_completion_tokens: Integer?,
120
+ object: String,
121
+ parameters_str: String?,
122
+ pricing: ::Hash[Symbol, String],
123
+ recommended_for_assistants: bool,
124
+ regions: ::Array[String],
125
+ task: String
29
126
  }
127
+
128
+ type tier = :small | :medium | :large | :unlisted
129
+
130
+ module Tier
131
+ extend Telnyx::Internal::Type::Enum
132
+
133
+ SMALL: :small
134
+ MEDIUM: :medium
135
+ LARGE: :large
136
+ UNLISTED: :unlisted
137
+
138
+ def self?.values: -> ::Array[Telnyx::Models::ModelMetadata::tier]
139
+ end
30
140
  end
31
141
  end
32
142
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: telnyx
3
3
  version: !ruby/object:Gem::Version
4
- version: 5.105.0
4
+ version: 5.106.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Telnyx