weave-python 0.27.0-py3-none-any.whl → 0.28.1-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
- weave/weaveapi/llmx/v1/architecture_pb2.py +74 -0
- weave/weaveapi/llmx/v1/architecture_pb2.pyi +1323 -0
- weave/weaveapi/llmx/v1/capabilities_pb2.py +88 -0
- weave/weaveapi/llmx/v1/capabilities_pb2.pyi +1613 -0
- weave/weaveapi/llmx/v1/model_pb2.py +54 -0
- weave/weaveapi/{modex → llmx}/v1/model_pb2.pyi +294 -189
- weave/weaveapi/llmx/v1/model_pb2_grpc.py +2 -0
- weave/weaveapi/llmx/v1/model_pb2_grpc.pyi +20 -0
- weave/weaveapi/llmx/v1/pricing_pb2.py +54 -0
- weave/weaveapi/llmx/v1/pricing_pb2.pyi +597 -0
- weave/weaveapi/llmx/v1/pricing_pb2_grpc.py +2 -0
- weave/weaveapi/llmx/v1/pricing_pb2_grpc.pyi +20 -0
- weave/weaveapi/llmx/v1/provider_pb2.py +38 -0
- weave/weaveapi/{modex → llmx}/v1/provider_pb2.pyi +31 -19
- weave/weaveapi/llmx/v1/provider_pb2_grpc.py +2 -0
- weave/weaveapi/llmx/v1/provider_pb2_grpc.pyi +20 -0
- weave/weaveapi/llmx/v1/service_pb2.py +180 -0
- weave/weaveapi/{modex → llmx}/v1/service_pb2.pyi +174 -44
- weave/weaveapi/{modex → llmx}/v1/service_pb2_grpc.py +103 -105
- weave/weaveapi/llmx/v1/service_pb2_grpc.pyi +266 -0
- {weave_python-0.27.0.dist-info → weave_python-0.28.1.dist-info}/METADATA +1 -1
- {weave_python-0.27.0.dist-info → weave_python-0.28.1.dist-info}/RECORD +27 -17
- weave/weaveapi/modex/v1/model_pb2.py +0 -58
- weave/weaveapi/modex/v1/provider_pb2.py +0 -38
- weave/weaveapi/modex/v1/service_pb2.py +0 -180
- weave/weaveapi/modex/v1/service_pb2_grpc.pyi +0 -268
- weave/weavesql/weavedb/models.py +0 -124
- weave/weavesql/weavedb/queries.py +0 -306
- /weave/weaveapi/{modex/v1/model_pb2_grpc.py → llmx/v1/architecture_pb2_grpc.py} +0 -0
- /weave/weaveapi/{modex/v1/model_pb2_grpc.pyi → llmx/v1/architecture_pb2_grpc.pyi} +0 -0
- /weave/weaveapi/{modex/v1/provider_pb2_grpc.py → llmx/v1/capabilities_pb2_grpc.py} +0 -0
- /weave/weaveapi/{modex/v1/provider_pb2_grpc.pyi → llmx/v1/capabilities_pb2_grpc.pyi} +0 -0
- {weave_python-0.27.0.dist-info → weave_python-0.28.1.dist-info}/WHEEL +0 -0
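The notable change in 0.28.1 is that the generated API moves from the modex package to llmx, and pricing, capability, and architecture metadata are split into their own protobuf modules (the old inline Pricing and ModelCapabilities messages are removed from model_pb2). A minimal, hedged sketch of what the rename looks like from client code, assuming the generated modules are importable as weaveapi.llmx.v1.* (the import path used inside the stubs below); the field values are illustrative only:

# Minimal sketch, not an official example. Assumes the generated modules
# resolve as `weaveapi.llmx.v1.*`, matching the imports in the stubs below;
# the field values are made up for illustration.
from weaveapi.llmx.v1 import model_pb2, pricing_pb2

model = model_pb2.Model(
    provider_slug="openai",
    model_id="gpt-4-turbo-2024-04-09",
    display_name="GPT-4 Turbo",
    is_active=True,
    pricing=pricing_pb2.Pricing(),  # Pricing now lives in pricing_pb2, not model_pb2
)
print(model.provider_slug, model.model_id)

Code that imported weaveapi.modex.v1, or used the removed model_pb2.Pricing / model_pb2.ModelCapabilities types, needs to switch to the new module paths.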
weave/weaveapi/{modex → llmx}/v1/model_pb2.pyi

@@ -10,11 +10,18 @@ import google.protobuf.internal.containers
 import google.protobuf.message
 import google.protobuf.timestamp_pb2
 import typing
+import weaveapi.llmx.v1.architecture_pb2
+import weaveapi.llmx.v1.capabilities_pb2
+import weaveapi.llmx.v1.pricing_pb2

 DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

 @typing.final
 class Model(google.protobuf.message.Message):
+    """Model represents a complete AI model with all its metadata, capabilities, and pricing.
+    This is the primary entity for model discovery and comparison.
+    """
+
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

     ID_FIELD_NUMBER: builtins.int
@@ -38,81 +45,208 @@ class Model(google.protobuf.message.Message):
     CONFIGURATION_FIELD_NUMBER: builtins.int
     API_DETAILS_FIELD_NUMBER: builtins.int
     AVAILABILITY_FIELD_NUMBER: builtins.int
+    ARCHITECTURE_FIELD_NUMBER: builtins.int
+    TRAINING_FIELD_NUMBER: builtins.int
+    SAFETY_FIELD_NUMBER: builtins.int
+    LICENSING_FIELD_NUMBER: builtins.int
+    TECHNICAL_SPECS_FIELD_NUMBER: builtins.int
     LAST_SCRAPED_AT_FIELD_NUMBER: builtins.int
     DATA_SOURCES_FIELD_NUMBER: builtins.int
     IS_ACTIVE_FIELD_NUMBER: builtins.int
     IS_DEPRECATED_FIELD_NUMBER: builtins.int
     REPLACEMENT_MODEL_ID_FIELD_NUMBER: builtins.int
-    CREATED_AT_FIELD_NUMBER: builtins.int
-    UPDATED_AT_FIELD_NUMBER: builtins.int
     id: builtins.str
+    """Unique internal identifier for this model record in our database.
+    Example: "mdl_01234567-89ab-cdef-0123-456789abcdef"
+    Generated by our system, not provider-specific
+    """
     provider_id: builtins.str
+    """Provider's unique identifier in our system.
+    Example: "pvd_openai_2024", "pvd_anthropic_2024"
+    Links to Provider entity
+    """
     provider_slug: builtins.str
+    """URL-friendly provider identifier for routing.
+    Examples: "openai", "anthropic", "google", "meta"
+    Used in API paths like /models/openai/gpt-4
+    """
     provider_name: builtins.str
+    """Human-readable provider name for display.
+    Examples: "OpenAI", "Anthropic", "Google AI", "Meta AI"
+    """
     model_id: builtins.str
+    """Provider's specific model identifier as they define it.
+    Examples: "gpt-4-turbo-2024-04-09", "claude-3-opus-20240229"
+    This is what you use in their API calls
+    """
     slug: builtins.str
-    """provider/
+    """Combined provider/model identifier for unique referencing.
+    Format: "{provider_slug}/{model_id}"
+    Examples: "openai/gpt-4", "anthropic/claude-3-opus"
+    """
     name: builtins.str
+    """Model's full technical name from the provider.
+    Examples: "gpt-4-turbo-2024-04-09", "claude-3-opus-20240229"
+    May include version dates or technical details
+    """
     display_name: builtins.str
+    """User-friendly display name for UI presentation.
+    Examples: "GPT-4 Turbo", "Claude 3 Opus", "Gemini Pro"
+    Simplified version of technical name
+    """
     description: builtins.str
+    """Human-readable description of the model's purpose and capabilities.
+    Example: "Most capable GPT-4 model, optimized for complex tasks requiring
+    advanced reasoning, coding, and creative writing. Supports vision, function
+    calling, and JSON mode."
+    """
     version: builtins.str
+    """Model version string from provider.
+    Examples: "2024-04-09", "v1.0", "preview-0125"
+    May be date-based or semantic versioning
+    """
     is_active: builtins.bool
+    """Whether the model is currently available for use.
+    false if model is down, in maintenance, or discontinued
+    """
     is_deprecated: builtins.bool
+    """Whether the model has been officially deprecated.
+    true once deprecation_date has passed or provider announces EOL
+    """
     replacement_model_id: builtins.str
+    """If deprecated, the recommended replacement model's ID.
+    Example: "mdl_gpt4_turbo_2024" replacing "mdl_gpt4_2023"
+    Helps with migration paths
+    """
     @property
     def release_date(self) -> google.protobuf.timestamp_pb2.Timestamp:
-        """
+        """=== Temporal Information ===
+
+        When the model was publicly released by the provider.
+        Example: 2024-04-09 for GPT-4 Turbo April release
+        """

     @property
-    def training_data_cutoff(self) -> google.protobuf.timestamp_pb2.Timestamp:
+    def training_data_cutoff(self) -> google.protobuf.timestamp_pb2.Timestamp:
+        """Last date of data used in training (knowledge cutoff).
+        Example: 2023-12-01 means model knows events up to this date
+        Important for factual queries and current events
+        """
+
     @property
-    def deprecation_date(self) -> google.protobuf.timestamp_pb2.Timestamp:
+    def deprecation_date(self) -> google.protobuf.timestamp_pb2.Timestamp:
+        """When the model will be/was deprecated (if scheduled).
+        Example: 2025-01-15 for planned sunset
+        Null if no deprecation planned
+        """
+
     @property
-    def capabilities(
-
+    def capabilities(
+        self,
+    ) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[
+        weaveapi.llmx.v1.capabilities_pb2.Capability
+    ]:
+        """=== Core Properties ===
+
+        List of specific capabilities this model supports.
+        Examples: text_generation, function_calling, vision, code_interpreter
+        See capabilities.proto for full enumeration
+        """

     @property
     def classification(self) -> global___ModelClassification:
-        """
+        """Model type, architecture, and licensing classification.
+        Categorizes the model for filtering and comparison
+        """

     @property
     def performance(self) -> global___ModelPerformance:
-        """
+        """Benchmark scores and performance metrics.
+        Standardized scores for comparing model capabilities
+        """

     @property
     def tokens(self) -> global___TokenInfo:
-        """
+        """Token limits, processing speed, and tokenizer information.
+        Critical for understanding model constraints
+        """

     @property
-    def pricing(self) ->
-        """
+    def pricing(self) -> weaveapi.llmx.v1.pricing_pb2.Pricing:
+        """Complete pricing structure for all operations.
+        Includes standard, batch, cached, and tool pricing
+        """

     @property
     def configuration(self) -> global___Configuration:
-        """
+        """Supported parameter ranges for generation.
+        Temperature, top_p, and other sampling parameters
+        """

     @property
     def api_details(self) -> global___APIDetails:
-        """
+        """=== Integration Details ===
+
+        API endpoint information and rate limits.
+        Everything needed to call the model programmatically
+        """

     @property
     def availability(self) -> global___Availability:
-        """
+        """Available regions and platforms.
+        Where and how the model can be accessed
+        """
+
+    @property
+    def architecture(self) -> weaveapi.llmx.v1.architecture_pb2.Architecture:
+        """=== Technical Details ===
+
+        Technical architecture information.
+        Transformer type, layer count, attention mechanism, etc.
+        """
+
+    @property
+    def training(self) -> weaveapi.llmx.v1.architecture_pb2.Training:
+        """Training dataset and methodology information.
+        How the model was trained and on what data
+        """
+
+    @property
+    def safety(self) -> weaveapi.llmx.v1.architecture_pb2.Safety:
+        """Built-in safety features and content filtering.
+        Moderation capabilities and safety guardrails
+        """
+
+    @property
+    def licensing(self) -> weaveapi.llmx.v1.architecture_pb2.Licensing:
+        """License type and usage restrictions.
+        Legal terms for using the model
+        """
+
+    @property
+    def technical_specs(self) -> weaveapi.llmx.v1.architecture_pb2.TechnicalSpecs:
+        """Hardware requirements and technical details.
+        Memory, compute requirements for self-hosting
+        """

     @property
     def last_scraped_at(self) -> google.protobuf.timestamp_pb2.Timestamp:
-        """Metadata
+        """=== Metadata ===
+
+        When this model information was last updated in our system.
+        Example: 2024-08-25T10:30:00Z
+        For tracking data freshness
+        """

     @property
     def data_sources(
         self,
-    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
-
-
-
-
-
-    def updated_at(self) -> google.protobuf.timestamp_pb2.Timestamp: ...
+    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
+        """Sources where this model information was collected from.
+        Examples: ["https://openai.com/pricing", "https://platform.openai.com/docs"]
+        For data provenance and verification
+        """
+
     def __init__(
         self,
         *,
@@ -129,53 +263,63 @@ class Model(google.protobuf.message.Message):
         release_date: google.protobuf.timestamp_pb2.Timestamp | None = ...,
         training_data_cutoff: google.protobuf.timestamp_pb2.Timestamp | None = ...,
         deprecation_date: google.protobuf.timestamp_pb2.Timestamp | None = ...,
-        capabilities:
+        capabilities: collections.abc.Iterable[
+            weaveapi.llmx.v1.capabilities_pb2.Capability
+        ]
+        | None = ...,
         classification: global___ModelClassification | None = ...,
         performance: global___ModelPerformance | None = ...,
         tokens: global___TokenInfo | None = ...,
-        pricing:
+        pricing: weaveapi.llmx.v1.pricing_pb2.Pricing | None = ...,
         configuration: global___Configuration | None = ...,
         api_details: global___APIDetails | None = ...,
         availability: global___Availability | None = ...,
+        architecture: weaveapi.llmx.v1.architecture_pb2.Architecture | None = ...,
+        training: weaveapi.llmx.v1.architecture_pb2.Training | None = ...,
+        safety: weaveapi.llmx.v1.architecture_pb2.Safety | None = ...,
+        licensing: weaveapi.llmx.v1.architecture_pb2.Licensing | None = ...,
+        technical_specs: weaveapi.llmx.v1.architecture_pb2.TechnicalSpecs | None = ...,
         last_scraped_at: google.protobuf.timestamp_pb2.Timestamp | None = ...,
         data_sources: collections.abc.Iterable[builtins.str] | None = ...,
         is_active: builtins.bool = ...,
         is_deprecated: builtins.bool = ...,
         replacement_model_id: builtins.str = ...,
-        created_at: google.protobuf.timestamp_pb2.Timestamp | None = ...,
-        updated_at: google.protobuf.timestamp_pb2.Timestamp | None = ...,
     ) -> None: ...
     def HasField(
         self,
         field_name: typing.Literal[
             "api_details",
             b"api_details",
+            "architecture",
+            b"architecture",
             "availability",
             b"availability",
-            "capabilities",
-            b"capabilities",
             "classification",
             b"classification",
             "configuration",
             b"configuration",
-            "created_at",
-            b"created_at",
             "deprecation_date",
             b"deprecation_date",
             "last_scraped_at",
             b"last_scraped_at",
+            "licensing",
+            b"licensing",
             "performance",
             b"performance",
             "pricing",
             b"pricing",
             "release_date",
             b"release_date",
+            "safety",
+            b"safety",
+            "technical_specs",
+            b"technical_specs",
             "tokens",
             b"tokens",
+            "training",
+            b"training",
             "training_data_cutoff",
             b"training_data_cutoff",
-            "updated_at",
-            b"updated_at",
         ],
     ) -> builtins.bool: ...
     def ClearField(
@@ -183,6 +327,8 @@ class Model(google.protobuf.message.Message):
         field_name: typing.Literal[
             "api_details",
             b"api_details",
+            "architecture",
+            b"architecture",
             "availability",
             b"availability",
             "capabilities",
@@ -191,8 +337,6 @@ class Model(google.protobuf.message.Message):
             b"classification",
             "configuration",
             b"configuration",
-            "created_at",
-            b"created_at",
             "data_sources",
             b"data_sources",
             "deprecation_date",
@@ -209,6 +353,8 @@ class Model(google.protobuf.message.Message):
             b"is_deprecated",
             "last_scraped_at",
             b"last_scraped_at",
+            "licensing",
+            b"licensing",
             "model_id",
             b"model_id",
             "name",
@@ -227,14 +373,18 @@ class Model(google.protobuf.message.Message):
             b"release_date",
             "replacement_model_id",
             b"replacement_model_id",
+            "safety",
+            b"safety",
             "slug",
             b"slug",
+            "technical_specs",
+            b"technical_specs",
             "tokens",
             b"tokens",
+            "training",
+            b"training",
             "training_data_cutoff",
             b"training_data_cutoff",
-            "updated_at",
-            b"updated_at",
             "version",
             b"version",
         ],
@@ -242,99 +392,10 @@ class Model(google.protobuf.message.Message):

 global___Model = Model

-@typing.final
-class ModelCapabilities(google.protobuf.message.Message):
-    DESCRIPTOR: google.protobuf.descriptor.Descriptor
-
-    SUPPORTS_CHAT_FIELD_NUMBER: builtins.int
-    SUPPORTS_COMPLETION_FIELD_NUMBER: builtins.int
-    SUPPORTS_EMBEDDINGS_FIELD_NUMBER: builtins.int
-    SUPPORTS_VISION_FIELD_NUMBER: builtins.int
-    SUPPORTS_AUDIO_FIELD_NUMBER: builtins.int
-    SUPPORTS_VIDEO_FIELD_NUMBER: builtins.int
-    SUPPORTS_TOOL_CALLS_FIELD_NUMBER: builtins.int
-    SUPPORTS_FUNCTION_CALLS_FIELD_NUMBER: builtins.int
-    SUPPORTS_JSON_MODE_FIELD_NUMBER: builtins.int
-    SUPPORTS_STREAMING_FIELD_NUMBER: builtins.int
-    SUPPORTS_REASONING_FIELD_NUMBER: builtins.int
-    INPUT_MODALITIES_FIELD_NUMBER: builtins.int
-    OUTPUT_MODALITIES_FIELD_NUMBER: builtins.int
-    supports_chat: builtins.bool
-    supports_completion: builtins.bool
-    supports_embeddings: builtins.bool
-    supports_vision: builtins.bool
-    supports_audio: builtins.bool
-    supports_video: builtins.bool
-    supports_tool_calls: builtins.bool
-    supports_function_calls: builtins.bool
-    supports_json_mode: builtins.bool
-    supports_streaming: builtins.bool
-    supports_reasoning: builtins.bool
-    @property
-    def input_modalities(
-        self,
-    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
-        builtins.str
-    ]: ...
-    @property
-    def output_modalities(
-        self,
-    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
-        builtins.str
-    ]: ...
-    def __init__(
-        self,
-        *,
-        supports_chat: builtins.bool = ...,
-        supports_completion: builtins.bool = ...,
-        supports_embeddings: builtins.bool = ...,
-        supports_vision: builtins.bool = ...,
-        supports_audio: builtins.bool = ...,
-        supports_video: builtins.bool = ...,
-        supports_tool_calls: builtins.bool = ...,
-        supports_function_calls: builtins.bool = ...,
-        supports_json_mode: builtins.bool = ...,
-        supports_streaming: builtins.bool = ...,
-        supports_reasoning: builtins.bool = ...,
-        input_modalities: collections.abc.Iterable[builtins.str] | None = ...,
-        output_modalities: collections.abc.Iterable[builtins.str] | None = ...,
-    ) -> None: ...
-    def ClearField(
-        self,
-        field_name: typing.Literal[
-            "input_modalities",
-            b"input_modalities",
-            "output_modalities",
-            b"output_modalities",
-            "supports_audio",
-            b"supports_audio",
-            "supports_chat",
-            b"supports_chat",
-            "supports_completion",
-            b"supports_completion",
-            "supports_embeddings",
-            b"supports_embeddings",
-            "supports_function_calls",
-            b"supports_function_calls",
-            "supports_json_mode",
-            b"supports_json_mode",
-            "supports_reasoning",
-            b"supports_reasoning",
-            "supports_streaming",
-            b"supports_streaming",
-            "supports_tool_calls",
-            b"supports_tool_calls",
-            "supports_video",
-            b"supports_video",
-            "supports_vision",
-            b"supports_vision",
-        ],
-    ) -> None: ...
-
-global___ModelCapabilities = ModelCapabilities
-
 @typing.final
 class ModelClassification(google.protobuf.message.Message):
+    """ModelClassification categorizes models for filtering and comparison."""
+
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

     MODEL_TYPE_FIELD_NUMBER: builtins.int
@@ -343,12 +404,26 @@ class ModelClassification(google.protobuf.message.Message):
     IS_OPEN_SOURCE_FIELD_NUMBER: builtins.int
     LICENSE_TYPE_FIELD_NUMBER: builtins.int
     model_type: builtins.str
-    """
+    """Primary model type defining its training approach.
+    Examples: "foundation", "fine-tuned", "instruct", "chat", "reasoning"
+    "foundation" = base model, "instruct" = instruction-following
+    """
     architecture: builtins.str
-    """
+    """Underlying model architecture family.
+    Examples: "GPT", "LLaMA", "BERT", "T5", "Transformer", "Mamba"
+    """
     parameter_count: builtins.int
+    """Approximate number of parameters in billions.
+    Examples: 7 (7B), 70 (70B), 175 (175B for GPT-3)
+    """
     is_open_source: builtins.bool
+    """Whether model weights are publicly available.
+    true for LLaMA, Mistral; false for GPT-4, Claude
+    """
     license_type: builtins.str
+    """Software license if open source.
+    Examples: "MIT", "Apache-2.0", "Custom", "Proprietary"
+    """
     def __init__(
         self,
         *,
@@ -378,6 +453,8 @@ global___ModelClassification = ModelClassification

 @typing.final
 class ModelPerformance(google.protobuf.message.Message):
+    """ModelPerformance captures standardized benchmark scores."""
+
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

     @typing.final
@@ -405,16 +482,39 @@ class ModelPerformance(google.protobuf.message.Message):
     MATH_SCORE_FIELD_NUMBER: builtins.int
     BENCHMARK_SCORES_FIELD_NUMBER: builtins.int
     reasoning_score: builtins.float
-    """0-10
+    """General reasoning ability score (0-10 scale).
+    Based on benchmarks like ARC, HellaSwag, MMLU
+    Example: 8.5 for strong reasoning models
+    """
     coding_score: builtins.float
+    """Programming and code generation score (0-10).
+    Based on HumanEval, MBPP benchmarks
+    Example: 9.2 for specialized coding models
+    """
     creative_score: builtins.float
+    """Creative writing and generation score (0-10).
+    Based on human evaluations and creative benchmarks
+    Example: 7.8 for models good at storytelling
+    """
     factual_score: builtins.float
+    """Factual accuracy and knowledge score (0-10).
+    Based on TruthfulQA, factual benchmarks
+    Example: 8.0 for models with good factual recall
+    """
     math_score: builtins.float
+    """Mathematical reasoning score (0-10).
+    Based on GSM8K, MATH benchmarks
+    Example: 9.5 for models excelling at math
+    """
     @property
     def benchmark_scores(
         self,
     ) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.float]:
-        """
+        """Raw benchmark scores by name.
+        Keys: "mmlu", "humaneval", "gsm8k", "truthfulqa", "arc", "hellaswag"
+        Values: Actual benchmark scores (usually 0-100)
+        Example: {"mmlu": 86.4, "humaneval": 92.0, "gsm8k": 95.0}
+        """

     def __init__(
         self,
@@ -449,6 +549,8 @@ global___ModelPerformance = ModelPerformance

 @typing.final
 class TokenInfo(google.protobuf.message.Message):
+    """TokenInfo describes token limits and processing capabilities."""
+
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

     CONTEXT_WINDOW_FIELD_NUMBER: builtins.int
@@ -456,9 +558,24 @@ class TokenInfo(google.protobuf.message.Message):
     TOKENIZER_FIELD_NUMBER: builtins.int
     TOKENS_PER_SECOND_FIELD_NUMBER: builtins.int
     context_window: builtins.int
+    """Maximum tokens the model can process in a single request.
+    Examples: 4096, 8192, 32768, 128000, 200000
+    Includes both input and output tokens
+    """
     max_output_tokens: builtins.int
+    """Maximum tokens that can be generated in response.
+    Examples: 4096 for most models, 16384 for some
+    May be less than context_window
+    """
     tokenizer: builtins.str
+    """Tokenizer algorithm used.
+    Examples: "cl100k_base" (GPT-4), "claude", "sentencepiece"
+    """
     tokens_per_second: builtins.int
+    """Average generation speed in tokens per second.
+    Example: 50 for typical API, 100+ for optimized inference
+    May vary based on load and tier
+    """
     def __init__(
         self,
         *,
@@ -483,56 +600,10 @@ class TokenInfo(google.protobuf.message.Message):

 global___TokenInfo = TokenInfo

-@typing.final
-class Pricing(google.protobuf.message.Message):
-    DESCRIPTOR: google.protobuf.descriptor.Descriptor
-
-    INPUT_PRICE_FIELD_NUMBER: builtins.int
-    OUTPUT_PRICE_FIELD_NUMBER: builtins.int
-    CACHE_READ_PRICE_FIELD_NUMBER: builtins.int
-    CACHE_WRITE_PRICE_FIELD_NUMBER: builtins.int
-    FINE_TUNING_PRICE_FIELD_NUMBER: builtins.int
-    CURRENCY_FIELD_NUMBER: builtins.int
-    input_price: builtins.float
-    """per 1M tokens"""
-    output_price: builtins.float
-    cache_read_price: builtins.float
-    cache_write_price: builtins.float
-    fine_tuning_price: builtins.float
-    currency: builtins.str
-    """USD"""
-    def __init__(
-        self,
-        *,
-        input_price: builtins.float = ...,
-        output_price: builtins.float = ...,
-        cache_read_price: builtins.float = ...,
-        cache_write_price: builtins.float = ...,
-        fine_tuning_price: builtins.float = ...,
-        currency: builtins.str = ...,
-    ) -> None: ...
-    def ClearField(
-        self,
-        field_name: typing.Literal[
-            "cache_read_price",
-            b"cache_read_price",
-            "cache_write_price",
-            b"cache_write_price",
-            "currency",
-            b"currency",
-            "fine_tuning_price",
-            b"fine_tuning_price",
-            "input_price",
-            b"input_price",
-            "output_price",
-            b"output_price",
-        ],
-    ) -> None: ...
-
-global___Pricing = Pricing
-
 @typing.final
 class Configuration(google.protobuf.message.Message):
+    """Configuration defines supported generation parameters."""
+
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

     TEMPERATURE_MIN_FIELD_NUMBER: builtins.int
@@ -541,10 +612,25 @@ class Configuration(google.protobuf.message.Message):
     TOP_P_MIN_FIELD_NUMBER: builtins.int
     TOP_P_MAX_FIELD_NUMBER: builtins.int
     temperature_min: builtins.float
+    """Minimum allowed temperature value.
+    Usually 0.0 for deterministic output
+    """
     temperature_max: builtins.float
+    """Maximum allowed temperature value.
+    Usually 1.0 or 2.0 for high randomness
+    """
     temperature_default: builtins.float
+    """Default temperature if not specified.
+    Typically 0.7 or 1.0
+    """
     top_p_min: builtins.float
+    """Minimum allowed top_p (nucleus sampling).
+    Usually 0.0 to disable
+    """
     top_p_max: builtins.float
+    """Maximum allowed top_p value.
+    Usually 1.0 for full vocabulary
+    """
     def __init__(
         self,
         *,
@@ -574,6 +660,8 @@ global___Configuration = Configuration

 @typing.final
 class APIDetails(google.protobuf.message.Message):
+    """APIDetails provides integration information."""
+
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

     ENDPOINT_FIELD_NUMBER: builtins.int
@@ -581,11 +669,21 @@ class APIDetails(google.protobuf.message.Message):
     RATE_LIMIT_RPM_FIELD_NUMBER: builtins.int
     RATE_LIMIT_TPM_FIELD_NUMBER: builtins.int
     endpoint: builtins.str
+    """Base API endpoint URL.
+    Example: "https://api.openai.com/v1/chat/completions"
+    """
     version: builtins.str
+    """API version identifier.
+    Examples: "v1", "2024-02-01", "beta"
+    """
     rate_limit_rpm: builtins.int
-    """requests per minute
+    """Rate limit in requests per minute.
+    Example: 500 for standard tier, 10000 for enterprise
+    """
     rate_limit_tpm: builtins.int
-    """tokens per minute
+    """Rate limit in tokens per minute.
+    Example: 90000 for GPT-4, 200000 for GPT-3.5
+    """
     def __init__(
         self,
         *,
@@ -612,6 +710,8 @@ global___APIDetails = APIDetails

 @typing.final
 class Availability(google.protobuf.message.Message):
+    """Availability describes where the model can be accessed."""
+
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

     REGIONS_FIELD_NUMBER: builtins.int
@@ -619,15 +719,20 @@ class Availability(google.protobuf.message.Message):
     @property
     def regions(
         self,
-    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
-
-
+    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
+        """Geographic regions where available.
+        Examples: ["us-east-1", "eu-west-1", "asia-pacific"]
+        May use provider-specific region codes
+        """
+
     @property
     def platforms(
         self,
-    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
-
-
+    ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
+        """Platforms/services offering the model.
+        Examples: ["api", "playground", "azure", "vertex-ai", "bedrock"]
+        """
+
     def __init__(
         self,
         *,