weave-python 0.27.0__py3-none-any.whl → 0.28.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to the supported public registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in those registries.
Files changed (33)
  1. weave/weaveapi/llmx/v1/architecture_pb2.py +74 -0
  2. weave/weaveapi/llmx/v1/architecture_pb2.pyi +1323 -0
  3. weave/weaveapi/llmx/v1/capabilities_pb2.py +88 -0
  4. weave/weaveapi/llmx/v1/capabilities_pb2.pyi +1613 -0
  5. weave/weaveapi/llmx/v1/model_pb2.py +54 -0
  6. weave/weaveapi/{modex → llmx}/v1/model_pb2.pyi +294 -189
  7. weave/weaveapi/llmx/v1/model_pb2_grpc.py +2 -0
  8. weave/weaveapi/llmx/v1/model_pb2_grpc.pyi +20 -0
  9. weave/weaveapi/llmx/v1/pricing_pb2.py +54 -0
  10. weave/weaveapi/llmx/v1/pricing_pb2.pyi +597 -0
  11. weave/weaveapi/llmx/v1/pricing_pb2_grpc.py +2 -0
  12. weave/weaveapi/llmx/v1/pricing_pb2_grpc.pyi +20 -0
  13. weave/weaveapi/llmx/v1/provider_pb2.py +38 -0
  14. weave/weaveapi/{modex → llmx}/v1/provider_pb2.pyi +31 -19
  15. weave/weaveapi/llmx/v1/provider_pb2_grpc.py +2 -0
  16. weave/weaveapi/llmx/v1/provider_pb2_grpc.pyi +20 -0
  17. weave/weaveapi/llmx/v1/service_pb2.py +180 -0
  18. weave/weaveapi/{modex → llmx}/v1/service_pb2.pyi +174 -44
  19. weave/weaveapi/{modex → llmx}/v1/service_pb2_grpc.py +103 -105
  20. weave/weaveapi/llmx/v1/service_pb2_grpc.pyi +266 -0
  21. {weave_python-0.27.0.dist-info → weave_python-0.28.0.dist-info}/METADATA +1 -1
  22. {weave_python-0.27.0.dist-info → weave_python-0.28.0.dist-info}/RECORD +27 -17
  23. weave/weaveapi/modex/v1/model_pb2.py +0 -58
  24. weave/weaveapi/modex/v1/provider_pb2.py +0 -38
  25. weave/weaveapi/modex/v1/service_pb2.py +0 -180
  26. weave/weaveapi/modex/v1/service_pb2_grpc.pyi +0 -268
  27. weave/weavesql/weavedb/models.py +0 -124
  28. weave/weavesql/weavedb/queries.py +0 -306
  29. /weave/weaveapi/{modex/v1/model_pb2_grpc.py → llmx/v1/architecture_pb2_grpc.py} +0 -0
  30. /weave/weaveapi/{modex/v1/model_pb2_grpc.pyi → llmx/v1/architecture_pb2_grpc.pyi} +0 -0
  31. /weave/weaveapi/{modex/v1/provider_pb2_grpc.py → llmx/v1/capabilities_pb2_grpc.py} +0 -0
  32. /weave/weaveapi/{modex/v1/provider_pb2_grpc.pyi → llmx/v1/capabilities_pb2_grpc.pyi} +0 -0
  33. {weave_python-0.27.0.dist-info → weave_python-0.28.0.dist-info}/WHEEL +0 -0
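The headline change is the move of the generated `modex` API to a new `llmx` package, which also gains architecture, capabilities, and pricing modules; the `weavesql` models and queries are removed outright. For callers, the visible effect should be an import-path change along these lines (a hedged sketch inferred from the rename entries above; the exact public symbols may differ):

    # 0.27.0 (old, removed in this release):
    # from weave.weaveapi.modex.v1 import service_pb2, service_pb2_grpc

    # 0.28.0 (new):
    from weave.weaveapi.llmx.v1 import service_pb2, service_pb2_grpc

The hunk below shows the largest new file, the capabilities stub.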
weave/weaveapi/llmx/v1/capabilities_pb2.pyi
@@ -0,0 +1,1613 @@
+ """
+ @generated by mypy-protobuf. Do not edit manually!
+ isort:skip_file
+ """
+
+ import builtins
+ import collections.abc
+ import google.protobuf.descriptor
+ import google.protobuf.internal.containers
+ import google.protobuf.internal.enum_type_wrapper
+ import google.protobuf.message
+ import sys
+ import typing
+
+ if sys.version_info >= (3, 10):
+     import typing as typing_extensions
+ else:
+     import typing_extensions
+
+ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
+
+ class _CapabilityType:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _CapabilityTypeEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[
+         _CapabilityType.ValueType
+     ],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     CAPABILITY_TYPE_UNSPECIFIED: _CapabilityType.ValueType  # 0
+     CAPABILITY_TYPE_TEXT: _CapabilityType.ValueType  # 1
+     """Basic text input/output capability"""
+     CAPABILITY_TYPE_STRUCTURED_RESPONSE: _CapabilityType.ValueType  # 2
+     """Structured output (JSON, XML, etc.)"""
+     CAPABILITY_TYPE_STREAMING: _CapabilityType.ValueType  # 3
+     """Server-sent or websocket streaming of partial outputs"""
+     CAPABILITY_TYPE_FUNCTION_CALLING: _CapabilityType.ValueType  # 4
+     """Tool/function calling with schema-defined arguments"""
+     CAPABILITY_TYPE_VISION: _CapabilityType.ValueType  # 5
+     """Image understanding and/or generation"""
+     CAPABILITY_TYPE_TOOL_USE: _CapabilityType.ValueType  # 6
+     """General tool use beyond plain function calls"""
+     CAPABILITY_TYPE_SYSTEM_PROMPT: _CapabilityType.ValueType  # 7
+     """System messages/prompt steering support"""
+     CAPABILITY_TYPE_CACHING: _CapabilityType.ValueType  # 8
+     """Prompt/context caching capabilities"""
+     CAPABILITY_TYPE_REASONING: _CapabilityType.ValueType  # 9
+     """Advanced reasoning controls/strategies"""
+     CAPABILITY_TYPE_AUDIO: _CapabilityType.ValueType  # 10
+     """Audio STT/TTS support"""
+     CAPABILITY_TYPE_VIDEO: _CapabilityType.ValueType  # 11
+     """Video analysis/generation support"""
+     CAPABILITY_TYPE_EMBEDDINGS: _CapabilityType.ValueType  # 12
+     """Vector embedding generation"""
+     CAPABILITY_TYPE_FINE_TUNING: _CapabilityType.ValueType  # 13
+     """Custom model training/fine-tuning"""
+
+ class CapabilityType(_CapabilityType, metaclass=_CapabilityTypeEnumTypeWrapper):
+     """Capabilities describe the discrete features that an LLM may support
+     (e.g., function calling, vision, streaming). This file defines the
+     enums and messages used to declare and configure these capabilities
+     on a per-model basis. See model.proto for how capabilities are attached
+     to a Model.
+
+     Core capability types that models can support
+     """
+
+ CAPABILITY_TYPE_UNSPECIFIED: CapabilityType.ValueType  # 0
+ CAPABILITY_TYPE_TEXT: CapabilityType.ValueType  # 1
+ """Basic text input/output capability"""
+ CAPABILITY_TYPE_STRUCTURED_RESPONSE: CapabilityType.ValueType  # 2
+ """Structured output (JSON, XML, etc.)"""
+ CAPABILITY_TYPE_STREAMING: CapabilityType.ValueType  # 3
+ """Server-sent or websocket streaming of partial outputs"""
+ CAPABILITY_TYPE_FUNCTION_CALLING: CapabilityType.ValueType  # 4
+ """Tool/function calling with schema-defined arguments"""
+ CAPABILITY_TYPE_VISION: CapabilityType.ValueType  # 5
+ """Image understanding and/or generation"""
+ CAPABILITY_TYPE_TOOL_USE: CapabilityType.ValueType  # 6
+ """General tool use beyond plain function calls"""
+ CAPABILITY_TYPE_SYSTEM_PROMPT: CapabilityType.ValueType  # 7
+ """System messages/prompt steering support"""
+ CAPABILITY_TYPE_CACHING: CapabilityType.ValueType  # 8
+ """Prompt/context caching capabilities"""
+ CAPABILITY_TYPE_REASONING: CapabilityType.ValueType  # 9
+ """Advanced reasoning controls/strategies"""
+ CAPABILITY_TYPE_AUDIO: CapabilityType.ValueType  # 10
+ """Audio STT/TTS support"""
+ CAPABILITY_TYPE_VIDEO: CapabilityType.ValueType  # 11
+ """Video analysis/generation support"""
+ CAPABILITY_TYPE_EMBEDDINGS: CapabilityType.ValueType  # 12
+ """Vector embedding generation"""
+ CAPABILITY_TYPE_FINE_TUNING: CapabilityType.ValueType  # 13
+ """Custom model training/fine-tuning"""
+ global___CapabilityType = CapabilityType
+
+ class _DataFormat:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _DataFormatEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_DataFormat.ValueType],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     DATA_FORMAT_UNSPECIFIED: _DataFormat.ValueType  # 0
+     DATA_FORMAT_JSON: _DataFormat.ValueType  # 1
+     DATA_FORMAT_YAML: _DataFormat.ValueType  # 2
+     DATA_FORMAT_XML: _DataFormat.ValueType  # 3
+     DATA_FORMAT_JSONL: _DataFormat.ValueType  # 4
+     DATA_FORMAT_CSV: _DataFormat.ValueType  # 5
+     DATA_FORMAT_PARQUET: _DataFormat.ValueType  # 6
+     DATA_FORMAT_PLAIN: _DataFormat.ValueType  # 7
+     DATA_FORMAT_MARKDOWN: _DataFormat.ValueType  # 8
+     DATA_FORMAT_STRUCTURED: _DataFormat.ValueType  # 9
+
+ class DataFormat(_DataFormat, metaclass=_DataFormatEnumTypeWrapper):
+     """Data format types for structured responses, fine-tuning, etc."""
+
+ DATA_FORMAT_UNSPECIFIED: DataFormat.ValueType  # 0
+ DATA_FORMAT_JSON: DataFormat.ValueType  # 1
+ DATA_FORMAT_YAML: DataFormat.ValueType  # 2
+ DATA_FORMAT_XML: DataFormat.ValueType  # 3
+ DATA_FORMAT_JSONL: DataFormat.ValueType  # 4
+ DATA_FORMAT_CSV: DataFormat.ValueType  # 5
+ DATA_FORMAT_PARQUET: DataFormat.ValueType  # 6
+ DATA_FORMAT_PLAIN: DataFormat.ValueType  # 7
+ DATA_FORMAT_MARKDOWN: DataFormat.ValueType  # 8
+ DATA_FORMAT_STRUCTURED: DataFormat.ValueType  # 9
+ global___DataFormat = DataFormat
+
+ class _JsonSchemaType:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _JsonSchemaTypeEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[
+         _JsonSchemaType.ValueType
+     ],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     JSON_SCHEMA_TYPE_UNSPECIFIED: _JsonSchemaType.ValueType  # 0
+     JSON_SCHEMA_TYPE_OBJECT: _JsonSchemaType.ValueType  # 1
+     JSON_SCHEMA_TYPE_ARRAY: _JsonSchemaType.ValueType  # 2
+     JSON_SCHEMA_TYPE_STRING: _JsonSchemaType.ValueType  # 3
+     JSON_SCHEMA_TYPE_NUMBER: _JsonSchemaType.ValueType  # 4
+     JSON_SCHEMA_TYPE_BOOLEAN: _JsonSchemaType.ValueType  # 5
+     JSON_SCHEMA_TYPE_NULL: _JsonSchemaType.ValueType  # 6
+     JSON_SCHEMA_TYPE_INTEGER: _JsonSchemaType.ValueType  # 7
+
+ class JsonSchemaType(_JsonSchemaType, metaclass=_JsonSchemaTypeEnumTypeWrapper):
+     """JSON schema types supported in structured responses"""
+
+ JSON_SCHEMA_TYPE_UNSPECIFIED: JsonSchemaType.ValueType  # 0
+ JSON_SCHEMA_TYPE_OBJECT: JsonSchemaType.ValueType  # 1
+ JSON_SCHEMA_TYPE_ARRAY: JsonSchemaType.ValueType  # 2
+ JSON_SCHEMA_TYPE_STRING: JsonSchemaType.ValueType  # 3
+ JSON_SCHEMA_TYPE_NUMBER: JsonSchemaType.ValueType  # 4
+ JSON_SCHEMA_TYPE_BOOLEAN: JsonSchemaType.ValueType  # 5
+ JSON_SCHEMA_TYPE_NULL: JsonSchemaType.ValueType  # 6
+ JSON_SCHEMA_TYPE_INTEGER: JsonSchemaType.ValueType  # 7
+ global___JsonSchemaType = JsonSchemaType
+
+ class _ImageFormat:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _ImageFormatEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_ImageFormat.ValueType],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     IMAGE_FORMAT_UNSPECIFIED: _ImageFormat.ValueType  # 0
+     IMAGE_FORMAT_JPEG: _ImageFormat.ValueType  # 1
+     IMAGE_FORMAT_PNG: _ImageFormat.ValueType  # 2
+     IMAGE_FORMAT_GIF: _ImageFormat.ValueType  # 3
+     IMAGE_FORMAT_WEBP: _ImageFormat.ValueType  # 4
+     IMAGE_FORMAT_BMP: _ImageFormat.ValueType  # 5
+     IMAGE_FORMAT_TIFF: _ImageFormat.ValueType  # 6
+     IMAGE_FORMAT_SVG: _ImageFormat.ValueType  # 7
+
+ class ImageFormat(_ImageFormat, metaclass=_ImageFormatEnumTypeWrapper):
+     """Image formats"""
+
+ IMAGE_FORMAT_UNSPECIFIED: ImageFormat.ValueType  # 0
+ IMAGE_FORMAT_JPEG: ImageFormat.ValueType  # 1
+ IMAGE_FORMAT_PNG: ImageFormat.ValueType  # 2
+ IMAGE_FORMAT_GIF: ImageFormat.ValueType  # 3
+ IMAGE_FORMAT_WEBP: ImageFormat.ValueType  # 4
+ IMAGE_FORMAT_BMP: ImageFormat.ValueType  # 5
+ IMAGE_FORMAT_TIFF: ImageFormat.ValueType  # 6
+ IMAGE_FORMAT_SVG: ImageFormat.ValueType  # 7
+ global___ImageFormat = ImageFormat
+
+ class _AudioFormat:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _AudioFormatEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_AudioFormat.ValueType],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     AUDIO_FORMAT_UNSPECIFIED: _AudioFormat.ValueType  # 0
+     AUDIO_FORMAT_MP3: _AudioFormat.ValueType  # 1
+     AUDIO_FORMAT_WAV: _AudioFormat.ValueType  # 2
+     AUDIO_FORMAT_OGG: _AudioFormat.ValueType  # 3
+     AUDIO_FORMAT_M4A: _AudioFormat.ValueType  # 4
+     AUDIO_FORMAT_FLAC: _AudioFormat.ValueType  # 5
+     AUDIO_FORMAT_AAC: _AudioFormat.ValueType  # 6
+     AUDIO_FORMAT_WMA: _AudioFormat.ValueType  # 7
+     AUDIO_FORMAT_OPUS: _AudioFormat.ValueType  # 8
+
+ class AudioFormat(_AudioFormat, metaclass=_AudioFormatEnumTypeWrapper):
+     """Audio formats"""
+
+ AUDIO_FORMAT_UNSPECIFIED: AudioFormat.ValueType  # 0
+ AUDIO_FORMAT_MP3: AudioFormat.ValueType  # 1
+ AUDIO_FORMAT_WAV: AudioFormat.ValueType  # 2
+ AUDIO_FORMAT_OGG: AudioFormat.ValueType  # 3
+ AUDIO_FORMAT_M4A: AudioFormat.ValueType  # 4
+ AUDIO_FORMAT_FLAC: AudioFormat.ValueType  # 5
+ AUDIO_FORMAT_AAC: AudioFormat.ValueType  # 6
+ AUDIO_FORMAT_WMA: AudioFormat.ValueType  # 7
+ AUDIO_FORMAT_OPUS: AudioFormat.ValueType  # 8
+ global___AudioFormat = AudioFormat
+
+ class _VideoFormat:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _VideoFormatEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_VideoFormat.ValueType],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     VIDEO_FORMAT_UNSPECIFIED: _VideoFormat.ValueType  # 0
+     VIDEO_FORMAT_MP4: _VideoFormat.ValueType  # 1
+     VIDEO_FORMAT_AVI: _VideoFormat.ValueType  # 2
+     VIDEO_FORMAT_MOV: _VideoFormat.ValueType  # 3
+     VIDEO_FORMAT_MKV: _VideoFormat.ValueType  # 4
+     VIDEO_FORMAT_WEBM: _VideoFormat.ValueType  # 5
+     VIDEO_FORMAT_FLV: _VideoFormat.ValueType  # 6
+     VIDEO_FORMAT_WMV: _VideoFormat.ValueType  # 7
+
+ class VideoFormat(_VideoFormat, metaclass=_VideoFormatEnumTypeWrapper):
+     """Video formats"""
+
+ VIDEO_FORMAT_UNSPECIFIED: VideoFormat.ValueType  # 0
+ VIDEO_FORMAT_MP4: VideoFormat.ValueType  # 1
+ VIDEO_FORMAT_AVI: VideoFormat.ValueType  # 2
+ VIDEO_FORMAT_MOV: VideoFormat.ValueType  # 3
+ VIDEO_FORMAT_MKV: VideoFormat.ValueType  # 4
+ VIDEO_FORMAT_WEBM: VideoFormat.ValueType  # 5
+ VIDEO_FORMAT_FLV: VideoFormat.ValueType  # 6
+ VIDEO_FORMAT_WMV: VideoFormat.ValueType  # 7
+ global___VideoFormat = VideoFormat
+
+ class _ToolType:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _ToolTypeEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_ToolType.ValueType],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     TOOL_TYPE_UNSPECIFIED: _ToolType.ValueType  # 0
+     TOOL_TYPE_FUNCTION: _ToolType.ValueType  # 1
+     TOOL_TYPE_RETRIEVAL: _ToolType.ValueType  # 2
+     TOOL_TYPE_CODE_INTERPRETER: _ToolType.ValueType  # 3
+     TOOL_TYPE_WEB_BROWSER: _ToolType.ValueType  # 4
+     TOOL_TYPE_DATABASE: _ToolType.ValueType  # 5
+     TOOL_TYPE_API: _ToolType.ValueType  # 6
+     TOOL_TYPE_CUSTOM: _ToolType.ValueType  # 7
+
+ class ToolType(_ToolType, metaclass=_ToolTypeEnumTypeWrapper):
+     """Tool types supported"""
+
+ TOOL_TYPE_UNSPECIFIED: ToolType.ValueType  # 0
+ TOOL_TYPE_FUNCTION: ToolType.ValueType  # 1
+ TOOL_TYPE_RETRIEVAL: ToolType.ValueType  # 2
+ TOOL_TYPE_CODE_INTERPRETER: ToolType.ValueType  # 3
+ TOOL_TYPE_WEB_BROWSER: ToolType.ValueType  # 4
+ TOOL_TYPE_DATABASE: ToolType.ValueType  # 5
+ TOOL_TYPE_API: ToolType.ValueType  # 6
+ TOOL_TYPE_CUSTOM: ToolType.ValueType  # 7
+ global___ToolType = ToolType
+
+ class _CacheStrategy:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _CacheStrategyEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[
+         _CacheStrategy.ValueType
+     ],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     CACHE_STRATEGY_UNSPECIFIED: _CacheStrategy.ValueType  # 0
+     CACHE_STRATEGY_HASH: _CacheStrategy.ValueType  # 1
+     CACHE_STRATEGY_SEMANTIC: _CacheStrategy.ValueType  # 2
+     CACHE_STRATEGY_CUSTOM: _CacheStrategy.ValueType  # 3
+     CACHE_STRATEGY_PREFIX: _CacheStrategy.ValueType  # 4
+     CACHE_STRATEGY_SUFFIX: _CacheStrategy.ValueType  # 5
+
+ class CacheStrategy(_CacheStrategy, metaclass=_CacheStrategyEnumTypeWrapper):
+     """Cache key strategies"""
+
+ CACHE_STRATEGY_UNSPECIFIED: CacheStrategy.ValueType  # 0
+ CACHE_STRATEGY_HASH: CacheStrategy.ValueType  # 1
+ CACHE_STRATEGY_SEMANTIC: CacheStrategy.ValueType  # 2
+ CACHE_STRATEGY_CUSTOM: CacheStrategy.ValueType  # 3
+ CACHE_STRATEGY_PREFIX: CacheStrategy.ValueType  # 4
+ CACHE_STRATEGY_SUFFIX: CacheStrategy.ValueType  # 5
+ global___CacheStrategy = CacheStrategy
+
+ class _ReasoningStrategy:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _ReasoningStrategyEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[
+         _ReasoningStrategy.ValueType
+     ],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     REASONING_STRATEGY_UNSPECIFIED: _ReasoningStrategy.ValueType  # 0
+     REASONING_STRATEGY_CHAIN_OF_THOUGHT: _ReasoningStrategy.ValueType  # 1
+     REASONING_STRATEGY_TREE_OF_THOUGHTS: _ReasoningStrategy.ValueType  # 2
+     REASONING_STRATEGY_GRAPH_OF_THOUGHTS: _ReasoningStrategy.ValueType  # 3
+     REASONING_STRATEGY_STEP_BY_STEP: _ReasoningStrategy.ValueType  # 4
+     REASONING_STRATEGY_SELF_CONSISTENCY: _ReasoningStrategy.ValueType  # 5
+     REASONING_STRATEGY_LEAST_TO_MOST: _ReasoningStrategy.ValueType  # 6
+
+ class ReasoningStrategy(
+     _ReasoningStrategy, metaclass=_ReasoningStrategyEnumTypeWrapper
+ ):
+     """Reasoning strategies"""
+
+ REASONING_STRATEGY_UNSPECIFIED: ReasoningStrategy.ValueType  # 0
+ REASONING_STRATEGY_CHAIN_OF_THOUGHT: ReasoningStrategy.ValueType  # 1
+ REASONING_STRATEGY_TREE_OF_THOUGHTS: ReasoningStrategy.ValueType  # 2
+ REASONING_STRATEGY_GRAPH_OF_THOUGHTS: ReasoningStrategy.ValueType  # 3
+ REASONING_STRATEGY_STEP_BY_STEP: ReasoningStrategy.ValueType  # 4
+ REASONING_STRATEGY_SELF_CONSISTENCY: ReasoningStrategy.ValueType  # 5
+ REASONING_STRATEGY_LEAST_TO_MOST: ReasoningStrategy.ValueType  # 6
+ global___ReasoningStrategy = ReasoningStrategy
+
+ class _DistanceMetric:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _DistanceMetricEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[
+         _DistanceMetric.ValueType
+     ],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     DISTANCE_METRIC_UNSPECIFIED: _DistanceMetric.ValueType  # 0
+     DISTANCE_METRIC_COSINE: _DistanceMetric.ValueType  # 1
+     DISTANCE_METRIC_EUCLIDEAN: _DistanceMetric.ValueType  # 2
+     DISTANCE_METRIC_DOT_PRODUCT: _DistanceMetric.ValueType  # 3
+     DISTANCE_METRIC_MANHATTAN: _DistanceMetric.ValueType  # 4
+     DISTANCE_METRIC_HAMMING: _DistanceMetric.ValueType  # 5
+
+ class DistanceMetric(_DistanceMetric, metaclass=_DistanceMetricEnumTypeWrapper):
+     """Distance metrics for embeddings"""
+
+ DISTANCE_METRIC_UNSPECIFIED: DistanceMetric.ValueType  # 0
+ DISTANCE_METRIC_COSINE: DistanceMetric.ValueType  # 1
+ DISTANCE_METRIC_EUCLIDEAN: DistanceMetric.ValueType  # 2
+ DISTANCE_METRIC_DOT_PRODUCT: DistanceMetric.ValueType  # 3
+ DISTANCE_METRIC_MANHATTAN: DistanceMetric.ValueType  # 4
+ DISTANCE_METRIC_HAMMING: DistanceMetric.ValueType  # 5
+ global___DistanceMetric = DistanceMetric
+
+ class _Hyperparameter:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _HyperparameterEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[
+         _Hyperparameter.ValueType
+     ],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     HYPERPARAMETER_UNSPECIFIED: _Hyperparameter.ValueType  # 0
+     HYPERPARAMETER_LEARNING_RATE: _Hyperparameter.ValueType  # 1
+     HYPERPARAMETER_BATCH_SIZE: _Hyperparameter.ValueType  # 2
+     HYPERPARAMETER_EPOCHS: _Hyperparameter.ValueType  # 3
+     HYPERPARAMETER_WARMUP_STEPS: _Hyperparameter.ValueType  # 4
+     HYPERPARAMETER_WEIGHT_DECAY: _Hyperparameter.ValueType  # 5
+     HYPERPARAMETER_GRADIENT_ACCUMULATION: _Hyperparameter.ValueType  # 6
+     HYPERPARAMETER_LR_SCHEDULER: _Hyperparameter.ValueType  # 7
+     HYPERPARAMETER_OPTIMIZER: _Hyperparameter.ValueType  # 8
+     HYPERPARAMETER_DROPOUT: _Hyperparameter.ValueType  # 9
+     HYPERPARAMETER_MAX_SEQUENCE_LENGTH: _Hyperparameter.ValueType  # 10
+
+ class Hyperparameter(_Hyperparameter, metaclass=_HyperparameterEnumTypeWrapper):
+     """Hyperparameter types for fine-tuning"""
+
+ HYPERPARAMETER_UNSPECIFIED: Hyperparameter.ValueType  # 0
+ HYPERPARAMETER_LEARNING_RATE: Hyperparameter.ValueType  # 1
+ HYPERPARAMETER_BATCH_SIZE: Hyperparameter.ValueType  # 2
+ HYPERPARAMETER_EPOCHS: Hyperparameter.ValueType  # 3
+ HYPERPARAMETER_WARMUP_STEPS: Hyperparameter.ValueType  # 4
+ HYPERPARAMETER_WEIGHT_DECAY: Hyperparameter.ValueType  # 5
+ HYPERPARAMETER_GRADIENT_ACCUMULATION: Hyperparameter.ValueType  # 6
+ HYPERPARAMETER_LR_SCHEDULER: Hyperparameter.ValueType  # 7
+ HYPERPARAMETER_OPTIMIZER: Hyperparameter.ValueType  # 8
+ HYPERPARAMETER_DROPOUT: Hyperparameter.ValueType  # 9
+ HYPERPARAMETER_MAX_SEQUENCE_LENGTH: Hyperparameter.ValueType  # 10
+ global___Hyperparameter = Hyperparameter
+
+ class _ModalityDirection:
+     ValueType = typing.NewType("ValueType", builtins.int)
+     V: typing_extensions.TypeAlias = ValueType
+
+ class _ModalityDirectionEnumTypeWrapper(
+     google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[
+         _ModalityDirection.ValueType
+     ],
+     builtins.type,
+ ):
+     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+     MODALITY_DIRECTION_UNSPECIFIED: _ModalityDirection.ValueType  # 0
+     MODALITY_DIRECTION_INPUT_ONLY: _ModalityDirection.ValueType  # 1
+     """Can only process/analyze (e.g., image analysis)"""
+     MODALITY_DIRECTION_OUTPUT_ONLY: _ModalityDirection.ValueType  # 2
+     """Can only generate (e.g., TTS without STT)"""
+     MODALITY_DIRECTION_INPUT_OUTPUT: _ModalityDirection.ValueType  # 3
+     """Can both process and generate"""
+
+ class ModalityDirection(
+     _ModalityDirection, metaclass=_ModalityDirectionEnumTypeWrapper
+ ):
+     """Modality direction - whether a modality supports input, output, or both"""
+
+ MODALITY_DIRECTION_UNSPECIFIED: ModalityDirection.ValueType  # 0
+ MODALITY_DIRECTION_INPUT_ONLY: ModalityDirection.ValueType  # 1
+ """Can only process/analyze (e.g., image analysis)"""
+ MODALITY_DIRECTION_OUTPUT_ONLY: ModalityDirection.ValueType  # 2
+ """Can only generate (e.g., TTS without STT)"""
+ MODALITY_DIRECTION_INPUT_OUTPUT: ModalityDirection.ValueType  # 3
+ """Can both process and generate"""
+ global___ModalityDirection = ModalityDirection
+
+ @typing.final
+ class Capability(google.protobuf.message.Message):
+     """Capability represents a specific feature/ability of a model with its configuration.
+     Each capability has a type and optional detailed configuration.
+     """
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     TYPE_FIELD_NUMBER: builtins.int
+     ENABLED_FIELD_NUMBER: builtins.int
+     TEXT_FIELD_NUMBER: builtins.int
+     STRUCTURED_RESPONSE_FIELD_NUMBER: builtins.int
+     STREAMING_FIELD_NUMBER: builtins.int
+     FUNCTION_CALLING_FIELD_NUMBER: builtins.int
+     VISION_FIELD_NUMBER: builtins.int
+     TOOL_USE_FIELD_NUMBER: builtins.int
+     SYSTEM_PROMPT_FIELD_NUMBER: builtins.int
+     CACHING_FIELD_NUMBER: builtins.int
+     REASONING_FIELD_NUMBER: builtins.int
+     AUDIO_FIELD_NUMBER: builtins.int
+     VIDEO_FIELD_NUMBER: builtins.int
+     EMBEDDINGS_FIELD_NUMBER: builtins.int
+     FINE_TUNING_FIELD_NUMBER: builtins.int
+     ADDITIONAL_INFO_FIELD_NUMBER: builtins.int
+     type: global___CapabilityType.ValueType
+     """The type of capability this represents.
+     Example: CAPABILITY_TYPE_FUNCTION_CALLING for function/tool calling
+     """
+     enabled: builtins.bool
+     """Whether this capability is currently enabled/available.
+     Example: true if the model supports and has this feature active
+     """
+     additional_info: builtins.str
+     """Unstructured additional information about this capability.
+     Used for provider-specific details that don't fit the structured fields.
+     Example: "Supports up to 10 parallel function calls with automatic retry"
+     Example: "Beta feature - may have unexpected behavior"
+     Example: "Optimized for conversational use cases"
+     """
+     @property
+     def text(self) -> global___Text: ...
+     @property
+     def structured_response(self) -> global___StructuredResponse: ...
+     @property
+     def streaming(self) -> global___Streaming: ...
+     @property
+     def function_calling(self) -> global___FunctionCalling: ...
+     @property
+     def vision(self) -> global___Vision: ...
+     @property
+     def tool_use(self) -> global___ToolUse: ...
+     @property
+     def system_prompt(self) -> global___SystemPrompt: ...
+     @property
+     def caching(self) -> global___Caching: ...
+     @property
+     def reasoning(self) -> global___Reasoning: ...
+     @property
+     def audio(self) -> global___Audio: ...
+     @property
+     def video(self) -> global___Video: ...
+     @property
+     def embeddings(self) -> global___Embeddings: ...
+     @property
+     def fine_tuning(self) -> global___FineTuning: ...
+     def __init__(
+         self,
+         *,
+         type: global___CapabilityType.ValueType = ...,
+         enabled: builtins.bool = ...,
+         text: global___Text | None = ...,
+         structured_response: global___StructuredResponse | None = ...,
+         streaming: global___Streaming | None = ...,
+         function_calling: global___FunctionCalling | None = ...,
+         vision: global___Vision | None = ...,
+         tool_use: global___ToolUse | None = ...,
+         system_prompt: global___SystemPrompt | None = ...,
+         caching: global___Caching | None = ...,
+         reasoning: global___Reasoning | None = ...,
+         audio: global___Audio | None = ...,
+         video: global___Video | None = ...,
+         embeddings: global___Embeddings | None = ...,
+         fine_tuning: global___FineTuning | None = ...,
+         additional_info: builtins.str = ...,
+     ) -> None: ...
+     def HasField(
+         self,
+         field_name: typing.Literal[
+             "audio",
+             b"audio",
+             "caching",
+             b"caching",
+             "config",
+             b"config",
+             "embeddings",
+             b"embeddings",
+             "fine_tuning",
+             b"fine_tuning",
+             "function_calling",
+             b"function_calling",
+             "reasoning",
+             b"reasoning",
+             "streaming",
+             b"streaming",
+             "structured_response",
+             b"structured_response",
+             "system_prompt",
+             b"system_prompt",
+             "text",
+             b"text",
+             "tool_use",
+             b"tool_use",
+             "video",
+             b"video",
+             "vision",
+             b"vision",
+         ],
+     ) -> builtins.bool: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "additional_info",
+             b"additional_info",
+             "audio",
+             b"audio",
+             "caching",
+             b"caching",
+             "config",
+             b"config",
+             "embeddings",
+             b"embeddings",
+             "enabled",
+             b"enabled",
+             "fine_tuning",
+             b"fine_tuning",
+             "function_calling",
+             b"function_calling",
+             "reasoning",
+             b"reasoning",
+             "streaming",
+             b"streaming",
+             "structured_response",
+             b"structured_response",
+             "system_prompt",
+             b"system_prompt",
+             "text",
+             b"text",
+             "tool_use",
+             b"tool_use",
+             "type",
+             b"type",
+             "video",
+             b"video",
+             "vision",
+             b"vision",
+         ],
+     ) -> None: ...
+     def WhichOneof(
+         self, oneof_group: typing.Literal["config", b"config"]
+     ) -> (
+         typing.Literal[
+             "text",
+             "structured_response",
+             "streaming",
+             "function_calling",
+             "vision",
+             "tool_use",
+             "system_prompt",
+             "caching",
+             "reasoning",
+             "audio",
+             "video",
+             "embeddings",
+             "fine_tuning",
+         ]
+         | None
+     ): ...
+
+ global___Capability = Capability
+
+ @typing.final
+ class Text(google.protobuf.message.Message):
+     """Text capability configuration for basic text input/output"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     DIRECTION_FIELD_NUMBER: builtins.int
+     MAX_INPUT_LENGTH_FIELD_NUMBER: builtins.int
+     MAX_OUTPUT_LENGTH_FIELD_NUMBER: builtins.int
+     SUPPORTED_LANGUAGES_FIELD_NUMBER: builtins.int
+     SUPPORTS_CONVERSATION_FIELD_NUMBER: builtins.int
+     SUPPORTS_CONTEXT_FIELD_NUMBER: builtins.int
+     direction: global___ModalityDirection.ValueType
+     """Direction of text support (input, output, or both)
+     Example: MODALITY_DIRECTION_INPUT_OUTPUT for chat models
+     """
+     max_input_length: builtins.int
+     """Maximum input text length in characters (if limited)
+     Example: 32000 for models with character limits
+     """
+     max_output_length: builtins.int
+     """Maximum output text length in characters (if limited)
+     Example: 4096 for models with output limits
+     """
+     supports_conversation: builtins.bool
+     """Whether the model supports multi-turn conversations
+     Example: true for chat models, false for completion-only models
+     """
+     supports_context: builtins.bool
+     """Whether the model can maintain context across messages
+     Example: true for stateful models
+     """
+     @property
+     def supported_languages(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
+         """Supported languages for text processing
+         Examples: ["en", "es", "fr", "de", "zh", "ja"]
+         """
+
+     def __init__(
+         self,
+         *,
+         direction: global___ModalityDirection.ValueType = ...,
+         max_input_length: builtins.int = ...,
+         max_output_length: builtins.int = ...,
+         supported_languages: collections.abc.Iterable[builtins.str] | None = ...,
+         supports_conversation: builtins.bool = ...,
+         supports_context: builtins.bool = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "direction",
+             b"direction",
+             "max_input_length",
+             b"max_input_length",
+             "max_output_length",
+             b"max_output_length",
+             "supported_languages",
+             b"supported_languages",
+             "supports_context",
+             b"supports_context",
+             "supports_conversation",
+             b"supports_conversation",
+         ],
+     ) -> None: ...
+
+ global___Text = Text
+
+ @typing.final
+ class StructuredResponse(google.protobuf.message.Message):
+     """Structured response capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     SYSTEM_PROMPT_HINT_FIELD_NUMBER: builtins.int
+     SUPPORTED_FORMATS_FIELD_NUMBER: builtins.int
+     MAX_SCHEMA_DEPTH_FIELD_NUMBER: builtins.int
+     REQUIRES_TOOL_USE_FIELD_NUMBER: builtins.int
+     REQUIRES_JSON_MODE_FIELD_NUMBER: builtins.int
+     MAX_PROPERTIES_FIELD_NUMBER: builtins.int
+     SUPPORTED_TYPES_FIELD_NUMBER: builtins.int
+     SUPPORTS_STREAMING_FIELD_NUMBER: builtins.int
+     system_prompt_hint: builtins.str
+     """Optional guidance to include in the system prompt to elicit structured output
+     Example: "Always return valid JSON matching the provided schema"
+     """
+     max_schema_depth: builtins.int
+     """Maximum allowed nesting depth for JSON schema objects/arrays
+     Example: 5 for moderately complex schemas
+     """
+     requires_tool_use: builtins.bool
+     """Whether structured output requires tool/function use (vs. direct generation)
+     Example: true for providers that only return JSON via tool calls
+     """
+     requires_json_mode: builtins.bool
+     """Whether the model must be in a special "JSON mode" to honor schemas
+     Example: true for models that enforce strict JSON output when enabled
+     """
+     max_properties: builtins.int
+     """Maximum number of top-level properties in object schemas
+     Example: 50
+     """
+     supports_streaming: builtins.bool
+     """Can stream structured responses (not just generate them)
+     Example: true if partial JSON chunks are streamed
+     """
+     @property
+     def supported_formats(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___DataFormat.ValueType
+     ]:
+         """Supported output formats for structured responses
+         Examples: [DATA_FORMAT_JSON, DATA_FORMAT_YAML]
+         """
+
+     @property
+     def supported_types(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___JsonSchemaType.ValueType
+     ]:
+         """JSON Schema primitive/types supported by the model
+         Examples: [JSON_SCHEMA_TYPE_OBJECT, JSON_SCHEMA_TYPE_ARRAY, JSON_SCHEMA_TYPE_STRING]
+         """
+
+     def __init__(
+         self,
+         *,
+         system_prompt_hint: builtins.str = ...,
+         supported_formats: collections.abc.Iterable[global___DataFormat.ValueType]
+         | None = ...,
+         max_schema_depth: builtins.int = ...,
+         requires_tool_use: builtins.bool = ...,
+         requires_json_mode: builtins.bool = ...,
+         max_properties: builtins.int = ...,
+         supported_types: collections.abc.Iterable[global___JsonSchemaType.ValueType]
+         | None = ...,
+         supports_streaming: builtins.bool = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "max_properties",
+             b"max_properties",
+             "max_schema_depth",
+             b"max_schema_depth",
+             "requires_json_mode",
+             b"requires_json_mode",
+             "requires_tool_use",
+             b"requires_tool_use",
+             "supported_formats",
+             b"supported_formats",
+             "supported_types",
+             b"supported_types",
+             "supports_streaming",
+             b"supports_streaming",
+             "system_prompt_hint",
+             b"system_prompt_hint",
+         ],
+     ) -> None: ...
+
+ global___StructuredResponse = StructuredResponse
+
+ @typing.final
+ class Streaming(google.protobuf.message.Message):
+     """Streaming capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     CHUNK_DELIMITER_FIELD_NUMBER: builtins.int
+     BUFFER_SIZE_FIELD_NUMBER: builtins.int
+     SUPPORTS_SSE_FIELD_NUMBER: builtins.int
+     SUPPORTS_USAGE_FIELD_NUMBER: builtins.int
+     AVG_CHUNK_SIZE_BYTES_FIELD_NUMBER: builtins.int
+     MAX_CHUNK_DELAY_MS_FIELD_NUMBER: builtins.int
+     chunk_delimiter: builtins.str
+     """Delimiter used between streamed chunks, if any
+     Example: "\\n\\n" for event-stream boundaries
+     """
+     buffer_size: builtins.int
+     """Preferred buffer size used by the provider when batching output
+     Example: 1024 (bytes)
+     """
+     supports_sse: builtins.bool
+     """Whether Server-Sent Events (SSE) is supported
+     Example: true if provider uses text/event-stream
+     """
+     supports_usage: builtins.bool
+     """Whether token usage info can be emitted incrementally during streaming
+     Example: true if usage deltas are included in stream
+     """
+     avg_chunk_size_bytes: builtins.int
+     """Typical size of individual streamed chunks in bytes
+     Example: 256
+     """
+     max_chunk_delay_ms: builtins.int
+     """Maximum delay between streamed chunks in milliseconds
+     Example: 200
+     """
+     def __init__(
+         self,
+         *,
+         chunk_delimiter: builtins.str = ...,
+         buffer_size: builtins.int = ...,
+         supports_sse: builtins.bool = ...,
+         supports_usage: builtins.bool = ...,
+         avg_chunk_size_bytes: builtins.int = ...,
+         max_chunk_delay_ms: builtins.int = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "avg_chunk_size_bytes",
+             b"avg_chunk_size_bytes",
+             "buffer_size",
+             b"buffer_size",
+             "chunk_delimiter",
+             b"chunk_delimiter",
+             "max_chunk_delay_ms",
+             b"max_chunk_delay_ms",
+             "supports_sse",
+             b"supports_sse",
+             "supports_usage",
+             b"supports_usage",
+         ],
+     ) -> None: ...
+
+ global___Streaming = Streaming
+
+ @typing.final
+ class FunctionCalling(google.protobuf.message.Message):
+     """Function calling capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     MAX_FUNCTIONS_FIELD_NUMBER: builtins.int
+     MAX_PARALLEL_CALLS_FIELD_NUMBER: builtins.int
+     SUPPORTS_PARALLEL_FIELD_NUMBER: builtins.int
+     REQUIRES_TOOL_ROLE_FIELD_NUMBER: builtins.int
+     SUPPORTS_STREAMING_FIELD_NUMBER: builtins.int
+     SUPPORTED_PARAMETER_TYPES_FIELD_NUMBER: builtins.int
+     MAX_NESTING_DEPTH_FIELD_NUMBER: builtins.int
+     max_functions: builtins.int
+     """Maximum number of functions that can be registered per request
+     Example: 128
+     """
+     max_parallel_calls: builtins.int
+     """Maximum number of tool/function calls that may be executed in parallel
+     Example: 5
+     """
+     supports_parallel: builtins.bool
+     """Whether the model can plan and invoke multiple tools concurrently
+     Example: true for models with parallel tool-use orchestration
+     """
+     requires_tool_role: builtins.bool
+     """Whether requests must use a special "tool" role or channel for calls
+     Example: true if provider requires tool role messages
+     """
+     supports_streaming: builtins.bool
+     """Whether intermediate tool call tokens/results are streamable
+     Example: true if tool call arguments/results are streamed as they are produced
+     """
+     max_nesting_depth: builtins.int
+     """Maximum nesting depth allowed for function/tool call plans
+     Example: 3 for limited recursion
+     """
+     @property
+     def supported_parameter_types(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___JsonSchemaType.ValueType
+     ]:
+         """JSON schema types supported for function parameters
+         Examples: [JSON_SCHEMA_TYPE_OBJECT, JSON_SCHEMA_TYPE_ARRAY, JSON_SCHEMA_TYPE_STRING]
+         """
+
+     def __init__(
+         self,
+         *,
+         max_functions: builtins.int = ...,
+         max_parallel_calls: builtins.int = ...,
+         supports_parallel: builtins.bool = ...,
+         requires_tool_role: builtins.bool = ...,
+         supports_streaming: builtins.bool = ...,
+         supported_parameter_types: collections.abc.Iterable[
+             global___JsonSchemaType.ValueType
+         ]
+         | None = ...,
+         max_nesting_depth: builtins.int = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "max_functions",
+             b"max_functions",
+             "max_nesting_depth",
+             b"max_nesting_depth",
+             "max_parallel_calls",
+             b"max_parallel_calls",
+             "requires_tool_role",
+             b"requires_tool_role",
+             "supported_parameter_types",
+             b"supported_parameter_types",
+             "supports_parallel",
+             b"supports_parallel",
+             "supports_streaming",
+             b"supports_streaming",
+         ],
+     ) -> None: ...
+
+ global___FunctionCalling = FunctionCalling
+
+ @typing.final
+ class Vision(google.protobuf.message.Message):
+     """Vision capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     DIRECTION_FIELD_NUMBER: builtins.int
+     SUPPORTED_FORMATS_FIELD_NUMBER: builtins.int
+     MAX_IMAGE_SIZE_BYTES_FIELD_NUMBER: builtins.int
+     MAX_IMAGES_PER_REQUEST_FIELD_NUMBER: builtins.int
+     MAX_RESOLUTION_WIDTH_FIELD_NUMBER: builtins.int
+     MAX_RESOLUTION_HEIGHT_FIELD_NUMBER: builtins.int
+     SUPPORTS_OCR_FIELD_NUMBER: builtins.int
+     SUPPORTS_OBJECT_DETECTION_FIELD_NUMBER: builtins.int
+     SUPPORTS_VIDEO_FRAMES_FIELD_NUMBER: builtins.int
+     direction: global___ModalityDirection.ValueType
+     """Direction of vision support
+     Example: MODALITY_DIRECTION_INPUT_ONLY for analysis-only models
+     Example: MODALITY_DIRECTION_OUTPUT_ONLY for image generation models (DALL-E)
+     Example: MODALITY_DIRECTION_INPUT_OUTPUT for models that can both analyze and generate
+     """
+     max_image_size_bytes: builtins.int
+     """Maximum size per image in bytes
+     Example: 20971520 (20MB) for GPT-4-vision
+     """
+     max_images_per_request: builtins.int
+     """Maximum images per API request (for input)
+     Example: 10 for GPT-4-vision, 1 for some models
+     """
+     max_resolution_width: builtins.int
+     """Maximum image width in pixels
+     Example: 4096 for high-resolution support
+     """
+     max_resolution_height: builtins.int
+     """Maximum image height in pixels
+     Example: 4096 for high-resolution support
+     """
+     supports_ocr: builtins.bool
+     """Supports optical character recognition
+     Example: true if model can extract text from images
+     """
+     supports_object_detection: builtins.bool
+     """Supports object detection/localization
+     Example: true if model can identify and locate objects
+     """
+     supports_video_frames: builtins.bool
+     """Can process video frames as images
+     Example: true if model accepts video frame extraction
+     """
+     @property
+     def supported_formats(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___ImageFormat.ValueType
+     ]:
+         """Supported image file formats
+         Examples: [IMAGE_FORMAT_JPEG, IMAGE_FORMAT_PNG, IMAGE_FORMAT_WEBP]
+         """
+
+     def __init__(
+         self,
+         *,
+         direction: global___ModalityDirection.ValueType = ...,
+         supported_formats: collections.abc.Iterable[global___ImageFormat.ValueType]
+         | None = ...,
+         max_image_size_bytes: builtins.int = ...,
+         max_images_per_request: builtins.int = ...,
+         max_resolution_width: builtins.int = ...,
+         max_resolution_height: builtins.int = ...,
+         supports_ocr: builtins.bool = ...,
+         supports_object_detection: builtins.bool = ...,
+         supports_video_frames: builtins.bool = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "direction",
+             b"direction",
+             "max_image_size_bytes",
+             b"max_image_size_bytes",
+             "max_images_per_request",
+             b"max_images_per_request",
+             "max_resolution_height",
+             b"max_resolution_height",
+             "max_resolution_width",
+             b"max_resolution_width",
+             "supported_formats",
+             b"supported_formats",
+             "supports_object_detection",
+             b"supports_object_detection",
+             "supports_ocr",
+             b"supports_ocr",
+             "supports_video_frames",
+             b"supports_video_frames",
+         ],
+     ) -> None: ...
+
+ global___Vision = Vision
+
+ @typing.final
+ class ToolUse(google.protobuf.message.Message):
+     """Tool use capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     MAX_TOOLS_FIELD_NUMBER: builtins.int
+     SUPPORTS_SEQUENTIAL_FIELD_NUMBER: builtins.int
+     SUPPORTS_PARALLEL_FIELD_NUMBER: builtins.int
+     MAX_TOOL_ROUNDS_FIELD_NUMBER: builtins.int
+     SUPPORTED_TOOL_TYPES_FIELD_NUMBER: builtins.int
+     max_tools: builtins.int
+     """Maximum number of tools that can be registered or considered
+     Example: 64
+     """
+     supports_sequential: builtins.bool
+     """Whether the model can chain tool invocations one after another
+     Example: true for stepwise tool planning
+     """
+     supports_parallel: builtins.bool
+     """Whether the model can invoke multiple tools concurrently
+     Example: true for parallelized tool execution
+     """
+     max_tool_rounds: builtins.int
+     """Maximum number of tool-use rounds allowed in a single request
+     Example: 10
+     """
+     @property
+     def supported_tool_types(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___ToolType.ValueType
+     ]:
+         """Types of tools supported by the provider/model
+         Examples: [TOOL_TYPE_FUNCTION, TOOL_TYPE_RETRIEVAL]
+         """
+
+     def __init__(
+         self,
+         *,
+         max_tools: builtins.int = ...,
+         supports_sequential: builtins.bool = ...,
+         supports_parallel: builtins.bool = ...,
+         max_tool_rounds: builtins.int = ...,
+         supported_tool_types: collections.abc.Iterable[global___ToolType.ValueType]
+         | None = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "max_tool_rounds",
+             b"max_tool_rounds",
+             "max_tools",
+             b"max_tools",
+             "supported_tool_types",
+             b"supported_tool_types",
+             "supports_parallel",
+             b"supports_parallel",
+             "supports_sequential",
+             b"supports_sequential",
+         ],
+     ) -> None: ...
+
+ global___ToolUse = ToolUse
+
+ @typing.final
+ class SystemPrompt(google.protobuf.message.Message):
+     """System prompt capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     MAX_LENGTH_FIELD_NUMBER: builtins.int
+     SUPPORTS_MULTIPLE_FIELD_NUMBER: builtins.int
+     SUPPORTS_CACHING_FIELD_NUMBER: builtins.int
+     FORMAT_FIELD_NUMBER: builtins.int
+     max_length: builtins.int
+     """Maximum allowed length of system prompt content (characters or tokens)
+     Example: 8000 (characters)
+     """
+     supports_multiple: builtins.bool
+     """Whether multiple system prompts (or segments) can be supplied
+     Example: true if the provider supports multiple system messages
+     """
+     supports_caching: builtins.bool
+     """Whether system prompts can be cached for re-use
+     Example: true if provider supports prompt caching hints
+     """
+     format: global___DataFormat.ValueType
+     """Preferred/required format for system prompts (plain, markdown, etc.)
+     Example: DATA_FORMAT_MARKDOWN
+     """
+     def __init__(
+         self,
+         *,
+         max_length: builtins.int = ...,
+         supports_multiple: builtins.bool = ...,
+         supports_caching: builtins.bool = ...,
+         format: global___DataFormat.ValueType = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "format",
+             b"format",
+             "max_length",
+             b"max_length",
+             "supports_caching",
+             b"supports_caching",
+             "supports_multiple",
+             b"supports_multiple",
+         ],
+     ) -> None: ...
+
+ global___SystemPrompt = SystemPrompt
+
+ @typing.final
+ class Caching(google.protobuf.message.Message):
+     """Caching capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     CACHE_KEY_STRATEGY_FIELD_NUMBER: builtins.int
+     MAX_CACHE_SIZE_BYTES_FIELD_NUMBER: builtins.int
+     CACHE_TTL_SECONDS_FIELD_NUMBER: builtins.int
+     SUPPORTS_CONTEXT_CACHING_FIELD_NUMBER: builtins.int
+     SUPPORTS_PROMPT_CACHING_FIELD_NUMBER: builtins.int
+     MIN_CACHEABLE_TOKENS_FIELD_NUMBER: builtins.int
+     cache_key_strategy: global___CacheStrategy.ValueType
+     """Strategy used to compute cache keys for requests/responses
+     Examples: CACHE_STRATEGY_HASH, CACHE_STRATEGY_SEMANTIC
+     """
+     max_cache_size_bytes: builtins.int
+     """Maximum total cache capacity (bytes)
+     Example: 1073741824 for 1GB
+     """
+     cache_ttl_seconds: builtins.int
+     """Default time-to-live for cache entries (seconds)
+     Example: 3600 for 1 hour
+     """
+     supports_context_caching: builtins.bool
+     """Whether the model/provider supports caching of conversation context
+     Example: true for context window reuse
+     """
+     supports_prompt_caching: builtins.bool
+     """Whether the model/provider supports caching of prompts/prefixes
+     Example: true for prefix caching/token reuse
+     """
+     min_cacheable_tokens: builtins.int
+     """Minimum number of tokens required for an entry to be cacheable
+     Example: 128
+     """
+     def __init__(
+         self,
+         *,
+         cache_key_strategy: global___CacheStrategy.ValueType = ...,
+         max_cache_size_bytes: builtins.int = ...,
+         cache_ttl_seconds: builtins.int = ...,
+         supports_context_caching: builtins.bool = ...,
+         supports_prompt_caching: builtins.bool = ...,
+         min_cacheable_tokens: builtins.int = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "cache_key_strategy",
+             b"cache_key_strategy",
+             "cache_ttl_seconds",
+             b"cache_ttl_seconds",
+             "max_cache_size_bytes",
+             b"max_cache_size_bytes",
+             "min_cacheable_tokens",
+             b"min_cacheable_tokens",
+             "supports_context_caching",
+             b"supports_context_caching",
+             "supports_prompt_caching",
+             b"supports_prompt_caching",
+         ],
+     ) -> None: ...
+
+ global___Caching = Caching
+
+ @typing.final
+ class Reasoning(google.protobuf.message.Message):
+     """Reasoning capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     SUPPORTS_CHAIN_OF_THOUGHT_FIELD_NUMBER: builtins.int
+     SUPPORTS_STEP_TRACKING_FIELD_NUMBER: builtins.int
+     MAX_REASONING_STEPS_FIELD_NUMBER: builtins.int
+     SUPPORTS_SELF_CORRECTION_FIELD_NUMBER: builtins.int
+     REASONING_STRATEGIES_FIELD_NUMBER: builtins.int
+     supports_chain_of_thought: builtins.bool
+     """Whether the model can produce hidden chain-of-thought/assistant reasoning
+     Example: true for models supporting private CoT traces
+     """
+     supports_step_tracking: builtins.bool
+     """Whether the model can expose discrete step indices or markers
+     Example: true for models that number reasoning steps
+     """
+     max_reasoning_steps: builtins.int
+     """Maximum number of internal reasoning steps supported
+     Example: 64
+     """
+     supports_self_correction: builtins.bool
+     """Whether the model can revise earlier steps based on later insights
+     Example: true for self-correction/self-reflection
+     """
+     @property
+     def reasoning_strategies(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___ReasoningStrategy.ValueType
+     ]:
+         """Supported high-level reasoning strategies
+         Examples: [REASONING_STRATEGY_STEP_BY_STEP, REASONING_STRATEGY_SELF_CONSISTENCY]
+         """
+
+     def __init__(
+         self,
+         *,
+         supports_chain_of_thought: builtins.bool = ...,
+         supports_step_tracking: builtins.bool = ...,
+         max_reasoning_steps: builtins.int = ...,
+         supports_self_correction: builtins.bool = ...,
+         reasoning_strategies: collections.abc.Iterable[
+             global___ReasoningStrategy.ValueType
+         ]
+         | None = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "max_reasoning_steps",
+             b"max_reasoning_steps",
+             "reasoning_strategies",
+             b"reasoning_strategies",
+             "supports_chain_of_thought",
+             b"supports_chain_of_thought",
+             "supports_self_correction",
+             b"supports_self_correction",
+             "supports_step_tracking",
+             b"supports_step_tracking",
+         ],
+     ) -> None: ...
+
+ global___Reasoning = Reasoning
+
+ @typing.final
+ class Audio(google.protobuf.message.Message):
+     """Audio capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     DIRECTION_FIELD_NUMBER: builtins.int
+     SUPPORTED_FORMATS_FIELD_NUMBER: builtins.int
+     MAX_DURATION_SECONDS_FIELD_NUMBER: builtins.int
+     MAX_FILE_SIZE_BYTES_FIELD_NUMBER: builtins.int
+     SUPPORTED_LANGUAGES_FIELD_NUMBER: builtins.int
+     SUPPORTS_STREAMING_FIELD_NUMBER: builtins.int
+     SUPPORTS_VOICE_SELECTION_FIELD_NUMBER: builtins.int
+     direction: global___ModalityDirection.ValueType
+     """Direction of audio support
+     Example: MODALITY_DIRECTION_INPUT_ONLY for speech-to-text only
+     Example: MODALITY_DIRECTION_OUTPUT_ONLY for text-to-speech only
+     Example: MODALITY_DIRECTION_INPUT_OUTPUT for models supporting both STT and TTS
+     """
+     max_duration_seconds: builtins.int
+     """Maximum audio duration in seconds
+     Example: 600 for 10-minute limit
+     """
+     max_file_size_bytes: builtins.int
+     """Maximum audio file size in bytes
+     Example: 26214400 (25MB) limit
+     """
+     supports_streaming: builtins.bool
+     """Supports real-time streaming (for live audio)
+     Example: true for real-time voice models
+     """
+     supports_voice_selection: builtins.bool
+     """Supports voice cloning or voice selection
+     Example: true if TTS can use different voices
+     """
+     @property
+     def supported_formats(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___AudioFormat.ValueType
+     ]:
+         """Supported audio file formats
+         Examples: [AUDIO_FORMAT_MP3, AUDIO_FORMAT_WAV, AUDIO_FORMAT_M4A]
+         """
+
+     @property
+     def supported_languages(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
+         """Supported languages for audio processing
+         Examples: ["en", "es", "fr", "de", "zh", "ja"]
+         """
+
+     def __init__(
+         self,
+         *,
+         direction: global___ModalityDirection.ValueType = ...,
+         supported_formats: collections.abc.Iterable[global___AudioFormat.ValueType]
+         | None = ...,
+         max_duration_seconds: builtins.int = ...,
+         max_file_size_bytes: builtins.int = ...,
+         supported_languages: collections.abc.Iterable[builtins.str] | None = ...,
+         supports_streaming: builtins.bool = ...,
+         supports_voice_selection: builtins.bool = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "direction",
+             b"direction",
+             "max_duration_seconds",
+             b"max_duration_seconds",
+             "max_file_size_bytes",
+             b"max_file_size_bytes",
+             "supported_formats",
+             b"supported_formats",
+             "supported_languages",
+             b"supported_languages",
+             "supports_streaming",
+             b"supports_streaming",
+             "supports_voice_selection",
+             b"supports_voice_selection",
+         ],
+     ) -> None: ...
+
+ global___Audio = Audio
+
+ @typing.final
+ class Video(google.protobuf.message.Message):
+     """Video capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     DIRECTION_FIELD_NUMBER: builtins.int
+     SUPPORTED_FORMATS_FIELD_NUMBER: builtins.int
+     MAX_DURATION_SECONDS_FIELD_NUMBER: builtins.int
+     MAX_FILE_SIZE_BYTES_FIELD_NUMBER: builtins.int
+     MAX_FPS_FIELD_NUMBER: builtins.int
+     SUPPORTS_FRAME_EXTRACTION_FIELD_NUMBER: builtins.int
+     MAX_FRAMES_FIELD_NUMBER: builtins.int
+     direction: global___ModalityDirection.ValueType
+     """Direction of video support
+     Example: MODALITY_DIRECTION_INPUT_ONLY for video analysis/understanding
+     Example: MODALITY_DIRECTION_OUTPUT_ONLY for video generation
+     Example: MODALITY_DIRECTION_INPUT_OUTPUT for models that can both analyze and generate
+     """
+     max_duration_seconds: builtins.int
+     """Maximum video duration in seconds
+     Example: 120 for 2-minute limit
+     """
+     max_file_size_bytes: builtins.int
+     """Maximum video file size in bytes
+     Example: 1073741824 (1GB) limit
+     """
+     max_fps: builtins.int
+     """Maximum frames per second supported
+     Example: 30 for standard frame rate
+     """
+     supports_frame_extraction: builtins.bool
+     """Supports extracting and analyzing individual frames
+     Example: true if model can process video as a sequence of images
+     """
+     max_frames: builtins.int
+     """Maximum number of frames that can be analyzed
+     Example: 100 for frame-by-frame analysis limit
+     """
+     @property
+     def supported_formats(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___VideoFormat.ValueType
+     ]:
+         """Supported video file formats
+         Examples: [VIDEO_FORMAT_MP4, VIDEO_FORMAT_MOV, VIDEO_FORMAT_AVI]
+         """
+
+     def __init__(
+         self,
+         *,
+         direction: global___ModalityDirection.ValueType = ...,
+         supported_formats: collections.abc.Iterable[global___VideoFormat.ValueType]
+         | None = ...,
+         max_duration_seconds: builtins.int = ...,
+         max_file_size_bytes: builtins.int = ...,
+         max_fps: builtins.int = ...,
+         supports_frame_extraction: builtins.bool = ...,
+         max_frames: builtins.int = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "direction",
+             b"direction",
+             "max_duration_seconds",
+             b"max_duration_seconds",
+             "max_file_size_bytes",
+             b"max_file_size_bytes",
+             "max_fps",
+             b"max_fps",
+             "max_frames",
+             b"max_frames",
+             "supported_formats",
+             b"supported_formats",
+             "supports_frame_extraction",
+             b"supports_frame_extraction",
+         ],
+     ) -> None: ...
+
+ global___Video = Video
+
+ @typing.final
+ class Embeddings(google.protobuf.message.Message):
+     """Embeddings capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     EMBEDDING_DIMENSIONS_FIELD_NUMBER: builtins.int
+     MAX_INPUT_TOKENS_FIELD_NUMBER: builtins.int
+     SUPPORTS_BATCH_FIELD_NUMBER: builtins.int
+     MAX_BATCH_SIZE_FIELD_NUMBER: builtins.int
+     DISTANCE_METRICS_FIELD_NUMBER: builtins.int
+     embedding_dimensions: builtins.int
+     """Dimensionality of the vector space produced by the model
+     Example: 1536
+     """
+     max_input_tokens: builtins.int
+     """Maximum number of tokens accepted per input item
+     Example: 8192
+     """
+     supports_batch: builtins.bool
+     """Whether the API supports batching multiple inputs in one call
+     Example: true
+     """
+     max_batch_size: builtins.int
+     """Maximum number of items allowed per batch
+     Example: 128
+     """
+     @property
+     def distance_metrics(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___DistanceMetric.ValueType
+     ]:
+         """Supported similarity/distance metrics for index/search
+         Examples: [DISTANCE_METRIC_COSINE, DISTANCE_METRIC_DOT_PRODUCT]
+         """
+
+     def __init__(
+         self,
+         *,
+         embedding_dimensions: builtins.int = ...,
+         max_input_tokens: builtins.int = ...,
+         supports_batch: builtins.bool = ...,
+         max_batch_size: builtins.int = ...,
+         distance_metrics: collections.abc.Iterable[global___DistanceMetric.ValueType]
+         | None = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "distance_metrics",
+             b"distance_metrics",
+             "embedding_dimensions",
+             b"embedding_dimensions",
+             "max_batch_size",
+             b"max_batch_size",
+             "max_input_tokens",
+             b"max_input_tokens",
+             "supports_batch",
+             b"supports_batch",
+         ],
+     ) -> None: ...
+
+ global___Embeddings = Embeddings
+
+ @typing.final
+ class FineTuning(google.protobuf.message.Message):
+     """Fine-tuning capability configuration"""
+
+     DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+     MIN_EXAMPLES_FIELD_NUMBER: builtins.int
+     MAX_EXAMPLES_FIELD_NUMBER: builtins.int
+     SUPPORTED_FORMATS_FIELD_NUMBER: builtins.int
+     MAX_FILE_SIZE_MB_FIELD_NUMBER: builtins.int
+     SUPPORTS_VALIDATION_SET_FIELD_NUMBER: builtins.int
+     HYPERPARAMETERS_FIELD_NUMBER: builtins.int
+     min_examples: builtins.int
+     """Minimum number of training examples required to start fine-tuning
+     Example: 50
+     """
+     max_examples: builtins.int
+     """Maximum number of training examples supported in a single job
+     Example: 500000
+     """
+     max_file_size_mb: builtins.int
+     """Maximum size of individual training files (MB)
+     Example: 512
+     """
+     supports_validation_set: builtins.bool
+     """Whether a separate validation set can be provided
+     Example: true
+     """
+     @property
+     def supported_formats(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___DataFormat.ValueType
+     ]:
+         """Supported dataset/file formats for fine-tuning
+         Examples: [DATA_FORMAT_JSONL, DATA_FORMAT_CSV]
+         """
+
+     @property
+     def hyperparameters(
+         self,
+     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
+         global___Hyperparameter.ValueType
+     ]:
+         """Hyperparameters that can be configured for training
+         Examples: [HYPERPARAMETER_LEARNING_RATE, HYPERPARAMETER_EPOCHS]
+         """
+
+     def __init__(
+         self,
+         *,
+         min_examples: builtins.int = ...,
+         max_examples: builtins.int = ...,
+         supported_formats: collections.abc.Iterable[global___DataFormat.ValueType]
+         | None = ...,
+         max_file_size_mb: builtins.int = ...,
+         supports_validation_set: builtins.bool = ...,
+         hyperparameters: collections.abc.Iterable[global___Hyperparameter.ValueType]
+         | None = ...,
+     ) -> None: ...
+     def ClearField(
+         self,
+         field_name: typing.Literal[
+             "hyperparameters",
+             b"hyperparameters",
+             "max_examples",
+             b"max_examples",
+             "max_file_size_mb",
+             b"max_file_size_mb",
+             "min_examples",
+             b"min_examples",
+             "supported_formats",
+             b"supported_formats",
+             "supports_validation_set",
+             b"supports_validation_set",
+         ],
+     ) -> None: ...
+
+ global___FineTuning = FineTuning
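
To make the shape of the new stubs concrete, here is a minimal usage sketch (not part of the diff; it assumes the generated runtime module ships alongside the .pyi files listed above). It builds a Capability and inspects the "config" oneof, whose members are declared in the WhichOneof overload at the end of the stub:

    from weave.weaveapi.llmx.v1 import capabilities_pb2

    cap = capabilities_pb2.Capability(
        type=capabilities_pb2.CAPABILITY_TYPE_FUNCTION_CALLING,
        enabled=True,
        function_calling=capabilities_pb2.FunctionCalling(
            max_functions=128,
            supports_parallel=True,
        ),
    )

    # The per-type configs share the "config" oneof, so setting one clears the rest.
    assert cap.WhichOneof("config") == "function_calling"
    # The EnumTypeWrapper metaclass exposes Name()/Value() for round-tripping enums.
    print(capabilities_pb2.CapabilityType.Name(cap.type))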