weave-typescript 0.5.1 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/dist/weaveapi/llmx/v1/architecture.pb.d.ts +377 -0
  2. package/dist/weaveapi/llmx/v1/architecture.pb.js +2756 -0
  3. package/dist/weaveapi/llmx/v1/capabilities.pb.d.ts +491 -0
  4. package/dist/weaveapi/llmx/v1/capabilities.pb.js +3159 -0
  5. package/dist/weaveapi/{modex → llmx}/v1/model.pb.d.ts +86 -42
  6. package/dist/weaveapi/{modex → llmx}/v1/model.pb.js +119 -442
  7. package/dist/weaveapi/llmx/v1/pricing.pb.d.ts +142 -0
  8. package/dist/weaveapi/llmx/v1/pricing.pb.js +825 -0
  9. package/dist/weaveapi/{modex → llmx}/v1/provider.pb.d.ts +1 -3
  10. package/dist/weaveapi/{modex → llmx}/v1/provider.pb.js +3 -57
  11. package/dist/weaveapi/{modex → llmx}/v1/service.pb.d.ts +20 -20
  12. package/dist/weaveapi/{modex → llmx}/v1/service.pb.js +17 -17
  13. package/dist/weavesql/llmxdb/capabilities_sql.d.ts +151 -0
  14. package/dist/weavesql/llmxdb/capabilities_sql.js +241 -0
  15. package/dist/weavesql/llmxdb/changes_sql.d.ts +81 -0
  16. package/dist/weavesql/llmxdb/changes_sql.js +118 -0
  17. package/dist/weavesql/llmxdb/models_sql.d.ts +198 -0
  18. package/dist/weavesql/llmxdb/models_sql.js +244 -0
  19. package/dist/weavesql/llmxdb/providers_sql.d.ts +122 -0
  20. package/dist/weavesql/llmxdb/providers_sql.js +179 -0
  21. package/dist/weavesql/llmxdb/scraper_runs_sql.d.ts +83 -0
  22. package/dist/weavesql/llmxdb/scraper_runs_sql.js +137 -0
  23. package/dist/weavesql/llmxdb/search_sql.d.ts +272 -0
  24. package/dist/weavesql/llmxdb/search_sql.js +348 -0
  25. package/dist/weavesql/weavedb/dataset_sql.d.ts +17 -0
  26. package/dist/weavesql/weavedb/dataset_sql.js +21 -0
  27. package/dist/weavesql/weavedb/relationships_sql.d.ts +16 -0
  28. package/dist/weavesql/weavedb/relationships_sql.js +32 -0
  29. package/dist/weavesql/weavedb/storage_sql.d.ts +33 -0
  30. package/dist/weavesql/weavedb/storage_sql.js +54 -0
  31. package/dist/weavesql/weavedb/synthesizer_sql.d.ts +28 -0
  32. package/dist/weavesql/weavedb/synthesizer_sql.js +42 -0
  33. package/package.json +4 -1
@@ -0,0 +1,491 @@
1
+ import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire";
2
+ export declare const protobufPackage = "weaveapi.llmx.v1";
3
+ /** Core capability types that models can support */
4
+ export declare enum CapabilityType {
5
+ CAPABILITY_TYPE_UNSPECIFIED = 0,
6
+ /** CAPABILITY_TYPE_TEXT - Basic text input/output capability */
7
+ CAPABILITY_TYPE_TEXT = 1,
8
+ /** CAPABILITY_TYPE_STRUCTURED_RESPONSE - Structured output (JSON, XML, etc.) */
9
+ CAPABILITY_TYPE_STRUCTURED_RESPONSE = 2,
10
+ CAPABILITY_TYPE_STREAMING = 3,
11
+ CAPABILITY_TYPE_FUNCTION_CALLING = 4,
12
+ CAPABILITY_TYPE_VISION = 5,
13
+ CAPABILITY_TYPE_TOOL_USE = 6,
14
+ CAPABILITY_TYPE_SYSTEM_PROMPT = 7,
15
+ CAPABILITY_TYPE_CACHING = 8,
16
+ CAPABILITY_TYPE_REASONING = 9,
17
+ CAPABILITY_TYPE_AUDIO = 10,
18
+ CAPABILITY_TYPE_VIDEO = 11,
19
+ CAPABILITY_TYPE_EMBEDDINGS = 12,
20
+ CAPABILITY_TYPE_FINE_TUNING = 13,
21
+ UNRECOGNIZED = -1
22
+ }
23
+ export declare function capabilityTypeFromJSON(object: any): CapabilityType;
24
+ export declare function capabilityTypeToJSON(object: CapabilityType): string;
25
+ /** Data format types for structured responses, fine-tuning, etc. */
26
+ export declare enum DataFormat {
27
+ DATA_FORMAT_UNSPECIFIED = 0,
28
+ DATA_FORMAT_JSON = 1,
29
+ DATA_FORMAT_YAML = 2,
30
+ DATA_FORMAT_XML = 3,
31
+ DATA_FORMAT_JSONL = 4,
32
+ DATA_FORMAT_CSV = 5,
33
+ DATA_FORMAT_PARQUET = 6,
34
+ DATA_FORMAT_PLAIN = 7,
35
+ DATA_FORMAT_MARKDOWN = 8,
36
+ DATA_FORMAT_STRUCTURED = 9,
37
+ UNRECOGNIZED = -1
38
+ }
39
+ export declare function dataFormatFromJSON(object: any): DataFormat;
40
+ export declare function dataFormatToJSON(object: DataFormat): string;
41
+ /** JSON schema types supported in structured responses */
42
+ export declare enum JsonSchemaType {
43
+ JSON_SCHEMA_TYPE_UNSPECIFIED = 0,
44
+ JSON_SCHEMA_TYPE_OBJECT = 1,
45
+ JSON_SCHEMA_TYPE_ARRAY = 2,
46
+ JSON_SCHEMA_TYPE_STRING = 3,
47
+ JSON_SCHEMA_TYPE_NUMBER = 4,
48
+ JSON_SCHEMA_TYPE_BOOLEAN = 5,
49
+ JSON_SCHEMA_TYPE_NULL = 6,
50
+ JSON_SCHEMA_TYPE_INTEGER = 7,
51
+ UNRECOGNIZED = -1
52
+ }
53
+ export declare function jsonSchemaTypeFromJSON(object: any): JsonSchemaType;
54
+ export declare function jsonSchemaTypeToJSON(object: JsonSchemaType): string;
55
+ /** Image formats */
56
+ export declare enum ImageFormat {
57
+ IMAGE_FORMAT_UNSPECIFIED = 0,
58
+ IMAGE_FORMAT_JPEG = 1,
59
+ IMAGE_FORMAT_PNG = 2,
60
+ IMAGE_FORMAT_GIF = 3,
61
+ IMAGE_FORMAT_WEBP = 4,
62
+ IMAGE_FORMAT_BMP = 5,
63
+ IMAGE_FORMAT_TIFF = 6,
64
+ IMAGE_FORMAT_SVG = 7,
65
+ UNRECOGNIZED = -1
66
+ }
67
+ export declare function imageFormatFromJSON(object: any): ImageFormat;
68
+ export declare function imageFormatToJSON(object: ImageFormat): string;
69
+ /** Audio formats */
70
+ export declare enum AudioFormat {
71
+ AUDIO_FORMAT_UNSPECIFIED = 0,
72
+ AUDIO_FORMAT_MP3 = 1,
73
+ AUDIO_FORMAT_WAV = 2,
74
+ AUDIO_FORMAT_OGG = 3,
75
+ AUDIO_FORMAT_M4A = 4,
76
+ AUDIO_FORMAT_FLAC = 5,
77
+ AUDIO_FORMAT_AAC = 6,
78
+ AUDIO_FORMAT_WMA = 7,
79
+ AUDIO_FORMAT_OPUS = 8,
80
+ UNRECOGNIZED = -1
81
+ }
82
+ export declare function audioFormatFromJSON(object: any): AudioFormat;
83
+ export declare function audioFormatToJSON(object: AudioFormat): string;
84
+ /** Video formats */
85
+ export declare enum VideoFormat {
86
+ VIDEO_FORMAT_UNSPECIFIED = 0,
87
+ VIDEO_FORMAT_MP4 = 1,
88
+ VIDEO_FORMAT_AVI = 2,
89
+ VIDEO_FORMAT_MOV = 3,
90
+ VIDEO_FORMAT_MKV = 4,
91
+ VIDEO_FORMAT_WEBM = 5,
92
+ VIDEO_FORMAT_FLV = 6,
93
+ VIDEO_FORMAT_WMV = 7,
94
+ UNRECOGNIZED = -1
95
+ }
96
+ export declare function videoFormatFromJSON(object: any): VideoFormat;
97
+ export declare function videoFormatToJSON(object: VideoFormat): string;
98
+ /** Tool types supported */
99
+ export declare enum ToolType {
100
+ TOOL_TYPE_UNSPECIFIED = 0,
101
+ TOOL_TYPE_FUNCTION = 1,
102
+ TOOL_TYPE_RETRIEVAL = 2,
103
+ TOOL_TYPE_CODE_INTERPRETER = 3,
104
+ TOOL_TYPE_WEB_BROWSER = 4,
105
+ TOOL_TYPE_DATABASE = 5,
106
+ TOOL_TYPE_API = 6,
107
+ TOOL_TYPE_CUSTOM = 7,
108
+ UNRECOGNIZED = -1
109
+ }
110
+ export declare function toolTypeFromJSON(object: any): ToolType;
111
+ export declare function toolTypeToJSON(object: ToolType): string;
112
+ /** Cache key strategies */
113
+ export declare enum CacheStrategy {
114
+ CACHE_STRATEGY_UNSPECIFIED = 0,
115
+ CACHE_STRATEGY_HASH = 1,
116
+ CACHE_STRATEGY_SEMANTIC = 2,
117
+ CACHE_STRATEGY_CUSTOM = 3,
118
+ CACHE_STRATEGY_PREFIX = 4,
119
+ CACHE_STRATEGY_SUFFIX = 5,
120
+ UNRECOGNIZED = -1
121
+ }
122
+ export declare function cacheStrategyFromJSON(object: any): CacheStrategy;
123
+ export declare function cacheStrategyToJSON(object: CacheStrategy): string;
124
+ /** Reasoning strategies */
125
+ export declare enum ReasoningStrategy {
126
+ REASONING_STRATEGY_UNSPECIFIED = 0,
127
+ REASONING_STRATEGY_CHAIN_OF_THOUGHT = 1,
128
+ REASONING_STRATEGY_TREE_OF_THOUGHTS = 2,
129
+ REASONING_STRATEGY_GRAPH_OF_THOUGHTS = 3,
130
+ REASONING_STRATEGY_STEP_BY_STEP = 4,
131
+ REASONING_STRATEGY_SELF_CONSISTENCY = 5,
132
+ REASONING_STRATEGY_LEAST_TO_MOST = 6,
133
+ UNRECOGNIZED = -1
134
+ }
135
+ export declare function reasoningStrategyFromJSON(object: any): ReasoningStrategy;
136
+ export declare function reasoningStrategyToJSON(object: ReasoningStrategy): string;
137
+ /** Distance metrics for embeddings */
138
+ export declare enum DistanceMetric {
139
+ DISTANCE_METRIC_UNSPECIFIED = 0,
140
+ DISTANCE_METRIC_COSINE = 1,
141
+ DISTANCE_METRIC_EUCLIDEAN = 2,
142
+ DISTANCE_METRIC_DOT_PRODUCT = 3,
143
+ DISTANCE_METRIC_MANHATTAN = 4,
144
+ DISTANCE_METRIC_HAMMING = 5,
145
+ UNRECOGNIZED = -1
146
+ }
147
+ export declare function distanceMetricFromJSON(object: any): DistanceMetric;
148
+ export declare function distanceMetricToJSON(object: DistanceMetric): string;
149
+ /** Hyperparameter types for fine-tuning */
150
+ export declare enum Hyperparameter {
151
+ HYPERPARAMETER_UNSPECIFIED = 0,
152
+ HYPERPARAMETER_LEARNING_RATE = 1,
153
+ HYPERPARAMETER_BATCH_SIZE = 2,
154
+ HYPERPARAMETER_EPOCHS = 3,
155
+ HYPERPARAMETER_WARMUP_STEPS = 4,
156
+ HYPERPARAMETER_WEIGHT_DECAY = 5,
157
+ HYPERPARAMETER_GRADIENT_ACCUMULATION = 6,
158
+ HYPERPARAMETER_LR_SCHEDULER = 7,
159
+ HYPERPARAMETER_OPTIMIZER = 8,
160
+ HYPERPARAMETER_DROPOUT = 9,
161
+ HYPERPARAMETER_MAX_SEQUENCE_LENGTH = 10,
162
+ UNRECOGNIZED = -1
163
+ }
164
+ export declare function hyperparameterFromJSON(object: any): Hyperparameter;
165
+ export declare function hyperparameterToJSON(object: Hyperparameter): string;
166
+ /** Modality direction - whether a modality supports input, output, or both */
167
+ export declare enum ModalityDirection {
168
+ MODALITY_DIRECTION_UNSPECIFIED = 0,
169
+ /** MODALITY_DIRECTION_INPUT_ONLY - Can only process/analyze (e.g., image analysis) */
170
+ MODALITY_DIRECTION_INPUT_ONLY = 1,
171
+ /** MODALITY_DIRECTION_OUTPUT_ONLY - Can only generate (e.g., TTS without STT) */
172
+ MODALITY_DIRECTION_OUTPUT_ONLY = 2,
173
+ /** MODALITY_DIRECTION_INPUT_OUTPUT - Can both process and generate */
174
+ MODALITY_DIRECTION_INPUT_OUTPUT = 3,
175
+ UNRECOGNIZED = -1
176
+ }
177
+ export declare function modalityDirectionFromJSON(object: any): ModalityDirection;
178
+ export declare function modalityDirectionToJSON(object: ModalityDirection): string;
179
+ /**
180
+ * Capability represents a specific feature/ability of a model with its configuration.
181
+ * Each capability has a type and optional detailed configuration.
182
+ */
183
+ export interface Capability {
184
+ /**
185
+ * The type of capability this represents.
186
+ * Example: CAPABILITY_TYPE_FUNCTION_CALLING for function/tool calling
187
+ */
188
+ type: CapabilityType;
189
+ /**
190
+ * Whether this capability is currently enabled/available.
191
+ * Example: true if the model supports and has this feature active
192
+ */
193
+ enabled: boolean;
194
+ text?: Text | undefined;
195
+ structuredResponse?: StructuredResponse | undefined;
196
+ streaming?: Streaming | undefined;
197
+ functionCalling?: FunctionCalling | undefined;
198
+ vision?: Vision | undefined;
199
+ toolUse?: ToolUse | undefined;
200
+ systemPrompt?: SystemPrompt | undefined;
201
+ caching?: Caching | undefined;
202
+ reasoning?: Reasoning | undefined;
203
+ audio?: Audio | undefined;
204
+ video?: Video | undefined;
205
+ embeddings?: Embeddings | undefined;
206
+ fineTuning?: FineTuning | undefined;
207
+ /**
208
+ * Unstructured additional information about this capability.
209
+ * Used for provider-specific details that don't fit the structured fields.
210
+ * Example: "Supports up to 10 parallel function calls with automatic retry"
211
+ * Example: "Beta feature - may have unexpected behavior"
212
+ * Example: "Optimized for conversational use cases"
213
+ */
214
+ additionalInfo: string;
215
+ }
216
+ /** Text capability configuration for basic text input/output */
217
+ export interface Text {
218
+ /**
219
+ * Direction of text support (input, output, or both)
220
+ * Example: MODALITY_DIRECTION_INPUT_OUTPUT for chat models
221
+ */
222
+ direction: ModalityDirection;
223
+ /**
224
+ * Maximum input text length in characters (if limited)
225
+ * Example: 32000 for models with character limits
226
+ */
227
+ maxInputLength: number;
228
+ /**
229
+ * Maximum output text length in characters (if limited)
230
+ * Example: 4096 for models with output limits
231
+ */
232
+ maxOutputLength: number;
233
+ /**
234
+ * Supported languages for text processing
235
+ * Examples: ["en", "es", "fr", "de", "zh", "ja"]
236
+ */
237
+ supportedLanguages: string[];
238
+ /**
239
+ * Whether the model supports multi-turn conversations
240
+ * Example: true for chat models, false for completion-only models
241
+ */
242
+ supportsConversation: boolean;
243
+ /**
244
+ * Whether the model can maintain context across messages
245
+ * Example: true for stateful models
246
+ */
247
+ supportsContext: boolean;
248
+ }
249
+ /** Structured response capability configuration */
250
+ export interface StructuredResponse {
251
+ systemPromptHint: string;
252
+ supportedFormats: DataFormat[];
253
+ maxSchemaDepth: number;
254
+ requiresToolUse: boolean;
255
+ requiresJsonMode: boolean;
256
+ maxProperties: number;
257
+ supportedTypes: JsonSchemaType[];
258
+ /** Can stream structured responses (not just generate them) */
259
+ supportsStreaming: boolean;
260
+ }
261
+ /** Streaming capability configuration */
262
+ export interface Streaming {
263
+ chunkDelimiter: string;
264
+ bufferSize: number;
265
+ supportsSse: boolean;
266
+ supportsUsage: boolean;
267
+ avgChunkSizeBytes: number;
268
+ maxChunkDelayMs: number;
269
+ }
270
+ /** Function calling capability configuration */
271
+ export interface FunctionCalling {
272
+ maxFunctions: number;
273
+ maxParallelCalls: number;
274
+ supportsParallel: boolean;
275
+ requiresToolRole: boolean;
276
+ supportsStreaming: boolean;
277
+ supportedParameterTypes: JsonSchemaType[];
278
+ maxNestingDepth: number;
279
+ }
280
+ /** Vision capability configuration */
281
+ export interface Vision {
282
+ /**
283
+ * Direction of vision support
284
+ * Example: MODALITY_DIRECTION_INPUT_ONLY for analysis-only models
285
+ * Example: MODALITY_DIRECTION_OUTPUT_ONLY for image generation models (DALL-E)
286
+ * Example: MODALITY_DIRECTION_INPUT_OUTPUT for models that can both analyze and generate
287
+ */
288
+ direction: ModalityDirection;
289
+ /**
290
+ * Supported image file formats
291
+ * Examples: [IMAGE_FORMAT_JPEG, IMAGE_FORMAT_PNG, IMAGE_FORMAT_WEBP]
292
+ */
293
+ supportedFormats: ImageFormat[];
294
+ /**
295
+ * Maximum size per image in bytes
296
+ * Example: 20971520 (20MB) for GPT-4-vision
297
+ */
298
+ maxImageSizeBytes: number;
299
+ /**
300
+ * Maximum images per API request (for input)
301
+ * Example: 10 for GPT-4-vision, 1 for some models
302
+ */
303
+ maxImagesPerRequest: number;
304
+ /**
305
+ * Maximum image width in pixels
306
+ * Example: 4096 for high-resolution support
307
+ */
308
+ maxResolutionWidth: number;
309
+ /**
310
+ * Maximum image height in pixels
311
+ * Example: 4096 for high-resolution support
312
+ */
313
+ maxResolutionHeight: number;
314
+ /**
315
+ * Supports optical character recognition
316
+ * Example: true if model can extract text from images
317
+ */
318
+ supportsOcr: boolean;
319
+ /**
320
+ * Supports object detection/localization
321
+ * Example: true if model can identify and locate objects
322
+ */
323
+ supportsObjectDetection: boolean;
324
+ /**
325
+ * Can process video frames as images
326
+ * Example: true if model accepts video frame extraction
327
+ */
328
+ supportsVideoFrames: boolean;
329
+ }
330
+ /** Tool use capability configuration */
331
+ export interface ToolUse {
332
+ maxTools: number;
333
+ supportsSequential: boolean;
334
+ supportsParallel: boolean;
335
+ maxToolRounds: number;
336
+ supportedToolTypes: ToolType[];
337
+ }
338
+ /** System prompt capability configuration */
339
+ export interface SystemPrompt {
340
+ maxLength: number;
341
+ supportsMultiple: boolean;
342
+ supportsCaching: boolean;
343
+ format: DataFormat;
344
+ }
345
+ /** Caching capability configuration */
346
+ export interface Caching {
347
+ cacheKeyStrategy: CacheStrategy;
348
+ maxCacheSizeBytes: number;
349
+ cacheTtlSeconds: number;
350
+ supportsContextCaching: boolean;
351
+ supportsPromptCaching: boolean;
352
+ minCacheableTokens: number;
353
+ }
354
+ /** Reasoning capability configuration */
355
+ export interface Reasoning {
356
+ supportsChainOfThought: boolean;
357
+ supportsStepTracking: boolean;
358
+ maxReasoningSteps: number;
359
+ supportsSelfCorrection: boolean;
360
+ reasoningStrategies: ReasoningStrategy[];
361
+ }
362
+ /** Audio capability configuration */
363
+ export interface Audio {
364
+ /**
365
+ * Direction of audio support
366
+ * Example: MODALITY_DIRECTION_INPUT_ONLY for speech-to-text only
367
+ * Example: MODALITY_DIRECTION_OUTPUT_ONLY for text-to-speech only
368
+ * Example: MODALITY_DIRECTION_INPUT_OUTPUT for models supporting both STT and TTS
369
+ */
370
+ direction: ModalityDirection;
371
+ /**
372
+ * Supported audio file formats
373
+ * Examples: [AUDIO_FORMAT_MP3, AUDIO_FORMAT_WAV, AUDIO_FORMAT_M4A]
374
+ */
375
+ supportedFormats: AudioFormat[];
376
+ /**
377
+ * Maximum audio duration in seconds
378
+ * Example: 600 for 10-minute limit
379
+ */
380
+ maxDurationSeconds: number;
381
+ /**
382
+ * Maximum audio file size in bytes
383
+ * Example: 26214400 (25MB) limit
384
+ */
385
+ maxFileSizeBytes: number;
386
+ /**
387
+ * Supported languages for audio processing
388
+ * Examples: ["en", "es", "fr", "de", "zh", "ja"]
389
+ */
390
+ supportedLanguages: string[];
391
+ /**
392
+ * Supports real-time streaming (for live audio)
393
+ * Example: true for real-time voice models
394
+ */
395
+ supportsStreaming: boolean;
396
+ /**
397
+ * Supports voice cloning or voice selection
398
+ * Example: true if TTS can use different voices
399
+ */
400
+ supportsVoiceSelection: boolean;
401
+ }
402
+ /** Video capability configuration */
403
+ export interface Video {
404
+ /**
405
+ * Direction of video support
406
+ * Example: MODALITY_DIRECTION_INPUT_ONLY for video analysis/understanding
407
+ * Example: MODALITY_DIRECTION_OUTPUT_ONLY for video generation
408
+ * Example: MODALITY_DIRECTION_INPUT_OUTPUT for models that can both analyze and generate
409
+ */
410
+ direction: ModalityDirection;
411
+ /**
412
+ * Supported video file formats
413
+ * Examples: [VIDEO_FORMAT_MP4, VIDEO_FORMAT_MOV, VIDEO_FORMAT_AVI]
414
+ */
415
+ supportedFormats: VideoFormat[];
416
+ /**
417
+ * Maximum video duration in seconds
418
+ * Example: 120 for 2-minute limit
419
+ */
420
+ maxDurationSeconds: number;
421
+ /**
422
+ * Maximum video file size in bytes
423
+ * Example: 1073741824 (1GB) limit
424
+ */
425
+ maxFileSizeBytes: number;
426
+ /**
427
+ * Maximum frames per second supported
428
+ * Example: 30 for standard frame rate
429
+ */
430
+ maxFps: number;
431
+ /**
432
+ * Supports extracting and analyzing individual frames
433
+ * Example: true if model can process video as a sequence of images
434
+ */
435
+ supportsFrameExtraction: boolean;
436
+ /**
437
+ * Maximum number of frames that can be analyzed
438
+ * Example: 100 for frame-by-frame analysis limit
439
+ */
440
+ maxFrames: number;
441
+ }
442
+ /** Embeddings capability configuration */
443
+ export interface Embeddings {
444
+ embeddingDimensions: number;
445
+ maxInputTokens: number;
446
+ supportsBatch: boolean;
447
+ maxBatchSize: number;
448
+ distanceMetrics: DistanceMetric[];
449
+ }
450
+ /** Fine-tuning capability configuration */
451
+ export interface FineTuning {
452
+ minExamples: number;
453
+ maxExamples: number;
454
+ supportedFormats: DataFormat[];
455
+ maxFileSizeMb: number;
456
+ supportsValidationSet: boolean;
457
+ hyperparameters: Hyperparameter[];
458
+ }
459
+ export declare const Capability: MessageFns<Capability>;
460
+ export declare const Text: MessageFns<Text>;
461
+ export declare const StructuredResponse: MessageFns<StructuredResponse>;
462
+ export declare const Streaming: MessageFns<Streaming>;
463
+ export declare const FunctionCalling: MessageFns<FunctionCalling>;
464
+ export declare const Vision: MessageFns<Vision>;
465
+ export declare const ToolUse: MessageFns<ToolUse>;
466
+ export declare const SystemPrompt: MessageFns<SystemPrompt>;
467
+ export declare const Caching: MessageFns<Caching>;
468
+ export declare const Reasoning: MessageFns<Reasoning>;
469
+ export declare const Audio: MessageFns<Audio>;
470
+ export declare const Video: MessageFns<Video>;
471
+ export declare const Embeddings: MessageFns<Embeddings>;
472
+ export declare const FineTuning: MessageFns<FineTuning>;
473
+ type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
474
+ export type DeepPartial<T> = T extends Builtin ? T : T extends globalThis.Array<infer U> ? globalThis.Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> : T extends {} ? {
475
+ [K in keyof T]?: DeepPartial<T[K]>;
476
+ } : Partial<T>;
477
+ type KeysOfUnion<T> = T extends T ? keyof T : never;
478
+ export type Exact<P, I extends P> = P extends Builtin ? P : P & {
479
+ [K in keyof P]: Exact<P[K], I[K]>;
480
+ } & {
481
+ [K in Exclude<keyof I, KeysOfUnion<P>>]: never;
482
+ };
483
+ export interface MessageFns<T> {
484
+ encode(message: T, writer?: BinaryWriter): BinaryWriter;
485
+ decode(input: BinaryReader | Uint8Array, length?: number): T;
486
+ fromJSON(object: any): T;
487
+ toJSON(message: T): unknown;
488
+ create<I extends Exact<DeepPartial<T>, I>>(base?: I): T;
489
+ fromPartial<I extends Exact<DeepPartial<T>, I>>(object: I): T;
490
+ }
491
+ export {};