weave-typescript 0.8.0 → 0.9.0
- package/dist/weaveapi/llmx/v1/capabilities.pb.d.ts +188 -17
- package/dist/weaveapi/llmx/v1/capabilities.pb.js +438 -162
- package/dist/weaveapi/llmx/v1/model.pb.d.ts +1 -11
- package/dist/weaveapi/llmx/v1/model.pb.js +7 -135
- package/dist/weaveapi/llmx/v1/pricing.pb.d.ts +142 -0
- package/dist/weaveapi/llmx/v1/pricing.pb.js +825 -0
- package/package.json +1 -1
@@ -3,18 +3,21 @@ export declare const protobufPackage = "weaveapi.llmx.v1";
 /** Core capability types that models can support */
 export declare enum CapabilityType {
     CAPABILITY_TYPE_UNSPECIFIED = 0,
-    CAPABILITY_TYPE_STRUCTURED_RESPONSE = 1,
-    CAPABILITY_TYPE_STREAMING = 2,
-    CAPABILITY_TYPE_FUNCTION_CALLING = 3,
-    CAPABILITY_TYPE_VISION = 4,
-    CAPABILITY_TYPE_TOOL_USE = 5,
-    CAPABILITY_TYPE_SYSTEM_PROMPT = 6,
-    CAPABILITY_TYPE_CACHING = 7,
-    CAPABILITY_TYPE_REASONING = 8,
-    CAPABILITY_TYPE_AUDIO = 9,
-    CAPABILITY_TYPE_VIDEO = 10,
-    CAPABILITY_TYPE_EMBEDDINGS = 11,
-    CAPABILITY_TYPE_FINE_TUNING = 12,
+    /** CAPABILITY_TYPE_TEXT - Basic text input/output capability */
+    CAPABILITY_TYPE_TEXT = 1,
+    /** CAPABILITY_TYPE_STRUCTURED_RESPONSE - Structured output (JSON, XML, etc.) */
+    CAPABILITY_TYPE_STRUCTURED_RESPONSE = 2,
+    CAPABILITY_TYPE_STREAMING = 3,
+    CAPABILITY_TYPE_FUNCTION_CALLING = 4,
+    CAPABILITY_TYPE_VISION = 5,
+    CAPABILITY_TYPE_TOOL_USE = 6,
+    CAPABILITY_TYPE_SYSTEM_PROMPT = 7,
+    CAPABILITY_TYPE_CACHING = 8,
+    CAPABILITY_TYPE_REASONING = 9,
+    CAPABILITY_TYPE_AUDIO = 10,
+    CAPABILITY_TYPE_VIDEO = 11,
+    CAPABILITY_TYPE_EMBEDDINGS = 12,
+    CAPABILITY_TYPE_FINE_TUNING = 13,
     UNRECOGNIZED = -1
 }
 export declare function capabilityTypeFromJSON(object: any): CapabilityType;
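Note that 0.9.0 inserts CAPABILITY_TYPE_TEXT = 1 ahead of the pre-existing members and re-emits the rest with shifted values, so the integers change across versions while the string names stay stable. Consumers persisting raw numbers may want to round-trip through the generated JSON helpers instead. A minimal sketch, assuming the deep import path mirrors the dist layout listed above and that a capabilityTypeToJSON counterpart accompanies the declared capabilityTypeFromJSON, as is standard in ts-proto output:

```ts
// Illustrative import path; the package root may re-export these as well.
import {
  CapabilityType,
  capabilityTypeFromJSON,
  capabilityTypeToJSON,
} from "weave-typescript/dist/weaveapi/llmx/v1/capabilities.pb";

// Persist the stable string name rather than the renumbered integer.
const stored = capabilityTypeToJSON(CapabilityType.CAPABILITY_TYPE_VISION); // "CAPABILITY_TYPE_VISION"
const restored = capabilityTypeFromJSON(stored); // CapabilityType.CAPABILITY_TYPE_VISION
```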
@@ -160,10 +163,35 @@ export declare enum Hyperparameter {
 }
 export declare function hyperparameterFromJSON(object: any): Hyperparameter;
 export declare function hyperparameterToJSON(object: Hyperparameter): string;
-/**
+/** Modality direction - whether a modality supports input, output, or both */
+export declare enum ModalityDirection {
+    MODALITY_DIRECTION_UNSPECIFIED = 0,
+    /** MODALITY_DIRECTION_INPUT_ONLY - Can only process/analyze (e.g., image analysis) */
+    MODALITY_DIRECTION_INPUT_ONLY = 1,
+    /** MODALITY_DIRECTION_OUTPUT_ONLY - Can only generate (e.g., TTS without STT) */
+    MODALITY_DIRECTION_OUTPUT_ONLY = 2,
+    /** MODALITY_DIRECTION_INPUT_OUTPUT - Can both process and generate */
+    MODALITY_DIRECTION_INPUT_OUTPUT = 3,
+    UNRECOGNIZED = -1
+}
+export declare function modalityDirectionFromJSON(object: any): ModalityDirection;
+export declare function modalityDirectionToJSON(object: ModalityDirection): string;
+/**
+ * Capability represents a specific feature/ability of a model with its configuration.
+ * Each capability has a type and optional detailed configuration.
+ */
 export interface Capability {
+    /**
+     * The type of capability this represents.
+     * Example: CAPABILITY_TYPE_FUNCTION_CALLING for function/tool calling
+     */
     type: CapabilityType;
+    /**
+     * Whether this capability is currently enabled/available.
+     * Example: true if the model supports and has this feature active
+     */
     enabled: boolean;
+    text?: Text | undefined;
     structuredResponse?: StructuredResponse | undefined;
     streaming?: Streaming | undefined;
     functionCalling?: FunctionCalling | undefined;
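The new ModalityDirection enum is shared by the Text, Vision, Audio, and Video configs that follow. A sketch of narrowing on it from a Capability; the helper name and import path are illustrative, not part of the package:

```ts
import {
  ModalityDirection,
  type Capability,
} from "weave-typescript/dist/weaveapi/llmx/v1/capabilities.pb";

// True when a capability's text config permits generating output.
function canGenerateText(cap: Capability): boolean {
  const dir = cap.text?.direction;
  return (
    dir === ModalityDirection.MODALITY_DIRECTION_OUTPUT_ONLY ||
    dir === ModalityDirection.MODALITY_DIRECTION_INPUT_OUTPUT
  );
}
```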
@@ -176,6 +204,47 @@ export interface Capability {
     video?: Video | undefined;
     embeddings?: Embeddings | undefined;
     fineTuning?: FineTuning | undefined;
+    /**
+     * Unstructured additional information about this capability.
+     * Used for provider-specific details that don't fit the structured fields.
+     * Example: "Supports up to 10 parallel function calls with automatic retry"
+     * Example: "Beta feature - may have unexpected behavior"
+     * Example: "Optimized for conversational use cases"
+     */
+    additionalInfo: string;
+}
+/** Text capability configuration for basic text input/output */
+export interface Text {
+    /**
+     * Direction of text support (input, output, or both)
+     * Example: MODALITY_DIRECTION_INPUT_OUTPUT for chat models
+     */
+    direction: ModalityDirection;
+    /**
+     * Maximum input text length in characters (if limited)
+     * Example: 32000 for models with character limits
+     */
+    maxInputLength: number;
+    /**
+     * Maximum output text length in characters (if limited)
+     * Example: 4096 for models with output limits
+     */
+    maxOutputLength: number;
+    /**
+     * Supported languages for text processing
+     * Examples: ["en", "es", "fr", "de", "zh", "ja"]
+     */
+    supportedLanguages: string[];
+    /**
+     * Whether the model supports multi-turn conversations
+     * Example: true for chat models, false for completion-only models
+     */
+    supportsConversation: boolean;
+    /**
+     * Whether the model can maintain context across messages
+     * Example: true for stateful models
+     */
+    supportsContext: boolean;
 }
 /** Structured response capability configuration */
 export interface StructuredResponse {
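The per-field comments say the new Text limits apply only "if limited", which under proto3 semantics presumably decodes as 0 when unset. A hypothetical pre-flight check built on that assumption:

```ts
import type { Text } from "weave-typescript/dist/weaveapi/llmx/v1/capabilities.pb";

// Treat a zero limit as "no declared limit" (proto3 default for unset numbers).
// The limits are documented in characters; String.length counts UTF-16 code
// units, which is close enough for a pre-flight check.
function fitsTextLimits(cfg: Text, prompt: string): boolean {
  return cfg.maxInputLength === 0 || prompt.length <= cfg.maxInputLength;
}
```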
@@ -210,15 +279,53 @@ export interface FunctionCalling {
 }
 /** Vision capability configuration */
 export interface Vision {
+    /**
+     * Direction of vision support
+     * Example: MODALITY_DIRECTION_INPUT_ONLY for analysis-only models
+     * Example: MODALITY_DIRECTION_OUTPUT_ONLY for image generation models (DALL-E)
+     * Example: MODALITY_DIRECTION_INPUT_OUTPUT for models that can both analyze and generate
+     */
+    direction: ModalityDirection;
+    /**
+     * Supported image file formats
+     * Examples: [IMAGE_FORMAT_JPEG, IMAGE_FORMAT_PNG, IMAGE_FORMAT_WEBP]
+     */
     supportedFormats: ImageFormat[];
+    /**
+     * Maximum size per image in bytes
+     * Example: 20971520 (20MB) for GPT-4-vision
+     */
     maxImageSizeBytes: number;
+    /**
+     * Maximum images per API request (for input)
+     * Example: 10 for GPT-4-vision, 1 for some models
+     */
     maxImagesPerRequest: number;
-
-
+    /**
+     * Maximum image width in pixels
+     * Example: 4096 for high-resolution support
+     */
     maxResolutionWidth: number;
+    /**
+     * Maximum image height in pixels
+     * Example: 4096 for high-resolution support
+     */
     maxResolutionHeight: number;
+    /**
+     * Supports optical character recognition
+     * Example: true if model can extract text from images
+     */
     supportsOcr: boolean;
+    /**
+     * Supports object detection/localization
+     * Example: true if model can identify and locate objects
+     */
     supportsObjectDetection: boolean;
+    /**
+     * Can process video frames as images
+     * Example: true if model accepts video frame extraction
+     */
+    supportsVideoFrames: boolean;
 }
 /** Tool use capability configuration */
 export interface ToolUse {
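Vision now exposes enough limits for a client-side pre-flight check before uploading images. A sketch under the same zero-means-unlimited assumption; ImageBatch and fitsVisionLimits are illustrative names, not package exports:

```ts
import type { Vision } from "weave-typescript/dist/weaveapi/llmx/v1/capabilities.pb";

// Hypothetical summary of the images a caller wants to attach.
interface ImageBatch {
  count: number;
  largestBytes: number;
  widestPx: number;
  tallestPx: number;
}

function fitsVisionLimits(v: Vision, b: ImageBatch): boolean {
  const within = (limit: number, actual: number) => limit === 0 || actual <= limit;
  return (
    within(v.maxImagesPerRequest, b.count) &&
    within(v.maxImageSizeBytes, b.largestBytes) &&
    within(v.maxResolutionWidth, b.widestPx) &&
    within(v.maxResolutionHeight, b.tallestPx)
  );
}
```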
@@ -254,20 +361,83 @@ export interface Reasoning {
 }
 /** Audio capability configuration */
 export interface Audio {
+    /**
+     * Direction of audio support
+     * Example: MODALITY_DIRECTION_INPUT_ONLY for speech-to-text only
+     * Example: MODALITY_DIRECTION_OUTPUT_ONLY for text-to-speech only
+     * Example: MODALITY_DIRECTION_INPUT_OUTPUT for models supporting both STT and TTS
+     */
+    direction: ModalityDirection;
+    /**
+     * Supported audio file formats
+     * Examples: [AUDIO_FORMAT_MP3, AUDIO_FORMAT_WAV, AUDIO_FORMAT_M4A]
+     */
     supportedFormats: AudioFormat[];
+    /**
+     * Maximum audio duration in seconds
+     * Example: 600 for 10-minute limit
+     */
     maxDurationSeconds: number;
+    /**
+     * Maximum audio file size in bytes
+     * Example: 26214400 (25MB) limit
+     */
     maxFileSizeBytes: number;
-
-
+    /**
+     * Supported languages for audio processing
+     * Examples: ["en", "es", "fr", "de", "zh", "ja"]
+     */
     supportedLanguages: string[];
+    /**
+     * Supports real-time streaming (for live audio)
+     * Example: true for real-time voice models
+     */
+    supportsStreaming: boolean;
+    /**
+     * Supports voice cloning or voice selection
+     * Example: true if TTS can use different voices
+     */
+    supportsVoiceSelection: boolean;
 }
 /** Video capability configuration */
 export interface Video {
+    /**
+     * Direction of video support
+     * Example: MODALITY_DIRECTION_INPUT_ONLY for video analysis/understanding
+     * Example: MODALITY_DIRECTION_OUTPUT_ONLY for video generation
+     * Example: MODALITY_DIRECTION_INPUT_OUTPUT for models that can both analyze and generate
+     */
+    direction: ModalityDirection;
+    /**
+     * Supported video file formats
+     * Examples: [VIDEO_FORMAT_MP4, VIDEO_FORMAT_MOV, VIDEO_FORMAT_AVI]
+     */
     supportedFormats: VideoFormat[];
+    /**
+     * Maximum video duration in seconds
+     * Example: 120 for 2-minute limit
+     */
     maxDurationSeconds: number;
+    /**
+     * Maximum video file size in bytes
+     * Example: 1073741824 (1GB) limit
+     */
     maxFileSizeBytes: number;
+    /**
+     * Maximum frames per second supported
+     * Example: 30 for standard frame rate
+     */
     maxFps: number;
+    /**
+     * Supports extracting and analyzing individual frames
+     * Example: true if model can process video as a sequence of images
+     */
     supportsFrameExtraction: boolean;
+    /**
+     * Maximum number of frames that can be analyzed
+     * Example: 100 for frame-by-frame analysis limit
+     */
+    maxFrames: number;
 }
 /** Embeddings capability configuration */
 export interface Embeddings {
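For Audio, direction stands in for separate STT/TTS flags: text-to-speech requires output support, speech-to-text requires input support. An illustrative guard (helper names are ours):

```ts
import {
  ModalityDirection,
  type Audio,
} from "weave-typescript/dist/weaveapi/llmx/v1/capabilities.pb";

// TTS needs the model to generate audio; STT needs it to consume audio.
const supportsTts = (a: Audio) =>
  a.direction === ModalityDirection.MODALITY_DIRECTION_OUTPUT_ONLY ||
  a.direction === ModalityDirection.MODALITY_DIRECTION_INPUT_OUTPUT;

const supportsStt = (a: Audio) =>
  a.direction === ModalityDirection.MODALITY_DIRECTION_INPUT_ONLY ||
  a.direction === ModalityDirection.MODALITY_DIRECTION_INPUT_OUTPUT;
```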
@@ -287,6 +457,7 @@ export interface FineTuning {
     hyperparameters: Hyperparameter[];
 }
 export declare const Capability: MessageFns<Capability>;
+export declare const Text: MessageFns<Text>;
 export declare const StructuredResponse: MessageFns<StructuredResponse>;
 export declare const Streaming: MessageFns<Streaming>;
 export declare const FunctionCalling: MessageFns<FunctionCalling>;
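The new Text message also gains a MessageFns export alongside its siblings. The MessageFns shape itself isn't shown in this diff, but assuming it carries ts-proto's usual encode/decode/fromPartial helpers, a wire round trip might look like:

```ts
import {
  ModalityDirection,
  Text,
} from "weave-typescript/dist/weaveapi/llmx/v1/capabilities.pb";

// fromPartial fills proto3 defaults for the fields we omit.
const cfg = Text.fromPartial({
  direction: ModalityDirection.MODALITY_DIRECTION_INPUT_OUTPUT,
  maxInputLength: 32000,
  supportedLanguages: ["en", "ja"],
});
const bytes = Text.encode(cfg).finish(); // Uint8Array wire format
const decoded = Text.decode(bytes);      // structurally equal to cfg
```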