weave-typescript 0.8.0 → 0.10.0

This diff shows the published contents of the two package versions as they appear in their public registries and is provided for informational purposes only.
@@ -1,14 +1,53 @@
  import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire";
  export declare const protobufPackage = "weaveapi.llmx.v1";
+ /** Provider represents an AI model provider/vendor organization. */
  export interface Provider {
+ /**
+ * Unique internal identifier for this provider.
+ * Example: "pvd_01234567-89ab-cdef-0123-456789abcdef"
+ */
  id: string;
+ /**
+ * URL-friendly identifier for routing.
+ * Examples: "openai", "anthropic", "google", "meta"
+ * Used in paths like /providers/openai
+ */
  slug: string;
+ /**
+ * Display name of the provider.
+ * Examples: "OpenAI", "Anthropic", "Google AI", "Meta AI"
+ */
  name: string;
+ /**
+ * Description of the provider and their focus.
+ * Example: "OpenAI develops general-purpose AI models including GPT-4,
+ * DALL-E, and Whisper, focusing on safe and beneficial AI."
+ */
  description: string;
+ /**
+ * Main website URL.
+ * Example: "https://openai.com"
+ */
  websiteUrl: string;
+ /**
+ * API documentation URL.
+ * Example: "https://platform.openai.com/docs"
+ */
  documentationUrl: string;
+ /**
+ * URL to provider's logo image.
+ * Example: "https://assets.example.com/logos/openai.svg"
+ */
  logoUrl: string;
+ /**
+ * Whether provider is currently operational.
+ * Example: false if provider has shut down or paused service
+ */
  isActive: boolean;
+ /**
+ * Number of models offered by this provider.
+ * Example: 25 for a provider with 25 different models
+ */
  modelCount: number;
  }
  export declare const Provider: MessageFns<Provider>;
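For context, a minimal sketch of a `Provider` value that satisfies the newly documented interface; the import path assumes the generated types are re-exported from the package root, and the field values are taken from the doc-comment examples above.

```ts
import type { Provider } from "weave-typescript"; // assumed re-export path

// Illustrative values only, copied from the documented examples.
const provider: Provider = {
  id: "pvd_01234567-89ab-cdef-0123-456789abcdef",
  slug: "openai",
  name: "OpenAI",
  description: "OpenAI develops general-purpose AI models including GPT-4, DALL-E, and Whisper.",
  websiteUrl: "https://openai.com",
  documentationUrl: "https://platform.openai.com/docs",
  logoUrl: "https://assets.example.com/logos/openai.svg",
  isActive: true,
  modelCount: 25,
};
```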
@@ -20,25 +20,78 @@ export interface GetProviderRequest {
  export interface GetProviderResponse {
  provider: Provider | undefined;
  }
+ /** ListModelsRequest defines filters and pagination for listing models. */
  export interface ListModelsRequest {
+ /**
+ * Maximum number of models to return (1-100).
+ * Example: 20 for paginated results
+ */
  pageSize: number;
+ /**
+ * Token for fetching next page of results.
+ * Example: "eyJvZmZzZXQiOjIwfQ==" from previous response
+ */
  pageToken: string;
- /** Filters */
+ /**
+ * Filter by provider slug.
+ * Example: "openai" to only show OpenAI models
+ */
  provider: string;
+ /**
+ * Filter by model type.
+ * Examples: "chat", "embedding", "image", "audio"
+ */
  modelType: string;
+ /**
+ * Only show currently available models.
+ * Example: true to exclude deprecated models
+ */
  activeOnly: boolean;
- /** Capability filters */
+ /**
+ * Only models with vision/image understanding.
+ * Example: true for GPT-4V, Claude-3 Vision
+ */
  visionOnly: boolean;
+ /**
+ * Only models supporting function/tool calling.
+ * Example: true for GPT-4, Claude-3
+ */
  toolCallsOnly: boolean;
+ /**
+ * Only models with advanced reasoning capabilities.
+ * Example: true for o1-preview, o1-mini
+ */
  reasoningOnly: boolean;
+ /**
+ * Only open-source models with public weights.
+ * Example: true for LLaMA, Mistral, false for GPT-4
+ */
  openSourceOnly: boolean;
- /** Price filters (per 1M tokens) */
+ /**
+ * Maximum acceptable input token price.
+ * Example: 5.0 for models under $5/1M input tokens
+ */
  maxInputPrice: number;
+ /**
+ * Maximum acceptable output token price.
+ * Example: 15.0 for models under $15/1M output tokens
+ */
  maxOutputPrice: number;
- /** Token filters */
+ /**
+ * Minimum required context window size.
+ * Example: 32000 for models with 32k+ context
+ */
  minContextWindow: number;
- /** Sorting */
+ /**
+ * Field to sort results by.
+ * Options: "name", "release_date", "input_price", "context_window"
+ * Example: "release_date" for newest models first
+ */
  orderBy: string;
+ /**
+ * Sort in descending order.
+ * Example: true for newest/highest first, false for oldest/lowest
+ */
  descending: boolean;
  }
  export interface ListModelsResponse {
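To show how the newly documented filter fields combine, here is a sketch of a fully populated `ListModelsRequest`. Every field is required in the generated type, so unused filters are left at their zero values; the import path is an assumption, and the values follow the doc-comment examples.

```ts
import type { ListModelsRequest } from "weave-typescript"; // assumed re-export path

// Hypothetical request: active OpenAI chat models with tool calling,
// priced under $5/1M input and $15/1M output tokens, 32k+ context,
// newest first. Unused filters stay at their zero values.
const request: ListModelsRequest = {
  pageSize: 20,
  pageToken: "",
  provider: "openai",
  modelType: "chat",
  activeOnly: true,
  visionOnly: false,
  toolCallsOnly: true,
  reasoningOnly: false,
  openSourceOnly: false,
  maxInputPrice: 5.0,
  maxOutputPrice: 15.0,
  minContextWindow: 32000,
  orderBy: "release_date",
  descending: true,
};
```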
@@ -55,45 +108,139 @@ export interface GetModelRequest {
  export interface GetModelResponse {
  model: Model | undefined;
  }
+ /** SearchModelsRequest enables advanced model search with multiple filter criteria. */
  export interface SearchModelsRequest {
- /** Full-text search */
+ /**
+ * Full-text search across model names and descriptions.
+ * Example: "code generation" to find coding-focused models
+ */
  query: string;
- /** Advanced filters */
+ /**
+ * Filter by multiple providers.
+ * Example: ["openai", "anthropic"] for models from either provider
+ */
  providers: string[];
+ /**
+ * Filter by multiple model types.
+ * Example: ["chat", "instruct"] for conversational models
+ */
  modelTypes: string[];
+ /**
+ * Filter by architecture families.
+ * Example: ["GPT", "LLaMA", "Mistral"] for specific architectures
+ */
  architectures: string[];
- /** Capability requirements (ALL must be true) */
+ /**
+ * Required capabilities (model must have all).
+ * Example: ["function_calling", "vision"] for multi-modal tool-use models
+ */
  requiredCapabilities: string[];
+ /**
+ * Required input modalities (model must support all).
+ * Example: ["text", "image"] for vision-language models
+ */
  requiredInputModalities: string[];
+ /**
+ * Required output modalities (model must support all).
+ * Example: ["text", "code"] for code generation models
+ */
  requiredOutputModalities: string[];
- /** Numeric ranges */
+ /** Price range constraints per 1M tokens. */
  priceRange: PriceRange | undefined;
+ /** Token limit constraints. */
  tokenRange: TokenRange | undefined;
+ /** Minimum performance scores required. */
  performanceRange: PerformanceRange | undefined;
- /** Pagination */
+ /**
+ * Results per page (1-100).
+ * Example: 25
+ */
  pageSize: number;
+ /** Continuation token from previous response. */
  pageToken: string;
- /** Sorting */
+ /**
+ * Sort field name.
+ * Options: "relevance", "name", "release_date", "price", "performance"
+ */
  orderBy: string;
+ /**
+ * Sort direction.
+ * Example: true for descending (highest/newest first)
+ */
  descending: boolean;
  }
+ /** PriceRange defines min/max price constraints for filtering models. */
  export interface PriceRange {
+ /**
+ * Minimum input token price per 1M tokens.
+ * Example: 0.5 for at least $0.50/1M input tokens
+ */
  minInputPrice: number;
+ /**
+ * Maximum input token price per 1M tokens.
+ * Example: 10.0 for at most $10/1M input tokens
+ */
  maxInputPrice: number;
+ /**
+ * Minimum output token price per 1M tokens.
+ * Example: 1.0 for at least $1/1M output tokens
+ */
  minOutputPrice: number;
+ /**
+ * Maximum output token price per 1M tokens.
+ * Example: 30.0 for at most $30/1M output tokens
+ */
  maxOutputPrice: number;
  }
+ /** TokenRange defines min/max token constraints for filtering models. */
  export interface TokenRange {
+ /**
+ * Minimum context window size required.
+ * Example: 8192 for at least 8k context
+ */
  minContextWindow: number;
+ /**
+ * Maximum context window size allowed.
+ * Example: 128000 for at most 128k context
+ */
  maxContextWindow: number;
+ /**
+ * Minimum output token limit required.
+ * Example: 2048 for at least 2k output tokens
+ */
  minOutputTokens: number;
+ /**
+ * Maximum output token limit allowed.
+ * Example: 16384 for at most 16k output tokens
+ */
  maxOutputTokens: number;
  }
+ /** PerformanceRange defines minimum performance scores for filtering. */
  export interface PerformanceRange {
+ /**
+ * Minimum reasoning score (0-10).
+ * Example: 7.5 for strong reasoning models only
+ */
  minReasoningScore: number;
+ /**
+ * Minimum coding score (0-10).
+ * Example: 8.0 for excellent coding models only
+ */
  minCodingScore: number;
+ /**
+ * Minimum creative writing score (0-10).
+ * Example: 7.0 for good creative models only
+ */
  minCreativeScore: number;
+ /**
+ * Minimum factual accuracy score (0-10).
+ * Example: 8.5 for highly accurate models only
+ */
  minFactualScore: number;
+ /**
+ * Minimum mathematical ability score (0-10).
+ * Example: 9.0 for advanced math models only
+ */
  minMathScore: number;
  }
  export interface SearchModelsResponse {
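As a sketch of how the search types compose, here is a hypothetical `SearchModelsRequest` that uses the nested range messages documented above. The import path is assumed, and the concrete numbers simply reuse the doc-comment examples; unused constraints are left at their zero values.

```ts
import type { SearchModelsRequest } from "weave-typescript"; // assumed re-export path

// Hypothetical search: coding-focused chat/instruct models from OpenAI or
// Anthropic with tool calling and vision, within the documented price,
// token, and performance bounds.
const search: SearchModelsRequest = {
  query: "code generation",
  providers: ["openai", "anthropic"],
  modelTypes: ["chat", "instruct"],
  architectures: [],
  requiredCapabilities: ["function_calling", "vision"],
  requiredInputModalities: ["text", "image"],
  requiredOutputModalities: ["text"],
  priceRange: {
    minInputPrice: 0,
    maxInputPrice: 10.0,
    minOutputPrice: 0,
    maxOutputPrice: 30.0,
  },
  tokenRange: {
    minContextWindow: 32000,
    maxContextWindow: 128000,
    minOutputTokens: 2048,
    maxOutputTokens: 16384,
  },
  performanceRange: {
    minReasoningScore: 7.5,
    minCodingScore: 8.0,
    minCreativeScore: 0,
    minFactualScore: 0,
    minMathScore: 0,
  },
  pageSize: 25,
  pageToken: "",
  orderBy: "relevance",
  descending: true,
};
```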
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "weave-typescript",
- "version": "0.8.0",
+ "version": "0.10.0",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "files": [