@reverbia/sdk 1.0.0-next.20251114165311 → 1.0.0-next.20251119123747

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -0,0 +1,433 @@
+ type ClientOptions = {
+     baseUrl: `${string}://${string}` | (string & {});
+ };
+ type HandlersHealthResponse = {
+     /**
+      * Status indicates the service health status
+      */
+     status?: string;
+     /**
+      * Timestamp is the Unix timestamp of the response
+      */
+     timestamp?: number;
+     /**
+      * Version is the current API version
+      */
+     version?: string;
+ };
+ /**
+  * ExtraFields contains additional metadata
+  */
+ type LlmapiChatCompletionExtraFields = {
+     /**
+      * Latency is the request latency in milliseconds
+      */
+     latency?: number;
+     /**
+      * ModelRequested is the model that was requested
+      */
+     model_requested?: string;
+     /**
+      * Provider is the LLM provider used (e.g., "openai", "anthropic")
+      */
+     provider?: string;
+     /**
+      * RequestType is always "chat_completion"
+      */
+     request_type?: string;
+ };
+ type LlmapiChatCompletionRequest = {
+     /**
+      * Messages is the conversation history
+      */
+     messages?: Array<LlmapiMessage>;
+     /**
+      * Model is the model identifier
+      */
+     model?: string;
+     /**
+      * Stream indicates if response should be streamed
+      */
+     stream?: boolean;
+ };
+ type LlmapiChatCompletionResponse = {
+     /**
+      * Choices contains the completion choices
+      */
+     choices?: Array<LlmapiChoice>;
+     extra_fields?: LlmapiChatCompletionExtraFields;
+     /**
+      * ID is the completion ID
+      */
+     id?: string;
+     /**
+      * Model is the model used
+      */
+     model?: string;
+     usage?: LlmapiChatCompletionUsage;
+ };
+ /**
+  * Usage contains token usage information
+  */
+ type LlmapiChatCompletionUsage = {
+     /**
+      * CompletionTokens is the number of tokens in the completion
+      */
+     completion_tokens?: number;
+     /**
+      * PromptTokens is the number of tokens in the prompt
+      */
+     prompt_tokens?: number;
+     /**
+      * TotalTokens is the total number of tokens used
+      */
+     total_tokens?: number;
+ };
+ type LlmapiChoice = {
+     /**
+      * FinishReason indicates why the completion stopped
+      */
+     finish_reason?: string;
+     /**
+      * Index is the choice index
+      */
+     index?: number;
+     message?: LlmapiMessage;
+ };
+ type LlmapiEmbeddingData = {
+     /**
+      * Embedding vector
+      */
+     embedding?: Array<number>;
+     /**
+      * Index of the embedding
+      */
+     index?: number;
+     /**
+      * Object type identifier
+      */
+     object?: string;
+ };
+ /**
+  * ExtraFields contains additional metadata
+  */
+ type LlmapiEmbeddingExtraFields = {
+     /**
+      * ChunkIndex is the chunk index (0 for single requests)
+      */
+     chunk_index?: number;
+     /**
+      * Latency is the request latency in milliseconds
+      */
+     latency?: number;
+     /**
+      * ModelRequested is the model that was requested
+      */
+     model_requested?: string;
+     /**
+      * Provider is the LLM provider used (e.g., "openai", "anthropic")
+      */
+     provider?: string;
+     /**
+      * RequestType is always "embedding"
+      */
+     request_type?: string;
+ };
+ type LlmapiEmbeddingRequest = {
+     /**
+      * Dimensions is the number of dimensions the resulting output embeddings should have (optional)
+      */
+     dimensions?: number;
+     /**
+      * EncodingFormat is the format to return the embeddings in (optional: "float" or "base64")
+      */
+     encoding_format?: string;
+     /**
+      * Input text or tokens to embed (can be string, []string, []int, or [][]int)
+      */
+     input?: unknown;
+     /**
+      * Model identifier in 'provider/model' format
+      */
+     model?: string;
+ };
+ type LlmapiEmbeddingResponse = {
+     /**
+      * Data contains the embeddings
+      */
+     data?: Array<LlmapiEmbeddingData>;
+     extra_fields?: LlmapiEmbeddingExtraFields;
+     /**
+      * Model is the model used
+      */
+     model?: string;
+     /**
+      * Object is always "list"
+      */
+     object?: string;
+     usage?: LlmapiEmbeddingUsage;
+ };
+ /**
+  * Usage contains token usage information
+  */
+ type LlmapiEmbeddingUsage = {
+     /**
+      * PromptTokens is the number of tokens in the prompt
+      */
+     prompt_tokens?: number;
+     /**
+      * TotalTokens is the total number of tokens used
+      */
+     total_tokens?: number;
+ };
+ /**
+  * Message is the generated message
+  */
+ type LlmapiMessage = {
+     /**
+      * Content is the message content
+      */
+     content?: string;
+     role?: LlmapiRole;
+ };
+ type LlmapiModel = {
+     architecture?: LlmapiModelArchitecture;
+     /**
+      * CanonicalSlug is the canonical slug for the model
+      */
+     canonical_slug?: string;
+     /**
+      * ContextLength is the maximum context length in tokens
+      */
+     context_length?: number;
+     /**
+      * Created is the Unix timestamp of when the model was created
+      */
+     created?: number;
+     /**
+      * DefaultParameters contains default parameter values
+      */
+     default_parameters?: {
+         [key: string]: unknown;
+     };
+     /**
+      * Description describes the model and its capabilities
+      */
+     description?: string;
+     /**
+      * HuggingFaceID is the Hugging Face model identifier
+      */
+     hugging_face_id?: string;
+     /**
+      * ID is the model identifier (e.g., "openai/gpt-4")
+      */
+     id?: string;
+     /**
+      * MaxInputTokens is the maximum input tokens
+      */
+     max_input_tokens?: number;
+     /**
+      * MaxOutputTokens is the maximum output tokens
+      */
+     max_output_tokens?: number;
+     /**
+      * Name is the human-readable model name (optional)
+      */
+     name?: string;
+     /**
+      * OwnedBy is the organization that owns the model
+      */
+     owned_by?: string;
+     per_request_limits?: LlmapiModelPerRequestLimits;
+     pricing?: LlmapiModelPricing;
+     /**
+      * SupportedMethods is a list of supported API methods
+      */
+     supported_methods?: Array<string>;
+     /**
+      * SupportedParameters is a list of supported parameter names
+      */
+     supported_parameters?: Array<string>;
+     top_provider?: LlmapiModelTopProvider;
+ };
+ /**
+  * Architecture describes the model's technical capabilities
+  */
+ type LlmapiModelArchitecture = {
+     instruct_type?: string;
+     modality?: string;
+     prompt_formatting?: string;
+     tokenizer?: string;
+ };
+ /**
+  * PerRequestLimits contains rate limiting information
+  */
+ type LlmapiModelPerRequestLimits = {
+     completion_tokens?: number;
+     prompt_tokens?: number;
+ };
+ /**
+  * Pricing contains the pricing structure for using this model
+  */
+ type LlmapiModelPricing = {
+     completion?: string;
+     image?: string;
+     prompt?: string;
+     request?: string;
+ };
+ /**
+  * TopProvider contains configuration details for the primary provider
+  */
+ type LlmapiModelTopProvider = {
+     context_length?: number;
+     is_moderated?: boolean;
+     max_completion_tokens?: number;
+ };
+ /**
+  * ExtraFields contains additional metadata
+  */
+ type LlmapiModelsListExtraFields = {
+     /**
+      * ChunkIndex is the chunk index (0 for single requests)
+      */
+     chunk_index?: number;
+     /**
+      * Latency is the request latency in milliseconds
+      */
+     latency?: number;
+     /**
+      * RequestType is always "list_models"
+      */
+     request_type?: string;
+ };
+ type LlmapiModelsListResponse = {
+     /**
+      * Data contains the list of available models
+      */
+     data?: Array<LlmapiModel>;
+     extra_fields?: LlmapiModelsListExtraFields;
+     /**
+      * NextPageToken is the token to retrieve the next page of results (omitted if no more pages)
+      */
+     next_page_token?: string;
+ };
+ /**
+  * Role is the message role (system, user, assistant)
+  */
+ type LlmapiRole = string;
+ type ResponseErrorResponse = {
+     error?: string;
+ };
+ type PostApiV1ChatCompletionsData = {
+     /**
+      * Chat completion request
+      */
+     body: LlmapiChatCompletionRequest;
+     path?: never;
+     query?: never;
+     url: '/api/v1/chat/completions';
+ };
+ type PostApiV1ChatCompletionsErrors = {
+     /**
+      * Bad Request
+      */
+     400: ResponseErrorResponse;
+     /**
+      * Internal Server Error
+      */
+     500: ResponseErrorResponse;
+ };
+ type PostApiV1ChatCompletionsError = PostApiV1ChatCompletionsErrors[keyof PostApiV1ChatCompletionsErrors];
+ type PostApiV1ChatCompletionsResponses = {
+     /**
+      * OK
+      */
+     200: LlmapiChatCompletionResponse;
+ };
+ type PostApiV1ChatCompletionsResponse = PostApiV1ChatCompletionsResponses[keyof PostApiV1ChatCompletionsResponses];
+ type PostApiV1EmbeddingsData = {
+     /**
+      * Embedding request
+      */
+     body: LlmapiEmbeddingRequest;
+     path?: never;
+     query?: never;
+     url: '/api/v1/embeddings';
+ };
+ type PostApiV1EmbeddingsErrors = {
+     /**
+      * Bad Request
+      */
+     400: ResponseErrorResponse;
+     /**
+      * Internal Server Error
+      */
+     500: ResponseErrorResponse;
+ };
+ type PostApiV1EmbeddingsError = PostApiV1EmbeddingsErrors[keyof PostApiV1EmbeddingsErrors];
+ type PostApiV1EmbeddingsResponses = {
+     /**
+      * OK
+      */
+     200: LlmapiEmbeddingResponse;
+ };
+ type PostApiV1EmbeddingsResponse = PostApiV1EmbeddingsResponses[keyof PostApiV1EmbeddingsResponses];
+ type GetApiV1ModelsData = {
+     body?: never;
+     path?: never;
+     query?: {
+         /**
+          * Filter by provider (e.g., openai, anthropic)
+          */
+         provider?: string;
+         /**
+          * Number of models to return per page
+          */
+         page_size?: number;
+         /**
+          * Token to get next page of results
+          */
+         page_token?: string;
+     };
+     url: '/api/v1/models';
+ };
+ type GetApiV1ModelsErrors = {
+     /**
+      * Bad Request
+      */
+     400: ResponseErrorResponse;
+     /**
+      * Internal Server Error
+      */
+     500: ResponseErrorResponse;
+ };
+ type GetApiV1ModelsError = GetApiV1ModelsErrors[keyof GetApiV1ModelsErrors];
+ type GetApiV1ModelsResponses = {
+     /**
+      * OK
+      */
+     200: LlmapiModelsListResponse;
+ };
+ type GetApiV1ModelsResponse = GetApiV1ModelsResponses[keyof GetApiV1ModelsResponses];
+ type GetHealthData = {
+     body?: never;
+     path?: never;
+     query?: never;
+     url: '/health';
+ };
+ type GetHealthErrors = {
+     /**
+      * Internal Server Error
+      */
+     500: ResponseErrorResponse;
+ };
+ type GetHealthError = GetHealthErrors[keyof GetHealthErrors];
+ type GetHealthResponses = {
+     /**
+      * OK
+      */
+     200: HandlersHealthResponse;
+ };
+ type GetHealthResponse = GetHealthResponses[keyof GetHealthResponses];
+
+ export type { LlmapiModelsListResponse as A, LlmapiRole as B, ClientOptions as C, PostApiV1ChatCompletionsError as D, PostApiV1ChatCompletionsResponse as E, PostApiV1EmbeddingsError as F, GetApiV1ModelsData as G, HandlersHealthResponse as H, PostApiV1EmbeddingsResponse as I, GetApiV1ModelsError as J, GetApiV1ModelsResponse as K, LlmapiChatCompletionExtraFields as L, GetHealthError as M, GetHealthResponse as N, PostApiV1ChatCompletionsData as P, ResponseErrorResponse as R, PostApiV1ChatCompletionsResponses as a, PostApiV1ChatCompletionsErrors as b, PostApiV1EmbeddingsData as c, PostApiV1EmbeddingsResponses as d, PostApiV1EmbeddingsErrors as e, GetApiV1ModelsResponses as f, GetApiV1ModelsErrors as g, GetHealthData as h, GetHealthResponses as i, GetHealthErrors as j, LlmapiChatCompletionRequest as k, LlmapiChatCompletionResponse as l, LlmapiChatCompletionUsage as m, LlmapiChoice as n, LlmapiEmbeddingData as o, LlmapiEmbeddingExtraFields as p, LlmapiEmbeddingRequest as q, LlmapiEmbeddingResponse as r, LlmapiEmbeddingUsage as s, LlmapiMessage as t, LlmapiModel as u, LlmapiModelArchitecture as v, LlmapiModelPerRequestLimits as w, LlmapiModelPricing as x, LlmapiModelTopProvider as y, LlmapiModelsListExtraFields as z };
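
Compared with the previous types chunk (deleted below), this regenerated chunk adds the GET /api/v1/models operation plus usage and extra_fields metadata on chat completion responses. As a rough illustration of how these shapes line up with the HTTP API, here is a minimal sketch using plain fetch rather than the package's generated client; the base URL is a placeholder, and importing the types under their full names from @reverbia/sdk is an assumption (this chunk only re-exports them under minified aliases):

```ts
// Sketch only: assumes @reverbia/sdk re-exports the generated types under
// their full names; the endpoint path and error shape come from the diff above.
import type {
  LlmapiChatCompletionRequest,
  LlmapiChatCompletionResponse,
} from '@reverbia/sdk';

async function chatCompletion(
  baseUrl: string, // assumed deployment URL, not part of this diff
  request: LlmapiChatCompletionRequest,
): Promise<LlmapiChatCompletionResponse> {
  const res = await fetch(`${baseUrl}/api/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(request),
  });
  if (!res.ok) {
    // Both documented error statuses (400, 500) carry a ResponseErrorResponse body.
    const body = (await res.json()) as { error?: string };
    throw new Error(body.error ?? `HTTP ${res.status}`);
  }
  return (await res.json()) as LlmapiChatCompletionResponse;
}

// Example call; 'openai/gpt-4' mirrors the 'provider/model' format documented above.
const reply = await chatCompletion('https://llm.example.com', {
  model: 'openai/gpt-4',
  messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(reply.choices?.[0]?.message?.content);
console.log(reply.usage?.total_tokens, reply.extra_fields?.latency);
```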
@@ -1,5 +1,5 @@
  import { UIMessage } from 'ai';
- import { o as LlmapiMessage } from '../types.gen-DENXHZhp.mjs';
+ import { t as LlmapiMessage } from '../types.gen-Ar4CxyYC.mjs';

  /**
   * Converts an array of Vercel AI {@link UIMessage} objects into the
@@ -1,5 +1,5 @@
  import { UIMessage } from 'ai';
- import { o as LlmapiMessage } from '../types.gen-DENXHZhp.js';
+ import { t as LlmapiMessage } from '../types.gen-Ar4CxyYC.js';

  /**
   * Converts an array of Vercel AI {@link UIMessage} objects into the
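
The only change to both builds of this helper is the import specifier: regenerating the types chunk changed its content hash (DENXHZhp → Ar4CxyYC) and reshuffled the minified export aliases, so LlmapiMessage is now re-exported as t instead of o (compare the export maps of the two chunks in this diff). For orientation, a converter matching the truncated JSDoc above might look roughly like this; it is a sketch, not the package's actual implementation, and toLlmapiMessages is a hypothetical name:

```ts
import type { UIMessage } from 'ai';

// Shape of LlmapiMessage as declared in the regenerated types chunk above.
type LlmapiMessage = { content?: string; role?: string };

// Hypothetical converter: AI SDK v5 UIMessages keep their text in `parts`,
// so the text parts of each message are concatenated into one content string.
export function toLlmapiMessages(messages: UIMessage[]): LlmapiMessage[] {
  return messages.map((message) => ({
    role: message.role,
    content: message.parts
      .map((part) => (part.type === 'text' ? part.text : ''))
      .join(''),
  }));
}
```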
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@reverbia/sdk",
-   "version": "1.0.0-next.20251114165311",
+   "version": "1.0.0-next.20251119123747",
    "description": "",
    "main": "./dist/client/index.cjs",
    "module": "./dist/client/index.mjs",
@@ -41,7 +41,7 @@
  },
  "homepage": "https://github.com/zeta-chain/ai-sdk#readme",
  "dependencies": {
-   "@reverbia/portal": "^1.0.0-next.20251113192414",
+   "@reverbia/portal": "^1.0.0-next.20251118022211",
    "ai": "5.0.93"
  },
  "devDependencies": {
@@ -1,235 +0,0 @@
- type ClientOptions = {
-     baseUrl: `${string}://${string}` | (string & {});
- };
- type HandlersHealthResponse = {
-     /**
-      * Status indicates the service health status
-      */
-     status?: string;
-     /**
-      * Timestamp is the Unix timestamp of the response
-      */
-     timestamp?: number;
-     /**
-      * Version is the current API version
-      */
-     version?: string;
- };
- type LlmapiChatCompletionRequest = {
-     /**
-      * Messages is the conversation history
-      */
-     messages?: Array<LlmapiMessage>;
-     /**
-      * Model is the model identifier
-      */
-     model?: string;
-     /**
-      * Stream indicates if response should be streamed
-      */
-     stream?: boolean;
- };
- type LlmapiChatCompletionResponse = {
-     /**
-      * Choices contains the completion choices
-      */
-     choices?: Array<LlmapiChoice>;
-     /**
-      * ID is the completion ID
-      */
-     id?: string;
-     /**
-      * Model is the model used
-      */
-     model?: string;
- };
- type LlmapiChoice = {
-     /**
-      * FinishReason indicates why the completion stopped
-      */
-     finish_reason?: string;
-     /**
-      * Index is the choice index
-      */
-     index?: number;
-     message?: LlmapiMessage;
- };
- type LlmapiEmbeddingData = {
-     /**
-      * Embedding vector
-      */
-     embedding?: Array<number>;
-     /**
-      * Index of the embedding
-      */
-     index?: number;
-     /**
-      * Object type identifier
-      */
-     object?: string;
- };
- /**
-  * ExtraFields contains additional metadata
-  */
- type LlmapiEmbeddingExtraFields = {
-     /**
-      * ChunkIndex is the chunk index (0 for single requests)
-      */
-     chunk_index?: number;
-     /**
-      * Latency is the request latency in milliseconds
-      */
-     latency?: number;
-     /**
-      * ModelRequested is the model that was requested
-      */
-     model_requested?: string;
-     /**
-      * Provider is the LLM provider used (e.g., "openai", "anthropic")
-      */
-     provider?: string;
-     /**
-      * RequestType is always "embedding"
-      */
-     request_type?: string;
- };
- type LlmapiEmbeddingRequest = {
-     /**
-      * Dimensions is the number of dimensions the resulting output embeddings should have (optional)
-      */
-     dimensions?: number;
-     /**
-      * EncodingFormat is the format to return the embeddings in (optional: "float" or "base64")
-      */
-     encoding_format?: string;
-     /**
-      * Input text or tokens to embed (can be string, []string, []int, or [][]int)
-      */
-     input?: unknown;
-     /**
-      * Model identifier in 'provider/model' format
-      */
-     model?: string;
- };
- type LlmapiEmbeddingResponse = {
-     /**
-      * Data contains the embeddings
-      */
-     data?: Array<LlmapiEmbeddingData>;
-     extra_fields?: LlmapiEmbeddingExtraFields;
-     /**
-      * Model is the model used
-      */
-     model?: string;
-     /**
-      * Object is always "list"
-      */
-     object?: string;
-     usage?: LlmapiEmbeddingUsage;
- };
- /**
-  * Usage contains token usage information
-  */
- type LlmapiEmbeddingUsage = {
-     /**
-      * PromptTokens is the number of tokens in the prompt
-      */
-     prompt_tokens?: number;
-     /**
-      * TotalTokens is the total number of tokens used
-      */
-     total_tokens?: number;
- };
- /**
-  * Message is the generated message
-  */
- type LlmapiMessage = {
-     /**
-      * Content is the message content
-      */
-     content?: string;
-     role?: LlmapiRole;
- };
- /**
-  * Role is the message role (system, user, assistant)
-  */
- type LlmapiRole = string;
- type ResponseErrorResponse = {
-     error?: string;
- };
- type PostApiV1ChatCompletionsData = {
-     /**
-      * Chat completion request
-      */
-     body: LlmapiChatCompletionRequest;
-     path?: never;
-     query?: never;
-     url: '/api/v1/chat/completions';
- };
- type PostApiV1ChatCompletionsErrors = {
-     /**
-      * Bad Request
-      */
-     400: ResponseErrorResponse;
-     /**
-      * Internal Server Error
-      */
-     500: ResponseErrorResponse;
- };
- type PostApiV1ChatCompletionsError = PostApiV1ChatCompletionsErrors[keyof PostApiV1ChatCompletionsErrors];
- type PostApiV1ChatCompletionsResponses = {
-     /**
-      * OK
-      */
-     200: LlmapiChatCompletionResponse;
- };
- type PostApiV1ChatCompletionsResponse = PostApiV1ChatCompletionsResponses[keyof PostApiV1ChatCompletionsResponses];
- type PostApiV1EmbeddingsData = {
-     /**
-      * Embedding request
-      */
-     body: LlmapiEmbeddingRequest;
-     path?: never;
-     query?: never;
-     url: '/api/v1/embeddings';
- };
- type PostApiV1EmbeddingsErrors = {
-     /**
-      * Bad Request
-      */
-     400: ResponseErrorResponse;
-     /**
-      * Internal Server Error
-      */
-     500: ResponseErrorResponse;
- };
- type PostApiV1EmbeddingsError = PostApiV1EmbeddingsErrors[keyof PostApiV1EmbeddingsErrors];
- type PostApiV1EmbeddingsResponses = {
-     /**
-      * OK
-      */
-     200: LlmapiEmbeddingResponse;
- };
- type PostApiV1EmbeddingsResponse = PostApiV1EmbeddingsResponses[keyof PostApiV1EmbeddingsResponses];
- type GetHealthData = {
-     body?: never;
-     path?: never;
-     query?: never;
-     url: '/health';
- };
- type GetHealthErrors = {
-     /**
-      * Internal Server Error
-      */
-     500: ResponseErrorResponse;
- };
- type GetHealthError = GetHealthErrors[keyof GetHealthErrors];
- type GetHealthResponses = {
-     /**
-      * OK
-      */
-     200: HandlersHealthResponse;
- };
- type GetHealthResponse = GetHealthResponses[keyof GetHealthResponses];
-
- export type { ClientOptions as C, GetHealthData as G, HandlersHealthResponse as H, LlmapiChatCompletionRequest as L, PostApiV1ChatCompletionsData as P, ResponseErrorResponse as R, PostApiV1ChatCompletionsResponses as a, PostApiV1ChatCompletionsErrors as b, PostApiV1EmbeddingsData as c, PostApiV1EmbeddingsResponses as d, PostApiV1EmbeddingsErrors as e, GetHealthResponses as f, GetHealthErrors as g, LlmapiChatCompletionResponse as h, LlmapiChoice as i, LlmapiEmbeddingData as j, LlmapiEmbeddingExtraFields as k, LlmapiEmbeddingRequest as l, LlmapiEmbeddingResponse as m, LlmapiEmbeddingUsage as n, LlmapiMessage as o, LlmapiRole as p, PostApiV1ChatCompletionsError as q, PostApiV1ChatCompletionsResponse as r, PostApiV1EmbeddingsError as s, PostApiV1EmbeddingsResponse as t, GetHealthError as u, GetHealthResponse as v };
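
The deleted chunk makes the version-to-version delta easy to read: relative to it, the new chunk adds LlmapiChatCompletionUsage and LlmapiChatCompletionExtraFields on chat responses, the LlmapiModel family, and the paginated GET /api/v1/models operation. A minimal pagination sketch against that new operation, again with plain fetch; the base URL is assumed and listAllModels is a hypothetical helper, not part of the SDK:

```ts
// Hypothetical pagination sketch over the new GET /api/v1/models operation.
// Query parameter names (provider, page_size, page_token) come from
// GetApiV1ModelsData above; the field shapes mirror LlmapiModelsListResponse.
type Model = { id?: string; owned_by?: string };
type ModelsPage = { data?: Model[]; next_page_token?: string };

async function* listAllModels(baseUrl: string, provider?: string) {
  let pageToken: string | undefined;
  do {
    const query = new URLSearchParams({ page_size: '100' });
    if (provider) query.set('provider', provider);
    if (pageToken) query.set('page_token', pageToken);
    const res = await fetch(`${baseUrl}/api/v1/models?${query}`);
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
    const page = (await res.json()) as ModelsPage;
    yield* page.data ?? [];
    // next_page_token is omitted on the last page, which ends the loop.
    pageToken = page.next_page_token;
  } while (pageToken);
}
```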