@reverbia/sdk 1.0.0-next.20251119170952 → 1.0.0-next.20251121094738
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{client/index.cjs → index.cjs} +4 -4
- package/dist/{client/index.d.mts → index.d.mts} +433 -4
- package/dist/{client/index.d.ts → index.d.ts} +433 -4
- package/dist/react/index.cjs +1531 -0
- package/dist/react/index.d.mts +244 -0
- package/dist/react/index.d.ts +244 -0
- package/dist/react/index.mjs +1492 -0
- package/dist/vercel/index.cjs +3 -3
- package/dist/vercel/index.d.mts +15 -1
- package/dist/vercel/index.d.ts +15 -1
- package/package.json +25 -11
- package/dist/types.gen-Ar4CxyYC.d.mts +0 -433
- package/dist/types.gen-Ar4CxyYC.d.ts +0 -433
- package/dist/{client/index.mjs → index.mjs} +0 -0
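The client bundle moves from `dist/client/` to the package root, and new `dist/react` and `dist/vercel` builds appear. A minimal import sketch, under the assumption that the updated package.json `exports` map (changed in this diff, contents not shown) mirrors the dist layout:

```ts
// Root entry (previously dist/client): the generated API client.
import { getHealth, postApiV1ChatCompletions } from "@reverbia/sdk";

// Subpath entries implied by the new dist folders; these specifiers are an
// assumption, since the package.json "exports" field is not part of this diff.
// import { ... } from "@reverbia/sdk/react";
// import { ... } from "@reverbia/sdk/vercel";
```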
package/dist/{client/index.cjs → index.cjs}

@@ -17,15 +17,15 @@ var __copyProps = (to, from, except, desc) => {
 };
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
-// src/
-var
-__export(
+// src/index.ts
+var index_exports = {};
+__export(index_exports, {
   getApiV1Models: () => getApiV1Models,
   getHealth: () => getHealth,
   postApiV1ChatCompletions: () => postApiV1ChatCompletions,
   postApiV1Embeddings: () => postApiV1Embeddings
 });
-module.exports = __toCommonJS(
+module.exports = __toCommonJS(index_exports);
 
 // src/client/core/bodySerializer.gen.ts
 var jsonBodySerializer = {
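The CJS entry now builds from `src/index.ts` and exports the four generated operations directly from the package root. A rough usage sketch, assuming a default client/base URL is configured elsewhere (client setup is not visible in this diff):

```ts
import { getHealth, getApiV1Models } from "@reverbia/sdk";

// Both operations take an optional options object per the declarations later in
// this diff; passing query parameters this way, and the shape of the awaited
// result (data/error envelope vs. thrown errors), are assumptions, not shown here.
async function smokeTest() {
  const health = await getHealth();
  const models = await getApiV1Models({ query: { provider: "openai" } });
  console.log(health, models);
}
```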
package/dist/{client/index.d.mts → index.d.mts}

@@ -1,5 +1,434 @@
-
-
+type ClientOptions$1 = {
+    baseUrl: `${string}://${string}` | (string & {});
+};
+type HandlersHealthResponse = {
+    /**
+     * Status indicates the service health status
+     */
+    status?: string;
+    /**
+     * Timestamp is the Unix timestamp of the response
+     */
+    timestamp?: number;
+    /**
+     * Version is the current API version
+     */
+    version?: string;
+};
+/**
+ * ExtraFields contains additional metadata
+ */
+type LlmapiChatCompletionExtraFields = {
+    /**
+     * Latency is the request latency in milliseconds
+     */
+    latency?: number;
+    /**
+     * ModelRequested is the model that was requested
+     */
+    model_requested?: string;
+    /**
+     * Provider is the LLM provider used (e.g., "openai", "anthropic")
+     */
+    provider?: string;
+    /**
+     * RequestType is always "chat_completion"
+     */
+    request_type?: string;
+};
+type LlmapiChatCompletionRequest = {
+    /**
+     * Messages is the conversation history
+     */
+    messages?: Array<LlmapiMessage>;
+    /**
+     * Model is the model identifier
+     */
+    model?: string;
+    /**
+     * Stream indicates if response should be streamed
+     */
+    stream?: boolean;
+};
+type LlmapiChatCompletionResponse = {
+    /**
+     * Choices contains the completion choices
+     */
+    choices?: Array<LlmapiChoice>;
+    extra_fields?: LlmapiChatCompletionExtraFields;
+    /**
+     * ID is the completion ID
+     */
+    id?: string;
+    /**
+     * Model is the model used
+     */
+    model?: string;
+    usage?: LlmapiChatCompletionUsage;
+};
+/**
+ * Usage contains token usage information
+ */
+type LlmapiChatCompletionUsage = {
+    /**
+     * CompletionTokens is the number of tokens in the completion
+     */
+    completion_tokens?: number;
+    /**
+     * PromptTokens is the number of tokens in the prompt
+     */
+    prompt_tokens?: number;
+    /**
+     * TotalTokens is the total number of tokens used
+     */
+    total_tokens?: number;
+};
+type LlmapiChoice = {
+    /**
+     * FinishReason indicates why the completion stopped
+     */
+    finish_reason?: string;
+    /**
+     * Index is the choice index
+     */
+    index?: number;
+    message?: LlmapiMessage;
+};
+type LlmapiEmbeddingData = {
+    /**
+     * Embedding vector
+     */
+    embedding?: Array<number>;
+    /**
+     * Index of the embedding
+     */
+    index?: number;
+    /**
+     * Object type identifier
+     */
+    object?: string;
+};
+/**
+ * ExtraFields contains additional metadata
+ */
+type LlmapiEmbeddingExtraFields = {
+    /**
+     * ChunkIndex is the chunk index (0 for single requests)
+     */
+    chunk_index?: number;
+    /**
+     * Latency is the request latency in milliseconds
+     */
+    latency?: number;
+    /**
+     * ModelRequested is the model that was requested
+     */
+    model_requested?: string;
+    /**
+     * Provider is the LLM provider used (e.g., "openai", "anthropic")
+     */
+    provider?: string;
+    /**
+     * RequestType is always "embedding"
+     */
+    request_type?: string;
+};
+type LlmapiEmbeddingRequest = {
+    /**
+     * Dimensions is the number of dimensions the resulting output embeddings should have (optional)
+     */
+    dimensions?: number;
+    /**
+     * EncodingFormat is the format to return the embeddings in (optional: "float" or "base64")
+     */
+    encoding_format?: string;
+    /**
+     * Input text or tokens to embed (can be string, []string, []int, or [][]int)
+     */
+    input?: unknown;
+    /**
+     * Model identifier in 'provider/model' format
+     */
+    model?: string;
+};
+type LlmapiEmbeddingResponse = {
+    /**
+     * Data contains the embeddings
+     */
+    data?: Array<LlmapiEmbeddingData>;
+    extra_fields?: LlmapiEmbeddingExtraFields;
+    /**
+     * Model is the model used
+     */
+    model?: string;
+    /**
+     * Object is always "list"
+     */
+    object?: string;
+    usage?: LlmapiEmbeddingUsage;
+};
+/**
+ * Usage contains token usage information
+ */
+type LlmapiEmbeddingUsage = {
+    /**
+     * PromptTokens is the number of tokens in the prompt
+     */
+    prompt_tokens?: number;
+    /**
+     * TotalTokens is the total number of tokens used
+     */
+    total_tokens?: number;
+};
+/**
+ * Message is the generated message
+ */
+type LlmapiMessage = {
+    /**
+     * Content is the message content
+     */
+    content?: string;
+    role?: LlmapiRole;
+};
+type LlmapiModel = {
+    architecture?: LlmapiModelArchitecture;
+    /**
+     * CanonicalSlug is the canonical slug for the model
+     */
+    canonical_slug?: string;
+    /**
+     * ContextLength is the maximum context length in tokens
+     */
+    context_length?: number;
+    /**
+     * Created is the Unix timestamp of when the model was created
+     */
+    created?: number;
+    /**
+     * DefaultParameters contains default parameter values
+     */
+    default_parameters?: {
+        [key: string]: unknown;
+    };
+    /**
+     * Description describes the model and its capabilities
+     */
+    description?: string;
+    /**
+     * HuggingFaceID is the Hugging Face model identifier
+     */
+    hugging_face_id?: string;
+    /**
+     * ID is the model identifier (e.g., "openai/gpt-4")
+     */
+    id?: string;
+    /**
+     * MaxInputTokens is the maximum input tokens
+     */
+    max_input_tokens?: number;
+    /**
+     * MaxOutputTokens is the maximum output tokens
+     */
+    max_output_tokens?: number;
+    /**
+     * Name is the human-readable model name (optional)
+     */
+    name?: string;
+    /**
+     * OwnedBy is the organization that owns the model
+     */
+    owned_by?: string;
+    per_request_limits?: LlmapiModelPerRequestLimits;
+    pricing?: LlmapiModelPricing;
+    /**
+     * SupportedMethods is a list of supported API methods
+     */
+    supported_methods?: Array<string>;
+    /**
+     * SupportedParameters is a list of supported parameter names
+     */
+    supported_parameters?: Array<string>;
+    top_provider?: LlmapiModelTopProvider;
+};
+/**
+ * Architecture describes the model's technical capabilities
+ */
+type LlmapiModelArchitecture = {
+    instruct_type?: string;
+    modality?: string;
+    prompt_formatting?: string;
+    tokenizer?: string;
+};
+/**
+ * PerRequestLimits contains rate limiting information
+ */
+type LlmapiModelPerRequestLimits = {
+    completion_tokens?: number;
+    prompt_tokens?: number;
+};
+/**
+ * Pricing contains the pricing structure for using this model
+ */
+type LlmapiModelPricing = {
+    completion?: string;
+    image?: string;
+    prompt?: string;
+    request?: string;
+};
+/**
+ * TopProvider contains configuration details for the primary provider
+ */
+type LlmapiModelTopProvider = {
+    context_length?: number;
+    is_moderated?: boolean;
+    max_completion_tokens?: number;
+};
+/**
+ * ExtraFields contains additional metadata
+ */
+type LlmapiModelsListExtraFields = {
+    /**
+     * ChunkIndex is the chunk index (0 for single requests)
+     */
+    chunk_index?: number;
+    /**
+     * Latency is the request latency in milliseconds
+     */
+    latency?: number;
+    /**
+     * RequestType is always "list_models"
+     */
+    request_type?: string;
+};
+type LlmapiModelsListResponse = {
+    /**
+     * Data contains the list of available models
+     */
+    data?: Array<LlmapiModel>;
+    extra_fields?: LlmapiModelsListExtraFields;
+    /**
+     * NextPageToken is the token to retrieve the next page of results (omitted if no more pages)
+     */
+    next_page_token?: string;
+};
+/**
+ * Role is the message role (system, user, assistant)
+ */
+type LlmapiRole = string;
+type ResponseErrorResponse = {
+    error?: string;
+};
+type PostApiV1ChatCompletionsData = {
+    /**
+     * Chat completion request
+     */
+    body: LlmapiChatCompletionRequest;
+    path?: never;
+    query?: never;
+    url: '/api/v1/chat/completions';
+};
+type PostApiV1ChatCompletionsErrors = {
+    /**
+     * Bad Request
+     */
+    400: ResponseErrorResponse;
+    /**
+     * Internal Server Error
+     */
+    500: ResponseErrorResponse;
+};
+type PostApiV1ChatCompletionsError = PostApiV1ChatCompletionsErrors[keyof PostApiV1ChatCompletionsErrors];
+type PostApiV1ChatCompletionsResponses = {
+    /**
+     * OK
+     */
+    200: LlmapiChatCompletionResponse | string;
+};
+type PostApiV1ChatCompletionsResponse = PostApiV1ChatCompletionsResponses[keyof PostApiV1ChatCompletionsResponses];
+type PostApiV1EmbeddingsData = {
+    /**
+     * Embedding request
+     */
+    body: LlmapiEmbeddingRequest;
+    path?: never;
+    query?: never;
+    url: '/api/v1/embeddings';
+};
+type PostApiV1EmbeddingsErrors = {
+    /**
+     * Bad Request
+     */
+    400: ResponseErrorResponse;
+    /**
+     * Internal Server Error
+     */
+    500: ResponseErrorResponse;
+};
+type PostApiV1EmbeddingsError = PostApiV1EmbeddingsErrors[keyof PostApiV1EmbeddingsErrors];
+type PostApiV1EmbeddingsResponses = {
+    /**
+     * OK
+     */
+    200: LlmapiEmbeddingResponse;
+};
+type PostApiV1EmbeddingsResponse = PostApiV1EmbeddingsResponses[keyof PostApiV1EmbeddingsResponses];
+type GetApiV1ModelsData = {
+    body?: never;
+    path?: never;
+    query?: {
+        /**
+         * Filter by provider (e.g., openai, anthropic)
+         */
+        provider?: string;
+        /**
+         * Number of models to return per page
+         */
+        page_size?: number;
+        /**
+         * Token to get next page of results
+         */
+        page_token?: string;
+    };
+    url: '/api/v1/models';
+};
+type GetApiV1ModelsErrors = {
+    /**
+     * Bad Request
+     */
+    400: ResponseErrorResponse;
+    /**
+     * Internal Server Error
+     */
+    500: ResponseErrorResponse;
+};
+type GetApiV1ModelsError = GetApiV1ModelsErrors[keyof GetApiV1ModelsErrors];
+type GetApiV1ModelsResponses = {
+    /**
+     * OK
+     */
+    200: LlmapiModelsListResponse;
+};
+type GetApiV1ModelsResponse = GetApiV1ModelsResponses[keyof GetApiV1ModelsResponses];
+type GetHealthData = {
+    body?: never;
+    path?: never;
+    query?: never;
+    url: '/health';
+};
+type GetHealthErrors = {
+    /**
+     * Internal Server Error
+     */
+    500: ResponseErrorResponse;
+};
+type GetHealthError = GetHealthErrors[keyof GetHealthErrors];
+type GetHealthResponses = {
+    /**
+     * OK
+     */
+    200: HandlersHealthResponse;
+};
+type GetHealthResponse = GetHealthResponses[keyof GetHealthResponses];
 
 type AuthToken = string | undefined;
 interface Auth {
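These declarations (previously emitted as the separate types.gen-Ar4CxyYC.d.mts/.d.ts files, now inlined) are the request/response shapes the gateway client exposes. A small sketch typing a chat completion round trip against them; the model id and messages are placeholders, and the bare `@reverbia/sdk` import path assumes the root export shown at the end of this diff resolves there:

```ts
import type {
  LlmapiChatCompletionRequest,
  LlmapiChatCompletionResponse,
  LlmapiMessage,
} from "@reverbia/sdk";

// Conversation history; LlmapiRole is a plain string, so "system"/"user" type-check.
const messages: LlmapiMessage[] = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "Summarize this changelog entry." },
];

// All fields are optional in the generated types; "openai/gpt-4" is only a
// placeholder in the 'provider/model' format the doc comments describe.
const request: LlmapiChatCompletionRequest = {
  model: "openai/gpt-4",
  messages,
  stream: false,
};

// usage and its fields are optional, so callers must handle undefined.
function totalTokens(response: LlmapiChatCompletionResponse): number {
  return response.usage?.total_tokens ?? 0;
}
```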
@@ -306,7 +735,7 @@ type Options<TData extends TDataShape = TDataShape, ThrowOnError extends boolean
 /**
  * Create chat completion
  *
- * Generates a chat completion using the configured gateway.
+ * Generates a chat completion using the configured gateway. Supports streaming when stream=true.
  */
 declare const postApiV1ChatCompletions: <ThrowOnError extends boolean = false>(options: Options<PostApiV1ChatCompletionsData, ThrowOnError>) => RequestResult<PostApiV1ChatCompletionsResponses, PostApiV1ChatCompletionsErrors, ThrowOnError>;
 /**
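The only change in this hunk is the doc comment: streaming is now documented for stream=true. A hedged sketch of what such a call might look like; note that the 200 response type is declared as `LlmapiChatCompletionResponse | string`, and how the stream is actually surfaced (SSE, chunked text, or otherwise) is not visible in this diff:

```ts
import { postApiV1ChatCompletions } from "@reverbia/sdk";

async function streamOnce() {
  // The body shape follows PostApiV1ChatCompletionsData; passing it via a `body`
  // option is an assumption about the generated client's Options type.
  const result = await postApiV1ChatCompletions({
    body: {
      model: "openai/gpt-4", // placeholder model id
      messages: [{ role: "user", content: "Hello" }],
      stream: true,
    },
  });
  // With stream=true the 200 payload may arrive as a raw string rather than a
  // parsed LlmapiChatCompletionResponse, per the response union above.
  return result;
}
```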
@@ -328,4 +757,4 @@ declare const getApiV1Models: <ThrowOnError extends boolean = false>(options?: O
  */
 declare const getHealth: <ThrowOnError extends boolean = false>(options?: Options<GetHealthData, ThrowOnError>) => RequestResult<GetHealthResponses, GetHealthErrors, ThrowOnError>;
 
-export { GetApiV1ModelsData, GetApiV1ModelsErrors, GetApiV1ModelsResponses, GetHealthData, GetHealthErrors, GetHealthResponses, type Options, PostApiV1ChatCompletionsData, PostApiV1ChatCompletionsErrors, PostApiV1ChatCompletionsResponses, PostApiV1EmbeddingsData, PostApiV1EmbeddingsErrors, PostApiV1EmbeddingsResponses, getApiV1Models, getHealth, postApiV1ChatCompletions, postApiV1Embeddings };
+export { type ClientOptions$1 as ClientOptions, type GetApiV1ModelsData, type GetApiV1ModelsError, type GetApiV1ModelsErrors, type GetApiV1ModelsResponse, type GetApiV1ModelsResponses, type GetHealthData, type GetHealthError, type GetHealthErrors, type GetHealthResponse, type GetHealthResponses, type HandlersHealthResponse, type LlmapiChatCompletionExtraFields, type LlmapiChatCompletionRequest, type LlmapiChatCompletionResponse, type LlmapiChatCompletionUsage, type LlmapiChoice, type LlmapiEmbeddingData, type LlmapiEmbeddingExtraFields, type LlmapiEmbeddingRequest, type LlmapiEmbeddingResponse, type LlmapiEmbeddingUsage, type LlmapiMessage, type LlmapiModel, type LlmapiModelArchitecture, type LlmapiModelPerRequestLimits, type LlmapiModelPricing, type LlmapiModelTopProvider, type LlmapiModelsListExtraFields, type LlmapiModelsListResponse, type LlmapiRole, type Options, type PostApiV1ChatCompletionsData, type PostApiV1ChatCompletionsError, type PostApiV1ChatCompletionsErrors, type PostApiV1ChatCompletionsResponse, type PostApiV1ChatCompletionsResponses, type PostApiV1EmbeddingsData, type PostApiV1EmbeddingsError, type PostApiV1EmbeddingsErrors, type PostApiV1EmbeddingsResponse, type PostApiV1EmbeddingsResponses, type ResponseErrorResponse, getApiV1Models, getHealth, postApiV1ChatCompletions, postApiV1Embeddings };
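The export list grows from the four operations plus a handful of request/response interfaces to the full set of generated types, re-exported as `type`, with `ClientOptions$1` aliased to `ClientOptions`. A short sketch of the downstream typing this unlocks; all values are placeholders:

```ts
import type {
  ClientOptions,
  GetApiV1ModelsData,
  LlmapiEmbeddingRequest,
} from "@reverbia/sdk";

// Gateway location; the template-literal baseUrl type accepts any URL-like string.
const options: ClientOptions = { baseUrl: "https://gateway.example.com" };

// Embedding request typed against the newly exported shape; model id and inputs
// are placeholders.
const embeddingRequest: LlmapiEmbeddingRequest = {
  model: "openai/text-embedding-3-small",
  input: ["first chunk of text", "second chunk of text"],
  encoding_format: "float",
};

// Query parameters for GET /api/v1/models, extracted from the generated Data type.
const modelsQuery: NonNullable<GetApiV1ModelsData["query"]> = {
  provider: "anthropic",
  page_size: 50,
};
```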