@bodhiapp/ts-client 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
@@ -10,12 +10,12 @@ export type Alias = (UserAlias & {
10
10
  source: 'api';
11
11
  });
12
12
  export type ApiAlias = {
13
+ id: string;
13
14
  api_format: ApiFormat;
14
15
  base_url: string;
15
- created_at: string;
16
- id: string;
17
16
  models: Array<string>;
18
17
  prefix?: string | null;
18
+ created_at: string;
19
19
  updated_at: string;
20
20
  };
21
21
  /**
@@ -28,34 +28,51 @@ export type ApiFormat = 'openai' | 'placeholder';
28
28
  export type ApiFormatsResponse = {
29
29
  data: Array<ApiFormat>;
30
30
  };
31
+ /**
32
+ * Validated API key wrapper - validates length when Some, allows None for public APIs
33
+ */
34
+ export type ApiKey = string | null;
35
+ /**
36
+ * Represents an API key update action for API model updates
37
+ */
38
+ export type ApiKeyUpdateAction = {
39
+ action: 'keep';
40
+ } | {
41
+ /**
42
+ * Set a new API key (or add one if none exists) - can be None for public APIs
43
+ */
44
+ value: ApiKey;
45
+ action: 'set';
46
+ };
31
47
  /**
32
48
  * Response containing API model configuration
33
49
  */
34
50
  export type ApiModelResponse = {
51
+ id: string;
35
52
  api_format: ApiFormat;
36
- api_key_masked: string;
37
53
  base_url: string;
38
- created_at: string;
39
- id: string;
54
+ api_key_masked?: string | null;
40
55
  models: Array<string>;
41
56
  prefix?: string | null;
57
+ created_at: string;
42
58
  updated_at: string;
43
59
  };
44
60
  export type ApiToken = {
45
- created_at: string;
46
61
  id: string;
62
+ user_id: string;
47
63
  name: string;
48
- status: TokenStatus;
64
+ token_prefix: string;
49
65
  token_hash: string;
50
- token_id: string;
66
+ scopes: string;
67
+ status: TokenStatus;
68
+ created_at: string;
51
69
  updated_at: string;
52
- user_id: string;
53
70
  };
54
71
  export type ApiTokenResponse = {
55
72
  /**
56
- * API token with bapp_ prefix for programmatic access
73
+ * API token with bodhiapp_ prefix for programmatic access
57
74
  */
58
- offline_token: string;
75
+ token: string;
59
76
  };
60
77
  export type AppAccessRequest = {
61
78
  app_client_id: string;
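Editor's note: the hunk above renames `ApiTokenResponse.offline_token` to `token` (with the documented prefix changing from `bapp_` to `bodhiapp_`) and introduces the `ApiKey` alias. A minimal migration sketch in TypeScript; the root import path is an assumption (adjust to however the package actually exposes these types):

import type { ApiTokenResponse, ApiKey } from '@bodhiapp/ts-client';

// 0.1.6 exposed `offline_token` (prefixed "bapp_"); 0.1.8 renames the field to
// `token` and the prefix to "bodhiapp_", so persisting code needs a small update.
function persistToken(response: ApiTokenResponse): string {
  return response.token; // was: response.offline_token
}

// ApiKey is a plain alias: a string when a key is present, null for public APIs.
const publicApiKey: ApiKey = null;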
@@ -67,16 +84,20 @@ export type AppAccessResponse = {
67
84
  * Application information and status
68
85
  */
69
86
  export type AppInfo = {
70
- /**
71
- * Current application setup and operational status
72
- */
73
- status: AppStatus;
74
87
  /**
75
88
  * Application version number (semantic versioning)
76
89
  */
77
90
  version: string;
91
+ /**
92
+ * Git commit SHA of the build
93
+ */
94
+ commit_sha: string;
95
+ /**
96
+ * Current application setup and operational status
97
+ */
98
+ status: AppStatus;
78
99
  };
79
- export type AppRole = Role | TokenScope | UserScope;
100
+ export type AppRole = ResourceRole | TokenScope | UserScope;
80
101
  export type AppStatus = 'setup' | 'ready' | 'resource-admin';
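Editor's note: `AppInfo` now carries `commit_sha` alongside `version`, and `AppRole` is rebuilt on the renamed `ResourceRole` union. A small sketch of reading the richer build info (import path assumed):

import type { AppInfo } from '@bodhiapp/ts-client';

// Produce a build label such as "0.1.8 (a1b2c3d), status: ready".
function describeBuild(info: AppInfo): string {
  return `${info.version} (${info.commit_sha.slice(0, 7)}), status: ${info.status}`;
}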
81
102
  /**
82
103
  * Request body for approving access with role assignment
@@ -85,13 +106,17 @@ export type ApproveUserAccessRequest = {
85
106
  /**
86
107
  * Role to assign to the user
87
108
  */
88
- role: Role;
109
+ role: ResourceRole;
89
110
  };
90
111
  export type AuthCallbackRequest = {
91
112
  /**
92
113
  * OAuth authorization code from successful authentication (required for success flow)
93
114
  */
94
115
  code?: string | null;
116
+ /**
117
+ * OAuth state parameter for CSRF protection (must match initiated request)
118
+ */
119
+ state?: string | null;
95
120
  /**
96
121
  * OAuth error code if authentication failed (e.g., "access_denied")
97
122
  */
@@ -100,10 +125,6 @@ export type AuthCallbackRequest = {
100
125
  * Human-readable OAuth error description if authentication failed
101
126
  */
102
127
  error_description?: string | null;
103
- /**
104
- * OAuth state parameter for CSRF protection (must match initiated request)
105
- */
106
- state?: string | null;
107
128
  [key: string]: string | (string | null) | (string | null) | (string | null) | (string | null) | undefined;
108
129
  };
109
130
  /**
@@ -115,21 +136,393 @@ export type ChangeRoleRequest = {
115
136
  */
116
137
  role: string;
117
138
  };
139
+ export type ChatChoice = {
140
+ /**
141
+ * The index of the choice in the list of choices.
142
+ */
143
+ index: number;
144
+ message: ChatCompletionResponseMessage;
145
+ finish_reason?: null | FinishReason;
146
+ logprobs?: null | ChatChoiceLogprobs;
147
+ };
148
+ export type ChatChoiceLogprobs = {
149
+ /**
150
+ * A list of message content tokens with log probability information.
151
+ */
152
+ content?: Array<ChatCompletionTokenLogprob> | null;
153
+ refusal?: Array<ChatCompletionTokenLogprob> | null;
154
+ };
155
+ export type ChatChoiceStream = {
156
+ /**
157
+ * The index of the choice in the list of choices.
158
+ */
159
+ index: number;
160
+ delta: ChatCompletionStreamResponseDelta;
161
+ finish_reason?: null | FinishReason;
162
+ logprobs?: null | ChatChoiceLogprobs;
163
+ };
164
+ export type ChatCompletionAudio = {
165
+ /**
166
+ * The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive).
167
+ */
168
+ voice: ChatCompletionAudioVoice;
169
+ /**
170
+ * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
171
+ */
172
+ format: ChatCompletionAudioFormat;
173
+ };
174
+ export type ChatCompletionAudioFormat = 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16';
175
+ export type ChatCompletionAudioVoice = 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
176
+ export type ChatCompletionFunctionCall = 'none' | 'auto' | {
177
+ /**
178
+ * Forces the model to call the specified function.
179
+ */
180
+ Function: {
181
+ name: string;
182
+ };
183
+ };
184
+ /**
185
+ * @deprecated
186
+ */
187
+ export type ChatCompletionFunctions = {
188
+ /**
189
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
190
+ */
191
+ name: string;
192
+ /**
193
+ * A description of what the function does, used by the model to choose when and how to call the function.
194
+ */
195
+ description?: string | null;
196
+ /**
197
+ * The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
198
+ *
199
+ * Omitting `parameters` defines a function with an empty parameter list.
200
+ */
201
+ parameters: unknown;
202
+ };
203
+ export type ChatCompletionMessageToolCall = {
204
+ /**
205
+ * The ID of the tool call.
206
+ */
207
+ id: string;
208
+ /**
209
+ * The type of the tool. Currently, only `function` is supported.
210
+ */
211
+ type: ChatCompletionToolType;
212
+ /**
213
+ * The function that the model called.
214
+ */
215
+ function: FunctionCall;
216
+ };
217
+ export type ChatCompletionMessageToolCallChunk = {
218
+ index: number;
219
+ /**
220
+ * The ID of the tool call.
221
+ */
222
+ id?: string | null;
223
+ type?: null | ChatCompletionToolType;
224
+ function?: null | FunctionCallStream;
225
+ };
226
+ /**
227
+ * Output types that you would like the model to generate for this request.
228
+ *
229
+ * Most models are capable of generating text, which is the default: `["text"]`
230
+ *
231
+ * The `gpt-4o-audio-preview` model can also be used to [generate
232
+ * audio](https://platform.openai.com/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `["text", "audio"]`
233
+ */
234
+ export type ChatCompletionModalities = 'text' | 'audio';
235
+ /**
236
+ * Specifies a tool the model should use. Use to force the model to call a specific function.
237
+ */
238
+ export type ChatCompletionNamedToolChoice = {
239
+ /**
240
+ * The type of the tool. Currently, only `function` is supported.
241
+ */
242
+ type: ChatCompletionToolType;
243
+ function: FunctionName;
244
+ };
245
+ export type ChatCompletionRequestAssistantMessage = {
246
+ content?: null | ChatCompletionRequestAssistantMessageContent;
247
+ /**
248
+ * The refusal message by the assistant.
249
+ */
250
+ refusal?: string | null;
251
+ /**
252
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
253
+ */
254
+ name?: string | null;
255
+ audio?: null | ChatCompletionRequestAssistantMessageAudio;
256
+ tool_calls?: Array<ChatCompletionMessageToolCall> | null;
257
+ function_call?: null | FunctionCall;
258
+ };
259
+ export type ChatCompletionRequestAssistantMessageAudio = {
260
+ /**
261
+ * Unique identifier for a previous audio response from the model.
262
+ */
263
+ id: string;
264
+ };
265
+ export type ChatCompletionRequestAssistantMessageContent = string | Array<ChatCompletionRequestAssistantMessageContentPart>;
266
+ export type ChatCompletionRequestAssistantMessageContentPart = (ChatCompletionRequestMessageContentPartText & {
267
+ type: 'text';
268
+ }) | (ChatCompletionRequestMessageContentPartRefusal & {
269
+ type: 'refusal';
270
+ });
271
+ export type ChatCompletionRequestDeveloperMessage = {
272
+ /**
273
+ * The contents of the developer message.
274
+ */
275
+ content: ChatCompletionRequestDeveloperMessageContent;
276
+ /**
277
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
278
+ */
279
+ name?: string | null;
280
+ };
281
+ export type ChatCompletionRequestDeveloperMessageContent = string | Array<ChatCompletionRequestMessageContentPartText>;
282
+ export type ChatCompletionRequestFunctionMessage = {
283
+ /**
284
+ * The return value from the function call, to return to the model.
285
+ */
286
+ content?: string | null;
287
+ /**
288
+ * The name of the function to call.
289
+ */
290
+ name: string;
291
+ };
292
+ export type ChatCompletionRequestMessage = (ChatCompletionRequestDeveloperMessage & {
293
+ role: 'developer';
294
+ }) | (ChatCompletionRequestSystemMessage & {
295
+ role: 'system';
296
+ }) | (ChatCompletionRequestUserMessage & {
297
+ role: 'user';
298
+ }) | (ChatCompletionRequestAssistantMessage & {
299
+ role: 'assistant';
300
+ }) | (ChatCompletionRequestToolMessage & {
301
+ role: 'tool';
302
+ }) | (ChatCompletionRequestFunctionMessage & {
303
+ role: 'function';
304
+ });
305
+ /**
306
+ * Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).
307
+ */
308
+ export type ChatCompletionRequestMessageContentPartAudio = {
309
+ input_audio: InputAudio;
310
+ };
311
+ export type ChatCompletionRequestMessageContentPartImage = {
312
+ image_url: ImageUrl;
313
+ };
314
+ export type ChatCompletionRequestMessageContentPartRefusal = {
315
+ /**
316
+ * The refusal message generated by the model.
317
+ */
318
+ refusal: string;
319
+ };
320
+ export type ChatCompletionRequestMessageContentPartText = {
321
+ text: string;
322
+ };
323
+ export type ChatCompletionRequestSystemMessage = {
324
+ /**
325
+ * The contents of the system message.
326
+ */
327
+ content: ChatCompletionRequestSystemMessageContent;
328
+ /**
329
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
330
+ */
331
+ name?: string | null;
332
+ };
333
+ export type ChatCompletionRequestSystemMessageContent = string | Array<ChatCompletionRequestSystemMessageContentPart>;
334
+ export type ChatCompletionRequestSystemMessageContentPart = ChatCompletionRequestMessageContentPartText & {
335
+ type: 'text';
336
+ };
337
+ /**
338
+ * Tool message
339
+ */
340
+ export type ChatCompletionRequestToolMessage = {
341
+ /**
342
+ * The contents of the tool message.
343
+ */
344
+ content: ChatCompletionRequestToolMessageContent;
345
+ tool_call_id: string;
346
+ };
347
+ export type ChatCompletionRequestToolMessageContent = string | Array<ChatCompletionRequestToolMessageContentPart>;
348
+ export type ChatCompletionRequestToolMessageContentPart = ChatCompletionRequestMessageContentPartText & {
349
+ type: 'text';
350
+ };
351
+ export type ChatCompletionRequestUserMessage = {
352
+ /**
353
+ * The contents of the user message.
354
+ */
355
+ content: ChatCompletionRequestUserMessageContent;
356
+ /**
357
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
358
+ */
359
+ name?: string | null;
360
+ };
361
+ export type ChatCompletionRequestUserMessageContent = string | Array<ChatCompletionRequestUserMessageContentPart>;
362
+ export type ChatCompletionRequestUserMessageContentPart = (ChatCompletionRequestMessageContentPartText & {
363
+ type: 'text';
364
+ }) | (ChatCompletionRequestMessageContentPartImage & {
365
+ type: 'image_url';
366
+ }) | (ChatCompletionRequestMessageContentPartAudio & {
367
+ type: 'input_audio';
368
+ });
369
+ /**
370
+ * A chat completion message generated by the model.
371
+ */
372
+ export type ChatCompletionResponseMessage = {
373
+ /**
374
+ * The contents of the message.
375
+ */
376
+ content?: string | null;
377
+ /**
378
+ * The refusal message generated by the model.
379
+ */
380
+ refusal?: string | null;
381
+ /**
382
+ * The tool calls generated by the model, such as function calls.
383
+ */
384
+ tool_calls?: Array<ChatCompletionMessageToolCall> | null;
385
+ /**
386
+ * The role of the author of this message.
387
+ */
388
+ role: Role;
389
+ function_call?: null | FunctionCall;
390
+ audio?: null | ChatCompletionResponseMessageAudio;
391
+ };
392
+ export type ChatCompletionResponseMessageAudio = {
393
+ /**
394
+ * Unique identifier for this audio response.
395
+ */
396
+ id: string;
397
+ /**
398
+ * The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.
399
+ */
400
+ expires_at: number;
401
+ /**
402
+ * Base64 encoded audio bytes generated by the model, in the format specified in the request.
403
+ */
404
+ data: string;
405
+ /**
406
+ * Transcript of the audio generated by the model.
407
+ */
408
+ transcript: string;
409
+ };
410
+ /**
411
+ * Options for streaming response. Only set this when you set `stream: true`.
412
+ */
413
+ export type ChatCompletionStreamOptions = {
414
+ /**
415
+ * If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value.
416
+ */
417
+ include_usage: boolean;
418
+ };
419
+ /**
420
+ * A chat completion delta generated by streamed model responses.
421
+ */
422
+ export type ChatCompletionStreamResponseDelta = {
423
+ /**
424
+ * The contents of the chunk message.
425
+ */
426
+ content?: string | null;
427
+ function_call?: null | FunctionCallStream;
428
+ tool_calls?: Array<ChatCompletionMessageToolCallChunk> | null;
429
+ role?: null | Role;
430
+ /**
431
+ * The refusal message generated by the model.
432
+ */
433
+ refusal?: string | null;
434
+ };
435
+ export type ChatCompletionTokenLogprob = {
436
+ /**
437
+ * The token.
438
+ */
439
+ token: string;
440
+ /**
441
+ * The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
442
+ */
443
+ logprob: number;
444
+ /**
445
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
446
+ */
447
+ bytes?: Array<number> | null;
448
+ /**
449
+ * List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.
450
+ */
451
+ top_logprobs: Array<TopLogprobs>;
452
+ };
453
+ export type ChatCompletionTool = {
454
+ type: ChatCompletionToolType;
455
+ function: FunctionObject;
456
+ };
457
+ /**
458
+ * Controls which (if any) tool is called by the model.
459
+ * `none` means the model will not call any tool and instead generates a message.
460
+ * `auto` means the model can pick between generating a message or calling one or more tools.
461
+ * `required` means the model must call one or more tools.
462
+ * Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
463
+ *
464
+ * `none` is the default when no tools are present. `auto` is the default if tools are present.
465
+ */
466
+ export type ChatCompletionToolChoiceOption = 'none' | 'auto' | 'required' | {
467
+ named: ChatCompletionNamedToolChoice;
468
+ };
469
+ export type ChatCompletionToolType = 'function';
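Editor's note: the additions above define the OpenAI-compatible chat building blocks (`ChatCompletionRequestMessage`, `ChatCompletionTool`, and related types). A short construction sketch; `get_weather` and its schema are hypothetical, and the import path is assumed:

import type { ChatCompletionRequestMessage, ChatCompletionTool } from '@bodhiapp/ts-client';

// Messages discriminate on `role`; each variant is the role-specific payload
// type intersected with its literal role tag.
const messages: ChatCompletionRequestMessage[] = [
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: 'What is the weather in Paris?' },
];

// Tools wrap a FunctionObject; `parameters` is a JSON Schema object.
const weatherTool: ChatCompletionTool = {
  type: 'function',
  function: {
    name: 'get_weather',
    description: 'Look up current weather for a city',
    parameters: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city'],
    },
  },
};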
118
470
  export type ChatRequest = {
471
+ model: string;
472
+ messages: Array<Message>;
473
+ stream?: boolean | null;
119
474
  format?: string | null;
120
475
  keep_alive?: null | Duration;
121
- messages: Array<Message>;
122
- model: string;
123
476
  options?: null | Options;
124
- stream?: boolean | null;
477
+ };
478
+ /**
479
+ * Breakdown of tokens used in a completion.
480
+ */
481
+ export type CompletionTokensDetails = {
482
+ accepted_prediction_tokens?: number | null;
483
+ /**
484
+ * Audio input tokens generated by the model.
485
+ */
486
+ audio_tokens?: number | null;
487
+ /**
488
+ * Tokens generated by the model for reasoning.
489
+ */
490
+ reasoning_tokens?: number | null;
491
+ /**
492
+ * When using Predicted Outputs, the number of tokens in the
493
+ * prediction that did not appear in the completion. However, like
494
+ * reasoning tokens, these tokens are still counted in the total
495
+ * completion tokens for purposes of billing, output, and context
496
+ * window limits.
497
+ */
498
+ rejected_prediction_tokens?: number | null;
499
+ };
500
+ /**
501
+ * Usage statistics for the completion request.
502
+ */
503
+ export type CompletionUsage = {
504
+ /**
505
+ * Number of tokens in the prompt.
506
+ */
507
+ prompt_tokens: number;
508
+ /**
509
+ * Number of tokens in the generated completion.
510
+ */
511
+ completion_tokens: number;
512
+ /**
513
+ * Total number of tokens used in the request (prompt + completion).
514
+ */
515
+ total_tokens: number;
516
+ prompt_tokens_details?: null | PromptTokensDetails;
517
+ completion_tokens_details?: null | CompletionTokensDetails;
125
518
  };
126
519
  export type CreateAliasRequest = {
127
520
  alias: string;
128
- context_params?: Array<string> | null;
129
- filename: string;
130
521
  repo: string;
131
- request_params?: null | OaiRequestParams;
522
+ filename: string;
132
523
  snapshot?: string | null;
524
+ request_params?: null | OaiRequestParams;
525
+ context_params?: Array<string> | null;
133
526
  };
134
527
  /**
135
528
  * Request to create a new API model configuration
@@ -139,14 +532,14 @@ export type CreateApiModelRequest = {
139
532
  * API format/protocol (e.g., "openai")
140
533
  */
141
534
  api_format: ApiFormat;
142
- /**
143
- * API key for authentication
144
- */
145
- api_key: string;
146
535
  /**
147
536
  * API base URL
148
537
  */
149
538
  base_url: string;
539
+ /**
540
+ * API key for authentication (null for public APIs)
541
+ */
542
+ api_key?: ApiKey;
150
543
  /**
151
544
  * List of available models
152
545
  */
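Editor's note: `CreateApiModelRequest.api_key` is now optional and typed as `ApiKey`, so a configuration can be registered without credentials. A hedged sketch: only the fields visible in this diff are filled in (hence the `Partial` via TS 4.9 `satisfies`), and the URL, model name, and import path are examples:

import type { CreateApiModelRequest } from '@bodhiapp/ts-client';

// 0.1.6 required `api_key: string`; 0.1.8 accepts a string, null, or omission.
const publicEndpoint = {
  api_format: 'openai',
  base_url: 'https://example.com/v1',
  api_key: null, // public API, no credentials
  models: ['example-model'],
} satisfies Partial<CreateApiModelRequest>;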
@@ -164,58 +557,311 @@ export type CreateApiTokenRequest = {
164
557
  * Descriptive name for the API token (minimum 3 characters)
165
558
  */
166
559
  name?: string | null;
560
+ /**
561
+ * Token scope defining access level
562
+ */
563
+ scope: TokenScope;
564
+ };
565
+ export type CreateChatCompletionRequest = {
566
+ /**
567
+ * A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio).
568
+ */
569
+ messages: Array<ChatCompletionRequestMessage>;
570
+ /**
571
+ * ID of the model to use.
572
+ * See the [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
573
+ */
574
+ model: string;
575
+ /**
576
+ * Whether or not to store the output of this chat completion request
577
+ *
578
+ * for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products.
579
+ */
580
+ store?: boolean | null;
581
+ reasoning_effort?: null | ReasoningEffort;
582
+ /**
583
+ * Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions).
584
+ */
585
+ metadata?: unknown;
586
+ /**
587
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
588
+ */
589
+ frequency_penalty?: number | null;
590
+ /**
591
+ * Modify the likelihood of specified tokens appearing in the completion.
592
+ *
593
+ * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
594
+ * Mathematically, the bias is added to the logits generated by the model prior to sampling.
595
+ * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
596
+ * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
597
+ */
598
+ logit_bias?: {} | null;
599
+ /**
600
+ * Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
601
+ */
602
+ logprobs?: boolean | null;
603
+ /**
604
+ * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
605
+ */
606
+ top_logprobs?: number | null;
607
+ /**
608
+ * The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion.
609
+ *
610
+ * This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.
611
+ * This value is now deprecated in favor of `max_completion_tokens`, and is
612
+ * not compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning).
613
+ * @deprecated
614
+ */
615
+ max_tokens?: number | null;
616
+ /**
617
+ * An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
618
+ */
619
+ max_completion_tokens?: number | null;
620
+ /**
621
+ * How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
622
+ */
623
+ n?: number | null;
624
+ modalities?: Array<ChatCompletionModalities> | null;
625
+ prediction?: null | PredictionContent;
626
+ audio?: null | ChatCompletionAudio;
627
+ /**
628
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
629
+ */
630
+ presence_penalty?: number | null;
631
+ response_format?: null | ResponseFormat;
632
+ /**
633
+ * This feature is in Beta.
634
+ * If specified, our system will make a best effort to sample deterministically, such that repeated requests
635
+ * with the same `seed` and parameters should return the same result.
636
+ * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
637
+ */
638
+ seed?: number | null;
639
+ service_tier?: null | ServiceTier;
640
+ stop?: null | Stop;
641
+ /**
642
+ * If set, partial message deltas will be sent, like in ChatGPT.
643
+ * Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
644
+ * as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
645
+ */
646
+ stream?: boolean | null;
647
+ stream_options?: null | ChatCompletionStreamOptions;
648
+ /**
649
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random,
650
+ * while lower values like 0.2 will make it more focused and deterministic.
651
+ *
652
+ * We generally recommend altering this or `top_p` but not both.
653
+ */
654
+ temperature?: number | null;
655
+ /**
656
+ * An alternative to sampling with temperature, called nucleus sampling,
657
+ * where the model considers the results of the tokens with top_p probability mass.
658
+ * So 0.1 means only the tokens comprising the top 10% probability mass are considered.
659
+ *
660
+ * We generally recommend altering this or `temperature` but not both.
661
+ */
662
+ top_p?: number | null;
663
+ /**
664
+ * A list of tools the model may call. Currently, only functions are supported as a tool.
665
+ * Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
666
+ */
667
+ tools?: Array<ChatCompletionTool> | null;
668
+ tool_choice?: null | ChatCompletionToolChoiceOption;
669
+ /**
670
+ * Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use.
671
+ */
672
+ parallel_tool_calls?: boolean | null;
673
+ /**
674
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
675
+ */
676
+ user?: string | null;
677
+ web_search_options?: null | WebSearchOptions;
678
+ function_call?: null | ChatCompletionFunctionCall;
679
+ /**
680
+ * Deprecated in favor of `tools`.
681
+ *
682
+ * A list of functions the model may generate JSON inputs for.
683
+ * @deprecated
684
+ */
685
+ functions?: Array<ChatCompletionFunctions> | null;
686
+ };
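Editor's note: with `CreateChatCompletionRequest` in place (and `CreateChatCompletionResponse` defined just below), only `model` and `messages` are required. A minimal request/response sketch; the model name is an example and the import path is assumed:

import type { CreateChatCompletionRequest, CreateChatCompletionResponse } from '@bodhiapp/ts-client';

const request: CreateChatCompletionRequest = {
  model: 'example-model',
  messages: [{ role: 'user', content: 'Say hello in one word.' }],
  temperature: 0.2,
  stream: false,
};

// For a non-streaming response, the first choice's message holds the reply.
function firstReply(response: CreateChatCompletionResponse): string | null {
  return response.choices[0]?.message.content ?? null;
}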
687
+ /**
688
+ * Represents a chat completion response returned by model, based on the provided input.
689
+ */
690
+ export type CreateChatCompletionResponse = {
691
+ /**
692
+ * A unique identifier for the chat completion.
693
+ */
694
+ id: string;
695
+ /**
696
+ * A list of chat completion choices. Can be more than one if `n` is greater than 1.
697
+ */
698
+ choices: Array<ChatChoice>;
699
+ /**
700
+ * The Unix timestamp (in seconds) of when the chat completion was created.
701
+ */
702
+ created: number;
703
+ /**
704
+ * The model used for the chat completion.
705
+ */
706
+ model: string;
707
+ service_tier?: null | ServiceTierResponse;
708
+ /**
709
+ * This fingerprint represents the backend configuration that the model runs with.
710
+ *
711
+ * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
712
+ */
713
+ system_fingerprint?: string | null;
714
+ /**
715
+ * The object type, which is always `chat.completion`.
716
+ */
717
+ object: string;
718
+ usage?: null | CompletionUsage;
719
+ };
720
+ /**
721
+ * Represents a streamed chunk of a chat completion response returned by model, based on the provided input.
722
+ */
723
+ export type CreateChatCompletionStreamResponse = {
724
+ /**
725
+ * A unique identifier for the chat completion. Each chunk has the same ID.
726
+ */
727
+ id: string;
728
+ /**
729
+ * A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {"include_usage": true}`.
730
+ */
731
+ choices: Array<ChatChoiceStream>;
732
+ /**
733
+ * The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
734
+ */
735
+ created: number;
736
+ /**
737
+ * The model to generate the completion.
738
+ */
739
+ model: string;
740
+ service_tier?: null | ServiceTierResponse;
741
+ /**
742
+ * This fingerprint represents the backend configuration that the model runs with.
743
+ * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
744
+ */
745
+ system_fingerprint?: string | null;
746
+ /**
747
+ * The object type, which is always `chat.completion.chunk`.
748
+ */
749
+ object: string;
750
+ usage?: null | CompletionUsage;
751
+ };
752
+ export type CreateEmbeddingRequest = {
753
+ /**
754
+ * ID of the model to use. You can use the
755
+ * [List models](https://platform.openai.com/docs/api-reference/models/list)
756
+ * API to see all of your available models, or see our
757
+ * [Model overview](https://platform.openai.com/docs/models/overview)
758
+ * for descriptions of them.
759
+ */
760
+ model: string;
761
+ /**
762
+ * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
763
+ */
764
+ input: EmbeddingInput;
765
+ encoding_format?: null | EncodingFormat;
766
+ /**
767
+ * A unique identifier representing your end-user, which will help OpenAI
768
+ * to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids).
769
+ */
770
+ user?: string | null;
771
+ /**
772
+ * The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.
773
+ */
774
+ dimensions?: number | null;
775
+ };
776
+ export type CreateEmbeddingResponse = {
777
+ object: string;
778
+ /**
779
+ * The name of the model used to generate the embedding.
780
+ */
781
+ model: string;
782
+ /**
783
+ * The list of embeddings generated by the model.
784
+ */
785
+ data: Array<Embedding>;
786
+ /**
787
+ * The usage information for the request.
788
+ */
789
+ usage: EmbeddingUsage;
167
790
  };
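Editor's note: the new embedding types mirror the OpenAI shape. A short sketch of building a request and pulling vectors from the response; the model name is an example and the import path is assumed:

import type { CreateEmbeddingRequest, CreateEmbeddingResponse } from '@bodhiapp/ts-client';

// `input` accepts a single string, an array of strings, or token arrays.
const request: CreateEmbeddingRequest = {
  model: 'example-embedding-model',
  input: ['first document', 'second document'],
  encoding_format: 'float',
};

function vectors(response: CreateEmbeddingResponse): number[][] {
  // Each Embedding carries its index, so order defensively before extracting.
  return [...response.data].sort((a, b) => a.index - b.index).map((e) => e.embedding);
}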
168
791
  export type DownloadRequest = {
169
- created_at: string;
170
- downloaded_bytes?: number;
171
- error?: string | null;
172
- filename: string;
173
792
  id: string;
174
793
  repo: string;
175
- started_at: string;
794
+ filename: string;
176
795
  status: DownloadStatus;
177
- total_bytes?: number | null;
796
+ error?: string | null;
797
+ created_at: string;
178
798
  updated_at: string;
799
+ total_bytes?: number | null;
800
+ downloaded_bytes?: number;
801
+ started_at: string;
179
802
  };
180
803
  export type DownloadStatus = 'pending' | 'completed' | 'error';
181
804
  export type Duration = string;
182
- export type EmptyResponse = {
183
- [key: string]: unknown;
805
+ /**
806
+ * Represents an embedding vector returned by embedding endpoint.
807
+ */
808
+ export type Embedding = {
809
+ /**
810
+ * The index of the embedding in the list of embeddings.
811
+ */
812
+ index: number;
813
+ /**
814
+ * The object type, which is always "embedding".
815
+ */
816
+ object: string;
817
+ /**
818
+ * The embedding vector, which is a list of floats. The length of vector
819
+ * depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings).
820
+ */
821
+ embedding: Array<number>;
184
822
  };
185
- export type ErrorBody = {
823
+ export type EmbeddingInput = string | Array<string> | Array<number> | Array<Array<number>>;
824
+ export type EmbeddingUsage = {
186
825
  /**
187
- * Specific error code for programmatic error handling
826
+ * The number of tokens used by the prompt.
188
827
  */
189
- code?: string | null;
828
+ prompt_tokens: number;
190
829
  /**
191
- * Human-readable error message describing what went wrong
830
+ * The total number of tokens used by the request.
192
831
  */
193
- message: string;
832
+ total_tokens: number;
833
+ };
834
+ export type EncodingFormat = 'float' | 'base64';
835
+ export type ErrorBody = {
194
836
  /**
195
- * Parameter name that caused the error (for validation errors)
837
+ * Human-readable error message describing what went wrong
196
838
  */
197
- param?: string | null;
839
+ message: string;
198
840
  /**
199
841
  * Error type categorizing the kind of error that occurred
200
842
  */
201
843
  type: string;
844
+ /**
845
+ * Specific error code for programmatic error handling
846
+ */
847
+ code?: string | null;
848
+ /**
849
+ * Parameter name that caused the error (for validation errors)
850
+ */
851
+ param?: string | null;
202
852
  };
203
853
  /**
204
854
  * Request to fetch available models from provider
205
855
  */
206
856
  export type FetchModelsRequest = {
207
857
  /**
208
- * API key for authentication (provide either api_key OR id, api_key takes preference if both provided)
858
+ * Credentials to use for fetching models
209
859
  */
210
- api_key?: string;
860
+ creds?: TestCreds;
211
861
  /**
212
- * API base URL (optional when using id)
862
+ * API base URL (required - always needed to know where to fetch models from)
213
863
  */
214
864
  base_url: string;
215
- /**
216
- * API model ID to look up stored credentials (provide either api_key OR id, api_key takes preference if both provided)
217
- */
218
- id?: string;
219
865
  };
220
866
  /**
221
867
  * Response containing available models from provider
@@ -223,12 +869,81 @@ export type FetchModelsRequest = {
223
869
  export type FetchModelsResponse = {
224
870
  models: Array<string>;
225
871
  };
872
+ export type FinishReason = 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call';
873
+ /**
874
+ * The name and arguments of a function that should be called, as generated by the model.
875
+ */
876
+ export type FunctionCall = {
877
+ /**
878
+ * The name of the function to call.
879
+ */
880
+ name: string;
881
+ /**
882
+ * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
883
+ */
884
+ arguments: string;
885
+ };
886
+ export type FunctionCallStream = {
887
+ /**
888
+ * The name of the function to call.
889
+ */
890
+ name?: string | null;
891
+ /**
892
+ * The arguments to call the function with, as generated by the model in JSON format.
893
+ * Note that the model does not always generate valid JSON, and may hallucinate
894
+ * parameters not defined by your function schema. Validate the arguments in your
895
+ * code before calling your function.
896
+ */
897
+ arguments?: string | null;
898
+ };
899
+ export type FunctionName = {
900
+ /**
901
+ * The name of the function to call.
902
+ */
903
+ name: string;
904
+ };
905
+ export type FunctionObject = {
906
+ /**
907
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
908
+ */
909
+ name: string;
910
+ /**
911
+ * A description of what the function does, used by the model to choose when and how to call the function.
912
+ */
913
+ description?: string | null;
914
+ /**
915
+ * The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
916
+ *
917
+ * Omitting `parameters` defines a function with an empty parameter list.
918
+ */
919
+ parameters?: unknown;
920
+ /**
921
+ * Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](https://platform.openai.com/docs/guides/function-calling).
922
+ */
923
+ strict?: boolean | null;
924
+ };
925
+ export type ImageDetail = 'auto' | 'low' | 'high';
926
+ export type ImageUrl = {
927
+ /**
928
+ * Either a URL of the image or the base64 encoded image data.
929
+ */
930
+ url: string;
931
+ detail?: null | ImageDetail;
932
+ };
933
+ export type InputAudio = {
934
+ /**
935
+ * Base64 encoded audio data.
936
+ */
937
+ data: string;
938
+ /**
939
+ * The format of the encoded audio data. Currently supports "wav" and "mp3".
940
+ */
941
+ format: InputAudioFormat;
942
+ };
943
+ export type InputAudioFormat = 'wav' | 'mp3';
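Editor's note: as the doc comments above warn, `FunctionCall.arguments` is a JSON string produced by the model and may be invalid. A small defensive-parsing sketch (import path assumed):

import type { FunctionCall } from '@bodhiapp/ts-client';

function parseFunctionCall(call: FunctionCall): { name: string; args: unknown } | null {
  try {
    return { name: call.name, args: JSON.parse(call.arguments) };
  } catch {
    return null; // invalid or hallucinated JSON; caller decides how to recover
  }
}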
226
944
  export type ListModelResponse = {
945
+ object: string;
227
946
  data: Array<{
228
- /**
229
- * The Unix timestamp (in seconds) when the model was created.
230
- */
231
- created: number;
232
947
  /**
233
948
  * The model identifier, which can be referenced in the API endpoints.
234
949
  */
@@ -237,12 +952,15 @@ export type ListModelResponse = {
237
952
  * The object type, which is always "model".
238
953
  */
239
954
  object: string;
955
+ /**
956
+ * The Unix timestamp (in seconds) when the model was created.
957
+ */
958
+ created: number;
240
959
  /**
241
960
  * The organization that owns the model.
242
961
  */
243
962
  owned_by: string;
244
963
  }>;
245
- object: string;
246
964
  };
247
965
  /**
248
966
  * List users query parameters
@@ -252,46 +970,42 @@ export type ListUsersParams = {
252
970
  page_size?: number | null;
253
971
  };
254
972
  export type LocalModelResponse = {
255
- filename: string;
256
- model_params: {};
257
973
  repo: string;
258
- size?: number | null;
974
+ filename: string;
259
975
  snapshot: string;
976
+ size?: number | null;
977
+ model_params: {};
260
978
  };
261
979
  export type Message = {
980
+ role: string;
262
981
  content: string;
263
982
  images?: Array<string> | null;
264
- role: string;
265
983
  };
266
984
  export type Model = {
267
- details: ModelDetails;
268
- digest: string;
269
985
  model: string;
270
986
  modified_at: number;
271
987
  size: number;
988
+ digest: string;
989
+ details: ModelDetails;
272
990
  };
273
991
  export type ModelAlias = {
274
992
  alias: string;
275
- filename: string;
276
993
  repo: string;
994
+ filename: string;
277
995
  snapshot: string;
278
996
  };
279
997
  export type ModelDetails = {
280
- families?: Array<string> | null;
281
- family: string;
998
+ parent_model?: string | null;
282
999
  format: string;
1000
+ family: string;
1001
+ families?: Array<string> | null;
283
1002
  parameter_size: string;
284
- parent_model?: string | null;
285
1003
  quantization_level: string;
286
1004
  };
287
1005
  /**
288
1006
  * Describes an OpenAI model offering that can be used with the API.
289
1007
  */
290
1008
  export type ModelResponse = {
291
- /**
292
- * The Unix timestamp (in seconds) when the model was created.
293
- */
294
- created: number;
295
1009
  /**
296
1010
  * The model identifier, which can be referenced in the API endpoints.
297
1011
  */
@@ -300,6 +1014,10 @@ export type ModelResponse = {
300
1014
  * The object type, which is always "model".
301
1015
  */
302
1016
  object: string;
1017
+ /**
1018
+ * The Unix timestamp (in seconds) when the model was created.
1019
+ */
1020
+ created: number;
303
1021
  /**
304
1022
  * The organization that owns the model.
305
1023
  */
@@ -312,14 +1030,14 @@ export type ModelsResponse = {
312
1030
  * Request to pull a model file from HuggingFace
313
1031
  */
314
1032
  export type NewDownloadRequest = {
315
- /**
316
- * Model file name to download (typically .gguf format)
317
- */
318
- filename: string;
319
1033
  /**
320
1034
  * HuggingFace repository name in format 'username/repository-name'
321
1035
  */
322
1036
  repo: string;
1037
+ /**
1038
+ * Model file name to download (typically .gguf format)
1039
+ */
1040
+ filename: string;
323
1041
  };
324
1042
  export type OaiRequestParams = {
325
1043
  frequency_penalty?: number | null;
@@ -341,81 +1059,73 @@ export type OpenAiApiError = {
341
1059
  error: ErrorBody;
342
1060
  };
343
1061
  export type Options = {
344
- f16_kv?: boolean | null;
345
- frequency_penalty?: number | null;
346
- logits_all?: boolean | null;
347
- low_vram?: boolean | null;
348
- main_gpu?: number | null;
349
- mirostat?: number | null;
350
- mirostat_eta?: number | null;
351
- mirostat_tau?: number | null;
352
- num_batch?: number | null;
353
- num_ctx?: number | null;
354
- num_gpu?: number | null;
355
1062
  num_keep?: number | null;
356
- num_predict?: number | null;
357
- num_thread?: number | null;
358
- numa?: boolean | null;
359
- penalize_newline?: boolean | null;
360
- presence_penalty?: number | null;
361
- repeat_last_n?: number | null;
362
- repeat_penalty?: number | null;
363
1063
  seed?: number | null;
364
- stop?: Array<string> | null;
365
- temperature?: number | null;
366
- tfs_z?: number | null;
1064
+ num_predict?: number | null;
367
1065
  top_k?: number | null;
368
1066
  top_p?: number | null;
1067
+ tfs_z?: number | null;
369
1068
  typical_p?: number | null;
370
- use_mlock?: boolean | null;
371
- use_mmap?: boolean | null;
1069
+ repeat_last_n?: number | null;
1070
+ temperature?: number | null;
1071
+ repeat_penalty?: number | null;
1072
+ presence_penalty?: number | null;
1073
+ frequency_penalty?: number | null;
1074
+ mirostat?: number | null;
1075
+ mirostat_tau?: number | null;
1076
+ mirostat_eta?: number | null;
1077
+ penalize_newline?: boolean | null;
1078
+ stop?: Array<string> | null;
1079
+ numa?: boolean | null;
1080
+ num_ctx?: number | null;
1081
+ num_batch?: number | null;
1082
+ num_gpu?: number | null;
1083
+ main_gpu?: number | null;
1084
+ low_vram?: boolean | null;
1085
+ f16_kv?: boolean | null;
1086
+ logits_all?: boolean | null;
372
1087
  vocab_only?: boolean | null;
1088
+ use_mmap?: boolean | null;
1089
+ use_mlock?: boolean | null;
1090
+ num_thread?: number | null;
373
1091
  };
374
1092
  export type PaginatedAliasResponse = {
375
1093
  data: Array<Alias>;
1094
+ total: number;
376
1095
  page: number;
377
1096
  page_size: number;
378
- total: number;
379
1097
  };
380
1098
  /**
381
1099
  * Paginated response for API model listings
382
1100
  */
383
1101
  export type PaginatedApiModelResponse = {
384
1102
  data: Array<ApiModelResponse>;
1103
+ total: number;
385
1104
  page: number;
386
1105
  page_size: number;
387
- total: number;
388
1106
  };
389
1107
  export type PaginatedApiTokenResponse = {
390
1108
  data: Array<ApiToken>;
1109
+ total: number;
391
1110
  page: number;
392
1111
  page_size: number;
393
- total: number;
394
1112
  };
395
1113
  export type PaginatedDownloadResponse = {
396
1114
  data: Array<DownloadRequest>;
1115
+ total: number;
397
1116
  page: number;
398
1117
  page_size: number;
399
- total: number;
400
1118
  };
401
1119
  export type PaginatedLocalModelResponse = {
402
1120
  data: Array<LocalModelResponse>;
1121
+ total: number;
403
1122
  page: number;
404
1123
  page_size: number;
405
- total: number;
406
1124
  };
407
1125
  /**
408
1126
  * Paginated response for access requests
409
1127
  */
410
1128
  export type PaginatedUserAccessResponse = {
411
- /**
412
- * Current page number
413
- */
414
- page: number;
415
- /**
416
- * Number of items per page
417
- */
418
- page_size: number;
419
1129
  /**
420
1130
  * List of access requests
421
1131
  */
@@ -424,12 +1134,20 @@ export type PaginatedUserAccessResponse = {
424
1134
  * Total number of requests
425
1135
  */
426
1136
  total: number;
1137
+ /**
1138
+ * Current page number
1139
+ */
1140
+ page: number;
1141
+ /**
1142
+ * Number of items per page
1143
+ */
1144
+ page_size: number;
427
1145
  };
428
1146
  export type PaginatedUserAliasResponse = {
429
1147
  data: Array<UserAliasResponse>;
1148
+ total: number;
430
1149
  page: number;
431
1150
  page_size: number;
432
- total: number;
433
1151
  };
434
1152
  /**
435
1153
  * Query parameters for pagination and sorting
@@ -461,25 +1179,84 @@ export type PingResponse = {
461
1179
  */
462
1180
  message: string;
463
1181
  };
1182
+ /**
1183
+ * The type of the predicted content you want to provide. This type is
1184
+ * currently always `content`.
1185
+ */
1186
+ export type PredictionContent = {
1187
+ /**
1188
+ * The type of the predicted content you want to provide. This type is
1189
+ * currently always `content`.
1190
+ */
1191
+ content: PredictionContentContent;
1192
+ type: 'content';
1193
+ };
1194
+ /**
1195
+ * The content that should be matched when generating a model response. If generated tokens would match this content, the entire model response can be returned much more quickly.
1196
+ */
1197
+ export type PredictionContentContent = string | Array<ChatCompletionRequestMessageContentPartText>;
1198
+ /**
1199
+ * Breakdown of tokens used in a completion.
1200
+ */
1201
+ export type PromptTokensDetails = {
1202
+ /**
1203
+ * Audio input tokens present in the prompt.
1204
+ */
1205
+ audio_tokens?: number | null;
1206
+ /**
1207
+ * Cached tokens present in the prompt.
1208
+ */
1209
+ cached_tokens?: number | null;
1210
+ };
1211
+ export type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';
464
1212
  export type RedirectResponse = {
465
1213
  /**
466
1214
  * The URL to redirect to (OAuth authorization URL or application home page)
467
1215
  */
468
1216
  location: string;
469
1217
  };
470
- export type Role = 'resource_user' | 'resource_power_user' | 'resource_manager' | 'resource_admin';
1218
+ export type ResourceRole = 'resource_user' | 'resource_power_user' | 'resource_manager' | 'resource_admin';
1219
+ export type ResponseFormat = {
1220
+ type: 'text';
1221
+ } | {
1222
+ type: 'json_object';
1223
+ } | {
1224
+ json_schema: ResponseFormatJsonSchema;
1225
+ type: 'json_schema';
1226
+ };
1227
+ export type ResponseFormatJsonSchema = {
1228
+ /**
1229
+ * A description of what the response format is for, used by the model to determine how to respond in the format.
1230
+ */
1231
+ description?: string | null;
1232
+ /**
1233
+ * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
1234
+ */
1235
+ name: string;
1236
+ /**
1237
+ * The schema for the response format, described as a JSON Schema object.
1238
+ */
1239
+ schema?: unknown;
1240
+ /**
1241
+ * Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
1242
+ */
1243
+ strict?: boolean | null;
1244
+ };
1245
+ export type Role = 'system' | 'user' | 'assistant' | 'tool' | 'function';
1246
+ export type ServiceTier = 'auto' | 'default' | 'flex' | 'scale' | 'priority';
1247
+ export type ServiceTierResponse = 'scale' | 'default' | 'flex' | 'priority';
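Editor's note: `Role` now names the chat message role ('system' | 'user' | ...), while the former access-control enum lives on as `ResourceRole`. The new `ResponseFormat` union covers structured outputs; a sketch with a hypothetical schema name (import path assumed):

import type { ResponseFormat } from '@bodhiapp/ts-client';

// The json_schema variant nests name/schema/strict inside `json_schema`.
const structured: ResponseFormat = {
  type: 'json_schema',
  json_schema: {
    name: 'city_weather',
    schema: {
      type: 'object',
      properties: { city: { type: 'string' }, temp_c: { type: 'number' } },
    },
    strict: true,
  },
};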
471
1248
  export type SettingInfo = {
1249
+ key: string;
472
1250
  current_value: unknown;
473
1251
  default_value: unknown;
474
- key: string;
475
- metadata: SettingMetadata;
476
1252
  source: SettingSource;
1253
+ metadata: SettingMetadata;
477
1254
  };
478
1255
  export type SettingMetadata = {
479
1256
  type: 'string';
480
1257
  } | {
481
- max: number;
482
1258
  min: number;
1259
+ max: number;
483
1260
  type: 'number';
484
1261
  } | {
485
1262
  type: 'boolean';
@@ -492,14 +1269,14 @@ export type SettingSource = 'system' | 'command_line' | 'environment' | 'setting
492
1269
  * Request to setup the application in authenticated mode
493
1270
  */
494
1271
  export type SetupRequest = {
495
- /**
496
- * Optional description of the server's purpose
497
- */
498
- description?: string | null;
499
1272
  /**
500
1273
  * Server name for identification (minimum 10 characters)
501
1274
  */
502
1275
  name: string;
1276
+ /**
1277
+ * Optional description of the server's purpose
1278
+ */
1279
+ description?: string | null;
503
1280
  };
504
1281
  /**
505
1282
  * Response containing the updated application status after setup
@@ -522,22 +1299,35 @@ export type ShowResponse = {
522
1299
  parameters: string;
523
1300
  template: string;
524
1301
  };
1302
+ export type Stop = string | Array<string>;
1303
+ /**
1304
+ * Credentials for test/fetch operations
1305
+ */
1306
+ export type TestCreds = {
1307
+ /**
1308
+ * Look up credentials from stored API model
1309
+ */
1310
+ value: string;
1311
+ type: 'id';
1312
+ } | {
1313
+ /**
1314
+ * Use direct API key (null for no authentication)
1315
+ */
1316
+ value: ApiKey;
1317
+ type: 'api_key';
1318
+ };
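Editor's note: `TestCreds` replaces the previous pair of loosely coupled `api_key` / `id` fields on `FetchModelsRequest` and `TestPromptRequest` with a single tagged union, so exactly one credential source is expressed. A sketch with hypothetical values (import path assumed):

import type { FetchModelsRequest, TestCreds } from '@bodhiapp/ts-client';

const storedCreds: TestCreds = { type: 'id', value: 'stored-model-config-id' };
const directKey: TestCreds = { type: 'api_key', value: 'sk-example' };
const noAuth: TestCreds = { type: 'api_key', value: null }; // public API

const request: FetchModelsRequest = {
  base_url: 'https://example.com/v1',
  creds: storedCreds,
};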
525
1319
  /**
526
1320
  * Request to test API connectivity with a prompt
527
1321
  */
528
1322
  export type TestPromptRequest = {
529
1323
  /**
530
- * API key for authentication (provide either api_key OR id, api_key takes preference if both provided)
1324
+ * Credentials to use for testing
531
1325
  */
532
- api_key?: string;
1326
+ creds?: TestCreds;
533
1327
  /**
534
- * API base URL (optional when using id)
1328
+ * API base URL
535
1329
  */
536
1330
  base_url: string;
537
- /**
538
- * API model ID to look up stored credentials (provide either api_key OR id, api_key takes preference if both provided)
539
- */
540
- id?: string;
541
1331
  /**
542
1332
  * Model to use for testing
543
1333
  */
@@ -551,18 +1341,38 @@ export type TestPromptRequest = {
551
1341
  * Response from testing API connectivity
552
1342
  */
553
1343
  export type TestPromptResponse = {
554
- error?: string | null;
555
- response?: string | null;
556
1344
  success: boolean;
1345
+ response?: string | null;
1346
+ error?: string | null;
1347
+ };
1348
+ /**
1349
+ * API Token information response
1350
+ */
1351
+ export type TokenInfo = {
1352
+ role: TokenScope;
557
1353
  };
558
1354
  export type TokenScope = 'scope_token_user' | 'scope_token_power_user' | 'scope_token_manager' | 'scope_token_admin';
559
1355
  export type TokenStatus = 'active' | 'inactive';
1356
+ export type TopLogprobs = {
1357
+ /**
1358
+ * The token.
1359
+ */
1360
+ token: string;
1361
+ /**
1362
+ * The log probability of this token.
1363
+ */
1364
+ logprob: number;
1365
+ /**
1366
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
1367
+ */
1368
+ bytes?: Array<number> | null;
1369
+ };
560
1370
  export type UpdateAliasRequest = {
561
- context_params?: Array<string> | null;
562
- filename: string;
563
1371
  repo: string;
564
- request_params?: null | OaiRequestParams;
1372
+ filename: string;
565
1373
  snapshot?: string | null;
1374
+ request_params?: null | OaiRequestParams;
1375
+ context_params?: Array<string> | null;
566
1376
  };
567
1377
  /**
568
1378
  * Request to update an existing API model configuration
@@ -572,14 +1382,14 @@ export type UpdateApiModelRequest = {
572
1382
  * API format/protocol (required)
573
1383
  */
574
1384
  api_format: ApiFormat;
575
- /**
576
- * API key for authentication (optional, only update if provided for security)
577
- */
578
- api_key?: string | null;
579
1385
  /**
580
1386
  * API base URL (required)
581
1387
  */
582
1388
  base_url: string;
1389
+ /**
1390
+ * API key update action (Keep/Set with Some or None)
1391
+ */
1392
+ api_key?: ApiKeyUpdateAction;
583
1393
  /**
584
1394
  * List of available models (required)
585
1395
  */
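Editor's note: on update, the bare `api_key?: string | null` of 0.1.6 could not distinguish "leave the key alone" from "clear it"; `ApiKeyUpdateAction` makes the intent explicit. A hedged sketch: only the fields visible in this diff are filled (hence `Partial` via `satisfies`), and the values and import path are examples:

import type { UpdateApiModelRequest, ApiKeyUpdateAction } from '@bodhiapp/ts-client';

const keepExisting: ApiKeyUpdateAction = { action: 'keep' };
const rotateKey: ApiKeyUpdateAction = { action: 'set', value: 'sk-new-example' };
const clearKey: ApiKeyUpdateAction = { action: 'set', value: null };

const update = {
  api_format: 'openai',
  base_url: 'https://example.com/v1',
  api_key: rotateKey,
  models: ['example-model'],
} satisfies Partial<UpdateApiModelRequest>;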
@@ -612,31 +1422,31 @@ export type UpdateSettingRequest = {
612
1422
  value: unknown;
613
1423
  };
614
1424
  export type UserAccessRequest = {
615
- /**
616
- * Creation timestamp
617
- */
618
- created_at: string;
619
1425
  /**
620
1426
  * Unique identifier for the request
621
1427
  */
622
1428
  id: number;
1429
+ /**
1430
+ * Username of the requesting user
1431
+ */
1432
+ username: string;
1433
+ /**
1434
+ * User ID (UUID) of the requesting user
1435
+ */
1436
+ user_id: string;
623
1437
  reviewer?: string | null;
624
1438
  /**
625
1439
  * Current status of the request
626
1440
  */
627
1441
  status: UserAccessRequestStatus;
628
1442
  /**
629
- * Last update timestamp
630
- */
631
- updated_at: string;
632
- /**
633
- * User ID (UUID) of the requesting user
1443
+ * Creation timestamp
634
1444
  */
635
- user_id: string;
1445
+ created_at: string;
636
1446
  /**
637
- * Username of the requesting user
1447
+ * Last update timestamp
638
1448
  */
639
- username: string;
1449
+ updated_at: string;
640
1450
  };
641
1451
  export type UserAccessRequestStatus = 'pending' | 'approved' | 'rejected';
642
1452
  /**
@@ -644,56 +1454,56 @@ export type UserAccessRequestStatus = 'pending' | 'approved' | 'rejected';
644
1454
  */
645
1455
  export type UserAccessStatusResponse = {
646
1456
  /**
647
- * Creation timestamp
1457
+ * Username of the requesting user
648
1458
  */
649
- created_at: string;
1459
+ username: string;
650
1460
  /**
651
1461
  * Current status of the request (pending, approved, rejected)
652
1462
  */
653
1463
  status: UserAccessRequestStatus;
654
1464
  /**
655
- * Last update timestamp
1465
+ * Creation timestamp
656
1466
  */
657
- updated_at: string;
1467
+ created_at: string;
658
1468
  /**
659
- * Username of the requesting user
1469
+ * Last update timestamp
660
1470
  */
661
- username: string;
1471
+ updated_at: string;
662
1472
  };
663
1473
  export type UserAlias = {
664
1474
  alias: string;
665
- context_params?: Array<string>;
666
- filename: string;
667
1475
  repo: string;
668
- request_params?: OaiRequestParams;
1476
+ filename: string;
669
1477
  snapshot: string;
1478
+ request_params?: OaiRequestParams;
1479
+ context_params?: Array<string>;
670
1480
  };
671
1481
  export type UserAliasResponse = {
672
1482
  alias: string;
673
- context_params: Array<string>;
674
- filename: string;
675
- model_params: {};
676
1483
  repo: string;
677
- request_params: OaiRequestParams;
1484
+ filename: string;
678
1485
  snapshot: string;
679
1486
  source: string;
1487
+ model_params: {};
1488
+ request_params: OaiRequestParams;
1489
+ context_params: Array<string>;
680
1490
  };
681
1491
  export type UserInfo = {
1492
+ user_id: string;
1493
+ username: string;
682
1494
  first_name?: string | null;
683
1495
  last_name?: string | null;
684
1496
  role?: null | AppRole;
685
- user_id: string;
686
- username: string;
687
1497
  };
688
1498
  export type UserListResponse = {
689
1499
  client_id: string;
690
- has_next: boolean;
691
- has_previous: boolean;
1500
+ users: Array<UserInfo>;
692
1501
  page: number;
693
1502
  page_size: number;
694
1503
  total_pages: number;
695
1504
  total_users: number;
696
- users: Array<UserInfo>;
1505
+ has_next: boolean;
1506
+ has_previous: boolean;
697
1507
  };
698
1508
  /**
699
1509
  * User authentication response with discriminated union
@@ -702,8 +1512,47 @@ export type UserResponse = {
702
1512
  auth_status: 'logged_out';
703
1513
  } | (UserInfo & {
704
1514
  auth_status: 'logged_in';
1515
+ }) | (TokenInfo & {
1516
+ auth_status: 'api_token';
705
1517
  });
706
1518
  export type UserScope = 'scope_user_user' | 'scope_user_power_user' | 'scope_user_manager' | 'scope_user_admin';
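Editor's note: `UserResponse` gains a third variant for API-token sessions (`TokenInfo & { auth_status: 'api_token' }`). A narrowing sketch over the discriminant (import path assumed):

import type { UserResponse } from '@bodhiapp/ts-client';

function describeSession(user: UserResponse): string {
  switch (user.auth_status) {
    case 'logged_out':
      return 'not signed in';
    case 'logged_in':
      return `user ${user.username}`;
    case 'api_token':
      return `API token with scope ${user.role}`;
  }
}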
1519
+ /**
1520
+ * The amount of context window space to use for the search.
1521
+ */
1522
+ export type WebSearchContextSize = 'low' | 'medium' | 'high';
1523
+ /**
1524
+ * Approximate location parameters for the search.
1525
+ */
1526
+ export type WebSearchLocation = {
1527
+ /**
1528
+ * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`.
1529
+ */
1530
+ country?: string | null;
1531
+ /**
1532
+ * Free text input for the region of the user, e.g. `California`.
1533
+ */
1534
+ region?: string | null;
1535
+ /**
1536
+ * Free text input for the city of the user, e.g. `San Francisco`.
1537
+ */
1538
+ city?: string | null;
1539
+ /**
1540
+ * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. `America/Los_Angeles`.
1541
+ */
1542
+ timezone?: string | null;
1543
+ };
1544
+ /**
1545
+ * Options for the web search tool.
1546
+ */
1547
+ export type WebSearchOptions = {
1548
+ search_context_size?: null | WebSearchContextSize;
1549
+ user_location?: null | WebSearchUserLocation;
1550
+ };
1551
+ export type WebSearchUserLocation = {
1552
+ type: WebSearchUserLocationType;
1553
+ approximate: WebSearchLocation;
1554
+ };
1555
+ export type WebSearchUserLocationType = 'approximate';
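The web-search types compose as plain data; a minimal sketch of building a WebSearchOptions value with an approximate location (whether a given upstream provider honors these hints is outside this client's scope; the import path is an assumption):
// Sketch: every field is optional or nullable, so partial hints are fine.
import type { WebSearchOptions } from '@bodhiapp/ts-client';

const webSearch: WebSearchOptions = {
  search_context_size: 'medium',
  user_location: {
    type: 'approximate',
    approximate: {
      country: 'US',
      region: 'California',
      city: 'San Francisco',
      timezone: 'America/Los_Angeles',
    },
  },
};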
707
1556
  export type ChatOllamaModelData = {
708
1557
  /**
709
1558
  * Chat request in Ollama format
@@ -715,9 +1564,17 @@ export type ChatOllamaModelData = {
715
1564
  };
716
1565
  export type ChatOllamaModelErrors = {
717
1566
  /**
718
- * Invalid request
1567
+ * Invalid request parameters
1568
+ */
1569
+ 400: OpenAiApiError;
1570
+ /**
1571
+ * Not authenticated
1572
+ */
1573
+ 401: OpenAiApiError;
1574
+ /**
1575
+ * Insufficient permissions
719
1576
  */
720
- 400: OllamaError;
1577
+ 403: OpenAiApiError;
721
1578
  /**
722
1579
  * Model not found
723
1580
  */
@@ -725,7 +1582,7 @@ export type ChatOllamaModelErrors = {
725
1582
  /**
726
1583
  * Internal server error
727
1584
  */
728
- 500: OllamaError;
1585
+ 500: OpenAiApiError;
729
1586
  };
730
1587
  export type ChatOllamaModelError = ChatOllamaModelErrors[keyof ChatOllamaModelErrors];
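The generated *Errors maps are keyed by HTTP status code, and the matching *Error union is derived with a keyof lookup; a minimal sketch of a caller leaning on that pattern (handleOllamaChatError is illustrative only, not part of the package):
// Sketch: status is constrained to the codes the spec declares for /api/chat,
// and body to the union of their payload types.
import type { ChatOllamaModelError, ChatOllamaModelErrors } from '@bodhiapp/ts-client';

function handleOllamaChatError(
  status: keyof ChatOllamaModelErrors,
  body: ChatOllamaModelError
): void {
  console.error(`/api/chat failed with status ${status}`, body);
}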
731
1588
  export type ChatOllamaModelResponses = {
@@ -744,6 +1601,18 @@ export type ShowOllamaModelData = {
744
1601
  url: '/api/show';
745
1602
  };
746
1603
  export type ShowOllamaModelErrors = {
1604
+ /**
1605
+ * Invalid request parameters
1606
+ */
1607
+ 400: OpenAiApiError;
1608
+ /**
1609
+ * Not authenticated
1610
+ */
1611
+ 401: OpenAiApiError;
1612
+ /**
1613
+ * Insufficient permissions
1614
+ */
1615
+ 403: OpenAiApiError;
747
1616
  /**
748
1617
  * Model not found
749
1618
  */
@@ -751,7 +1620,7 @@ export type ShowOllamaModelErrors = {
751
1620
  /**
752
1621
  * Internal server error
753
1622
  */
754
- 500: OllamaError;
1623
+ 500: OpenAiApiError;
755
1624
  };
756
1625
  export type ShowOllamaModelError = ShowOllamaModelErrors[keyof ShowOllamaModelErrors];
757
1626
  export type ShowOllamaModelResponses = {
@@ -768,10 +1637,22 @@ export type ListOllamaModelsData = {
768
1637
  url: '/api/tags';
769
1638
  };
770
1639
  export type ListOllamaModelsErrors = {
1640
+ /**
1641
+ * Invalid request parameters
1642
+ */
1643
+ 400: OpenAiApiError;
1644
+ /**
1645
+ * Not authenticated
1646
+ */
1647
+ 401: OpenAiApiError;
1648
+ /**
1649
+ * Insufficient permissions
1650
+ */
1651
+ 403: OpenAiApiError;
771
1652
  /**
772
1653
  * Internal server error
773
1654
  */
774
- 500: OllamaError;
1655
+ 500: OpenAiApiError;
775
1656
  };
776
1657
  export type ListOllamaModelsError = ListOllamaModelsErrors[keyof ListOllamaModelsErrors];
777
1658
  export type ListOllamaModelsResponses = {
@@ -805,6 +1686,10 @@ export type ListAllAccessRequestsData = {
805
1686
  url: '/bodhi/v1/access-requests';
806
1687
  };
807
1688
  export type ListAllAccessRequestsErrors = {
1689
+ /**
1690
+ * Invalid request parameters
1691
+ */
1692
+ 400: OpenAiApiError;
808
1693
  /**
809
1694
  * Not authenticated
810
1695
  */
@@ -813,6 +1698,10 @@ export type ListAllAccessRequestsErrors = {
813
1698
  * Insufficient permissions
814
1699
  */
815
1700
  403: OpenAiApiError;
1701
+ /**
1702
+ * Internal server error
1703
+ */
1704
+ 500: OpenAiApiError;
816
1705
  };
817
1706
  export type ListAllAccessRequestsError = ListAllAccessRequestsErrors[keyof ListAllAccessRequestsErrors];
818
1707
  export type ListAllAccessRequestsResponses = {
@@ -846,6 +1735,10 @@ export type ListPendingAccessRequestsData = {
846
1735
  url: '/bodhi/v1/access-requests/pending';
847
1736
  };
848
1737
  export type ListPendingAccessRequestsErrors = {
1738
+ /**
1739
+ * Invalid request parameters
1740
+ */
1741
+ 400: OpenAiApiError;
849
1742
  /**
850
1743
  * Not authenticated
851
1744
  */
@@ -854,6 +1747,10 @@ export type ListPendingAccessRequestsErrors = {
854
1747
  * Insufficient permissions
855
1748
  */
856
1749
  403: OpenAiApiError;
1750
+ /**
1751
+ * Internal server error
1752
+ */
1753
+ 500: OpenAiApiError;
857
1754
  };
858
1755
  export type ListPendingAccessRequestsError = ListPendingAccessRequestsErrors[keyof ListPendingAccessRequestsErrors];
859
1756
  export type ListPendingAccessRequestsResponses = {
@@ -878,6 +1775,10 @@ export type ApproveAccessRequestData = {
878
1775
  url: '/bodhi/v1/access-requests/{id}/approve';
879
1776
  };
880
1777
  export type ApproveAccessRequestErrors = {
1778
+ /**
1779
+ * Invalid request parameters
1780
+ */
1781
+ 400: OpenAiApiError;
881
1782
  /**
882
1783
  * Not authenticated
883
1784
  */
@@ -890,6 +1791,10 @@ export type ApproveAccessRequestErrors = {
890
1791
  * Request not found
891
1792
  */
892
1793
  404: OpenAiApiError;
1794
+ /**
1795
+ * Internal server error
1796
+ */
1797
+ 500: OpenAiApiError;
893
1798
  };
894
1799
  export type ApproveAccessRequestError = ApproveAccessRequestErrors[keyof ApproveAccessRequestErrors];
895
1800
  export type ApproveAccessRequestResponses = {
@@ -910,6 +1815,10 @@ export type RejectAccessRequestData = {
910
1815
  url: '/bodhi/v1/access-requests/{id}/reject';
911
1816
  };
912
1817
  export type RejectAccessRequestErrors = {
1818
+ /**
1819
+ * Invalid request parameters
1820
+ */
1821
+ 400: OpenAiApiError;
913
1822
  /**
914
1823
  * Not authenticated
915
1824
  */
@@ -922,6 +1831,10 @@ export type RejectAccessRequestErrors = {
922
1831
  * Request not found
923
1832
  */
924
1833
  404: OpenAiApiError;
1834
+ /**
1835
+ * Internal server error
1836
+ */
1837
+ 500: OpenAiApiError;
925
1838
  };
926
1839
  export type RejectAccessRequestError = RejectAccessRequestErrors[keyof RejectAccessRequestErrors];
927
1840
  export type RejectAccessRequestResponses = {
@@ -955,7 +1868,19 @@ export type ListApiModelsData = {
955
1868
  };
956
1869
  export type ListApiModelsErrors = {
957
1870
  /**
958
- * Internal server error during API model retrieval
1871
+ * Invalid request parameters
1872
+ */
1873
+ 400: OpenAiApiError;
1874
+ /**
1875
+ * Not authenticated
1876
+ */
1877
+ 401: OpenAiApiError;
1878
+ /**
1879
+ * Insufficient permissions
1880
+ */
1881
+ 403: OpenAiApiError;
1882
+ /**
1883
+ * Internal server error
959
1884
  */
960
1885
  500: OpenAiApiError;
961
1886
  };
@@ -975,9 +1900,17 @@ export type CreateApiModelData = {
975
1900
  };
976
1901
  export type CreateApiModelErrors = {
977
1902
  /**
978
- * Invalid request
1903
+ * Invalid request parameters
979
1904
  */
980
1905
  400: OpenAiApiError;
1906
+ /**
1907
+ * Not authenticated
1908
+ */
1909
+ 401: OpenAiApiError;
1910
+ /**
1911
+ * Insufficient permissions
1912
+ */
1913
+ 403: OpenAiApiError;
981
1914
  /**
982
1915
  * Alias already exists
983
1916
  */
@@ -1003,7 +1936,19 @@ export type GetApiFormatsData = {
1003
1936
  };
1004
1937
  export type GetApiFormatsErrors = {
1005
1938
  /**
1006
- * Internal server error during API format retrieval
1939
+ * Invalid request parameters
1940
+ */
1941
+ 400: OpenAiApiError;
1942
+ /**
1943
+ * Not authenticated
1944
+ */
1945
+ 401: OpenAiApiError;
1946
+ /**
1947
+ * Insufficient permissions
1948
+ */
1949
+ 403: OpenAiApiError;
1950
+ /**
1951
+ * Internal server error
1007
1952
  */
1008
1953
  500: OpenAiApiError;
1009
1954
  };
@@ -1023,9 +1968,17 @@ export type FetchApiModelsData = {
1023
1968
  };
1024
1969
  export type FetchApiModelsErrors = {
1025
1970
  /**
1026
- * Invalid request
1971
+ * Invalid request parameters
1027
1972
  */
1028
1973
  400: OpenAiApiError;
1974
+ /**
1975
+ * Not authenticated
1976
+ */
1977
+ 401: OpenAiApiError;
1978
+ /**
1979
+ * Insufficient permissions
1980
+ */
1981
+ 403: OpenAiApiError;
1029
1982
  /**
1030
1983
  * Internal server error
1031
1984
  */
@@ -1047,9 +2000,17 @@ export type TestApiModelData = {
1047
2000
  };
1048
2001
  export type TestApiModelErrors = {
1049
2002
  /**
1050
- * Invalid request
2003
+ * Invalid request parameters
1051
2004
  */
1052
2005
  400: OpenAiApiError;
2006
+ /**
2007
+ * Not authenticated
2008
+ */
2009
+ 401: OpenAiApiError;
2010
+ /**
2011
+ * Insufficient permissions
2012
+ */
2013
+ 403: OpenAiApiError;
1053
2014
  /**
1054
2015
  * Internal server error
1055
2016
  */
@@ -1067,14 +2028,26 @@ export type DeleteApiModelData = {
1067
2028
  body?: never;
1068
2029
  path: {
1069
2030
  /**
1070
- * API model alias
2031
+ * API model ID
1071
2032
  */
1072
- alias: string;
2033
+ id: string;
1073
2034
  };
1074
2035
  query?: never;
1075
- url: '/bodhi/v1/api-models/{alias}';
2036
+ url: '/bodhi/v1/api-models/{id}';
1076
2037
  };
1077
2038
  export type DeleteApiModelErrors = {
2039
+ /**
2040
+ * Invalid request parameters
2041
+ */
2042
+ 400: OpenAiApiError;
2043
+ /**
2044
+ * Not authenticated
2045
+ */
2046
+ 401: OpenAiApiError;
2047
+ /**
2048
+ * Insufficient permissions
2049
+ */
2050
+ 403: OpenAiApiError;
1078
2051
  /**
1079
2052
  * API model not found
1080
2053
  */
@@ -1092,24 +2065,32 @@ export type DeleteApiModelResponses = {
1092
2065
  204: void;
1093
2066
  };
1094
2067
  export type DeleteApiModelResponse = DeleteApiModelResponses[keyof DeleteApiModelResponses];
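API-model routes are now addressed by id instead of alias; a hand-rolled sketch of the delete call under the new path (the generated SDK functions are the usual entry point; the HTTP method, base URL, and bearer-token header here are assumptions for illustration):
// Sketch: DELETE /bodhi/v1/api-models/{id}, expecting the documented 204.
async function deleteApiModel(baseUrl: string, token: string, id: string): Promise<void> {
  const res = await fetch(`${baseUrl}/bodhi/v1/api-models/${encodeURIComponent(id)}`, {
    method: 'DELETE', // method assumed; the spec excerpt above only shows path and responses
    headers: { Authorization: `Bearer ${token}` },
  });
  if (res.status !== 204) {
    throw new Error(`delete api model failed: ${res.status}`);
  }
}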
1095
- export type UpdateApiModelData = {
1096
- body: UpdateApiModelRequest;
2068
+ export type GetApiModelData = {
2069
+ body?: never;
1097
2070
  path: {
1098
2071
  /**
1099
- * API model alias
2072
+ * Unique identifier for the API model alias
1100
2073
  */
1101
- alias: string;
2074
+ id: string;
1102
2075
  };
1103
2076
  query?: never;
1104
- url: '/bodhi/v1/api-models/{alias}';
2077
+ url: '/bodhi/v1/api-models/{id}';
1105
2078
  };
1106
- export type UpdateApiModelErrors = {
2079
+ export type GetApiModelErrors = {
1107
2080
  /**
1108
- * Invalid request
2081
+ * Invalid request parameters
1109
2082
  */
1110
2083
  400: OpenAiApiError;
1111
2084
  /**
1112
- * API model not found
2085
+ * Not authenticated
2086
+ */
2087
+ 401: OpenAiApiError;
2088
+ /**
2089
+ * Insufficient permissions
2090
+ */
2091
+ 403: OpenAiApiError;
2092
+ /**
2093
+ * API model with specified ID not found
1113
2094
  */
1114
2095
  404: OpenAiApiError;
1115
2096
  /**
@@ -1117,43 +2098,55 @@ export type UpdateApiModelErrors = {
1117
2098
  */
1118
2099
  500: OpenAiApiError;
1119
2100
  };
1120
- export type UpdateApiModelError = UpdateApiModelErrors[keyof UpdateApiModelErrors];
1121
- export type UpdateApiModelResponses = {
2101
+ export type GetApiModelError = GetApiModelErrors[keyof GetApiModelErrors];
2102
+ export type GetApiModelResponses = {
1122
2103
  /**
1123
- * API model updated
2104
+ * API model configuration retrieved successfully
1124
2105
  */
1125
2106
  200: ApiModelResponse;
1126
2107
  };
1127
- export type UpdateApiModelResponse = UpdateApiModelResponses[keyof UpdateApiModelResponses];
1128
- export type GetApiModelData = {
1129
- body?: never;
2108
+ export type GetApiModelResponse = GetApiModelResponses[keyof GetApiModelResponses];
2109
+ export type UpdateApiModelData = {
2110
+ body: UpdateApiModelRequest;
1130
2111
  path: {
1131
2112
  /**
1132
- * Unique identifier for the API model alias
2113
+ * API model ID
1133
2114
  */
1134
2115
  id: string;
1135
2116
  };
1136
2117
  query?: never;
1137
2118
  url: '/bodhi/v1/api-models/{id}';
1138
2119
  };
1139
- export type GetApiModelErrors = {
2120
+ export type UpdateApiModelErrors = {
1140
2121
  /**
1141
- * API model with specified ID not found
2122
+ * Invalid request parameters
2123
+ */
2124
+ 400: OpenAiApiError;
2125
+ /**
2126
+ * Not authenticated
2127
+ */
2128
+ 401: OpenAiApiError;
2129
+ /**
2130
+ * Insufficient permissions
2131
+ */
2132
+ 403: OpenAiApiError;
2133
+ /**
2134
+ * API model not found
1142
2135
  */
1143
2136
  404: OpenAiApiError;
1144
2137
  /**
1145
- * Internal server error during model retrieval
2138
+ * Internal server error
1146
2139
  */
1147
2140
  500: OpenAiApiError;
1148
2141
  };
1149
- export type GetApiModelError = GetApiModelErrors[keyof GetApiModelErrors];
1150
- export type GetApiModelResponses = {
2142
+ export type UpdateApiModelError = UpdateApiModelErrors[keyof UpdateApiModelErrors];
2143
+ export type UpdateApiModelResponses = {
1151
2144
  /**
1152
- * API model configuration retrieved successfully
2145
+ * API model updated
1153
2146
  */
1154
2147
  200: ApiModelResponse;
1155
2148
  };
1156
- export type GetApiModelResponse = GetApiModelResponses[keyof GetApiModelResponses];
2149
+ export type UpdateApiModelResponse = UpdateApiModelResponses[keyof UpdateApiModelResponses];
1157
2150
  export type RequestAccessData = {
1158
2151
  /**
1159
2152
  * Application client requesting access
@@ -1165,11 +2158,19 @@ export type RequestAccessData = {
1165
2158
  };
1166
2159
  export type RequestAccessErrors = {
1167
2160
  /**
1168
- * Invalid request, application not registered, or incorrect app status
2161
+ * Invalid request parameters
1169
2162
  */
1170
2163
  400: OpenAiApiError;
1171
2164
  /**
1172
- * Internal server error during access request
2165
+ * Not authenticated
2166
+ */
2167
+ 401: OpenAiApiError;
2168
+ /**
2169
+ * Insufficient permissions
2170
+ */
2171
+ 403: OpenAiApiError;
2172
+ /**
2173
+ * Internal server error
1173
2174
  */
1174
2175
  500: OpenAiApiError;
1175
2176
  };
@@ -1191,12 +2192,24 @@ export type CompleteOAuthFlowData = {
1191
2192
  url: '/bodhi/v1/auth/callback';
1192
2193
  };
1193
2194
  export type CompleteOAuthFlowErrors = {
2195
+ /**
2196
+ * Invalid request parameters
2197
+ */
2198
+ 400: OpenAiApiError;
2199
+ /**
2200
+ * Not authenticated
2201
+ */
2202
+ 401: OpenAiApiError;
2203
+ /**
2204
+ * Insufficient permissions
2205
+ */
2206
+ 403: OpenAiApiError;
1194
2207
  /**
1195
2208
  * OAuth error, invalid request parameters, or state mismatch
1196
2209
  */
1197
2210
  422: OpenAiApiError;
1198
2211
  /**
1199
- * Internal server error during token exchange
2212
+ * Internal server error
1200
2213
  */
1201
2214
  500: OpenAiApiError;
1202
2215
  };
@@ -1216,7 +2229,19 @@ export type InitiateOAuthFlowData = {
1216
2229
  };
1217
2230
  export type InitiateOAuthFlowErrors = {
1218
2231
  /**
1219
- * Internal server error during OAuth initialization
2232
+ * Invalid request parameters
2233
+ */
2234
+ 400: OpenAiApiError;
2235
+ /**
2236
+ * Not authenticated
2237
+ */
2238
+ 401: OpenAiApiError;
2239
+ /**
2240
+ * Insufficient permissions
2241
+ */
2242
+ 403: OpenAiApiError;
2243
+ /**
2244
+ * Internal server error
1220
2245
  */
1221
2246
  500: OpenAiApiError;
1222
2247
  };
@@ -1239,6 +2264,10 @@ export type GetAppInfoData = {
1239
2264
  url: '/bodhi/v1/info';
1240
2265
  };
1241
2266
  export type GetAppInfoErrors = {
2267
+ /**
2268
+ * Invalid request parameters
2269
+ */
2270
+ 400: OpenAiApiError;
1242
2271
  /**
1243
2272
  * Internal server error
1244
2273
  */
@@ -1260,7 +2289,19 @@ export type LogoutUserData = {
1260
2289
  };
1261
2290
  export type LogoutUserErrors = {
1262
2291
  /**
1263
- * Session deletion failed
2292
+ * Invalid request parameters
2293
+ */
2294
+ 400: OpenAiApiError;
2295
+ /**
2296
+ * Not authenticated
2297
+ */
2298
+ 401: OpenAiApiError;
2299
+ /**
2300
+ * Insufficient permissions
2301
+ */
2302
+ 403: OpenAiApiError;
2303
+ /**
2304
+ * Internal server error
1264
2305
  */
1265
2306
  500: OpenAiApiError;
1266
2307
  };
@@ -1296,6 +2337,18 @@ export type ListModelFilesData = {
1296
2337
  url: '/bodhi/v1/modelfiles';
1297
2338
  };
1298
2339
  export type ListModelFilesErrors = {
2340
+ /**
2341
+ * Invalid request parameters
2342
+ */
2343
+ 400: OpenAiApiError;
2344
+ /**
2345
+ * Not authenticated
2346
+ */
2347
+ 401: OpenAiApiError;
2348
+ /**
2349
+ * Insufficient permissions
2350
+ */
2351
+ 403: OpenAiApiError;
1299
2352
  /**
1300
2353
  * Internal server error
1301
2354
  */
@@ -1334,7 +2387,19 @@ export type ListDownloadsData = {
1334
2387
  };
1335
2388
  export type ListDownloadsErrors = {
1336
2389
  /**
1337
- * Internal server error during download list retrieval
2390
+ * Invalid request parameters
2391
+ */
2392
+ 400: OpenAiApiError;
2393
+ /**
2394
+ * Not authenticated
2395
+ */
2396
+ 401: OpenAiApiError;
2397
+ /**
2398
+ * Insufficient permissions
2399
+ */
2400
+ 403: OpenAiApiError;
2401
+ /**
2402
+ * Internal server error
1338
2403
  */
1339
2404
  500: OpenAiApiError;
1340
2405
  };
@@ -1357,9 +2422,17 @@ export type PullModelFileData = {
1357
2422
  };
1358
2423
  export type PullModelFileErrors = {
1359
2424
  /**
1360
- * File already exists or invalid input
2425
+ * Invalid request parameters
1361
2426
  */
1362
2427
  400: OpenAiApiError;
2428
+ /**
2429
+ * Not authenticated
2430
+ */
2431
+ 401: OpenAiApiError;
2432
+ /**
2433
+ * Insufficient permissions
2434
+ */
2435
+ 403: OpenAiApiError;
1363
2436
  /**
1364
2437
  * Internal server error
1365
2438
  */
@@ -1390,9 +2463,17 @@ export type PullModelByAliasData = {
1390
2463
  };
1391
2464
  export type PullModelByAliasErrors = {
1392
2465
  /**
1393
- * File already exists
2466
+ * Invalid request parameters
1394
2467
  */
1395
2468
  400: OpenAiApiError;
2469
+ /**
2470
+ * Not authenticated
2471
+ */
2472
+ 401: OpenAiApiError;
2473
+ /**
2474
+ * Insufficient permissions
2475
+ */
2476
+ 403: OpenAiApiError;
1396
2477
  /**
1397
2478
  * Alias not found
1398
2479
  */
@@ -1426,6 +2507,18 @@ export type GetDownloadStatusData = {
1426
2507
  url: '/bodhi/v1/modelfiles/pull/{id}';
1427
2508
  };
1428
2509
  export type GetDownloadStatusErrors = {
2510
+ /**
2511
+ * Invalid request parameters
2512
+ */
2513
+ 400: OpenAiApiError;
2514
+ /**
2515
+ * Not authenticated
2516
+ */
2517
+ 401: OpenAiApiError;
2518
+ /**
2519
+ * Insufficient permissions
2520
+ */
2521
+ 403: OpenAiApiError;
1429
2522
  /**
1430
2523
  * Download request not found
1431
2524
  */
@@ -1467,6 +2560,18 @@ export type ListAllModelsData = {
1467
2560
  url: '/bodhi/v1/models';
1468
2561
  };
1469
2562
  export type ListAllModelsErrors = {
2563
+ /**
2564
+ * Invalid request parameters
2565
+ */
2566
+ 400: OpenAiApiError;
2567
+ /**
2568
+ * Not authenticated
2569
+ */
2570
+ 401: OpenAiApiError;
2571
+ /**
2572
+ * Insufficient permissions
2573
+ */
2574
+ 403: OpenAiApiError;
1470
2575
  /**
1471
2576
  * Internal server error
1472
2577
  */
@@ -1488,9 +2593,17 @@ export type CreateAliasData = {
1488
2593
  };
1489
2594
  export type CreateAliasErrors = {
1490
2595
  /**
1491
- * Invalid request
2596
+ * Invalid request parameters
1492
2597
  */
1493
2598
  400: OpenAiApiError;
2599
+ /**
2600
+ * Not authenticated
2601
+ */
2602
+ 401: OpenAiApiError;
2603
+ /**
2604
+ * Insufficient permissions
2605
+ */
2606
+ 403: OpenAiApiError;
1494
2607
  /**
1495
2608
  * Internal server error
1496
2609
  */
@@ -1516,6 +2629,18 @@ export type GetAliasData = {
1516
2629
  url: '/bodhi/v1/models/{alias}';
1517
2630
  };
1518
2631
  export type GetAliasErrors = {
2632
+ /**
2633
+ * Invalid request parameters
2634
+ */
2635
+ 400: OpenAiApiError;
2636
+ /**
2637
+ * Not authenticated
2638
+ */
2639
+ 401: OpenAiApiError;
2640
+ /**
2641
+ * Insufficient permissions
2642
+ */
2643
+ 403: OpenAiApiError;
1519
2644
  /**
1520
2645
  * Alias not found
1521
2646
  */
@@ -1546,9 +2671,17 @@ export type UpdateAliasData = {
1546
2671
  };
1547
2672
  export type UpdateAliasErrors = {
1548
2673
  /**
1549
- * Invalid request
2674
+ * Invalid request parameters
1550
2675
  */
1551
2676
  400: OpenAiApiError;
2677
+ /**
2678
+ * Not authenticated
2679
+ */
2680
+ 401: OpenAiApiError;
2681
+ /**
2682
+ * Insufficient permissions
2683
+ */
2684
+ 403: OpenAiApiError;
1552
2685
  /**
1553
2686
  * Internal server error
1554
2687
  */
@@ -1557,9 +2690,9 @@ export type UpdateAliasErrors = {
1557
2690
  export type UpdateAliasError = UpdateAliasErrors[keyof UpdateAliasErrors];
1558
2691
  export type UpdateAliasResponses = {
1559
2692
  /**
1560
- * Alias created successfully
2693
+ * Alias updated successfully
1561
2694
  */
1562
- 201: UserAliasResponse;
2695
+ 200: UserAliasResponse;
1563
2696
  };
1564
2697
  export type UpdateAliasResponse = UpdateAliasResponses[keyof UpdateAliasResponses];
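A successful alias update now maps to a 200 response rather than 201, so status checks written against 0.1.6 need adjusting; a minimal sketch, assuming a raw Response from whatever transport performs the update request:
// Sketch: accept only the 200 documented above and decode the typed body.
import type { UserAliasResponse } from '@bodhiapp/ts-client';

async function readUpdatedAlias(res: Response): Promise<UserAliasResponse> {
  if (res.status !== 200) {
    throw new Error(`alias update failed: ${res.status}`);
  }
  return (await res.json()) as UserAliasResponse;
}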
1565
2698
  export type ListSettingsData = {
@@ -1570,9 +2703,17 @@ export type ListSettingsData = {
1570
2703
  };
1571
2704
  export type ListSettingsErrors = {
1572
2705
  /**
1573
- * Unauthorized - User is not an admin
2706
+ * Invalid request parameters
2707
+ */
2708
+ 400: OpenAiApiError;
2709
+ /**
2710
+ * Not authenticated
1574
2711
  */
1575
2712
  401: OpenAiApiError;
2713
+ /**
2714
+ * Insufficient permissions
2715
+ */
2716
+ 403: OpenAiApiError;
1576
2717
  /**
1577
2718
  * Internal server error
1578
2719
  */
@@ -1598,10 +2739,26 @@ export type DeleteSettingData = {
1598
2739
  url: '/bodhi/v1/settings/{key}';
1599
2740
  };
1600
2741
  export type DeleteSettingErrors = {
2742
+ /**
2743
+ * Invalid request parameters
2744
+ */
2745
+ 400: OpenAiApiError;
2746
+ /**
2747
+ * Not authenticated
2748
+ */
2749
+ 401: OpenAiApiError;
2750
+ /**
2751
+ * Insufficient permissions
2752
+ */
2753
+ 403: OpenAiApiError;
1601
2754
  /**
1602
2755
  * Setting not found
1603
2756
  */
1604
2757
  404: OpenAiApiError;
2758
+ /**
2759
+ * Internal server error
2760
+ */
2761
+ 500: OpenAiApiError;
1605
2762
  };
1606
2763
  export type DeleteSettingError = DeleteSettingErrors[keyof DeleteSettingErrors];
1607
2764
  export type DeleteSettingResponses = {
@@ -1632,13 +2789,25 @@ export type UpdateSettingData = {
1632
2789
  };
1633
2790
  export type UpdateSettingErrors = {
1634
2791
  /**
1635
- * Invalid setting or value
2792
+ * Invalid request parameters
1636
2793
  */
1637
2794
  400: OpenAiApiError;
2795
+ /**
2796
+ * Not authenticated
2797
+ */
2798
+ 401: OpenAiApiError;
2799
+ /**
2800
+ * Insufficient permissions
2801
+ */
2802
+ 403: OpenAiApiError;
1638
2803
  /**
1639
2804
  * Setting not found
1640
2805
  */
1641
2806
  404: OpenAiApiError;
2807
+ /**
2808
+ * Internal server error
2809
+ */
2810
+ 500: OpenAiApiError;
1642
2811
  };
1643
2812
  export type UpdateSettingError = UpdateSettingErrors[keyof UpdateSettingErrors];
1644
2813
  export type UpdateSettingResponses = {
@@ -1659,11 +2828,11 @@ export type SetupAppData = {
1659
2828
  };
1660
2829
  export type SetupAppErrors = {
1661
2830
  /**
1662
- * Invalid request or application already setup
2831
+ * Invalid request parameters
1663
2832
  */
1664
2833
  400: OpenAiApiError;
1665
2834
  /**
1666
- * Internal server error during setup
2835
+ * Internal server error
1667
2836
  */
1668
2837
  500: OpenAiApiError;
1669
2838
  };
@@ -1700,9 +2869,17 @@ export type ListApiTokensData = {
1700
2869
  };
1701
2870
  export type ListApiTokensErrors = {
1702
2871
  /**
1703
- * Unauthorized - Token missing or invalid
2872
+ * Invalid request parameters
2873
+ */
2874
+ 400: OpenAiApiError;
2875
+ /**
2876
+ * Not authenticated
1704
2877
  */
1705
2878
  401: OpenAiApiError;
2879
+ /**
2880
+ * Insufficient permissions
2881
+ */
2882
+ 403: OpenAiApiError;
1706
2883
  /**
1707
2884
  * Internal server error
1708
2885
  */
@@ -1727,11 +2904,19 @@ export type CreateApiTokenData = {
1727
2904
  };
1728
2905
  export type CreateApiTokenErrors = {
1729
2906
  /**
1730
- * Invalid request parameters or token name already exists
2907
+ * Invalid request parameters
1731
2908
  */
1732
2909
  400: OpenAiApiError;
1733
2910
  /**
1734
- * Internal server error during token creation
2911
+ * Not authenticated
2912
+ */
2913
+ 401: OpenAiApiError;
2914
+ /**
2915
+ * Insufficient permissions
2916
+ */
2917
+ 403: OpenAiApiError;
2918
+ /**
2919
+ * Internal server error
1735
2920
  */
1736
2921
  500: OpenAiApiError;
1737
2922
  };
@@ -1759,9 +2944,17 @@ export type UpdateApiTokenData = {
1759
2944
  };
1760
2945
  export type UpdateApiTokenErrors = {
1761
2946
  /**
1762
- * Unauthorized - Token missing or invalid
2947
+ * Invalid request parameters
2948
+ */
2949
+ 400: OpenAiApiError;
2950
+ /**
2951
+ * Not authenticated
1763
2952
  */
1764
2953
  401: OpenAiApiError;
2954
+ /**
2955
+ * Insufficient permissions
2956
+ */
2957
+ 403: OpenAiApiError;
1765
2958
  /**
1766
2959
  * Token not found
1767
2960
  */
@@ -1787,14 +2980,26 @@ export type GetCurrentUserData = {
1787
2980
  };
1788
2981
  export type GetCurrentUserErrors = {
1789
2982
  /**
1790
- * Authentication error or invalid token
2983
+ * Invalid request parameters
2984
+ */
2985
+ 400: OpenAiApiError;
2986
+ /**
2987
+ * Not authenticated
2988
+ */
2989
+ 401: OpenAiApiError;
2990
+ /**
2991
+ * Insufficient permissions
2992
+ */
2993
+ 403: OpenAiApiError;
2994
+ /**
2995
+ * Internal server error
1791
2996
  */
1792
2997
  500: OpenAiApiError;
1793
2998
  };
1794
2999
  export type GetCurrentUserError = GetCurrentUserErrors[keyof GetCurrentUserErrors];
1795
3000
  export type GetCurrentUserResponses = {
1796
3001
  /**
1797
- * Current user information retrieved successfully
3002
+ * User information (authenticated or not)
1798
3003
  */
1799
3004
  200: UserResponse;
1800
3005
  };
@@ -1806,10 +3011,18 @@ export type RequestUserAccessData = {
1806
3011
  url: '/bodhi/v1/user/request-access';
1807
3012
  };
1808
3013
  export type RequestUserAccessErrors = {
3014
+ /**
3015
+ * Invalid request parameters
3016
+ */
3017
+ 400: OpenAiApiError;
1809
3018
  /**
1810
3019
  * Not authenticated
1811
3020
  */
1812
3021
  401: OpenAiApiError;
3022
+ /**
3023
+ * Insufficient permissions
3024
+ */
3025
+ 403: OpenAiApiError;
1813
3026
  /**
1814
3027
  * Pending request already exists
1815
3028
  */
@@ -1818,15 +3031,18 @@ export type RequestUserAccessErrors = {
1818
3031
  * User already has role
1819
3032
  */
1820
3033
  422: OpenAiApiError;
3034
+ /**
3035
+ * Internal server error
3036
+ */
3037
+ 500: OpenAiApiError;
1821
3038
  };
1822
3039
  export type RequestUserAccessError = RequestUserAccessErrors[keyof RequestUserAccessErrors];
1823
3040
  export type RequestUserAccessResponses = {
1824
3041
  /**
1825
3042
  * Access request created successfully
1826
3043
  */
1827
- 201: EmptyResponse;
3044
+ 201: unknown;
1828
3045
  };
1829
- export type RequestUserAccessResponse = RequestUserAccessResponses[keyof RequestUserAccessResponses];
1830
3046
  export type GetUserAccessStatusData = {
1831
3047
  body?: never;
1832
3048
  path?: never;
@@ -1835,17 +3051,25 @@ export type GetUserAccessStatusData = {
1835
3051
  };
1836
3052
  export type GetUserAccessStatusErrors = {
1837
3053
  /**
1838
- * Bad Request
3054
+ * Invalid request parameters
1839
3055
  */
1840
3056
  400: OpenAiApiError;
1841
3057
  /**
1842
3058
  * Not authenticated
1843
3059
  */
1844
3060
  401: OpenAiApiError;
3061
+ /**
3062
+ * Insufficient permissions
3063
+ */
3064
+ 403: OpenAiApiError;
1845
3065
  /**
1846
3066
  * Request not found
1847
3067
  */
1848
3068
  404: OpenAiApiError;
3069
+ /**
3070
+ * Internal server error
3071
+ */
3072
+ 500: OpenAiApiError;
1849
3073
  };
1850
3074
  export type GetUserAccessStatusError = GetUserAccessStatusErrors[keyof GetUserAccessStatusErrors];
1851
3075
  export type GetUserAccessStatusResponses = {
@@ -1909,7 +3133,7 @@ export type RemoveUserData = {
1909
3133
  };
1910
3134
  export type RemoveUserErrors = {
1911
3135
  /**
1912
- * Invalid request
3136
+ * Invalid request parameters
1913
3137
  */
1914
3138
  400: OpenAiApiError;
1915
3139
  /**
@@ -1949,7 +3173,7 @@ export type ChangeUserRoleData = {
1949
3173
  };
1950
3174
  export type ChangeUserRoleErrors = {
1951
3175
  /**
1952
- * Invalid request
3176
+ * Invalid request parameters
1953
3177
  */
1954
3178
  400: OpenAiApiError;
1955
3179
  /**
@@ -1982,6 +3206,17 @@ export type HealthCheckData = {
1982
3206
  query?: never;
1983
3207
  url: '/health';
1984
3208
  };
3209
+ export type HealthCheckErrors = {
3210
+ /**
3211
+ * Invalid request parameters
3212
+ */
3213
+ 400: OpenAiApiError;
3214
+ /**
3215
+ * Internal server error
3216
+ */
3217
+ 500: OpenAiApiError;
3218
+ };
3219
+ export type HealthCheckError = HealthCheckErrors[keyof HealthCheckErrors];
1985
3220
  export type HealthCheckResponses = {
1986
3221
  /**
1987
3222
  * Application is healthy and fully operational
@@ -1995,6 +3230,17 @@ export type PingServerData = {
1995
3230
  query?: never;
1996
3231
  url: '/ping';
1997
3232
  };
3233
+ export type PingServerErrors = {
3234
+ /**
3235
+ * Invalid request parameters
3236
+ */
3237
+ 400: OpenAiApiError;
3238
+ /**
3239
+ * Internal server error
3240
+ */
3241
+ 500: OpenAiApiError;
3242
+ };
3243
+ export type PingServerError = PingServerErrors[keyof PingServerErrors];
1998
3244
  export type PingServerResponses = {
1999
3245
  /**
2000
3246
  * Server is responding normally
@@ -2003,7 +3249,7 @@ export type PingServerResponses = {
2003
3249
  };
2004
3250
  export type PingServerResponse = PingServerResponses[keyof PingServerResponses];
2005
3251
  export type CreateChatCompletionData = {
2006
- body: unknown;
3252
+ body: CreateChatCompletionRequest;
2007
3253
  path?: never;
2008
3254
  query?: never;
2009
3255
  url: '/v1/chat/completions';
@@ -2014,9 +3260,13 @@ export type CreateChatCompletionErrors = {
2014
3260
  */
2015
3261
  400: OpenAiApiError;
2016
3262
  /**
2017
- * Invalid authentication
3263
+ * Not authenticated
2018
3264
  */
2019
3265
  401: OpenAiApiError;
3266
+ /**
3267
+ * Insufficient permissions
3268
+ */
3269
+ 403: OpenAiApiError;
2020
3270
  /**
2021
3271
  * Internal server error
2022
3272
  */
@@ -2027,12 +3277,45 @@ export type CreateChatCompletionResponses = {
2027
3277
  /**
2028
3278
  * Chat completion response
2029
3279
  */
2030
- 200: unknown;
3280
+ 200: CreateChatCompletionResponse;
2031
3281
  /**
2032
3282
  * Chat completion stream, the status is 200, using 201 to avoid OpenAPI format limitation.
2033
3283
  */
2034
- 201: unknown;
3284
+ 201: CreateChatCompletionStreamResponse;
3285
+ };
3286
+ export type CreateChatCompletionResponse2 = CreateChatCompletionResponses[keyof CreateChatCompletionResponses];
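The chat-completion endpoint is now typed end to end; a minimal non-streaming wrapper sketch (base URL, POST method, JSON transport, and the bearer header are assumptions, and the generated SDK functions remain the normal way to call this):
// Sketch: POST /v1/chat/completions with a typed body and typed 200 result.
import type {
  CreateChatCompletionRequest,
  CreateChatCompletionResponse,
} from '@bodhiapp/ts-client';

async function chat(
  baseUrl: string,
  token: string,
  body: CreateChatCompletionRequest
): Promise<CreateChatCompletionResponse> {
  const res = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`chat completion failed: ${res.status}`);
  return (await res.json()) as CreateChatCompletionResponse;
}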
3287
+ export type CreateEmbeddingData = {
3288
+ body: CreateEmbeddingRequest;
3289
+ path?: never;
3290
+ query?: never;
3291
+ url: '/v1/embeddings';
3292
+ };
3293
+ export type CreateEmbeddingErrors = {
3294
+ /**
3295
+ * Invalid request parameters
3296
+ */
3297
+ 400: OpenAiApiError;
3298
+ /**
3299
+ * Not authenticated
3300
+ */
3301
+ 401: OpenAiApiError;
3302
+ /**
3303
+ * Insufficient permissions
3304
+ */
3305
+ 403: OpenAiApiError;
3306
+ /**
3307
+ * Internal server error
3308
+ */
3309
+ 500: OpenAiApiError;
3310
+ };
3311
+ export type CreateEmbeddingError = CreateEmbeddingErrors[keyof CreateEmbeddingErrors];
3312
+ export type CreateEmbeddingResponses = {
3313
+ /**
3314
+ * Embedding response
3315
+ */
3316
+ 200: CreateEmbeddingResponse;
2035
3317
  };
3318
+ export type CreateEmbeddingResponse2 = CreateEmbeddingResponses[keyof CreateEmbeddingResponses];
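The /v1/embeddings route is new in this release and mirrors the chat wrapper above; a compact sketch under the same assumptions:
// Sketch: POST /v1/embeddings with the typed request and typed 200 response.
import type { CreateEmbeddingRequest, CreateEmbeddingResponse } from '@bodhiapp/ts-client';

async function embed(
  baseUrl: string,
  token: string,
  body: CreateEmbeddingRequest
): Promise<CreateEmbeddingResponse> {
  const res = await fetch(`${baseUrl}/v1/embeddings`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`embeddings request failed: ${res.status}`);
  return (await res.json()) as CreateEmbeddingResponse;
}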
2036
3319
  export type ListModelsData = {
2037
3320
  body?: never;
2038
3321
  path?: never;
@@ -2041,9 +3324,17 @@ export type ListModelsData = {
2041
3324
  };
2042
3325
  export type ListModelsErrors = {
2043
3326
  /**
2044
- * Invalid authentication
3327
+ * Invalid request parameters
3328
+ */
3329
+ 400: OpenAiApiError;
3330
+ /**
3331
+ * Not authenticated
2045
3332
  */
2046
3333
  401: OpenAiApiError;
3334
+ /**
3335
+ * Insufficient permissions
3336
+ */
3337
+ 403: OpenAiApiError;
2047
3338
  /**
2048
3339
  * Internal server error
2049
3340
  */
@@ -2070,9 +3361,17 @@ export type GetModelData = {
2070
3361
  };
2071
3362
  export type GetModelErrors = {
2072
3363
  /**
2073
- * Invalid authentication
3364
+ * Invalid request parameters
3365
+ */
3366
+ 400: OpenAiApiError;
3367
+ /**
3368
+ * Not authenticated
2074
3369
  */
2075
3370
  401: OpenAiApiError;
3371
+ /**
3372
+ * Insufficient permissions
3373
+ */
3374
+ 403: OpenAiApiError;
2076
3375
  /**
2077
3376
  * Model not found
2078
3377
  */