@bodhiapp/ts-client 0.1.5 → 0.1.7

This diff shows the changes between these two published versions of the package as they appear in the public registry. It is provided for informational purposes only.
@@ -1,32 +1,78 @@
  // This file is auto-generated by @hey-api/openapi-ts
 
- export type AliasResponse = {
- alias: string;
- context_params: GptContextParams;
- filename: string;
- model_params: {};
- repo: string;
- request_params: OaiRequestParams;
- snapshot: string;
- source: string;
+ /**
+ * Flat enum representing all types of model aliases
+ * Each variant is identified by the source field
+ */
+ export type Alias = (UserAlias & {
+ source: 'user';
+ }) | (ModelAlias & {
+ source: 'model';
+ }) | (ApiAlias & {
+ source: 'api';
+ });
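The new Alias type replaces the old AliasResponse with a flat union discriminated by the source field. A minimal consumer-side sketch of narrowing on that field (assuming the generated types are re-exported from the package root; the label text is illustrative):

    import type { Alias } from '@bodhiapp/ts-client';

    // Narrow the flat union on its `source` discriminator.
    function aliasLabel(alias: Alias): string {
      if (alias.source === 'api') {
        return `${alias.id} [${alias.api_format}]`;   // ApiAlias branch
      }
      if (alias.source === 'model') {
        return `${alias.alias} (${alias.repo})`;      // ModelAlias branch
      }
      return 'user-defined alias';                    // UserAlias branch (fields not shown in this diff)
    }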
+
+ export type ApiAlias = {
+ id: string;
+ api_format: ApiFormat;
+ base_url: string;
+ models: Array<string>;
+ prefix?: string | null;
+ created_at: string;
+ updated_at: string;
  };
 
- export type ApiToken = {
+ /**
+ * API format/protocol specification
+ */
+ export type ApiFormat = 'openai' | 'placeholder';
+
+ /**
+ * Response containing available API formats
+ */
+ export type ApiFormatsResponse = {
+ data: Array<ApiFormat>;
+ };
+
+ /**
+ * Response containing API model configuration
+ */
+ export type ApiModelResponse = {
+ id: string;
+ api_format: ApiFormat;
+ base_url: string;
+ api_key_masked: string;
+ models: Array<string>;
+ prefix?: string | null;
  created_at: string;
+ updated_at: string;
+ };
+
+ export type ApiToken = {
  id: string;
+ user_id: string;
  name: string;
- status: TokenStatus;
+ token_prefix: string;
  token_hash: string;
- token_id: string;
+ scopes: string;
+ status: TokenStatus;
+ created_at: string;
  updated_at: string;
- user_id: string;
  };
 
  export type ApiTokenResponse = {
  /**
- * Offline token that can be used as API Token
+ * API token with bodhiapp_ prefix for programmatic access
  */
- offline_token: string;
+ token: string;
+ };
+
+ export type AppAccessRequest = {
+ app_client_id: string;
+ };
+
+ export type AppAccessResponse = {
+ scope: string;
  };
 
  /**
@@ -34,735 +80,2953 @@ export type ApiTokenResponse = {
  */
  export type AppInfo = {
  /**
- * Current application status
+ * Application version number (semantic versioning)
  */
- status: AppStatus;
+ version: string;
  /**
- * Application version
+ * Current application setup and operational status
  */
- version: string;
+ status: AppStatus;
  };
 
+ export type AppRole = ResourceRole | TokenScope | UserScope;
+
  export type AppStatus = 'setup' | 'ready' | 'resource-admin';
 
+ /**
+ * Request body for approving access with role assignment
+ */
+ export type ApproveUserAccessRequest = {
+ /**
+ * Role to assign to the user
+ */
+ role: ResourceRole;
+ };
+
  export type AuthCallbackRequest = {
  /**
- * OAuth authorization code from successful authentication
+ * OAuth authorization code from successful authentication (required for success flow)
  */
  code?: string | null;
  /**
- * OAuth error code if authentication failed
+ * OAuth state parameter for CSRF protection (must match initiated request)
+ */
+ state?: string | null;
+ /**
+ * OAuth error code if authentication failed (e.g., "access_denied")
  */
  error?: string | null;
  /**
- * OAuth error description if authentication failed
+ * Human-readable OAuth error description if authentication failed
  */
  error_description?: string | null;
+ [key: string]: string | (string | null) | (string | null) | (string | null) | (string | null) | undefined;
+ };
+
+ /**
+ * Change user role request
+ */
+ export type ChangeRoleRequest = {
  /**
- * OAuth state parameter for CSRF protection
+ * Role to assign to the user
  */
- state?: string | null;
- [key: string]: string | (string | null) | (string | null) | (string | null) | (string | null) | undefined;
+ role: string;
  };
 
68
- export type ChatRequest = {
69
- format?: string | null;
70
- keep_alive?: null | Duration;
71
- messages: Array<Message>;
72
- model: string;
73
- options?: null | Options;
74
- stream?: boolean | null;
136
+ export type ChatChoice = {
137
+ /**
138
+ * The index of the choice in the list of choices.
139
+ */
140
+ index: number;
141
+ message: ChatCompletionResponseMessage;
142
+ finish_reason?: null | FinishReason;
143
+ logprobs?: null | ChatChoiceLogprobs;
144
+ };
145
+
146
+ export type ChatChoiceLogprobs = {
147
+ /**
148
+ * A list of message content tokens with log probability information.
149
+ */
150
+ content?: Array<ChatCompletionTokenLogprob> | null;
151
+ refusal?: Array<ChatCompletionTokenLogprob> | null;
152
+ };
153
+
154
+ export type ChatChoiceStream = {
155
+ /**
156
+ * The index of the choice in the list of choices.
157
+ */
158
+ index: number;
159
+ delta: ChatCompletionStreamResponseDelta;
160
+ finish_reason?: null | FinishReason;
161
+ logprobs?: null | ChatChoiceLogprobs;
162
+ };
163
+
164
+ export type ChatCompletionAudio = {
165
+ /**
166
+ * The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive).
167
+ */
168
+ voice: ChatCompletionAudioVoice;
169
+ /**
170
+ * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
171
+ */
172
+ format: ChatCompletionAudioFormat;
173
+ };
174
+
175
+ export type ChatCompletionAudioFormat = 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16';
176
+
177
+ export type ChatCompletionAudioVoice = 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
178
+
179
+ export type ChatCompletionFunctionCall = 'none' | 'auto' | {
180
+ /**
181
+ * Forces the model to call the specified function.
182
+ */
183
+ Function: {
184
+ name: string;
185
+ };
75
186
  };
76
187
 
77
188
  /**
78
- * Request to create a new API token
189
+ * @deprecated
79
190
  */
80
- export type CreateApiTokenRequest = {
191
+ export type ChatCompletionFunctions = {
192
+ /**
193
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
194
+ */
195
+ name: string;
196
+ /**
197
+ * A description of what the function does, used by the model to choose when and how to call the function.
198
+ */
199
+ description?: string | null;
81
200
  /**
82
- * Optional name for the API token
201
+ * The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
202
+ *
203
+ * Omitting `parameters` defines a function with an empty parameter list.
204
+ */
205
+ parameters: unknown;
206
+ };
207
+
208
+ export type ChatCompletionMessageToolCall = {
209
+ /**
210
+ * The ID of the tool call.
211
+ */
212
+ id: string;
213
+ /**
214
+ * The type of the tool. Currently, only `function` is supported.
215
+ */
216
+ type: ChatCompletionToolType;
217
+ /**
218
+ * The function that the model called.
219
+ */
220
+ function: FunctionCall;
221
+ };
222
+
223
+ export type ChatCompletionMessageToolCallChunk = {
224
+ index: number;
225
+ /**
226
+ * The ID of the tool call.
227
+ */
228
+ id?: string | null;
229
+ type?: null | ChatCompletionToolType;
230
+ function?: null | FunctionCallStream;
231
+ };
232
+
233
+ /**
234
+ * Output types that you would like the model to generate for this request.
235
+ *
236
+ * Most models are capable of generating text, which is the default: `["text"]`
237
+ *
238
+ * The `gpt-4o-audio-preview` model can also be used to [generate
239
+ * audio](https://platform.openai.com/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `["text", "audio"]`
240
+ */
241
+ export type ChatCompletionModalities = 'text' | 'audio';
242
+
243
+ /**
244
+ * Specifies a tool the model should use. Use to force the model to call a specific function.
245
+ */
246
+ export type ChatCompletionNamedToolChoice = {
247
+ /**
248
+ * The type of the tool. Currently, only `function` is supported.
249
+ */
250
+ type: ChatCompletionToolType;
251
+ function: FunctionName;
252
+ };
253
+
254
+ export type ChatCompletionRequestAssistantMessage = {
255
+ content?: null | ChatCompletionRequestAssistantMessageContent;
256
+ /**
257
+ * The refusal message by the assistant.
258
+ */
259
+ refusal?: string | null;
260
+ /**
261
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
83
262
  */
84
263
  name?: string | null;
264
+ audio?: null | ChatCompletionRequestAssistantMessageAudio;
265
+ tool_calls?: Array<ChatCompletionMessageToolCall> | null;
266
+ function_call?: null | FunctionCall;
85
267
  };
86
268
 
87
- export type DownloadRequest = {
88
- created_at: string;
89
- error?: string | null;
90
- filename: string;
269
+ export type ChatCompletionRequestAssistantMessageAudio = {
270
+ /**
271
+ * Unique identifier for a previous audio response from the model.
272
+ */
91
273
  id: string;
92
- repo: string;
93
- status: DownloadStatus;
94
- updated_at: string;
95
274
  };
96
275
 
97
- export type DownloadStatus = 'pending' | 'completed' | 'error';
276
+ export type ChatCompletionRequestAssistantMessageContent = string | Array<ChatCompletionRequestAssistantMessageContentPart>;
98
277
 
99
- export type Duration = string;
278
+ export type ChatCompletionRequestAssistantMessageContentPart = (ChatCompletionRequestMessageContentPartText & {
279
+ type: 'text';
280
+ }) | (ChatCompletionRequestMessageContentPartRefusal & {
281
+ type: 'refusal';
282
+ });
100
283
 
101
- export type ErrorBody = {
102
- code?: string | null;
103
- message: string;
104
- param?: string | null;
105
- type: string;
284
+ export type ChatCompletionRequestDeveloperMessage = {
285
+ /**
286
+ * The contents of the developer message.
287
+ */
288
+ content: ChatCompletionRequestDeveloperMessageContent;
289
+ /**
290
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
291
+ */
292
+ name?: string | null;
106
293
  };
107
294
 
108
- export type GptContextParams = {
109
- n_ctx?: number | null;
110
- n_keep?: number | null;
111
- n_parallel?: number | null;
112
- n_predict?: number | null;
113
- n_seed?: number | null;
114
- n_threads?: number | null;
295
+ export type ChatCompletionRequestDeveloperMessageContent = string | Array<ChatCompletionRequestMessageContentPartText>;
296
+
297
+ export type ChatCompletionRequestFunctionMessage = {
298
+ /**
299
+ * The return value from the function call, to return to the model.
300
+ */
301
+ content?: string | null;
302
+ /**
303
+ * The name of the function to call.
304
+ */
305
+ name: string;
115
306
  };
116
307
 
117
- export type ListModelResponseWrapper = {
118
- data: Array<{
119
- /**
120
- * The Unix timestamp (in seconds) when the model was created.
121
- */
122
- created: number;
123
- /**
124
- * The model identifier, which can be referenced in the API endpoints.
125
- */
126
- id: string;
127
- /**
128
- * The object type, which is always "model".
129
- */
130
- object: string;
131
- /**
132
- * The organization that owns the model.
133
- */
134
- owned_by: string;
135
- }>;
136
- object: string;
308
+ export type ChatCompletionRequestMessage = (ChatCompletionRequestDeveloperMessage & {
309
+ role: 'developer';
310
+ }) | (ChatCompletionRequestSystemMessage & {
311
+ role: 'system';
312
+ }) | (ChatCompletionRequestUserMessage & {
313
+ role: 'user';
314
+ }) | (ChatCompletionRequestAssistantMessage & {
315
+ role: 'assistant';
316
+ }) | (ChatCompletionRequestToolMessage & {
317
+ role: 'tool';
318
+ }) | (ChatCompletionRequestFunctionMessage & {
319
+ role: 'function';
320
+ });
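ChatCompletionRequestMessage is a role-discriminated union, so plain object literals type-check per role. A small sketch (assuming the generated types are re-exported from the package root):

    import type { ChatCompletionRequestMessage } from '@bodhiapp/ts-client';

    const messages: ChatCompletionRequestMessage[] = [
      { role: 'system', content: 'You are a concise assistant.' },
      { role: 'user', content: 'What is a model alias?' },
    ];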
321
+
322
+ /**
323
+ * Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).
324
+ */
325
+ export type ChatCompletionRequestMessageContentPartAudio = {
326
+ input_audio: InputAudio;
137
327
  };
138
328
 
139
- export type LocalModelResponse = {
140
- filename: string;
141
- model_params: {};
142
- repo: string;
143
- size?: number | null;
144
- snapshot: string;
329
+ export type ChatCompletionRequestMessageContentPartImage = {
330
+ image_url: ImageUrl;
145
331
  };
146
332
 
147
- export type Message = {
148
- content: string;
149
- images?: Array<string> | null;
150
- role: string;
333
+ export type ChatCompletionRequestMessageContentPartRefusal = {
334
+ /**
335
+ * The refusal message generated by the model.
336
+ */
337
+ refusal: string;
151
338
  };
152
339
 
153
- export type Model = {
154
- details: ModelDetails;
155
- digest: string;
156
- model: string;
157
- modified_at: number;
158
- size: number;
340
+ export type ChatCompletionRequestMessageContentPartText = {
341
+ text: string;
159
342
  };
160
343
 
161
- export type ModelDetails = {
162
- families?: Array<string> | null;
163
- family: string;
164
- format: string;
165
- parameter_size: string;
166
- parent_model?: string | null;
167
- quantization_level: string;
344
+ export type ChatCompletionRequestSystemMessage = {
345
+ /**
346
+ * The contents of the system message.
347
+ */
348
+ content: ChatCompletionRequestSystemMessageContent;
349
+ /**
350
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
351
+ */
352
+ name?: string | null;
168
353
  };
169
354
 
170
- export type ModelsResponse = {
171
- models: Array<Model>;
355
+ export type ChatCompletionRequestSystemMessageContent = string | Array<ChatCompletionRequestSystemMessageContentPart>;
356
+
357
+ export type ChatCompletionRequestSystemMessageContentPart = ChatCompletionRequestMessageContentPartText & {
358
+ type: 'text';
172
359
  };
173
360
 
174
361
  /**
175
- * Request to pull a model file from HuggingFace
362
+ * Tool message
176
363
  */
177
- export type NewDownloadRequest = {
364
+ export type ChatCompletionRequestToolMessage = {
178
365
  /**
179
- * Model file name to pull
366
+ * The contents of the tool message.
180
367
  */
181
- filename: string;
368
+ content: ChatCompletionRequestToolMessageContent;
369
+ tool_call_id: string;
370
+ };
371
+
372
+ export type ChatCompletionRequestToolMessageContent = string | Array<ChatCompletionRequestToolMessageContentPart>;
373
+
374
+ export type ChatCompletionRequestToolMessageContentPart = ChatCompletionRequestMessageContentPartText & {
375
+ type: 'text';
376
+ };
377
+
378
+ export type ChatCompletionRequestUserMessage = {
182
379
  /**
183
- * HuggingFace repository name
380
+ * The contents of the user message.
184
381
  */
185
- repo: string;
382
+ content: ChatCompletionRequestUserMessageContent;
383
+ /**
384
+ * An optional name for the participant. Provides the model information to differentiate between participants of the same role.
385
+ */
386
+ name?: string | null;
186
387
  };
187
388
 
188
- export type OaiRequestParams = {
189
- frequency_penalty?: number | null;
190
- max_tokens?: number | null;
191
- presence_penalty?: number | null;
192
- seed?: number | null;
193
- stop?: Array<string>;
194
- temperature?: number | null;
195
- top_p?: number | null;
196
- user?: string | null;
389
+ export type ChatCompletionRequestUserMessageContent = string | Array<ChatCompletionRequestUserMessageContentPart>;
390
+
391
+ export type ChatCompletionRequestUserMessageContentPart = (ChatCompletionRequestMessageContentPartText & {
392
+ type: 'text';
393
+ }) | (ChatCompletionRequestMessageContentPartImage & {
394
+ type: 'image_url';
395
+ }) | (ChatCompletionRequestMessageContentPartAudio & {
396
+ type: 'input_audio';
397
+ });
398
+
399
+ /**
400
+ * A chat completion message generated by the model.
401
+ */
402
+ export type ChatCompletionResponseMessage = {
403
+ /**
404
+ * The contents of the message.
405
+ */
406
+ content?: string | null;
407
+ /**
408
+ * The refusal message generated by the model.
409
+ */
410
+ refusal?: string | null;
411
+ /**
412
+ * The tool calls generated by the model, such as function calls.
413
+ */
414
+ tool_calls?: Array<ChatCompletionMessageToolCall> | null;
415
+ /**
416
+ * The role of the author of this message.
417
+ */
418
+ role: Role;
419
+ function_call?: null | FunctionCall;
420
+ audio?: null | ChatCompletionResponseMessageAudio;
197
421
  };
198
422
 
199
- export type OllamaError = {
200
- error: string;
423
+ export type ChatCompletionResponseMessageAudio = {
424
+ /**
425
+ * Unique identifier for this audio response.
426
+ */
427
+ id: string;
428
+ /**
429
+ * The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.
430
+ */
431
+ expires_at: number;
432
+ /**
433
+ * Base64 encoded audio bytes generated by the model, in the format specified in the request.
434
+ */
435
+ data: string;
436
+ /**
437
+ * Transcript of the audio generated by the model.
438
+ */
439
+ transcript: string;
201
440
  };
202
441
 
203
- export type OpenAiApiError = {
204
- error: ErrorBody;
442
+ /**
443
+ * Options for streaming response. Only set this when you set `stream: true`.
444
+ */
445
+ export type ChatCompletionStreamOptions = {
446
+ /**
447
+ * If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value.
448
+ */
449
+ include_usage: boolean;
205
450
  };
206
451
 
207
- export type Options = {
208
- f16_kv?: boolean | null;
209
- frequency_penalty?: number | null;
210
- logits_all?: boolean | null;
211
- low_vram?: boolean | null;
212
- main_gpu?: number | null;
213
- mirostat?: number | null;
214
- mirostat_eta?: number | null;
215
- mirostat_tau?: number | null;
216
- num_batch?: number | null;
217
- num_ctx?: number | null;
218
- num_gpu?: number | null;
219
- num_keep?: number | null;
220
- num_predict?: number | null;
221
- num_thread?: number | null;
222
- numa?: boolean | null;
223
- penalize_newline?: boolean | null;
224
- presence_penalty?: number | null;
225
- repeat_last_n?: number | null;
226
- repeat_penalty?: number | null;
227
- seed?: number | null;
228
- stop?: Array<string> | null;
229
- temperature?: number | null;
230
- tfs_z?: number | null;
231
- top_k?: number | null;
232
- top_p?: number | null;
233
- typical_p?: number | null;
234
- use_mlock?: boolean | null;
235
- use_mmap?: boolean | null;
236
- vocab_only?: boolean | null;
452
+ /**
453
+ * A chat completion delta generated by streamed model responses.
454
+ */
455
+ export type ChatCompletionStreamResponseDelta = {
456
+ /**
457
+ * The contents of the chunk message.
458
+ */
459
+ content?: string | null;
460
+ function_call?: null | FunctionCallStream;
461
+ tool_calls?: Array<ChatCompletionMessageToolCallChunk> | null;
462
+ role?: null | Role;
463
+ /**
464
+ * The refusal message generated by the model.
465
+ */
466
+ refusal?: string | null;
237
467
  };
238
468
 
239
- export type PaginatedResponseAliasResponse = {
240
- data: Array<{
241
- alias: string;
242
- context_params: GptContextParams;
243
- filename: string;
244
- model_params: {};
245
- repo: string;
246
- request_params: OaiRequestParams;
247
- snapshot: string;
248
- source: string;
249
- }>;
250
- page: number;
251
- page_size: number;
252
- total: number;
469
+ export type ChatCompletionTokenLogprob = {
470
+ /**
471
+ * The token.
472
+ */
473
+ token: string;
474
+ /**
475
+ * The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
476
+ */
477
+ logprob: number;
478
+ /**
479
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
480
+ */
481
+ bytes?: Array<number> | null;
482
+ /**
483
+ * List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.
484
+ */
485
+ top_logprobs: Array<TopLogprobs>;
253
486
  };
254
487
 
255
- export type PaginatedResponseApiToken = {
256
- data: Array<{
257
- created_at: string;
258
- id: string;
259
- name: string;
260
- status: TokenStatus;
261
- token_hash: string;
262
- token_id: string;
263
- updated_at: string;
264
- user_id: string;
265
- }>;
266
- page: number;
267
- page_size: number;
268
- total: number;
488
+ export type ChatCompletionTool = {
489
+ type: ChatCompletionToolType;
490
+ function: FunctionObject;
269
491
  };
270
492
 
271
- export type PaginatedResponseDownloadRequest = {
272
- data: Array<{
273
- created_at: string;
274
- error?: string | null;
275
- filename: string;
276
- id: string;
277
- repo: string;
278
- status: DownloadStatus;
279
- updated_at: string;
280
- }>;
281
- page: number;
282
- page_size: number;
283
- total: number;
493
+ /**
494
+ * Controls which (if any) tool is called by the model.
495
+ * `none` means the model will not call any tool and instead generates a message.
496
+ * `auto` means the model can pick between generating a message or calling one or more tools.
497
+ * `required` means the model must call one or more tools.
498
+ * Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
499
+ *
500
+ * `none` is the default when no tools are present. `auto` is the default if tools are present.
501
+ */
502
+ export type ChatCompletionToolChoiceOption = 'none' | 'auto' | 'required' | {
503
+ named: ChatCompletionNamedToolChoice;
284
504
  };
285
505
 
286
- export type PaginatedResponseLocalModelResponse = {
287
- data: Array<{
288
- filename: string;
289
- model_params: {};
290
- repo: string;
291
- size?: number | null;
292
- snapshot: string;
293
- }>;
294
- page: number;
295
- page_size: number;
296
- total: number;
506
+ export type ChatCompletionToolType = 'function';
507
+
508
+ export type ChatRequest = {
509
+ model: string;
510
+ messages: Array<Message>;
511
+ stream?: boolean | null;
512
+ format?: string | null;
513
+ keep_alive?: null | Duration;
514
+ options?: null | Options;
297
515
  };
298
516
 
299
517
  /**
300
- * Response to the ping endpoint
518
+ * Breakdown of tokens used in a completion.
301
519
  */
302
- export type PingResponse = {
520
+ export type CompletionTokensDetails = {
521
+ accepted_prediction_tokens?: number | null;
303
522
  /**
304
- * always returns "pong"
523
+ * Audio input tokens generated by the model.
305
524
  */
306
- message: string;
307
- };
308
-
309
- export type RedirectResponse = {
525
+ audio_tokens?: number | null;
310
526
  /**
311
- * The URL to redirect to for OAuth authentication
527
+ * Tokens generated by the model for reasoning.
312
528
  */
313
- location: string;
314
- };
315
-
316
- export type Repo = {
317
- name: string;
318
- user: string;
529
+ reasoning_tokens?: number | null;
530
+ /**
531
+ * When using Predicted Outputs, the number of tokens in the
532
+ * prediction that did not appear in the completion. However, like
533
+ * reasoning tokens, these tokens are still counted in the total
534
+ * completion tokens for purposes of billing, output, and context
535
+ * window limits.
536
+ */
537
+ rejected_prediction_tokens?: number | null;
319
538
  };
320
539
 
321
- export type SettingInfo = {
322
- current_value: unknown;
323
- default_value: unknown;
324
- key: string;
325
- metadata: SettingMetadata;
326
- source: SettingSource;
540
+ /**
541
+ * Usage statistics for the completion request.
542
+ */
543
+ export type CompletionUsage = {
544
+ /**
545
+ * Number of tokens in the prompt.
546
+ */
547
+ prompt_tokens: number;
548
+ /**
549
+ * Number of tokens in the generated completion.
550
+ */
551
+ completion_tokens: number;
552
+ /**
553
+ * Total number of tokens used in the request (prompt + completion).
554
+ */
555
+ total_tokens: number;
556
+ prompt_tokens_details?: null | PromptTokensDetails;
557
+ completion_tokens_details?: null | CompletionTokensDetails;
327
558
  };
328
559
 
329
- export type SettingMetadata = {
330
- type: 'string';
331
- } | {
332
- max: number;
333
- min: number;
334
- type: 'number';
335
- } | {
336
- type: 'boolean';
337
- } | {
338
- options: Array<string>;
339
- type: 'option';
560
+ export type CreateAliasRequest = {
561
+ alias: string;
562
+ repo: string;
563
+ filename: string;
564
+ snapshot?: string | null;
565
+ request_params?: null | OaiRequestParams;
566
+ context_params?: Array<string> | null;
340
567
  };
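CreateAliasRequest is new in this version; note that context_params is a plain array of strings here, unlike the GptContextParams object used on the old AliasResponse. A hedged sketch (the repo, filename, and context flags below are illustrative values, not defaults shipped with the package):

    import type { CreateAliasRequest } from '@bodhiapp/ts-client';

    const newAlias: CreateAliasRequest = {
      alias: 'llama3:instruct',
      repo: 'meta-llama/Meta-Llama-3-8B-Instruct-GGUF',   // hypothetical HuggingFace repo
      filename: 'Meta-Llama-3-8B-Instruct.Q4_K_M.gguf',   // hypothetical .gguf file
      request_params: { temperature: 0.7 },
      context_params: ['--ctx-size', '4096'],             // assumed to be raw server flags
    };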
341
568
 
342
- export type SettingSource = 'system' | 'command_line' | 'environment' | 'settings_file' | 'default';
343
-
344
569
  /**
345
- * Request to setup the application in authenticated mode
570
+ * Request to create a new API model configuration
346
571
  */
347
- export type SetupRequest = {
348
- [key: string]: unknown;
572
+ export type CreateApiModelRequest = {
573
+ /**
574
+ * API format/protocol (e.g., "openai")
575
+ */
576
+ api_format: ApiFormat;
577
+ /**
578
+ * API base URL
579
+ */
580
+ base_url: string;
581
+ /**
582
+ * API key for authentication
583
+ */
584
+ api_key: string;
585
+ /**
586
+ * List of available models
587
+ */
588
+ models: Array<string>;
589
+ /**
590
+ * Optional prefix for model namespacing (e.g., "azure/" for "azure/gpt-4", "openai:" for "openai:gpt-4")
591
+ */
592
+ prefix?: string | null;
349
593
  };
350
594
 
351
595
  /**
352
- * Response containing the updated application status after setup
596
+ * Request to create a new API token
353
597
  */
354
- export type SetupResponse = {
598
+ export type CreateApiTokenRequest = {
355
599
  /**
356
- * New application status after setup
357
- * - resource-admin: When setup in authenticated mode
358
- * - ready: When setup in non-authenticated mode
600
+ * Descriptive name for the API token (minimum 3 characters)
359
601
  */
360
- status: AppStatus;
361
- };
362
-
363
- export type ShowRequest = {
364
- name: string;
602
+ name?: string | null;
603
+ /**
604
+ * Token scope defining access level
605
+ */
606
+ scope: TokenScope;
365
607
  };
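CreateApiTokenRequest now carries a required TokenScope in addition to the optional name. A minimal sketch (the token name is illustrative):

    import type { CreateApiTokenRequest } from '@bodhiapp/ts-client';

    const tokenRequest: CreateApiTokenRequest = {
      name: 'ci-pipeline',          // hypothetical descriptive name (minimum 3 characters)
      scope: 'scope_token_user',
    };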
366
608
 
367
- export type ShowResponse = {
368
- details: ModelDetails;
369
- license: string;
370
- model_info: {};
371
- modelfile: string;
372
- modified_at: number;
373
- parameters: string;
374
- template: string;
609
+ export type CreateChatCompletionRequest = {
610
+ /**
611
+ * A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio).
612
+ */
613
+ messages: Array<ChatCompletionRequestMessage>;
614
+ /**
615
+ * ID of the model to use.
616
+ * See the [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
617
+ */
618
+ model: string;
619
+ /**
620
+ * Whether or not to store the output of this chat completion request
621
+ *
622
+ * for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products.
623
+ */
624
+ store?: boolean | null;
625
+ reasoning_effort?: null | ReasoningEffort;
626
+ /**
627
+ * Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions).
628
+ */
629
+ metadata?: unknown;
630
+ /**
631
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
632
+ */
633
+ frequency_penalty?: number | null;
634
+ /**
635
+ * Modify the likelihood of specified tokens appearing in the completion.
636
+ *
637
+ * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
638
+ * Mathematically, the bias is added to the logits generated by the model prior to sampling.
639
+ * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
640
+ * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
641
+ */
642
+ logit_bias?: {} | null;
643
+ /**
644
+ * Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
645
+ */
646
+ logprobs?: boolean | null;
647
+ /**
648
+ * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
649
+ */
650
+ top_logprobs?: number | null;
651
+ /**
652
+ * The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion.
653
+ *
654
+ * This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.
655
+ * This value is now deprecated in favor of `max_completion_tokens`, and is
656
+ * not compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning).
657
+ * @deprecated
658
+ */
659
+ max_tokens?: number | null;
660
+ /**
661
+ * An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
662
+ */
663
+ max_completion_tokens?: number | null;
664
+ /**
665
+ * How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
666
+ */
667
+ n?: number | null;
668
+ modalities?: Array<ChatCompletionModalities> | null;
669
+ prediction?: null | PredictionContent;
670
+ audio?: null | ChatCompletionAudio;
671
+ /**
672
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
673
+ */
674
+ presence_penalty?: number | null;
675
+ response_format?: null | ResponseFormat;
676
+ /**
677
+ * This feature is in Beta.
678
+ * If specified, our system will make a best effort to sample deterministically, such that repeated requests
679
+ * with the same `seed` and parameters should return the same result.
680
+ * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
681
+ */
682
+ seed?: number | null;
683
+ service_tier?: null | ServiceTier;
684
+ stop?: null | Stop;
685
+ /**
686
+ * If set, partial message deltas will be sent, like in ChatGPT.
687
+ * Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
688
+ * as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
689
+ */
690
+ stream?: boolean | null;
691
+ stream_options?: null | ChatCompletionStreamOptions;
692
+ /**
693
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random,
694
+ * while lower values like 0.2 will make it more focused and deterministic.
695
+ *
696
+ * We generally recommend altering this or `top_p` but not both.
697
+ */
698
+ temperature?: number | null;
699
+ /**
700
+ * An alternative to sampling with temperature, called nucleus sampling,
701
+ * where the model considers the results of the tokens with top_p probability mass.
702
+ * So 0.1 means only the tokens comprising the top 10% probability mass are considered.
703
+ *
704
+ * We generally recommend altering this or `temperature` but not both.
705
+ */
706
+ top_p?: number | null;
707
+ /**
708
+ * A list of tools the model may call. Currently, only functions are supported as a tool.
709
+ * Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
710
+ */
711
+ tools?: Array<ChatCompletionTool> | null;
712
+ tool_choice?: null | ChatCompletionToolChoiceOption;
713
+ /**
714
+ * Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use.
715
+ */
716
+ parallel_tool_calls?: boolean | null;
717
+ /**
718
+ * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
719
+ */
720
+ user?: string | null;
721
+ web_search_options?: null | WebSearchOptions;
722
+ function_call?: null | ChatCompletionFunctionCall;
723
+ /**
724
+ * Deprecated in favor of `tools`.
725
+ *
726
+ * A list of functions the model may generate JSON inputs for.
727
+ * @deprecated
728
+ */
729
+ functions?: Array<ChatCompletionFunctions> | null;
375
730
  };
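CreateChatCompletionRequest mirrors the OpenAI chat schema; only model and messages are required. A minimal request sketch (the model id is illustrative):

    import type { CreateChatCompletionRequest } from '@bodhiapp/ts-client';

    const request: CreateChatCompletionRequest = {
      model: 'llama3:instruct',                            // hypothetical alias/model id
      messages: [{ role: 'user', content: 'Say hello' }],
      temperature: 0.2,
      stream: false,
    };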
376
731
 
377
- export type TokenStatus = 'active' | 'inactive';
378
-
379
732
  /**
380
- * Request to update an existing API token
733
+ * Represents a chat completion response returned by model, based on the provided input.
381
734
  */
382
- export type UpdateApiTokenRequest = {
735
+ export type CreateChatCompletionResponse = {
383
736
  /**
384
- * New name for the token
737
+ * A unique identifier for the chat completion.
385
738
  */
386
- name: string;
739
+ id: string;
387
740
  /**
388
- * New status for the token (active/inactive)
741
+ * A list of chat completion choices. Can be more than one if `n` is greater than 1.
389
742
  */
390
- status: TokenStatus;
743
+ choices: Array<ChatChoice>;
744
+ /**
745
+ * The Unix timestamp (in seconds) of when the chat completion was created.
746
+ */
747
+ created: number;
748
+ /**
749
+ * The model used for the chat completion.
750
+ */
751
+ model: string;
752
+ service_tier?: null | ServiceTierResponse;
753
+ /**
754
+ * This fingerprint represents the backend configuration that the model runs with.
755
+ *
756
+ * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
757
+ */
758
+ system_fingerprint?: string | null;
759
+ /**
760
+ * The object type, which is always `chat.completion`.
761
+ */
762
+ object: string;
763
+ usage?: null | CompletionUsage;
391
764
  };
392
765
 
393
766
  /**
394
- * Request to update a setting value
767
+ * Represents a streamed chunk of a chat completion response returned by model, based on the provided input.
395
768
  */
396
- export type UpdateSettingRequest = {
397
- value: unknown;
769
+ export type CreateChatCompletionStreamResponse = {
770
+ /**
771
+ * A unique identifier for the chat completion. Each chunk has the same ID.
772
+ */
773
+ id: string;
774
+ /**
775
+ * A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {"include_usage": true}`.
776
+ */
777
+ choices: Array<ChatChoiceStream>;
778
+ /**
779
+ * The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
780
+ */
781
+ created: number;
782
+ /**
783
+ * The model to generate the completion.
784
+ */
785
+ model: string;
786
+ service_tier?: null | ServiceTierResponse;
787
+ /**
788
+ * This fingerprint represents the backend configuration that the model runs with.
789
+ * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
790
+ */
791
+ system_fingerprint?: string | null;
792
+ /**
793
+ * The object type, which is always `chat.completion.chunk`.
794
+ */
795
+ object: string;
796
+ usage?: null | CompletionUsage;
398
797
  };
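When stream is set to true, each server-sent chunk deserializes to CreateChatCompletionStreamResponse. A hedged sketch of pulling the text out of one parsed chunk:

    import type { CreateChatCompletionStreamResponse } from '@bodhiapp/ts-client';

    // Collect whatever content the choices' deltas carry for this chunk.
    function chunkText(chunk: CreateChatCompletionStreamResponse): string {
      return chunk.choices.map((c) => c.delta.content ?? '').join('');
    }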
399
798
 
400
- /**
401
- * Information about the currently logged in user
402
- */
403
- export type UserInfo = {
799
+ export type CreateEmbeddingRequest = {
800
+ /**
801
+ * ID of the model to use. You can use the
802
+ * [List models](https://platform.openai.com/docs/api-reference/models/list)
803
+ * API to see all of your available models, or see our
804
+ * [Model overview](https://platform.openai.com/docs/models/overview)
805
+ * for descriptions of them.
806
+ */
807
+ model: string;
808
+ /**
809
+ * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
810
+ */
811
+ input: EmbeddingInput;
812
+ encoding_format?: null | EncodingFormat;
813
+ /**
814
+ * A unique identifier representing your end-user, which will help OpenAI
815
+ * to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids).
816
+ */
817
+ user?: string | null;
818
+ /**
819
+ * The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.
820
+ */
821
+ dimensions?: number | null;
822
+ };
823
+
824
+ export type CreateEmbeddingResponse = {
825
+ object: string;
826
+ /**
827
+ * The name of the model used to generate the embedding.
828
+ */
829
+ model: string;
830
+ /**
831
+ * The list of embeddings generated by the model.
832
+ */
833
+ data: Array<Embedding>;
834
+ /**
835
+ * The usage information for the request.
836
+ */
837
+ usage: EmbeddingUsage;
838
+ };
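The embeddings pair follows the OpenAI shape. A short sketch of building a request and reading the first vector back (the model id is illustrative):

    import type { CreateEmbeddingRequest, CreateEmbeddingResponse } from '@bodhiapp/ts-client';

    const embeddingRequest: CreateEmbeddingRequest = {
      model: 'text-embedding-3-small',               // hypothetical embedding model
      input: ['first passage', 'second passage'],
      encoding_format: 'float',
    };

    function firstVector(res: CreateEmbeddingResponse): Array<number> {
      return res.data[0]?.embedding ?? [];
    }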
839
+
840
+ export type DownloadRequest = {
841
+ id: string;
842
+ repo: string;
843
+ filename: string;
844
+ status: DownloadStatus;
845
+ error?: string | null;
846
+ created_at: string;
847
+ updated_at: string;
848
+ total_bytes?: number | null;
849
+ downloaded_bytes?: number;
850
+ started_at: string;
851
+ };
852
+
853
+ export type DownloadStatus = 'pending' | 'completed' | 'error';
854
+
855
+ export type Duration = string;
856
+
857
+ /**
858
+ * Represents an embedding vector returned by embedding endpoint.
859
+ */
860
+ export type Embedding = {
861
+ /**
862
+ * The index of the embedding in the list of embeddings.
863
+ */
864
+ index: number;
865
+ /**
866
+ * The object type, which is always "embedding".
867
+ */
868
+ object: string;
869
+ /**
870
+ * The embedding vector, which is a list of floats. The length of vector
871
+ * depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings).
872
+ */
873
+ embedding: Array<number>;
874
+ };
875
+
876
+ export type EmbeddingInput = string | Array<string> | Array<number> | Array<Array<number>>;
877
+
878
+ export type EmbeddingUsage = {
879
+ /**
880
+ * The number of tokens used by the prompt.
881
+ */
882
+ prompt_tokens: number;
883
+ /**
884
+ * The total number of tokens used by the request.
885
+ */
886
+ total_tokens: number;
887
+ };
888
+
889
+ export type EncodingFormat = 'float' | 'base64';
890
+
891
+ export type ErrorBody = {
892
+ /**
893
+ * Human-readable error message describing what went wrong
894
+ */
895
+ message: string;
896
+ /**
897
+ * Error type categorizing the kind of error that occurred
898
+ */
899
+ type: string;
900
+ /**
901
+ * Specific error code for programmatic error handling
902
+ */
903
+ code?: string | null;
904
+ /**
905
+ * Parameter name that caused the error (for validation errors)
906
+ */
907
+ param?: string | null;
908
+ };
909
+
910
+ /**
911
+ * Request to fetch available models from provider
912
+ */
913
+ export type FetchModelsRequest = {
914
+ /**
915
+ * API key for authentication (provide either api_key OR id, api_key takes preference if both provided)
916
+ */
917
+ api_key?: string;
918
+ /**
919
+ * API model ID to look up stored credentials (provide either api_key OR id, api_key takes preference if both provided)
920
+ */
921
+ id?: string;
922
+ /**
923
+ * API base URL (optional when using id)
924
+ */
925
+ base_url: string;
926
+ };
927
+
928
+ /**
929
+ * Response containing available models from provider
930
+ */
931
+ export type FetchModelsResponse = {
932
+ models: Array<string>;
933
+ };
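Per the doc comments, FetchModelsRequest accepts either a raw api_key or the id of a stored API model configuration, with api_key winning when both are present; base_url is declared required either way. A sketch of both forms (the key and id values are placeholders):

    import type { FetchModelsRequest } from '@bodhiapp/ts-client';

    const byKey: FetchModelsRequest = { api_key: 'sk-...', base_url: 'https://api.openai.com/v1' };
    const byId: FetchModelsRequest = { id: 'stored-openai-config', base_url: 'https://api.openai.com/v1' };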
934
+
935
+ export type FinishReason = 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call';
936
+
937
+ /**
938
+ * The name and arguments of a function that should be called, as generated by the model.
939
+ */
940
+ export type FunctionCall = {
941
+ /**
942
+ * The name of the function to call.
943
+ */
944
+ name: string;
945
+ /**
946
+ * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
947
+ */
948
+ arguments: string;
949
+ };
950
+
951
+ export type FunctionCallStream = {
952
+ /**
953
+ * The name of the function to call.
954
+ */
955
+ name?: string | null;
956
+ /**
957
+ * The arguments to call the function with, as generated by the model in JSON format.
958
+ * Note that the model does not always generate valid JSON, and may hallucinate
959
+ * parameters not defined by your function schema. Validate the arguments in your
960
+ * code before calling your function.
961
+ */
962
+ arguments?: string | null;
963
+ };
964
+
965
+ export type FunctionName = {
966
+ /**
967
+ * The name of the function to call.
968
+ */
969
+ name: string;
970
+ };
971
+
972
+ export type FunctionObject = {
973
+ /**
974
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
975
+ */
976
+ name: string;
977
+ /**
978
+ * A description of what the function does, used by the model to choose when and how to call the function.
979
+ */
980
+ description?: string | null;
981
+ /**
982
+ * The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
983
+ *
984
+ * Omitting `parameters` defines a function with an empty parameter list.
985
+ */
986
+ parameters?: unknown;
987
+ /**
988
+ * Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](https://platform.openai.com/docs/guides/function-calling).
989
+ */
990
+ strict?: boolean | null;
991
+ };
992
+
993
+ export type ImageDetail = 'auto' | 'low' | 'high';
994
+
995
+ export type ImageUrl = {
996
+ /**
997
+ * Either a URL of the image or the base64 encoded image data.
998
+ */
999
+ url: string;
1000
+ detail?: null | ImageDetail;
1001
+ };
1002
+
1003
+ export type InputAudio = {
1004
+ /**
1005
+ * Base64 encoded audio data.
1006
+ */
1007
+ data: string;
1008
+ /**
1009
+ * The format of the encoded audio data. Currently supports "wav" and "mp3".
1010
+ */
1011
+ format: InputAudioFormat;
1012
+ };
1013
+
1014
+ export type InputAudioFormat = 'wav' | 'mp3';
1015
+
1016
+ export type ListModelResponse = {
1017
+ object: string;
1018
+ data: Array<{
1019
+ /**
1020
+ * The model identifier, which can be referenced in the API endpoints.
1021
+ */
1022
+ id: string;
1023
+ /**
1024
+ * The object type, which is always "model".
1025
+ */
1026
+ object: string;
1027
+ /**
1028
+ * The Unix timestamp (in seconds) when the model was created.
1029
+ */
1030
+ created: number;
1031
+ /**
1032
+ * The organization that owns the model.
1033
+ */
1034
+ owned_by: string;
1035
+ }>;
1036
+ };
1037
+
1038
+ /**
1039
+ * List users query parameters
1040
+ */
1041
+ export type ListUsersParams = {
1042
+ page?: number | null;
1043
+ page_size?: number | null;
1044
+ };
1045
+
1046
+ export type LocalModelResponse = {
1047
+ repo: string;
1048
+ filename: string;
1049
+ snapshot: string;
1050
+ size?: number | null;
1051
+ model_params: {};
1052
+ };
1053
+
1054
+ export type Message = {
1055
+ role: string;
1056
+ content: string;
1057
+ images?: Array<string> | null;
1058
+ };
1059
+
1060
+ export type Model = {
1061
+ model: string;
1062
+ modified_at: number;
1063
+ size: number;
1064
+ digest: string;
1065
+ details: ModelDetails;
1066
+ };
1067
+
1068
+ export type ModelAlias = {
1069
+ alias: string;
1070
+ repo: string;
1071
+ filename: string;
1072
+ snapshot: string;
1073
+ };
1074
+
1075
+ export type ModelDetails = {
1076
+ parent_model?: string | null;
1077
+ format: string;
1078
+ family: string;
1079
+ families?: Array<string> | null;
1080
+ parameter_size: string;
1081
+ quantization_level: string;
1082
+ };
1083
+
1084
+ /**
1085
+ * Describes an OpenAI model offering that can be used with the API.
1086
+ */
1087
+ export type ModelResponse = {
1088
+ /**
1089
+ * The model identifier, which can be referenced in the API endpoints.
1090
+ */
1091
+ id: string;
1092
+ /**
1093
+ * The object type, which is always "model".
1094
+ */
1095
+ object: string;
1096
+ /**
1097
+ * The Unix timestamp (in seconds) when the model was created.
1098
+ */
1099
+ created: number;
1100
+ /**
1101
+ * The organization that owns the model.
1102
+ */
1103
+ owned_by: string;
1104
+ };
1105
+
1106
+ export type ModelsResponse = {
1107
+ models: Array<Model>;
1108
+ };
1109
+
1110
+ /**
1111
+ * Request to pull a model file from HuggingFace
1112
+ */
1113
+ export type NewDownloadRequest = {
1114
+ /**
1115
+ * HuggingFace repository name in format 'username/repository-name'
1116
+ */
1117
+ repo: string;
1118
+ /**
1119
+ * Model file name to download (typically .gguf format)
1120
+ */
1121
+ filename: string;
1122
+ };
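NewDownloadRequest still needs only a repo and filename; the progress fields (total_bytes, downloaded_bytes, started_at) now come back on DownloadRequest. An illustrative pull request (repo and file are examples, not defaults):

    import type { NewDownloadRequest } from '@bodhiapp/ts-client';

    const pull: NewDownloadRequest = {
      repo: 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF',     // example HuggingFace repo
      filename: 'mistral-7b-instruct-v0.2.Q4_K_M.gguf',   // example .gguf file
    };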
1123
+
1124
+ export type OaiRequestParams = {
1125
+ frequency_penalty?: number | null;
1126
+ max_tokens?: number | null;
1127
+ presence_penalty?: number | null;
1128
+ seed?: number | null;
1129
+ stop?: Array<string>;
1130
+ temperature?: number | null;
1131
+ top_p?: number | null;
1132
+ user?: string | null;
1133
+ };
1134
+
1135
+ export type OllamaError = {
1136
+ error: string;
1137
+ };
1138
+
1139
+ export type OpenAiApiError = {
1140
+ /**
1141
+ * Error details following OpenAI API error format
1142
+ */
1143
+ error: ErrorBody;
1144
+ };
1145
+
1146
+ export type Options = {
1147
+ num_keep?: number | null;
1148
+ seed?: number | null;
1149
+ num_predict?: number | null;
1150
+ top_k?: number | null;
1151
+ top_p?: number | null;
1152
+ tfs_z?: number | null;
1153
+ typical_p?: number | null;
1154
+ repeat_last_n?: number | null;
1155
+ temperature?: number | null;
1156
+ repeat_penalty?: number | null;
1157
+ presence_penalty?: number | null;
1158
+ frequency_penalty?: number | null;
1159
+ mirostat?: number | null;
1160
+ mirostat_tau?: number | null;
1161
+ mirostat_eta?: number | null;
1162
+ penalize_newline?: boolean | null;
1163
+ stop?: Array<string> | null;
1164
+ numa?: boolean | null;
1165
+ num_ctx?: number | null;
1166
+ num_batch?: number | null;
1167
+ num_gpu?: number | null;
1168
+ main_gpu?: number | null;
1169
+ low_vram?: boolean | null;
1170
+ f16_kv?: boolean | null;
1171
+ logits_all?: boolean | null;
1172
+ vocab_only?: boolean | null;
1173
+ use_mmap?: boolean | null;
1174
+ use_mlock?: boolean | null;
1175
+ num_thread?: number | null;
1176
+ };
1177
+
1178
+ export type PaginatedAliasResponse = {
1179
+ data: Array<Alias>;
1180
+ total: number;
1181
+ page: number;
1182
+ page_size: number;
1183
+ };
1184
+
1185
+ /**
1186
+ * Paginated response for API model listings
1187
+ */
1188
+ export type PaginatedApiModelResponse = {
1189
+ data: Array<ApiModelResponse>;
1190
+ total: number;
1191
+ page: number;
1192
+ page_size: number;
1193
+ };
1194
+
1195
+ export type PaginatedApiTokenResponse = {
1196
+ data: Array<ApiToken>;
1197
+ total: number;
1198
+ page: number;
1199
+ page_size: number;
1200
+ };
1201
+
1202
+ export type PaginatedDownloadResponse = {
1203
+ data: Array<DownloadRequest>;
1204
+ total: number;
1205
+ page: number;
1206
+ page_size: number;
1207
+ };
1208
+
1209
+ export type PaginatedLocalModelResponse = {
1210
+ data: Array<LocalModelResponse>;
1211
+ total: number;
1212
+ page: number;
1213
+ page_size: number;
1214
+ };
1215
+
1216
+ /**
1217
+ * Paginated response for access requests
1218
+ */
1219
+ export type PaginatedUserAccessResponse = {
1220
+ /**
1221
+ * List of access requests
1222
+ */
1223
+ requests: Array<UserAccessRequest>;
1224
+ /**
1225
+ * Total number of requests
1226
+ */
1227
+ total: number;
1228
+ /**
1229
+ * Current page number
1230
+ */
1231
+ page: number;
1232
+ /**
1233
+ * Number of items per page
1234
+ */
1235
+ page_size: number;
1236
+ };
1237
+
1238
+ export type PaginatedUserAliasResponse = {
1239
+ data: Array<UserAliasResponse>;
1240
+ total: number;
1241
+ page: number;
1242
+ page_size: number;
1243
+ };
1244
+
1245
+ /**
1246
+ * Query parameters for pagination and sorting
1247
+ */
1248
+ export type PaginationSortParams = {
1249
+ /**
1250
+ * Page number (1-based indexing)
1251
+ */
1252
+ page?: number;
1253
+ /**
1254
+ * Number of items to return per page (maximum 100)
1255
+ */
1256
+ page_size?: number;
1257
+ /**
1258
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
1259
+ */
1260
+ sort?: string | null;
1261
+ /**
1262
+ * Sort order: 'asc' for ascending, 'desc' for descending
1263
+ */
1264
+ sort_order?: string;
1265
+ };
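PaginationSortParams maps directly onto query-string keys. A sketch of serializing it (field names come from the type; wiring it to an endpoint is up to the caller):

    import type { PaginationSortParams } from '@bodhiapp/ts-client';

    const params: PaginationSortParams = { page: 1, page_size: 25, sort: 'updated_at', sort_order: 'desc' };
    const query = new URLSearchParams(
      Object.entries(params).map(([k, v]) => [k, String(v)]),
    ).toString(); // "page=1&page_size=25&sort=updated_at&sort_order=desc"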
1266
+
1267
+ /**
1268
+ * Response to the ping endpoint
1269
+ */
1270
+ export type PingResponse = {
1271
+ /**
1272
+ * Simple ping response message
1273
+ */
1274
+ message: string;
1275
+ };
1276
+
1277
+ /**
1278
+ * The type of the predicted content you want to provide. This type is
1279
+ * currently always `content`.
1280
+ */
1281
+ export type PredictionContent = {
1282
+ /**
1283
+ * The type of the predicted content you want to provide. This type is
1284
+ * currently always `content`.
1285
+ */
1286
+ content: PredictionContentContent;
1287
+ type: 'content';
1288
+ };
1289
+
1290
+ /**
1291
+ * The content that should be matched when generating a model response. If generated tokens would match this content, the entire model response can be returned much more quickly.
1292
+ */
1293
+ export type PredictionContentContent = string | Array<ChatCompletionRequestMessageContentPartText>;
1294
+
1295
+ /**
1296
+ * Breakdown of tokens used in a completion.
1297
+ */
1298
+ export type PromptTokensDetails = {
1299
+ /**
1300
+ * Audio input tokens present in the prompt.
1301
+ */
1302
+ audio_tokens?: number | null;
1303
+ /**
1304
+ * Cached tokens present in the prompt.
1305
+ */
1306
+ cached_tokens?: number | null;
1307
+ };
1308
+
1309
+ export type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';
1310
+
1311
+ export type RedirectResponse = {
1312
+ /**
1313
+ * The URL to redirect to (OAuth authorization URL or application home page)
1314
+ */
1315
+ location: string;
1316
+ };
1317
+
1318
+ export type ResourceRole = 'resource_user' | 'resource_power_user' | 'resource_manager' | 'resource_admin';
1319
+
1320
+ export type ResponseFormat = {
1321
+ type: 'text';
1322
+ } | {
1323
+ type: 'json_object';
1324
+ } | {
1325
+ json_schema: ResponseFormatJsonSchema;
1326
+ type: 'json_schema';
1327
+ };
1328
+
1329
+ export type ResponseFormatJsonSchema = {
1330
+ /**
1331
+ * A description of what the response format is for, used by the model to determine how to respond in the format.
1332
+ */
1333
+ description?: string | null;
1334
+ /**
1335
+ * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
1336
+ */
1337
+ name: string;
1338
+ /**
1339
+ * The schema for the response format, described as a JSON Schema object.
1340
+ */
1341
+ schema?: unknown;
1342
+ /**
1343
+ * Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
1344
+ */
1345
+ strict?: boolean | null;
1346
+ };
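ResponseFormat is a tagged union, so requesting structured output means picking the json_schema branch. A sketch (the schema name and shape are illustrative):

    import type { ResponseFormat } from '@bodhiapp/ts-client';

    const responseFormat: ResponseFormat = {
      type: 'json_schema',
      json_schema: {
        name: 'alias_summary',                         // hypothetical schema name
        schema: { type: 'object', properties: { alias: { type: 'string' } } },
        strict: true,
      },
    };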
1347
+
1348
+ export type Role = 'system' | 'user' | 'assistant' | 'tool' | 'function';
1349
+
1350
+ export type ServiceTier = 'auto' | 'default' | 'flex' | 'scale' | 'priority';
1351
+
1352
+ export type ServiceTierResponse = 'scale' | 'default' | 'flex' | 'priority';
1353
+
1354
+ export type SettingInfo = {
1355
+ key: string;
1356
+ current_value: unknown;
1357
+ default_value: unknown;
1358
+ source: SettingSource;
1359
+ metadata: SettingMetadata;
1360
+ };
1361
+
1362
+ export type SettingMetadata = {
1363
+ type: 'string';
1364
+ } | {
1365
+ min: number;
1366
+ max: number;
1367
+ type: 'number';
1368
+ } | {
1369
+ type: 'boolean';
1370
+ } | {
1371
+ options: Array<string>;
1372
+ type: 'option';
1373
+ };
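SettingMetadata is also tag-discriminated, which lets a UI pick an input control per setting. A hedged sketch:

    import type { SettingInfo } from '@bodhiapp/ts-client';

    function controlFor(setting: SettingInfo): string {
      const m = setting.metadata;
      switch (m.type) {
        case 'number': return `number input (${m.min}..${m.max})`;
        case 'option': return `select: ${m.options.join(', ')}`;
        case 'boolean': return 'toggle';
        default: return 'text input';
      }
    }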
1374
+
1375
+ export type SettingSource = 'system' | 'command_line' | 'environment' | 'settings_file' | 'default';
1376
+
1377
+ /**
1378
+ * Request to setup the application in authenticated mode
1379
+ */
1380
+ export type SetupRequest = {
1381
+ /**
1382
+ * Server name for identification (minimum 10 characters)
1383
+ */
1384
+ name: string;
1385
+ /**
1386
+ * Optional description of the server's purpose
1387
+ */
1388
+ description?: string | null;
1389
+ };
1390
+
1391
+ /**
1392
+ * Response containing the updated application status after setup
1393
+ */
1394
+ export type SetupResponse = {
1395
+ /**
1396
+ * New application status after successful setup
1397
+ */
1398
+ status: AppStatus;
1399
+ };
1400
+
1401
+ export type ShowRequest = {
1402
+ name: string;
1403
+ };
1404
+
1405
+ export type ShowResponse = {
1406
+ details: ModelDetails;
1407
+ license: string;
1408
+ model_info: {};
1409
+ modelfile: string;
1410
+ modified_at: number;
1411
+ parameters: string;
1412
+ template: string;
1413
+ };
1414
+
1415
+ export type Stop = string | Array<string>;
1416
+
1417
+ /**
1418
+ * Request to test API connectivity with a prompt
1419
+ */
1420
+ export type TestPromptRequest = {
1421
+ /**
1422
+ * API key for authentication (provide either api_key OR id, api_key takes preference if both provided)
1423
+ */
1424
+ api_key?: string;
1425
+ /**
1426
+ * API model ID to look up stored credentials (provide either api_key OR id, api_key takes preference if both provided)
1427
+ */
1428
+ id?: string;
1429
+ /**
1430
+ * API base URL (optional when using id)
1431
+ */
1432
+ base_url: string;
1433
+ /**
1434
+ * Model to use for testing
1435
+ */
1436
+ model: string;
1437
+ /**
1438
+ * Test prompt (max 30 characters for cost control)
1439
+ */
1440
+ prompt: string;
1441
+ };
1442
+
1443
+ /**
1444
+ * Response from testing API connectivity
1445
+ */
1446
+ export type TestPromptResponse = {
1447
+ success: boolean;
1448
+ response?: string | null;
1449
+ error?: string | null;
1450
+ };
1451
+
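Example (editor's sketch): building a TestPromptRequest that uses stored credentials. Per the field docs above, api_key and id are alternatives and api_key wins when both are set; the id, endpoint, and model values below are placeholders.

import type { TestPromptRequest } from './types.gen'; // assumed path to the generated types

const testRequest: TestPromptRequest = {
  id: 'a1b2c3',                          // placeholder API model ID; credentials are looked up server-side
  base_url: 'https://api.openai.com/v1', // still required by the type even when id is used
  model: 'gpt-4o-mini',                  // illustrative model name
  prompt: 'Say hello',                   // kept well under the 30-character limit
};
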
1452
+ /**
1453
+ * API Token information response
1454
+ */
1455
+ export type TokenInfo = {
1456
+ role: TokenScope;
1457
+ };
1458
+
1459
+ export type TokenScope = 'scope_token_user' | 'scope_token_power_user' | 'scope_token_manager' | 'scope_token_admin';
1460
+
1461
+ export type TokenStatus = 'active' | 'inactive';
1462
+
1463
+ export type TopLogprobs = {
1464
+ /**
1465
+ * The token.
1466
+ */
1467
+ token: string;
1468
+ /**
1469
+ * The log probability of this token.
1470
+ */
1471
+ logprob: number;
1472
+ /**
1473
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
1474
+ */
1475
+ bytes?: Array<number> | null;
1476
+ };
1477
+
1478
+ export type UpdateAliasRequest = {
1479
+ repo: string;
1480
+ filename: string;
1481
+ snapshot?: string | null;
1482
+ request_params?: null | OaiRequestParams;
1483
+ context_params?: Array<string> | null;
1484
+ };
1485
+
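Example (editor's sketch): an UpdateAliasRequest that repoints an alias at a specific GGUF file. The repo and filename are illustrative, and the content of context_params is an assumption, since the schema only types it as a string array.

import type { UpdateAliasRequest } from './types.gen'; // assumed path to the generated types

const aliasUpdate: UpdateAliasRequest = {
  repo: 'example-org/example-model-GGUF', // illustrative repository
  filename: 'example-model.Q4_K_M.gguf',  // illustrative file within that repository
  snapshot: null,                         // leave the snapshot unpinned
  context_params: ['--ctx-size 4096'],    // assumption: free-form server flags
};
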
1486
+ /**
1487
+ * Request to update an existing API model configuration
1488
+ */
1489
+ export type UpdateApiModelRequest = {
1490
+ /**
1491
+ * API format/protocol (required)
1492
+ */
1493
+ api_format: ApiFormat;
1494
+ /**
1495
+ * API base URL (required)
1496
+ */
1497
+ base_url: string;
1498
+ /**
1499
+ * API key for authentication (optional, only update if provided for security)
1500
+ */
1501
+ api_key?: string | null;
1502
+ /**
1503
+ * List of available models (required)
1504
+ */
1505
+ models: Array<string>;
1506
+ /**
1507
+ * Optional prefix for model namespacing
1508
+ */
1509
+ prefix?: string | null;
1510
+ };
1511
+
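Example (editor's sketch): an UpdateApiModelRequest that leaves api_key unset so the stored key is kept, as the field doc above suggests; the base URL and model names are illustrative.

import type { UpdateApiModelRequest } from './types.gen'; // assumed path to the generated types

const apiModelUpdate: UpdateApiModelRequest = {
  api_format: 'openai',
  base_url: 'https://api.openai.com/v1', // illustrative endpoint
  models: ['gpt-4o-mini', 'gpt-4o'],     // illustrative model list
  prefix: 'openai/',                     // optional namespacing prefix
  // api_key omitted: only send it when the key should actually change
};
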
1512
+ /**
1513
+ * Request to update an existing API token
1514
+ */
1515
+ export type UpdateApiTokenRequest = {
1516
+ /**
1517
+ * New descriptive name for the token (minimum 3 characters)
1518
+ */
1519
+ name: string;
1520
+ /**
1521
+ * New status for the token (active/inactive)
1522
+ */
1523
+ status: TokenStatus;
1524
+ };
1525
+
1526
+ /**
1527
+ * Request to update a setting value
1528
+ */
1529
+ export type UpdateSettingRequest = {
1530
+ /**
1531
+ * New value for the setting (type depends on setting metadata)
1532
+ */
1533
+ value: unknown;
1534
+ };
1535
+
1536
+ export type UserAccessRequest = {
1537
+ /**
1538
+ * Unique identifier for the request
1539
+ */
1540
+ id: number;
1541
+ /**
1542
+ * Username of the requesting user
1543
+ */
1544
+ username: string;
1545
+ /**
1546
+ * User ID (UUID) of the requesting user
1547
+ */
1548
+ user_id: string;
1549
+ reviewer?: string | null;
1550
+ /**
1551
+ * Current status of the request
1552
+ */
1553
+ status: UserAccessRequestStatus;
1554
+ /**
1555
+ * Creation timestamp
1556
+ */
1557
+ created_at: string;
1558
+ /**
1559
+ * Last update timestamp
1560
+ */
1561
+ updated_at: string;
1562
+ };
1563
+
1564
+ export type UserAccessRequestStatus = 'pending' | 'approved' | 'rejected';
1565
+
1566
+ /**
1567
+ * Response for checking access request status
1568
+ */
1569
+ export type UserAccessStatusResponse = {
1570
+ /**
1571
+ * Username of the requesting user
1572
+ */
1573
+ username: string;
1574
+ /**
1575
+ * Current status of the request (pending, approved, rejected)
1576
+ */
1577
+ status: UserAccessRequestStatus;
1578
+ /**
1579
+ * Creation timestamp
1580
+ */
1581
+ created_at: string;
1582
+ /**
1583
+ * Last update timestamp
1584
+ */
1585
+ updated_at: string;
1586
+ };
1587
+
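Example (editor's sketch): a small helper over UserAccessStatusResponse. 'pending' is the only non-terminal value of UserAccessRequestStatus, so anything else means an admin has already decided.

import type { UserAccessStatusResponse } from './types.gen'; // assumed path to the generated types

function accessDecided(statusResponse: UserAccessStatusResponse): boolean {
  // keep polling while the request is still 'pending'
  return statusResponse.status !== 'pending';
}
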
1588
+ export type UserAlias = {
1589
+ alias: string;
1590
+ repo: string;
1591
+ filename: string;
1592
+ snapshot: string;
1593
+ request_params?: OaiRequestParams;
1594
+ context_params?: Array<string>;
1595
+ };
1596
+
1597
+ export type UserAliasResponse = {
1598
+ alias: string;
1599
+ repo: string;
1600
+ filename: string;
1601
+ snapshot: string;
1602
+ source: string;
1603
+ model_params: {};
1604
+ request_params: OaiRequestParams;
1605
+ context_params: Array<string>;
1606
+ };
1607
+
1608
+ export type UserInfo = {
1609
+ user_id: string;
1610
+ username: string;
1611
+ first_name?: string | null;
1612
+ last_name?: string | null;
1613
+ role?: null | AppRole;
1614
+ };
1615
+
1616
+ export type UserListResponse = {
1617
+ client_id: string;
1618
+ users: Array<UserInfo>;
1619
+ page: number;
1620
+ page_size: number;
1621
+ total_pages: number;
1622
+ total_users: number;
1623
+ has_next: boolean;
1624
+ has_previous: boolean;
1625
+ };
1626
+
1627
+ /**
1628
+ * User authentication response with discriminated union
1629
+ */
1630
+ export type UserResponse = {
1631
+ auth_status: 'logged_out';
1632
+ } | (UserInfo & {
1633
+ auth_status: 'logged_in';
1634
+ }) | (TokenInfo & {
1635
+ auth_status: 'api_token';
1636
+ });
1637
+
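Example (editor's sketch): UserResponse is discriminated by auth_status, so a plain equality check narrows it to the UserInfo or TokenInfo shape.

import type { UserResponse } from './types.gen'; // assumed path to the generated types

function describeCaller(user: UserResponse): string {
  if (user.auth_status === 'logged_in') {
    return `user ${user.username}`;         // UserInfo fields are available here
  }
  if (user.auth_status === 'api_token') {
    return `token with scope ${user.role}`; // TokenInfo fields are available here
  }
  return 'anonymous';
}
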
1638
+ export type UserScope = 'scope_user_user' | 'scope_user_power_user' | 'scope_user_manager' | 'scope_user_admin';
1639
+
1640
+ /**
1641
+ * The amount of context window space to use for the search.
1642
+ */
1643
+ export type WebSearchContextSize = 'low' | 'medium' | 'high';
1644
+
1645
+ /**
1646
+ * Approximate location parameters for the search.
1647
+ */
1648
+ export type WebSearchLocation = {
1649
+ /**
1650
+ * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`.
1651
+ */
1652
+ country?: string | null;
1653
+ /**
1654
+ * Free text input for the region of the user, e.g. `California`.
1655
+ */
1656
+ region?: string | null;
1657
+ /**
1658
+ * Free text input for the city of the user, e.g. `San Francisco`.
1659
+ */
1660
+ city?: string | null;
1661
+ /**
1662
+ * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. `America/Los_Angeles`.
1663
+ */
1664
+ timezone?: string | null;
1665
+ };
1666
+
1667
+ /**
1668
+ * Options for the web search tool.
1669
+ */
1670
+ export type WebSearchOptions = {
1671
+ search_context_size?: null | WebSearchContextSize;
1672
+ user_location?: null | WebSearchUserLocation;
1673
+ };
1674
+
1675
+ export type WebSearchUserLocation = {
1676
+ type: WebSearchUserLocationType;
1677
+ approximate: WebSearchLocation;
1678
+ };
1679
+
1680
+ export type WebSearchUserLocationType = 'approximate';
1681
+
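Example (editor's sketch): a WebSearchOptions value with an approximate user location, reusing the illustrative values from the field docs above.

import type { WebSearchOptions } from './types.gen'; // assumed path to the generated types

const webSearchOptions: WebSearchOptions = {
  search_context_size: 'medium',
  user_location: {
    type: 'approximate',
    approximate: {
      country: 'US',
      region: 'California',
      city: 'San Francisco',
      timezone: 'America/Los_Angeles',
    },
  },
};
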
1682
+ export type ChatOllamaModelData = {
1683
+ /**
1684
+ * Chat request in Ollama format
1685
+ */
1686
+ body: ChatRequest;
1687
+ path?: never;
1688
+ query?: never;
1689
+ url: '/api/chat';
1690
+ };
1691
+
1692
+ export type ChatOllamaModelErrors = {
1693
+ /**
1694
+ * Invalid request parameters
1695
+ */
1696
+ 400: OpenAiApiError;
1697
+ /**
1698
+ * Not authenticated
1699
+ */
1700
+ 401: OpenAiApiError;
1701
+ /**
1702
+ * Insufficient permissions
1703
+ */
1704
+ 403: OpenAiApiError;
1705
+ /**
1706
+ * Model not found
1707
+ */
1708
+ 404: OllamaError;
1709
+ /**
1710
+ * Internal server error
1711
+ */
1712
+ 500: OpenAiApiError;
1713
+ };
1714
+
1715
+ export type ChatOllamaModelError = ChatOllamaModelErrors[keyof ChatOllamaModelErrors];
1716
+
1717
+ export type ChatOllamaModelResponses = {
1718
+ /**
1719
+ * Chat response
1720
+ */
1721
+ 200: unknown;
1722
+ };
1723
+
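Example (editor's sketch): the generated `...Errors[keyof ...Errors]` aliases collapse a status-keyed error map into a union of error bodies, so ChatOllamaModelError above resolves to OpenAiApiError | OllamaError and a handler can accept it directly.

import type { ChatOllamaModelError } from './types.gen'; // assumed path to the generated types

function reportChatError(status: number, body: ChatOllamaModelError): void {
  // per ChatOllamaModelErrors, a 404 carries an OllamaError body and the
  // other listed statuses carry OpenAiApiError bodies
  console.error(`Ollama chat request failed with HTTP ${status}`, body);
}
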
1724
+ export type ShowOllamaModelData = {
1725
+ /**
1726
+ * Model name to get details for
1727
+ */
1728
+ body: ShowRequest;
1729
+ path?: never;
1730
+ query?: never;
1731
+ url: '/api/show';
1732
+ };
1733
+
1734
+ export type ShowOllamaModelErrors = {
1735
+ /**
1736
+ * Invalid request parameters
1737
+ */
1738
+ 400: OpenAiApiError;
1739
+ /**
1740
+ * Not authenticated
1741
+ */
1742
+ 401: OpenAiApiError;
1743
+ /**
1744
+ * Insufficient permissions
1745
+ */
1746
+ 403: OpenAiApiError;
1747
+ /**
1748
+ * Model not found
1749
+ */
1750
+ 404: OllamaError;
1751
+ /**
1752
+ * Internal server error
1753
+ */
1754
+ 500: OpenAiApiError;
1755
+ };
1756
+
1757
+ export type ShowOllamaModelError = ShowOllamaModelErrors[keyof ShowOllamaModelErrors];
1758
+
1759
+ export type ShowOllamaModelResponses = {
1760
+ /**
1761
+ * Model details
1762
+ */
1763
+ 200: ShowResponse;
1764
+ };
1765
+
1766
+ export type ShowOllamaModelResponse = ShowOllamaModelResponses[keyof ShowOllamaModelResponses];
1767
+
1768
+ export type ListOllamaModelsData = {
1769
+ body?: never;
1770
+ path?: never;
1771
+ query?: never;
1772
+ url: '/api/tags';
1773
+ };
1774
+
1775
+ export type ListOllamaModelsErrors = {
1776
+ /**
1777
+ * Invalid request parameters
1778
+ */
1779
+ 400: OpenAiApiError;
1780
+ /**
1781
+ * Not authenticated
1782
+ */
1783
+ 401: OpenAiApiError;
1784
+ /**
1785
+ * Insufficient permissions
1786
+ */
1787
+ 403: OpenAiApiError;
1788
+ /**
1789
+ * Internal server error
1790
+ */
1791
+ 500: OpenAiApiError;
1792
+ };
1793
+
1794
+ export type ListOllamaModelsError = ListOllamaModelsErrors[keyof ListOllamaModelsErrors];
1795
+
1796
+ export type ListOllamaModelsResponses = {
1797
+ /**
1798
+ * List of available models
1799
+ */
1800
+ 200: ModelsResponse;
1801
+ };
1802
+
1803
+ export type ListOllamaModelsResponse = ListOllamaModelsResponses[keyof ListOllamaModelsResponses];
1804
+
1805
+ export type ListAllAccessRequestsData = {
1806
+ body?: never;
1807
+ path?: never;
1808
+ query?: {
1809
+ /**
1810
+ * Page number (1-based indexing)
1811
+ */
1812
+ page?: number;
1813
+ /**
1814
+ * Number of items to return per page (maximum 100)
1815
+ */
1816
+ page_size?: number;
1817
+ /**
1818
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
1819
+ */
1820
+ sort?: string;
1821
+ /**
1822
+ * Sort order: 'asc' for ascending, 'desc' for descending
1823
+ */
1824
+ sort_order?: string;
1825
+ };
1826
+ url: '/bodhi/v1/access-requests';
1827
+ };
1828
+
1829
+ export type ListAllAccessRequestsErrors = {
1830
+ /**
1831
+ * Invalid request parameters
1832
+ */
1833
+ 400: OpenAiApiError;
1834
+ /**
1835
+ * Not authenticated
1836
+ */
1837
+ 401: OpenAiApiError;
1838
+ /**
1839
+ * Insufficient permissions
1840
+ */
1841
+ 403: OpenAiApiError;
1842
+ /**
1843
+ * Internal server error
1844
+ */
1845
+ 500: OpenAiApiError;
1846
+ };
1847
+
1848
+ export type ListAllAccessRequestsError = ListAllAccessRequestsErrors[keyof ListAllAccessRequestsErrors];
1849
+
1850
+ export type ListAllAccessRequestsResponses = {
1851
+ /**
1852
+ * All requests retrieved
1853
+ */
1854
+ 200: PaginatedUserAccessResponse;
1855
+ };
1856
+
1857
+ export type ListAllAccessRequestsResponse = ListAllAccessRequestsResponses[keyof ListAllAccessRequestsResponses];
1858
+
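Example (editor's sketch): the optional query object on ListAllAccessRequestsData maps one-to-one onto URL query parameters; the host below is a placeholder for wherever the server is running.

import type { ListAllAccessRequestsData } from './types.gen'; // assumed path to the generated types

const query: NonNullable<ListAllAccessRequestsData['query']> = {
  page: 1,
  page_size: 25,
  sort: 'created_at',
  sort_order: 'desc',
};

const search = new URLSearchParams(
  Object.entries(query).map(([key, value]) => [key, String(value)]),
);
// placeholder host; the path literal comes straight from the generated url field
const listUrl = `http://localhost:8080/bodhi/v1/access-requests?${search}`;
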
1859
+ export type ListPendingAccessRequestsData = {
1860
+ body?: never;
1861
+ path?: never;
1862
+ query?: {
1863
+ /**
1864
+ * Page number (1-based indexing)
1865
+ */
1866
+ page?: number;
1867
+ /**
1868
+ * Number of items to return per page (maximum 100)
1869
+ */
1870
+ page_size?: number;
1871
+ /**
1872
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
1873
+ */
1874
+ sort?: string;
1875
+ /**
1876
+ * Sort order: 'asc' for ascending, 'desc' for descending
1877
+ */
1878
+ sort_order?: string;
1879
+ };
1880
+ url: '/bodhi/v1/access-requests/pending';
1881
+ };
1882
+
1883
+ export type ListPendingAccessRequestsErrors = {
1884
+ /**
1885
+ * Invalid request parameters
1886
+ */
1887
+ 400: OpenAiApiError;
1888
+ /**
1889
+ * Not authenticated
1890
+ */
1891
+ 401: OpenAiApiError;
1892
+ /**
1893
+ * Insufficient permissions
1894
+ */
1895
+ 403: OpenAiApiError;
1896
+ /**
1897
+ * Internal server error
1898
+ */
1899
+ 500: OpenAiApiError;
1900
+ };
1901
+
1902
+ export type ListPendingAccessRequestsError = ListPendingAccessRequestsErrors[keyof ListPendingAccessRequestsErrors];
1903
+
1904
+ export type ListPendingAccessRequestsResponses = {
1905
+ /**
1906
+ * Pending requests retrieved
1907
+ */
1908
+ 200: PaginatedUserAccessResponse;
1909
+ };
1910
+
1911
+ export type ListPendingAccessRequestsResponse = ListPendingAccessRequestsResponses[keyof ListPendingAccessRequestsResponses];
1912
+
1913
+ export type ApproveAccessRequestData = {
1914
+ /**
1915
+ * Role to assign to the user
1916
+ */
1917
+ body: ApproveUserAccessRequest;
1918
+ path: {
1919
+ /**
1920
+ * Access request ID
1921
+ */
1922
+ id: number;
1923
+ };
1924
+ query?: never;
1925
+ url: '/bodhi/v1/access-requests/{id}/approve';
1926
+ };
1927
+
1928
+ export type ApproveAccessRequestErrors = {
1929
+ /**
1930
+ * Invalid request parameters
1931
+ */
1932
+ 400: OpenAiApiError;
1933
+ /**
1934
+ * Not authenticated
1935
+ */
1936
+ 401: OpenAiApiError;
1937
+ /**
1938
+ * Insufficient permissions
1939
+ */
1940
+ 403: OpenAiApiError;
1941
+ /**
1942
+ * Request not found
1943
+ */
1944
+ 404: OpenAiApiError;
1945
+ /**
1946
+ * Internal server error
1947
+ */
1948
+ 500: OpenAiApiError;
1949
+ };
1950
+
1951
+ export type ApproveAccessRequestError = ApproveAccessRequestErrors[keyof ApproveAccessRequestErrors];
1952
+
1953
+ export type ApproveAccessRequestResponses = {
1954
+ /**
1955
+ * Request approved successfully
1956
+ */
1957
+ 200: unknown;
1958
+ };
1959
+
1960
+ export type RejectAccessRequestData = {
1961
+ body?: never;
1962
+ path: {
1963
+ /**
1964
+ * Access request ID
1965
+ */
1966
+ id: number;
1967
+ };
1968
+ query?: never;
1969
+ url: '/bodhi/v1/access-requests/{id}/reject';
1970
+ };
1971
+
1972
+ export type RejectAccessRequestErrors = {
1973
+ /**
1974
+ * Invalid request parameters
1975
+ */
1976
+ 400: OpenAiApiError;
1977
+ /**
1978
+ * Not authenticated
1979
+ */
1980
+ 401: OpenAiApiError;
1981
+ /**
1982
+ * Insufficient permissions
1983
+ */
1984
+ 403: OpenAiApiError;
1985
+ /**
1986
+ * Request not found
1987
+ */
1988
+ 404: OpenAiApiError;
1989
+ /**
1990
+ * Internal server error
1991
+ */
1992
+ 500: OpenAiApiError;
1993
+ };
1994
+
1995
+ export type RejectAccessRequestError = RejectAccessRequestErrors[keyof RejectAccessRequestErrors];
1996
+
1997
+ export type RejectAccessRequestResponses = {
1998
+ /**
1999
+ * Request rejected successfully
2000
+ */
2001
+ 200: unknown;
2002
+ };
2003
+
2004
+ export type ListApiModelsData = {
2005
+ body?: never;
2006
+ path?: never;
2007
+ query?: {
2008
+ /**
2009
+ * Page number (1-based indexing)
2010
+ */
2011
+ page?: number;
2012
+ /**
2013
+ * Number of items to return per page (maximum 100)
2014
+ */
2015
+ page_size?: number;
2016
+ /**
2017
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
2018
+ */
2019
+ sort?: string;
2020
+ /**
2021
+ * Sort order: 'asc' for ascending, 'desc' for descending
2022
+ */
2023
+ sort_order?: string;
2024
+ };
2025
+ url: '/bodhi/v1/api-models';
2026
+ };
2027
+
2028
+ export type ListApiModelsErrors = {
2029
+ /**
2030
+ * Invalid request parameters
2031
+ */
2032
+ 400: OpenAiApiError;
2033
+ /**
2034
+ * Not authenticated
2035
+ */
2036
+ 401: OpenAiApiError;
2037
+ /**
2038
+ * Insufficient permissions
2039
+ */
2040
+ 403: OpenAiApiError;
2041
+ /**
2042
+ * Internal server error
2043
+ */
2044
+ 500: OpenAiApiError;
2045
+ };
2046
+
2047
+ export type ListApiModelsError = ListApiModelsErrors[keyof ListApiModelsErrors];
2048
+
2049
+ export type ListApiModelsResponses = {
2050
+ /**
2051
+ * API model configurations retrieved successfully
2052
+ */
2053
+ 200: PaginatedApiModelResponse;
2054
+ };
2055
+
2056
+ export type ListApiModelsResponse = ListApiModelsResponses[keyof ListApiModelsResponses];
2057
+
2058
+ export type CreateApiModelData = {
2059
+ body: CreateApiModelRequest;
2060
+ path?: never;
2061
+ query?: never;
2062
+ url: '/bodhi/v1/api-models';
2063
+ };
2064
+
2065
+ export type CreateApiModelErrors = {
2066
+ /**
2067
+ * Invalid request parameters
2068
+ */
2069
+ 400: OpenAiApiError;
2070
+ /**
2071
+ * Not authenticated
2072
+ */
2073
+ 401: OpenAiApiError;
2074
+ /**
2075
+ * Insufficient permissions
2076
+ */
2077
+ 403: OpenAiApiError;
2078
+ /**
2079
+ * Alias already exists
2080
+ */
2081
+ 409: OpenAiApiError;
2082
+ /**
2083
+ * Internal server error
2084
+ */
2085
+ 500: OpenAiApiError;
2086
+ };
2087
+
2088
+ export type CreateApiModelError = CreateApiModelErrors[keyof CreateApiModelErrors];
2089
+
2090
+ export type CreateApiModelResponses = {
2091
+ /**
2092
+ * API model created
2093
+ */
2094
+ 201: ApiModelResponse;
2095
+ };
2096
+
2097
+ export type CreateApiModelResponse = CreateApiModelResponses[keyof CreateApiModelResponses];
2098
+
2099
+ export type GetApiFormatsData = {
2100
+ body?: never;
2101
+ path?: never;
2102
+ query?: never;
2103
+ url: '/bodhi/v1/api-models/api-formats';
2104
+ };
2105
+
2106
+ export type GetApiFormatsErrors = {
2107
+ /**
2108
+ * Invalid request parameters
2109
+ */
2110
+ 400: OpenAiApiError;
2111
+ /**
2112
+ * Not authenticated
2113
+ */
2114
+ 401: OpenAiApiError;
2115
+ /**
2116
+ * Insufficient permissions
2117
+ */
2118
+ 403: OpenAiApiError;
2119
+ /**
2120
+ * Internal server error
2121
+ */
2122
+ 500: OpenAiApiError;
2123
+ };
2124
+
2125
+ export type GetApiFormatsError = GetApiFormatsErrors[keyof GetApiFormatsErrors];
2126
+
2127
+ export type GetApiFormatsResponses = {
2128
+ /**
2129
+ * API formats retrieved successfully
2130
+ */
2131
+ 200: ApiFormatsResponse;
2132
+ };
2133
+
2134
+ export type GetApiFormatsResponse = GetApiFormatsResponses[keyof GetApiFormatsResponses];
2135
+
2136
+ export type FetchApiModelsData = {
2137
+ body: FetchModelsRequest;
2138
+ path?: never;
2139
+ query?: never;
2140
+ url: '/bodhi/v1/api-models/fetch-models';
2141
+ };
2142
+
2143
+ export type FetchApiModelsErrors = {
2144
+ /**
2145
+ * Invalid request parameters
2146
+ */
2147
+ 400: OpenAiApiError;
2148
+ /**
2149
+ * Not authenticated
2150
+ */
2151
+ 401: OpenAiApiError;
2152
+ /**
2153
+ * Insufficient permissions
2154
+ */
2155
+ 403: OpenAiApiError;
2156
+ /**
2157
+ * Internal server error
2158
+ */
2159
+ 500: OpenAiApiError;
2160
+ };
2161
+
2162
+ export type FetchApiModelsError = FetchApiModelsErrors[keyof FetchApiModelsErrors];
2163
+
2164
+ export type FetchApiModelsResponses = {
2165
+ /**
2166
+ * Available models
2167
+ */
2168
+ 200: FetchModelsResponse;
2169
+ };
2170
+
2171
+ export type FetchApiModelsResponse = FetchApiModelsResponses[keyof FetchApiModelsResponses];
2172
+
2173
+ export type TestApiModelData = {
2174
+ body: TestPromptRequest;
2175
+ path?: never;
2176
+ query?: never;
2177
+ url: '/bodhi/v1/api-models/test';
2178
+ };
2179
+
2180
+ export type TestApiModelErrors = {
2181
+ /**
2182
+ * Invalid request parameters
2183
+ */
2184
+ 400: OpenAiApiError;
2185
+ /**
2186
+ * Not authenticated
2187
+ */
2188
+ 401: OpenAiApiError;
2189
+ /**
2190
+ * Insufficient permissions
2191
+ */
2192
+ 403: OpenAiApiError;
2193
+ /**
2194
+ * Internal server error
2195
+ */
2196
+ 500: OpenAiApiError;
2197
+ };
2198
+
2199
+ export type TestApiModelError = TestApiModelErrors[keyof TestApiModelErrors];
2200
+
2201
+ export type TestApiModelResponses = {
2202
+ /**
2203
+ * Test result
2204
+ */
2205
+ 200: TestPromptResponse;
2206
+ };
2207
+
2208
+ export type TestApiModelResponse = TestApiModelResponses[keyof TestApiModelResponses];
2209
+
2210
+ export type DeleteApiModelData = {
2211
+ body?: never;
2212
+ path: {
2213
+ /**
2214
+ * API model ID
2215
+ */
2216
+ id: string;
2217
+ };
2218
+ query?: never;
2219
+ url: '/bodhi/v1/api-models/{id}';
2220
+ };
2221
+
2222
+ export type DeleteApiModelErrors = {
2223
+ /**
2224
+ * Invalid request parameters
2225
+ */
2226
+ 400: OpenAiApiError;
2227
+ /**
2228
+ * Not authenticated
2229
+ */
2230
+ 401: OpenAiApiError;
2231
+ /**
2232
+ * Insufficient permissions
2233
+ */
2234
+ 403: OpenAiApiError;
2235
+ /**
2236
+ * API model not found
2237
+ */
2238
+ 404: OpenAiApiError;
2239
+ /**
2240
+ * Internal server error
2241
+ */
2242
+ 500: OpenAiApiError;
2243
+ };
2244
+
2245
+ export type DeleteApiModelError = DeleteApiModelErrors[keyof DeleteApiModelErrors];
2246
+
2247
+ export type DeleteApiModelResponses = {
2248
+ /**
2249
+ * API model deleted
2250
+ */
2251
+ 204: void;
2252
+ };
2253
+
2254
+ export type DeleteApiModelResponse = DeleteApiModelResponses[keyof DeleteApiModelResponses];
2255
+
2256
+ export type GetApiModelData = {
2257
+ body?: never;
2258
+ path: {
2259
+ /**
2260
+ * Unique identifier for the API model alias
2261
+ */
2262
+ id: string;
2263
+ };
2264
+ query?: never;
2265
+ url: '/bodhi/v1/api-models/{id}';
2266
+ };
2267
+
2268
+ export type GetApiModelErrors = {
2269
+ /**
2270
+ * Invalid request parameters
2271
+ */
2272
+ 400: OpenAiApiError;
2273
+ /**
2274
+ * Not authenticated
2275
+ */
2276
+ 401: OpenAiApiError;
2277
+ /**
2278
+ * Insufficient permissions
2279
+ */
2280
+ 403: OpenAiApiError;
2281
+ /**
2282
+ * API model with specified ID not found
2283
+ */
2284
+ 404: OpenAiApiError;
2285
+ /**
2286
+ * Internal server error
2287
+ */
2288
+ 500: OpenAiApiError;
2289
+ };
2290
+
2291
+ export type GetApiModelError = GetApiModelErrors[keyof GetApiModelErrors];
2292
+
2293
+ export type GetApiModelResponses = {
2294
+ /**
2295
+ * API model configuration retrieved successfully
2296
+ */
2297
+ 200: ApiModelResponse;
2298
+ };
2299
+
2300
+ export type GetApiModelResponse = GetApiModelResponses[keyof GetApiModelResponses];
2301
+
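Example (editor's sketch): the generated url fields keep OpenAPI-style `{id}` placeholders, so a caller substitutes the path parameter before issuing the request. The base URL and bearer-token header are assumptions about deployment and auth, not something this file specifies.

import type { GetApiModelData, GetApiModelResponse } from './types.gen'; // assumed path

const BASE_URL = 'http://localhost:8080'; // placeholder server address

async function getApiModel(id: string, accessToken: string): Promise<GetApiModelResponse> {
  const template: GetApiModelData['url'] = '/bodhi/v1/api-models/{id}';
  const response = await fetch(`${BASE_URL}${template.replace('{id}', encodeURIComponent(id))}`, {
    headers: { Authorization: `Bearer ${accessToken}` }, // assumed auth scheme
  });
  if (!response.ok) {
    throw new Error(`GET api-model failed with HTTP ${response.status}`);
  }
  return (await response.json()) as GetApiModelResponse;
}
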
2302
+ export type UpdateApiModelData = {
2303
+ body: UpdateApiModelRequest;
2304
+ path: {
2305
+ /**
2306
+ * API model ID
2307
+ */
2308
+ id: string;
2309
+ };
2310
+ query?: never;
2311
+ url: '/bodhi/v1/api-models/{id}';
2312
+ };
2313
+
2314
+ export type UpdateApiModelErrors = {
2315
+ /**
2316
+ * Invalid request parameters
2317
+ */
2318
+ 400: OpenAiApiError;
2319
+ /**
2320
+ * Not authenticated
2321
+ */
2322
+ 401: OpenAiApiError;
2323
+ /**
2324
+ * Insufficient permissions
2325
+ */
2326
+ 403: OpenAiApiError;
2327
+ /**
2328
+ * API model not found
2329
+ */
2330
+ 404: OpenAiApiError;
2331
+ /**
2332
+ * Internal server error
2333
+ */
2334
+ 500: OpenAiApiError;
2335
+ };
2336
+
2337
+ export type UpdateApiModelError = UpdateApiModelErrors[keyof UpdateApiModelErrors];
2338
+
2339
+ export type UpdateApiModelResponses = {
2340
+ /**
2341
+ * API model updated
2342
+ */
2343
+ 200: ApiModelResponse;
2344
+ };
2345
+
2346
+ export type UpdateApiModelResponse = UpdateApiModelResponses[keyof UpdateApiModelResponses];
2347
+
2348
+ export type RequestAccessData = {
2349
+ /**
2350
+ * Application client requesting access
2351
+ */
2352
+ body: AppAccessRequest;
2353
+ path?: never;
2354
+ query?: never;
2355
+ url: '/bodhi/v1/apps/request-access';
2356
+ };
2357
+
2358
+ export type RequestAccessErrors = {
2359
+ /**
2360
+ * Invalid request parameters
2361
+ */
2362
+ 400: OpenAiApiError;
2363
+ /**
2364
+ * Not authenticated
2365
+ */
2366
+ 401: OpenAiApiError;
2367
+ /**
2368
+ * Insufficient permissions
2369
+ */
2370
+ 403: OpenAiApiError;
2371
+ /**
2372
+ * Internal server error
2373
+ */
2374
+ 500: OpenAiApiError;
2375
+ };
2376
+
2377
+ export type RequestAccessError = RequestAccessErrors[keyof RequestAccessErrors];
2378
+
2379
+ export type RequestAccessResponses = {
2380
+ /**
2381
+ * Access granted successfully
2382
+ */
2383
+ 200: AppAccessResponse;
2384
+ };
2385
+
2386
+ export type RequestAccessResponse = RequestAccessResponses[keyof RequestAccessResponses];
2387
+
2388
+ export type CompleteOAuthFlowData = {
2389
+ /**
2390
+ * OAuth callback parameters from authorization server
2391
+ */
2392
+ body: AuthCallbackRequest;
2393
+ path?: never;
2394
+ query?: never;
2395
+ url: '/bodhi/v1/auth/callback';
2396
+ };
2397
+
2398
+ export type CompleteOAuthFlowErrors = {
2399
+ /**
2400
+ * Invalid request parameters
2401
+ */
2402
+ 400: OpenAiApiError;
2403
+ /**
2404
+ * Not authenticated
2405
+ */
2406
+ 401: OpenAiApiError;
2407
+ /**
2408
+ * Insufficient permissions
2409
+ */
2410
+ 403: OpenAiApiError;
2411
+ /**
2412
+ * OAuth error, invalid request parameters, or state mismatch
2413
+ */
2414
+ 422: OpenAiApiError;
2415
+ /**
2416
+ * Internal server error
2417
+ */
2418
+ 500: OpenAiApiError;
2419
+ };
2420
+
2421
+ export type CompleteOAuthFlowError = CompleteOAuthFlowErrors[keyof CompleteOAuthFlowErrors];
2422
+
2423
+ export type CompleteOAuthFlowResponses = {
2424
+ /**
2425
+ * OAuth flow completed successfully, user authenticated
2426
+ */
2427
+ 200: RedirectResponse;
2428
+ };
2429
+
2430
+ export type CompleteOAuthFlowResponse = CompleteOAuthFlowResponses[keyof CompleteOAuthFlowResponses];
2431
+
2432
+ export type InitiateOAuthFlowData = {
2433
+ body: unknown;
2434
+ path?: never;
2435
+ query?: never;
2436
+ url: '/bodhi/v1/auth/initiate';
2437
+ };
2438
+
2439
+ export type InitiateOAuthFlowErrors = {
2440
+ /**
2441
+ * Invalid request parameters
2442
+ */
2443
+ 400: OpenAiApiError;
2444
+ /**
2445
+ * Not authenticated
2446
+ */
2447
+ 401: OpenAiApiError;
2448
+ /**
2449
+ * Insufficient permissions
2450
+ */
2451
+ 403: OpenAiApiError;
2452
+ /**
2453
+ * Internal server error
2454
+ */
2455
+ 500: OpenAiApiError;
2456
+ };
2457
+
2458
+ export type InitiateOAuthFlowError = InitiateOAuthFlowErrors[keyof InitiateOAuthFlowErrors];
2459
+
2460
+ export type InitiateOAuthFlowResponses = {
2461
+ /**
2462
+ * User already authenticated, home page URL provided
2463
+ */
2464
+ 200: RedirectResponse;
2465
+ /**
2466
+ * User not authenticated, OAuth authorization URL provided
2467
+ */
2468
+ 201: RedirectResponse;
2469
+ };
2470
+
2471
+ export type InitiateOAuthFlowResponse = InitiateOAuthFlowResponses[keyof InitiateOAuthFlowResponses];
2472
+
2473
+ export type GetAppInfoData = {
2474
+ body?: never;
2475
+ path?: never;
2476
+ query?: never;
2477
+ url: '/bodhi/v1/info';
2478
+ };
2479
+
2480
+ export type GetAppInfoErrors = {
2481
+ /**
2482
+ * Invalid request parameters
2483
+ */
2484
+ 400: OpenAiApiError;
2485
+ /**
2486
+ * Internal server error
2487
+ */
2488
+ 500: OpenAiApiError;
2489
+ };
2490
+
2491
+ export type GetAppInfoError = GetAppInfoErrors[keyof GetAppInfoErrors];
2492
+
2493
+ export type GetAppInfoResponses = {
2494
+ /**
2495
+ * Application information retrieved successfully
2496
+ */
2497
+ 200: AppInfo;
2498
+ };
2499
+
2500
+ export type GetAppInfoResponse = GetAppInfoResponses[keyof GetAppInfoResponses];
2501
+
2502
+ export type LogoutUserData = {
2503
+ body?: never;
2504
+ path?: never;
2505
+ query?: never;
2506
+ url: '/bodhi/v1/logout';
2507
+ };
2508
+
2509
+ export type LogoutUserErrors = {
2510
+ /**
2511
+ * Invalid request parameters
2512
+ */
2513
+ 400: OpenAiApiError;
2514
+ /**
2515
+ * Not authenticated
2516
+ */
2517
+ 401: OpenAiApiError;
2518
+ /**
2519
+ * Insufficient permissions
2520
+ */
2521
+ 403: OpenAiApiError;
2522
+ /**
2523
+ * Internal server error
2524
+ */
2525
+ 500: OpenAiApiError;
2526
+ };
2527
+
2528
+ export type LogoutUserError = LogoutUserErrors[keyof LogoutUserErrors];
2529
+
2530
+ export type LogoutUserResponses = {
2531
+ /**
2532
+ * User logged out successfully
2533
+ */
2534
+ 200: RedirectResponse;
2535
+ };
2536
+
2537
+ export type LogoutUserResponse = LogoutUserResponses[keyof LogoutUserResponses];
2538
+
2539
+ export type ListModelFilesData = {
2540
+ body?: never;
2541
+ path?: never;
2542
+ query?: {
2543
+ /**
2544
+ * Page number (1-based indexing)
2545
+ */
2546
+ page?: number;
2547
+ /**
2548
+ * Number of items to return per page (maximum 100)
2549
+ */
2550
+ page_size?: number;
2551
+ /**
2552
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
2553
+ */
2554
+ sort?: string;
2555
+ /**
2556
+ * Sort order: 'asc' for ascending, 'desc' for descending
2557
+ */
2558
+ sort_order?: string;
2559
+ };
2560
+ url: '/bodhi/v1/modelfiles';
2561
+ };
2562
+
2563
+ export type ListModelFilesErrors = {
2564
+ /**
2565
+ * Invalid request parameters
2566
+ */
2567
+ 400: OpenAiApiError;
404
2568
  /**
405
- * User's email address
2569
+ * Not authenticated
406
2570
  */
407
- email?: string | null;
2571
+ 401: OpenAiApiError;
408
2572
  /**
409
- * If user is logged in
2573
+ * Insufficient permissions
410
2574
  */
411
- logged_in: boolean;
2575
+ 403: OpenAiApiError;
412
2576
  /**
413
- * List of roles assigned to the user
2577
+ * Internal server error
414
2578
  */
415
- roles: Array<string>;
2579
+ 500: OpenAiApiError;
416
2580
  };
417
2581
 
418
- export type ChatOllamaModelData = {
2582
+ export type ListModelFilesError = ListModelFilesErrors[keyof ListModelFilesErrors];
2583
+
2584
+ export type ListModelFilesResponses = {
419
2585
  /**
420
- * Chat request in Ollama format
2586
+ * Local model files retrieved successfully from cache
421
2587
  */
422
- body: ChatRequest;
2588
+ 200: PaginatedLocalModelResponse;
2589
+ };
2590
+
2591
+ export type ListModelFilesResponse = ListModelFilesResponses[keyof ListModelFilesResponses];
2592
+
2593
+ export type ListDownloadsData = {
2594
+ body?: never;
423
2595
  path?: never;
424
- query?: never;
425
- url: '/api/chat';
2596
+ query?: {
2597
+ /**
2598
+ * Page number (1-based indexing)
2599
+ */
2600
+ page?: number;
2601
+ /**
2602
+ * Number of items to return per page (maximum 100)
2603
+ */
2604
+ page_size?: number;
2605
+ /**
2606
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
2607
+ */
2608
+ sort?: string;
2609
+ /**
2610
+ * Sort order: 'asc' for ascending, 'desc' for descending
2611
+ */
2612
+ sort_order?: string;
2613
+ };
2614
+ url: '/bodhi/v1/modelfiles/pull';
426
2615
  };
427
2616
 
428
- export type ChatOllamaModelErrors = {
2617
+ export type ListDownloadsErrors = {
429
2618
  /**
430
- * Invalid request
2619
+ * Invalid request parameters
431
2620
  */
432
- 400: OllamaError;
2621
+ 400: OpenAiApiError;
433
2622
  /**
434
- * Model not found
2623
+ * Not authenticated
435
2624
  */
436
- 404: OllamaError;
2625
+ 401: OpenAiApiError;
2626
+ /**
2627
+ * Insufficient permissions
2628
+ */
2629
+ 403: OpenAiApiError;
437
2630
  /**
438
2631
  * Internal server error
439
2632
  */
440
- 500: OllamaError;
2633
+ 500: OpenAiApiError;
441
2634
  };
442
2635
 
443
- export type ChatOllamaModelError = ChatOllamaModelErrors[keyof ChatOllamaModelErrors];
2636
+ export type ListDownloadsError = ListDownloadsErrors[keyof ListDownloadsErrors];
444
2637
 
445
- export type ChatOllamaModelResponses = {
2638
+ export type ListDownloadsResponses = {
446
2639
  /**
447
- * Chat response
2640
+ * Model download requests retrieved successfully
448
2641
  */
449
- 200: unknown;
2642
+ 200: PaginatedDownloadResponse;
450
2643
  };
451
2644
 
452
- export type ShowOllamaModelData = {
2645
+ export type ListDownloadsResponse = ListDownloadsResponses[keyof ListDownloadsResponses];
2646
+
2647
+ export type PullModelFileData = {
453
2648
  /**
454
- * Model name to get details for
2649
+ * Model file download specification with repository and filename
455
2650
  */
456
- body: ShowRequest;
2651
+ body: NewDownloadRequest;
457
2652
  path?: never;
458
2653
  query?: never;
459
- url: '/api/show';
2654
+ url: '/bodhi/v1/modelfiles/pull';
460
2655
  };
461
2656
 
462
- export type ShowOllamaModelErrors = {
2657
+ export type PullModelFileErrors = {
463
2658
  /**
464
- * Model not found
2659
+ * Invalid request parameters
465
2660
  */
466
- 404: OllamaError;
2661
+ 400: OpenAiApiError;
2662
+ /**
2663
+ * Not authenticated
2664
+ */
2665
+ 401: OpenAiApiError;
2666
+ /**
2667
+ * Insufficient permissions
2668
+ */
2669
+ 403: OpenAiApiError;
467
2670
  /**
468
2671
  * Internal server error
469
2672
  */
470
- 500: OllamaError;
2673
+ 500: OpenAiApiError;
471
2674
  };
472
2675
 
473
- export type ShowOllamaModelError = ShowOllamaModelErrors[keyof ShowOllamaModelErrors];
2676
+ export type PullModelFileError = PullModelFileErrors[keyof PullModelFileErrors];
474
2677
 
475
- export type ShowOllamaModelResponses = {
2678
+ export type PullModelFileResponses = {
476
2679
  /**
477
- * Model details
2680
+ * Existing download request found
478
2681
  */
479
- 200: ShowResponse;
2682
+ 200: DownloadRequest;
2683
+ /**
2684
+ * Download request created
2685
+ */
2686
+ 201: DownloadRequest;
480
2687
  };
481
2688
 
482
- export type ShowOllamaModelResponse = ShowOllamaModelResponses[keyof ShowOllamaModelResponses];
2689
+ export type PullModelFileResponse = PullModelFileResponses[keyof PullModelFileResponses];
483
2690
 
484
- export type ListOllamaModelsData = {
2691
+ export type PullModelByAliasData = {
485
2692
  body?: never;
486
- path?: never;
2693
+ path: {
2694
+ /**
2695
+ * Predefined model alias. Available aliases include popular models like llama2:chat, mistral:instruct, phi3:mini, etc. Use the /models endpoint to see all available aliases.
2696
+ */
2697
+ alias: string;
2698
+ };
487
2699
  query?: never;
488
- url: '/api/tags';
2700
+ url: '/bodhi/v1/modelfiles/pull/{alias}';
489
2701
  };
490
2702
 
491
- export type ListOllamaModelsErrors = {
2703
+ export type PullModelByAliasErrors = {
492
2704
  /**
493
- * Internal server error
2705
+ * Invalid request parameters
494
2706
  */
495
- 500: OllamaError;
496
- };
497
-
498
- export type ListOllamaModelsError = ListOllamaModelsErrors[keyof ListOllamaModelsErrors];
499
-
500
- export type ListOllamaModelsResponses = {
2707
+ 400: OpenAiApiError;
501
2708
  /**
502
- * List of available models
2709
+ * Not authenticated
503
2710
  */
504
- 200: ModelsResponse;
505
- };
506
-
507
- export type ListOllamaModelsResponse = ListOllamaModelsResponses[keyof ListOllamaModelsResponses];
508
-
509
- export type CompleteOAuthFlowData = {
510
- body: AuthCallbackRequest;
511
- path?: never;
512
- query?: never;
513
- url: '/bodhi/v1/auth/callback';
514
- };
515
-
516
- export type CompleteOAuthFlowErrors = {
2711
+ 401: OpenAiApiError;
517
2712
  /**
518
- * OAuth error or invalid request
2713
+ * Insufficient permissions
519
2714
  */
520
- 422: OpenAiApiError;
2715
+ 403: OpenAiApiError;
2716
+ /**
2717
+ * Alias not found
2718
+ */
2719
+ 404: OpenAiApiError;
521
2720
  /**
522
2721
  * Internal server error
523
2722
  */
524
2723
  500: OpenAiApiError;
525
2724
  };
526
2725
 
527
- export type CompleteOAuthFlowError = CompleteOAuthFlowErrors[keyof CompleteOAuthFlowErrors];
2726
+ export type PullModelByAliasError = PullModelByAliasErrors[keyof PullModelByAliasErrors];
528
2727
 
529
- export type CompleteOAuthFlowResponses = {
2728
+ export type PullModelByAliasResponses = {
2729
+ /**
2730
+ * Existing download request found
2731
+ */
2732
+ 200: DownloadRequest;
530
2733
  /**
531
- * OAuth flow completed successfully, return redirect URL
2734
+ * Download request created
532
2735
  */
533
- 200: RedirectResponse;
2736
+ 201: DownloadRequest;
534
2737
  };
535
2738
 
536
- export type CompleteOAuthFlowResponse = CompleteOAuthFlowResponses[keyof CompleteOAuthFlowResponses];
2739
+ export type PullModelByAliasResponse = PullModelByAliasResponses[keyof PullModelByAliasResponses];
537
2740
 
538
- export type InitiateOAuthFlowData = {
539
- body: unknown;
540
- path?: never;
2741
+ export type GetDownloadStatusData = {
2742
+ body?: never;
2743
+ path: {
2744
+ /**
2745
+ * Unique identifier of the download request (UUID format)
2746
+ */
2747
+ id: string;
2748
+ };
541
2749
  query?: never;
542
- url: '/bodhi/v1/auth/initiate';
2750
+ url: '/bodhi/v1/modelfiles/pull/{id}';
543
2751
  };
544
2752
 
545
- export type InitiateOAuthFlowErrors = {
2753
+ export type GetDownloadStatusErrors = {
2754
+ /**
2755
+ * Invalid request parameters
2756
+ */
2757
+ 400: OpenAiApiError;
2758
+ /**
2759
+ * Not authenticated
2760
+ */
2761
+ 401: OpenAiApiError;
2762
+ /**
2763
+ * Insufficient permissions
2764
+ */
2765
+ 403: OpenAiApiError;
2766
+ /**
2767
+ * Download request not found
2768
+ */
2769
+ 404: OpenAiApiError;
546
2770
  /**
547
2771
  * Internal server error
548
2772
  */
549
2773
  500: OpenAiApiError;
550
2774
  };
551
2775
 
552
- export type InitiateOAuthFlowError = InitiateOAuthFlowErrors[keyof InitiateOAuthFlowErrors];
2776
+ export type GetDownloadStatusError = GetDownloadStatusErrors[keyof GetDownloadStatusErrors];
553
2777
 
554
- export type InitiateOAuthFlowResponses = {
555
- /**
556
- * User already authenticated, return home page URL
557
- */
558
- 200: RedirectResponse;
2778
+ export type GetDownloadStatusResponses = {
559
2779
  /**
560
- * User not authenticated, return OAuth authorization URL
2780
+ * Download request found
561
2781
  */
562
- 201: RedirectResponse;
2782
+ 200: DownloadRequest;
563
2783
  };
564
2784
 
565
- export type InitiateOAuthFlowResponse = InitiateOAuthFlowResponses[keyof InitiateOAuthFlowResponses];
2785
+ export type GetDownloadStatusResponse = GetDownloadStatusResponses[keyof GetDownloadStatusResponses];
566
2786
 
567
- export type GetAppInfoData = {
2787
+ export type ListAllModelsData = {
568
2788
  body?: never;
569
2789
  path?: never;
570
- query?: never;
571
- url: '/bodhi/v1/info';
2790
+ query?: {
2791
+ /**
2792
+ * Page number (1-based indexing)
2793
+ */
2794
+ page?: number;
2795
+ /**
2796
+ * Number of items to return per page (maximum 100)
2797
+ */
2798
+ page_size?: number;
2799
+ /**
2800
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
2801
+ */
2802
+ sort?: string;
2803
+ /**
2804
+ * Sort order: 'asc' for ascending, 'desc' for descending
2805
+ */
2806
+ sort_order?: string;
2807
+ };
2808
+ url: '/bodhi/v1/models';
572
2809
  };
573
2810
 
574
- export type GetAppInfoErrors = {
2811
+ export type ListAllModelsErrors = {
2812
+ /**
2813
+ * Invalid request parameters
2814
+ */
2815
+ 400: OpenAiApiError;
2816
+ /**
2817
+ * Not authenticated
2818
+ */
2819
+ 401: OpenAiApiError;
2820
+ /**
2821
+ * Insufficient permissions
2822
+ */
2823
+ 403: OpenAiApiError;
575
2824
  /**
576
2825
  * Internal server error
577
2826
  */
578
2827
  500: OpenAiApiError;
579
2828
  };
580
2829
 
581
- export type GetAppInfoError = GetAppInfoErrors[keyof GetAppInfoErrors];
2830
+ export type ListAllModelsError = ListAllModelsErrors[keyof ListAllModelsErrors];
582
2831
 
583
- export type GetAppInfoResponses = {
2832
+ export type ListAllModelsResponses = {
584
2833
  /**
585
- * Returns the status information about the Application
2834
+ * Paginated list of model aliases retrieved successfully
586
2835
  */
587
- 200: AppInfo;
2836
+ 200: PaginatedAliasResponse;
588
2837
  };
589
2838
 
590
- export type GetAppInfoResponse = GetAppInfoResponses[keyof GetAppInfoResponses];
2839
+ export type ListAllModelsResponse = ListAllModelsResponses[keyof ListAllModelsResponses];
591
2840
 
592
- export type LogoutUserData = {
593
- body?: never;
2841
+ export type CreateAliasData = {
2842
+ body: CreateAliasRequest;
594
2843
  path?: never;
595
2844
  query?: never;
596
- url: '/bodhi/v1/logout';
2845
+ url: '/bodhi/v1/models';
597
2846
  };
598
2847
 
599
- export type LogoutUserErrors = {
2848
+ export type CreateAliasErrors = {
2849
+ /**
2850
+ * Invalid request parameters
2851
+ */
2852
+ 400: OpenAiApiError;
2853
+ /**
2854
+ * Not authenticated
2855
+ */
2856
+ 401: OpenAiApiError;
2857
+ /**
2858
+ * Insufficient permissions
2859
+ */
2860
+ 403: OpenAiApiError;
600
2861
  /**
601
- * Session deletion failed
2862
+ * Internal server error
602
2863
  */
603
2864
  500: OpenAiApiError;
604
2865
  };
605
2866
 
606
- export type LogoutUserError = LogoutUserErrors[keyof LogoutUserErrors];
2867
+ export type CreateAliasError = CreateAliasErrors[keyof CreateAliasErrors];
607
2868
 
608
- export type LogoutUserResponses = {
2869
+ export type CreateAliasResponses = {
609
2870
  /**
610
- * Logout successful, return redirect URL
2871
+ * Alias created successfully
611
2872
  */
612
- 200: RedirectResponse;
2873
+ 201: UserAliasResponse;
613
2874
  };
614
2875
 
615
- export type LogoutUserResponse = LogoutUserResponses[keyof LogoutUserResponses];
2876
+ export type CreateAliasResponse = CreateAliasResponses[keyof CreateAliasResponses];
616
2877
 
617
- export type ListModelFilesData = {
2878
+ export type GetAliasData = {
618
2879
  body?: never;
619
- path?: never;
620
- query?: {
621
- /**
622
- * Page number (1-based)
623
- */
624
- page?: number;
625
- /**
626
- * Number of items per page (max 100)
627
- */
628
- page_size?: number;
629
- /**
630
- * Field to sort by (repo, filename, size, updated_at, snapshot)
631
- */
632
- sort?: string | null;
2880
+ path: {
633
2881
  /**
634
- * Sort order (asc or desc)
2882
+ * Alias identifier for the model
635
2883
  */
636
- sort_order?: string;
2884
+ alias: string;
637
2885
  };
638
- url: '/bodhi/v1/modelfiles';
2886
+ query?: never;
2887
+ url: '/bodhi/v1/models/{alias}';
639
2888
  };
640
2889
 
641
- export type ListModelFilesErrors = {
2890
+ export type GetAliasErrors = {
2891
+ /**
2892
+ * Invalid request parameters
2893
+ */
2894
+ 400: OpenAiApiError;
2895
+ /**
2896
+ * Not authenticated
2897
+ */
2898
+ 401: OpenAiApiError;
2899
+ /**
2900
+ * Insufficient permissions
2901
+ */
2902
+ 403: OpenAiApiError;
2903
+ /**
2904
+ * Alias not found
2905
+ */
2906
+ 404: OpenAiApiError;
642
2907
  /**
643
2908
  * Internal server error
644
2909
  */
645
2910
  500: OpenAiApiError;
646
2911
  };
647
2912
 
648
- export type ListModelFilesError = ListModelFilesErrors[keyof ListModelFilesErrors];
2913
+ export type GetAliasError = GetAliasErrors[keyof GetAliasErrors];
649
2914
 
650
- export type ListModelFilesResponses = {
2915
+ export type GetAliasResponses = {
651
2916
  /**
652
- * List of supported model files from local HuggingFace cache folder
2917
+ * Model alias details
653
2918
  */
654
- 200: PaginatedResponseLocalModelResponse;
2919
+ 200: UserAliasResponse;
655
2920
  };
656
2921
 
657
- export type ListModelFilesResponse = ListModelFilesResponses[keyof ListModelFilesResponses];
2922
+ export type GetAliasResponse = GetAliasResponses[keyof GetAliasResponses];
658
2923
 
659
- export type ListDownloadsData = {
660
- body?: never;
661
- path?: never;
662
- query?: {
663
- /**
664
- * Page number (1-based)
665
- */
666
- page?: number;
667
- /**
668
- * Number of items per page (max 100)
669
- */
670
- page_size?: number;
671
- /**
672
- * Field to sort by (repo, filename, size, updated_at, snapshot)
673
- */
674
- sort?: string | null;
2924
+ export type UpdateAliasData = {
2925
+ body: UpdateAliasRequest;
2926
+ path: {
675
2927
  /**
676
- * Sort order (asc or desc)
2928
+ * Alias identifier
677
2929
  */
678
- sort_order?: string;
2930
+ id: string;
679
2931
  };
680
- url: '/bodhi/v1/modelfiles/pull';
2932
+ query?: never;
2933
+ url: '/bodhi/v1/models/{id}';
681
2934
  };
682
2935
 
683
- export type ListDownloadsErrors = {
2936
+ export type UpdateAliasErrors = {
2937
+ /**
2938
+ * Invalid request parameters
2939
+ */
2940
+ 400: OpenAiApiError;
2941
+ /**
2942
+ * Not authenticated
2943
+ */
2944
+ 401: OpenAiApiError;
2945
+ /**
2946
+ * Insufficient permissions
2947
+ */
2948
+ 403: OpenAiApiError;
684
2949
  /**
685
2950
  * Internal server error
686
2951
  */
687
2952
  500: OpenAiApiError;
688
2953
  };
689
2954
 
690
- export type ListDownloadsError = ListDownloadsErrors[keyof ListDownloadsErrors];
2955
+ export type UpdateAliasError = UpdateAliasErrors[keyof UpdateAliasErrors];
691
2956
 
692
- export type ListDownloadsResponses = {
2957
+ export type UpdateAliasResponses = {
693
2958
  /**
694
- * List of download requests
2959
+ * Alias updated successfully
695
2960
  */
696
- 200: PaginatedResponseDownloadRequest;
2961
+ 200: UserAliasResponse;
697
2962
  };
698
2963
 
699
- export type ListDownloadsResponse = ListDownloadsResponses[keyof ListDownloadsResponses];
2964
+ export type UpdateAliasResponse = UpdateAliasResponses[keyof UpdateAliasResponses];
700
2965
 
701
- export type PullModelFileData = {
702
- /**
703
- * Model file download request
704
- */
705
- body: NewDownloadRequest;
2966
+ export type ListSettingsData = {
2967
+ body?: never;
706
2968
  path?: never;
707
2969
  query?: never;
708
- url: '/bodhi/v1/modelfiles/pull';
2970
+ url: '/bodhi/v1/settings';
709
2971
  };
710
2972
 
711
- export type PullModelFileErrors = {
2973
+ export type ListSettingsErrors = {
712
2974
  /**
713
- * File already exists or invalid input
2975
+ * Invalid request parameters
714
2976
  */
715
2977
  400: OpenAiApiError;
2978
+ /**
2979
+ * Not authenticated
2980
+ */
2981
+ 401: OpenAiApiError;
2982
+ /**
2983
+ * Insufficient permissions
2984
+ */
2985
+ 403: OpenAiApiError;
716
2986
  /**
717
2987
  * Internal server error
718
2988
  */
719
2989
  500: OpenAiApiError;
720
2990
  };
721
2991
 
722
- export type PullModelFileError = PullModelFileErrors[keyof PullModelFileErrors];
2992
+ export type ListSettingsError = ListSettingsErrors[keyof ListSettingsErrors];
723
2993
 
724
- export type PullModelFileResponses = {
725
- /**
726
- * Existing download request found
727
- */
728
- 200: DownloadRequest;
2994
+ export type ListSettingsResponses = {
729
2995
  /**
730
- * Download request created
2996
+ * Application settings retrieved successfully
731
2997
  */
732
- 201: DownloadRequest;
2998
+ 200: Array<SettingInfo>;
733
2999
  };
734
3000
 
735
- export type PullModelFileResponse = PullModelFileResponses[keyof PullModelFileResponses];
3001
+ export type ListSettingsResponse = ListSettingsResponses[keyof ListSettingsResponses];
736
3002
 
737
- export type PullModelByAliasData = {
3003
+ export type DeleteSettingData = {
738
3004
  body?: never;
739
3005
  path: {
740
3006
  /**
741
- * Available model aliases:
742
- * - llama3:instruct - Meta Llama 3 8B Instruct
743
- * - llama3:70b-instruct - Meta Llama 3 70B Instruct
744
- * - llama2:chat - Llama 2 7B Chat
745
- * - llama2:13b-chat - Llama 2 13B Chat
746
- * - llama2:70b-chat - Llama 2 70B Chat
747
- * - phi3:mini - Phi 3 Mini
748
- * - mistral:instruct - Mistral 7B Instruct
749
- * - mixtral:instruct - Mixtral 8x7B Instruct
750
- * - gemma:instruct - Gemma 7B Instruct
751
- * - gemma:7b-instruct-v1.1-q8_0 - Gemma 1.1 7B Instruct
3007
+ * Setting key identifier to reset to default value
752
3008
  */
753
- alias: string;
3009
+ key: string;
754
3010
  };
755
3011
  query?: never;
756
- url: '/bodhi/v1/modelfiles/pull/{alias}';
3012
+ url: '/bodhi/v1/settings/{key}';
757
3013
  };
758
3014
 
759
- export type PullModelByAliasErrors = {
3015
+ export type DeleteSettingErrors = {
760
3016
  /**
761
- * File already exists
3017
+ * Invalid request parameters
762
3018
  */
763
3019
  400: OpenAiApiError;
764
3020
  /**
765
- * Alias not found
3021
+ * Not authenticated
3022
+ */
3023
+ 401: OpenAiApiError;
3024
+ /**
3025
+ * Insufficient permissions
3026
+ */
3027
+ 403: OpenAiApiError;
3028
+ /**
3029
+ * Setting not found
766
3030
  */
767
3031
  404: OpenAiApiError;
768
3032
  /**
@@ -771,36 +3035,52 @@ export type PullModelByAliasErrors = {
771
3035
  500: OpenAiApiError;
772
3036
  };
773
3037
 
774
- export type PullModelByAliasError = PullModelByAliasErrors[keyof PullModelByAliasErrors];
3038
+ export type DeleteSettingError = DeleteSettingErrors[keyof DeleteSettingErrors];
775
3039
 
776
- export type PullModelByAliasResponses = {
777
- /**
778
- * Existing download request found
779
- */
780
- 200: DownloadRequest;
3040
+ export type DeleteSettingResponses = {
781
3041
  /**
782
- * Download request created
3042
+ * Setting reset to default successfully
783
3043
  */
784
- 201: DownloadRequest;
3044
+ 200: SettingInfo;
785
3045
  };
786
3046
 
787
- export type PullModelByAliasResponse = PullModelByAliasResponses[keyof PullModelByAliasResponses];
3047
+ export type DeleteSettingResponse = DeleteSettingResponses[keyof DeleteSettingResponses];
788
3048
 
789
- export type GetDownloadStatusData = {
790
- body?: never;
3049
+ export type UpdateSettingData = {
3050
+ /**
3051
+ * Request to update a setting value
3052
+ */
3053
+ body: {
3054
+ /**
3055
+ * New value for the setting (type depends on setting metadata)
3056
+ */
3057
+ value: unknown;
3058
+ };
791
3059
  path: {
792
3060
  /**
793
- * Download request identifier
3061
+ * Setting key identifier (e.g., BODHI_LOG_LEVEL, BODHI_PORT)
794
3062
  */
795
- id: string;
3063
+ key: string;
796
3064
  };
797
3065
  query?: never;
798
- url: '/bodhi/v1/modelfiles/pull/{id}';
3066
+ url: '/bodhi/v1/settings/{key}';
799
3067
  };
800
3068
 
801
- export type GetDownloadStatusErrors = {
3069
+ export type UpdateSettingErrors = {
802
3070
  /**
803
- * Download request not found
3071
+ * Invalid request parameters
3072
+ */
3073
+ 400: OpenAiApiError;
3074
+ /**
3075
+ * Not authenticated
3076
+ */
3077
+ 401: OpenAiApiError;
3078
+ /**
3079
+ * Insufficient permissions
3080
+ */
3081
+ 403: OpenAiApiError;
3082
+ /**
3083
+ * Setting not found
804
3084
  */
805
3085
  404: OpenAiApiError;
806
3086
  /**
@@ -809,74 +3089,173 @@ export type GetDownloadStatusErrors = {
809
3089
  500: OpenAiApiError;
810
3090
  };
811
3091
 
812
- export type GetDownloadStatusError = GetDownloadStatusErrors[keyof GetDownloadStatusErrors];
3092
+ export type UpdateSettingError = UpdateSettingErrors[keyof UpdateSettingErrors];
813
3093
 
814
- export type GetDownloadStatusResponses = {
3094
+ export type UpdateSettingResponses = {
815
3095
  /**
816
- * Download request found
3096
+ * Setting updated successfully
817
3097
  */
818
- 200: DownloadRequest;
3098
+ 200: SettingInfo;
819
3099
  };
820
3100
 
821
- export type GetDownloadStatusResponse = GetDownloadStatusResponses[keyof GetDownloadStatusResponses];
3101
+ export type UpdateSettingResponse = UpdateSettingResponses[keyof UpdateSettingResponses];
3102
+
3103
+ export type SetupAppData = {
3104
+ /**
3105
+ * Application setup configuration
3106
+ */
3107
+ body: SetupRequest;
3108
+ path?: never;
3109
+ query?: never;
3110
+ url: '/bodhi/v1/setup';
3111
+ };
3112
+
3113
+ export type SetupAppErrors = {
3114
+ /**
3115
+ * Invalid request parameters
3116
+ */
3117
+ 400: OpenAiApiError;
3118
+ /**
3119
+ * Internal server error
3120
+ */
3121
+ 500: OpenAiApiError;
3122
+ };
3123
+
3124
+ export type SetupAppError = SetupAppErrors[keyof SetupAppErrors];
3125
+
3126
+ export type SetupAppResponses = {
3127
+ /**
3128
+ * Application setup completed successfully
3129
+ */
3130
+ 200: SetupResponse;
3131
+ };
3132
+
3133
+ export type SetupAppResponse = SetupAppResponses[keyof SetupAppResponses];
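
Example (editor's sketch): posting a SetupRequest to the setup endpoint defined above and reading back the new AppStatus. The base URL is a placeholder, and no auth header is attempted because SetupAppErrors only lists 400 and 500.

import type { SetupRequest, SetupResponse } from './types.gen'; // assumed path

const BASE_URL = 'http://localhost:8080'; // placeholder server address

async function setupApp(): Promise<SetupResponse> {
  const body: SetupRequest = {
    name: 'my-bodhi-server-01',           // at least 10 characters, per the field doc
    description: 'Team inference server',
  };
  const response = await fetch(`${BASE_URL}/bodhi/v1/setup`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  if (!response.ok) {
    throw new Error(`setup failed with HTTP ${response.status}`);
  }
  return (await response.json()) as SetupResponse;
}
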
822
3134
 
823
- export type ListModelAliasesData = {
3135
+ export type ListApiTokensData = {
824
3136
  body?: never;
825
3137
  path?: never;
826
3138
  query?: {
827
3139
  /**
828
- * Page number (1-based)
3140
+ * Page number (1-based indexing)
829
3141
  */
830
3142
  page?: number;
831
3143
  /**
832
- * Number of items per page (max 100)
3144
+ * Number of items to return per page (maximum 100)
833
3145
  */
834
3146
  page_size?: number;
835
3147
  /**
836
- * Field to sort by (repo, filename, size, updated_at, snapshot)
3148
+ * Field to sort by. Common values: repo, filename, size, updated_at, snapshot, created_at
837
3149
  */
838
- sort?: string | null;
3150
+ sort?: string;
839
3151
  /**
840
- * Sort order (asc or desc)
3152
+ * Sort order: 'asc' for ascending, 'desc' for descending
841
3153
  */
842
3154
  sort_order?: string;
843
3155
  };
844
- url: '/bodhi/v1/models';
3156
+ url: '/bodhi/v1/tokens';
845
3157
  };
846
3158
 
847
- export type ListModelAliasesErrors = {
3159
+ export type ListApiTokensErrors = {
3160
+ /**
3161
+ * Invalid request parameters
3162
+ */
3163
+ 400: OpenAiApiError;
3164
+ /**
3165
+ * Not authenticated
3166
+ */
3167
+ 401: OpenAiApiError;
3168
+ /**
3169
+ * Insufficient permissions
3170
+ */
3171
+ 403: OpenAiApiError;
848
3172
  /**
849
3173
  * Internal server error
850
3174
  */
851
3175
  500: OpenAiApiError;
852
3176
  };
853
3177
 
854
- export type ListModelAliasesError = ListModelAliasesErrors[keyof ListModelAliasesErrors];
3178
+ export type ListApiTokensError = ListApiTokensErrors[keyof ListApiTokensErrors];
855
3179
 
856
- export type ListModelAliasesResponses = {
3180
+ export type ListApiTokensResponses = {
857
3181
  /**
858
- * List of configured model aliases
3182
+ * List of API tokens
859
3183
  */
860
- 200: PaginatedResponseAliasResponse;
3184
+ 200: PaginatedApiTokenResponse;
861
3185
  };
862
3186
 
863
- export type ListModelAliasesResponse = ListModelAliasesResponses[keyof ListModelAliasesResponses];
3187
+ export type ListApiTokensResponse = ListApiTokensResponses[keyof ListApiTokensResponses];
864
3188
 
865
- export type GetAliasData = {
866
- body?: never;
3189
+ export type CreateApiTokenData = {
3190
+ /**
3191
+ * API token creation parameters
3192
+ */
3193
+ body: CreateApiTokenRequest;
3194
+ path?: never;
3195
+ query?: never;
3196
+ url: '/bodhi/v1/tokens';
3197
+ };
3198
+
3199
+ export type CreateApiTokenErrors = {
3200
+ /**
3201
+ * Invalid request parameters
3202
+ */
3203
+ 400: OpenAiApiError;
3204
+ /**
3205
+ * Not authenticated
3206
+ */
3207
+ 401: OpenAiApiError;
3208
+ /**
3209
+ * Insufficient permissions
3210
+ */
3211
+ 403: OpenAiApiError;
3212
+ /**
3213
+ * Internal server error
3214
+ */
3215
+ 500: OpenAiApiError;
3216
+ };
3217
+
3218
+ export type CreateApiTokenError = CreateApiTokenErrors[keyof CreateApiTokenErrors];
3219
+
3220
+ export type CreateApiTokenResponses = {
3221
+ /**
3222
+ * API token created successfully
3223
+ */
3224
+ 201: ApiTokenResponse;
3225
+ };
3226
+
3227
+ export type CreateApiTokenResponse = CreateApiTokenResponses[keyof CreateApiTokenResponses];
3228
+
3229
+ export type UpdateApiTokenData = {
3230
+ /**
3231
+ * Token update request
3232
+ */
3233
+ body: UpdateApiTokenRequest;
867
3234
  path: {
868
3235
  /**
869
- * Alias identifier for the model
3236
+ * Unique identifier of the API token to update
870
3237
  */
871
- alias: string;
3238
+ id: string;
872
3239
  };
873
3240
  query?: never;
874
- url: '/bodhi/v1/models/{alias}';
3241
+ url: '/bodhi/v1/tokens/{id}';
875
3242
  };
876
3243
 
877
- export type GetAliasErrors = {
3244
+ export type UpdateApiTokenErrors = {
878
3245
  /**
879
- * Alias not found
3246
+ * Invalid request parameters
3247
+ */
3248
+ 400: OpenAiApiError;
3249
+ /**
3250
+ * Not authenticated
3251
+ */
3252
+ 401: OpenAiApiError;
3253
+ /**
3254
+ * Insufficient permissions
3255
+ */
3256
+ 403: OpenAiApiError;
3257
+ /**
3258
+ * Token not found
880
3259
  */
881
3260
  404: OpenAiApiError;
882
3261
  /**
@@ -885,145 +3264,139 @@ export type GetAliasErrors = {
885
3264
  500: OpenAiApiError;
886
3265
  };
887
3266
 
888
- export type GetAliasError = GetAliasErrors[keyof GetAliasErrors];
3267
+ export type UpdateApiTokenError = UpdateApiTokenErrors[keyof UpdateApiTokenErrors];
889
3268
 
890
- export type GetAliasResponses = {
3269
+ export type UpdateApiTokenResponses = {
891
3270
  /**
892
- * Model alias details
3271
+ * Token updated successfully
893
3272
  */
894
- 200: AliasResponse;
3273
+ 200: ApiToken;
895
3274
  };
896
3275
 
897
- export type GetAliasResponse = GetAliasResponses[keyof GetAliasResponses];
3276
+ export type UpdateApiTokenResponse = UpdateApiTokenResponses[keyof UpdateApiTokenResponses];
898
3277
 
899
- export type ListSettingsData = {
3278
+ export type GetCurrentUserData = {
900
3279
  body?: never;
901
3280
  path?: never;
902
3281
  query?: never;
903
- url: '/bodhi/v1/settings';
3282
+ url: '/bodhi/v1/user';
904
3283
  };
905
3284
 
906
- export type ListSettingsErrors = {
3285
+ export type GetCurrentUserErrors = {
3286
+ /**
3287
+ * Invalid request parameters
3288
+ */
3289
+ 400: OpenAiApiError;
907
3290
  /**
908
- * Unauthorized - User is not an admin
3291
+ * Not authenticated
909
3292
  */
910
3293
  401: OpenAiApiError;
3294
+ /**
3295
+ * Insufficient permissions
3296
+ */
3297
+ 403: OpenAiApiError;
911
3298
  /**
912
3299
  * Internal server error
913
3300
  */
914
3301
  500: OpenAiApiError;
915
3302
  };
916
3303
 
917
- export type ListSettingsError = ListSettingsErrors[keyof ListSettingsErrors];
3304
+ export type GetCurrentUserError = GetCurrentUserErrors[keyof GetCurrentUserErrors];
918
3305
 
919
- export type ListSettingsResponses = {
3306
+ export type GetCurrentUserResponses = {
920
3307
  /**
921
- * List of application settings
3308
+ * User information (authenticated or not)
922
3309
  */
923
- 200: Array<SettingInfo>;
3310
+ 200: UserResponse;
924
3311
  };
925
3312
 
926
- export type ListSettingsResponse = ListSettingsResponses[keyof ListSettingsResponses];
3313
+ export type GetCurrentUserResponse = GetCurrentUserResponses[keyof GetCurrentUserResponses];

- export type DeleteSettingData = {
+ export type RequestUserAccessData = {
  body?: never;
- path: {
- /**
- * Setting key to reset
- */
- key: string;
- };
+ path?: never;
  query?: never;
- url: '/bodhi/v1/settings/{key}';
+ url: '/bodhi/v1/user/request-access';
  };

- export type DeleteSettingErrors = {
+ export type RequestUserAccessErrors = {
  /**
- * Setting not found
+ * Invalid request parameters
  */
- 404: OpenAiApiError;
- };
-
- export type DeleteSettingError = DeleteSettingErrors[keyof DeleteSettingErrors];
-
- export type DeleteSettingResponses = {
+ 400: OpenAiApiError;
  /**
- * Setting reset to default successfully
+ * Not authenticated
  */
- 200: SettingInfo;
- };
-
- export type DeleteSettingResponse = DeleteSettingResponses[keyof DeleteSettingResponses];
-
- export type UpdateSettingData = {
+ 401: OpenAiApiError;
  /**
- * Request to update a setting value
+ * Insufficient permissions
  */
- body: {
- value: unknown;
- };
- path: {
- /**
- * Setting key to update
- */
- key: string;
- };
- query?: never;
- url: '/bodhi/v1/settings/{key}';
- };
-
- export type UpdateSettingErrors = {
+ 403: OpenAiApiError;
  /**
- * Invalid setting or value
+ * Pending request already exists
  */
- 400: OpenAiApiError;
+ 409: OpenAiApiError;
  /**
- * Setting not found
+ * User already has role
  */
- 404: OpenAiApiError;
+ 422: OpenAiApiError;
+ /**
+ * Internal server error
+ */
+ 500: OpenAiApiError;
  };

- export type UpdateSettingError = UpdateSettingErrors[keyof UpdateSettingErrors];
+ export type RequestUserAccessError = RequestUserAccessErrors[keyof RequestUserAccessErrors];

- export type UpdateSettingResponses = {
+ export type RequestUserAccessResponses = {
  /**
- * Setting updated successfully
+ * Access request created successfully
  */
- 200: SettingInfo;
+ 201: unknown;
  };

- export type UpdateSettingResponse = UpdateSettingResponses[keyof UpdateSettingResponses];
-
- export type SetupAppData = {
- body: SetupRequest;
+ export type GetUserAccessStatusData = {
+ body?: never;
  path?: never;
  query?: never;
- url: '/bodhi/v1/setup';
+ url: '/bodhi/v1/user/request-status';
  };

- export type SetupAppErrors = {
+ export type GetUserAccessStatusErrors = {
  /**
- * Application is already setup
+ * Invalid request parameters
  */
  400: OpenAiApiError;
+ /**
+ * Not authenticated
+ */
+ 401: OpenAiApiError;
+ /**
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
+ /**
+ * Request not found
+ */
+ 404: OpenAiApiError;
  /**
  * Internal server error
  */
  500: OpenAiApiError;
  };

- export type SetupAppError = SetupAppErrors[keyof SetupAppErrors];
+ export type GetUserAccessStatusError = GetUserAccessStatusErrors[keyof GetUserAccessStatusErrors];

- export type SetupAppResponses = {
+ export type GetUserAccessStatusResponses = {
  /**
- * Application setup successful
+ * Request status retrieved
  */
- 200: SetupResponse;
+ 200: UserAccessStatusResponse;
  };

- export type SetupAppResponse = SetupAppResponses[keyof SetupAppResponses];
+ export type GetUserAccessStatusResponse = GetUserAccessStatusResponses[keyof GetUserAccessStatusResponses];
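
Usage sketch (illustrative, not part of the generated file): the new access-request flow, submitting a request to /bodhi/v1/user/request-access and then polling /bodhi/v1/user/request-status. The POST/GET verbs and helper names are assumptions; the URLs, status codes, and response type come from the types above.

import type { UserAccessStatusResponse } from '@bodhiapp/ts-client';

// Hypothetical helpers built on fetch.
async function requestUserAccess(baseUrl: string): Promise<void> {
  const res = await fetch(`${baseUrl}/bodhi/v1/user/request-access`, { method: 'POST' }); // assumed verb
  if (res.status === 409) throw new Error('A pending access request already exists');
  if (res.status !== 201) throw new Error(`Access request failed: ${res.status}`);
}

async function getAccessStatus(baseUrl: string): Promise<UserAccessStatusResponse> {
  const res = await fetch(`${baseUrl}/bodhi/v1/user/request-status`);
  if (!res.ok) throw new Error(`Status check failed: ${res.status}`);
  return (await res.json()) as UserAccessStatusResponse;
}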

- export type ListApiTokensData = {
+ export type ListUsersData = {
  body?: never;
  path?: never;
  query?: {
@@ -1032,94 +3405,114 @@ export type ListApiTokensData = {
  */
  page?: number;
  /**
- * Number of items per page (max 100)
+ * Number of users per page
  */
  page_size?: number;
- /**
- * Field to sort by (repo, filename, size, updated_at, snapshot)
- */
- sort?: string | null;
- /**
- * Sort order (asc or desc)
- */
- sort_order?: string;
  };
- url: '/bodhi/v1/tokens';
+ url: '/bodhi/v1/users';
  };

- export type ListApiTokensErrors = {
+ export type ListUsersErrors = {
+ /**
+ * Invalid request parameters
+ */
+ 400: OpenAiApiError;
  /**
- * Unauthorized - Token missing or invalid
+ * Not authenticated
  */
  401: OpenAiApiError;
+ /**
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
  /**
  * Internal server error
  */
  500: OpenAiApiError;
  };

- export type ListApiTokensError = ListApiTokensErrors[keyof ListApiTokensErrors];
+ export type ListUsersError = ListUsersErrors[keyof ListUsersErrors];

- export type ListApiTokensResponses = {
+ export type ListUsersResponses = {
  /**
- * List of API tokens
+ * Users retrieved successfully
  */
- 200: PaginatedResponseApiToken;
+ 200: UserListResponse;
  };

- export type ListApiTokensResponse = ListApiTokensResponses[keyof ListApiTokensResponses];
+ export type ListUsersResponse = ListUsersResponses[keyof ListUsersResponses];
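
Usage sketch (illustrative, not part of the generated file): listing users with the page and page_size query parameters from ListUsersData. The helper is an assumption; UserListResponse is defined elsewhere in this release.

import type { UserListResponse } from '@bodhiapp/ts-client';

// Hypothetical helper built on fetch.
async function listUsers(baseUrl: string, page = 1, pageSize = 20): Promise<UserListResponse> {
  const url = new URL('/bodhi/v1/users', baseUrl);
  url.searchParams.set('page', String(page));
  url.searchParams.set('page_size', String(pageSize));
  const res = await fetch(url);
  if (!res.ok) throw new Error(`Listing users failed: ${res.status}`);
  return (await res.json()) as UserListResponse;
}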

- export type CreateApiTokenData = {
- body: CreateApiTokenRequest;
- path?: never;
+ export type RemoveUserData = {
+ body?: never;
+ path: {
+ /**
+ * User ID to remove
+ */
+ user_id: string;
+ };
  query?: never;
- url: '/bodhi/v1/tokens';
+ url: '/bodhi/v1/users/{user_id}';
  };

- export type CreateApiTokenErrors = {
+ export type RemoveUserErrors = {
  /**
- * Invalid request
+ * Invalid request parameters
  */
  400: OpenAiApiError;
+ /**
+ * Not authenticated
+ */
+ 401: OpenAiApiError;
+ /**
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
+ /**
+ * User not found
+ */
+ 404: OpenAiApiError;
  /**
  * Internal server error
  */
  500: OpenAiApiError;
  };

- export type CreateApiTokenError = CreateApiTokenErrors[keyof CreateApiTokenErrors];
+ export type RemoveUserError = RemoveUserErrors[keyof RemoveUserErrors];

- export type CreateApiTokenResponses = {
+ export type RemoveUserResponses = {
  /**
- * API token created successfully
+ * User removed successfully
  */
- 201: ApiTokenResponse;
+ 200: unknown;
  };

- export type CreateApiTokenResponse = CreateApiTokenResponses[keyof CreateApiTokenResponses];
-
- export type UpdateApiTokenData = {
- /**
- * Token update request
- */
- body: UpdateApiTokenRequest;
+ export type ChangeUserRoleData = {
+ body: ChangeRoleRequest;
  path: {
  /**
- * Token identifier
+ * User ID to change role for
  */
- id: string;
+ user_id: string;
  };
  query?: never;
- url: '/bodhi/v1/tokens/{id}';
+ url: '/bodhi/v1/users/{user_id}/role';
  };

- export type UpdateApiTokenErrors = {
+ export type ChangeUserRoleErrors = {
+ /**
+ * Invalid request parameters
+ */
+ 400: OpenAiApiError;
  /**
- * Unauthorized - Token missing or invalid
+ * Not authenticated
  */
  401: OpenAiApiError;
  /**
- * Token not found
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
+ /**
+ * User not found
  */
  404: OpenAiApiError;
  /**
@@ -1128,41 +3521,43 @@ export type UpdateApiTokenErrors = {
  500: OpenAiApiError;
  };

- export type UpdateApiTokenError = UpdateApiTokenErrors[keyof UpdateApiTokenErrors];
+ export type ChangeUserRoleError = ChangeUserRoleErrors[keyof ChangeUserRoleErrors];

- export type UpdateApiTokenResponses = {
+ export type ChangeUserRoleResponses = {
  /**
- * Token updated successfully
+ * Role changed successfully
  */
- 200: ApiToken;
+ 200: unknown;
  };
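
Usage sketch (illustrative, not part of the generated file): the new user-management routes. DELETE and PUT are assumed verbs; the URLs and the ChangeRoleRequest body type come from the types above.

import type { ChangeRoleRequest } from '@bodhiapp/ts-client';

// Hypothetical helpers built on fetch.
async function removeUser(baseUrl: string, userId: string): Promise<void> {
  const res = await fetch(`${baseUrl}/bodhi/v1/users/${encodeURIComponent(userId)}`, { method: 'DELETE' }); // assumed verb
  if (!res.ok) throw new Error(`Removing user failed: ${res.status}`);
}

async function changeUserRole(baseUrl: string, userId: string, body: ChangeRoleRequest): Promise<void> {
  const res = await fetch(`${baseUrl}/bodhi/v1/users/${encodeURIComponent(userId)}/role`, {
    method: 'PUT', // assumed verb
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`Changing role failed: ${res.status}`);
}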

- export type UpdateApiTokenResponse = UpdateApiTokenResponses[keyof UpdateApiTokenResponses];
-
- export type GetCurrentUserData = {
+ export type HealthCheckData = {
  body?: never;
  path?: never;
  query?: never;
- url: '/bodhi/v1/user';
+ url: '/health';
  };

- export type GetCurrentUserErrors = {
+ export type HealthCheckErrors = {
+ /**
+ * Invalid request parameters
+ */
+ 400: OpenAiApiError;
  /**
- * Error in extracting user info from token
+ * Internal server error
  */
  500: OpenAiApiError;
  };

- export type GetCurrentUserError = GetCurrentUserErrors[keyof GetCurrentUserErrors];
+ export type HealthCheckError = HealthCheckErrors[keyof HealthCheckErrors];

- export type GetCurrentUserResponses = {
+ export type HealthCheckResponses = {
  /**
- * Returns current user information
+ * Application is healthy and fully operational
  */
- 200: UserInfo;
+ 200: PingResponse;
  };

- export type GetCurrentUserResponse = GetCurrentUserResponses[keyof GetCurrentUserResponses];
+ export type HealthCheckResponse = HealthCheckResponses[keyof HealthCheckResponses];

  export type PingServerData = {
  body?: never;
@@ -1171,9 +3566,22 @@ export type PingServerData = {
  url: '/ping';
  };

+ export type PingServerErrors = {
+ /**
+ * Invalid request parameters
+ */
+ 400: OpenAiApiError;
+ /**
+ * Internal server error
+ */
+ 500: OpenAiApiError;
+ };
+
+ export type PingServerError = PingServerErrors[keyof PingServerErrors];
+
  export type PingServerResponses = {
  /**
- * Server is healthy
+ * Server is responding normally
  */
  200: PingResponse;
  };
@@ -1181,7 +3589,7 @@ export type PingServerResponses = {
  export type PingServerResponse = PingServerResponses[keyof PingServerResponses];
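
Usage sketch (illustrative, not part of the generated file): probing the new /health endpoint alongside the existing /ping; both return PingResponse per the types above.

import type { PingResponse } from '@bodhiapp/ts-client';

// Hypothetical helper built on fetch.
async function healthCheck(baseUrl: string): Promise<PingResponse> {
  const res = await fetch(`${baseUrl}/health`);
  if (!res.ok) throw new Error(`Health check failed: ${res.status}`);
  return (await res.json()) as PingResponse; // same shape as the /ping response
}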

  export type CreateChatCompletionData = {
- body: unknown;
+ body: CreateChatCompletionRequest;
  path?: never;
  query?: never;
  url: '/v1/chat/completions';
@@ -1193,9 +3601,13 @@ export type CreateChatCompletionErrors = {
  */
  400: OpenAiApiError;
  /**
- * Invalid authentication
+ * Not authenticated
  */
  401: OpenAiApiError;
+ /**
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
  /**
  * Internal server error
  */
@@ -1208,13 +3620,52 @@ export type CreateChatCompletionResponses = {
  /**
  * Chat completion response
  */
- 200: unknown;
+ 200: CreateChatCompletionResponse;
  /**
  * Chat completion stream, the status is 200, using 201 to avoid OpenAPI format limitation.
  */
- 201: unknown;
+ 201: CreateChatCompletionStreamResponse;
+ };
+
+ export type CreateChatCompletionResponse2 = CreateChatCompletionResponses[keyof CreateChatCompletionResponses];
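
Usage sketch (illustrative, not part of the generated file): a non-streaming chat completion using the now strongly typed request and response bodies. The bearer-token header is an assumption; the URL and types come from the definitions above.

import type { CreateChatCompletionRequest, CreateChatCompletionResponse } from '@bodhiapp/ts-client';

// Hypothetical helper built on fetch.
async function createChatCompletion(
  baseUrl: string,
  apiToken: string,
  body: CreateChatCompletionRequest,
): Promise<CreateChatCompletionResponse> {
  const res = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiToken}` },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`Chat completion failed: ${res.status}`);
  return (await res.json()) as CreateChatCompletionResponse; // non-streaming 200 body
}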
+
+ export type CreateEmbeddingData = {
+ body: CreateEmbeddingRequest;
+ path?: never;
+ query?: never;
+ url: '/v1/embeddings';
+ };
+
+ export type CreateEmbeddingErrors = {
+ /**
+ * Invalid request parameters
+ */
+ 400: OpenAiApiError;
+ /**
+ * Not authenticated
+ */
+ 401: OpenAiApiError;
+ /**
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
+ /**
+ * Internal server error
+ */
+ 500: OpenAiApiError;
+ };
+
+ export type CreateEmbeddingError = CreateEmbeddingErrors[keyof CreateEmbeddingErrors];
+
+ export type CreateEmbeddingResponses = {
+ /**
+ * Embedding response
+ */
+ 200: CreateEmbeddingResponse;
  };

+ export type CreateEmbeddingResponse2 = CreateEmbeddingResponses[keyof CreateEmbeddingResponses];
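
Usage sketch (illustrative, not part of the generated file): calling the newly added /v1/embeddings endpoint. The bearer-token header is an assumption; the URL and request/response types come from the definitions above.

import type { CreateEmbeddingRequest, CreateEmbeddingResponse } from '@bodhiapp/ts-client';

// Hypothetical helper built on fetch.
async function createEmbedding(
  baseUrl: string,
  apiToken: string,
  body: CreateEmbeddingRequest,
): Promise<CreateEmbeddingResponse> {
  const res = await fetch(`${baseUrl}/v1/embeddings`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiToken}` },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`Embedding request failed: ${res.status}`);
  return (await res.json()) as CreateEmbeddingResponse;
}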
+
  export type ListModelsData = {
  body?: never;
  path?: never;
@@ -1224,9 +3675,17 @@ export type ListModelsData = {

  export type ListModelsErrors = {
  /**
- * Invalid authentication
+ * Invalid request parameters
+ */
+ 400: OpenAiApiError;
+ /**
+ * Not authenticated
  */
  401: OpenAiApiError;
+ /**
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
  /**
  * Internal server error
  */
@@ -1239,11 +3698,57 @@ export type ListModelsResponses = {
  /**
  * List of available models
  */
- 200: ListModelResponseWrapper;
+ 200: ListModelResponse;
  };

  export type ListModelsResponse = ListModelsResponses[keyof ListModelsResponses];

+ export type GetModelData = {
+ body?: never;
+ path: {
+ /**
+ * Model identifier - can be user alias (e.g., 'llama2:chat'), model alias, or API provider alias
+ */
+ id: string;
+ };
+ query?: never;
+ url: '/v1/models/{id}';
+ };
+
+ export type GetModelErrors = {
+ /**
+ * Invalid request parameters
+ */
+ 400: OpenAiApiError;
+ /**
+ * Not authenticated
+ */
+ 401: OpenAiApiError;
+ /**
+ * Insufficient permissions
+ */
+ 403: OpenAiApiError;
+ /**
+ * Model not found
+ */
+ 404: OpenAiApiError;
+ /**
+ * Internal server error
+ */
+ 500: OpenAiApiError;
+ };
+
+ export type GetModelError = GetModelErrors[keyof GetModelErrors];
+
+ export type GetModelResponses = {
+ /**
+ * Model details
+ */
+ 200: ModelResponse;
+ };
+
+ export type GetModelResponse = GetModelResponses[keyof GetModelResponses];
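
Usage sketch (illustrative, not part of the generated file): fetching a single model by id via the new /v1/models/{id} route; the id can be a user alias, model alias, or API provider alias per the comment above. ModelResponse is defined elsewhere in this release; the bearer-token header is an assumption.

import type { ModelResponse } from '@bodhiapp/ts-client';

// Hypothetical helper built on fetch.
async function getModel(baseUrl: string, apiToken: string, id: string): Promise<ModelResponse> {
  const res = await fetch(`${baseUrl}/v1/models/${encodeURIComponent(id)}`, {
    headers: { Authorization: `Bearer ${apiToken}` },
  });
  if (res.status === 404) throw new Error(`Model not found: ${id}`);
  if (!res.ok) throw new Error(`Model lookup failed: ${res.status}`);
  return (await res.json()) as ModelResponse;
}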
+
  export type ClientOptions = {
  baseUrl: 'http://localhost:1135' | (string & {});
  };
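
ClientOptions keeps http://localhost:1135 as the suggested default while still accepting any base URL string. A minimal sketch of a typed options value (illustrative only):

import type { ClientOptions } from '@bodhiapp/ts-client';

// Default local server; any other base URL string also satisfies the type.
const options: ClientOptions = { baseUrl: 'http://localhost:1135' };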