@bodhiapp/ts-client 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1059 @@
+ export type AliasResponse = {
+   alias: string;
+   chat_template: string;
+   context_params: GptContextParams;
+   filename: string;
+   model_params: {};
+   repo: string;
+   request_params: OaiRequestParams;
+   snapshot: string;
+   source: string;
+ };
+ export type ApiToken = {
+   created_at: string;
+   id: string;
+   name: string;
+   status: TokenStatus;
+   token_hash: string;
+   token_id: string;
+   updated_at: string;
+   user_id: string;
+ };
+ export type ApiTokenResponse = {
+   /**
+    * Offline token that can be used as API Token
+    */
+   offline_token: string;
+ };
+ /**
+  * Application information and status
+  */
+ export type AppInfo = {
+   /**
+    * Whether authentication is enabled
+    */
+   authz: boolean;
+   /**
+    * Current application status
+    */
+   status: AppStatus;
+   /**
+    * Application version
+    */
+   version: string;
+ };
+ export type AppStatus = 'setup' | 'ready' | 'resource-admin';
+ export type ChatRequest = {
+   format?: string | null;
+   keep_alive?: null | Duration;
+   messages: Array<Message>;
+   model: string;
+   options?: null | Options;
+   stream?: boolean | null;
+ };
+ /**
+  * Chat template identifier for built-in templates
+  */
+ export type ChatTemplateId = 'llama3' | 'llama2' | 'llama2-legacy' | 'phi3' | 'gemma' | 'deepseek' | 'command-r' | 'openchat' | 'tinyllama';
+ /**
+  * Chat template type that can be either built-in or from a repository
+  */
+ export type ChatTemplateType = 'Embedded' | {
+   /**
+    * Built-in chat template using Id
+    */
+   Id: ChatTemplateId;
+ } | {
+   /**
+    * Custom chat template from a repository
+    */
+   Repo: Repo;
+ };
+ /**
+  * Request to create a new API token
+  */
+ export type CreateApiTokenRequest = {
+   /**
+    * Optional name for the API token
+    */
+   name?: string | null;
+ };
+ export type DownloadRequest = {
+   created_at: string;
+   error?: string | null;
+   filename: string;
+   id: string;
+   repo: string;
+   status: DownloadStatus;
+   updated_at: string;
+ };
+ export type DownloadStatus = 'pending' | 'completed' | 'error';
+ export type Duration = string;
+ export type ErrorBody = {
+   code?: string | null;
+   message: string;
+   param?: string | null;
+   type: string;
+ };
+ export type GptContextParams = {
+   n_ctx?: number | null;
+   n_keep?: number | null;
+   n_parallel?: number | null;
+   n_predict?: number | null;
+   n_seed?: number | null;
+   n_threads?: number | null;
+ };
+ export type ListModelResponseWrapper = {
+   data: Array<{
+     /**
+      * The Unix timestamp (in seconds) when the model was created.
+      */
+     created: number;
+     /**
+      * The model identifier, which can be referenced in the API endpoints.
+      */
+     id: string;
+     /**
+      * The object type, which is always "model".
+      */
+     object: string;
+     /**
+      * The organization that owns the model.
+      */
+     owned_by: string;
+   }>;
+   object: string;
+ };
+ export type LocalModelResponse = {
+   filename: string;
+   model_params: {};
+   repo: string;
+   size?: number | null;
+   snapshot: string;
+ };
+ export type Message = {
+   content: string;
+   images?: Array<string> | null;
+   role: string;
+ };
+ export type Model = {
+   details: ModelDetails;
+   digest: string;
+   model: string;
+   modified_at: number;
+   size: number;
+ };
+ export type ModelDetails = {
+   families?: Array<string> | null;
+   family: string;
+   format: string;
+   parameter_size: string;
+   parent_model?: string | null;
+   quantization_level: string;
+ };
+ export type ModelsResponse = {
+   models: Array<Model>;
+ };
+ /**
+  * Request to pull a model file from HuggingFace
+  */
+ export type NewDownloadRequest = {
+   /**
+    * Model file name to pull
+    */
+   filename: string;
+   /**
+    * HuggingFace repository name
+    */
+   repo: string;
+ };
+ export type OaiRequestParams = {
+   frequency_penalty?: number | null;
+   max_tokens?: number | null;
+   presence_penalty?: number | null;
+   seed?: number | null;
+   stop?: Array<string>;
+   temperature?: number | null;
+   top_p?: number | null;
+   user?: string | null;
+ };
+ export type OllamaError = {
+   error: string;
+ };
+ export type OpenAiApiError = {
+   error: ErrorBody;
+ };
+ export type Options = {
+   f16_kv?: boolean | null;
+   frequency_penalty?: number | null;
+   logits_all?: boolean | null;
+   low_vram?: boolean | null;
+   main_gpu?: number | null;
+   mirostat?: number | null;
+   mirostat_eta?: number | null;
+   mirostat_tau?: number | null;
+   num_batch?: number | null;
+   num_ctx?: number | null;
+   num_gpu?: number | null;
+   num_keep?: number | null;
+   num_predict?: number | null;
+   num_thread?: number | null;
+   numa?: boolean | null;
+   penalize_newline?: boolean | null;
+   presence_penalty?: number | null;
+   repeat_last_n?: number | null;
+   repeat_penalty?: number | null;
+   seed?: number | null;
+   stop?: Array<string> | null;
+   temperature?: number | null;
+   tfs_z?: number | null;
+   top_k?: number | null;
+   top_p?: number | null;
+   typical_p?: number | null;
+   use_mlock?: boolean | null;
+   use_mmap?: boolean | null;
+   vocab_only?: boolean | null;
+ };
+ export type PaginatedResponseAliasResponse = {
+   data: Array<{
+     alias: string;
+     chat_template: string;
+     context_params: GptContextParams;
+     filename: string;
+     model_params: {};
+     repo: string;
+     request_params: OaiRequestParams;
+     snapshot: string;
+     source: string;
+   }>;
+   page: number;
+   page_size: number;
+   total: number;
+ };
+ export type PaginatedResponseApiToken = {
+   data: Array<{
+     created_at: string;
+     id: string;
+     name: string;
+     status: TokenStatus;
+     token_hash: string;
+     token_id: string;
+     updated_at: string;
+     user_id: string;
+   }>;
+   page: number;
+   page_size: number;
+   total: number;
+ };
+ export type PaginatedResponseDownloadRequest = {
+   data: Array<{
+     created_at: string;
+     error?: string | null;
+     filename: string;
+     id: string;
+     repo: string;
+     status: DownloadStatus;
+     updated_at: string;
+   }>;
+   page: number;
+   page_size: number;
+   total: number;
+ };
+ export type PaginatedResponseLocalModelResponse = {
+   data: Array<{
+     filename: string;
+     model_params: {};
+     repo: string;
+     size?: number | null;
+     snapshot: string;
+   }>;
+   page: number;
+   page_size: number;
+   total: number;
+ };
+ /**
+  * Response to the ping endpoint
+  */
+ export type PingResponse = {
+   /**
+    * always returns "pong"
+    */
+   message: string;
+ };
+ export type Repo = {
+   name: string;
+   user: string;
+ };
+ export type SettingInfo = {
+   current_value: unknown;
+   default_value: unknown;
+   key: string;
+   metadata: SettingMetadata;
+   source: SettingSource;
+ };
+ export type SettingMetadata = {
+   type: 'string';
+ } | {
+   max: number;
+   min: number;
+   type: 'number';
+ } | {
+   type: 'boolean';
+ } | {
+   options: Array<string>;
+   type: 'option';
+ };
+ export type SettingSource = 'system' | 'command_line' | 'environment' | 'settings_file' | 'default';
+ /**
+  * Request to setup the application in authenticated or non-authenticated mode
+  */
+ export type SetupRequest = {
+   /**
+    * Whether to enable authentication
+    * - true: Setup app in authenticated mode with role-based access
+    * - false: Setup app in non-authenticated mode for open access
+    */
+   authz: boolean;
+ };
+ /**
+  * Response containing the updated application status after setup
+  */
+ export type SetupResponse = {
+   /**
+    * New application status after setup
+    * - resource-admin: When setup in authenticated mode
+    * - ready: When setup in non-authenticated mode
+    */
+   status: AppStatus;
+ };
+ export type ShowRequest = {
+   name: string;
+ };
+ export type ShowResponse = {
+   details: ModelDetails;
+   license: string;
+   model_info: {};
+   modelfile: string;
+   modified_at: number;
+   parameters: string;
+   template: string;
+ };
+ export type TokenStatus = 'active' | 'inactive';
+ /**
+  * Request to update an existing API token
+  */
+ export type UpdateApiTokenRequest = {
+   /**
+    * New name for the token
+    */
+   name: string;
+   /**
+    * New status for the token (active/inactive)
+    */
+   status: TokenStatus;
+ };
+ /**
+  * Request to update a setting value
+  */
+ export type UpdateSettingRequest = {
+   value: unknown;
+ };
+ /**
+  * Information about the currently logged in user
+  */
+ export type UserInfo = {
+   /**
+    * User's email address
+    */
+   email?: string | null;
+   /**
+    * If user is logged in
+    */
+   logged_in: boolean;
+   /**
+    * List of roles assigned to the user
+    */
+   roles: Array<string>;
+ };
+ export type ChatOllamaModelData = {
+   /**
+    * Chat request in Ollama format
+    */
+   body: ChatRequest;
+   path?: never;
+   query?: never;
+   url: '/api/chat';
+ };
+ export type ChatOllamaModelErrors = {
+   /**
+    * Invalid request
+    */
+   400: OllamaError;
+   /**
+    * Model not found
+    */
+   404: OllamaError;
+   /**
+    * Internal server error
+    */
+   500: OllamaError;
+ };
+ export type ChatOllamaModelError = ChatOllamaModelErrors[keyof ChatOllamaModelErrors];
+ export type ChatOllamaModelResponses = {
+   /**
+    * Chat response
+    */
+   200: unknown;
+ };
+ export type ShowOllamaModelData = {
+   /**
+    * Model name to get details for
+    */
+   body: ShowRequest;
+   path?: never;
+   query?: never;
+   url: '/api/show';
+ };
+ export type ShowOllamaModelErrors = {
+   /**
+    * Model not found
+    */
+   404: OllamaError;
+   /**
+    * Internal server error
+    */
+   500: OllamaError;
+ };
+ export type ShowOllamaModelError = ShowOllamaModelErrors[keyof ShowOllamaModelErrors];
+ export type ShowOllamaModelResponses = {
+   /**
+    * Model details
+    */
+   200: ShowResponse;
+ };
+ export type ShowOllamaModelResponse = ShowOllamaModelResponses[keyof ShowOllamaModelResponses];
+ export type ListOllamaModelsData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/api/tags';
+ };
+ export type ListOllamaModelsErrors = {
+   /**
+    * Internal server error
+    */
+   500: OllamaError;
+ };
+ export type ListOllamaModelsError = ListOllamaModelsErrors[keyof ListOllamaModelsErrors];
+ export type ListOllamaModelsResponses = {
+   /**
+    * List of available models
+    */
+   200: ModelsResponse;
+ };
+ export type ListOllamaModelsResponse = ListOllamaModelsResponses[keyof ListOllamaModelsResponses];
+ export type ListChatTemplatesData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/chat_templates';
+ };
+ export type ListChatTemplatesErrors = {
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type ListChatTemplatesError = ListChatTemplatesErrors[keyof ListChatTemplatesErrors];
+ export type ListChatTemplatesResponses = {
+   /**
+    * List of available chat templates
+    */
+   200: Array<ChatTemplateType>;
+ };
+ export type ListChatTemplatesResponse = ListChatTemplatesResponses[keyof ListChatTemplatesResponses];
+ export type GetAppInfoData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/info';
+ };
+ export type GetAppInfoErrors = {
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type GetAppInfoError = GetAppInfoErrors[keyof GetAppInfoErrors];
+ export type GetAppInfoResponses = {
+   /**
+    * Returns the status information about the Application
+    */
+   200: AppInfo;
+ };
+ export type GetAppInfoResponse = GetAppInfoResponses[keyof GetAppInfoResponses];
+ export type LogoutUserData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/logout';
+ };
+ export type LogoutUserErrors = {
+   /**
+    * Session deletion failed
+    */
+   500: OpenAiApiError;
+ };
+ export type LogoutUserError = LogoutUserErrors[keyof LogoutUserErrors];
+ export type LogoutUserResponses = {
+   /**
+    * Logout successful, redirects to login page
+    */
+   200: unknown;
+ };
+ export type ListModelFilesData = {
+   body?: never;
+   path?: never;
+   query?: {
+     /**
+      * Page number (1-based)
+      */
+     page?: number;
+     /**
+      * Number of items per page (max 100)
+      */
+     page_size?: number;
+     /**
+      * Field to sort by (repo, filename, size, updated_at, snapshot)
+      */
+     sort?: string | null;
+     /**
+      * Sort order (asc or desc)
+      */
+     sort_order?: string;
+   };
+   url: '/bodhi/v1/modelfiles';
+ };
+ export type ListModelFilesErrors = {
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type ListModelFilesError = ListModelFilesErrors[keyof ListModelFilesErrors];
+ export type ListModelFilesResponses = {
+   /**
+    * List of supported model files from local HuggingFace cache folder
+    */
+   200: PaginatedResponseLocalModelResponse;
+ };
+ export type ListModelFilesResponse = ListModelFilesResponses[keyof ListModelFilesResponses];
+ export type ListDownloadsData = {
+   body?: never;
+   path?: never;
+   query?: {
+     /**
+      * Page number (1-based)
+      */
+     page?: number;
+     /**
+      * Number of items per page (max 100)
+      */
+     page_size?: number;
+     /**
+      * Field to sort by (repo, filename, size, updated_at, snapshot)
+      */
+     sort?: string | null;
+     /**
+      * Sort order (asc or desc)
+      */
+     sort_order?: string;
+   };
+   url: '/bodhi/v1/modelfiles/pull';
+ };
+ export type ListDownloadsErrors = {
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type ListDownloadsError = ListDownloadsErrors[keyof ListDownloadsErrors];
+ export type ListDownloadsResponses = {
+   /**
+    * List of download requests
+    */
+   200: PaginatedResponseDownloadRequest;
+ };
+ export type ListDownloadsResponse = ListDownloadsResponses[keyof ListDownloadsResponses];
+ export type PullModelFileData = {
+   /**
+    * Model file download request
+    */
+   body: NewDownloadRequest;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/modelfiles/pull';
+ };
+ export type PullModelFileErrors = {
+   /**
+    * File already exists or invalid input
+    */
+   400: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type PullModelFileError = PullModelFileErrors[keyof PullModelFileErrors];
+ export type PullModelFileResponses = {
+   /**
+    * Existing download request found
+    */
+   200: DownloadRequest;
+   /**
+    * Download request created
+    */
+   201: DownloadRequest;
+ };
+ export type PullModelFileResponse = PullModelFileResponses[keyof PullModelFileResponses];
+ export type PullModelByAliasData = {
+   body?: never;
+   path: {
+     /**
+      * Available model aliases:
+      * - llama3:instruct - Meta Llama 3 8B Instruct
+      * - llama3:70b-instruct - Meta Llama 3 70B Instruct
+      * - llama2:chat - Llama 2 7B Chat
+      * - llama2:13b-chat - Llama 2 13B Chat
+      * - llama2:70b-chat - Llama 2 70B Chat
+      * - phi3:mini - Phi 3 Mini
+      * - mistral:instruct - Mistral 7B Instruct
+      * - mixtral:instruct - Mixtral 8x7B Instruct
+      * - gemma:instruct - Gemma 7B Instruct
+      * - gemma:7b-instruct-v1.1-q8_0 - Gemma 1.1 7B Instruct
+      */
+     alias: string;
+   };
+   query?: never;
+   url: '/bodhi/v1/modelfiles/pull/{alias}';
+ };
+ export type PullModelByAliasErrors = {
+   /**
+    * File already exists
+    */
+   400: OpenAiApiError;
+   /**
+    * Alias not found
+    */
+   404: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type PullModelByAliasError = PullModelByAliasErrors[keyof PullModelByAliasErrors];
+ export type PullModelByAliasResponses = {
+   /**
+    * Existing download request found
+    */
+   200: DownloadRequest;
+   /**
+    * Download request created
+    */
+   201: DownloadRequest;
+ };
+ export type PullModelByAliasResponse = PullModelByAliasResponses[keyof PullModelByAliasResponses];
+ export type GetDownloadStatusData = {
+   body?: never;
+   path: {
+     /**
+      * Download request identifier
+      */
+     id: string;
+   };
+   query?: never;
+   url: '/bodhi/v1/modelfiles/pull/{id}';
+ };
+ export type GetDownloadStatusErrors = {
+   /**
+    * Download request not found
+    */
+   404: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type GetDownloadStatusError = GetDownloadStatusErrors[keyof GetDownloadStatusErrors];
+ export type GetDownloadStatusResponses = {
+   /**
+    * Download request found
+    */
+   200: DownloadRequest;
+ };
+ export type GetDownloadStatusResponse = GetDownloadStatusResponses[keyof GetDownloadStatusResponses];
+ export type ListModelAliasesData = {
+   body?: never;
+   path?: never;
+   query?: {
+     /**
+      * Page number (1-based)
+      */
+     page?: number;
+     /**
+      * Number of items per page (max 100)
+      */
+     page_size?: number;
+     /**
+      * Field to sort by (repo, filename, size, updated_at, snapshot)
+      */
+     sort?: string | null;
+     /**
+      * Sort order (asc or desc)
+      */
+     sort_order?: string;
+   };
+   url: '/bodhi/v1/models';
+ };
+ export type ListModelAliasesErrors = {
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type ListModelAliasesError = ListModelAliasesErrors[keyof ListModelAliasesErrors];
+ export type ListModelAliasesResponses = {
+   /**
+    * List of configured model aliases
+    */
+   200: PaginatedResponseAliasResponse;
+ };
+ export type ListModelAliasesResponse = ListModelAliasesResponses[keyof ListModelAliasesResponses];
+ export type GetAliasData = {
+   body?: never;
+   path: {
+     /**
+      * Alias identifier for the model
+      */
+     alias: string;
+   };
+   query?: never;
+   url: '/bodhi/v1/models/{alias}';
+ };
+ export type GetAliasErrors = {
+   /**
+    * Alias not found
+    */
+   404: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type GetAliasError = GetAliasErrors[keyof GetAliasErrors];
+ export type GetAliasResponses = {
+   /**
+    * Model alias details
+    */
+   200: AliasResponse;
+ };
+ export type GetAliasResponse = GetAliasResponses[keyof GetAliasResponses];
+ export type ListSettingsData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/settings';
+ };
+ export type ListSettingsErrors = {
+   /**
+    * Unauthorized - User is not an admin
+    */
+   401: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type ListSettingsError = ListSettingsErrors[keyof ListSettingsErrors];
+ export type ListSettingsResponses = {
+   /**
+    * List of application settings
+    */
+   200: Array<SettingInfo>;
+ };
+ export type ListSettingsResponse = ListSettingsResponses[keyof ListSettingsResponses];
+ export type DeleteSettingData = {
+   body?: never;
+   path: {
+     /**
+      * Setting key to reset
+      */
+     key: string;
+   };
+   query?: never;
+   url: '/bodhi/v1/settings/{key}';
+ };
+ export type DeleteSettingErrors = {
+   /**
+    * Setting not found
+    */
+   404: OpenAiApiError;
+ };
+ export type DeleteSettingError = DeleteSettingErrors[keyof DeleteSettingErrors];
+ export type DeleteSettingResponses = {
+   /**
+    * Setting reset to default successfully
+    */
+   200: SettingInfo;
+ };
+ export type DeleteSettingResponse = DeleteSettingResponses[keyof DeleteSettingResponses];
+ export type UpdateSettingData = {
+   /**
+    * Request to update a setting value
+    */
+   body: {
+     value: unknown;
+   };
+   path: {
+     /**
+      * Setting key to update
+      */
+     key: string;
+   };
+   query?: never;
+   url: '/bodhi/v1/settings/{key}';
+ };
+ export type UpdateSettingErrors = {
+   /**
+    * Invalid setting or value
+    */
+   400: OpenAiApiError;
+   /**
+    * Setting not found
+    */
+   404: OpenAiApiError;
+ };
+ export type UpdateSettingError = UpdateSettingErrors[keyof UpdateSettingErrors];
+ export type UpdateSettingResponses = {
+   /**
+    * Setting updated successfully
+    */
+   200: SettingInfo;
+ };
+ export type UpdateSettingResponse = UpdateSettingResponses[keyof UpdateSettingResponses];
+ export type SetupAppData = {
+   body: SetupRequest;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/setup';
+ };
+ export type SetupAppErrors = {
+   /**
+    * Application is already setup
+    */
+   400: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type SetupAppError = SetupAppErrors[keyof SetupAppErrors];
+ export type SetupAppResponses = {
+   /**
+    * Application setup successful
+    */
+   200: SetupResponse;
+ };
+ export type SetupAppResponse = SetupAppResponses[keyof SetupAppResponses];
+ export type ListApiTokensData = {
+   body?: never;
+   path?: never;
+   query?: {
+     /**
+      * Page number (1-based)
+      */
+     page?: number;
+     /**
+      * Number of items per page (max 100)
+      */
+     page_size?: number;
+     /**
+      * Field to sort by (repo, filename, size, updated_at, snapshot)
+      */
+     sort?: string | null;
+     /**
+      * Sort order (asc or desc)
+      */
+     sort_order?: string;
+   };
+   url: '/bodhi/v1/tokens';
+ };
+ export type ListApiTokensErrors = {
+   /**
+    * Unauthorized - Token missing or invalid
+    */
+   401: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type ListApiTokensError = ListApiTokensErrors[keyof ListApiTokensErrors];
+ export type ListApiTokensResponses = {
+   /**
+    * List of API tokens
+    */
+   200: PaginatedResponseApiToken;
+ };
+ export type ListApiTokensResponse = ListApiTokensResponses[keyof ListApiTokensResponses];
+ export type CreateApiTokenData = {
+   body: CreateApiTokenRequest;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/tokens';
+ };
+ export type CreateApiTokenErrors = {
+   /**
+    * Invalid request
+    */
+   400: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type CreateApiTokenError = CreateApiTokenErrors[keyof CreateApiTokenErrors];
+ export type CreateApiTokenResponses = {
+   /**
+    * API token created successfully
+    */
+   201: ApiTokenResponse;
+ };
+ export type CreateApiTokenResponse = CreateApiTokenResponses[keyof CreateApiTokenResponses];
+ export type UpdateApiTokenData = {
+   /**
+    * Token update request
+    */
+   body: UpdateApiTokenRequest;
+   path: {
+     /**
+      * Token identifier
+      */
+     id: string;
+   };
+   query?: never;
+   url: '/bodhi/v1/tokens/{id}';
+ };
+ export type UpdateApiTokenErrors = {
+   /**
+    * Unauthorized - Token missing or invalid
+    */
+   401: OpenAiApiError;
+   /**
+    * Token not found
+    */
+   404: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type UpdateApiTokenError = UpdateApiTokenErrors[keyof UpdateApiTokenErrors];
+ export type UpdateApiTokenResponses = {
+   /**
+    * Token updated successfully
+    */
+   200: ApiToken;
+ };
+ export type UpdateApiTokenResponse = UpdateApiTokenResponses[keyof UpdateApiTokenResponses];
+ export type GetCurrentUserData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/bodhi/v1/user';
+ };
+ export type GetCurrentUserErrors = {
+   /**
+    * Error in extracting user info from token
+    */
+   500: OpenAiApiError;
+ };
+ export type GetCurrentUserError = GetCurrentUserErrors[keyof GetCurrentUserErrors];
+ export type GetCurrentUserResponses = {
+   /**
+    * Returns current user information
+    */
+   200: UserInfo;
+ };
+ export type GetCurrentUserResponse = GetCurrentUserResponses[keyof GetCurrentUserResponses];
+ export type PingServerData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/ping';
+ };
+ export type PingServerResponses = {
+   /**
+    * Server is healthy
+    */
+   200: PingResponse;
+ };
+ export type PingServerResponse = PingServerResponses[keyof PingServerResponses];
+ export type CreateChatCompletionData = {
+   body: unknown;
+   path?: never;
+   query?: never;
+   url: '/v1/chat/completions';
+ };
+ export type CreateChatCompletionErrors = {
+   /**
+    * Invalid request parameters
+    */
+   400: OpenAiApiError;
+   /**
+    * Invalid authentication
+    */
+   401: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type CreateChatCompletionError = CreateChatCompletionErrors[keyof CreateChatCompletionErrors];
+ export type CreateChatCompletionResponses = {
+   /**
+    * Chat completion response
+    */
+   200: unknown;
+   /**
+    * Chat completion stream, the status is 200, using 201 to avoid OpenAPI format limitation.
+    */
+   201: unknown;
+ };
+ export type ListModelsData = {
+   body?: never;
+   path?: never;
+   query?: never;
+   url: '/v1/models';
+ };
+ export type ListModelsErrors = {
+   /**
+    * Invalid authentication
+    */
+   401: OpenAiApiError;
+   /**
+    * Internal server error
+    */
+   500: OpenAiApiError;
+ };
+ export type ListModelsError = ListModelsErrors[keyof ListModelsErrors];
+ export type ListModelsResponses = {
+   /**
+    * List of available models
+    */
+   200: ListModelResponseWrapper;
+ };
+ export type ListModelsResponse = ListModelsResponses[keyof ListModelsResponses];
+ export type ClientOptions = {
+   baseUrl: 'http://localhost:1135' | (string & {});
+ };
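
Each operation in this file follows the same generated pattern: a *Data type describing the request (body, path, query, url), an *Errors map keyed by status code, and a *Responses map keyed by status code. A minimal sketch of consuming these types with plain fetch, assuming they are re-exported from the package root; the getAppInfo helper below is hypothetical and not part of the published package:

import type {
  ClientOptions,
  GetAppInfoData,
  GetAppInfoError,
  GetAppInfoResponse,
} from '@bodhiapp/ts-client';

// Hypothetical helper (illustration only): calls GET /bodhi/v1/info and
// narrows the result using the generated response and error types.
async function getAppInfo(options: ClientOptions): Promise<GetAppInfoResponse> {
  const url: GetAppInfoData['url'] = '/bodhi/v1/info';
  const res = await fetch(`${options.baseUrl}${url}`);
  if (!res.ok) {
    // GetAppInfoError resolves to OpenAiApiError, whose ErrorBody carries the message.
    const err = (await res.json()) as GetAppInfoError;
    throw new Error(err.error.message);
  }
  return (await res.json()) as GetAppInfoResponse;
}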