indoxrouter 0.1.23__py3-none-any.whl → 0.1.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- indoxrouter/__init__.py +67 -67
- indoxrouter/client.py +1166 -1079
- indoxrouter/constants.py +2 -0
- indoxrouter/exceptions.py +86 -86
- {indoxrouter-0.1.23.dist-info → indoxrouter-0.1.26.dist-info}/METADATA +179 -234
- indoxrouter-0.1.26.dist-info/RECORD +9 -0
- indoxrouter-0.1.26.dist-info/licenses/LICENSE +344 -0
- indoxrouter-0.1.23.dist-info/RECORD +0 -8
- {indoxrouter-0.1.23.dist-info → indoxrouter-0.1.26.dist-info}/WHEEL +0 -0
- {indoxrouter-0.1.23.dist-info → indoxrouter-0.1.26.dist-info}/top_level.txt +0 -0
indoxrouter/client.py
CHANGED
@@ -1,1079 +1,1166 @@
- """
- IndoxRouter Client Module
-
- This module provides a client for interacting with the IndoxRouter API, which serves as a unified
- interface to multiple AI providers and models. The client handles authentication, rate limiting,
- error handling, and provides a standardized response format across different AI services.
-
- IMPORTANT: The IndoxRouter server now supports only cookie-based authentication. This client
- automatically handles authentication by exchanging your API key for a JWT token through the login endpoint.
-
- The Client class offers methods for:
- - Authentication and session management
- - Making API requests with automatic token refresh
- - Accessing AI capabilities: chat completions, text completions, embeddings,
- - Retrieving information about available providers and models
- - Monitoring usage statistics and credit consumption
-
- Usage example:
- ```python
- from indoxRouter import Client
-
- # Initialize client with API key
- client = Client(api_key="your_api_key")
-
- # Get available models
- models = client.models()
-
- # Generate a chat completion
- response = client.chat([
-     {"role": "system", "content": "You are a helpful assistant."},
-     {"role": "user", "content": "Tell me a joke."}
- ], model="openai/gpt-4o-mini")
-
- # Generate text embeddings
- embeddings = client.embeddings("This is a sample text", model="openai/text-embedding-ada-002")
[removed lines 36-1079 of the old client.py survive only as stray fragments in the extracted diff view and are not reproduced here]
+ """
+ IndoxRouter Client Module
+
+ This module provides a client for interacting with the IndoxRouter API, which serves as a unified
+ interface to multiple AI providers and models. The client handles authentication, rate limiting,
+ error handling, and provides a standardized response format across different AI services.
+
+ IMPORTANT: The IndoxRouter server now supports only cookie-based authentication. This client
+ automatically handles authentication by exchanging your API key for a JWT token through the login endpoint.
+
+ The Client class offers methods for:
+ - Authentication and session management
+ - Making API requests with automatic token refresh
+ - Accessing AI capabilities: chat completions, text completions, embeddings, image generation, and text-to-speech
+ - Retrieving information about available providers and models
+ - Monitoring usage statistics and credit consumption
+
+ Usage example:
+ ```python
+ from indoxRouter import Client
+
+ # Initialize client with API key
+ client = Client(api_key="your_api_key")
+
+ # Get available models
+ models = client.models()
+
+ # Generate a chat completion
+ response = client.chat([
+     {"role": "system", "content": "You are a helpful assistant."},
+     {"role": "user", "content": "Tell me a joke."}
+ ], model="openai/gpt-4o-mini")
+
+ # Generate text embeddings
+ embeddings = client.embeddings("This is a sample text", model="openai/text-embedding-ada-002")
+
+ # Generate text-to-speech audio
+ audio = client.text_to_speech("Hello, welcome to IndoxRouter!", model="openai/tts-1", voice="alloy")
+
+ # Clean up resources when done
+ client.close()
+ ```
+
+ The client can also be used as a context manager:
+ ```python
+ with Client(api_key="your_api_key") as client:
+     response = client.chat([{"role": "user", "content": "Hello!"}], model="openai/gpt-4o-mini")
+ ```
+ """
+
+ import os
+ import logging
+ from datetime import datetime, timedelta
+ from typing import Dict, List, Any, Optional, Union
+ import requests
+ import json
+
+ from .exceptions import (
+     AuthenticationError,
+     NetworkError,
+     ProviderNotFoundError,
+     ModelNotFoundError,
+     ModelNotAvailableError,
+     InvalidParametersError,
+     RateLimitError,
+     ProviderError,
+     RequestError,
+     InsufficientCreditsError,
+     ValidationError,
+     APIError,
+ )
+ from .constants import (
+     DEFAULT_BASE_URL,
+     DEFAULT_TIMEOUT,
+     DEFAULT_MODEL,
+     DEFAULT_EMBEDDING_MODEL,
+     DEFAULT_IMAGE_MODEL,
+     DEFAULT_TTS_MODEL,
+     CHAT_ENDPOINT,
+     COMPLETION_ENDPOINT,
+     EMBEDDING_ENDPOINT,
+     IMAGE_ENDPOINT,
+     TTS_ENDPOINT,
+     MODEL_ENDPOINT,
+     USAGE_ENDPOINT,
+     USE_COOKIES,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ class Client:
+     """
+     Client for interacting with the IndoxRouter API.
+     """
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,
+         timeout: int = DEFAULT_TIMEOUT,
+         base_url: Optional[str] = None,
+     ):
+         """
+         Initialize the client.
+
+         Args:
+             api_key: API key for authentication. If not provided, the client will look for the
+                 INDOX_ROUTER_API_KEY environment variable.
+             timeout: Request timeout in seconds.
+             base_url: Base URL for the API. If not provided, the client will use the default URL.
+         """
+
+         use_cookies = USE_COOKIES
+         self.api_key = api_key or os.environ.get("INDOX_ROUTER_API_KEY")
+         if not self.api_key:
+             raise ValueError(
+                 "API key must be provided either as an argument or as the INDOX_ROUTER_API_KEY environment variable."
+             )
+
+         self.base_url = base_url if base_url is not None else DEFAULT_BASE_URL
+
+         if self.base_url.endswith("/"):
+             self.base_url = self.base_url.rstrip("/")
+
+         self.timeout = timeout
+         self.use_cookies = use_cookies
+         self.session = requests.Session()
+
+         # Authenticate and get JWT tokens
+         self._authenticate()
+
+     def _authenticate(self):
+         """
+         Authenticate with the server and get JWT tokens.
+         This uses the /auth/token endpoint to get JWT tokens using the API key.
+         """
+         try:
+             # First try with the dedicated API key endpoint
+             logger.debug("Authenticating with dedicated API key endpoint")
+             response = self.session.post(
+                 f"{self.base_url}/api/v1/auth/api-key",
+                 headers={"X-API-Key": self.api_key},
+                 timeout=self.timeout,
+             )
+
+             if response.status_code != 200:
+                 # If dedicated endpoint fails, try using the API key as a username
+                 logger.debug("API key endpoint failed, trying with API key as username")
+                 response = self.session.post(
+                     f"{self.base_url}/api/v1/auth/token",
+                     data={
+                         "username": self.api_key,
+                         "password": self.api_key,  # Try using API key as both username and password
+                     },
+                     timeout=self.timeout,
+                 )
+
+             if response.status_code != 200:
+                 # Try one more method - the token endpoint with different format
+                 logger.debug("Trying with API key as token parameter")
+                 response = self.session.post(
+                     f"{self.base_url}/api/v1/auth/token",
+                     data={
+                         "username": "pip_client",
+                         "password": self.api_key,
+                     },
+                     timeout=self.timeout,
+                 )
+
+             if response.status_code != 200:
+                 error_data = {}
+                 try:
+                     error_data = response.json()
+                 except:
+                     error_data = {"detail": response.text}
+
+                 raise AuthenticationError(
+                     f"Authentication failed: {error_data.get('detail', 'Unknown error')}"
+                 )
+
+             # Check if we have a token in the response body
+             try:
+                 response_data = response.json()
+                 if "access_token" in response_data:
+                     # Store token in the session object for later use
+                     self.access_token = response_data["access_token"]
+                     logger.debug("Retrieved access token from response body")
+             except:
+                 # If we couldn't parse JSON, that's fine - we'll rely on cookies
+                 logger.debug("No token found in response body, will rely on cookies")
+
+             # At this point, the cookies should be set in the session
+             logger.debug("Authentication successful")
+
+             # Check if we have the cookies we need
+             if "access_token" not in self.session.cookies:
+                 logger.warning(
+                     "Authentication succeeded but no access_token cookie was set"
+                 )
+
+         except requests.RequestException as e:
+             logger.error(f"Authentication request failed: {str(e)}")
+             raise NetworkError(f"Network error during authentication: {str(e)}")
+
+     def _get_domain(self):
+         """
+         Extract domain from the base URL for cookie setting.
+         """
+         try:
+             from urllib.parse import urlparse
+
+             parsed_url = urlparse(self.base_url)
+             return parsed_url.netloc
+         except Exception:
+             # If parsing fails, return a default value
+             return ""
+
+     def enable_debug(self, level=logging.DEBUG):
+         """
+         Enable debug logging for the client.
+
+         Args:
+             level: Logging level (default: logging.DEBUG)
+         """
+         handler = logging.StreamHandler()
+         handler.setFormatter(
+             logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+         )
+         logger.addHandler(handler)
+         logger.setLevel(level)
+         logger.debug("Debug logging enabled")
+
+     def _request(
+         self,
+         method: str,
+         endpoint: str,
+         data: Optional[Dict[str, Any]] = None,
+         stream: bool = False,
+     ) -> Any:
+         """
+         Make a request to the API.
+
+         Args:
+             method: HTTP method (GET, POST, etc.)
+             endpoint: API endpoint
+             data: Request data
+             stream: Whether to stream the response
+
+         Returns:
+             Response data
+         """
+         # Add API version prefix if not already present
+         if not endpoint.startswith("api/v1/") and not endpoint.startswith("/api/v1/"):
+             endpoint = f"api/v1/{endpoint}"
+
+         # Remove any leading slash for consistent URL construction
+         if endpoint.startswith("/"):
+             endpoint = endpoint[1:]
+
+         url = f"{self.base_url}/{endpoint}"
+         headers = {"Content-Type": "application/json"}
+
+         # Add Authorization header if we have an access token
+         if hasattr(self, "access_token") and self.access_token:
+             headers["Authorization"] = f"Bearer {self.access_token}"
+
+         # logger.debug(f"Making {method} request to {url}")
+         # if data:
+         #     logger.debug(f"Request data: {json.dumps(data, indent=2)}")
+
+         # Diagnose potential issues with the request
+         if method == "POST" and data:
+             diagnosis = self.diagnose_request(endpoint, data)
+             if not diagnosis["is_valid"]:
+                 issues_str = "\n".join([f"- {issue}" for issue in diagnosis["issues"]])
+                 logger.warning(f"Request validation issues:\n{issues_str}")
+                 # We'll still send the request, but log the issues
+
+         try:
+             response = self.session.request(
+                 method,
+                 url,
+                 headers=headers,
+                 json=data,
+                 timeout=self.timeout,
+                 stream=stream,
+             )
+
+             if stream:
+                 return response
+
+             # Check if we need to reauthenticate (401 Unauthorized)
+             if response.status_code == 401:
+                 logger.debug("Received 401, attempting to reauthenticate")
+                 self._authenticate()
+
+                 # Update Authorization header with new token if available
+                 if hasattr(self, "access_token") and self.access_token:
+                     headers["Authorization"] = f"Bearer {self.access_token}"
+
+                 # Retry the request after reauthentication
+                 response = self.session.request(
+                     method,
+                     url,
+                     headers=headers,
+                     json=data,
+                     timeout=self.timeout,
+                     stream=stream,
+                 )
+
+                 if stream:
+                     return response
+
+             response.raise_for_status()
+             return response.json()
+         except requests.HTTPError as e:
+             error_data = {}
+             try:
+                 error_data = e.response.json()
+                 logger.error(f"HTTP error response: {json.dumps(error_data, indent=2)}")
+             except (ValueError, AttributeError):
+                 error_data = {"detail": str(e)}
+                 logger.error(f"HTTP error (no JSON response): {str(e)}")
+
+             status_code = getattr(e.response, "status_code", 500)
+             error_message = error_data.get("detail", str(e))
+
+             if status_code == 401:
+                 raise AuthenticationError(f"Authentication failed: {error_message}")
+             elif status_code == 404:
+                 if "provider" in error_message.lower():
+                     raise ProviderNotFoundError(error_message)
+                 elif "model" in error_message.lower():
+                     # Check if it's a model not found vs model not available
+                     if (
+                         "not supported" in error_message.lower()
+                         or "disabled" in error_message.lower()
+                         or "unavailable" in error_message.lower()
+                     ):
+                         raise ModelNotAvailableError(error_message)
+                     else:
+                         raise ModelNotFoundError(error_message)
+                 else:
+                     raise APIError(f"Resource not found: {error_message} (URL: {url})")
+             elif status_code == 429:
+                 raise RateLimitError(f"Rate limit exceeded: {error_message}")
+             elif status_code == 400:
+                 # Check if it's a validation error or invalid parameters
+                 if (
+                     "validation" in error_message.lower()
+                     or "invalid format" in error_message.lower()
+                 ):
+                     raise ValidationError(f"Request validation failed: {error_message}")
+                 else:
+                     raise InvalidParametersError(f"Invalid parameters: {error_message}")
+             elif status_code == 402:
+                 raise InsufficientCreditsError(f"Insufficient credits: {error_message}")
+             elif status_code == 422:
+                 # Unprocessable Entity - typically validation errors
+                 raise ValidationError(f"Request validation failed: {error_message}")
+             elif status_code == 503:
+                 # Service Unavailable - model might be temporarily unavailable
+                 if "model" in error_message.lower():
+                     raise ModelNotAvailableError(
+                         f"Model temporarily unavailable: {error_message}"
+                     )
+                 else:
+                     raise APIError(f"Service unavailable: {error_message}")
+             elif status_code == 500:
+                 # Provide more detailed information for server errors
+                 error_detail = error_data.get("detail", "No details provided")
+                 # Include the request data in the error message for better debugging
+                 request_data_str = json.dumps(data, indent=2) if data else "None"
+                 raise RequestError(
+                     f"Server error (500): {error_detail}. URL: {url}.\n"
+                     f"Request data: {request_data_str}\n"
+                     f"This may indicate an issue with the server configuration or a problem with the provider service."
+                 )
+             elif status_code >= 400 and status_code < 500:
+                 # Client errors
+                 raise APIError(f"Client error ({status_code}): {error_message}")
+             else:
+                 # Server errors
+                 raise RequestError(f"Server error ({status_code}): {error_message}")
+         except requests.RequestException as e:
+             logger.error(f"Request exception: {str(e)}")
+             raise NetworkError(f"Network error: {str(e)}")
+
+     def _format_model_string(self, model: str) -> str:
+         """
+         Format the model string in a way that the server expects.
+
+         The server might be expecting a different format than "provider/model".
+         This method handles different formatting requirements.
+
+         Args:
+             model: Model string in the format "provider/model"
+
+         Returns:
+             Formatted model string
+         """
+         if not model or "/" not in model:
+             return model
+
+         # The standard format is "provider/model"
+         # But the server might be expecting something different
+         provider, model_name = model.split("/", 1)
+
+         # For now, return the original format as it seems the server
+         # is having issues with JSON formatted model strings
+         return model
+
+     def _format_image_size_for_provider(
+         self, size: str, provider: str, model: str
+     ) -> str:
+         """
+         Format the image size parameter based on the provider's requirements.
+
+         Google requires aspect ratios like "1:1", "4:3", etc. while OpenAI uses pixel dimensions
+         like "1024x1024", "512x512", etc.
+
+         Args:
+             size: The size parameter (e.g., "1024x1024")
+             provider: The provider name (e.g., "google", "openai")
+             model: The model name
+
+         Returns:
+             Formatted size parameter appropriate for the provider
+         """
+         if provider.lower() == "google":
+             # Google uses aspect ratios instead of pixel dimensions
+             # Convert common pixel dimensions to aspect ratios
+             size_to_aspect_ratio = {
+                 "1024x1024": "1:1",
+                 "512x512": "1:1",
+                 "256x256": "1:1",
+                 "1024x768": "4:3",
+                 "768x1024": "3:4",
+                 "1024x1536": "2:3",
+                 "1536x1024": "3:2",
+                 "1792x1024": "16:9",
+                 "1024x1792": "9:16",
+             }
+
+             # Check if size is already in aspect ratio format (contains a colon)
+             if ":" in size:
+                 return size
+
+             # Convert to aspect ratio if we have a mapping, otherwise use default 1:1
+             return size_to_aspect_ratio.get(size, "1:1")
+
+         # For other providers, return the original size
+         return size
+
+     def chat(
+         self,
+         messages: List[Dict[str, str]],
+         model: str = DEFAULT_MODEL,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         **kwargs,
+     ) -> Dict[str, Any]:
+         """
+         Generate a chat completion.
+
+         Args:
+             messages: List of messages in the conversation
+             model: Model to use in the format "provider/model" (e.g., "openai/gpt-4o-mini")
+             temperature: Sampling temperature
+             max_tokens: Maximum number of tokens to generate
+             stream: Whether to stream the response
+             **kwargs: Additional parameters to pass to the API
+
+         Returns:
+             Response data
+         """
+         # Format the model string
+         formatted_model = self._format_model_string(model)
+
+         # Filter out problematic parameters
+         filtered_kwargs = {}
+         for key, value in kwargs.items():
+             if key not in ["return_generator"]:  # List of parameters to exclude
+                 filtered_kwargs[key] = value
+
+         data = {
+             "messages": messages,
+             "model": formatted_model,
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+             "stream": stream,
+             "additional_params": filtered_kwargs,
+         }
+
+         if stream:
+             response = self._request("POST", CHAT_ENDPOINT, data, stream=True)
+             return self._handle_streaming_response(response)
+         else:
+             return self._request("POST", CHAT_ENDPOINT, data)
+
+     def completion(
+         self,
+         prompt: str,
+         model: str = DEFAULT_MODEL,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         **kwargs,
+     ) -> Dict[str, Any]:
+         """
+         Generate a text completion.
+
+         Args:
+             prompt: Text prompt
+             model: Model to use in the format "provider/model" (e.g., "openai/gpt-4o-mini")
+             temperature: Sampling temperature
+             max_tokens: Maximum number of tokens to generate
+             stream: Whether to stream the response
+             **kwargs: Additional parameters to pass to the API
+
+         Returns:
+             Response data
+         """
+         # Format the model string
+         formatted_model = self._format_model_string(model)
+
+         # Filter out problematic parameters
+         filtered_kwargs = {}
+         for key, value in kwargs.items():
+             if key not in ["return_generator"]:  # List of parameters to exclude
+                 filtered_kwargs[key] = value
+
+         data = {
+             "prompt": prompt,
+             "model": formatted_model,
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+             "stream": stream,
+             "additional_params": filtered_kwargs,
+         }
+
+         if stream:
+             response = self._request("POST", COMPLETION_ENDPOINT, data, stream=True)
+             return self._handle_streaming_response(response)
+         else:
+             return self._request("POST", COMPLETION_ENDPOINT, data)
+
+     def embeddings(
+         self,
+         text: Union[str, List[str]],
+         model: str = DEFAULT_EMBEDDING_MODEL,
+         **kwargs,
+     ) -> Dict[str, Any]:
+         """
+         Generate embeddings for text.
+
+         Args:
+             text: Text to embed (string or list of strings)
+             model: Model to use in the format "provider/model" (e.g., "openai/text-embedding-ada-002")
+             **kwargs: Additional parameters to pass to the API
+
+         Returns:
+             Response data with embeddings
+         """
+         # Format the model string
+         formatted_model = self._format_model_string(model)
+
+         # Filter out problematic parameters
+         filtered_kwargs = {}
+         for key, value in kwargs.items():
+             if key not in ["return_generator"]:  # List of parameters to exclude
+                 filtered_kwargs[key] = value
+
+         data = {
+             "text": text if isinstance(text, list) else [text],
+             "model": formatted_model,
+             "additional_params": filtered_kwargs,
+         }
+
+         return self._request("POST", EMBEDDING_ENDPOINT, data)
+
+     def images(
+         self,
+         prompt: str,
+         model: str = DEFAULT_IMAGE_MODEL,
+         size: Optional[str] = None,
+         n: Optional[int] = None,
+         quality: Optional[str] = None,
+         style: Optional[str] = None,
+         # Standard parameters
+         response_format: Optional[str] = None,
+         user: Optional[str] = None,
+         # OpenAI-specific parameters
+         background: Optional[str] = None,
+         moderation: Optional[str] = None,
+         output_compression: Optional[int] = None,
+         output_format: Optional[str] = None,
+         # Google-specific parameters
+         negative_prompt: Optional[str] = None,
+         guidance_scale: Optional[float] = None,
+         seed: Optional[int] = None,
+         safety_filter_level: Optional[str] = None,
+         person_generation: Optional[str] = None,
+         include_safety_attributes: Optional[bool] = None,
+         include_rai_reason: Optional[bool] = None,
+         language: Optional[str] = None,
+         output_mime_type: Optional[str] = None,
+         add_watermark: Optional[bool] = None,
+         enhance_prompt: Optional[bool] = None,
+         # Google-specific direct parameters
+         aspect_ratio: Optional[str] = None,
+         **kwargs,
+     ) -> Dict[str, Any]:
+         """
+         Generate images from a prompt.
+
+         Args:
+             prompt: Text prompt
+             model: Model to use in the format "provider/model" (e.g., "openai/dall-e-3", "google/imagen-3.0-generate-002")
+
+             # Provider-specific parameters - will only be included if explicitly provided
+             # Note: Different providers support different parameters
+             size: Image size - For OpenAI: "1024x1024", "512x512", etc. For Google: use aspect_ratio instead
+             n: Number of images to generate
+             quality: Image quality (e.g., "standard", "hd") - supported by some providers
+             style: Image style (e.g., "vivid", "natural") - supported by some providers
+
+             # Standard parameters
+             response_format: Format of the response - "url" or "b64_json"
+             user: A unique identifier for the end-user
+
+             # OpenAI-specific parameters
+             background: Background style - "transparent", "opaque", or "auto"
+             moderation: Moderation level - "low" or "auto"
+             output_compression: Compression quality for output images (0-100)
+             output_format: Output format - "png", "jpeg", or "webp"
+
+             # Google-specific parameters
+             negative_prompt: Description of what to discourage in the generated images
+             guidance_scale: Controls how much the model adheres to the prompt
+             seed: Random seed for image generation
+             safety_filter_level: Filter level for safety filtering
+             person_generation: Controls generation of people ("dont_allow", "allow_adult", "allow_all")
+             include_safety_attributes: Whether to report safety scores of generated images
+             include_rai_reason: Whether to include filter reason if the image is filtered
+             language: Language of the text in the prompt
+             output_mime_type: MIME type of the generated image
+             add_watermark: Whether to add a watermark to the generated images
+             enhance_prompt: Whether to use prompt rewriting logic
+             aspect_ratio: Aspect ratio for Google models (e.g., "1:1", "16:9") - preferred over size
+
+             **kwargs: Additional parameters to pass to the API
+
+         Returns:
+             Response data with image URLs
+         """
+         # Format the model string
+         formatted_model = self._format_model_string(model)
+
+         # Extract provider and model name from model string if present
+         provider = "openai"  # Default provider
+         model_name = model
+         if "/" in model:
+             provider, model_name = model.split("/", 1)
+
+         # Filter out problematic parameters
+         filtered_kwargs = {}
+         for key, value in kwargs.items():
+             if key not in ["return_generator"]:  # List of parameters to exclude
+                 filtered_kwargs[key] = value
+
+         # Create the base request data with only the required parameters
+         data = {
+             "prompt": prompt,
+             "model": formatted_model,
+         }
+
+         # Add optional parameters only if they are explicitly provided
+         if n is not None:
+             data["n"] = n
+
+         # Handle size/aspect_ratio parameters based on provider
+         if provider.lower() == "google":
+             # For Google, use aspect_ratio instead of size
+             if aspect_ratio is not None:
+                 # Google's imagen-3 has specific supported aspect ratios
+                 if model_name == "imagen-3.0-generate-002" and aspect_ratio not in [
+                     "1:1",
+                     "3:4",
+                     "4:3",
+                     "9:16",
+                     "16:9",
+                 ]:
+                     aspect_ratio = "1:1"  # Default to 1:1 if not supported
+                 data["aspect_ratio"] = aspect_ratio
+             elif size is not None:
+                 # Convert size to aspect_ratio
+                 formatted_size = self._format_image_size_for_provider(
+                     size, provider, model_name
+                 )
+                 data["aspect_ratio"] = formatted_size
+             else:
+                 # Default aspect_ratio for Google
+                 data["aspect_ratio"] = "1:1"
+         elif provider.lower() == "xai":
+             # xAI doesn't support size parameter - do not include it
+             pass
+         elif size is not None and provider.lower() != "xai":
+             # For other providers (like OpenAI), use size as is
+             data["size"] = size
+
+         if quality is not None:
+             data["quality"] = quality
+         if style is not None:
+             data["style"] = style
+
+         # Add standard parameters if provided
+         if response_format is not None:
+             # Only add response_format if explicitly provided by the user
+             data["response_format"] = response_format
+
+         if user is not None:
+             data["user"] = user
+
+         # Add OpenAI-specific parameters if provided
+         if background is not None:
+             data["background"] = background
+         if moderation is not None:
+             data["moderation"] = moderation
+         if output_compression is not None:
+             data["output_compression"] = output_compression
+         if output_format is not None:
+             data["output_format"] = output_format
+
+         # Add Google-specific parameters if provided
+         if negative_prompt is not None:
+             data["negative_prompt"] = negative_prompt
+         if guidance_scale is not None:
+             data["guidance_scale"] = guidance_scale
+         if seed is not None:
+             data["seed"] = seed
+         if safety_filter_level is not None:
+             data["safety_filter_level"] = safety_filter_level
+         if person_generation is not None:
+             data["person_generation"] = person_generation
+         if include_safety_attributes is not None:
+             data["include_safety_attributes"] = include_safety_attributes
+         if include_rai_reason is not None:
+             data["include_rai_reason"] = include_rai_reason
+         if language is not None:
+             data["language"] = language
+         if output_mime_type is not None:
+             data["output_mime_type"] = output_mime_type
+         if add_watermark is not None:
+             data["add_watermark"] = add_watermark
+         if enhance_prompt is not None:
+             data["enhance_prompt"] = enhance_prompt
+
+         # Add any remaining parameters
+         if filtered_kwargs:
+             data["additional_params"] = filtered_kwargs
+
+         # Special case handling for specific models and providers
+         # Only include parameters supported by each model based on their JSON definitions
+         if provider.lower() == "openai" and "gpt-image" in model_name.lower():
+             # For OpenAI's gpt-image models, don't automatically add response_format
+             if "response_format" in data and response_format is None:
+                 del data["response_format"]
+
+         if provider.lower() == "xai" and "grok-2-image" in model_name.lower():
+             # For xAI's grok-2-image models, ensure size is not included
+             if "size" in data:
+                 del data["size"]
+
+         # Clean up any parameters that shouldn't be sent to specific providers
+         # This ensures we only send parameters that each provider supports
+         supported_params = self._get_supported_parameters_for_model(
+             provider, model_name
+         )
+         if supported_params:
+             for param in list(data.keys()):
+                 if param not in ["prompt", "model"] and param not in supported_params:
+                     del data[param]
+
+         return self._request("POST", IMAGE_ENDPOINT, data)
+
+     def text_to_speech(
+         self,
+         input: str,
+         model: str = DEFAULT_TTS_MODEL,
+         voice: Optional[str] = None,
+         response_format: Optional[str] = None,
+         speed: Optional[float] = None,
+         instructions: Optional[str] = None,
+         **kwargs,
+     ) -> Dict[str, Any]:
+         """
+         Generate audio from text using text-to-speech models.
+
+         Args:
+             input: The text to generate audio for
+             model: Model to use in the format "provider/model" (e.g., "openai/tts-1")
+             voice: Voice to use for the audio generation (provider-specific)
+             response_format: Format of the audio response (e.g., "mp3", "opus", "aac", "flac")
+             speed: Speed of the generated audio (0.25 to 4.0)
+             instructions: Optional instructions for the TTS generation
+             **kwargs: Additional parameters to pass to the API
+
+         Returns:
+             Response data with audio content
+
+         Examples:
+             Basic usage:
+                 response = client.text_to_speech("Hello, world!")
+
+             With specific voice and format:
+                 response = client.text_to_speech(
+                     "Hello, world!",
+                     model="openai/tts-1",
+                     voice="alloy",
+                     response_format="mp3",
+                     speed=1.0
+                 )
+
+             For different providers (when available):
+                 response = client.text_to_speech(
+                     "Hello, world!",
+                     model="provider/model-name",
+                     voice="provider-specific-voice"
+                 )
+         """
+         # Format the model string
+         formatted_model = self._format_model_string(model)
+
+         # Filter out problematic parameters
+         filtered_kwargs = {}
+         for key, value in kwargs.items():
+             if key not in ["return_generator"]:  # List of parameters to exclude
+                 filtered_kwargs[key] = value
+
+         # Create the base request data with required parameters
+         data = {
+             "input": input,
+             "model": formatted_model,
+         }
+
+         # Add optional parameters only if they are explicitly provided
+         if voice is not None:
+             data["voice"] = voice
+         if response_format is not None:
+             data["response_format"] = response_format
+         if speed is not None:
+             data["speed"] = speed
+         if instructions is not None and instructions.strip():
+             data["instructions"] = instructions
+
+         # Add any additional parameters from kwargs
+         if filtered_kwargs:
+             data["additional_params"] = filtered_kwargs
+
+         return self._request("POST", TTS_ENDPOINT, data)
+
+     def _get_supported_parameters_for_model(
+         self, provider: str, model_name: str
+     ) -> List[str]:
+         """
+         Get the list of supported parameters for a specific model.
+         This helps avoid sending unsupported parameters to providers.
+
+         Args:
+             provider: The provider name (e.g., 'openai', 'google', 'xai')
+             model_name: The model name (e.g., 'gpt-image-1', 'imagen-3.0-generate-002')
+
+         Returns:
+             List of parameter names supported by the model
+         """
+         # Define supported parameters for specific models
+         if provider.lower() == "openai" and "gpt-image" in model_name.lower():
+             return [
+                 "prompt",
+                 "size",
+                 "quality",
+                 "n",
+                 "user",
+                 "background",
+                 "moderation",
+                 "output_compression",
+                 "output_format",
+                 "style",
+             ]
+
+         elif provider.lower() == "google" and "imagen" in model_name.lower():
+             return [
+                 "prompt",
+                 "n",
+                 "negative_prompt",
+                 "aspect_ratio",
+                 "guidance_scale",
+                 "seed",
+                 "safety_filter_level",
+                 "person_generation",
+                 "include_safety_attributes",
+                 "include_rai_reason",
+                 "language",
+                 "output_mime_type",
+                 "output_compression_quality",
+                 "add_watermark",
+                 "enhance_prompt",
+                 "response_format",
+             ]
+
+         elif provider.lower() == "xai" and "grok-2-image" in model_name.lower():
+             return ["prompt", "n", "response_format"]
+
+         # Default case - allow all parameters
+         return []
+
+     def models(self, provider: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Get available models.
+
+         Args:
+             provider: Provider to filter by
+
+         Returns:
+             List of available models with pricing information
+         """
+         endpoint = MODEL_ENDPOINT
+         if provider:
+             endpoint = f"{MODEL_ENDPOINT}/{provider}"
+
+         return self._request("GET", endpoint)
+
+     def get_model_info(self, provider: str, model: str) -> Dict[str, Any]:
+         """
+         Get information about a specific model.
+
+         Args:
+             provider: Provider ID
+             model: Model ID
+
+         Returns:
+             Model information including pricing
+         """
+         return self._request("GET", f"{MODEL_ENDPOINT}/{provider}/{model}")
+
+     def get_usage(self) -> Dict[str, Any]:
+         """
+         Get usage statistics for the current user.
+
+         Returns:
+             Usage statistics
+         """
+         return self._request("GET", USAGE_ENDPOINT)
+
+     def test_connection(self) -> Dict[str, Any]:
+         """
+         Test the connection to the server and return server status information.
+
+         This method can be used to diagnose connection issues and verify that
+         the server is accessible and properly configured.
+
+         Returns:
+             Dictionary containing server status information
+         """
+         try:
+             # Try to access the base URL
+             response = self.session.get(self.base_url, timeout=self.timeout)
+
+             # Try to get server info if available
+             server_info = {}
+             try:
+                 if response.headers.get("Content-Type", "").startswith(
+                     "application/json"
+                 ):
+                     server_info = response.json()
+             except:
+                 pass
+
+             return {
+                 "status": "connected",
+                 "url": self.base_url,
+                 "status_code": response.status_code,
+                 "server_info": server_info,
+                 "headers": dict(response.headers),
+             }
+         except requests.RequestException as e:
+             return {
+                 "status": "error",
+                 "url": self.base_url,
+                 "error": str(e),
+                 "error_type": type(e).__name__,
+             }
+
+     def diagnose_request(self, endpoint: str, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Diagnose potential issues with a request before sending it to the server.
+
+         This method checks for common issues like malformed model strings,
+         invalid message formats, or missing required parameters.
+
+         Args:
+             endpoint: API endpoint
+             data: Request data
+
+         Returns:
+             Dictionary with diagnosis results
+         """
+         issues = []
+         warnings = []
+
+         # Check if this is a chat request
+         if endpoint == CHAT_ENDPOINT:
+             # Check model format
+             if "model" in data:
+                 model = data["model"]
+                 # Check if the model is already formatted as JSON
+                 if (
+                     isinstance(model, str)
+                     and model.startswith("{")
+                     and model.endswith("}")
+                 ):
+                     try:
+                         model_json = json.loads(model)
+                         if (
+                             not isinstance(model_json, dict)
+                             or "provider" not in model_json
+                             or "model" not in model_json
+                         ):
+                             issues.append(f"Invalid model JSON format: {model}")
+                     except json.JSONDecodeError:
+                         issues.append(f"Invalid model JSON format: {model}")
+                 elif not isinstance(model, str):
+                     issues.append(f"Model must be a string, got {type(model).__name__}")
+                 elif "/" not in model:
+                     issues.append(
+                         f"Model '{model}' is missing provider prefix (should be 'provider/model')"
+                     )
+                 else:
+                     provider, model_name = model.split("/", 1)
+                     if not provider or not model_name:
+                         issues.append(
+                             f"Invalid model format: '{model}'. Should be 'provider/model'"
+                         )
+             else:
+                 warnings.append("No model specified, will use default model")
+
+             # Check messages format
+             if "messages" in data:
+                 messages = data["messages"]
+                 if not isinstance(messages, list):
+                     issues.append(
+                         f"Messages must be a list, got {type(messages).__name__}"
+                     )
+                 elif not messages:
+                     issues.append("Messages list is empty")
+                 else:
+                     for i, msg in enumerate(messages):
+                         if not isinstance(msg, dict):
+                             issues.append(
+                                 f"Message {i} must be a dictionary, got {type(msg).__name__}"
+                             )
+                         elif "role" not in msg:
+                             issues.append(f"Message {i} is missing 'role' field")
+                         elif "content" not in msg:
+                             issues.append(f"Message {i} is missing 'content' field")
+             else:
+                 issues.append("No messages specified")
+
+         # Check if this is a completion request
+         elif endpoint == COMPLETION_ENDPOINT:
+             # Check model format (same as chat)
+             if "model" in data:
+                 model = data["model"]
+                 if not isinstance(model, str):
+                     issues.append(f"Model must be a string, got {type(model).__name__}")
+                 elif "/" not in model:
+                     issues.append(
+                         f"Model '{model}' is missing provider prefix (should be 'provider/model')"
+                     )
+             else:
+                 warnings.append("No model specified, will use default model")
+
+             # Check prompt
+             if "prompt" not in data:
+                 issues.append("No prompt specified")
+             elif not isinstance(data["prompt"], str):
+                 issues.append(
+                     f"Prompt must be a string, got {type(data['prompt']).__name__}"
+                 )
+
+         # Return diagnosis results
+         return {
+             "endpoint": endpoint,
+             "issues": issues,
+             "warnings": warnings,
+             "is_valid": len(issues) == 0,
+             "data": data,
+         }
+
+     def _handle_streaming_response(self, response):
+         """
+         Handle a streaming response.
+
+         Args:
+             response: Streaming response
+
+         Returns:
+             Generator yielding response chunks
+         """
+         try:
+             for line in response.iter_lines():
+                 if line:
+                     line = line.decode("utf-8")
+                     if line.startswith("data: "):
+                         data = line[6:]
+                         if data == "[DONE]":
+                             break
+                         try:
+                             # Parse JSON chunk
+                             chunk = json.loads(data)
+
+                             # For chat responses, return the processed chunk
+                             # with data field for backward compatibility
+                             if "choices" in chunk:
+                                 # For delta responses (streaming)
+                                 choice = chunk["choices"][0]
+                                 if "delta" in choice and "content" in choice["delta"]:
+                                     # Add a data field for backward compatibility
+                                     chunk["data"] = choice["delta"]["content"]
+                                 # For text responses (completion)
+                                 elif "text" in choice:
+                                     chunk["data"] = choice["text"]
+
+                             yield chunk
+                         except json.JSONDecodeError:
+                             # For raw text responses
+                             yield {"data": data}
+         finally:
+             response.close()
+
+     def close(self):
+         """Close the session."""
+         self.session.close()
+
+     def __enter__(self):
+         """Enter context manager."""
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Exit context manager."""
+         self.close()
+
+     def set_base_url(self, base_url: str) -> None:
+         """
+         Set a new base URL for the API.
+
+         Args:
+             base_url: New base URL for the API.
+         """
+         self.base_url = base_url
+         logger.debug(f"Base URL set to {base_url}")
+
+
+ IndoxRouter = Client
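
One behavioral detail of the new version worth noting: when `stream=True`, `chat()` and `completion()` return the generator produced by `_handle_streaming_response`, which parses each SSE chunk and attaches a convenience `data` field holding the text delta. A minimal consumption sketch (not part of the package diff; it assumes only the `Client.chat` signature and `data` field shown above, plus a valid `INDOX_ROUTER_API_KEY` in the environment, and the model name is illustrative):

```python
# Minimal sketch: consume a streamed chat completion with the 0.1.26 client.
from indoxRouter import Client

# The context manager closes the underlying requests.Session on exit.
with Client() as client:  # api_key falls back to INDOX_ROUTER_API_KEY
    for chunk in client.chat(
        [{"role": "user", "content": "Tell me a joke."}],
        model="openai/gpt-4o-mini",
        stream=True,
    ):
        # _handle_streaming_response adds "data" with each chunk's text delta
        print(chunk.get("data", ""), end="", flush=True)
print()
```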