webscout-7.7-py3-none-any.whl → webscout-7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (134)
  1. webscout/AIutel.py +2 -1
  2. webscout/Bard.py +12 -29
  3. webscout/DWEBS.py +477 -461
  4. webscout/Extra/__init__.py +2 -0
  5. webscout/Extra/autocoder/__init__.py +9 -9
  6. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -790
  7. webscout/Extra/autocoder/autocoder_utiles.py +332 -194
  8. webscout/Extra/gguf.py +682 -682
  9. webscout/Extra/tempmail/__init__.py +26 -0
  10. webscout/Extra/tempmail/async_utils.py +141 -0
  11. webscout/Extra/tempmail/base.py +156 -0
  12. webscout/Extra/tempmail/cli.py +187 -0
  13. webscout/Extra/tempmail/mail_tm.py +361 -0
  14. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  15. webscout/Provider/AI21.py +1 -1
  16. webscout/Provider/AISEARCH/DeepFind.py +2 -2
  17. webscout/Provider/AISEARCH/ISou.py +2 -2
  18. webscout/Provider/AISEARCH/felo_search.py +6 -6
  19. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  20. webscout/Provider/Aitopia.py +292 -0
  21. webscout/Provider/AllenAI.py +1 -1
  22. webscout/Provider/Andi.py +3 -3
  23. webscout/Provider/C4ai.py +1 -1
  24. webscout/Provider/ChatGPTES.py +3 -5
  25. webscout/Provider/ChatGPTGratis.py +4 -4
  26. webscout/Provider/Chatify.py +2 -2
  27. webscout/Provider/Cloudflare.py +3 -2
  28. webscout/Provider/DeepSeek.py +2 -2
  29. webscout/Provider/Deepinfra.py +288 -286
  30. webscout/Provider/ElectronHub.py +709 -634
  31. webscout/Provider/ExaChat.py +325 -0
  32. webscout/Provider/Free2GPT.py +2 -2
  33. webscout/Provider/Gemini.py +167 -179
  34. webscout/Provider/GithubChat.py +1 -1
  35. webscout/Provider/Glider.py +4 -4
  36. webscout/Provider/Groq.py +41 -27
  37. webscout/Provider/HF_space/qwen_qwen2.py +1 -1
  38. webscout/Provider/HeckAI.py +1 -1
  39. webscout/Provider/HuggingFaceChat.py +1 -1
  40. webscout/Provider/Hunyuan.py +1 -1
  41. webscout/Provider/Jadve.py +3 -3
  42. webscout/Provider/Koboldai.py +3 -3
  43. webscout/Provider/LambdaChat.py +3 -2
  44. webscout/Provider/Llama.py +3 -5
  45. webscout/Provider/Llama3.py +4 -12
  46. webscout/Provider/Marcus.py +3 -3
  47. webscout/Provider/OLLAMA.py +8 -8
  48. webscout/Provider/Openai.py +7 -3
  49. webscout/Provider/PI.py +1 -1
  50. webscout/Provider/Perplexitylabs.py +1 -1
  51. webscout/Provider/Phind.py +1 -1
  52. webscout/Provider/PizzaGPT.py +1 -1
  53. webscout/Provider/QwenLM.py +4 -7
  54. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +3 -1
  55. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +3 -3
  56. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  57. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  58. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  59. webscout/Provider/TTI/__init__.py +3 -1
  60. webscout/Provider/TTI/artbit/async_artbit.py +1 -1
  61. webscout/Provider/TTI/artbit/sync_artbit.py +1 -1
  62. webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
  63. webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
  64. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  65. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  66. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  67. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  68. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  69. webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
  70. webscout/Provider/TTS/utils.py +1 -1
  71. webscout/Provider/TeachAnything.py +1 -1
  72. webscout/Provider/TextPollinationsAI.py +232 -230
  73. webscout/Provider/TwoAI.py +1 -2
  74. webscout/Provider/Venice.py +4 -2
  75. webscout/Provider/VercelAI.py +234 -0
  76. webscout/Provider/WebSim.py +3 -2
  77. webscout/Provider/WiseCat.py +10 -12
  78. webscout/Provider/Youchat.py +1 -1
  79. webscout/Provider/__init__.py +10 -4
  80. webscout/Provider/ai4chat.py +1 -1
  81. webscout/Provider/aimathgpt.py +2 -6
  82. webscout/Provider/akashgpt.py +1 -1
  83. webscout/Provider/askmyai.py +4 -4
  84. webscout/Provider/{DARKAI.py → asksteve.py} +56 -77
  85. webscout/Provider/bagoodex.py +2 -2
  86. webscout/Provider/cerebras.py +1 -1
  87. webscout/Provider/chatglm.py +4 -4
  88. webscout/Provider/cleeai.py +1 -0
  89. webscout/Provider/copilot.py +21 -9
  90. webscout/Provider/elmo.py +1 -1
  91. webscout/Provider/flowith.py +1 -1
  92. webscout/Provider/freeaichat.py +64 -31
  93. webscout/Provider/gaurish.py +3 -5
  94. webscout/Provider/geminiprorealtime.py +1 -1
  95. webscout/Provider/granite.py +4 -4
  96. webscout/Provider/hermes.py +5 -5
  97. webscout/Provider/julius.py +1 -1
  98. webscout/Provider/koala.py +1 -1
  99. webscout/Provider/lepton.py +1 -1
  100. webscout/Provider/llama3mitril.py +4 -4
  101. webscout/Provider/llamatutor.py +1 -1
  102. webscout/Provider/llmchat.py +3 -3
  103. webscout/Provider/meta.py +1 -1
  104. webscout/Provider/multichat.py +10 -10
  105. webscout/Provider/promptrefine.py +1 -1
  106. webscout/Provider/searchchat.py +293 -0
  107. webscout/Provider/sonus.py +2 -2
  108. webscout/Provider/talkai.py +2 -2
  109. webscout/Provider/turboseek.py +1 -1
  110. webscout/Provider/tutorai.py +1 -1
  111. webscout/Provider/typegpt.py +5 -42
  112. webscout/Provider/uncovr.py +312 -297
  113. webscout/Provider/x0gpt.py +1 -1
  114. webscout/Provider/yep.py +64 -12
  115. webscout/__init__.py +3 -1
  116. webscout/cli.py +59 -98
  117. webscout/conversation.py +350 -17
  118. webscout/litprinter/__init__.py +59 -667
  119. webscout/optimizers.py +419 -419
  120. webscout/tempid.py +11 -11
  121. webscout/update_checker.py +14 -12
  122. webscout/utils.py +2 -2
  123. webscout/version.py +1 -1
  124. webscout/webscout_search.py +146 -87
  125. webscout/webscout_search_async.py +148 -27
  126. {webscout-7.7.dist-info → webscout-7.9.dist-info}/METADATA +92 -66
  127. webscout-7.9.dist-info/RECORD +248 -0
  128. webscout/Provider/EDITEE.py +0 -192
  129. webscout/litprinter/colors.py +0 -54
  130. webscout-7.7.dist-info/RECORD +0 -234
  131. {webscout-7.7.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
  132. {webscout-7.7.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
  133. {webscout-7.7.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
  134. {webscout-7.7.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
@@ -1,634 +1,709 @@
- import requests
- import json
- import os
- from typing import Any, Dict, Optional, Generator, Union
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from webscout import LitAgent
-
- class ElectronHub(Provider):
-     """
-     A class to interact with the ElectronHub API with LitAgent user-agent.
-     """
-
-     AVAILABLE_MODELS = [
-         # OpenAI GPT models
-         "gpt-3.5-turbo",
-         "gpt-3.5-turbo-16k",
-         "gpt-3.5-turbo-1106",
-         "gpt-3.5-turbo-0125",
-         "gpt-4",
-         "gpt-4-turbo",
-         "gpt-4-turbo-preview",
-         "gpt-4-0125-preview",
-         "gpt-4-1106-preview",
-         "gpt-4o",
-         "gpt-4o-2024-05-13",
-         "gpt-4o-2024-08-06",
-         "gpt-4o-2024-11-20",
-         "gpt-4o-mini",
-         "gpt-4o-mini-2024-07-18",
-         "chatgpt-4o-latest",
-         "gpt-4.5-preview",
-         "gpt-4.5-preview-2025-02-27",
-         "o1-mini",
-         "o1-preview",
-         "o1",
-         "o1-low",
-         "o1-high",
-         "o3-mini",
-         "o3-mini-low",
-         "o3-mini-high",
-         "o3-mini-online",
-
-         # Anthropic Claude models
-         "claude-2",
-         "claude-2.1",
-         "claude-3-opus-20240229",
-         "claude-3-sonnet-20240229",
-         "claude-3-haiku-20240307",
-         "claude-3-5-sonnet-20240620",
-         "claude-3-5-sonnet-20241022",
-         "claude-3-5-haiku-20241022",
-         "claude-3-7-sonnet-20250219",
-         "claude-3-7-sonnet-20250219-thinking",
-
-         # Google Gemini models
-         "gemini-1.0-pro",
-         "gemini-1.5-pro",
-         "gemini-1.5-pro-latest",
-         "gemini-1.5-flash-8b",
-         "gemini-1.5-flash",
-         "gemini-1.5-flash-latest",
-         "gemini-1.5-flash-exp",
-         "gemini-1.5-flash-online",
-         "gemini-exp-1206",
-         "learnlm-1.5-pro-experimental",
-         "gemini-2.0-flash-001",
-         "gemini-2.0-flash-exp",
-         "gemini-2.0-flash-thinking-exp",
-         "gemini-2.0-flash-thinking-exp-1219",
-         "gemini-2.0-flash-thinking-exp-01-21",
-         "gemini-2.0-flash-lite-preview-02-05",
-         "gemini-2.0-flash-lite-001",
-         "gemini-2.0-pro-exp-02-05",
-
-         # Google PaLM models
-         "palm-2-chat-bison",
-         "palm-2-codechat-bison",
-         "palm-2-chat-bison-32k",
-         "palm-2-codechat-bison-32k",
-
-         # Meta Llama models
-         "llama-2-13b-chat",
-         "llama-2-70b-chat",
-         "llama-guard-3-8b",
-         "code-llama-34b-instruct",
-         "llama-3-8b",
-         "llama-3-70b",
-         "llama-3.1-8b",
-         "llama-3.1-70b",
-         "llama-3.1-405b",
-         "llama-3.2-1b",
-         "llama-3.2-3b",
-         "llama-3.2-11b",
-         "llama-3.2-90b",
-         "llama-3.3-70b-instruct",
-         "llama-3.1-nemotron-70b-instruct",
-         "llama-3.1-tulu-3-8b",
-         "llama-3.1-tulu-3-70b",
-         "llama-3.1-tulu-3-405b",
-
-         # Mistral models
-         "mistral-7b-instruct",
-         "mistral-tiny-latest",
-         "mistral-tiny",
-         "mistral-tiny-2312",
-         "mistral-tiny-2407",
-         "mistral-small-24b-instruct-2501",
-         "mistral-small-latest",
-         "mistral-small",
-         "mistral-small-2312",
-         "mistral-small-2402",
-         "mistral-small-2409",
-         "mistral-medium-latest",
-         "mistral-medium",
-         "mistral-medium-2312",
-         "mistral-large-latest",
-         "mistral-large-2411",
-         "mistral-large-2407",
-         "mistral-large-2402",
-
-         # Mixtral models
-         "mixtral-8x7b",
-         "mixtral-8x22b",
-
-         # DeepSeek models
-         "deepseek-r1",
-         "deepseek-r1-nitro",
-         "deepseek-r1-distill-llama-8b",
-         "deepseek-r1-distill-llama-70b",
-         "deepseek-r1-distill-qwen-1.5b",
-         "deepseek-r1-distill-qwen-7b",
-         "deepseek-r1-distill-qwen-14b",
-         "deepseek-r1-distill-qwen-32b",
-         "deepseek-v3",
-         "deepseek-coder",
-         "deepseek-v2.5",
-         "deepseek-vl2",
-         "deepseek-llm-67b-chat",
-         "deepseek-math-7b-instruct",
-         "deepseek-coder-6.7b-base-awq",
-         "deepseek-coder-6.7b-instruct-awq",
-
-         # Qwen models
-         "qwen-1.5-0.5b-chat",
-         "qwen-1.5-1.8b-chat",
-         "qwen-1.5-14b-chat-awq",
-         "qwen-1.5-7b-chat-awq",
-         "qwen-2-7b-instruct",
-         "qwen-2-72b-instruct",
-         "qwen-2-vl-7b-instruct",
-         "qwen-2-vl-72b-instruct",
-         "qwen-2.5-7b-instruct",
-         "qwen-2.5-32b-instruct",
-         "qwen-2.5-72b-instruct",
-         "qwen-2.5-coder-32b-instruct",
-         "qwq-32b-preview",
-         "qvq-72b-preview",
-         "qwen-vl-plus",
-         "qwen2.5-vl-72b-instruct",
-         "qwen-turbo",
-         "qwen-plus",
-         "qwen-max",
-
-         # Microsoft models
-         "phi-4",
-         "phi-3.5-mini-128k-instruct",
-         "phi-3-medium-128k-instruct",
-         "phi-3-mini-128k-instruct",
-         "phi-2",
-
-         # Gemma models
-         "gemma-7b-it",
-         "gemma-2-9b-it",
-         "gemma-2-27b-it",
-
-         # Various other models
-         "nemotron-4-340b",
-         "pixtral-large-2411",
-         "pixtral-12b",
-         "open-mistral-nemo",
-         "open-mistral-nemo-2407",
-         "open-mixtral-8x22b-2404",
-         "open-mixtral-8x7b",
-         "codestral-mamba",
-         "codestral-latest",
-         "codestral-2405",
-         "codestral-2412",
-         "codestral-2501",
-         "codestral-2411-rc5",
-         "ministral-3b",
-         "ministral-3b-2410",
-         "ministral-8b",
-         "ministral-8b-2410",
-         "mistral-saba-latest",
-         "mistral-saba-2502",
-         "f1-mini-preview",
-         "f1-preview",
-         "dolphin-mixtral-8x7b",
-         "dolphin-mixtral-8x22b",
-         "dolphin3.0-mistral-24b",
-         "dolphin3.0-r1-mistral-24b",
-         "dbrx-instruct",
-         "command",
-         "command-light",
-         "command-nightly",
-         "command-light-nightly",
-         "command-r",
-         "command-r-03-2024",
-         "command-r-08-2024",
-         "command-r-plus",
-         "command-r-plus-04-2024",
-         "command-r-plus-08-2024",
-         "command-r7b-12-2024",
-         "c4ai-aya-expanse-8b",
-         "c4ai-aya-expanse-32b",
-         "reka-flash",
-         "reka-core",
-         "grok-2",
-         "grok-2-mini",
-         "grok-beta",
-         "grok-vision-beta",
-         "grok-2-1212",
-         "grok-2-vision-1212",
-         "grok-3-early",
-         "grok-3-preview-02-24",
-         "r1-1776",
-         "sonar-deep-research",
-         "sonar-reasoning-pro",
-         "sonar-reasoning",
-         "sonar-pro",
-         "sonar",
-         "llama-3.1-sonar-small-128k-online",
-         "llama-3.1-sonar-large-128k-online",
-         "llama-3.1-sonar-huge-128k-online",
-         "llama-3.1-sonar-small-128k-chat",
-         "llama-3.1-sonar-large-128k-chat",
-         "wizardlm-2-7b",
-         "wizardlm-2-8x22b",
-         "minimax-01",
-         "jamba-1.5-large",
-         "jamba-1.5-mini",
-         "jamba-instruct",
-         "openchat-3.5-7b",
-         "openchat-3.6-8b",
-         "aion-1.0",
-         "aion-1.0-mini",
-         "aion-rp-llama-3.1-8b",
-         "nova-lite-v1",
-         "nova-micro-v1",
-         "nova-pro-v1",
-         "inflection-3-pi",
-         "inflection-3-productivity",
-         "mytho-max-l2-13b",
-         "deephermes-3-llama-3-8b-preview",
-         "nous-hermes-llama2-13b",
-         "hermes-3-llama-3.1-8b",
-         "hermes-3-llama-3.1-405b",
-         "hermes-2-pro-llama-3-8b",
-         "nous-hermes-2-mixtral-8x7b-dpo",
-
-         # Chinese models
-         "doubao-lite-4k",
-         "doubao-lite-32k",
-         "doubao-pro-4k",
-         "doubao-pro-32k",
-         "ernie-lite-8k",
-         "ernie-tiny-8k",
-         "ernie-speed-8k",
-         "ernie-speed-128k",
-         "hunyuan-lite",
-         "hunyuan-standard-2025-02-10",
-         "hunyuan-large-2025-02-10",
-         "glm-3-130b",
-         "glm-4-flash",
-         "glm-4-long",
-         "glm-4-airx",
-         "glm-4-air",
-         "glm-4-plus",
-         "glm-4-alltools",
-         "yi-vl-plus",
-         "yi-large",
-         "yi-large-turbo",
-         "yi-large-rag",
-         "yi-medium",
-         "yi-34b-chat-200k",
-         "spark-desk-v1.5",
-
-         # Other AI models
-         "step-2-16k-exp-202412",
-         "granite-3.1-2b-instruct",
-         "granite-3.1-8b-instruct",
-         "solar-0-70b-16bit",
-         "mistral-nemo-inferor-12b",
-         "unslopnemo-12b",
-         "rocinante-12b-v1.1",
-         "rocinante-12b-v1",
-         "sky-t1-32b-preview",
-         "lfm-3b",
-         "lfm-7b",
-         "lfm-40b",
-         "rogue-rose-103b-v0.2",
-         "eva-llama-3.33-70b-v0.0",
-         "eva-llama-3.33-70b-v0.1",
-         "eva-qwen2.5-72b",
-         "eva-qwen2.5-32b-v0.2",
-         "sorcererlm-8x22b",
-         "mythalion-13b",
-         "zephyr-7b-beta",
-         "zephyr-7b-alpha",
-         "toppy-m-7b",
-         "openhermes-2.5-mistral-7b",
-         "l3-lunaris-8b",
-         "llama-3.1-lumimaid-8b",
-         "llama-3.1-lumimaid-70b",
-         "llama-3-lumimaid-8b",
-         "llama-3-lumimaid-70b",
-         "llama3-openbiollm-70b",
-         "l3.1-70b-hanami-x1",
-         "magnum-v4-72b",
-         "magnum-v2-72b",
-         "magnum-72b",
-         "mini-magnum-12b-v1.1",
-         "remm-slerp-l2-13b",
-         "midnight-rose-70b",
-         "athene-v2-chat",
-         "airoboros-l2-70b",
-         "xwin-lm-70b",
-         "noromaid-20b",
-         "violet-twilight-v0.2",
-         "saiga-nemo-12b",
-         "l3-8b-stheno-v3.2",
-         "llama-3.1-8b-lexi-uncensored-v2",
-         "l3.3-70b-euryale-v2.3",
-         "l3.3-ms-evayale-70b",
-         "70b-l3.3-cirrus-x1",
-         "l31-70b-euryale-v2.2",
-         "l3-70b-euryale-v2.1",
-         "fimbulvetr-11b-v2",
-         "goliath-120b",
-
-         # Image generation models
-         "weaver",
-         "sdxl",
-         "sdxl-turbo",
-         "sdxl-lightning",
-         "stable-diffusion-3",
-         "stable-diffusion-3-2b",
-         "stable-diffusion-3.5-large",
-         "stable-diffusion-3.5-turbo",
-         "playground-v3",
-         "playground-v2.5",
-         "animaginexl-3.1",
-         "realvisxl-4.0",
-         "imagen",
-         "imagen-3-fast",
-         "imagen-3",
-         "luma-photon",
-         "luma-photon-flash",
-         "recraft-20b",
-         "recraft-v3",
-         "grok-2-aurora",
-         "flux-schnell",
-         "flux-dev",
-         "flux-pro",
-         "flux-1.1-pro",
-         "flux-1.1-pro-ultra",
-         "flux-1.1-pro-ultra-raw",
-         "flux-realism",
-         "flux-half-illustration",
-         "ideogram-v2-turbo",
-         "ideogram-v2",
-         "amazon-titan",
-         "amazon-titan-v2",
-         "nova-canvas",
-         "omni-gen",
-         "aura-flow",
-         "cogview-3-flash",
-         "sana",
-         "kandinsky-3",
-         "dall-e-3",
-         "midjourney-v6.1",
-         "midjourney-v6",
-         "midjourney-v5.2",
-         "midjourney-v5.1",
-         "midjourney-v5",
-         "niji-v6",
-         "niji-v5",
-
-         # Video generation models
-         "t2v-turbo",
-         "cogvideox-5b",
-         "ltx-video",
-         "mochi-1",
-         "dream-machine",
-         "hailuo-ai",
-         "haiper-video-2.5",
-         "haiper-video-2",
-         "hunyuan-video",
-         "kling-video/v1/standard/text-to-video",
-         "kling-video/v1/pro/text-to-video",
-         "kling-video/v1.6/standard/text-to-video",
-         "kling-video/v1.5/pro/text-to-video",
-         "kokoro-82m",
-
-         # Audio models
-         "elevenlabs",
-         "myshell-tts",
-         "deepinfra-tts",
-         "whisper-large-v3",
-         "distil-large-v3",
-
-         # Embedding and moderation models
-         "text-embedding-3-large",
-         "text-embedding-3-small",
-         "omni-moderation-latest",
-         "omni-moderation-2024-09-26",
-         "text-moderation-latest",
-         "text-moderation-stable",
-         "text-moderation-007"
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 16000,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "claude-3-7-sonnet-20250219",
-         system_prompt: str = "You're helpful assistant that can help me with my questions.",
-         api_key: str = None
-     ):
-         """Initializes the ElectronHub API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.url = "https://api.electronhub.top/v1/chat/completions"
-         # Use LitAgent for user-agent
-         self.headers = {
-             'User-Agent': LitAgent().random(),
-             'Content-Type': 'application/json',
-             'Accept': '*/*',
-             'Accept-Language': 'en-US,en;q=0.9',
-             'DNT': '1',
-             'Origin': 'https://playground.electronhub.top',
-             'Referer': 'https://playground.electronhub.top/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'Priority': 'u=1, i'
-         }
-
-         # Add API key if provided
-         if api_key:
-             self.headers['Authorization'] = f'Bearer {api_key}'
-         self.system_prompt = system_prompt
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-         self.session.proxies.update(proxies)
-
-         self.is_conversation = is_conversation
-         self.max_tokens = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = True,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         temperature: float = 0.5,
-         top_p: float = 1.0,
-         top_k: int = 5,
-     ) -> Union[Dict[str, Any], Generator]:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Construct messages for the conversation
-         messages = [
-             {"role": "system", "content": self.system_prompt},
-             {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
-         ]
-
-         # Payload construction based on ElectronHub API requirements
-         payload = {
-             "model": self.model,
-             "messages": messages,
-             "stream": stream,
-             "stream_options": {"include_usage": True},
-             "max_tokens": self.max_tokens,
-             "temperature": temperature,
-             "top_p": top_p,
-             "top_k": top_k,
-             "web_search": False,
-             "customId": None
-         }
-
-         def for_stream():
-             try:
-                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
-                     if response.status_code != 200:
-                         raise exceptions.FailedToGenerateResponseError(
-                             f"Request failed with status code {response.status_code}"
-                         )
-
-                     streaming_text = ""
-                     for line in response.iter_lines(decode_unicode=True):
-                         if line:
-                             line = line.strip()
-                             if line.startswith("data: "):
-                                 json_str = line[6:]
-                                 if json_str == "[DONE]":
-                                     break
-                                 try:
-                                     json_data = json.loads(json_str)
-                                     if 'choices' in json_data:
-                                         choice = json_data['choices'][0]
-                                         if 'delta' in choice and 'content' in choice['delta']:
-                                             content = choice['delta']['content']
-                                             # Fix: Check if content is not None before concatenating
-                                             if content is not None:
-                                                 streaming_text += content
-                                                 resp = dict(text=content)
-                                                 yield resp if raw else resp
-                                 except json.JSONDecodeError:
-                                     continue
-                                 except Exception as e:
-                                     print(f"Error processing chunk: {e}")
-                                     continue
-
-                     self.conversation.update_chat_history(prompt, streaming_text)
-
-             except requests.RequestException as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-         def for_non_stream():
-             collected_response = ""
-             try:
-                 for chunk in for_stream():
-                     if isinstance(chunk, dict) and "text" in chunk:
-                         content = chunk["text"]
-                         if content is not None:
-                             collected_response += content
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Error during non-stream processing: {str(e)}")
-
-             self.last_response = {"text": collected_response}
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = True,
-         optimizer: str = None,
-         conversationally: bool = False,
-         temperature: float = 0.5,
-         top_p: float = 1.0,
-         top_k: int = 5,
-     ) -> str:
-         def for_stream():
-             for response in self.ask(
-                 prompt,
-                 True,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-                 temperature=temperature,
-                 top_p=top_p,
-                 top_k=top_k
-             ):
-                 yield self.get_message(response)
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                     temperature=temperature,
-                     top_p=top_p,
-                     top_k=top_k
-                 )
-             )
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == "__main__":
-     from rich import print
-     # You need to provide your own API key
-     api_key = "" # U can get free API key from https://playground.electronhub.top/console
-     ai = ElectronHub(timeout=5000, api_key=api_key)
-     response = ai.chat("hi there, how are you today?", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class ElectronHub(Provider):
+     """
+     A class to interact with the ElectronHub API with LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         # OpenAI GPT models
+         "gpt-3.5-turbo",
+         "gpt-3.5-turbo-16k",
+         "gpt-3.5-turbo-1106",
+         "gpt-3.5-turbo-0125",
+         "gpt-4",
+         "gpt-4-turbo",
+         "gpt-4-turbo-preview",
+         "gpt-4-0125-preview",
+         "gpt-4-1106-preview",
+         "gpt-4o",
+         "gpt-4o-2024-05-13",
+         "gpt-4o-2024-08-06",
+         "gpt-4o-2024-11-20",
+         "gpt-4o-search-preview",
+         "gpt-4o-search-preview-2025-03-11",
+         "gpt-4o-mini",
+         "gpt-4o-mini-2024-07-18",
+         "gpt-4o-mini-search-preview",
+         "gpt-4o-mini-search-preview-2025-03-11",
+         "chatgpt-4o-latest",
+         "gpt-4.5-preview",
+         "gpt-4.5-preview-2025-02-27",
+         "o1-mini",
+         "o1-preview",
+         "o1",
+         "o1-low",
+         "o1-high",
+         "o3-mini",
+         "o3-mini-low",
+         "o3-mini-high",
+         "o3-mini-online",
+
+         # Anthropic Claude models
+         "claude-2",
+         "claude-2.1",
+         "claude-3-haiku-20240307",
+         "claude-3-5-haiku-20241022",
+         "claude-3-opus-20240229",
+         "claude-3-sonnet-20240229",
+         "claude-3-5-sonnet-20240620",
+         "claude-3-5-sonnet-20241022",
+         "claude-3-7-sonnet-20250219",
+         "claude-3-7-sonnet-20250219-thinking",
+         "claude-3-opus-20240229:safe",
+         "claude-3-sonnet-20240229:safe",
+         "claude-3-5-sonnet-20240620:safe",
+         "claude-3-5-sonnet-20241022:safe",
+         "claude-3-7-sonnet-20250219:safe",
+         "claude-3-7-sonnet-20250219-thinking:safe",
+
+         # Google Gemini models
+         "gemini-1.0-pro",
+         "gemini-1.0-pro-vision",
+         "gemini-1.5-pro",
+         "gemini-1.5-pro-latest",
+         "gemini-1.5-flash-8b",
+         "gemini-1.5-flash",
+         "gemini-1.5-flash-latest",
+         "gemini-1.5-flash-exp",
+         "gemini-1.5-flash-online",
+         "gemini-exp-1206",
+         "learnlm-1.5-pro-experimental",
+         "gemini-2.0-flash-001",
+         "gemini-2.0-flash-exp",
+         "gemini-2.0-flash-thinking-exp",
+         "gemini-2.0-flash-thinking-exp-1219",
+         "gemini-2.0-flash-thinking-exp-01-21",
+         "gemini-2.0-flash-lite-preview-02-05",
+         "gemini-2.0-flash-lite-001",
+         "gemini-2.0-pro-exp-02-05",
+         "gemini-2.5-pro-exp-03-25",
+
+         # Google PaLM models
+         "palm-2-chat-bison",
+         "palm-2-codechat-bison",
+         "palm-2-chat-bison-32k",
+         "palm-2-codechat-bison-32k",
+
+         # Meta Llama models
+         "llama-2-13b-chat-awq",
+         "llama-2-7b-chat-fp16",
+         "llama-2-7b-chat-int8",
+         "llama-2-70b-chat",
+         "llama-3-8b-instruct",
+         "llama-3-8b-instruct-awq",
+         "llama-3-70b",
+         "llama-3.1-8b-instruct",
+         "llama-3.1-8b-instruct-awq",
+         "llama-3.1-8b-instruct-fp8",
+         "llama-3.1-70b",
+         "llama-3.1-405b",
+         "llama-3.2-11b-vision-instruct",
+         "llama-3.2-1b-instruct",
+         "llama-3.2-3b-instruct",
+         "llama-3.2-90b",
+         "llama-3.3-70b-instruct-fp8-fast",
+         "llama-guard-3-8b",
+         "llamaguard-7b-awq",
+         "meta-llama-3-8b-instruct",
+         "llama-3.1-nemotron-70b-instruct",
+         "llama-3.1-tulu-3-70b",
+         "llama-3.1-tulu-3-405b",
+         "llama-3.1-sonar-small-128k-online",
+         "llama-3.1-sonar-large-128k-online",
+         "llama-3.1-sonar-huge-128k-online",
+         "llama-3.1-sonar-small-128k-chat",
+         "llama-3.1-sonar-large-128k-chat",
+         "llama-3.1-swallow-70b-instruct-v0.3",
+         "llama-3.1-8b-lexi-uncensored-v2",
+         "llama-3.1-lumimaid-8b",
+         "llama-3.1-lumimaid-70b",
+         "llama3-openbiollm-70b",
+
+         # Mistral models
+         "mistral-7b-instruct-v0.1",
+         "mistral-7b-instruct-v0.1-awq",
+         "mistral-7b-instruct-v0.2",
+         "mistral-tiny-latest",
+         "mistral-tiny",
+         "mistral-tiny-2312",
+         "mistral-tiny-2407",
+         "mistral-small-3.1-24b-instruct",
+         "mistral-small-24b-instruct-2501",
+         "mistral-small-latest",
+         "mistral-small",
+         "mistral-small-2312",
+         "mistral-small-2402",
+         "mistral-small-2409",
+         "mistral-medium-latest",
+         "mistral-medium",
+         "mistral-medium-2312",
+         "mistral-large-latest",
+         "mistral-large-2411",
+         "mistral-large-2407",
+         "mistral-large-2402",
+         "open-mistral-nemo",
+         "open-mistral-nemo-2407",
+         "open-mixtral-8x22b-2404",
+         "open-mixtral-8x7b",
+
+         # Codestral models
+         "codestral-mamba",
+         "codestral-latest",
+         "codestral-2405",
+         "codestral-2412",
+         "codestral-2501",
+
+         # Ministral models
+         "ministral-3b",
+         "ministral-3b-2410",
+         "ministral-8b",
+         "ministral-8b-2410",
+
+         # Mistral Saba models
+         "mistral-saba-latest",
+         "mistral-saba-2502",
+
+         # Mixtral models
+         "mixtral-8x7b",
+         "mixtral-8x22b",
+
+         # DeepSeek models
+         "deepseek-coder",
+         "deepseek-coder-6.7b-base-awq",
+         "deepseek-coder-6.7b-instruct-awq",
+         "deepseek-llm-67b-chat",
+         "deepseek-math-7b-instruct",
+         "deepseek-r1",
+         "deepseek-r1-distill-llama-70b",
+         "deepseek-r1-distill-llama-8b",
+         "deepseek-r1-distill-qwen-1.5b",
+         "deepseek-r1-distill-qwen-14b",
+         "deepseek-r1-distill-qwen-32b",
+         "deepseek-r1-distill-qwen-7b",
+         "deepseek-r1-nitro",
+         "deepseek-r1-zero",
+         "deepseek-v2.5",
+         "deepseek-v3",
+         "deepseek-v3-0324",
+         "deepseek-vl2",
+
+         # Qwen models
+         "qwen-1.5-0.5b-chat",
+         "qwen-1.5-1.8b-chat",
+         "qwen-1.5-14b-chat-awq",
+         "qwen-1.5-7b-chat-awq",
+         "qwen-2-7b-instruct",
+         "qwen-2-72b-instruct",
+         "qwen-2-vl-7b-instruct",
+         "qwen-2-vl-72b-instruct",
+         "qwen-2.5-7b-instruct",
+         "qwen-2.5-32b-instruct",
+         "qwen-2.5-72b-instruct",
+         "qwen-2.5-coder-32b-instruct",
+         "qwq-32b-preview",
+         "qwq-32b",
+         "qwen-vl-plus",
+         "qwen2.5-vl-3b-instruct",
+         "qwen2.5-vl-7b-instruct",
+         "qwen2.5-vl-72b-instruct",
+         "qwen-turbo",
+         "qwen-plus",
+         "qwen-max",
+
+         # F1 models
+         "f1-mini-preview",
+         "f1-preview",
+
+         # Command models
+         "command",
+         "command-light",
+         "command-nightly",
+         "command-light-nightly",
+         "command-r",
+         "command-r-03-2024",
+         "command-r-08-2024",
+         "command-r-plus",
+         "command-r-plus-04-2024",
+         "command-r-plus-08-2024",
+         "command-r7b-12-2024",
+         "command-a-03-2025",
+
+         # Dolphin models
+         "dolphin-mixtral-8x7b",
+         "dolphin-mixtral-8x22b",
+         "dolphin3.0-mistral-24b",
+         "dolphin3.0-r1-mistral-24b",
+
+         # Cohere models
+         "c4ai-aya-expanse-8b",
+         "c4ai-aya-expanse-32b",
+
+         # Reka models
+         "reka-flash",
+         "reka-core",
+         "reka-flash-3",
+
+         # OpenChat models
+         "openchat-3.5-0106",
+         "openchat-3.5-7b",
+         "openchat-3.6-8b",
+
+         # Yi models
+         "yi-34b-chat-200k",
+         "yi-large",
+         "yi-large-rag",
+         "yi-large-turbo",
+         "yi-medium",
+         "yi-vl-plus",
+
+         # Phi models
+         "phi-2",
+         "phi-3-mini-128k-instruct",
+         "phi-3-medium-128k-instruct",
+         "phi-3.5-mini-128k-instruct",
+         "phi-4",
+         "phi-4-multimodal-instruct",
+
+         # Claude models by AION-LABS
+         "aion-1.0",
+         "aion-1.0-mini",
+         "aion-rp-llama-3.1-8b",
+
+         # Other AI models
+         "nemotron-4-340b",
+         "pixtral-large-2411",
+         "pixtral-12b",
+         "dbrx-instruct",
+         "grok-2",
+         "grok-2-mini",
+         "grok-beta",
+         "grok-vision-beta",
+         "grok-2-1212",
+         "grok-2-vision-1212",
+         "grok-3-early",
+         "grok-3-preview-02-24",
+         "r1-1776",
+         "sonar-deep-research",
+         "sonar-reasoning-pro",
+         "sonar-reasoning",
+         "sonar-pro",
+         "sonar",
+         "wizardlm-2-7b",
+         "wizardlm-2-8x22b",
+         "minimax-01",
+         "jamba-1.5-large",
+         "jamba-1.5-mini",
+         "jamba-1.6-large",
+         "jamba-1.6-mini",
+         "jamba-instruct",
+
+         # Chinese language models
+         "doubao-lite-4k",
+         "doubao-lite-32k",
+         "doubao-pro-4k",
+         "doubao-pro-32k",
+         "ui-tars-72b-dpo",
+         "ernie-lite-8k",
+         "ernie-tiny-8k",
+         "ernie-speed-8k",
+         "ernie-speed-128k",
+         "hunyuan-lite",
+         "hunyuan-standard-2025-02-10",
+         "hunyuan-large-2025-02-10",
+         "glm-3-130b",
+         "glm-4-flash",
+         "glm-4-long",
+         "glm-4-airx",
+         "glm-4-air",
+         "glm-4-plus",
+         "glm-4-alltools",
+         "spark-desk-v1.5",
+
+         # Other language models
+         "discolm-german-7b-v1-awq",
+         "falcon-7b-instruct",
+         "neural-chat-7b-v3-1-awq",
+         "openhermes-2.5-mistral-7b",
+         "openhermes-2.5-mistral-7b-awq",
+         "sqlcoder-7b-2",
+         "starling-lm-7b-beta",
+         "tinyllama-1.1b-chat-v1.0",
+         "una-cybertron-7b-v2-bf16",
+         "zephyr-7b-beta",
+         "zephyr-7b-beta-awq",
+
+         # Inference-optimized models
+         "mistral-nemo-inferor-12b",
+         "rocinante-12b-v1",
+         "rocinante-12b-v1.1",
+         "unslopnemo-12b",
+
+         # Additional specialty models
+         "granite-3.1-2b-instruct",
+         "granite-3.1-8b-instruct",
+         "solar-0-70b-16bit",
+         "olympiccoder-7b",
+         "olympiccoder-32b",
+         "anubis-pro-105b-v1",
+         "fallen-llama-3.3-r1-70b-v1",
+         "skyfall-36b-v2",
+         "wayfarer-large-70b-llama-3.3",
+         "qwq-32b-snowdrop-v0",
+         "qwq-32b-abliterated",
+         "sky-t1-32b-preview",
+         "tiny-r1-32b-preview",
+         "lfm-3b",
+         "lfm-7b",
+         "lfm-40b",
+         "eva-llama-3.33-70b-v0.0",
+         "eva-llama-3.33-70b-v0.1",
+         "eva-qwen2.5-72b",
+         "eva-qwen2.5-32b-v0.2",
+         "sorcererlm-8x22b",
+         "mythalion-13b",
+         "toppy-m-7b",
+         "l3-lunaris-8b",
+         "l3.1-70b-hanami-x1",
+         "lumimaid-magnum-v4-12b",
+         "magnum-v4-72b",
+         "magnum-v4-12b",
+         "magnum-v3-34b",
+         "magnum-v2.5-12b-kto",
+         "magnum-v2-72b",
+         "magnum-v2-32b",
+         "magnum-v2-12b",
+         "magnum-72b",
+         "mini-magnum-12b-v1.1",
+         "remm-slerp-l2-13b",
+         "patricide-12b-unslop-mell",
+         "midnight-rose-70b",
+         "airoboros-l2-13b-gpt4-m2.0",
+         "airoboros-l2-70b",
+         "xwin-lm-70b",
+         "noromaid-20b",
+         "violet-twilight-v0.2",
+         "saiga-nemo-12b",
+         "l3-8b-stheno-v3.2",
+         "l3.3-electra-r1-70b",
+         "l3.3-cu-mai-r1-70b",
+         "l3.3-mokume-gane-r1-70b-v1.1",
+         "l3.3-70b-euryale-v2.3",
+         "l3.3-ms-evayale-70b",
+         "70b-l3.3-cirrus-x1",
+         "l31-70b-euryale-v2.2",
+         "l3-70b-euryale-v2.1",
+         "fimbulvetr-11b-v2",
+         "goliath-120b",
+         "hermes-2-pro-mistral-7b",
+         "mytho-max-l2-13b",
+         "deephermes-3-llama-3-8b-preview",
+         "nous-hermes-llama2-13b",
+         "hermes-3-llama-3.1-405b",
+         "nous-hermes-2-mixtral-8x7b-dpo",
+         "nova-lite-v1",
+         "nova-micro-v1",
+         "nova-pro-v1",
+         "inflection-3-pi",
+         "inflection-3-productivity",
+
+         # Image generation models
+         "weaver",
+         "sdxl",
+         "sdxl-turbo",
+         "sdxl-lightning",
+         "stable-diffusion-3",
+         "stable-diffusion-3-2b",
+         "stable-diffusion-3.5-large",
+         "stable-diffusion-3.5-turbo",
+         "playground-v3",
+         "playground-v2.5",
+         "animaginexl-3.1",
+         "realvisxl-4.0",
+         "imagen",
+         "imagen-3-fast",
+         "imagen-3",
+         "luma-photon",
+         "luma-photon-flash",
+         "recraft-20b",
+         "recraft-v3",
+         "grok-2-aurora",
+         "flux-schnell",
+         "flux-dev",
+         "flux-pro",
+         "flux-1.1-pro",
+         "flux-1.1-pro-ultra",
+         "flux-1.1-pro-ultra-raw",
+         "flux-realism",
+         "flux-half-illustration",
+         "ideogram-v2-turbo",
+         "ideogram-v2",
+         "amazon-titan",
+         "amazon-titan-v2",
+         "nova-canvas",
+         "omni-gen",
+         "aura-flow",
+         "cogview-3-flash",
+         "sana",
+         "kandinsky-3",
+         "dall-e-3",
+         "midjourney-v6.1",
+         "midjourney-v6",
+         "midjourney-v5.2",
+         "midjourney-v5.1",
+         "midjourney-v5",
+         "midjourney-v7",
+         "niji-v6",
+         "niji-v5",
+
+         # Video generation models
+         "t2v-turbo",
+         "cogvideox-5b",
+         "ltx-video",
+         "mochi-1",
+         "dream-machine",
+         "hailuo-ai",
+         "haiper-video-2.5",
+         "haiper-video-2",
+         "hunyuan-video",
+         "kling-video/v1/standard/text-to-video",
+         "kling-video/v1/pro/text-to-video",
+         "kling-video/v1.6/standard/text-to-video",
+         "kling-video/v1.5/pro/text-to-video",
+         "kokoro-82m",
+
+         # Audio models
+         "elevenlabs",
+         "myshell-tts",
+         "deepinfra-tts",
+         "whisper-large-v3",
+         "distil-large-v3",
+
+         # Embedding and moderation models
+         "text-embedding-3-large",
+         "text-embedding-3-small",
+         "omni-moderation-latest",
+         "omni-moderation-2024-09-26",
+         "text-moderation-latest",
+         "text-moderation-stable",
+         "text-moderation-007"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 16000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "claude-3-7-sonnet-20250219",
+         system_prompt: str = "You're helpful assistant that can help me with my questions.",
+         api_key: str = None
+     ):
+         """Initializes the ElectronHub API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://api.electronhub.top/v1/chat/completions"
+         # Use LitAgent for user-agent
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Content-Type': 'application/json',
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'DNT': '1',
+             'Origin': 'https://playground.electronhub.top',
+             'Referer': 'https://playground.electronhub.top/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'Priority': 'u=1, i'
+         }
+
+         # Add API key if provided
+         if api_key:
+             self.headers['Authorization'] = f'Bearer {api_key}'
+         self.system_prompt = system_prompt
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: float = 0.5,
+         top_p: float = 1.0,
+         top_k: int = 5,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Construct messages for the conversation
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
+         ]
+
+         # Payload construction based on ElectronHub API requirements
+         payload = {
+             "model": self.model,
+             "messages": messages,
+             "stream": stream,
+             "stream_options": {"include_usage": True},
+             "max_tokens": self.max_tokens,
+             "temperature": temperature,
+             "top_p": top_p,
+             "top_k": top_k,
+             "web_search": False,
+             "customId": None
+         }
+
+         def for_stream():
+             try:
+                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             line = line.strip()
+                             if line.startswith("data: "):
+                                 json_str = line[6:]
+                                 if json_str == "[DONE]":
+                                     break
+                                 try:
+                                     json_data = json.loads(json_str)
+                                     if 'choices' in json_data:
+                                         choice = json_data['choices'][0]
+                                         if 'delta' in choice and 'content' in choice['delta']:
+                                             content = choice['delta']['content']
+                                             # Fix: Check if content is not None before concatenating
+                                             if content is not None:
+                                                 streaming_text += content
+                                                 resp = dict(text=content)
+                                                 yield resp if raw else resp
+                                 except json.JSONDecodeError:
+                                     continue
+                                 except Exception as e:
+                                     print(f"Error processing chunk: {e}")
+                                     continue
+
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             collected_response = ""
+             try:
+                 for chunk in for_stream():
+                     if isinstance(chunk, dict) and "text" in chunk:
+                         content = chunk["text"]
+                         if content is not None:
+                             collected_response += content
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error during non-stream processing: {str(e)}")
+
+             self.last_response = {"text": collected_response}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = True,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: float = 0.5,
+         top_p: float = 1.0,
+         top_k: int = 5,
+     ) -> str:
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 True,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 temperature=temperature,
+                 top_p=top_p,
+                 top_k=top_k
+             ):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                     temperature=temperature,
+                     top_p=top_p,
+                     top_k=top_k
+                 )
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+     # You need to provide your own API key
+     api_key = "" # U can get free API key from https://playground.electronhub.top/console
+     ai = ElectronHub(timeout=5000, api_key=api_key)
+     response = ai.chat("hi there, how are you today?", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
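
For orientation: aside from the expanded model list, the notable change above is the import move from `from webscout import LitAgent` to `from webscout.litagent import LitAgent`; the Provider interface itself is unchanged. A minimal usage sketch of the 7.9 class, assuming `webscout==7.9` is installed and you supply your own ElectronHub key ("YOUR_API_KEY" and the prompts below are placeholders, not values from this diff):

# Minimal usage sketch for the ElectronHub provider shipped in webscout 7.9.
# Assumes `pip install webscout==7.9`; "YOUR_API_KEY" stands in for a key
# obtained from https://playground.electronhub.top/console.
from webscout.Provider.ElectronHub import ElectronHub

ai = ElectronHub(
    model="gpt-4o-mini",      # any entry from ElectronHub.AVAILABLE_MODELS
    api_key="YOUR_API_KEY",   # placeholder credential
    timeout=30,
)

# stream=True returns a generator of text chunks parsed from the SSE stream
for chunk in ai.chat("Explain what a User-Agent header is.", stream=True):
    print(chunk, end="", flush=True)

# stream=False collects the chunks internally and returns one complete string
print(ai.chat("Now summarize that in one sentence.", stream=False))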