webscout: 7.8-py3-none-any.whl → 7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (41)
  1. webscout/Bard.py +5 -25
  2. webscout/DWEBS.py +476 -476
  3. webscout/Extra/__init__.py +2 -0
  4. webscout/Extra/autocoder/__init__.py +1 -1
  5. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
  6. webscout/Extra/tempmail/__init__.py +26 -0
  7. webscout/Extra/tempmail/async_utils.py +141 -0
  8. webscout/Extra/tempmail/base.py +156 -0
  9. webscout/Extra/tempmail/cli.py +187 -0
  10. webscout/Extra/tempmail/mail_tm.py +361 -0
  11. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  12. webscout/Provider/Deepinfra.py +288 -286
  13. webscout/Provider/ElectronHub.py +709 -716
  14. webscout/Provider/ExaChat.py +20 -5
  15. webscout/Provider/Gemini.py +167 -165
  16. webscout/Provider/Groq.py +38 -24
  17. webscout/Provider/LambdaChat.py +2 -1
  18. webscout/Provider/TextPollinationsAI.py +232 -230
  19. webscout/Provider/__init__.py +0 -4
  20. webscout/Provider/copilot.py +427 -427
  21. webscout/Provider/freeaichat.py +8 -1
  22. webscout/Provider/uncovr.py +312 -299
  23. webscout/Provider/yep.py +64 -12
  24. webscout/__init__.py +38 -36
  25. webscout/cli.py +293 -293
  26. webscout/conversation.py +350 -17
  27. webscout/litprinter/__init__.py +59 -667
  28. webscout/optimizers.py +419 -419
  29. webscout/update_checker.py +14 -12
  30. webscout/version.py +1 -1
  31. webscout/webscout_search.py +1282 -1282
  32. webscout/webscout_search_async.py +813 -813
  33. {webscout-7.8.dist-info → webscout-7.9.dist-info}/METADATA +44 -39
  34. {webscout-7.8.dist-info → webscout-7.9.dist-info}/RECORD +38 -35
  35. webscout/Provider/DARKAI.py +0 -225
  36. webscout/Provider/EDITEE.py +0 -192
  37. webscout/litprinter/colors.py +0 -54
  38. {webscout-7.8.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
  39. {webscout-7.8.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
  40. {webscout-7.8.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
  41. {webscout-7.8.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
webscout/Provider/ElectronHub.py
@@ -1,716 +1,709 @@
- import requests
- import json
- import os
- from typing import Any, Dict, Optional, Generator, Union
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class ElectronHub(Provider):
-     """
-     A class to interact with the ElectronHub API with LitAgent user-agent.
-     """
-
-     AVAILABLE_MODELS = [
-         # DeepSeek models
-         "deepseek-coder-6.7b-base-awq",
-         "deepseek-coder-6.7b-instruct-awq",
-         "deepseek-math-7b-instruct",
-         "deepseek-r1-distill-qwen-32b",
-
-         # DiscoLM models
-         "discolm-german-7b-v1-awq",
-
-         # Falcon models
-         "falcon-7b-instruct",
-
-         # Gemma models
-         "gemma-7b-it",
-
-         # Hermes models
-         "hermes-2-pro-mistral-7b",
-
-         # Llama models
-         "llama-2-13b-chat-awq",
-         "llama-2-7b-chat-fp16",
-         "llama-2-7b-chat-int8",
-         "llama-3-8b-instruct",
-         "llama-3-8b-instruct-awq",
-         "llama-3.1-8b-instruct",
-         "llama-3.1-8b-instruct-awq",
-         "llama-3.1-8b-instruct-fp8",
-         "llama-3.2-11b-vision-instruct",
-         "llama-3.2-1b-instruct",
-         "llama-3.2-3b-instruct",
-         "llama-3.3-70b-instruct-fp8-fast",
-         "llama-guard-3-8b",
-         "llamaguard-7b-awq",
-         "meta-llama-3-8b-instruct",
-
-         # Mistral models
-         "mistral-7b-instruct-v0.1",
-         "mistral-7b-instruct-v0.1-awq",
-         "mistral-7b-instruct-v0.2",
-
-         # Neural Chat models
-         "neural-chat-7b-v3-1-awq",
-
-         # OpenChat models
-         "openchat-3.5-0106",
-
-         # OpenHermes models
-         "openhermes-2.5-mistral-7b-awq",
-
-         # Phi models
-         "phi-2",
-
-         # Qwen models
-         "qwen1.5-0.5b-chat",
-         "qwen1.5-1.8b-chat",
-         "qwen1.5-14b-chat-awq",
-         "qwen1.5-7b-chat-awq",
-
-         # SQLCoder models
-         "sqlcoder-7b-2",
-
-         # Starling models
-         "starling-lm-7b-beta",
-
-         # TinyLlama models
-         "tinyllama-1.1b-chat-v1.0",
-
-         # UNA models
-         "una-cybertron-7b-v2-bf16",
-
-         # Zephyr models
-         "zephyr-7b-beta-awq",
-
-         # OpenAI GPT models
-         "gpt-3.5-turbo",
-         "gpt-3.5-turbo-16k",
-         "gpt-3.5-turbo-1106",
-         "gpt-3.5-turbo-0125",
-         "gpt-4",
-         "gpt-4-turbo",
-         "gpt-4-turbo-preview",
-         "gpt-4-0125-preview",
-         "gpt-4-1106-preview",
-         "gpt-4o",
-         "gpt-4o-2024-05-13",
-         "gpt-4o-2024-08-06",
-         "gpt-4o-2024-11-20",
-         "gpt-4o-search-preview",
-         "gpt-4o-search-preview-2025-03-11",
-         "gpt-4o-mini",
-         "gpt-4o-mini-2024-07-18",
-         "gpt-4o-mini-search-preview",
-         "gpt-4o-mini-search-preview-2025-03-11",
-         "chatgpt-4o-latest",
-         "gpt-4.5-preview",
-         "gpt-4.5-preview-2025-02-27",
-         "o1-mini",
-         "o1-preview",
-         "o1",
-         "o1-low",
-         "o1-high",
-         "o3-mini",
-         "o3-mini-low",
-         "o3-mini-high",
-         "o3-mini-online",
-
-         # Anthropic Claude models
-         "claude-2",
-         "claude-2.1",
-         "claude-3-haiku-20240307",
-         "claude-3-5-haiku-20241022",
-         "claude-3-opus-20240229",
-         "claude-3-sonnet-20240229",
-         "claude-3-5-sonnet-20240620",
-         "claude-3-5-sonnet-20241022",
-         "claude-3-7-sonnet-20250219",
-         "claude-3-7-sonnet-20250219-thinking",
-         "claude-3-opus-20240229:safe",
-         "claude-3-sonnet-20240229:safe",
-         "claude-3-5-sonnet-20240620:safe",
-         "claude-3-5-sonnet-20241022:safe",
-         "claude-3-7-sonnet-20250219:safe",
-         "claude-3-7-sonnet-20250219-thinking:safe",
-
-         # Google Gemini models
-         "gemini-1.0-pro",
-         "gemini-1.0-pro-vision",
-         "gemini-1.5-pro",
-         "gemini-1.5-pro-latest",
-         "gemini-1.5-flash-8b",
-         "gemini-1.5-flash",
-         "gemini-1.5-flash-latest",
-         "gemini-1.5-flash-exp",
-         "gemini-1.5-flash-online",
-         "gemini-exp-1206",
-         "learnlm-1.5-pro-experimental",
-         "gemini-2.0-flash-001",
-         "gemini-2.0-flash-exp",
-         "gemini-2.0-flash-thinking-exp",
-         "gemini-2.0-flash-thinking-exp-1219",
-         "gemini-2.0-flash-thinking-exp-01-21",
-         "gemini-2.0-flash-lite-preview-02-05",
-         "gemini-2.0-flash-lite-001",
-         "gemini-2.0-pro-exp-02-05",
-         "gemini-2.5-pro-exp-03-25",
-
-         # Google PaLM models
-         "palm-2-chat-bison",
-         "palm-2-codechat-bison",
-         "palm-2-chat-bison-32k",
-         "palm-2-codechat-bison-32k",
-
-         # Meta Llama models
-         "llama-2-70b-chat",
-         "llama-3-70b",
-         "llama-3.1-70b",
-         "llama-3.1-405b",
-         "llama-3.2-90b",
-         "llama-3.1-nemotron-70b-instruct",
-         "llama-3.1-tulu-3-70b",
-         "llama-3.1-tulu-3-405b",
-         "llama-3.1-sonar-small-128k-online",
-         "llama-3.1-sonar-large-128k-online",
-         "llama-3.1-sonar-huge-128k-online",
-         "llama-3.1-sonar-small-128k-chat",
-         "llama-3.1-sonar-large-128k-chat",
-         "llama-3.1-swallow-70b-instruct-v0.3",
-         "llama-3.1-8b-lexi-uncensored-v2",
-         "llama-3.1-lumimaid-8b",
-         "llama-3.1-lumimaid-70b",
-         "llama3-openbiollm-70b",
-
-         # Mixtral models
-         "mixtral-8x7b",
-         "mixtral-8x22b",
-
-         "mistral-tiny-latest",
-         "mistral-tiny",
-         "mistral-tiny-2312",
-         "mistral-tiny-2407",
-         "mistral-small-3.1-24b-instruct",
-         "mistral-small-24b-instruct-2501",
-         "mistral-small-latest",
-         "mistral-small",
-         "mistral-small-2312",
-         "mistral-small-2402",
-         "mistral-small-2409",
-         "mistral-medium-latest",
-         "mistral-medium",
-         "mistral-medium-2312",
-         "mistral-large-latest",
-         "mistral-large-2411",
-         "mistral-large-2407",
-         "mistral-large-2402",
-         "open-mistral-nemo",
-         "open-mistral-nemo-2407",
-         "open-mixtral-8x22b-2404",
-         "open-mixtral-8x7b",
-
-         # Codestral models
-         "codestral-mamba",
-         "codestral-latest",
-         "codestral-2405",
-         "codestral-2412",
-         "codestral-2501",
-
-         # Ministral models
-         "ministral-3b",
-         "ministral-3b-2410",
-         "ministral-8b",
-         "ministral-8b-2410",
-
-         # Mistral Saba models
-         "mistral-saba-latest",
-         "mistral-saba-2502",
-
-         # F1 models
-         "f1-mini-preview",
-         "f1-preview",
-
-         # Dolphin models
-         "dolphin-mixtral-8x7b",
-         "dolphin-mixtral-8x22b",
-         "dolphin3.0-mistral-24b",
-         "dolphin3.0-r1-mistral-24b",
-
-         # Command models
-         "command",
-         "command-light",
-         "command-nightly",
-         "command-light-nightly",
-         "command-r",
-         "command-r-03-2024",
-         "command-r-08-2024",
-         "command-r-plus",
-         "command-r-plus-04-2024",
-         "command-r-plus-08-2024",
-         "command-r7b-12-2024",
-         "command-a-03-2025",
-
-         # Other AI models
-         "nemotron-4-340b",
-         "pixtral-large-2411",
-         "pixtral-12b",
-         "dbrx-instruct",
-         "c4ai-aya-expanse-8b",
-         "c4ai-aya-expanse-32b",
-         "reka-flash",
-         "reka-core",
-         "reka-flash-3",
-         "grok-2",
-         "grok-2-mini",
-         "grok-beta",
-         "grok-vision-beta",
-         "grok-2-1212",
-         "grok-2-vision-1212",
-         "grok-3-early",
-         "grok-3-preview-02-24",
-         "r1-1776",
-         "sonar-deep-research",
-         "sonar-reasoning-pro",
-         "sonar-reasoning",
-         "sonar-pro",
-         "sonar",
-         "phi-4",
-         "phi-4-multimodal-instruct",
-         "phi-3.5-mini-128k-instruct",
-         "phi-3-medium-128k-instruct",
-         "phi-3-mini-128k-instruct",
-         "wizardlm-2-7b",
-         "wizardlm-2-8x22b",
-         "minimax-01",
-         "jamba-1.5-large",
-         "jamba-1.5-mini",
-         "jamba-1.6-large",
-         "jamba-1.6-mini",
-         "jamba-instruct",
-         "openchat-3.6-8b",
-
-         # Qwen models
-         "qwen-1.5-0.5b-chat",
-         "qwen-1.5-1.8b-chat",
-         "qwen-1.5-14b-chat-awq",
-         "qwen-1.5-7b-chat-awq",
-         "qwen-2-7b-instruct",
-         "qwen-2-72b-instruct",
-         "qwen-2-vl-7b-instruct",
-         "qwen-2-vl-72b-instruct",
-         "qwen-2.5-7b-instruct",
-         "qwen-2.5-32b-instruct",
-         "qwen-2.5-72b-instruct",
-         "qwen-2.5-coder-32b-instruct",
-         "qwq-32b-preview",
-         "qwq-32b",
-         "qwen-vl-plus",
-         "qwen2.5-vl-3b-instruct",
-         "qwen2.5-vl-7b-instruct",
-         "qwen2.5-vl-72b-instruct",
-         "qwen-turbo",
-         "qwen-plus",
-         "qwen-max",
-
-         # Other models
-         "aion-1.0",
-         "aion-1.0-mini",
-         "aion-rp-llama-3.1-8b",
-         "nova-lite-v1",
-         "nova-micro-v1",
-         "nova-pro-v1",
-         "inflection-3-pi",
-         "inflection-3-productivity",
-         "mytho-max-l2-13b",
-         "deephermes-3-llama-3-8b-preview",
-         "nous-hermes-llama2-13b",
-         "hermes-3-llama-3.1-405b",
-         "nous-hermes-2-mixtral-8x7b-dpo",
-
-         # Chinese models
-         "doubao-lite-4k",
-         "doubao-lite-32k",
-         "doubao-pro-4k",
-         "doubao-pro-32k",
-         "ui-tars-72b-dpo",
-         "ernie-lite-8k",
-         "ernie-tiny-8k",
-         "ernie-speed-8k",
-         "ernie-speed-128k",
-         "hunyuan-lite",
-         "hunyuan-standard-2025-02-10",
-         "hunyuan-large-2025-02-10",
-         "glm-3-130b",
-         "glm-4-flash",
-         "glm-4-long",
-         "glm-4-airx",
-         "glm-4-air",
-         "glm-4-plus",
-         "glm-4-alltools",
-         "yi-vl-plus",
-         "yi-large",
-         "yi-large-turbo",
-         "yi-large-rag",
-         "yi-medium",
-         "yi-34b-chat-200k",
-         "moonlight-16b-a3b-instruct",
-         "spark-desk-v1.5",
-
-         # Additional models
-         "step-2-16k-exp-202412",
-         "granite-3.1-2b-instruct",
-         "granite-3.1-8b-instruct",
-         "solar-0-70b-16bit",
-         "mistral-nemo-inferor-12b",
-         "unslopnemo-12b",
-         "rocinante-12b-v1.1",
-         "rocinante-12b-v1",
-         "olympiccoder-7b",
-         "olympiccoder-32b",
-         "anubis-pro-105b-v1",
-         "fallen-llama-3.3-r1-70b-v1",
-         "skyfall-36b-v2",
-         "wayfarer-large-70b-llama-3.3",
-         "qwq-32b-snowdrop-v0",
-         "qwq-32b-abliterated",
-         "sky-t1-32b-preview",
-         "tiny-r1-32b-preview",
-         "lfm-3b",
-         "lfm-7b",
-         "lfm-40b",
-         "eva-llama-3.33-70b-v0.0",
-         "eva-llama-3.33-70b-v0.1",
-         "eva-qwen2.5-72b",
-         "eva-qwen2.5-32b-v0.2",
-         "sorcererlm-8x22b",
-         "mythalion-13b",
-         "toppy-m-7b",
-         "openhermes-2.5-mistral-7b",
-         "l3-lunaris-8b",
-         "l3.1-70b-hanami-x1",
-         "lumimaid-magnum-v4-12b",
-         "magnum-v4-72b",
-         "magnum-v4-12b",
-         "magnum-v3-34b",
-         "magnum-v2.5-12b-kto",
-         "magnum-v2-72b",
-         "magnum-v2-32b",
-         "magnum-v2-12b",
-         "magnum-72b",
-         "mini-magnum-12b-v1.1",
-         "remm-slerp-l2-13b",
-         "patricide-12b-unslop-mell",
-         "midnight-rose-70b",
-         "airoboros-l2-13b-gpt4-m2.0",
-         "airoboros-l2-70b",
-         "xwin-lm-70b",
-         "noromaid-20b",
-         "violet-twilight-v0.2",
-         "saiga-nemo-12b",
-         "l3-8b-stheno-v3.2",
-         "l3.3-electra-r1-70b",
-         "l3.3-cu-mai-r1-70b",
-         "l3.3-mokume-gane-r1-70b-v1.1",
-         "l3.3-70b-euryale-v2.3",
-         "l3.3-ms-evayale-70b",
-         "70b-l3.3-cirrus-x1",
-         "l31-70b-euryale-v2.2",
-         "l3-70b-euryale-v2.1",
-         "fimbulvetr-11b-v2",
-         "goliath-120b",
-
-         # Image generation models
-         "weaver",
-         "sdxl",
-         "sdxl-turbo",
-         "sdxl-lightning",
-         "stable-diffusion-3",
-         "stable-diffusion-3-2b",
-         "stable-diffusion-3.5-large",
-         "stable-diffusion-3.5-turbo",
-         "playground-v3",
-         "playground-v2.5",
-         "animaginexl-3.1",
-         "realvisxl-4.0",
-         "imagen",
-         "imagen-3-fast",
-         "imagen-3",
-         "luma-photon",
-         "luma-photon-flash",
-         "recraft-20b",
-         "recraft-v3",
-         "grok-2-aurora",
-         "flux-schnell",
-         "flux-dev",
-         "flux-pro",
-         "flux-1.1-pro",
-         "flux-1.1-pro-ultra",
-         "flux-1.1-pro-ultra-raw",
-         "flux-realism",
-         "flux-half-illustration",
-         "ideogram-v2-turbo",
-         "ideogram-v2",
-         "amazon-titan",
-         "amazon-titan-v2",
-         "nova-canvas",
-         "omni-gen",
-         "aura-flow",
-         "cogview-3-flash",
-         "sana",
-         "kandinsky-3",
-         "dall-e-3",
-         "midjourney-v6.1",
-         "midjourney-v6",
-         "midjourney-v5.2",
-         "midjourney-v5.1",
-         "midjourney-v5",
-         "niji-v6",
-         "niji-v5",
-
-         # Video generation models
-         "t2v-turbo",
-         "cogvideox-5b",
-         "ltx-video",
-         "mochi-1",
-         "dream-machine",
-         "hailuo-ai",
-         "haiper-video-2.5",
-         "haiper-video-2",
-         "hunyuan-video",
-         "kling-video/v1/standard/text-to-video",
-         "kling-video/v1/pro/text-to-video",
-         "kling-video/v1.6/standard/text-to-video",
-         "kling-video/v1.5/pro/text-to-video",
-         "kokoro-82m",
-
-         # Audio models
-         "elevenlabs",
-         "myshell-tts",
-         "deepinfra-tts",
-         "whisper-large-v3",
-         "distil-large-v3",
-
-         # Embedding and moderation models
-         "text-embedding-3-large",
-         "text-embedding-3-small",
-         "omni-moderation-latest",
-         "omni-moderation-2024-09-26",
-         "text-moderation-latest",
-         "text-moderation-stable",
-         "text-moderation-007"
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 16000,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "claude-3-7-sonnet-20250219",
-         system_prompt: str = "You're helpful assistant that can help me with my questions.",
-         api_key: str = None
-     ):
-         """Initializes the ElectronHub API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.url = "https://api.electronhub.top/v1/chat/completions"
-         # Use LitAgent for user-agent
-         self.headers = {
-             'User-Agent': LitAgent().random(),
-             'Content-Type': 'application/json',
-             'Accept': '*/*',
-             'Accept-Language': 'en-US,en;q=0.9',
-             'DNT': '1',
-             'Origin': 'https://playground.electronhub.top',
-             'Referer': 'https://playground.electronhub.top/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'Priority': 'u=1, i'
-         }
-
-         # Add API key if provided
-         if api_key:
-             self.headers['Authorization'] = f'Bearer {api_key}'
-         self.system_prompt = system_prompt
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-         self.session.proxies.update(proxies)
-
-         self.is_conversation = is_conversation
-         self.max_tokens = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = True,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         temperature: float = 0.5,
-         top_p: float = 1.0,
-         top_k: int = 5,
-     ) -> Union[Dict[str, Any], Generator]:
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         # Construct messages for the conversation
-         messages = [
-             {"role": "system", "content": self.system_prompt},
-             {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
-         ]
-
-         # Payload construction based on ElectronHub API requirements
-         payload = {
-             "model": self.model,
-             "messages": messages,
-             "stream": stream,
-             "stream_options": {"include_usage": True},
-             "max_tokens": self.max_tokens,
-             "temperature": temperature,
-             "top_p": top_p,
-             "top_k": top_k,
-             "web_search": False,
-             "customId": None
-         }
-
-         def for_stream():
-             try:
-                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
-                     if response.status_code != 200:
-                         raise exceptions.FailedToGenerateResponseError(
-                             f"Request failed with status code {response.status_code}"
-                         )
-
-                     streaming_text = ""
-                     for line in response.iter_lines(decode_unicode=True):
-                         if line:
-                             line = line.strip()
-                             if line.startswith("data: "):
-                                 json_str = line[6:]
-                                 if json_str == "[DONE]":
-                                     break
-                                 try:
-                                     json_data = json.loads(json_str)
-                                     if 'choices' in json_data:
-                                         choice = json_data['choices'][0]
-                                         if 'delta' in choice and 'content' in choice['delta']:
-                                             content = choice['delta']['content']
-                                             # Fix: Check if content is not None before concatenating
-                                             if content is not None:
-                                                 streaming_text += content
-                                                 resp = dict(text=content)
-                                                 yield resp if raw else resp
-                                 except json.JSONDecodeError:
-                                     continue
-                                 except Exception as e:
-                                     print(f"Error processing chunk: {e}")
-                                     continue
-
-                     self.conversation.update_chat_history(prompt, streaming_text)
-
-             except requests.RequestException as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-         def for_non_stream():
-             collected_response = ""
-             try:
-                 for chunk in for_stream():
-                     if isinstance(chunk, dict) and "text" in chunk:
-                         content = chunk["text"]
-                         if content is not None:
-                             collected_response += content
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Error during non-stream processing: {str(e)}")
-
-             self.last_response = {"text": collected_response}
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = True,
-         optimizer: str = None,
-         conversationally: bool = False,
-         temperature: float = 0.5,
-         top_p: float = 1.0,
-         top_k: int = 5,
-     ) -> str:
-         def for_stream():
-             for response in self.ask(
-                 prompt,
-                 True,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-                 temperature=temperature,
-                 top_p=top_p,
-                 top_k=top_k
-             ):
-                 yield self.get_message(response)
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                     temperature=temperature,
-                     top_p=top_p,
-                     top_k=top_k
-                 )
-             )
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == "__main__":
-     from rich import print
-     # You need to provide your own API key
-     api_key = "" # U can get free API key from https://playground.electronhub.top/console
-     ai = ElectronHub(timeout=5000, api_key=api_key)
-     response = ai.chat("hi there, how are you today?", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class ElectronHub(Provider):
+     """
+     A class to interact with the ElectronHub API with LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         # OpenAI GPT models
+         "gpt-3.5-turbo",
+         "gpt-3.5-turbo-16k",
+         "gpt-3.5-turbo-1106",
+         "gpt-3.5-turbo-0125",
+         "gpt-4",
+         "gpt-4-turbo",
+         "gpt-4-turbo-preview",
+         "gpt-4-0125-preview",
+         "gpt-4-1106-preview",
+         "gpt-4o",
+         "gpt-4o-2024-05-13",
+         "gpt-4o-2024-08-06",
+         "gpt-4o-2024-11-20",
+         "gpt-4o-search-preview",
+         "gpt-4o-search-preview-2025-03-11",
+         "gpt-4o-mini",
+         "gpt-4o-mini-2024-07-18",
+         "gpt-4o-mini-search-preview",
+         "gpt-4o-mini-search-preview-2025-03-11",
+         "chatgpt-4o-latest",
+         "gpt-4.5-preview",
+         "gpt-4.5-preview-2025-02-27",
+         "o1-mini",
+         "o1-preview",
+         "o1",
+         "o1-low",
+         "o1-high",
+         "o3-mini",
+         "o3-mini-low",
+         "o3-mini-high",
+         "o3-mini-online",
+
+         # Anthropic Claude models
+         "claude-2",
+         "claude-2.1",
+         "claude-3-haiku-20240307",
+         "claude-3-5-haiku-20241022",
+         "claude-3-opus-20240229",
+         "claude-3-sonnet-20240229",
+         "claude-3-5-sonnet-20240620",
+         "claude-3-5-sonnet-20241022",
+         "claude-3-7-sonnet-20250219",
+         "claude-3-7-sonnet-20250219-thinking",
+         "claude-3-opus-20240229:safe",
+         "claude-3-sonnet-20240229:safe",
+         "claude-3-5-sonnet-20240620:safe",
+         "claude-3-5-sonnet-20241022:safe",
+         "claude-3-7-sonnet-20250219:safe",
+         "claude-3-7-sonnet-20250219-thinking:safe",
+
+         # Google Gemini models
+         "gemini-1.0-pro",
+         "gemini-1.0-pro-vision",
+         "gemini-1.5-pro",
+         "gemini-1.5-pro-latest",
+         "gemini-1.5-flash-8b",
+         "gemini-1.5-flash",
+         "gemini-1.5-flash-latest",
+         "gemini-1.5-flash-exp",
+         "gemini-1.5-flash-online",
+         "gemini-exp-1206",
+         "learnlm-1.5-pro-experimental",
+         "gemini-2.0-flash-001",
+         "gemini-2.0-flash-exp",
+         "gemini-2.0-flash-thinking-exp",
+         "gemini-2.0-flash-thinking-exp-1219",
+         "gemini-2.0-flash-thinking-exp-01-21",
+         "gemini-2.0-flash-lite-preview-02-05",
+         "gemini-2.0-flash-lite-001",
+         "gemini-2.0-pro-exp-02-05",
+         "gemini-2.5-pro-exp-03-25",
+
+         # Google PaLM models
+         "palm-2-chat-bison",
+         "palm-2-codechat-bison",
+         "palm-2-chat-bison-32k",
+         "palm-2-codechat-bison-32k",
+
+         # Meta Llama models
+         "llama-2-13b-chat-awq",
+         "llama-2-7b-chat-fp16",
+         "llama-2-7b-chat-int8",
+         "llama-2-70b-chat",
+         "llama-3-8b-instruct",
+         "llama-3-8b-instruct-awq",
+         "llama-3-70b",
+         "llama-3.1-8b-instruct",
+         "llama-3.1-8b-instruct-awq",
+         "llama-3.1-8b-instruct-fp8",
+         "llama-3.1-70b",
+         "llama-3.1-405b",
+         "llama-3.2-11b-vision-instruct",
+         "llama-3.2-1b-instruct",
+         "llama-3.2-3b-instruct",
+         "llama-3.2-90b",
+         "llama-3.3-70b-instruct-fp8-fast",
+         "llama-guard-3-8b",
+         "llamaguard-7b-awq",
+         "meta-llama-3-8b-instruct",
+         "llama-3.1-nemotron-70b-instruct",
+         "llama-3.1-tulu-3-70b",
+         "llama-3.1-tulu-3-405b",
+         "llama-3.1-sonar-small-128k-online",
+         "llama-3.1-sonar-large-128k-online",
+         "llama-3.1-sonar-huge-128k-online",
+         "llama-3.1-sonar-small-128k-chat",
+         "llama-3.1-sonar-large-128k-chat",
+         "llama-3.1-swallow-70b-instruct-v0.3",
+         "llama-3.1-8b-lexi-uncensored-v2",
+         "llama-3.1-lumimaid-8b",
+         "llama-3.1-lumimaid-70b",
+         "llama3-openbiollm-70b",
+
+         # Mistral models
+         "mistral-7b-instruct-v0.1",
+         "mistral-7b-instruct-v0.1-awq",
+         "mistral-7b-instruct-v0.2",
+         "mistral-tiny-latest",
+         "mistral-tiny",
+         "mistral-tiny-2312",
+         "mistral-tiny-2407",
+         "mistral-small-3.1-24b-instruct",
+         "mistral-small-24b-instruct-2501",
+         "mistral-small-latest",
+         "mistral-small",
+         "mistral-small-2312",
+         "mistral-small-2402",
+         "mistral-small-2409",
+         "mistral-medium-latest",
+         "mistral-medium",
+         "mistral-medium-2312",
+         "mistral-large-latest",
+         "mistral-large-2411",
+         "mistral-large-2407",
+         "mistral-large-2402",
+         "open-mistral-nemo",
+         "open-mistral-nemo-2407",
+         "open-mixtral-8x22b-2404",
+         "open-mixtral-8x7b",
+
+         # Codestral models
+         "codestral-mamba",
+         "codestral-latest",
+         "codestral-2405",
+         "codestral-2412",
+         "codestral-2501",
+
+         # Ministral models
+         "ministral-3b",
+         "ministral-3b-2410",
+         "ministral-8b",
+         "ministral-8b-2410",
+
+         # Mistral Saba models
+         "mistral-saba-latest",
+         "mistral-saba-2502",
+
+         # Mixtral models
+         "mixtral-8x7b",
+         "mixtral-8x22b",
+
+         # DeepSeek models
+         "deepseek-coder",
+         "deepseek-coder-6.7b-base-awq",
+         "deepseek-coder-6.7b-instruct-awq",
+         "deepseek-llm-67b-chat",
+         "deepseek-math-7b-instruct",
+         "deepseek-r1",
+         "deepseek-r1-distill-llama-70b",
+         "deepseek-r1-distill-llama-8b",
+         "deepseek-r1-distill-qwen-1.5b",
+         "deepseek-r1-distill-qwen-14b",
+         "deepseek-r1-distill-qwen-32b",
+         "deepseek-r1-distill-qwen-7b",
+         "deepseek-r1-nitro",
+         "deepseek-r1-zero",
+         "deepseek-v2.5",
+         "deepseek-v3",
+         "deepseek-v3-0324",
+         "deepseek-vl2",
+
+         # Qwen models
+         "qwen-1.5-0.5b-chat",
+         "qwen-1.5-1.8b-chat",
+         "qwen-1.5-14b-chat-awq",
+         "qwen-1.5-7b-chat-awq",
+         "qwen-2-7b-instruct",
+         "qwen-2-72b-instruct",
+         "qwen-2-vl-7b-instruct",
+         "qwen-2-vl-72b-instruct",
+         "qwen-2.5-7b-instruct",
+         "qwen-2.5-32b-instruct",
+         "qwen-2.5-72b-instruct",
+         "qwen-2.5-coder-32b-instruct",
+         "qwq-32b-preview",
+         "qwq-32b",
+         "qwen-vl-plus",
+         "qwen2.5-vl-3b-instruct",
+         "qwen2.5-vl-7b-instruct",
+         "qwen2.5-vl-72b-instruct",
+         "qwen-turbo",
+         "qwen-plus",
+         "qwen-max",
+
+         # F1 models
+         "f1-mini-preview",
+         "f1-preview",
+
+         # Command models
+         "command",
+         "command-light",
+         "command-nightly",
+         "command-light-nightly",
+         "command-r",
+         "command-r-03-2024",
+         "command-r-08-2024",
+         "command-r-plus",
+         "command-r-plus-04-2024",
+         "command-r-plus-08-2024",
+         "command-r7b-12-2024",
+         "command-a-03-2025",
+
+         # Dolphin models
+         "dolphin-mixtral-8x7b",
+         "dolphin-mixtral-8x22b",
+         "dolphin3.0-mistral-24b",
+         "dolphin3.0-r1-mistral-24b",
+
+         # Cohere models
+         "c4ai-aya-expanse-8b",
+         "c4ai-aya-expanse-32b",
+
+         # Reka models
+         "reka-flash",
+         "reka-core",
+         "reka-flash-3",
+
+         # OpenChat models
+         "openchat-3.5-0106",
+         "openchat-3.5-7b",
+         "openchat-3.6-8b",
+
+         # Yi models
+         "yi-34b-chat-200k",
+         "yi-large",
+         "yi-large-rag",
+         "yi-large-turbo",
+         "yi-medium",
+         "yi-vl-plus",
+
+         # Phi models
+         "phi-2",
+         "phi-3-mini-128k-instruct",
+         "phi-3-medium-128k-instruct",
+         "phi-3.5-mini-128k-instruct",
+         "phi-4",
+         "phi-4-multimodal-instruct",
+
+         # Claude models by AION-LABS
+         "aion-1.0",
+         "aion-1.0-mini",
+         "aion-rp-llama-3.1-8b",
+
+         # Other AI models
+         "nemotron-4-340b",
+         "pixtral-large-2411",
+         "pixtral-12b",
+         "dbrx-instruct",
+         "grok-2",
+         "grok-2-mini",
+         "grok-beta",
+         "grok-vision-beta",
+         "grok-2-1212",
+         "grok-2-vision-1212",
+         "grok-3-early",
+         "grok-3-preview-02-24",
+         "r1-1776",
+         "sonar-deep-research",
+         "sonar-reasoning-pro",
+         "sonar-reasoning",
+         "sonar-pro",
+         "sonar",
+         "wizardlm-2-7b",
+         "wizardlm-2-8x22b",
+         "minimax-01",
+         "jamba-1.5-large",
+         "jamba-1.5-mini",
+         "jamba-1.6-large",
+         "jamba-1.6-mini",
+         "jamba-instruct",
+
+         # Chinese language models
+         "doubao-lite-4k",
+         "doubao-lite-32k",
+         "doubao-pro-4k",
+         "doubao-pro-32k",
+         "ui-tars-72b-dpo",
+         "ernie-lite-8k",
+         "ernie-tiny-8k",
+         "ernie-speed-8k",
+         "ernie-speed-128k",
+         "hunyuan-lite",
+         "hunyuan-standard-2025-02-10",
+         "hunyuan-large-2025-02-10",
+         "glm-3-130b",
+         "glm-4-flash",
+         "glm-4-long",
+         "glm-4-airx",
+         "glm-4-air",
+         "glm-4-plus",
+         "glm-4-alltools",
+         "spark-desk-v1.5",
+
+         # Other language models
+         "discolm-german-7b-v1-awq",
+         "falcon-7b-instruct",
+         "neural-chat-7b-v3-1-awq",
+         "openhermes-2.5-mistral-7b",
+         "openhermes-2.5-mistral-7b-awq",
+         "sqlcoder-7b-2",
+         "starling-lm-7b-beta",
+         "tinyllama-1.1b-chat-v1.0",
+         "una-cybertron-7b-v2-bf16",
+         "zephyr-7b-beta",
+         "zephyr-7b-beta-awq",
+
+         # Inference-optimized models
+         "mistral-nemo-inferor-12b",
+         "rocinante-12b-v1",
+         "rocinante-12b-v1.1",
+         "unslopnemo-12b",
+
+         # Additional specialty models
+         "granite-3.1-2b-instruct",
+         "granite-3.1-8b-instruct",
+         "solar-0-70b-16bit",
+         "olympiccoder-7b",
+         "olympiccoder-32b",
+         "anubis-pro-105b-v1",
+         "fallen-llama-3.3-r1-70b-v1",
+         "skyfall-36b-v2",
+         "wayfarer-large-70b-llama-3.3",
+         "qwq-32b-snowdrop-v0",
+         "qwq-32b-abliterated",
+         "sky-t1-32b-preview",
+         "tiny-r1-32b-preview",
+         "lfm-3b",
+         "lfm-7b",
+         "lfm-40b",
+         "eva-llama-3.33-70b-v0.0",
+         "eva-llama-3.33-70b-v0.1",
+         "eva-qwen2.5-72b",
+         "eva-qwen2.5-32b-v0.2",
+         "sorcererlm-8x22b",
+         "mythalion-13b",
+         "toppy-m-7b",
+         "l3-lunaris-8b",
+         "l3.1-70b-hanami-x1",
+         "lumimaid-magnum-v4-12b",
+         "magnum-v4-72b",
+         "magnum-v4-12b",
+         "magnum-v3-34b",
+         "magnum-v2.5-12b-kto",
+         "magnum-v2-72b",
+         "magnum-v2-32b",
+         "magnum-v2-12b",
+         "magnum-72b",
+         "mini-magnum-12b-v1.1",
+         "remm-slerp-l2-13b",
+         "patricide-12b-unslop-mell",
+         "midnight-rose-70b",
+         "airoboros-l2-13b-gpt4-m2.0",
+         "airoboros-l2-70b",
+         "xwin-lm-70b",
+         "noromaid-20b",
+         "violet-twilight-v0.2",
+         "saiga-nemo-12b",
+         "l3-8b-stheno-v3.2",
+         "l3.3-electra-r1-70b",
+         "l3.3-cu-mai-r1-70b",
+         "l3.3-mokume-gane-r1-70b-v1.1",
+         "l3.3-70b-euryale-v2.3",
+         "l3.3-ms-evayale-70b",
+         "70b-l3.3-cirrus-x1",
+         "l31-70b-euryale-v2.2",
+         "l3-70b-euryale-v2.1",
+         "fimbulvetr-11b-v2",
+         "goliath-120b",
+         "hermes-2-pro-mistral-7b",
+         "mytho-max-l2-13b",
+         "deephermes-3-llama-3-8b-preview",
+         "nous-hermes-llama2-13b",
+         "hermes-3-llama-3.1-405b",
+         "nous-hermes-2-mixtral-8x7b-dpo",
+         "nova-lite-v1",
+         "nova-micro-v1",
+         "nova-pro-v1",
+         "inflection-3-pi",
+         "inflection-3-productivity",
+
+         # Image generation models
+         "weaver",
+         "sdxl",
+         "sdxl-turbo",
+         "sdxl-lightning",
+         "stable-diffusion-3",
+         "stable-diffusion-3-2b",
+         "stable-diffusion-3.5-large",
+         "stable-diffusion-3.5-turbo",
+         "playground-v3",
+         "playground-v2.5",
+         "animaginexl-3.1",
+         "realvisxl-4.0",
+         "imagen",
+         "imagen-3-fast",
+         "imagen-3",
+         "luma-photon",
+         "luma-photon-flash",
+         "recraft-20b",
+         "recraft-v3",
+         "grok-2-aurora",
+         "flux-schnell",
+         "flux-dev",
+         "flux-pro",
+         "flux-1.1-pro",
+         "flux-1.1-pro-ultra",
+         "flux-1.1-pro-ultra-raw",
+         "flux-realism",
+         "flux-half-illustration",
+         "ideogram-v2-turbo",
+         "ideogram-v2",
+         "amazon-titan",
+         "amazon-titan-v2",
+         "nova-canvas",
+         "omni-gen",
+         "aura-flow",
+         "cogview-3-flash",
+         "sana",
+         "kandinsky-3",
+         "dall-e-3",
+         "midjourney-v6.1",
+         "midjourney-v6",
+         "midjourney-v5.2",
+         "midjourney-v5.1",
+         "midjourney-v5",
+         "midjourney-v7",
+         "niji-v6",
+         "niji-v5",
+
+         # Video generation models
+         "t2v-turbo",
+         "cogvideox-5b",
+         "ltx-video",
+         "mochi-1",
+         "dream-machine",
+         "hailuo-ai",
+         "haiper-video-2.5",
+         "haiper-video-2",
+         "hunyuan-video",
+         "kling-video/v1/standard/text-to-video",
+         "kling-video/v1/pro/text-to-video",
+         "kling-video/v1.6/standard/text-to-video",
+         "kling-video/v1.5/pro/text-to-video",
+         "kokoro-82m",
+
+         # Audio models
+         "elevenlabs",
+         "myshell-tts",
+         "deepinfra-tts",
+         "whisper-large-v3",
+         "distil-large-v3",
+
+         # Embedding and moderation models
+         "text-embedding-3-large",
+         "text-embedding-3-small",
+         "omni-moderation-latest",
+         "omni-moderation-2024-09-26",
+         "text-moderation-latest",
+         "text-moderation-stable",
+         "text-moderation-007"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 16000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "claude-3-7-sonnet-20250219",
+         system_prompt: str = "You're helpful assistant that can help me with my questions.",
+         api_key: str = None
+     ):
+         """Initializes the ElectronHub API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://api.electronhub.top/v1/chat/completions"
+         # Use LitAgent for user-agent
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Content-Type': 'application/json',
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'DNT': '1',
+             'Origin': 'https://playground.electronhub.top',
+             'Referer': 'https://playground.electronhub.top/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'Priority': 'u=1, i'
+         }
+
+         # Add API key if provided
+         if api_key:
+             self.headers['Authorization'] = f'Bearer {api_key}'
+         self.system_prompt = system_prompt
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: float = 0.5,
+         top_p: float = 1.0,
+         top_k: int = 5,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Construct messages for the conversation
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
+         ]
+
+         # Payload construction based on ElectronHub API requirements
+         payload = {
+             "model": self.model,
+             "messages": messages,
+             "stream": stream,
+             "stream_options": {"include_usage": True},
+             "max_tokens": self.max_tokens,
+             "temperature": temperature,
+             "top_p": top_p,
+             "top_k": top_k,
+             "web_search": False,
+             "customId": None
+         }
+
+         def for_stream():
+             try:
+                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             line = line.strip()
+                             if line.startswith("data: "):
+                                 json_str = line[6:]
+                                 if json_str == "[DONE]":
+                                     break
+                                 try:
+                                     json_data = json.loads(json_str)
+                                     if 'choices' in json_data:
+                                         choice = json_data['choices'][0]
+                                         if 'delta' in choice and 'content' in choice['delta']:
+                                             content = choice['delta']['content']
+                                             # Fix: Check if content is not None before concatenating
+                                             if content is not None:
+                                                 streaming_text += content
+                                                 resp = dict(text=content)
+                                                 yield resp if raw else resp
+                                 except json.JSONDecodeError:
+                                     continue
+                                 except Exception as e:
+                                     print(f"Error processing chunk: {e}")
+                                     continue
+
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             collected_response = ""
+             try:
+                 for chunk in for_stream():
+                     if isinstance(chunk, dict) and "text" in chunk:
+                         content = chunk["text"]
+                         if content is not None:
+                             collected_response += content
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error during non-stream processing: {str(e)}")
+
+             self.last_response = {"text": collected_response}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = True,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: float = 0.5,
+         top_p: float = 1.0,
+         top_k: int = 5,
+     ) -> str:
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 True,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 temperature=temperature,
+                 top_p=top_p,
+                 top_k=top_k
+             ):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                     temperature=temperature,
+                     top_p=top_p,
+                     top_k=top_k
+                 )
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+     # You need to provide your own API key
+     api_key = "" # U can get free API key from https://playground.electronhub.top/console
+     ai = ElectronHub(timeout=5000, api_key=api_key)
+     response = ai.chat("hi there, how are you today?", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)