webscout-7.2-py3-none-any.whl → webscout-7.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
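Such a comparison can be reproduced locally. Below is a minimal sketch, assuming both wheels have first been fetched with "pip download webscout==7.2 --no-deps" and "pip download webscout==7.4 --no-deps"; the filenames follow the standard wheel naming scheme, and read_member is a hypothetical helper written for this sketch, not part of webscout.

import difflib
import zipfile

OLD = "webscout-7.2-py3-none-any.whl"
NEW = "webscout-7.4-py3-none-any.whl"

def read_member(wheel: str, member: str) -> list:
    # A wheel is a plain zip archive, so zipfile can read any file inside it.
    with zipfile.ZipFile(wheel) as zf:
        return zf.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)

# Diff one of the changed files from the list below.
member = "webscout/Provider/Deepinfra.py"
diff = difflib.unified_diff(
    read_member(OLD, member),
    read_member(NEW, member),
    fromfile=f"{OLD}/{member}",
    tofile=f"{NEW}/{member}",
)
print("".join(diff))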

Potentially problematic release.

This version of webscout might be problematic.

Files changed (47)
  1. webscout/Bard.py +2 -2
  2. webscout/Litlogger/core/level.py +3 -0
  3. webscout/Litlogger/core/logger.py +101 -58
  4. webscout/Litlogger/handlers/console.py +14 -31
  5. webscout/Litlogger/handlers/network.py +16 -17
  6. webscout/Litlogger/styles/colors.py +81 -63
  7. webscout/Litlogger/styles/formats.py +163 -80
  8. webscout/Provider/AISEARCH/ISou.py +277 -0
  9. webscout/Provider/AISEARCH/__init__.py +4 -2
  10. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  11. webscout/Provider/AllenAI.py +282 -0
  12. webscout/Provider/Deepinfra.py +52 -37
  13. webscout/Provider/ElectronHub.py +634 -0
  14. webscout/Provider/Glider.py +7 -41
  15. webscout/Provider/HeckAI.py +200 -0
  16. webscout/Provider/Jadve.py +49 -63
  17. webscout/Provider/PI.py +106 -93
  18. webscout/Provider/Perplexitylabs.py +395 -0
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  21. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
  22. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
  23. webscout/Provider/TTI/__init__.py +3 -1
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +28 -6
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/__init__.py +14 -0
  32. webscout/Provider/akashgpt.py +312 -0
  33. webscout/Provider/chatglm.py +5 -5
  34. webscout/Provider/freeaichat.py +251 -0
  35. webscout/Provider/koala.py +9 -1
  36. webscout/Provider/yep.py +5 -25
  37. webscout/__init__.py +1 -0
  38. webscout/version.py +1 -1
  39. webscout/webscout_search.py +82 -2
  40. webscout/webscout_search_async.py +58 -1
  41. webscout/yep_search.py +297 -0
  42. {webscout-7.2.dist-info → webscout-7.4.dist-info}/METADATA +99 -65
  43. {webscout-7.2.dist-info → webscout-7.4.dist-info}/RECORD +47 -30
  44. {webscout-7.2.dist-info → webscout-7.4.dist-info}/WHEEL +1 -1
  45. {webscout-7.2.dist-info → webscout-7.4.dist-info}/LICENSE.md +0 -0
  46. {webscout-7.2.dist-info → webscout-7.4.dist-info}/entry_points.txt +0 -0
  47. {webscout-7.2.dist-info → webscout-7.4.dist-info}/top_level.txt +0 -0
webscout/Provider/ElectronHub.py (new file)
@@ -0,0 +1,634 @@
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class ElectronHub(Provider):
+     """
+     A class to interact with the ElectronHub API with LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         # OpenAI GPT models
+         "gpt-3.5-turbo",
+         "gpt-3.5-turbo-16k",
+         "gpt-3.5-turbo-1106",
+         "gpt-3.5-turbo-0125",
+         "gpt-4",
+         "gpt-4-turbo",
+         "gpt-4-turbo-preview",
+         "gpt-4-0125-preview",
+         "gpt-4-1106-preview",
+         "gpt-4o",
+         "gpt-4o-2024-05-13",
+         "gpt-4o-2024-08-06",
+         "gpt-4o-2024-11-20",
+         "gpt-4o-mini",
+         "gpt-4o-mini-2024-07-18",
+         "chatgpt-4o-latest",
+         "gpt-4.5-preview",
+         "gpt-4.5-preview-2025-02-27",
+         "o1-mini",
+         "o1-preview",
+         "o1",
+         "o1-low",
+         "o1-high",
+         "o3-mini",
+         "o3-mini-low",
+         "o3-mini-high",
+         "o3-mini-online",
+
+         # Anthropic Claude models
+         "claude-2",
+         "claude-2.1",
+         "claude-3-opus-20240229",
+         "claude-3-sonnet-20240229",
+         "claude-3-haiku-20240307",
+         "claude-3-5-sonnet-20240620",
+         "claude-3-5-sonnet-20241022",
+         "claude-3-5-haiku-20241022",
+         "claude-3-7-sonnet-20250219",
+         "claude-3-7-sonnet-20250219-thinking",
+
+         # Google Gemini models
+         "gemini-1.0-pro",
+         "gemini-1.5-pro",
+         "gemini-1.5-pro-latest",
+         "gemini-1.5-flash-8b",
+         "gemini-1.5-flash",
+         "gemini-1.5-flash-latest",
+         "gemini-1.5-flash-exp",
+         "gemini-1.5-flash-online",
+         "gemini-exp-1206",
+         "learnlm-1.5-pro-experimental",
+         "gemini-2.0-flash-001",
+         "gemini-2.0-flash-exp",
+         "gemini-2.0-flash-thinking-exp",
+         "gemini-2.0-flash-thinking-exp-1219",
+         "gemini-2.0-flash-thinking-exp-01-21",
+         "gemini-2.0-flash-lite-preview-02-05",
+         "gemini-2.0-flash-lite-001",
+         "gemini-2.0-pro-exp-02-05",
+
+         # Google PaLM models
+         "palm-2-chat-bison",
+         "palm-2-codechat-bison",
+         "palm-2-chat-bison-32k",
+         "palm-2-codechat-bison-32k",
+
+         # Meta Llama models
+         "llama-2-13b-chat",
+         "llama-2-70b-chat",
+         "llama-guard-3-8b",
+         "code-llama-34b-instruct",
+         "llama-3-8b",
+         "llama-3-70b",
+         "llama-3.1-8b",
+         "llama-3.1-70b",
+         "llama-3.1-405b",
+         "llama-3.2-1b",
+         "llama-3.2-3b",
+         "llama-3.2-11b",
+         "llama-3.2-90b",
+         "llama-3.3-70b-instruct",
+         "llama-3.1-nemotron-70b-instruct",
+         "llama-3.1-tulu-3-8b",
+         "llama-3.1-tulu-3-70b",
+         "llama-3.1-tulu-3-405b",
+
+         # Mistral models
+         "mistral-7b-instruct",
+         "mistral-tiny-latest",
+         "mistral-tiny",
+         "mistral-tiny-2312",
+         "mistral-tiny-2407",
+         "mistral-small-24b-instruct-2501",
+         "mistral-small-latest",
+         "mistral-small",
+         "mistral-small-2312",
+         "mistral-small-2402",
+         "mistral-small-2409",
+         "mistral-medium-latest",
+         "mistral-medium",
+         "mistral-medium-2312",
+         "mistral-large-latest",
+         "mistral-large-2411",
+         "mistral-large-2407",
+         "mistral-large-2402",
+
+         # Mixtral models
+         "mixtral-8x7b",
+         "mixtral-8x22b",
+
+         # DeepSeek models
+         "deepseek-r1",
+         "deepseek-r1-nitro",
+         "deepseek-r1-distill-llama-8b",
+         "deepseek-r1-distill-llama-70b",
+         "deepseek-r1-distill-qwen-1.5b",
+         "deepseek-r1-distill-qwen-7b",
+         "deepseek-r1-distill-qwen-14b",
+         "deepseek-r1-distill-qwen-32b",
+         "deepseek-v3",
+         "deepseek-coder",
+         "deepseek-v2.5",
+         "deepseek-vl2",
+         "deepseek-llm-67b-chat",
+         "deepseek-math-7b-instruct",
+         "deepseek-coder-6.7b-base-awq",
+         "deepseek-coder-6.7b-instruct-awq",
+
+         # Qwen models
+         "qwen-1.5-0.5b-chat",
+         "qwen-1.5-1.8b-chat",
+         "qwen-1.5-14b-chat-awq",
+         "qwen-1.5-7b-chat-awq",
+         "qwen-2-7b-instruct",
+         "qwen-2-72b-instruct",
+         "qwen-2-vl-7b-instruct",
+         "qwen-2-vl-72b-instruct",
+         "qwen-2.5-7b-instruct",
+         "qwen-2.5-32b-instruct",
+         "qwen-2.5-72b-instruct",
+         "qwen-2.5-coder-32b-instruct",
+         "qwq-32b-preview",
+         "qvq-72b-preview",
+         "qwen-vl-plus",
+         "qwen2.5-vl-72b-instruct",
+         "qwen-turbo",
+         "qwen-plus",
+         "qwen-max",
+
+         # Microsoft models
+         "phi-4",
+         "phi-3.5-mini-128k-instruct",
+         "phi-3-medium-128k-instruct",
+         "phi-3-mini-128k-instruct",
+         "phi-2",
+
+         # Gemma models
+         "gemma-7b-it",
+         "gemma-2-9b-it",
+         "gemma-2-27b-it",
+
+         # Various other models
+         "nemotron-4-340b",
+         "pixtral-large-2411",
+         "pixtral-12b",
+         "open-mistral-nemo",
+         "open-mistral-nemo-2407",
+         "open-mixtral-8x22b-2404",
+         "open-mixtral-8x7b",
+         "codestral-mamba",
+         "codestral-latest",
+         "codestral-2405",
+         "codestral-2412",
+         "codestral-2501",
+         "codestral-2411-rc5",
+         "ministral-3b",
+         "ministral-3b-2410",
+         "ministral-8b",
+         "ministral-8b-2410",
+         "mistral-saba-latest",
+         "mistral-saba-2502",
+         "f1-mini-preview",
+         "f1-preview",
+         "dolphin-mixtral-8x7b",
+         "dolphin-mixtral-8x22b",
+         "dolphin3.0-mistral-24b",
+         "dolphin3.0-r1-mistral-24b",
+         "dbrx-instruct",
+         "command",
+         "command-light",
+         "command-nightly",
+         "command-light-nightly",
+         "command-r",
+         "command-r-03-2024",
+         "command-r-08-2024",
+         "command-r-plus",
+         "command-r-plus-04-2024",
+         "command-r-plus-08-2024",
+         "command-r7b-12-2024",
+         "c4ai-aya-expanse-8b",
+         "c4ai-aya-expanse-32b",
+         "reka-flash",
+         "reka-core",
+         "grok-2",
+         "grok-2-mini",
+         "grok-beta",
+         "grok-vision-beta",
+         "grok-2-1212",
+         "grok-2-vision-1212",
+         "grok-3-early",
+         "grok-3-preview-02-24",
+         "r1-1776",
+         "sonar-deep-research",
+         "sonar-reasoning-pro",
+         "sonar-reasoning",
+         "sonar-pro",
+         "sonar",
+         "llama-3.1-sonar-small-128k-online",
+         "llama-3.1-sonar-large-128k-online",
+         "llama-3.1-sonar-huge-128k-online",
+         "llama-3.1-sonar-small-128k-chat",
+         "llama-3.1-sonar-large-128k-chat",
+         "wizardlm-2-7b",
+         "wizardlm-2-8x22b",
+         "minimax-01",
+         "jamba-1.5-large",
+         "jamba-1.5-mini",
+         "jamba-instruct",
+         "openchat-3.5-7b",
+         "openchat-3.6-8b",
+         "aion-1.0",
+         "aion-1.0-mini",
+         "aion-rp-llama-3.1-8b",
+         "nova-lite-v1",
+         "nova-micro-v1",
+         "nova-pro-v1",
+         "inflection-3-pi",
+         "inflection-3-productivity",
+         "mytho-max-l2-13b",
+         "deephermes-3-llama-3-8b-preview",
+         "nous-hermes-llama2-13b",
+         "hermes-3-llama-3.1-8b",
+         "hermes-3-llama-3.1-405b",
+         "hermes-2-pro-llama-3-8b",
+         "nous-hermes-2-mixtral-8x7b-dpo",
+
+         # Chinese models
+         "doubao-lite-4k",
+         "doubao-lite-32k",
+         "doubao-pro-4k",
+         "doubao-pro-32k",
+         "ernie-lite-8k",
+         "ernie-tiny-8k",
+         "ernie-speed-8k",
+         "ernie-speed-128k",
+         "hunyuan-lite",
+         "hunyuan-standard-2025-02-10",
+         "hunyuan-large-2025-02-10",
+         "glm-3-130b",
+         "glm-4-flash",
+         "glm-4-long",
+         "glm-4-airx",
+         "glm-4-air",
+         "glm-4-plus",
+         "glm-4-alltools",
+         "yi-vl-plus",
+         "yi-large",
+         "yi-large-turbo",
+         "yi-large-rag",
+         "yi-medium",
+         "yi-34b-chat-200k",
+         "spark-desk-v1.5",
+
+         # Other AI models
+         "step-2-16k-exp-202412",
+         "granite-3.1-2b-instruct",
+         "granite-3.1-8b-instruct",
+         "solar-0-70b-16bit",
+         "mistral-nemo-inferor-12b",
+         "unslopnemo-12b",
+         "rocinante-12b-v1.1",
+         "rocinante-12b-v1",
+         "sky-t1-32b-preview",
+         "lfm-3b",
+         "lfm-7b",
+         "lfm-40b",
+         "rogue-rose-103b-v0.2",
+         "eva-llama-3.33-70b-v0.0",
+         "eva-llama-3.33-70b-v0.1",
+         "eva-qwen2.5-72b",
+         "eva-qwen2.5-32b-v0.2",
+         "sorcererlm-8x22b",
+         "mythalion-13b",
+         "zephyr-7b-beta",
+         "zephyr-7b-alpha",
+         "toppy-m-7b",
+         "openhermes-2.5-mistral-7b",
+         "l3-lunaris-8b",
+         "llama-3.1-lumimaid-8b",
+         "llama-3.1-lumimaid-70b",
+         "llama-3-lumimaid-8b",
+         "llama-3-lumimaid-70b",
+         "llama3-openbiollm-70b",
+         "l3.1-70b-hanami-x1",
+         "magnum-v4-72b",
+         "magnum-v2-72b",
+         "magnum-72b",
+         "mini-magnum-12b-v1.1",
+         "remm-slerp-l2-13b",
+         "midnight-rose-70b",
+         "athene-v2-chat",
+         "airoboros-l2-70b",
+         "xwin-lm-70b",
+         "noromaid-20b",
+         "violet-twilight-v0.2",
+         "saiga-nemo-12b",
+         "l3-8b-stheno-v3.2",
+         "llama-3.1-8b-lexi-uncensored-v2",
+         "l3.3-70b-euryale-v2.3",
+         "l3.3-ms-evayale-70b",
+         "70b-l3.3-cirrus-x1",
+         "l31-70b-euryale-v2.2",
+         "l3-70b-euryale-v2.1",
+         "fimbulvetr-11b-v2",
+         "goliath-120b",
+
+         # Image generation models
+         "weaver",
+         "sdxl",
+         "sdxl-turbo",
+         "sdxl-lightning",
+         "stable-diffusion-3",
+         "stable-diffusion-3-2b",
+         "stable-diffusion-3.5-large",
+         "stable-diffusion-3.5-turbo",
+         "playground-v3",
+         "playground-v2.5",
+         "animaginexl-3.1",
+         "realvisxl-4.0",
+         "imagen",
+         "imagen-3-fast",
+         "imagen-3",
+         "luma-photon",
+         "luma-photon-flash",
+         "recraft-20b",
+         "recraft-v3",
+         "grok-2-aurora",
+         "flux-schnell",
+         "flux-dev",
+         "flux-pro",
+         "flux-1.1-pro",
+         "flux-1.1-pro-ultra",
+         "flux-1.1-pro-ultra-raw",
+         "flux-realism",
+         "flux-half-illustration",
+         "ideogram-v2-turbo",
+         "ideogram-v2",
+         "amazon-titan",
+         "amazon-titan-v2",
+         "nova-canvas",
+         "omni-gen",
+         "aura-flow",
+         "cogview-3-flash",
+         "sana",
+         "kandinsky-3",
+         "dall-e-3",
+         "midjourney-v6.1",
+         "midjourney-v6",
+         "midjourney-v5.2",
+         "midjourney-v5.1",
+         "midjourney-v5",
+         "niji-v6",
+         "niji-v5",
+
+         # Video generation models
+         "t2v-turbo",
+         "cogvideox-5b",
+         "ltx-video",
+         "mochi-1",
+         "dream-machine",
+         "hailuo-ai",
+         "haiper-video-2.5",
+         "haiper-video-2",
+         "hunyuan-video",
+         "kling-video/v1/standard/text-to-video",
+         "kling-video/v1/pro/text-to-video",
+         "kling-video/v1.6/standard/text-to-video",
+         "kling-video/v1.5/pro/text-to-video",
+         "kokoro-82m",
+
+         # Audio models
+         "elevenlabs",
+         "myshell-tts",
+         "deepinfra-tts",
+         "whisper-large-v3",
+         "distil-large-v3",
+
+         # Embedding and moderation models
+         "text-embedding-3-large",
+         "text-embedding-3-small",
+         "omni-moderation-latest",
+         "omni-moderation-2024-09-26",
+         "text-moderation-latest",
+         "text-moderation-stable",
+         "text-moderation-007"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 16000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "claude-3-7-sonnet-20250219",
+         system_prompt: str = "You're a helpful assistant that can help me with my questions.",
+         api_key: str = None
+     ):
+         """Initializes the ElectronHub API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://api.electronhub.top/v1/chat/completions"
+         # Use LitAgent for user-agent
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Content-Type': 'application/json',
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'DNT': '1',
+             'Origin': 'https://playground.electronhub.top',
+             'Referer': 'https://playground.electronhub.top/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'Priority': 'u=1, i'
+         }
+
+         # Add API key if provided
+         if api_key:
+             self.headers['Authorization'] = f'Bearer {api_key}'
+         self.system_prompt = system_prompt
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: float = 0.5,
+         top_p: float = 1.0,
+         top_k: int = 5,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Construct messages for the conversation
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
+         ]
+
+         # Payload construction based on ElectronHub API requirements
+         payload = {
+             "model": self.model,
+             "messages": messages,
+             "stream": stream,
+             "stream_options": {"include_usage": True},
+             "max_tokens": self.max_tokens,
+             "temperature": temperature,
+             "top_p": top_p,
+             "top_k": top_k,
+             "web_search": False,
+             "customId": None
+         }
+
+         def for_stream():
+             try:
+                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             line = line.strip()
+                             if line.startswith("data: "):
+                                 json_str = line[6:]
+                                 if json_str == "[DONE]":
+                                     break
+                                 try:
+                                     json_data = json.loads(json_str)
+                                     if 'choices' in json_data:
+                                         choice = json_data['choices'][0]
+                                         if 'delta' in choice and 'content' in choice['delta']:
+                                             content = choice['delta']['content']
+                                             # Fix: Check if content is not None before concatenating
+                                             if content is not None:
+                                                 streaming_text += content
+                                                 resp = dict(text=content)
+                                                 yield content if raw else resp  # raw yields the plain text chunk, otherwise the dict
+                                 except json.JSONDecodeError:
+                                     continue
+                                 except Exception as e:
+                                     print(f"Error processing chunk: {e}")
+                                     continue
+
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             collected_response = ""
+             try:
+                 for chunk in for_stream():
+                     if isinstance(chunk, dict) and "text" in chunk:
+                         content = chunk["text"]
+                         if content is not None:
+                             collected_response += content
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error during non-stream processing: {str(e)}")
+
+             self.last_response = {"text": collected_response}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = True,
+         optimizer: str = None,
+         conversationally: bool = False,
+         temperature: float = 0.5,
+         top_p: float = 1.0,
+         top_k: int = 5,
+     ) -> str:
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 True,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 temperature=temperature,
+                 top_p=top_p,
+                 top_k=top_k
+             ):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                     temperature=temperature,
+                     top_p=top_p,
+                     top_k=top_k
+                 )
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+     # You need to provide your own API key
+     api_key = ""  # You can get a free API key from https://playground.electronhub.top/console
+     ai = ElectronHub(timeout=5000, api_key=api_key)
+     response = ai.chat("hi there, how are you today?", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
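
The __main__ block above demonstrates streaming via chat. For reference, here is a minimal sketch of driving the same class through its lower-level ask method, which yields dicts shaped like {"text": "..."} per SSE chunk. The import path is inferred from the file location in this diff, and the model name and key are placeholder assumptions, not values taken from the package.

from webscout.Provider.ElectronHub import ElectronHub

# Placeholder credentials; see the console URL noted in the __main__ block above.
ai = ElectronHub(model="gpt-4o-mini", api_key="YOUR_API_KEY")

# ask(..., stream=True) yields one {"text": "<chunk>"} dict per SSE event.
for chunk in ai.ask("Explain what this provider class does.", stream=True):
    print(chunk["text"], end="", flush=True)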