webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of webscout might be problematic.

Files changed (122):
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/ElectronHub.py

@@ -1,10 +1,13 @@
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 import json
 import os
-from typing import Any, Dict, Optional, Generator, Union
+from typing import Any, Dict, Optional, Generator, Union, List
+
+import requests
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
@@ -15,6 +18,7 @@ class ElectronHub(Provider):
     A class to interact with the ElectronHub API with LitAgent user-agent.
     """
 
+    # Default models list (will be updated dynamically)
     AVAILABLE_MODELS = [
         # OpenAI GPT models
         "gpt-3.5-turbo",
@@ -498,6 +502,63 @@ class ElectronHub(Provider):
         "text-moderation-stable",
         "text-moderation-007"
     ]
+
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from ElectronHub API.
+
+        Args:
+            api_key (str, optional): ElectronHub API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            headers = {
+                'Content-Type': 'application/json',
+                'Accept': '*/*',
+                'User-Agent': LitAgent().random(),
+                'Authorization': f'Bearer {api_key}'
+            }
+
+            response = requests.get(
+                "https://api.electronhub.top/v1/models",
+                headers=headers,
+                timeout=10
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model["id"] for model in data["data"]]
+            return cls.AVAILABLE_MODELS
+
+        except Exception:
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
+
+    @classmethod
+    def update_available_models(cls, api_key=None):
+        """Update the available models list from ElectronHub API"""
+        try:
+            models = cls.get_models(api_key)
+            if models and len(models) > 0:
+                cls.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
+
+    @staticmethod
+    def _electronhub_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from ElectronHub stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
 
     def __init__(
         self,
@@ -515,6 +576,10 @@ class ElectronHub(Provider):
         api_key: str = None
     ):
         """Initializes the ElectronHub API client."""
+        # Update available models from API
+        self.update_available_models(api_key)
+
+        # Validate model after updating available models
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
@@ -538,9 +603,9 @@ class ElectronHub(Provider):
         if api_key:
             self.headers['Authorization'] = f'Bearer {api_key}'
         self.system_prompt = system_prompt
-        self.session = requests.Session()
+        self.session = Session() # Use curl_cffi Session
         self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.proxies = proxies # Assign proxies directly
 
         self.is_conversation = is_conversation
         self.max_tokens = max_tokens
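For reference, the get_models / update_available_models classmethods added above can also be called directly, so a caller can inspect or refresh the model list before constructing the provider. A hypothetical usage sketch; the import path is inferred from the file layout and the key is a placeholder:

# Hypothetical usage of the dynamic model list (placeholder API key).
from webscout.Provider.ElectronHub import ElectronHub

models = ElectronHub.get_models(api_key="ek-...")  # live list, or the defaults on any failure
print(f"{len(models)} models available")

ElectronHub.update_available_models(api_key="ek-...")  # replaces AVAILABLE_MODELS in place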
@@ -608,41 +673,38 @@ class ElectronHub(Provider):
 
         def for_stream():
             try:
-                with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                    streaming_text = ""
-                    for line in response.iter_lines(decode_unicode=True):
-                        if line:
-                            line = line.strip()
-                            if line.startswith("data: "):
-                                json_str = line[6:]
-                                if json_str == "[DONE]":
-                                    break
-                                try:
-                                    json_data = json.loads(json_str)
-                                    if 'choices' in json_data:
-                                        choice = json_data['choices'][0]
-                                        if 'delta' in choice and 'content' in choice['delta']:
-                                            content = choice['delta']['content']
-                                            # Fix: Check if content is not None before concatenating
-                                            if content is not None:
-                                                streaming_text += content
-                                                resp = dict(text=content)
-                                                yield resp if raw else resp
-                                except json.JSONDecodeError:
-                                    continue
-                                except Exception as e:
-                                    print(f"Error processing chunk: {e}")
-                                    continue
-
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
+                response = self.session.post(
+                    self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate
+                )
+                response.raise_for_status()
+
+                streaming_text = ""
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._electronhub_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _electronhub_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            finally:
+                # Update history after stream finishes or fails
+                if streaming_text:
+                    self.conversation.update_chat_history(prompt, streaming_text)
 
         def for_non_stream():
             collected_response = ""
@@ -655,7 +717,9 @@ class ElectronHub(Provider):
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Error during non-stream processing: {str(e)}")
 
+            # Update history and last_response after aggregation
             self.last_response = {"text": collected_response}
+            self.conversation.update_chat_history(prompt, collected_response)
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
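The hunks above all follow one pattern: swap requests for a curl_cffi Session with browser impersonation, then feed the raw byte stream through webscout's sanitize_stream helper with a small per-provider extractor. A minimal standalone sketch of that pattern, assuming webscout and curl_cffi are installed; the URL, key, and payload are placeholders, and the accumulator is initialized before the request so error paths can still read it:

# Minimal sketch of the streaming pattern these hunks adopt (assumes webscout
# and curl_cffi are installed; the URL, key, and payload are placeholders).
import json
from typing import Any, Dict, Optional, Union

from curl_cffi import CurlError
from curl_cffi.requests import Session
from webscout import exceptions
from webscout.AIutel import sanitize_stream


def extract_delta(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Pull the delta text out of an OpenAI-style SSE JSON object.
    if isinstance(chunk, dict):
        return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
    return None


def stream_chat(url: str, api_key: str, payload: dict):
    # Yield text chunks from an OpenAI-compatible streaming endpoint.
    session = Session()
    streaming_text = ""  # initialized before the request so error paths can read it
    try:
        response = session.post(
            url,
            headers={"Authorization": f"Bearer {api_key}"},
            data=json.dumps(payload),
            stream=True,
            timeout=30,
            impersonate="chrome120",  # browser TLS fingerprint, as in the hunks above
        )
        response.raise_for_status()
        for piece in sanitize_stream(
            data=response.iter_content(chunk_size=None),  # raw byte iterator
            intro_value="data:",       # strip the SSE prefix
            to_json=True,              # parse each line as JSON
            skip_markers=["[DONE]"],   # terminator sent by the API
            content_extractor=extract_delta,
            yield_raw_on_error=False,  # drop lines that fail to parse
        ):
            if piece and isinstance(piece, str):
                streaming_text += piece
                yield piece
    except CurlError as e:
        raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e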
webscout/Provider/ExaChat.py

@@ -1,10 +1,11 @@
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session, Response # Import Response
 import json
 import uuid
-from typing import Any, Dict, Union, Optional
+from typing import Any, Dict, Union, Optional, List
 from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
-from webscout.AIbase import Provider
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
 
@@ -22,6 +23,8 @@ MODEL_CONFIGS = {
            "gemini-2.0-flash-thinking-exp-01-21",
            "gemini-2.5-pro-exp-03-25",
            "gemini-2.0-pro-exp-02-05",
+           "gemini-2.5-flash-preview-04-17",
+
 
        ],
    },
@@ -87,6 +90,7 @@ class ExaChat(Provider):
         "gemini-2.0-flash-thinking-exp-01-21",
         "gemini-2.5-pro-exp-03-25",
         "gemini-2.0-pro-exp-02-05",
+        "gemini-2.5-flash-preview-04-17",
 
         # OpenRouter Models
         "mistralai/mistral-small-3.1-24b-instruct:free",
@@ -141,7 +145,7 @@ class ExaChat(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        self.session = Session() # Use curl_cffi Session
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -166,7 +170,7 @@ class ExaChat(Provider):
         }
 
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
         self.session.cookies.update({"session": uuid.uuid4().hex})
 
         self.__available_optimizers = (
@@ -208,18 +212,27 @@ class ExaChat(Provider):
             error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
             raise ValueError(error_msg)
 
-    def _make_request(self, payload: Dict[str, Any]) -> requests.Response:
+    @staticmethod
+    def _exachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from ExaChat stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
+    def _make_request(self, payload: Dict[str, Any]) -> Response: # Change type hint to Response
         """Make the API request with proper error handling."""
         try:
             response = self.session.post(
                 self._get_endpoint(),
                 headers=self.headers,
                 json=payload,
-                timeout=self.timeout,
+                timeout=self.timeout, # type: ignore
+                stream=True, # Enable streaming for the request
+                impersonate="chrome120" # Add impersonate
             )
             response.raise_for_status()
             return response
-        except requests.exceptions.RequestException as e:
+        except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch CurlError and others
             raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
 
     def _build_payload(self, conversation_prompt: str) -> Dict[str, Any]:
@@ -271,20 +284,23 @@ class ExaChat(Provider):
 
         try:
             full_response = ""
-            for line in response.iter_lines():
-                if line:
-                    try:
-                        data = json.loads(line.decode('utf-8'))
-                        if 'choices' in data and len(data['choices']) > 0:
-                            content = data['choices'][0].get('delta', {}).get('content', '')
-                            if content:
-                                full_response += content
-                    except json.JSONDecodeError:
-                        continue
+            # Use sanitize_stream to process the response
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value=None, # API doesn't seem to use 'data:' prefix
+                to_json=True, # Stream sends JSON lines
+                content_extractor=self._exachat_extractor, # Use the specific extractor
+                yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by _exachat_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    full_response += content_chunk
 
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
-            return self.last_response
+            return self.last_response if not raw else full_response # Return dict or raw string
 
         except json.JSONDecodeError as e:
             raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
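The ExaChat rewrite is the same pattern with one difference: lines arrive as bare JSON with no data: prefix, hence intro_value=None. A toy illustration of that mode, assuming sanitize_stream splits the byte stream on newlines and JSON-decodes each line as the call site above suggests; the byte chunks below are invented:

# Toy run of sanitize_stream over bare JSON lines (no SSE "data:" prefix).
# Assumes webscout is installed; the stream contents below are invented.
from webscout.AIutel import sanitize_stream

fake_stream = iter([
    b'{"choices": [{"delta": {"content": "Hel"}}]}\n',
    b'{"choices": [{"delta": {"content": "lo"}}]}\n',
    b'not json, skipped because yield_raw_on_error=False\n',
])

text = "".join(
    piece
    for piece in sanitize_stream(
        data=fake_stream,
        intro_value=None,  # ExaChat sends bare JSON lines
        to_json=True,
        content_extractor=lambda c: (
            c.get("choices", [{}])[0].get("delta", {}).get("content")
            if isinstance(c, dict) else None
        ),
        yield_raw_on_error=False,
    )
    if isinstance(piece, str)
)
print(text)  # expected: "Hello", if the stream is parsed line by line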
webscout/Provider/GPTWeb.py

@@ -1,8 +1,11 @@
-import requests
+from typing import Any, Dict, Generator, Optional, Union
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 
+from webscout import exceptions
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 
@@ -14,7 +17,7 @@ class GPTWeb(Provider):
     def __init__(
         self,
        is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -22,7 +25,7 @@ class GPTWeb(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
+        # Note: system_prompt is not used by this API
     ):
         """
         Initializes the Nexra GPTWeb API with given parameters.
@@ -37,9 +40,9 @@ class GPTWeb(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for GPTWeb. Defaults to "You are a helpful AI assistant.".
         """
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = 'https://nexra.aryahcr.cc/api/chat/gptweb'
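The next hunk strips the hand-rolled User-Agent and Accept-Encoding headers because curl_cffi's impersonate= argument supplies browser-like TLS and default headers by itself. A quick hypothetical probe of what an impersonation profile sends (assumes curl_cffi is installed and network access; httpbin merely echoes the request headers back):

# Hypothetical probe: inspect the headers an impersonation profile sends.
from curl_cffi.requests import Session

session = Session()
resp = session.get("https://httpbin.org/headers", impersonate="chrome110")
print(resp.json()["headers"]["User-Agent"])  # a Chrome UA rather than a Python client UA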
@@ -48,6 +51,7 @@ class GPTWeb(Provider):
         self.last_response = {}
         self.headers = {
             "Content-Type": "application/json"
+            # Remove User-Agent, Accept-Encoding, etc. - handled by impersonate
         }
 
         self.__available_optimizers = (
@@ -55,7 +59,10 @@ class GPTWeb(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -67,16 +74,22 @@ class GPTWeb(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+
+    @staticmethod
+    def _gptweb_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from GPTWeb stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("gpt")
+        return None
 
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
+    ) -> Union[dict, Generator[dict, None, None]]: # Corrected return type hint
         """Chat with GPTWeb
 
         Args:
@@ -110,31 +123,65 @@ class GPTWeb(Provider):
         }
 
         def for_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, data=json.dumps(data), stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            full_response = '' # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session
+                    data=json.dumps(data),
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
+                response.raise_for_status() # Check for HTTP errors
 
-            full_response = ''
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    line = line.lstrip('_') # Remove "_"
-                    try:
-                        # Attempt to parse the entire line as JSON
-                        json_data = json.loads(line)
-                        full_response = json_data.get("gpt", "")
-                        yield full_response if raw else dict(text=full_response)
-                    except json.JSONDecodeError:
-                        print(f"Skipping invalid JSON line: {line}")
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No standard prefix, potential '_' handled by json.loads
+                    to_json=True, # Stream sends JSON lines
+                    content_extractor=self._gptweb_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the full text extracted by _gptweb_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response = content_chunk # API sends full response each time
+                        resp = dict(text=full_response)
+                        yield resp if not raw else full_response
+
+                # Update history after stream finishes (using the final full response)
+                self.last_response = dict(text=full_response)
+                self.conversation.update_chat_history(
+                    prompt, full_response
+                )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+
+
         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            # Aggregate the stream using the updated for_stream logic
+            # Since the stream yields the full response each time, we just need the last one.
+            last_chunk = None
+            try:
+                for chunk in for_stream():
+                    last_chunk = chunk
+            except Exception as e:
+                # If aggregation fails, re-raise.
+                raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            if last_chunk is None:
+                raise exceptions.FailedToGenerateResponseError("No response received from stream.")
+
+            return last_chunk # last_chunk is already dict or raw string based on 'raw'
+
 
         return for_stream() if stream else for_non_stream()
 
@@ -144,7 +191,7 @@ class GPTWeb(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
@@ -155,23 +202,31 @@ class GPTWeb(Provider):
             str: Response generated
         """
 
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
             )
+            # Since the API sends the full response each time, we only need the last one.
+            # However, to maintain the streaming interface, we yield the message from each chunk.
+            # This might result in repeated text if the client doesn't handle it.
+            # A better approach might be to track changes, but for simplicity, yield each message.
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
@@ -186,6 +241,7 @@ class GPTWeb(Provider):
         return response["text"]
 
 if __name__ == '__main__':
+    # Ensure curl_cffi is installed
     from rich import print
     ai = GPTWeb()
     response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
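As the comments in for_stream_chat concede, GPTWeb resends the complete text on every line, so a client that naively concatenates chunks prints repeated text. One client-side way to recover true increments from such cumulative snapshots, sketched with invented data:

# Recovering deltas from cumulative snapshots (client-side sketch, invented data).
def iter_deltas(snapshots):
    """Yield only the newly appended text from each cumulative snapshot."""
    previous = ""
    for snapshot in snapshots:
        # Normal case: the new snapshot extends the previous one.
        delta = snapshot[len(previous):] if snapshot.startswith(previous) else snapshot
        previous = snapshot
        if delta:
            yield delta

for piece in iter_deltas(["Ab", "Abhay", "Abhay Koul"]):
    print(piece, end="")  # prints "Abhay Koul" exactly once
print()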