webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/DWEBS.py CHANGED
@@ -4,7 +4,7 @@ DWEBS - A Google search library with advanced features
 import random
 from time import sleep
 from webscout.scout import Scout
-from requests import get
+from curl_cffi.requests import Session
 from urllib.parse import unquote, urlencode
 from typing import List, Dict, Optional, Union, Iterator, Any
 from concurrent.futures import ThreadPoolExecutor
@@ -35,34 +35,55 @@ class SearchResult:
 
 class GoogleSearch:
     """Google search implementation with configurable parameters and advanced features."""
-
+
     _executor: ThreadPoolExecutor = ThreadPoolExecutor()
-
+
     def __init__(
         self,
         timeout: int = 10,
         proxies: Optional[Dict[str, str]] = None,
         verify: bool = True,
         lang: str = "en",
-        sleep_interval: float = 0.0
+        sleep_interval: float = 0.0,
+        impersonate: str = "chrome110"
     ):
         """
         Initialize GoogleSearch with custom settings.
-
+
         Args:
             timeout: Request timeout in seconds
             proxies: Proxy configuration for requests
            verify: Whether to verify SSL certificates
            lang: Search language
            sleep_interval: Sleep time between pagination requests
+           impersonate: Browser profile for curl_cffi. Defaults to "chrome110".
        """
-        self.timeout = timeout
+        self.timeout = timeout # Keep timeout for potential non-session uses or reference
         self.proxies = proxies if proxies else {}
         self.verify = verify
         self.lang = lang
         self.sleep_interval = sleep_interval
         self.base_url = "https://www.google.com/search"
-
+        # Initialize curl_cffi session
+        self.session = Session(
+            proxies=self.proxies,
+            verify=self.verify,
+            timeout=self.timeout,
+            impersonate=impersonate
+        )
+        # Set common headers for the session
+        self.session.headers = {
+            "User-Agent": self._get_useragent(),
+            "Accept-Language": self.lang,
+            "Accept-Encoding": "gzip, deflate, br",
+            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
+        }
+        # Set default cookies for the session
+        self.session.cookies.update({
+            'CONSENT': 'PENDING+987',
+            'SOCS': 'CAESHAgBEhIaAB',
+        })
+
     def _get_useragent(self) -> str:
         """
         Generate a random user agent string.
@@ -99,29 +120,23 @@ class GoogleSearch:
         # Add search type if specified
         if search_type:
             params["tbm"] = search_type
-
+
         try:
-            resp = get(
+            # Use the curl_cffi session
+            resp = self.session.get(
                 url=self.base_url,
-                headers={
-                    "User-Agent": self._get_useragent(),
-                    "Accept-Language": self.lang,
-                    "Accept-Encoding": "gzip, deflate, br",
-                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
-                },
                 params=params,
-                proxies=self.proxies if any(self.proxies) else None,
-                timeout=self.timeout,
-                verify=self.verify,
-                cookies={
-                    'CONSENT': 'PENDING+987',
-                    'SOCS': 'CAESHAgBEhIaAB',
-                }
+                # Headers and cookies are now part of the session
+                # proxies, timeout, verify are handled by the session
             )
             resp.raise_for_status()
             return resp.text
         except Exception as e:
-            raise RuntimeError(f"Search request failed: {str(e)}")
+            # Provide more specific error context if possible
+            if hasattr(e, 'response') and e.response is not None:
+                raise RuntimeError(f"Search request failed with status {e.response.status_code}: {str(e)}")
+            else:
+                raise RuntimeError(f"Search request failed: {str(e)}")
 
     def _extract_url(self, raw_link: str) -> Optional[str]:
         """
@@ -283,8 +298,15 @@ class GoogleSearch:
             start = start_num
 
         while len(fetched_results) < max_results:
+            # Add safe search parameter to the request
+            # Note: This modifies the session params for this specific request type
+            # It might be better to pass params directly to session.get if mixing search types
+            term_with_safe = f"{keywords} safe:{safe}"
+            if region and region.lower() != "all":
+                term_with_safe += f" location:{region}" # Example of adding region, adjust as needed
+
             response_html = self._make_request(
-                term=keywords,
+                term=term_with_safe, # Pass term with safe search
                 results=max_results - len(fetched_results),
                 start=start
             )
@@ -304,7 +326,7 @@ class GoogleSearch:
             if len(fetched_results) >= max_results:
                 break
 
-            start += 10
+            start += 10 # Google typically uses increments of 10
             sleep(self.sleep_interval)
 
         return fetched_results[:max_results]
@@ -340,11 +362,15 @@ class GoogleSearch:
         safe = safe_map.get(safesearch.lower(), "moderate")
 
         # Keep track of unique results
-        fetched_results = []
         fetched_links = set()
-
+
+        # Add safe search parameter
+        term_with_safe = f"{keywords} safe:{safe}"
+        if region and region.lower() != "all":
+            term_with_safe += f" location:{region}" # Example
+
         response_html = self._make_request(
-            term=keywords,
+            term=term_with_safe, # Pass term with safe search
            results=max_results,
            search_type="nws"
        )
@@ -353,7 +379,7 @@ class GoogleSearch:
             html=response_html,
             num_results=max_results,
             fetched_links=fetched_links,
-            unique=True
+            unique=True # News results are generally unique per request
         )
 
         return results[:max_results]
@@ -384,17 +410,19 @@ class GoogleSearch:
 
         url = f"https://www.google.com/complete/search?{urlencode(params)}"
 
+        # Use a simpler header set for the suggestions API
         headers = {
             "User-Agent": self._get_useragent(),
             "Accept": "application/json, text/javascript, */*",
             "Accept-Language": self.lang,
         }
 
-        response = get(
+        # Use session.get but override headers for this specific request
+        response = self.session.get(
             url=url,
             headers=headers,
-            timeout=self.timeout,
-            verify=self.verify
+            params=params # Pass params directly
+            # timeout and verify are handled by session
         )
         response.raise_for_status()
@@ -405,25 +433,40 @@ class GoogleSearch:
             return []
 
         except Exception as e:
+            # Provide more specific error context if possible
+            if hasattr(e, 'response') and e.response is not None:
+                # Log error or handle differently if needed
+                print(f"Suggestions request failed with status {e.response.status_code}: {str(e)}")
+            else:
+                print(f"Suggestions request failed: {str(e)}")
             # Return empty list on error instead of raising exception
             return []
 
 
 # Legacy function support for backward compatibility
-def search(term, num_results=10, lang="en", proxy=None, advanced=False, sleep_interval=0, timeout=5, safe="active", ssl_verify=True, region=None, start_num=0, unique=False):
+def search(term, num_results=10, lang="en", proxy=None, advanced=False, sleep_interval=0, timeout=5, safe="active", ssl_verify=True, region=None, start_num=0, unique=False, impersonate="chrome110"): # Added impersonate
     """Legacy function for backward compatibility."""
     google_search = GoogleSearch(
         timeout=timeout,
         proxies={"https": proxy, "http": proxy} if proxy else None,
         verify=ssl_verify,
         lang=lang,
-        sleep_interval=sleep_interval
+        sleep_interval=sleep_interval,
+        impersonate=impersonate # Pass impersonate
     )
 
+    # Map legacy safe values
+    safe_search_map = {
+        "active": "on",
+        "moderate": "moderate",
+        "off": "off"
+    }
+    safesearch_val = safe_search_map.get(safe, "moderate")
+
     results = google_search.text(
         keywords=term,
         region=region,
-        safesearch="on" if safe == "active" else "moderate" if safe == "moderate" else "off",
+        safesearch=safesearch_val,
         max_results=num_results,
         start_num=start_num,
         unique=unique
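
The net effect of the DWEBS changes: every search now goes through one persistent curl_cffi Session that carries the browser fingerprint, headers, cookies, proxies, and timeout, instead of building them per call around requests.get. A minimal usage sketch; the class, constructor parameters, and text() signature come from the diff above, while the query and result handling are illustrative:

from webscout.DWEBS import GoogleSearch

# One fingerprinted session is created in __init__ and reused for every request.
searcher = GoogleSearch(
    timeout=10,
    lang="en",
    sleep_interval=1.0,       # pause between pagination requests
    impersonate="chrome110",  # new in 8.2.5: curl_cffi browser profile
)

# Note: per the hunks above, safesearch and region are folded into the query
# string itself ("safe:..." / "location:...") rather than sent as URL params.
for result in searcher.text(keywords="curl_cffi impersonation", safesearch="moderate", max_results=10):
    print(result)
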
webscout/Extra/gguf.py CHANGED
@@ -1,6 +1,8 @@
 """
 Convert Hugging Face models to GGUF format with advanced features.
 
+For detailed documentation, see: webscout/Extra/gguf.md
+
 >>> python -m webscout.Extra.gguf convert -m "OEvortex/HelpingAI-Lite-1.5T" -q "q4_k_m,q5_k_m"
 >>> # With upload options:
 >>> python -m webscout.Extra.gguf convert -m "your-model" -u "username" -t "token" -q "q4_k_m"
webscout/Extra/tempmail/base.py CHANGED
@@ -148,7 +148,7 @@ def get_random_email(provider_name: str = "mailtm") -> Tuple[str, TempMailProvider]:
     except Exception as e:
         raise RuntimeError(f"Failed to initialize provider: {e}")
 
-    # Create the account
+    # Create the account (auto-generates a random email)
     success = provider.create_account()
     if not success:
         raise RuntimeError(f"Failed to create account with provider {provider_name}")
webscout/Provider/AISEARCH/hika_search.py CHANGED
@@ -31,12 +31,15 @@ class Hika(AISearch):
         timeout: int = 60,
         proxies: Optional[dict] = None,
         language: str = "en",
+        model: str = "deepseek-r1",
+
     ):
         self.session = requests.Session()
         self.base_url = "https://api.hika.fyi/api/"
         self.endpoint = "kbase/web"
         self.timeout = timeout
         self.language = language
+        self.model = model
         self.last_response = {}
 
         self.headers = {
@@ -104,6 +107,7 @@ class Hika(AISearch):
         # Prepare payload
         payload = {
             "keyword": prompt,
+            "model": self.model,
             "language": self.language,
             "stream": True # Always request streaming for consistent handling
         }
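
The Hika hunks thread a new model option from the constructor into every request payload. A construction-only sketch, assuming the rest of the AISearch interface is unchanged (only the parameters shown in this diff are confirmed):

from webscout.Provider.AISEARCH.hika_search import Hika

# Defaults to "deepseek-r1"; each payload now carries {"model": self.model, ...}
ai = Hika(timeout=60, language="en", model="deepseek-r1")
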
webscout/Provider/AISEARCH/scira_search.py CHANGED
@@ -69,17 +69,14 @@ class Scira(AISearch):
     """
 
     AVAILABLE_MODELS = {
-        "scira-default": "Grok3",
-        "scira-grok-3-mini": "Grok3-mini", # thinking model
+        "scira-default": "Grok3-mini", # thinking model
+        "scira-grok-3": "Grok3",
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4.1-mini": "GPT4.1-mini",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
         "scira-google": "gemini 2.5 flash"
-
-
     }
-
     def __init__(
         self,
         timeout: int = 60,
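
Behavior note: "scira-default" now resolves to the thinking model Grok3-mini, and full Grok3 moved behind the new "scira-grok-3" key, so callers that relied on the old default silently switch models. Illustrative lookup against the mapping above:

from webscout.Provider.AISEARCH.scira_search import Scira

print(Scira.AVAILABLE_MODELS["scira-default"])  # "Grok3-mini" (was "Grok3" in 8.2.3)
print(Scira.AVAILABLE_MODELS["scira-grok-3"])   # "Grok3" (new explicit key)
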
webscout/Provider/Aitopia.py CHANGED
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 import json
 import uuid
 import time
@@ -6,8 +7,8 @@ import hashlib
 from typing import Any, Dict, Optional, Generator, Union
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
+from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -67,9 +68,9 @@ class Aitopia(Provider):
             "user-agent": self.fingerprint["user_agent"]
         }
 
-        self.session = requests.Session()
+        self.session = Session() # Use curl_cffi Session
         self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.proxies = proxies # Assign proxies directly
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -129,6 +130,20 @@ class Aitopia(Provider):
         random_str = str(uuid.uuid4()) + str(time.time())
         return hashlib.md5(random_str.encode()).hexdigest()
 
+    @staticmethod
+    def _aitopia_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Aitopia stream JSON objects."""
+        if isinstance(chunk, dict):
+            # Handle Claude 3 Haiku response format
+            if "delta" in chunk and "text" in chunk["delta"]:
+                return chunk["delta"]["text"]
+            # Handle GPT-4o Mini response format
+            elif "choices" in chunk and "0" in chunk["choices"]:
+                return chunk["choices"]["0"]["delta"].get("content")
+            # Add other potential formats here if needed
+        return None
+
+
     def ask(
         self,
         prompt: str,
@@ -184,63 +199,72 @@ class Aitopia(Provider):
         }
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
             try:
-                with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                    streaming_text = ""
-                    for line in response.iter_lines():
-                        if line:
-                            line = line.decode('utf-8')
-                            if line.startswith('data: '):
-                                data = line[6:]
-                                if data == '[DONE]':
-                                    break
-                                try:
-                                    json_data = json.loads(data)
-
-                                    # Handle Claude 3 Haiku response format
-                                    if "delta" in json_data and "text" in json_data["delta"]:
-                                        content = json_data["delta"]["text"]
-                                        if content:
-                                            streaming_text += content
-                                            resp = dict(text=content)
-                                            yield resp if raw else resp
-                                    # Handle GPT-4o Mini response format
-                                    elif "choices" in json_data and "0" in json_data["choices"]:
-                                        content = json_data["choices"]["0"]["delta"].get("content", "")
-                                        if content:
-                                            streaming_text += content
-                                            resp = dict(text=content)
-                                            yield resp if raw else resp
-                                except json.JSONDecodeError:
-                                    continue
-
+                response = self.session.post(
+                    self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate
+                )
+                response.raise_for_status()
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._aitopia_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _aitopia_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            finally:
+                # Update history after stream finishes or fails
+                if streaming_text:
                     self.last_response = {"text": streaming_text}
                     self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
 
         def for_non_stream():
             try:
-                response = requests.post(self.url, headers=self.headers, json=payload, timeout=self.timeout)
-                if response.status_code != 200:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Request failed with status code {response.status_code}"
-                    )
+                response = self.session.post(
+                    self.url, headers=self.headers, json=payload, timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate
+                )
+                response.raise_for_status()
+
+                response_text_raw = response.text # Get raw text
+
+                # Use sanitize_stream to parse the non-streaming JSON response
+                # Assuming non-stream uses the GPT format based on original code
+                processed_stream = sanitize_stream(
+                    data=response_text_raw,
+                    to_json=True, # Parse the whole text as JSON
+                    intro_value=None,
+                    content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False
+                )
+                # Extract the single result
+                content = next(processed_stream, None)
+                content = content if isinstance(content, str) else "" # Ensure it's a string
 
-                response_data = response.json()
-                if 'choices' in response_data and len(response_data['choices']) > 0:
-                    content = response_data['choices'][0].get('message', {}).get('content', '')
+                if content: # Check if content was successfully extracted
                     self.last_response = {"text": content}
                     self.conversation.update_chat_history(prompt, content)
                     return {"text": content}
                 else:
-                    raise exceptions.FailedToGenerateResponseError("No response content found")
+                    raise exceptions.FailedToGenerateResponseError("No response content found or failed to parse")
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
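
The Aitopia rewrite replaces a hand-rolled SSE parsing loop with sanitize_stream plus a per-provider content extractor. As a rough mental model only, not webscout's actual implementation, the parameters used above (intro_value, skip_markers, content_extractor, yield_raw_on_error) map onto logic like this:

import json
from typing import Any, Callable, Dict, Iterable, Iterator, Optional, Union

def sanitize_stream_sketch(
    data: Iterable[str],
    intro_value: str = "data:",
    skip_markers: tuple = ("[DONE]",),
    content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[str]]] = None,
) -> Iterator[str]:
    """Strip the SSE prefix, skip markers, JSON-decode, then run the extractor."""
    for raw in data:
        line = raw.strip()
        if intro_value and line.startswith(intro_value):
            line = line[len(intro_value):].strip()
        if not line or line in skip_markers:
            continue
        try:
            chunk = json.loads(line)
        except json.JSONDecodeError:
            continue  # mirrors yield_raw_on_error=False: drop undecodable lines
        text = content_extractor(chunk) if content_extractor else None
        if text:
            yield text

# The two payload shapes _aitopia_extractor handles:
demo = [
    'data: {"delta": {"text": "Hel"}}',
    'data: {"choices": {"0": {"delta": {"content": "lo"}}}}',
    'data: [DONE]',
]
extract = lambda c: (c.get("delta", {}).get("text")
                     or c.get("choices", {}).get("0", {}).get("delta", {}).get("content"))
print("".join(sanitize_stream_sketch(demo, content_extractor=extract)))  # -> Hello
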