webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/HeckAI.py
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import uuid
 import sys
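
The change repeated across nearly every provider in this release: plain requests is replaced by curl_cffi's requests-compatible Session, whose impersonate option makes the TLS and HTTP/2 fingerprint match a real browser. A minimal sketch of the pattern, assuming only that curl_cffi is installed (the URL is a placeholder, not taken from this diff):

    # Minimal sketch of the curl_cffi pattern this release adopts.
    # Assumes pip install curl_cffi; the URL is a placeholder.
    from curl_cffi.requests import Session
    from curl_cffi import CurlError

    session = Session()
    session.headers.update({"Content-Type": "application/json"})

    try:
        # impersonate makes the TLS/HTTP2 fingerprint match a real Chrome build,
        # which is the main reason these providers moved off plain requests.
        resp = session.get("https://example.com/api", impersonate="chrome110", timeout=30)
        resp.raise_for_status()
        print(resp.status_code)
    except CurlError as e:
        # transport-level failures surface as CurlError
        print(f"Transport error: {e}")
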
@@ -17,16 +18,20 @@ class HeckAI(Provider):
     """
 
     AVAILABLE_MODELS = [
+        "google/gemini-2.0-flash-001",
         "deepseek/deepseek-chat",
-        "openai/gpt-4o-mini",
         "deepseek/deepseek-r1",
-        "google/gemini-2.0-flash-001"
+        "openai/gpt-4o-mini",
+        "openai/gpt-4.1-mini",
+        "x-ai/grok-3-mini-beta",
+        "meta-llama/llama-4-scout"
+
     ]
 
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2049,
+        max_tokens: int = 2049, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -45,18 +50,18 @@ class HeckAI(Provider):
         self.session_id = str(uuid.uuid4())
         self.language = language
 
-        # Use LitAgent for user-agent
+        # Use LitAgent (keep if needed for other headers or logic)
         self.headers = {
-            'User-Agent': LitAgent().random(),
             'Content-Type': 'application/json',
-            'Origin': 'https://heck.ai',
-            'Referer': 'https://heck.ai/',
-            'Connection': 'keep-alive'
+            'Origin': 'https://heck.ai', # Keep Origin
+            'Referer': 'https://heck.ai/', # Keep Referer
         }
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.proxies = proxies # Assign proxies directly
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
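
One subtlety in this block: requests' Session.proxies is a dict the old code merged into with .update(), while the curl_cffi session here gets the mapping assigned wholesale, so an empty dict simply means no proxy. A short sketch of the equivalent setup (the proxy URL is a placeholder):

    # Sketch of the session setup; the proxy URL is a placeholder.
    from curl_cffi.requests import Session

    session = Session()
    session.headers.update({
        "Content-Type": "application/json",
        "Origin": "https://heck.ai",
        "Referer": "https://heck.ai/",
    })
    # Wholesale assignment replaces requests' session.proxies.update(...);
    # an empty dict ({}) means no proxy at all.
    session.proxies = {"https": "http://127.0.0.1:8080"}  # placeholder proxy
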
@@ -87,7 +92,7 @@ class HeckAI(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -95,9 +100,7 @@ class HeckAI(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
@@ -116,25 +119,32 @@ class HeckAI(Provider):
         self.previous_question = conversation_prompt
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
+            in_answer = False # Initialize outside try block
            try:
-                with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                    streaming_text = ""
-                    in_answer = False
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    # headers are set on the session
+                    data=json.dumps(payload),
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
 
-                    for line in response.iter_lines(decode_unicode=True):
-                        if not line:
-                            continue
-
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if not line_bytes:
+                        continue
+
+                    try:
+                        line = line_bytes.decode('utf-8')
                         # Remove "data: " prefix
                         if line.startswith("data: "):
                            data = line[6:]
                        else:
-                            continue
+                            continue # Skip lines without the prefix
 
                        # Check for control markers
                        if data == "[ANSWER_START]":
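
Unlike requests, iter_lines() on a curl_cffi response yields raw bytes and has no decode_unicode flag, hence the explicit decode step introduced above. A standalone sketch of that loop over simulated SSE lines, so it runs without a network:

    # Byte-decoding loop from the hunk above, on simulated SSE lines.
    lines = [b"data: [ANSWER_START]", b"data: Hello", b"", b"data: [ANSWER_DONE]"]

    for line_bytes in lines:  # stand-in for response.iter_lines()
        if not line_bytes:
            continue
        try:
            line = line_bytes.decode("utf-8")
        except UnicodeDecodeError:
            continue  # skip undecodable lines, as the provider does
        if line.startswith("data: "):
            print(line[6:])  # payload after the "data: " prefix
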
@@ -150,23 +160,46 @@ class HeckAI(Provider):
 
                        # Process content if we're in an answer section
                        if in_answer:
+                            # Assuming 'data' is the text chunk here
                            streaming_text += data
                            resp = dict(text=data)
-                            yield resp if raw else resp
-
-                    self.previous_answer = streaming_text
-                    self.conversation.update_chat_history(prompt, streaming_text)
+                            # Yield dict or raw string chunk
+                            yield resp if not raw else data
+                    except UnicodeDecodeError:
+                        continue # Ignore decoding errors for specific lines
+
+                # Update history and previous answer after stream finishes
+                self.previous_answer = streaming_text
+                self.conversation.update_chat_history(prompt, streaming_text)
 
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
+
 
        def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
            full_text = ""
-            for chunk in for_stream():
-                if isinstance(chunk, dict) and "text" in chunk:
-                    full_text += chunk["text"]
-            self.last_response = {"text": full_text}
-            return self.last_response
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            self.last_response = {"text": full_text} # Update last_response here
+            return full_text if raw else self.last_response
+
 
        return for_stream() if stream else for_non_stream()
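
The [ANSWER_START] and [ANSWER_DONE] markers delimit the answer portion of heck.ai's event stream; only text between them is accumulated into the chat history. A self-contained sketch of that small state machine over simulated chunks:

    # State machine over heck.ai-style control markers, on simulated chunks.
    chunks = ["[ANSWER_START]", "Hello, ", "world!", "[ANSWER_DONE]", "ignored"]

    in_answer = False
    streaming_text = ""
    for data in chunks:
        if data == "[ANSWER_START]":
            in_answer = True
        elif data == "[ANSWER_DONE]":
            in_answer = False
        elif in_answer:
            streaming_text += data  # only text inside the markers is kept

    print(streaming_text)  # -> Hello, world!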
@@ -191,23 +224,32 @@ class HeckAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
+    ) -> Union[str, Generator[str, None, None]]: # Corrected return type hint
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
 
-        def for_non_stream():
-            return self.get_message(
-                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
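
For orientation, the updated provider is driven roughly like this (hypothetical usage, not part of the diff; it assumes webscout 8.2.4 with curl_cffi installed and heck.ai reachable):

    # Hypothetical usage of the updated provider; not part of the diff.
    from webscout.Provider.HeckAI import HeckAI

    ai = HeckAI(model="google/gemini-2.0-flash-001", timeout=30)
    # stream=True yields decoded message chunks as they arrive
    for chunk in ai.chat("Say hello in five words.", stream=True):
        print(chunk, end="", flush=True)
    print()
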
webscout/Provider/HuggingFaceChat.py
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import uuid
 import json
 import time
@@ -18,68 +19,53 @@ class HuggingFaceChat(Provider):
     """
 
     # Available models (default models - will be updated dynamically)
-    AVAILABLE_MODELS = [
-        'meta-llama/Llama-3.3-70B-Instruct',
-        'Qwen/Qwen2.5-72B-Instruct',
-        'CohereForAI/c4ai-command-r-plus-08-2024',
-        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
-        'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
-        'Qwen/QwQ-32B',
-        'Qwen/Qwen2.5-Coder-32B-Instruct',
-        'meta-llama/Llama-3.2-11B-Vision-Instruct',
-        'NousResearch/Hermes-3-Llama-3.1-8B',
-        'mistralai/Mistral-Nemo-Instruct-2407',
-        'microsoft/Phi-3.5-mini-instruct',
-        'meta-llama/Llama-3.1-8B-Instruct'
-
-    ]
+    AVAILABLE_MODELS = ['meta-llama/Llama-3.3-70B-Instruct', 'Qwen/Qwen3-235B-A22B', 'Qwen/Qwen2.5-72B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B', 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', 'Qwen/QwQ-32B', 'google/gemma-3-27b-it', 'mistralai/Mistral-Small-3.1-24B-Instruct-2503', 'Qwen/Qwen2.5-VL-32B-Instruct', 'microsoft/Phi-4', 'NousResearch/Hermes-3-Llama-3.1-8B', 'internal/task']
 
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2000,
+        max_tokens: int = 2000, # Note: max_tokens is not used by this API
         timeout: int = 60,
         filepath: str = None,
         update_file: bool = True,
         proxies: dict = {},
         model: str = "Qwen/QwQ-32B",
         cookie_path: str = "cookies.json",
-        assistantId: str = None,
+        assistantId: str = None, # Note: assistantId is not used by this API
         system_prompt: str = "You are a helpful assistant. Please answer the following question.",
     ):
         """Initialize the HuggingFaceChat client."""
         self.url = "https://huggingface.co/chat"
         self.cookie_path = cookie_path
-        self.session = requests.Session()
-        self.session.proxies.update(proxies)
-        self.assistantId = assistantId
-        self.system_prompt = system_prompt
-        # Load cookies for authentication
-        self.cookies = self.load_cookies()
-
-        # Set up headers for all requests
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Set up headers for all requests (remove those handled by impersonate)
         self.headers = {
-            "Content-Type": "application/json",
-            "User-Agent": LitAgent().random(),
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Origin": "https://huggingface.co",
-            "Referer": "https://huggingface.co/chat",
-            "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
-            "Sec-Ch-Ua-Mobile": "?0",
-            "Sec-Ch-Ua-Platform": "\"Windows\"",
-            "Sec-Fetch-Dest": "empty",
+            "Content-Type": "application/json", # Keep Content-Type for JSON posts
+            "Accept": "*/*", # Keep Accept
+            "Accept-Language": "en-US,en;q=0.9", # Keep Accept-Language
+            "Origin": "https://huggingface.co", # Keep Origin
+            "Referer": "https://huggingface.co/chat", # Keep Referer (will be updated)
+            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "DNT": "1",
-            "Priority": "u=1, i"
+            "DNT": "1", # Keep DNT
+            "Priority": "u=1, i" # Keep Priority
         }
 
-        # Apply cookies to session
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+        self.system_prompt = system_prompt
+        self.assistantId = assistantId or None # Generate a new UUID if not provided
+        # Load cookies for authentication
+        self.cookies = self.load_cookies()
+        # Apply cookies to curl_cffi session
         if self.cookies:
-            self.session.cookies.update(self.cookies)
-
+            for name, value in self.cookies.items():
+                # Set cookies on the session object
+                self.session.cookies.set(name, value, domain="huggingface.co") # Specify domain if needed
+
         # Update available models
         self.update_available_models()
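
Cookie handling changes shape as well: instead of merging a dict via requests' cookies.update(), each cookie is set individually with an explicit domain. A sketch, assuming cookies.json uses the common browser-export format of a list of objects with "name" and "value" keys:

    # Sketch of cookie loading; assumes the browser-export JSON format.
    import json
    from curl_cffi.requests import Session

    session = Session()
    with open("cookies.json") as f:
        cookies = {c["name"]: c["value"] for c in json.load(f)}  # assumed format

    for name, value in cookies.items():
        # scoping each cookie to the site mirrors the diff's approach
        session.cookies.set(name, value, domain="huggingface.co")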
@@ -112,7 +98,13 @@ class HuggingFaceChat(Provider):
     def get_models(cls):
         """Fetch available models from HuggingFace."""
         try:
-            response = requests.get("https://huggingface.co/chat")
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            response = temp_session.get(
+                "https://huggingface.co/chat",
+                impersonate="chrome110" # Use impersonate for fetching
+            )
+            response.raise_for_status()
             text = response.text
             models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
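
get_models() scrapes the model list out of the JavaScript state inlined in the chat page rather than calling a JSON API, which is why a regex and a static fallback list are needed. A sketch of the extraction against a canned page fragment (simplified to valid JSON so json.loads succeeds):

    # Sketch of the models extraction against a canned page fragment.
    import json
    import re

    # stand-in for the HTML of https://huggingface.co/chat
    text = 'models:[{"id": "Qwen/QwQ-32B"}, {"id": "microsoft/Phi-4"}],oldModels:[]'

    models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
    if models_match:
        models_data = json.loads(models_match.group(1))
        print([m["id"] for m in models_data])  # ['Qwen/QwQ-32B', 'microsoft/Phi-4']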
@@ -131,7 +123,7 @@ class HuggingFaceChat(Provider):
             models_data = json.loads(models_text)
             # print([model["id"] for model in models_data])
             return [model["id"] for model in models_data]
-        except Exception:
+        except (CurlError, Exception): # Catch CurlError and other exceptions
             return cls.AVAILABLE_MODELS
 
     def load_cookies(self):
@@ -163,7 +155,13 @@ class HuggingFaceChat(Provider):
         headers["Referer"] = f"https://huggingface.co/chat/models/{model}"
 
         try:
-            response = self.session.post(url, json=payload, headers=headers)
+            # Use curl_cffi session post with impersonate
+            response = self.session.post(
+                url,
+                json=payload,
+                headers=headers, # Use updated headers with specific Referer
+                impersonate="chrome110" # Use a common impersonation profile
+            )
 
             if response.status_code == 401:
                 raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
@@ -182,19 +180,22 @@ class HuggingFaceChat(Provider):
                 "messageId": str(uuid.uuid4()) # Initial message ID
             }
 
-            # Update cookies if needed
-            if 'hf-chat' in response.cookies:
-                self.cookies["hf-chat"] = response.cookies['hf-chat']
-
             return conversation_id
-        except requests.exceptions.RequestException:
+        except CurlError as e: # Catch CurlError
+            # Log or handle CurlError specifically if needed
+            return None
+        except Exception: # Catch other potential exceptions (like JSONDecodeError, HTTPError)
             return None
 
     def fetch_message_id(self, conversation_id: str) -> str:
         """Fetch the latest message ID for a conversation."""
         try:
             url = f"https://huggingface.co/chat/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
-            response = self.session.get(url, headers=self.headers)
+            response = self.session.get(
+                url,
+                headers=self.headers, # Use base headers
+                impersonate="chrome110" # Use a common impersonation profile
+            )
             response.raise_for_status()
 
             # Parse the JSON data from the response
@@ -224,7 +225,9 @@ class HuggingFaceChat(Provider):
 
             return message_id
 
-        except Exception:
+        except CurlError: # Catch CurlError
+            return str(uuid.uuid4()) # Fallback on CurlError
+        except Exception: # Catch other potential exceptions
             # Fall back to a UUID if there's an error
             return str(uuid.uuid4())
 
@@ -299,10 +302,10 @@ class HuggingFaceChat(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
+        optimizer: str = None, # Note: optimizer is not used by this API
+        conversationally: bool = False, # Note: conversationally is not used by this API
         web_search: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
         """Send a message to the HuggingFace Chat API"""
@@ -332,13 +335,9 @@ class HuggingFaceChat(Provider):
             "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
         }
 
-        # Update headers for this specific request
-        headers = self.headers.copy()
-        headers["Referer"] = f"https://huggingface.co/chat/conversation/{conversation_id}"
-
         # Create multipart form data
         boundary = self.generate_boundary()
-        multipart_headers = headers.copy()
+        multipart_headers = self.headers.copy()
         multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
 
         # Serialize the data to JSON
@@ -358,35 +357,38 @@ class HuggingFaceChat(Provider):
             # Try with multipart/form-data first
             response = None
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     url,
                     data=body,
-                    headers=multipart_headers,
+                    headers=multipart_headers, # Use multipart headers
                     stream=True,
-                    timeout=self.timeout
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-            except requests.exceptions.RequestException:
-                pass
-
+                response.raise_for_status() # Check status after potential error
+            except (CurlError, exceptions.FailedToGenerateResponseError, Exception): # Catch potential errors
+                response = None # Ensure response is None if multipart fails
+
             # If multipart fails or returns error, try with regular JSON
             if not response or response.status_code != 200:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     url,
-                    json=request_data,
-                    headers=headers,
+                    json=request_data, # Use JSON payload
+                    headers=self.headers, # Use class-defined headers
                     stream=True,
-                    timeout=self.timeout
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
 
-                # If both methods fail, raise exception
-                if response.status_code != 200:
-                    raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                response.raise_for_status() # Check status after potential fallback
 
-            # Process the streaming response
+            # Process the streaming response (iter_lines works with curl_cffi)
             yield from self.process_response(response, prompt)
 
-        except Exception as e:
-            if isinstance(e, requests.exceptions.RequestException):
+        except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
+            if isinstance(e):
                 if hasattr(e, 'response') and e.response is not None:
                     status_code = e.response.status_code
                     if status_code == 401:
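
The request path tries multipart/form-data first and falls back to a plain JSON POST when the first attempt errors or returns a non-200, which is why response is reset to None in the except branch. A distilled sketch of that shape; post_multipart and post_json are hypothetical stand-ins for the two session.post calls above:

    # Distilled fallback shape; post_multipart/post_json are hypothetical
    # stand-ins for the two session.post(...) calls in the hunk above.
    def send_with_fallback(post_multipart, post_json):
        response = None
        try:
            response = post_multipart()
            response.raise_for_status()
        except Exception:
            response = None  # force the JSON fallback path
        if response is None or response.status_code != 200:
            response = post_json()
            response.raise_for_status()
        return response
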
@@ -407,15 +409,29 @@ class HuggingFaceChat(Provider):
                     return
 
             # If we get here, all models failed
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e
+
 
         def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
            response_text = ""
-            for response in for_stream():
-                if "text" in response:
-                    response_text += response["text"]
-            self.last_response = {"text": response_text}
-            return self.last_response
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within process_response called by for_stream
+            # Return the final aggregated response dict or raw string
+            return response_text if raw else {"text": response_text} # Return dict for consistency
+
 
        return for_stream() if stream else for_non_stream()
 
@@ -423,40 +439,31 @@ class HuggingFaceChat(Provider):
         self,
         prompt: str,
         stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
+        optimizer: str = None, # Note: optimizer is not used by this API
+        conversationally: bool = False, # Note: conversationally is not used by this API
         web_search: bool = False
     ) -> Union[str, Generator]:
         """Generate a response to a prompt"""
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-            ):
-                yield self.get_message(response)
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally, web_search=web_search
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
 
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-                )
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally, web_search=web_search
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Extract message text from response"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response.get("text", "")
-
-if __name__ == "__main__":
-    # Simple test code
-    from rich import print
-
-    try:
-        ai = HuggingFaceChat(cookie_path="cookies.json", system_prompt="You are a helpful assistant. Please answer the following question.")
-        response = ai.chat("how many r in strawberry", stream=True, web_search=False)
-        for chunk in response:
-            print(chunk, end="", flush=True)
-        print()
-    except Exception as e:
-        print(f"An error occurred: {e}")
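
With the in-file smoke test removed, an external check would look roughly like this (hypothetical usage, not part of the diff; it assumes a valid cookies.json exported from a logged-in huggingface.co session):

    # Hypothetical replacement for the removed smoke test; not part of the diff.
    from curl_cffi import CurlError
    from webscout.Provider.HuggingFaceChat import HuggingFaceChat

    try:
        ai = HuggingFaceChat(model="Qwen/QwQ-32B", cookie_path="cookies.json")
        for chunk in ai.chat("how many r in strawberry", stream=True):
            print(chunk, end="", flush=True)
        print()
    except CurlError as e:
        print(f"Transport error: {e}")
    except Exception as e:
        print(f"An error occurred: {e}")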