webscout 7.3-py3-none-any.whl → 7.5-py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.

Files changed (62)
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
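
Of the 62 changed files, two of the larger additions are reproduced in full below: webscout/Provider/HeckAI.py (+217) and webscout/Provider/HuggingFaceChat.py (+462). Both follow webscout's common Provider interface (ask/chat/get_message, with optional streaming). As a quick orientation, here is a minimal usage sketch; it assumes the new classes are re-exported from webscout.Provider, which the +25 -2 change to webscout/Provider/__init__.py suggests but this diff does not show, so verify the export names against the installed package:

# Hypothetical usage sketch for the new 7.5 providers.
from webscout.Provider import HeckAI  # export name assumed

ai = HeckAI(model="google/gemini-2.0-flash-001", timeout=60)

# Non-streaming: chat() returns the full reply as a string.
print(ai.chat("Hello"))

# Streaming: chat(stream=True) yields text chunks as they arrive.
for chunk in ai.chat("Hello again", stream=True):
    print(chunk, end="", flush=True)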
webscout/Provider/HeckAI.py
@@ -0,0 +1,217 @@
+ import requests
+ import json
+ import uuid
+ import sys
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class HeckAI(Provider):
+     """
+     A class to interact with the HeckAI API with LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         "deepseek/deepseek-chat",
+         "openai/gpt-4o-mini",
+         "deepseek/deepseek-r1",
+         "google/gemini-2.0-flash-001"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "google/gemini-2.0-flash-001",
+         language: str = "English"
+     ):
+         """Initializes the HeckAI API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://api.heckai.weight-wave.com/api/ha/v1/chat"
+         self.session_id = str(uuid.uuid4())
+         self.language = language
+
+         # Use LitAgent for user-agent
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Content-Type': 'application/json',
+             'Origin': 'https://heck.ai',
+             'Referer': 'https://heck.ai/',
+             'Connection': 'keep-alive'
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.previous_question = None
+         self.previous_answer = None
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "model": self.model,
+             "question": conversation_prompt,
+             "language": self.language,
+             "sessionId": self.session_id,
+             "previousQuestion": self.previous_question,
+             "previousAnswer": self.previous_answer,
+             "imgUrls": []
+         }
+
+         # Store this message as previous for next request
+         self.previous_question = conversation_prompt
+
+         def for_stream():
+             try:
+                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     in_answer = False
+
+                     for line in response.iter_lines(decode_unicode=True):
+                         if not line:
+                             continue
+
+                         # Remove "data: " prefix
+                         if line.startswith("data: "):
+                             data = line[6:]
+                         else:
+                             continue
+
+                         # Check for control markers
+                         if data == "[ANSWER_START]":
+                             in_answer = True
+                             continue
+
+                         if data == "[ANSWER_DONE]":
+                             in_answer = False
+                             continue
+
+                         if data == "[RELATE_Q_START]" or data == "[RELATE_Q_DONE]":
+                             continue
+
+                         # Process content if we're in an answer section
+                         if in_answer:
+                             streaming_text += data
+                             resp = dict(text=data)
+                             yield data if raw else resp
+
+                     self.previous_answer = streaming_text
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             full_text = ""
+             for chunk in for_stream():
+                 if isinstance(chunk, dict) and "text" in chunk:
+                     full_text += chunk["text"]
+             self.last_response = {"text": full_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     @staticmethod
+     def fix_encoding(text):
+         if isinstance(text, dict) and "text" in text:
+             try:
+                 text["text"] = text["text"].encode("latin1").decode("utf-8")
+                 return text
+             except (UnicodeError, AttributeError):
+                 return text
+         elif isinstance(text, str):
+             try:
+                 return text.encode("latin1").decode("utf-8")
+             except (UnicodeError, AttributeError):
+                 return text
+         return text
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = HeckAI(timeout=120)
+     response = ai.chat("Write a short poem about artificial intelligence", stream=False)
+     print(response)
+     # for chunk in response:
+     #     chunk = ai.fix_encoding(chunk)
+     #     print(chunk, end="", flush=True)
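
For reference, the for_stream parser in HeckAI.ask consumes a server-sent-events-style body: every payload arrives on a "data: " line, control markers toggle parser state, and only tokens between [ANSWER_START] and [ANSWER_DONE] are yielded. Based solely on the markers the code checks for, a response stream presumably looks something like this (illustrative; not captured from the live API):

data: [ANSWER_START]
data: Roses are red,
data: circuits glow bright,
data: [ANSWER_DONE]
data: [RELATE_Q_START]
data: What is artificial intelligence?
data: [RELATE_Q_DONE]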
webscout/Provider/HuggingFaceChat.py
@@ -0,0 +1,462 @@
+ import requests
+ import uuid
+ import json
+ import time
+ import random
+ import re
+ from typing import Any, Dict, List, Optional, Union, Generator
+
+ from webscout.AIutel import Conversation
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class HuggingFaceChat(Provider):
+     """
+     A class to interact with the Hugging Face Chat API.
+     Uses cookies for authentication and supports streaming responses.
+     """
+
+     # Available models (default models - will be updated dynamically)
+     AVAILABLE_MODELS = [
+         'meta-llama/Llama-3.3-70B-Instruct',
+         'Qwen/Qwen2.5-72B-Instruct',
+         'CohereForAI/c4ai-command-r-plus-08-2024',
+         'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
+         'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+         'Qwen/QwQ-32B',
+         'Qwen/Qwen2.5-Coder-32B-Instruct',
+         'meta-llama/Llama-3.2-11B-Vision-Instruct',
+         'NousResearch/Hermes-3-Llama-3.1-8B',
+         'mistralai/Mistral-Nemo-Instruct-2407',
+         'microsoft/Phi-3.5-mini-instruct',
+         'meta-llama/Llama-3.1-8B-Instruct'
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 60,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         model: str = "Qwen/QwQ-32B",
+         cookie_path: str = "cookies.json",
+         assistantId: str = None,
+         system_prompt: str = "You are a helpful assistant. Please answer the following question.",
+     ):
+         """Initialize the HuggingFaceChat client."""
+         self.url = "https://huggingface.co/chat"
+         self.cookie_path = cookie_path
+         self.session = requests.Session()
+         self.session.proxies.update(proxies)
+         self.assistantId = assistantId
+         self.system_prompt = system_prompt
+         # Load cookies for authentication
+         self.cookies = self.load_cookies()
+
+         # Set up headers for all requests
+         self.headers = {
+             "Content-Type": "application/json",
+             "User-Agent": LitAgent().random(),
+             "Accept": "*/*",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "en-US,en;q=0.9",
+             "Origin": "https://huggingface.co",
+             "Referer": "https://huggingface.co/chat",
+             "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
+             "Sec-Ch-Ua-Mobile": "?0",
+             "Sec-Ch-Ua-Platform": "\"Windows\"",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "DNT": "1",
+             "Priority": "u=1, i"
+         }
+
+         # Apply cookies to session
+         if self.cookies:
+             self.session.cookies.update(self.cookies)
+
+         # Update available models
+         self.update_available_models()
+
+         # Set default model if none provided
+         self.model = model
+
+         # Provider settings
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+
+         # Initialize a simplified conversation history for file saving only
+         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+
+         # Store conversation data for different models
+         self._conversation_data = {}
+
+     def update_available_models(self):
+         """Update the available models list from HuggingFace"""
+         try:
+             models = self.get_models()
+             if models and len(models) > 0:
+                 self.AVAILABLE_MODELS = models
+         except Exception:
+             # Fallback to default models list if fetching fails
+             pass
+
+     @classmethod
+     def get_models(cls):
+         """Fetch available models from HuggingFace."""
+         try:
+             response = requests.get("https://huggingface.co/chat")
+             text = response.text
+             models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
+
+             if not models_match:
+                 return cls.AVAILABLE_MODELS
+
+             models_text = models_match.group(1)
+             models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
+             models_text = models_text.replace('void 0', 'null')
+
+             def add_quotation_mark(match):
+                 return f'{match.group(1)}"{match.group(2)}":'
+
+             models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)
+
+             models_data = json.loads(models_text)
+             return [model["id"] for model in models_data]
+         except Exception:
+             return cls.AVAILABLE_MODELS
+
+     def load_cookies(self):
+         """Load cookies from a JSON file"""
+         try:
+             with open(self.cookie_path, 'r') as f:
+                 cookies_data = json.load(f)
+
+             # Convert the cookie list to a dictionary format for requests
+             cookies = {}
+             for cookie in cookies_data:
+                 # Only include cookies that are not expired and have a name and value
+                 if 'name' in cookie and 'value' in cookie:
+                     # Check if the cookie hasn't expired
+                     if 'expirationDate' not in cookie or cookie['expirationDate'] > time.time():
+                         cookies[cookie['name']] = cookie['value']
+
+             return cookies
+         except Exception:
+             return {}
+
+     def create_conversation(self, model: str):
+         """Create a new conversation with the specified model."""
+         url = "https://huggingface.co/chat/conversation"
+         payload = {"model": model, "assistantId": self.assistantId, "preprompt": self.system_prompt}
+
+         # Update referer for this specific request
+         headers = self.headers.copy()
+         headers["Referer"] = f"https://huggingface.co/chat/models/{model}"
+
+         try:
+             response = self.session.post(url, json=payload, headers=headers)
+
+             if response.status_code == 401:
+                 raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
+
+             # Handle other error codes
+             if response.status_code != 200:
+                 return None
+
+             data = response.json()
+             conversation_id = data.get("conversationId")
+
+             # Store conversation data
+             if model not in self._conversation_data:
+                 self._conversation_data[model] = {
+                     "conversationId": conversation_id,
+                     "messageId": str(uuid.uuid4())  # Initial message ID
+                 }
+
+             # Update cookies if needed
+             if 'hf-chat' in response.cookies:
+                 self.cookies["hf-chat"] = response.cookies['hf-chat']
+
+             return conversation_id
+         except requests.exceptions.RequestException:
+             return None
+
+     def fetch_message_id(self, conversation_id: str) -> str:
+         """Fetch the latest message ID for a conversation."""
+         try:
+             url = f"https://huggingface.co/chat/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
+             response = self.session.get(url, headers=self.headers)
+             response.raise_for_status()
+
+             # Parse the JSON data from the response
+             json_data = None
+             for line in response.text.split('\n'):
+                 if line.strip():
+                     try:
+                         parsed = json.loads(line)
+                         if isinstance(parsed, dict) and "nodes" in parsed:
+                             json_data = parsed
+                             break
+                     except json.JSONDecodeError:
+                         continue
+
+             if not json_data:
+                 # Fall back to a UUID if we can't parse the response
+                 return str(uuid.uuid4())
+
+             # Extract message ID using the same pattern as in the example
+             if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
+                 return str(uuid.uuid4())
+
+             data = json_data["nodes"][1]["data"]
+             keys = data[data[0]["messages"]]
+             message_keys = data[keys[-1]]
+             message_id = data[message_keys["id"]]
+
+             return message_id
+
+         except Exception:
+             # Fall back to a UUID if there's an error
+             return str(uuid.uuid4())
+
+     def generate_boundary(self):
+         """Generate a random boundary for multipart/form-data requests"""
+         boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+         boundary = "----WebKitFormBoundary"
+         boundary += "".join(random.choice(boundary_chars) for _ in range(16))
+         return boundary
+
+     def process_response(self, response, prompt: str):
+         """Process streaming response and extract content."""
+         full_text = ""
+         sources = None
+         reasoning_text = ""
+         has_reasoning = False
+
+         for line in response.iter_lines(decode_unicode=True):
+             if not line:
+                 continue
+
+             try:
+                 # Parse each line as JSON
+                 data = json.loads(line)
+
+                 # Handle different response types
+                 if "type" not in data:
+                     continue
+
+                 if data["type"] == "stream" and "token" in data:
+                     token = data["token"].replace("\u0000", "")
+                     full_text += token
+                     resp = {"text": token}
+                     yield resp
+                 elif data["type"] == "finalAnswer":
+                     final_text = data.get("text", "")
+                     if final_text and not full_text:
+                         full_text = final_text
+                         resp = {"text": final_text}
+                         yield resp
+                 elif data["type"] == "webSearch" and "sources" in data:
+                     sources = data["sources"]
+                 elif data["type"] == "reasoning":
+                     has_reasoning = True
+                     if data.get("subtype") == "stream" and "token" in data:
+                         reasoning_text += data["token"]
+                     # elif data.get("subtype") == "status":
+                     #     # For status updates in reasoning, we can just append them as a comment
+                     #     if data.get("status"):
+                     #         reasoning_text += f"\n# {data['status']}"
+
+                 # If we have reasoning, prepend it to the next text output
+                 if reasoning_text and not full_text:
+                     resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
+                     yield resp
+
+             except json.JSONDecodeError:
+                 continue
+
+         # Update conversation history only for saving to file if needed
+         if full_text and self.conversation.file:
+             if has_reasoning:
+                 full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
+                 self.last_response = {"text": full_text_with_reasoning}
+                 self.conversation.update_chat_history(prompt, full_text_with_reasoning)
+             else:
+                 self.last_response = {"text": full_text}
+                 self.conversation.update_chat_history(prompt, full_text)
+
+         return full_text
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """Send a message to the HuggingFace Chat API"""
+         model = self.model
+
+         # Check if we have a conversation for this model
+         if model not in self._conversation_data:
+             conversation_id = self.create_conversation(model)
+             if not conversation_id:
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
+         else:
+             conversation_id = self._conversation_data[model]["conversationId"]
+             # Refresh message ID
+             self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
+
+         url = f"https://huggingface.co/chat/conversation/{conversation_id}"
+         message_id = self._conversation_data[model]["messageId"]
+
+         # Data to send - use the prompt directly without generating a complete prompt
+         # since HuggingFace maintains conversation state internally
+         request_data = {
+             "inputs": prompt,
+             "id": message_id,
+             "is_retry": False,
+             "is_continue": False,
+             "web_search": web_search,
+             "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
+         }
+
+         # Update headers for this specific request
+         headers = self.headers.copy()
+         headers["Referer"] = f"https://huggingface.co/chat/conversation/{conversation_id}"
+
+         # Create multipart form data
+         boundary = self.generate_boundary()
+         multipart_headers = headers.copy()
+         multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
+
+         # Serialize the data to JSON
+         data_json = json.dumps(request_data, separators=(',', ':'))
+
+         # Create the multipart form data body
+         body = f"--{boundary}\r\n"
+         body += f'Content-Disposition: form-data; name="data"\r\n'
+         body += f"Content-Type: application/json\r\n\r\n"
+         body += f"{data_json}\r\n"
+         body += f"--{boundary}--\r\n"
+
+         multipart_headers["Content-Length"] = str(len(body))
+
+         def for_stream():
+             try:
+                 # Try with multipart/form-data first
+                 response = None
+                 try:
+                     response = self.session.post(
+                         url,
+                         data=body,
+                         headers=multipart_headers,
+                         stream=True,
+                         timeout=self.timeout
+                     )
+                 except requests.exceptions.RequestException:
+                     pass
+
+                 # If multipart fails or returns error, try with regular JSON
+                 if not response or response.status_code != 200:
+                     response = self.session.post(
+                         url,
+                         json=request_data,
+                         headers=headers,
+                         stream=True,
+                         timeout=self.timeout
+                     )
+
+                 # If both methods fail, raise exception
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+
+                 # Process the streaming response
+                 yield from self.process_response(response, prompt)
+
+             except Exception as e:
+                 if isinstance(e, requests.exceptions.RequestException):
+                     if hasattr(e, 'response') and e.response is not None:
+                         status_code = e.response.status_code
+                         if status_code == 401:
+                             raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
+
+                 # Try another model if current one fails
+                 if len(self.AVAILABLE_MODELS) > 1:
+                     current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
+                     next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
+                     self.model = self.AVAILABLE_MODELS[next_model_index]
+
+                     # Create new conversation with the alternate model
+                     conversation_id = self.create_conversation(self.model)
+                     if conversation_id:
+                         # Try again with the new model
+                         yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
+                                             conversationally=conversationally, web_search=web_search)
+                         return
+
+                 # If we get here, all models failed
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             response_text = ""
+             for response in for_stream():
+                 if "text" in response:
+                     response_text += response["text"]
+             self.last_response = {"text": response_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False
+     ) -> Union[str, Generator]:
+         """Generate a response to a prompt"""
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Extract message text from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ if __name__ == "__main__":
+     # Simple test code
+     from rich import print
+
+     try:
+         ai = HuggingFaceChat(cookie_path="cookies.json", system_prompt="You are a helpful assistant. Please answer the following question.")
+         response = ai.chat("how many r in strawberry", stream=True, web_search=False)
+         for chunk in response:
+             print(chunk, end="", flush=True)
+         print()
+     except Exception as e:
+         print(f"An error occurred: {e}")
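
Unlike HeckAI, HuggingFaceChat requires an authenticated session: load_cookies expects cookies.json to hold a list of cookie objects with name, value, and an optional expirationDate in epoch seconds, the shape produced by common browser cookie-export extensions. Only unexpired entries that have both a name and a value are applied to the session. A compatible file presumably looks like the following sketch; every value is a placeholder:

[
  {
    "name": "hf-chat",
    "value": "00000000-0000-0000-0000-000000000000",
    "domain": ".huggingface.co",
    "expirationDate": 1900000000
  },
  {
    "name": "token",
    "value": "hf_placeholder_session_token",
    "domain": ".huggingface.co"
  }
]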