webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl

This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registry.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
@@ -1,12 +1,12 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
-import os
 from typing import Any, Dict, Optional, Generator, Union

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
+from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
 class TwoAI(Provider):
@@ -47,9 +47,11 @@ class TwoAI(Provider):
             'Referer': 'https://api.two.app/'
         }

-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.proxies = proxies

         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
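
Note: the requests-to-curl_cffi swap shown here recurs across most providers in this release: build a curl_cffi Session, copy the headers onto it, and assign proxies directly instead of updating them. A minimal sketch of the pattern outside webscout (the URL is a placeholder, not part of the package):

    from curl_cffi.requests import Session

    session = Session()
    session.headers.update({"User-Agent": "Mozilla/5.0"})
    # curl_cffi exposes proxies as a plain attribute, hence the
    # assignment replacing requests' proxies.update(...) idiom.
    session.proxies = {}

    # impersonate makes curl present a real browser's TLS fingerprint,
    # which is the main motivation for this migration.
    response = session.get("https://example.com", impersonate="chrome110")
    print(response.status_code)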
@@ -90,9 +92,7 @@ class TwoAI(Provider):
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

@@ -111,39 +111,65 @@ class TwoAI(Provider):

         def for_stream():
             try:
-                with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                    streaming_text = ""
-                    for line in response.iter_lines(decode_unicode=True):
-                        if line:
-                            try:
-                                chunk = json.loads(line)
-                                if chunk["typeName"] == "LLMChunk":
-                                    content = chunk["content"]
-                                    streaming_text += content
-                                    resp = dict(text=content)
-                                    yield resp if raw else resp
-                            except json.JSONDecodeError:
-                                continue
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"  # Add impersonate
+                )
+
+                if response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code} - {response.text}"
+                    )

-                    self.last_response = {"text": streaming_text}
-                    self.conversation.update_chat_history(prompt, streaming_text)
+                streaming_text = ""
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8')  # Decode bytes
+                            chunk = json.loads(line)
+                            if chunk.get("typeName") == "LLMChunk":  # Use .get for safety
+                                content = chunk.get("content", "")  # Use .get for safety
+                                streaming_text += content
+                                resp = dict(text=content)
+                                # Yield dict or raw string
+                                yield resp if raw else resp
+                        except json.JSONDecodeError:
+                            continue
+                        except UnicodeDecodeError:
+                            continue
+
+                # Update history and last response after stream finishes
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)

-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:  # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

         def for_non_stream():
+            # Non-stream requests might not work the same way if the API expects streaming.
+            # This implementation aggregates the stream.
             streaming_text = ""
-            for resp in for_stream():
-                streaming_text += resp["text"]
-            self.last_response = {"text": streaming_text}
-            return self.last_response
+            # Iterate through the generator provided by for_stream
+            for chunk_data in for_stream():
+                # Check if chunk_data is a dict (not raw) and has 'text'
+                if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    streaming_text += chunk_data["text"]
+                # If raw=True, chunk_data is the string content itself
+                elif isinstance(chunk_data, str):
+                    streaming_text += chunk_data
+            # last_response and history are updated within for_stream
+            return self.last_response  # Return the final aggregated response

-        return for_stream() if stream else for_non_stream()
+        # Ensure stream defaults to True if not provided, matching original behavior
+        effective_stream = stream if stream is not None else True
+        return for_stream() if effective_stream else for_non_stream()

     def chat(
         self,
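
Note: unlike requests, curl_cffi's iter_lines() yields raw bytes, which is why the new loop decodes each line before json.loads and skips anything malformed. A self-contained sketch of that decode-then-parse step over canned input (the LLMChunk shape is copied from the hunk above; the sample lines are invented):

    import json

    lines = [
        b'{"typeName": "LLMChunk", "content": "Hello"}',
        b'not json',  # malformed lines are silently skipped
        b'{"typeName": "LLMChunk", "content": " world"}',
    ]

    streaming_text = ""
    for line_bytes in lines:
        try:
            chunk = json.loads(line_bytes.decode("utf-8"))
        except (UnicodeDecodeError, json.JSONDecodeError):
            continue  # tolerate non-JSON keep-alive or partial lines
        if chunk.get("typeName") == "LLMChunk":
            streaming_text += chunk.get("content", "")

    print(streaming_text)  # -> Hello world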
@@ -154,30 +180,35 @@ class TwoAI(Provider):
         online_search: bool = True,
         reasoning_on: bool = False,
     ) -> str:
-        def for_stream():
-            for response in self.ask(
+        # Ensure stream defaults to True if not provided
+        effective_stream = stream if stream is not None else True
+
+        def for_stream_chat():
+            # ask() yields dicts when raw=False
+            for response_dict in self.ask(
                 prompt,
-                True,
+                stream=True,
+                raw=False,  # Ensure ask yields dicts
                 optimizer=optimizer,
                 conversationally=conversationally,
                 online_search=online_search,
                 reasoning_on=reasoning_on
             ):
-                yield self.get_message(response)
+                yield self.get_message(response_dict)

-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                    online_search=online_search,
-                    reasoning_on=reasoning_on
-                )
+        def for_non_stream_chat():
+            # ask() returns a dict when stream=False
+            response_dict = self.ask(
+                prompt,
+                stream=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                online_search=online_search,
+                reasoning_on=reasoning_on
             )
+            return self.get_message(response_dict)

-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if effective_stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
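
Note: the reworked chat()/ask() split relies on a convention visible in this hunk: with raw=False the generator yields dicts carrying a "text" key, with raw=True it yields plain strings. A small sketch of the defensive aggregation the non-stream path performs under that convention (names are illustrative, not webscout API):

    from typing import Dict, Iterator, Union

    def aggregate(chunks: Iterator[Union[Dict[str, str], str]]) -> str:
        """Concatenate text from a generator that may yield dicts or strings."""
        text = ""
        for chunk in chunks:
            if isinstance(chunk, dict) and "text" in chunk:
                text += chunk["text"]  # raw=False: dict payloads
            elif isinstance(chunk, str):
                text += chunk          # raw=True: plain strings
        return text

    print(aggregate(iter([{"text": "666+444"}, " = ", {"text": "1110"}])))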
@@ -186,14 +217,32 @@ class TwoAI(Provider):
 if __name__ == "__main__":
     from rich import print

-    api_key = ""
-
-    ai = TwoAI(
-        api_key=api_key,
-        timeout=60,
-        system_message="You are an intelligent AI assistant. Be concise and helpful."
-    )
+    api_key = ""  # Add your API key here or load from env

-    response = ai.chat("666+444=?", stream=True, reasoning_on=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    try:  # Add try-except block for testing
+        ai = TwoAI(
+            api_key=api_key,
+            timeout=60,
+            system_message="You are an intelligent AI assistant. Be concise and helpful."
+        )
+
+        print("[bold blue]Testing Stream:[/bold blue]")
+        response_stream = ai.chat("666+444=?", stream=True, reasoning_on=True)
+        full_stream_response = ""
+        for chunk in response_stream:
+            print(chunk, end="", flush=True)
+            full_stream_response += chunk
+        print("\n[bold green]Stream Test Complete.[/bold green]\n")
+
+        # Optional: Test non-stream
+        # print("[bold blue]Testing Non-Stream:[/bold blue]")
+        # response_non_stream = ai.chat("What is the capital of France?", stream=False)
+        # print(response_non_stream)
+        # print("[bold green]Non-Stream Test Complete.[/bold green]")
+
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except ValueError as e:
+        print(f"\n[bold red]Configuration Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
@@ -1,11 +1,13 @@
-import requests
+from curl_cffi import CurlError
+from curl_cffi.requests import Session  # Import Session
 import json
 from typing import Generator, Dict, Any, List, Union
 from uuid import uuid4
+import random

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -17,6 +19,7 @@ class Venice(Provider):

     AVAILABLE_MODELS = [
         "mistral-31-24b",
+        "dolphin-3.0-mistral-24b",
         "llama-3.2-3b-akash",
         "qwen2dot5-coder-32b",
         "deepseek-coder-v2-lite",
@@ -28,23 +31,26 @@ class Venice(Provider):
         is_conversation: bool = True,
         max_tokens: int = 2000,
         timeout: int = 30,
-        temperature: float = 0.8,
-        top_p: float = 0.9,
+        temperature: float = 0.8,  # Keep temperature, user might want to adjust
+        top_p: float = 0.9,  # Keep top_p
         intro: str = None,
         filepath: str = None,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "llama-3.3-70b",
-        system_prompt: str = "You are a helpful AI assistant."
+        model: str = "mistral-31-24b",
+        # System prompt is empty in the example, but keep it configurable
+        system_prompt: str = ""
     ):
         """Initialize Venice AI client"""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        self.api_endpoint = "https://venice.ai/api/inference/chat"
-        self.session = requests.Session()
+        # Update API endpoint
+        self.api_endpoint = "https://outerface.venice.ai/api/inference/chat"
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.temperature = temperature
@@ -54,22 +60,29 @@ class Venice(Provider):
         self.system_prompt = system_prompt
         self.last_response = {}

-        # Headers for the request
+        # Update Headers based on successful request
         self.headers = {
-            "User-Agent": LitAgent().random(),
+            "User-Agent": LitAgent().random(),  # Keep using LitAgent
             "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
+            "accept-language": "en-US,en;q=0.9",  # Keep existing
             "content-type": "application/json",
             "origin": "https://venice.ai",
-            "referer": "https://venice.ai/chat/",
-            "sec-ch-ua": '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
+            "referer": "https://venice.ai/",  # Update referer
+            # Update sec-ch-ua to match example
+            "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin"
+            # Update sec-fetch-site to match example
+            "sec-fetch-site": "same-site",
+            # Add missing headers from example
+            "priority": "u=1, i",
+            "sec-gpc": "1",
+            "x-venice-version": "interface@20250424.065523+50bac27"  # Add version header
         }
-
+
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)

@@ -108,64 +121,87 @@ class Venice(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

-        # Payload construction
+        # Update Payload construction based on successful request
         payload = {
-            "requestId": str(uuid4())[:7],
+            "requestId": str(uuid4())[:7],  # Keep generating request ID
             "modelId": self.model,
             "prompt": [{"content": conversation_prompt, "role": "user"}],
-            "systemPrompt": self.system_prompt,
+            "systemPrompt": self.system_prompt,  # Use configured system prompt
             "conversationType": "text",
-            "temperature": self.temperature,
-            "webEnabled": True,
-            "topP": self.top_p,
-            "includeVeniceSystemPrompt": False,
-            "isCharacter": False,
-            "clientProcessingTime": 2000
+            "temperature": self.temperature,  # Use configured temperature
+            "webEnabled": True,  # Keep webEnabled
+            "topP": self.top_p,  # Use configured topP
+            "includeVeniceSystemPrompt": True,  # Set to True as per example
+            "isCharacter": False,  # Keep as False
+            # Add missing fields from example payload
+            "userId": "user_anon_" + str(random.randint(1000000000, 9999999999)),  # Generate anon user ID
+            "isDefault": True,
+            "textToSpeech": {"voiceId": "af_sky", "speed": 1},
+            "clientProcessingTime": random.randint(10, 50)  # Randomize slightly
         }

         def for_stream():
             try:
-                with self.session.post(
+                # Use curl_cffi session post
+                response = self.session.post(
                     self.api_endpoint,
                     json=payload,
                     stream=True,
-                    timeout=self.timeout
-                ) as response:
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
+                    timeout=self.timeout,
+                    impersonate="edge101"  # Match impersonation closer to headers
+                )
+                # Check response status after the call
+                if response.status_code != 200:
+                    # Include response text in error
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code} - {response.text}"
+                    )

-                    streaming_text = ""
-                    for line in response.iter_lines():
-                        if not line:
-                            continue
-
-                        try:
-                            # Decode bytes to string
-                            line_data = line.decode('utf-8').strip()
-                            if '"kind":"content"' in line_data:
-                                data = json.loads(line_data)
-                                if 'content' in data:
-                                    content = data['content']
-                                    streaming_text += content
-                                    resp = dict(text=content)
-                                    yield resp if raw else resp
-                        except json.JSONDecodeError:
-                            continue
-                        except UnicodeDecodeError:
-                            continue
+                streaming_text = ""
+                # Iterate over bytes and decode manually
+                for line in response.iter_lines():  # Removed decode_unicode
+                    if not line:
+                        continue

-                    self.conversation.update_chat_history(prompt, streaming_text)
+                    try:
+                        # Decode bytes to string
+                        line_data = line.decode('utf-8').strip()
+                        if '"kind":"content"' in line_data:
+                            data = json.loads(line_data)
+                            if 'content' in data:
+                                content = data['content']
+                                streaming_text += content
+                                resp = dict(text=content)
+                                # Yield content or dict based on raw flag
+                                yield content if raw else resp
+                    except json.JSONDecodeError:
+                        continue
+                    except UnicodeDecodeError:
+                        continue
+
+                # Update history and last response after stream finishes
+                self.conversation.update_chat_history(prompt, streaming_text)
+                self.last_response = {"text": streaming_text}

-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            # Catch requests.exceptions.RequestException if needed, but CurlError is primary for curl_cffi
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

         def for_non_stream():
             full_text = ""
-            for chunk in for_stream():
-                full_text += chunk["text"]
-            return {"text": full_text}
+            # Iterate through the generator provided by for_stream
+            for chunk_data in for_stream():
+                # Check if chunk_data is a dict (not raw) and has 'text'
+                if isinstance(chunk_data, dict) and "text" in chunk_data:
+                    full_text += chunk_data["text"]
+                # If raw=True, chunk_data is the string content itself
+                elif isinstance(chunk_data, str):
+                    full_text += chunk_data
+            # Update last_response after aggregation
+            self.last_response = {"text": full_text}
+            return self.last_response

         return for_stream() if stream else for_non_stream()

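
Note: the Venice stream interleaves several line kinds, and only JSON lines whose "kind" is "content" carry text; the cheap substring test before json.loads avoids parsing the rest. A standalone sketch over canned lines (the status line is invented for illustration):

    import json

    lines = [
        b'{"kind":"status","state":"start"}',
        b'{"kind":"content","content":"Bonjour"}',
        b'{"kind":"content","content":" !"}',
    ]

    full_text = ""
    for raw_line in lines:
        line = raw_line.decode("utf-8").strip()
        if '"kind":"content"' not in line:
            continue  # skip status/metadata frames cheaply
        data = json.loads(line)
        full_text += data.get("content", "")

    print(full_text)  # -> Bonjour !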
@@ -190,6 +226,7 @@ class Venice(Provider):
         return response["text"]

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -1,6 +1,6 @@
 import re
 import time
-import requests
+from curl_cffi import requests
 import json
 from typing import Union, Any, Dict, Generator, Optional
 import uuid
@@ -231,4 +231,4 @@ if __name__ == "__main__":
                     display_text = "Empty or invalid response"
                 print(f"\r{model:<50} {status:<10} {display_text}")
             except Exception as e:
-                print(f"\r{model:<50} {'✗':<10} {str(e)}")
+                print(f"\r{model:<50} {'✗':<10} {str(e)}")
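
Note: this lighter two-line migration works because curl_cffi ships a requests-compatible module, so the import swap is close to a drop-in change. A sketch of the swap in isolation (placeholder URL):

    # Before: import requests
    from curl_cffi import requests  # same call-site API, different engine

    # Existing call sites keep working; browser TLS fingerprinting can be
    # opted into per request via the extra impersonate argument.
    resp = requests.get("https://example.com", impersonate="chrome")
    print(resp.status_code)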
@@ -1,11 +1,12 @@
 import re
-import requests
 import json
 from typing import Union, Any, Dict, Generator, Optional
+from curl_cffi import CurlError
+from curl_cffi.requests import Session

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -18,12 +19,11 @@ class WiseCat(Provider):

     AVAILABLE_MODELS = [
         "chat-model-small",
-        "chat-model-large",
+        # "chat-model-large",  # >>> NOT WORKING <<<
         "chat-model-reasoning",
     ]

-    def __init__(
-        self,
+    def __init__(self,
         is_conversation: bool = True,
         max_tokens: int = 600,
         timeout: int = 30,
@@ -41,17 +41,21 @@ class WiseCat(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://wise-cat-groq.vercel.app/api/chat"
-        self.stream_chunk_size = 64
+        # stream_chunk_size is not directly applicable to curl_cffi iter_lines
+        # self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
         self.litagent = LitAgent()
-        self.headers = self.litagent.generate_fingerprint()
+        # Generate headers using LitAgent, but apply them to the curl_cffi session
+        self.headers = self.litagent.generate_fingerprint()
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.session.proxies = proxies

@@ -72,6 +76,17 @@ class WiseCat(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _wisecat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the WiseCat stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"', chunk)
+            if match:
+                # Decode potential unicode escapes like \u00e9
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')  # Handle escaped backslashes and quotes
+        return None
+
     def ask(
         self,
         prompt: str,
108
123
  }
109
124
 
110
125
  def for_stream():
111
- response = self.session.post(
112
- self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
113
- )
114
- if not response.ok:
115
- error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
116
- raise exceptions.FailedToGenerateResponseError(error_msg)
117
-
118
- streaming_response = ""
119
- for line in response.iter_lines(decode_unicode=True):
120
- if line:
121
- match = re.search(r'0:"(.*?)"', line)
122
- if match:
123
- content = match.group(1)
124
- streaming_response += content
125
- yield content if raw else dict(text=content)
126
- self.last_response.update(dict(text=streaming_response))
127
- self.conversation.update_chat_history(
128
- prompt, self.get_message(self.last_response)
129
- )
126
+ try: # Add try block for CurlError
127
+ # Use curl_cffi session post with impersonate
128
+ response = self.session.post(
129
+ self.api_endpoint,
130
+ headers=self.headers,
131
+ json=payload,
132
+ stream=True,
133
+ timeout=self.timeout,
134
+ impersonate="chrome120" # Add impersonate
135
+ )
136
+ if not response.ok:
137
+ error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
138
+ raise exceptions.FailedToGenerateResponseError(error_msg)
139
+
140
+ streaming_text = ""
141
+ # Use sanitize_stream with the custom extractor
142
+ processed_stream = sanitize_stream(
143
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
144
+ intro_value=None, # No simple prefix to remove here
145
+ to_json=False, # Content is not JSON
146
+ content_extractor=self._wisecat_extractor # Use the specific extractor
147
+ )
148
+
149
+ for content_chunk in processed_stream:
150
+ if content_chunk and isinstance(content_chunk, str):
151
+ streaming_text += content_chunk
152
+ yield content_chunk if raw else dict(text=content_chunk)
153
+
154
+ self.last_response.update(dict(text=streaming_text)) # Use streaming_text here
155
+ self.conversation.update_chat_history(
156
+ prompt, self.get_message(self.last_response)
157
+ )
158
+ except CurlError as e: # Catch CurlError
159
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
160
+ except Exception as e: # Catch other potential exceptions
161
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
162
+
130
163
 
131
164
  def for_non_stream():
165
+ # This function implicitly uses the updated for_stream
132
166
  for _ in for_stream():
133
167
  pass
134
168
  return self.last_response
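
Note: sanitize_stream (imported at the top of this file) centralizes the decode-and-extract loop that providers used to inline. Only its call site is visible in this diff, so the following is a simplified stand-in whose argument semantics are assumed from that call site, not webscout's actual implementation:

    import json
    import re
    from typing import Any, Callable, Iterable, Iterator, Optional

    def sanitize_stream_sketch(
        data: Iterable[bytes],
        intro_value: Optional[str] = None,   # assumed: line prefix to strip, e.g. "data: "
        to_json: bool = False,               # assumed: parse each line as JSON first
        content_extractor: Optional[Callable[[Any], Optional[str]]] = None,
    ) -> Iterator[str]:
        """Simplified stand-in for webscout's sanitize_stream (semantics assumed)."""
        buffer = b""
        for chunk in data:
            buffer += chunk
            while b"\n" in buffer:
                raw, buffer = buffer.split(b"\n", 1)
                line = raw.decode("utf-8", errors="ignore").strip()
                if not line:
                    continue
                if intro_value and line.startswith(intro_value):
                    line = line[len(intro_value):]
                item: Any = line
                if to_json:
                    try:
                        item = json.loads(line)
                    except json.JSONDecodeError:
                        continue
                text = content_extractor(item) if content_extractor else item
                if text:
                    yield text

    def extract(chunk: str) -> Optional[str]:
        m = re.search(r'0:"(.*?)"', chunk)
        return m.group(1) if m else None

    frames = [b'0:"Hel"\n0:"lo"\n', b'e:{"done":true}\n']
    print("".join(sanitize_stream_sketch(frames, content_extractor=extract)))  # -> Hello

Under those assumptions, to_json=False plus the WiseCat extractor yields exactly the text fragments the provider streams onward.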
@@ -164,9 +198,12 @@ class WiseCat(Provider):
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        # Formatting (like unicode escapes) is handled by the extractor now.
+        # Keep newline replacement if needed for display.
+        return response.get("text", "").replace('\\n', '\n').replace('\\n\\n', '\n\n')

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)