webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; consult the registry's advisory page for more details.

Files changed (122):
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
@@ -1,7 +1,9 @@
1
1
  import re
2
- import requests, json
2
+ import json
3
+ from curl_cffi import CurlError
4
+ from curl_cffi.requests import Session
3
5
  from typing import Union, Any, Dict, Generator, Optional
4
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
6
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
5
7
  from webscout.AIbase import Provider
6
8
  from webscout import exceptions
7
9
  from webscout.litagent import LitAgent
@@ -29,27 +31,34 @@ class WritingMate(Provider):
29
31
  intro: str = None,
30
32
  filepath: str = None,
31
33
  update_file: bool = True,
34
+ proxies: dict = {}, # Added proxies parameter
35
+ history_offset: int = 10250, # Added history_offset parameter
32
36
  act: str = None,
33
37
  system_prompt: str = "You are a friendly, helpful AI assistant.",
34
38
  model: str = "gpt-4o-mini"
35
39
  ):
36
40
  self.cookies_path = cookies_path
37
- self.cookies = self._load_cookies(cookies_path)
38
- self.session = requests.Session()
41
+ # Load cookies into a dictionary for curl_cffi
42
+ self.cookies = self._load_cookies_dict(cookies_path)
43
+ # Initialize curl_cffi Session
44
+ self.session = Session()
39
45
  self.timeout = timeout
40
46
  self.system_prompt = system_prompt
41
47
  self.model = model
42
48
  if self.model not in self.AVAILABLE_MODELS:
43
49
  raise ValueError(f"Unknown model: {self.model}. Choose from {self.AVAILABLE_MODELS}")
44
50
  self.last_response = {}
51
+ self.agent = LitAgent() # Initialize LitAgent
45
52
  self.headers = {
46
53
  "Accept": "*/*",
47
54
  "Accept-Encoding": "gzip, deflate, br, zstd",
48
55
  "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
49
- "Content-Type": "text/plain;charset=UTF-8",
56
+ # Content-Type might be application/json based on body, but API expects text/plain? Keep for now.
57
+ "Content-Type": "text/plain;charset=UTF-8",
50
58
  "Origin": "https://chat.writingmate.ai",
51
59
  "Referer": "https://chat.writingmate.ai/chat",
52
- "Cookie": self.cookies,
60
+ # Remove Cookie header, pass cookies via parameter
61
+ # "Cookie": self.cookies,
53
62
  "DNT": "1",
54
63
  "sec-ch-ua": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
55
64
  "sec-ch-ua-mobile": "?0",
@@ -58,9 +67,12 @@ class WritingMate(Provider):
58
67
  "Sec-Fetch-Mode": "cors",
59
68
  "Sec-Fetch-Site": "same-origin",
60
69
  "Sec-GPC": "1",
61
- "User-Agent": LitAgent().random()
70
+ "User-Agent": self.agent.random() # Use LitAgent
62
71
  }
72
+ # Update curl_cffi session headers and proxies
63
73
  self.session.headers.update(self.headers)
74
+ self.session.proxies = proxies
75
+
64
76
  self.__available_optimizers = (
65
77
  m for m in dir(Optimizers)
66
78
  if callable(getattr(Optimizers, m)) and not m.startswith("__")
@@ -70,25 +82,51 @@ class WritingMate(Provider):
70
82
  if act else intro or Conversation.intro
71
83
  )
72
84
  self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
73
- self.conversation.history_offset = 10250
85
+ # Apply history offset
86
+ self.conversation.history_offset = history_offset
74
87
 
75
- def _load_cookies(self, path: str) -> str:
88
+ # Keep original _load_cookies if needed elsewhere, or remove
89
+ # def _load_cookies(self, path: str) -> str:
90
+ # try:
91
+ # with open(path, 'r') as f:
92
+ # data = json.load(f)
93
+ # return '; '.join(f"{c['name']}={c['value']}" for c in data)
94
+ # except (FileNotFoundError, json.JSONDecodeError):
95
+ # raise RuntimeError(f"Failed to load cookies from {path}")
96
+
97
+ # New method to load cookies as a dictionary
98
+ def _load_cookies_dict(self, path: str) -> Dict[str, str]:
76
99
  try:
77
100
  with open(path, 'r') as f:
78
101
  data = json.load(f)
79
- return '; '.join(f"{c['name']}={c['value']}" for c in data)
80
- except (FileNotFoundError, json.JSONDecodeError):
81
- raise RuntimeError(f"Failed to load cookies from {path}")
102
+ # Ensure data is a list of cookie objects
103
+ if not isinstance(data, list):
104
+ raise ValueError("Cookie file should contain a list of cookie objects.")
105
+ return {c['name']: c['value'] for c in data if 'name' in c and 'value' in c}
106
+ except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
107
+ raise RuntimeError(f"Failed to load cookies from {path}: {e}")
82
108
 
109
+ @staticmethod
110
+ def _writingmate_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
111
+ """Extracts content from the WritingMate stream format '0:"..."'."""
112
+ if isinstance(chunk, str):
113
+ # Regex to find the pattern 0:"<content>"
114
+ match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
115
+ if match:
116
+ # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
117
+ content = match.group(1).encode().decode('unicode_escape')
118
+ return content.replace('\\\\', '\\').replace('\\"', '"')
119
+ return None
83
120
 
84
121
  def ask(
85
122
  self,
86
123
  prompt: str,
87
- stream: bool = True,
124
+ stream: bool = True, # Defaulting stream to True as per original
88
125
  raw: bool = False,
89
126
  optimizer: str = None,
90
127
  conversationally: bool = False
91
128
  ) -> Union[Dict[str,Any], Generator[Any,None,None]]:
129
+ # ... existing prompt generation and optimizer logic ...
92
130
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
93
131
  if optimizer:
94
132
  if optimizer in self.__available_optimizers:
@@ -96,8 +134,10 @@ class WritingMate(Provider):
96
134
  conversation_prompt if conversationally else prompt
97
135
  )
98
136
  else:
137
+ # Use the correct exception type
99
138
  raise exceptions.FailedToGenerateResponseError(f"Unknown optimizer: {optimizer}")
100
139
 
140
+ # Body seems to be JSON, let curl_cffi handle serialization
101
141
  body = {
102
142
  "chatSettings": {
103
143
  "model": self.model,
@@ -116,82 +156,114 @@ class WritingMate(Provider):
116
156
  }
117
157
 
118
158
  def for_stream():
119
- response = self.session.post(self.api_endpoint, headers=self.headers, json=body, stream=True, timeout=self.timeout)
120
- if not response.ok:
121
- raise exceptions.FailedToGenerateResponseError(
122
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
159
+ try:
160
+ # Use curl_cffi session post, pass cookies dict
161
+ response = self.session.post(
162
+ self.api_endpoint,
163
+ headers=self.headers,
164
+ cookies=self.cookies, # Pass cookies dict
165
+ json=body, # Pass body as json
166
+ stream=True,
167
+ timeout=self.timeout,
168
+ impersonate="chrome120" # Add impersonate
169
+ # http_version=CurlHttpVersion.V1_1 # Add if HTTP/2 errors occur
123
170
  )
124
- streaming_response = ""
125
- for line in response.iter_lines(decode_unicode=True):
126
- if line:
127
- match = re.search(r'0:"(.*?)"', line)
128
- if match:
129
- content = match.group(1)
130
- streaming_response += content
131
- yield content if raw else dict(text=content)
132
- self.last_response.update(dict(text=streaming_response))
133
- self.conversation.update_chat_history(
134
- prompt, self.get_message(self.last_response)
135
- )
171
+ if not response.ok:
172
+ raise exceptions.FailedToGenerateResponseError(
173
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
174
+ )
175
+ streaming_text = ""
176
+ # Use sanitize_stream with the custom extractor
177
+ processed_stream = sanitize_stream(
178
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
179
+ intro_value=None, # No simple prefix
180
+ to_json=False, # Content is not JSON
181
+ content_extractor=self._writingmate_extractor # Use the specific extractor
182
+ )
183
+
184
+ for content_chunk in processed_stream:
185
+ if content_chunk and isinstance(content_chunk, str):
186
+ streaming_text += content_chunk
187
+ yield content_chunk if raw else dict(text=content_chunk)
188
+
189
+ self.last_response.update(dict(text=streaming_text))
190
+ self.conversation.update_chat_history(
191
+ prompt, self.get_message(self.last_response)
192
+ )
193
+ except CurlError as e: # Catch CurlError
194
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
195
+ except Exception as e: # Catch other potential exceptions
196
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
136
197
 
137
198
  def for_non_stream():
199
+ # This function implicitly uses the updated for_stream
138
200
  for _ in for_stream():
139
201
  pass
140
202
  return self.last_response
141
203
 
142
- return for_stream() if stream else for_non_stream()
204
+ # Ensure stream defaults to True if not provided, matching original behavior
205
+ effective_stream = stream if stream is not None else True
206
+ return for_stream() if effective_stream else for_non_stream()
143
207
 
144
208
  def chat(
145
209
  self,
146
210
  prompt: str,
147
- stream: bool = False,
211
+ stream: bool = False, # Default stream to False as per original chat method
148
212
  optimizer: str = None,
149
213
  conversationally: bool = False
150
214
  ) -> Union[str, Generator[str,None,None]]:
151
215
  if stream:
152
- # yield raw SSE lines
153
- def raw_stream():
154
- for line in self.ask(
155
- prompt, stream=True, raw=True,
216
+ # yield decoded text chunks
217
+ def text_stream():
218
+ # Call ask with stream=True, raw=False to get dicts
219
+ for response_dict in self.ask(
220
+ prompt, stream=True, raw=False,
156
221
  optimizer=optimizer, conversationally=conversationally
157
222
  ):
158
- yield line
159
- return raw_stream()
160
- # non‐stream: return aggregated text
161
- return self.get_message(
162
- self.ask(
223
+ # Extract text from dict
224
+ yield self.get_message(response_dict)
225
+ return text_stream()
226
+ else: # non‐stream: return aggregated text
227
+ # Call ask with stream=False, raw=False
228
+ response_data = self.ask(
163
229
  prompt,
164
- False,
230
+ stream=False,
165
231
  raw=False,
166
232
  optimizer=optimizer,
167
233
  conversationally=conversationally,
168
234
  )
169
- )
170
-
171
- def get_message(self, response: dict) -> str:
172
- """
173
- Extracts the message from the API response.
174
-
175
- Args:
176
- response (dict): The API response.
235
+ # Ensure response_data is a dict before passing to get_message
236
+ if isinstance(response_data, dict):
237
+ return self.get_message(response_data)
238
+ else:
239
+ # Handle unexpected generator case if ask(stream=False) behaves differently
240
+ # This part might need adjustment based on actual behavior
241
+ full_text = "".join(self.get_message(chunk) for chunk in response_data if isinstance(chunk, dict))
242
+ return full_text
177
243
 
178
- Returns:
179
- str: The message content.
180
244
 
181
- Examples:
182
- >>> ai = X0GPT()
183
- >>> response = ai.ask("Tell me a joke!")
184
- >>> message = ai.get_message(response)
185
- >>> print(message)
186
- 'Why did the scarecrow win an award? Because he was outstanding in his field!'
187
- """
245
+ def get_message(self, response: dict) -> str:
188
246
  assert isinstance(response, dict), "Response should be of dict data-type only"
189
- formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
247
+ # Ensure text exists before processing
248
+ # Formatting is now mostly handled by the extractor
249
+ text = response.get("text", "")
250
+ formatted_text = text # Keep newline replacement if needed: .replace('\\n', '\n')
190
251
  return formatted_text
191
252
 
192
253
  if __name__ == "__main__":
193
254
  from rich import print
194
- ai = WritingMate(cookies_path="cookies.json")
195
- response = ai.chat(input(">>> "), stream=True)
196
- for chunk in response:
197
- print(chunk, end="", flush=True)
255
+ try:
256
+ ai = WritingMate(cookies_path="cookies.json", proxies={}, timeout=120) # Example with proxies and timeout
257
+ # Get input within the try block
258
+ user_input = input(">>> ")
259
+ response = ai.chat(user_input, stream=True)
260
+ print("[bold green]Assistant:[/bold green]")
261
+ for chunk in response:
262
+ print(chunk, end="", flush=True)
263
+ print() # Add a newline at the end
264
+ except RuntimeError as e:
265
+ print(f"[bold red]Error initializing WritingMate:[/bold red] {e}")
266
+ except exceptions.FailedToGenerateResponseError as e:
267
+ print(f"[bold red]Error during chat:[/bold red] {e}")
268
+ except Exception as e:
269
+ print(f"[bold red]An unexpected error occurred:[/bold red] {e}")
@@ -1,6 +1,5 @@
1
1
  # webscout/providers/__init__.py
2
2
  from .PI import *
3
- from .Llama import LLAMA
4
3
  from .Cohere import Cohere
5
4
  from .Reka import REKA
6
5
  from .Groq import GROQ
@@ -10,8 +9,6 @@ from .Openai import AsyncOPENAI
10
9
  from .Koboldai import KOBOLDAI
11
10
  from .Koboldai import AsyncKOBOLDAI
12
11
  from .Blackboxai import BLACKBOXAI
13
- from .Phind import PhindSearch
14
- from .Phind import Phindv2
15
12
  from .ai4chat import *
16
13
  from .Gemini import GEMINI
17
14
  from .Deepinfra import DeepInfra
@@ -31,26 +28,20 @@ from .turboseek import *
31
28
  from .Free2GPT import *
32
29
  from .TeachAnything import *
33
30
  from .AI21 import *
34
- from .Chatify import *
35
31
  from .x0gpt import *
36
32
  from .cerebras import *
37
- from .lepton import *
38
33
  from .geminiapi import *
39
34
  from .elmo import *
40
35
  from .GPTWeb import *
41
36
  from .Netwrck import Netwrck
42
- from .llamatutor import *
43
37
  from .promptrefine import *
44
38
  from .tutorai import *
45
- from .ChatGPTES import *
46
39
  from .bagoodex import *
47
40
  from .aimathgpt import *
48
- from .gaurish import *
49
41
  from .geminiprorealtime import *
50
42
  from .llmchat import *
51
43
  from .llmchatco import LLMChatCo # Add new LLMChat.co provider
52
44
  from .talkai import *
53
- from .askmyai import *
54
45
  from .llama3mitril import *
55
46
  from .Marcus import *
56
47
  from .typegpt import *
@@ -64,7 +55,6 @@ from .ChatGPTGratis import *
64
55
  from .QwenLM import *
65
56
  from .granite import *
66
57
  from .WiseCat import *
67
- from .DeepSeek import *
68
58
  from .freeaichat import FreeAIChat
69
59
  from .akashgpt import *
70
60
  from .Perplexitylabs import *
@@ -76,11 +66,8 @@ from .ElectronHub import *
76
66
  from .HuggingFaceChat import *
77
67
  from .GithubChat import *
78
68
  from .copilot import *
79
- from .C4ai import *
80
69
  from .sonus import *
81
70
  from .uncovr import *
82
- from .labyrinth import *
83
- from .WebSim import *
84
71
  from .LambdaChat import *
85
72
  from .ChatGPTClone import *
86
73
  from .VercelAI import *
@@ -96,14 +83,17 @@ from .Writecream import Writecream
96
83
  from .toolbaz import Toolbaz
97
84
  from .scnet import SCNet
98
85
  from .WritingMate import WritingMate
86
+ from .MCPCore import MCPCore
87
+ from .TypliAI import TypliAI
88
+ from .ChatSandbox import ChatSandbox
89
+ from .GizAI import GizAI
99
90
  __all__ = [
100
- 'LLAMA',
101
91
  'SCNet',
92
+ 'GizAI',
93
+ 'ChatSandbox',
102
94
  'SciraAI',
103
95
  'StandardInputAI',
104
- 'LabyrinthAI',
105
96
  'OpenGPT',
106
- 'C4ai',
107
97
  'Venice',
108
98
  'ExaAI',
109
99
  'Copilot',
@@ -113,7 +103,6 @@ __all__ = [
113
103
  'AllenAI',
114
104
  'PerplexityLabs',
115
105
  'AkashGPT',
116
- 'DeepSeek',
117
106
  'WritingMate',
118
107
  'WiseCat',
119
108
  'IBMGranite',
@@ -131,18 +120,15 @@ __all__ = [
131
120
  'KOBOLDAI',
132
121
  'AsyncKOBOLDAI',
133
122
  'BLACKBOXAI',
134
- 'PhindSearch',
135
123
  'GEMINI',
136
124
  'DeepInfra',
137
125
  'AI4Chat',
138
- 'Phindv2',
139
126
  'OLLAMA',
140
127
  'AndiSearch',
141
128
  'PIZZAGPT',
142
129
  'Sambanova',
143
130
  'KOALA',
144
131
  'Meta',
145
- 'AskMyAI',
146
132
  'PiAI',
147
133
  'Julius',
148
134
  'YouChat',
@@ -151,10 +137,8 @@ __all__ = [
151
137
  'TurboSeek',
152
138
  'TeachAnything',
153
139
  'AI21',
154
- 'Chatify',
155
140
  'X0GPT',
156
141
  'Cerebras',
157
- 'Lepton',
158
142
  'GEMINIAPI',
159
143
  'SonusAI',
160
144
  'Cleeai',
@@ -164,13 +148,10 @@ __all__ = [
164
148
  'Free2GPT',
165
149
  'GPTWeb',
166
150
  'Netwrck',
167
- 'LlamaTutor',
168
151
  'PromptRefine',
169
152
  'TutorAI',
170
- 'ChatGPTES',
171
153
  'Bagoodex',
172
154
  'AIMathGPT',
173
- 'GaurishCerebras',
174
155
  'GeminiPro',
175
156
  'LLMChat',
176
157
  'LLMChatCo',
@@ -187,12 +168,13 @@ __all__ = [
187
168
  'ElectronHub',
188
169
  'GithubChat',
189
170
  'UncovrAI',
190
- 'WebSim',
191
171
  'VercelAI',
192
172
  'ExaChat',
193
173
  'AskSteve',
194
174
  'Aitopia',
195
175
  'SearchChatAI',
196
176
  'Writecream',
197
- 'Toolbaz'
177
+ 'Toolbaz',
178
+ 'MCPCore',
179
+ 'TypliAI',
198
180
  ]
@@ -1,4 +1,4 @@
1
- import requests
1
+ from curl_cffi.requests import Session, RequestsError
2
2
  import urllib.parse
3
3
  from typing import Union, Any, Dict
4
4
 
@@ -44,7 +44,7 @@ class AI4Chat(Provider):
44
44
  country (str, optional): Country parameter for API. Defaults to "Asia".
45
45
  user_id (str, optional): User ID for API. Defaults to "usersmjb2oaz7y".
46
46
  """
47
- self.session = requests.Session()
47
+ self.session = Session(timeout=timeout, proxies=proxies)
48
48
  self.is_conversation = is_conversation
49
49
  self.max_tokens_to_sample = max_tokens
50
50
  self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
@@ -84,7 +84,6 @@ class AI4Chat(Provider):
84
84
  is_conversation, self.max_tokens_to_sample, filepath, update_file
85
85
  )
86
86
  self.conversation.history_offset = history_offset
87
- self.session.proxies = proxies
88
87
  self.system_prompt = system_prompt
89
88
 
90
89
  def ask(
@@ -123,24 +122,24 @@ class AI4Chat(Provider):
123
122
  f"Optimizer is not one of {self.__available_optimizers}"
124
123
  )
125
124
 
126
- # Use provided values or defaults
127
125
  country_param = country or self.country
128
126
  user_id_param = user_id or self.user_id
129
127
 
130
- # Build the URL with parameters
131
128
  encoded_text = urllib.parse.quote(conversation_prompt)
132
129
  encoded_country = urllib.parse.quote(country_param)
133
130
  encoded_user_id = urllib.parse.quote(user_id_param)
134
131
 
135
132
  url = f"{self.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
136
133
 
137
- response = self.session.get(url, headers=self.headers, timeout=self.timeout)
134
+ try:
135
+ response = self.session.get(url, headers=self.headers, timeout=self.timeout)
136
+ except RequestsError as e:
137
+ raise Exception(f"Failed to generate response: {e}")
138
138
  if not response.ok:
139
139
  raise Exception(f"Failed to generate response: {response.status_code} - {response.reason}")
140
140
 
141
141
  response_text = response.text
142
142
 
143
- # Remove quotes from the start and end of the response
144
143
  if response_text.startswith('"'):
145
144
  response_text = response_text[1:]
146
145
  if response_text.endswith('"'):
@@ -1,8 +1,11 @@
1
- import requests
1
+ from typing import Any, Dict, Optional, Union
2
+ from curl_cffi import CurlError
3
+ from curl_cffi.requests import Session
4
+ from webscout import exceptions
2
5
  from webscout.AIutel import Optimizers
3
6
  from webscout.AIutel import Conversation
4
- from webscout.AIutel import AwesomePrompts
5
- from webscout.AIbase import Provider
7
+ from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
8
+ from webscout.AIbase import Provider
6
9
  from webscout.litagent import LitAgent
7
10
 
8
11
  class AskSteve(Provider):
@@ -36,7 +39,7 @@ class AskSteve(Provider):
36
39
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
37
40
  system_prompt (str, optional): System prompt for AskSteve. Defaults to the provided string.
38
41
  """
39
- self.session = requests.Session()
42
+ self.session = Session() # Use curl_cffi Session
40
43
  self.is_conversation = is_conversation
41
44
  self.max_tokens_to_sample = max_tokens
42
45
  self.api_endpoint = "https://quickstart.asksteve.to/quickStartRequest"
@@ -73,7 +76,15 @@ class AskSteve(Provider):
73
76
  is_conversation, self.max_tokens_to_sample, filepath, update_file
74
77
  )
75
78
  self.conversation.history_offset = history_offset
76
- self.session.proxies = proxies
79
+ self.session.proxies = proxies # Assign proxies directly
80
+ @staticmethod
81
+ def _asksteve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
82
+ """Extracts content from AskSteve JSON response."""
83
+ if isinstance(chunk, dict) and "candidates" in chunk and len(chunk["candidates"]) > 0:
84
+ parts = chunk["candidates"][0].get("content", {}).get("parts", [])
85
+ if parts and isinstance(parts[0].get("text"), str):
86
+ return parts[0]["text"]
87
+ return None
77
88
 
78
89
  def ask(
79
90
  self,
@@ -115,37 +126,43 @@ class AskSteve(Provider):
115
126
  "prompt": conversation_prompt
116
127
  }
117
128
 
118
- def for_stream():
129
+
130
+ # This API doesn't stream, so we process the full response
131
+ try:
119
132
  response = self.session.post(
120
133
  self.api_endpoint,
121
134
  headers=self.headers,
122
135
  json=payload,
123
- stream=True,
136
+ stream=False, # API doesn't stream
124
137
  timeout=self.timeout,
138
+ impersonate="chrome120" # Add impersonate
125
139
  )
126
- if not response.ok:
127
- raise Exception(
128
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
129
- )
130
-
131
- response_data = response.json()
132
- if "candidates" in response_data and len(response_data["candidates"]) > 0:
133
- text = response_data["candidates"][0]["content"]["parts"][0]["text"]
134
- self.last_response.update(dict(text=text))
135
- yield dict(text=text) if not raw else text
136
- else:
137
- raise Exception("No response generated")
140
+ response.raise_for_status()
141
+ response_text_raw = response.text # Get raw text
142
+
143
+ # Process the full JSON text using sanitize_stream
144
+ processed_stream = sanitize_stream(
145
+ data=response_text_raw,
146
+ to_json=True, # Parse the whole text as JSON
147
+ intro_value=None,
148
+ content_extractor=self._asksteve_extractor, # Use the specific extractor
149
+ yield_raw_on_error=False
150
+ )
151
+ # Extract the single result
152
+ text = next(processed_stream, None)
153
+ text = text if isinstance(text, str) else "" # Ensure it's a string
138
154
 
155
+ self.last_response.update(dict(text=text))
139
156
  self.conversation.update_chat_history(
140
157
  prompt, self.get_message(self.last_response)
141
158
  )
159
+ # Return dict or raw string based on raw flag
160
+ return text if raw else self.last_response
142
161
 
143
- def for_non_stream():
144
- for _ in for_stream():
145
- pass
146
- return self.last_response
147
-
148
- return for_stream() if stream else for_non_stream()
162
+ except CurlError as e:
163
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
164
+ except Exception as e: # Catch other potential errors
165
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}") from e
149
166
 
150
167
  def chat(
151
168
  self,
@@ -164,23 +181,15 @@ class AskSteve(Provider):
164
181
  str: Response generated
165
182
  """
166
183
 
167
- def for_stream():
168
- for response in self.ask(
169
- prompt, True, optimizer=optimizer, conversationally=conversationally
170
- ):
171
- yield self.get_message(response)
172
-
173
- def for_non_stream():
174
- return self.get_message(
175
- self.ask(
176
- prompt,
177
- False,
178
- optimizer=optimizer,
179
- conversationally=conversationally,
180
- )
181
- )
182
-
183
- return for_stream() if stream else for_non_stream()
184
+ # Since ask() doesn't truly stream, we just call it once.
185
+ response_data = self.ask(
186
+ prompt,
187
+ stream=False, # Always False for this API
188
+ raw=False, # Get the dict back
189
+ optimizer=optimizer,
190
+ conversationally=conversationally,
191
+ )
192
+ return self.get_message(response_data)
184
193
 
185
194
  def get_message(self, response: dict) -> str:
186
195
  """Retrieves message only from response
@@ -192,12 +201,12 @@ class AskSteve(Provider):
192
201
  str: Message extracted
193
202
  """
194
203
  assert isinstance(response, dict), "Response should be of dict data-type only"
195
- return response["text"]
204
+ return response.get("text", "") # Use .get for safety
196
205
 
197
206
 
198
207
  if __name__ == "__main__":
199
208
  from rich import print
200
209
  ai = AskSteve()
201
- response = ai.chat("hi", stream=True)
210
+ response = ai.chat("write a short poem about AI", stream=True)
202
211
  for chunk in response:
203
212
  print(chunk, end="", flush=True)