webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
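
Nine provider modules are deleted outright in this release (C4ai, ChatGPTES, DeepSeek, Llama, Phind, WebSim, labyrinth, lepton, llamatutor), so callers importing their classes from webscout.Provider will hit ImportError after upgrading. A minimal, hypothetical upgrade guard; the fallback provider chosen here is illustrative, not a recommendation:

# Hypothetical guard: PhindSearch is exported by 8.2.3 but removed in 8.2.4.
try:
    from webscout.Provider import PhindSearch as ChatProvider
except ImportError:
    # Fall back to a provider that still ships in 8.2.4 (illustrative choice).
    from webscout.Provider import DeepInfra as ChatProvider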
webscout/Provider/Writecream.py
CHANGED
@@ -1,10 +1,11 @@
-import
+from curl_cffi import CurlError
+from curl_cffi.requests import Session  # Keep Session import
 import json
 from typing import Any, Dict, Optional, Generator, Union

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -29,7 +30,6 @@ class Writecream(Provider):
         act: str = None,
         system_prompt: str = "You are a helpful and informative AI assistant.",
         base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
-        user_agent: str = "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Mobile Safari/537.36",
         referer: str = "https://www.writecream.com/chatgpt-chat/",
         link: str = "writecream.com",
         model: str = "writecream-gpt"
@@ -40,7 +40,8 @@ class Writecream(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.base_url = base_url
@@ -48,13 +49,16 @@ class Writecream(Provider):
         self.last_response = {}
         self.system_prompt = system_prompt
         self.model = model
-
+        # Initialize LitAgent
+        self.agent = LitAgent()
         self.referer = referer
         self.link = link

         self.headers = {
-
+            # Use LitAgent for User-Agent
+            "User-Agent": self.agent.random(),
             "Referer": self.referer
+            # Add other headers if needed by curl_cffi impersonation or API
         }

         self.__available_optimizers = (
@@ -63,6 +67,7 @@ class Writecream(Provider):
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )

+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.session.proxies.update(proxies)

@@ -123,20 +128,40 @@ class Writecream(Provider):

         def for_non_stream():
             try:
-
+                # Use curl_cffi session.get with impersonate
+                response = self.session.get(
+                    self.base_url,
+                    params=params,
+                    timeout=self.timeout,
+                    impersonate="chrome120"  # Add impersonate
+                )
                 response.raise_for_status()
-
+                response_text = response.text  # Get the raw text
+
+                # Use sanitize_stream to process the non-streaming text
+                # It will try to parse the whole text as JSON because to_json=True
+                processed_stream = sanitize_stream(
+                    data=response_text,
+                    to_json=True,  # Attempt to parse the whole response text as JSON
+                    intro_value=None,  # No prefix expected on the full response
+                    content_extractor=lambda chunk: chunk.get("response", chunk.get("response_content", "")) if isinstance(chunk, dict) else None
+                )

-                # Extract the
-                response_content =
+                # Extract the single result from the generator
+                response_content = ""
+                for content in processed_stream:
+                    response_content = content if isinstance(content, str) else ""

                 # Update conversation history
                 self.last_response = {"text": response_content}
                 self.conversation.update_chat_history(prompt, response_content)

                 return {"text": response_content}
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
             except Exception as e:
-
+                # Include original exception type
+                raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}")

         # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
         return for_non_stream()
@@ -188,6 +213,7 @@ class Writecream(Provider):


 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<30} {'Status':<10} {'Response'}")
     print("-" * 80)
webscout/Provider/WritingMate.py
CHANGED
@@ -1,7 +1,9 @@
 import re
-import
+import json
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 from typing import Union, Any, Dict, Generator, Optional
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
@@ -29,27 +31,34 @@ class WritingMate(Provider):
         intro: str = None,
         filepath: str = None,
         update_file: bool = True,
+        proxies: dict = {},  # Added proxies parameter
+        history_offset: int = 10250,  # Added history_offset parameter
         act: str = None,
         system_prompt: str = "You are a friendly, helpful AI assistant.",
         model: str = "gpt-4o-mini"
     ):
         self.cookies_path = cookies_path
-
-        self.
+        # Load cookies into a dictionary for curl_cffi
+        self.cookies = self._load_cookies_dict(cookies_path)
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.timeout = timeout
         self.system_prompt = system_prompt
         self.model = model
         if self.model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Unknown model: {self.model}. Choose from {self.AVAILABLE_MODELS}")
         self.last_response = {}
+        self.agent = LitAgent()  # Initialize LitAgent
         self.headers = {
             "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-
+            # Content-Type might be application/json based on body, but API expects text/plain? Keep for now.
+            "Content-Type": "text/plain;charset=UTF-8",
             "Origin": "https://chat.writingmate.ai",
             "Referer": "https://chat.writingmate.ai/chat",
-
+            # Remove Cookie header, pass cookies via parameter
+            # "Cookie": self.cookies,
             "DNT": "1",
             "sec-ch-ua": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
             "sec-ch-ua-mobile": "?0",
@@ -58,9 +67,12 @@ class WritingMate(Provider):
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
             "Sec-GPC": "1",
-            "User-Agent":
+            "User-Agent": self.agent.random()  # Use LitAgent
         }
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
         self.__available_optimizers = (
             m for m in dir(Optimizers)
             if callable(getattr(Optimizers, m)) and not m.startswith("__")
@@ -70,25 +82,51 @@ class WritingMate(Provider):
             if act else intro or Conversation.intro
         )
         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
-
+        # Apply history offset
+        self.conversation.history_offset = history_offset

-
+    # Keep original _load_cookies if needed elsewhere, or remove
+    # def _load_cookies(self, path: str) -> str:
+    #     try:
+    #         with open(path, 'r') as f:
+    #             data = json.load(f)
+    #         return '; '.join(f"{c['name']}={c['value']}" for c in data)
+    #     except (FileNotFoundError, json.JSONDecodeError):
+    #         raise RuntimeError(f"Failed to load cookies from {path}")
+
+    # New method to load cookies as a dictionary
+    def _load_cookies_dict(self, path: str) -> Dict[str, str]:
         try:
             with open(path, 'r') as f:
                 data = json.load(f)
-
-
-
+            # Ensure data is a list of cookie objects
+            if not isinstance(data, list):
+                raise ValueError("Cookie file should contain a list of cookie objects.")
+            return {c['name']: c['value'] for c in data if 'name' in c and 'value' in c}
+        except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
+            raise RuntimeError(f"Failed to load cookies from {path}: {e}")

+    @staticmethod
+    def _writingmate_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the WritingMate stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            # Regex to find the pattern 0:"<content>"
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk)  # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None

     def ask(
         self,
         prompt: str,
-        stream: bool = True,
+        stream: bool = True,  # Defaulting stream to True as per original
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False
     ) -> Union[Dict[str,Any], Generator[Any,None,None]]:
+        # ... existing prompt generation and optimizer logic ...
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -96,8 +134,10 @@ class WritingMate(Provider):
                     conversation_prompt if conversationally else prompt
                 )
             else:
+                # Use the correct exception type
                 raise exceptions.FailedToGenerateResponseError(f"Unknown optimizer: {optimizer}")

+        # Body seems to be JSON, let curl_cffi handle serialization
        body = {
            "chatSettings": {
                "model": self.model,
@@ -116,82 +156,114 @@ class WritingMate(Provider):
         }

         def for_stream():
-
-
-
-
+            try:
+                # Use curl_cffi session post, pass cookies dict
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    cookies=self.cookies,  # Pass cookies dict
+                    json=body,  # Pass body as json
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120"  # Add impersonate
+                    # http_version=CurlHttpVersion.V1_1  # Add if HTTP/2 errors occur
                 )
-
-
-
-
-
-
-
-
-
-
-
-
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_text = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value=None,  # No simple prefix
+                    to_json=False,  # Content is not JSON
+                    content_extractor=self._writingmate_extractor  # Use the specific extractor
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield content_chunk if raw else dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_text))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:  # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

         def for_non_stream():
+            # This function implicitly uses the updated for_stream
             for _ in for_stream():
                 pass
             return self.last_response

-
+        # Ensure stream defaults to True if not provided, matching original behavior
+        effective_stream = stream if stream is not None else True
+        return for_stream() if effective_stream else for_non_stream()

     def chat(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False,  # Default stream to False as per original chat method
         optimizer: str = None,
         conversationally: bool = False
     ) -> Union[str, Generator[str,None,None]]:
         if stream:
-            # yield
-            def
-
-
+            # yield decoded text chunks
+            def text_stream():
+                # Call ask with stream=True, raw=False to get dicts
+                for response_dict in self.ask(
+                    prompt, stream=True, raw=False,
                     optimizer=optimizer, conversationally=conversationally
                 ):
-
-
-
-                return
-
+                    # Extract text from dict
+                    yield self.get_message(response_dict)
+            return text_stream()
+        else:  # non-stream: return aggregated text
+            # Call ask with stream=False, raw=False
+            response_data = self.ask(
                 prompt,
-                False,
+                stream=False,
                 raw=False,
                 optimizer=optimizer,
                 conversationally=conversationally,
             )
-
-
-
-
-
-
-
-
+            # Ensure response_data is a dict before passing to get_message
+            if isinstance(response_data, dict):
+                return self.get_message(response_data)
+            else:
+                # Handle unexpected generator case if ask(stream=False) behaves differently
+                # This part might need adjustment based on actual behavior
+                full_text = "".join(self.get_message(chunk) for chunk in response_data if isinstance(chunk, dict))
+                return full_text

-        Returns:
-            str: The message content.

-
-        >>> ai = X0GPT()
-        >>> response = ai.ask("Tell me a joke!")
-        >>> message = ai.get_message(response)
-        >>> print(message)
-        'Why did the scarecrow win an award? Because he was outstanding in his field!'
-        """
+    def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
+        # Ensure text exists before processing
+        # Formatting is now mostly handled by the extractor
+        text = response.get("text", "")
+        formatted_text = text  # Keep newline replacement if needed: .replace('\\n', '\n')
         return formatted_text

 if __name__ == "__main__":
     from rich import print
-
-
-
+    try:
+        ai = WritingMate(cookies_path="cookies.json", proxies={}, timeout=120)  # Example with proxies and timeout
+        # Get input within the try block
+        user_input = input(">>> ")
+        response = ai.chat(user_input, stream=True)
+        print("[bold green]Assistant:[/bold green]")
+        for chunk in response:
+            print(chunk, end="", flush=True)
+        print()  # Add a newline at the end
+    except RuntimeError as e:
+        print(f"[bold red]Error initializing WritingMate:[/bold red] {e}")
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"[bold red]Error during chat:[/bold red] {e}")
+    except Exception as e:
+        print(f"[bold red]An unexpected error occurred:[/bold red] {e}")
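
The new _writingmate_extractor pulls content out of stream frames shaped like 0:"...". A standalone check of that regex and unescaping logic, with invented sample frames:

# Standalone exercise of the extractor regex added above (sample frames invented).
import re

def extract(chunk: str):
    match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
    if match:
        content = match.group(1).encode().decode('unicode_escape')
        return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

print(extract('0:"Hello, world!"'))   # Hello, world!
print(extract('0:"caf\\u00e9"'))      # café (unicode escape decoded)
print(extract('unrelated frame'))     # None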
webscout/Provider/__init__.py
CHANGED
@@ -1,6 +1,5 @@
 # webscout/providers/__init__.py
 from .PI import *
-from .Llama import LLAMA
 from .Cohere import Cohere
 from .Reka import REKA
 from .Groq import GROQ
@@ -10,8 +9,6 @@ from .Openai import AsyncOPENAI
 from .Koboldai import KOBOLDAI
 from .Koboldai import AsyncKOBOLDAI
 from .Blackboxai import BLACKBOXAI
-from .Phind import PhindSearch
-from .Phind import Phindv2
 from .ai4chat import *
 from .Gemini import GEMINI
 from .Deepinfra import DeepInfra
@@ -34,15 +31,12 @@ from .AI21 import *
 from .Chatify import *
 from .x0gpt import *
 from .cerebras import *
-from .lepton import *
 from .geminiapi import *
 from .elmo import *
 from .GPTWeb import *
 from .Netwrck import Netwrck
-from .llamatutor import *
 from .promptrefine import *
 from .tutorai import *
-from .ChatGPTES import *
 from .bagoodex import *
 from .aimathgpt import *
 from .gaurish import *
@@ -64,7 +58,6 @@ from .ChatGPTGratis import *
 from .QwenLM import *
 from .granite import *
 from .WiseCat import *
-from .DeepSeek import *
 from .freeaichat import FreeAIChat
 from .akashgpt import *
 from .Perplexitylabs import *
@@ -76,11 +69,8 @@ from .ElectronHub import *
 from .HuggingFaceChat import *
 from .GithubChat import *
 from .copilot import *
-from .C4ai import *
 from .sonus import *
 from .uncovr import *
-from .labyrinth import *
-from .WebSim import *
 from .LambdaChat import *
 from .ChatGPTClone import *
 from .VercelAI import *
@@ -96,14 +86,12 @@ from .Writecream import Writecream
 from .toolbaz import Toolbaz
 from .scnet import SCNet
 from .WritingMate import WritingMate
+from .MCPCore import MCPCore
 __all__ = [
-    'LLAMA',
     'SCNet',
     'SciraAI',
     'StandardInputAI',
-    'LabyrinthAI',
     'OpenGPT',
-    'C4ai',
     'Venice',
     'ExaAI',
     'Copilot',
@@ -113,7 +101,6 @@ __all__ = [
     'AllenAI',
     'PerplexityLabs',
     'AkashGPT',
-    'DeepSeek',
     'WritingMate',
     'WiseCat',
     'IBMGranite',
@@ -131,11 +118,9 @@ __all__ = [
     'KOBOLDAI',
     'AsyncKOBOLDAI',
     'BLACKBOXAI',
-    'PhindSearch',
     'GEMINI',
     'DeepInfra',
     'AI4Chat',
-    'Phindv2',
     'OLLAMA',
     'AndiSearch',
     'PIZZAGPT',
@@ -154,7 +139,6 @@ __all__ = [
     'Chatify',
     'X0GPT',
     'Cerebras',
-    'Lepton',
     'GEMINIAPI',
     'SonusAI',
     'Cleeai',
@@ -164,10 +148,8 @@ __all__ = [
     'Free2GPT',
     'GPTWeb',
     'Netwrck',
-    'LlamaTutor',
     'PromptRefine',
     'TutorAI',
-    'ChatGPTES',
     'Bagoodex',
     'AIMathGPT',
     'GaurishCerebras',
@@ -187,12 +169,12 @@ __all__ = [
     'ElectronHub',
     'GithubChat',
     'UncovrAI',
-    'WebSim',
     'VercelAI',
     'ExaChat',
     'AskSteve',
     'Aitopia',
     'SearchChatAI',
     'Writecream',
-    'Toolbaz'
+    'Toolbaz',
+    'MCPCore'
 ]
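
The net effect on the public import surface: MCPCore becomes importable, while LLAMA, PhindSearch, Phindv2, DeepSeek, Lepton, C4ai, LabyrinthAI, WebSim, LlamaTutor, and ChatGPTES disappear from __all__. For example:

# Import-surface check against the __init__.py diff above.
from webscout.Provider import MCPCore      # new export in 8.2.4
from webscout.Provider import Writecream   # unchanged export

# Each of these raises ImportError as of 8.2.4:
# from webscout.Provider import LLAMA, PhindSearch, Phindv2, DeepSeek, Lepton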
webscout/Provider/ai4chat.py
CHANGED
@@ -1,4 +1,4 @@
-import
+from curl_cffi.requests import Session, RequestsError
 import urllib.parse
 from typing import Union, Any, Dict

@@ -44,7 +44,7 @@ class AI4Chat(Provider):
         country (str, optional): Country parameter for API. Defaults to "Asia".
         user_id (str, optional): User ID for API. Defaults to "usersmjb2oaz7y".
         """
-        self.session =
+        self.session = Session(timeout=timeout, proxies=proxies)
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
@@ -84,7 +84,6 @@ class AI4Chat(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
         self.system_prompt = system_prompt

     def ask(
@@ -123,24 +122,24 @@ class AI4Chat(Provider):
                 f"Optimizer is not one of {self.__available_optimizers}"
             )

-        # Use provided values or defaults
         country_param = country or self.country
         user_id_param = user_id or self.user_id

-        # Build the URL with parameters
         encoded_text = urllib.parse.quote(conversation_prompt)
         encoded_country = urllib.parse.quote(country_param)
         encoded_user_id = urllib.parse.quote(user_id_param)

         url = f"{self.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"

-
+        try:
+            response = self.session.get(url, headers=self.headers, timeout=self.timeout)
+        except RequestsError as e:
+            raise Exception(f"Failed to generate response: {e}")
         if not response.ok:
             raise Exception(f"Failed to generate response: {response.status_code} - {response.reason}")

         response_text = response.text

-        # Remove quotes from the start and end of the response
         if response_text.startswith('"'):
             response_text = response_text[1:]
         if response_text.endswith('"'):
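
ai4chat.py makes the same requests-to-curl_cffi swap, but configures the Session at construction time (timeout and proxies passed to the constructor) and converts curl-level failures into the provider's generic Exception. A standalone sketch under those assumptions; the URL and params are placeholders:

# Session-level configuration, as in the ai4chat.py change (placeholder URL).
from curl_cffi.requests import Session, RequestsError

session = Session(timeout=30, proxies={})  # bound once, reused for every request
try:
    response = session.get("https://example.invalid/api", params={"text": "hi"})
    response.raise_for_status()
    print(response.text)
except RequestsError as e:
    raise Exception(f"Failed to generate response: {e}")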
webscout/Provider/copilot.py
CHANGED
@@ -2,7 +2,6 @@ import os
 import json
 import base64
 import asyncio
-import requests
 from urllib.parse import quote
 from typing import Optional, Dict, Any, List, Union, Generator

@@ -287,8 +286,6 @@ class Copilot(Provider):
             finally:
                 wss.close()

-        except requests.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
