webscout 8.2.3-py3-none-any.whl → 8.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
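
A change repeated across the provider diffs reproduced below is a migration from the requests library to curl_cffi: curl_cffi's Session mirrors the requests API but can impersonate a real browser's TLS/HTTP2 fingerprint via the impersonate argument. A minimal sketch of the pattern, with a placeholder endpoint and payload that are illustrative only, not taken from the package:

    from curl_cffi.requests import Session  # requests-compatible client
    from curl_cffi import CurlError

    session = Session()
    session.headers.update({"Accept": "*/*"})
    session.proxies = {}  # assigned directly, as the diffs below do

    try:
        # impersonate makes the TLS/HTTP2 fingerprint match a real browser,
        # which plain requests cannot do
        response = session.post(
            "https://example.com/api/chat",  # placeholder endpoint
            json={"prompt": "hello"},
            timeout=30,
            impersonate="chrome120",
        )
        response.raise_for_status()
        print(response.json())
    except CurlError as e:
        raise RuntimeError(f"Request failed (CurlError): {e}") from e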
webscout/Provider/tutorai.py
CHANGED
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import os
 from typing import Union, List, Optional
 from string import punctuation
@@ -28,8 +29,6 @@ class TutorAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        model: str = "gpt-4o"
     ):
         """
         Initializes the TutorAI.me API with given parameters.
@@ -47,26 +46,19 @@ class TutorAI(Provider):
             system_prompt (str, optional): System prompt for TutorAI.
                                            Defaults to "You are a helpful AI assistant.".
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://ai-tutor.ai/api/generate-homeworkify-response"
         self.stream_chunk_size = 1024
         self.timeout = timeout
         self.last_response = {}
-
+        # Remove Cookie header, curl_cffi doesn't use it directly like this
         self.headers = {
             "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "Cookie": (
-                "ARRAffinity=5ef5a1afbc0178c19fc7bc85047a2309cb69de3271923483302c69744e2b1d24; "
-                "ARRAffinitySameSite=5ef5a1afbc0178c19fc7bc85047a2309cb69de3271923483302c69744e2b1d24; "
-                "_ga=GA1.1.412867530.1726937399; "
-                "_clck=1kwy10j%7C2%7Cfpd%7C0%7C1725; "
-                "_clsk=1cqd2q1%7C1726937402133%7C1%7C1%7Cm.clarity.ms%2Fcollect; "
-                "_ga_0WF5W33HD7=GS1.1.1726937399.1.1.1726937459.0.0.0"
-            ),
             "DNT": "1",
             "Origin": "https://tutorai.me",
             "Priority": "u=1, i",
@@ -85,7 +77,9 @@ class TutorAI(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
+        self.session.proxies = proxies  # Assign proxies directly
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
@@ -97,12 +91,11 @@ class TutorAI(Provider):
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
-        stream: bool = False,
+        stream: bool = False,  # Note: API doesn't seem to truly stream text chunks
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
@@ -140,68 +133,67 @@ class TutorAI(Provider):
            "attachmentsCount": "1" if attachment_path else "0"
        }
        files = {}
+        file_handle = None  # To ensure file is closed
        if attachment_path:
            if not os.path.isfile(attachment_path):
                raise FileNotFoundError(f"Error: The file '{attachment_path}' does not exist.")
            try:
-
+                # Open file handle to pass to curl_cffi
+                file_handle = open(attachment_path, 'rb')
+                files["attachment0"] = (os.path.basename(attachment_path), file_handle, 'image/png')  # Adjust mime type if needed
            except Exception as e:
+                if file_handle: file_handle.close()  # Close if opened
                raise exceptions.FailedToGenerateResponseError(f"Error opening the file: {e}")

-
+        # The API doesn't seem to support streaming text chunks based on the original code.
+        # Both stream=True and stream=False resulted in processing the full response.
+        # We will implement the non-stream logic for both cases.
+        try:
+            # Use curl_cffi session post with impersonate
+            # Pass data and files for multipart/form-data
+            response = self.session.post(
+                self.api_endpoint,
+                # headers are set on the session
+                data=form_data,
+                files=files,
+                timeout=self.timeout,
+                impersonate="chrome120",  # Try a different impersonation profile
+            )
+            response.raise_for_status()  # Check for HTTP errors
+
            try:
-
-
-
-                json_str = ''
-                for chunk in response.iter_content(chunk_size=self.stream_chunk_size, decode_unicode=True):
-                    if chunk:
-                        response_chunks.append(chunk)
-                        yield chunk if raw else dict(text=chunk)
-                json_str = ''.join(response_chunks)
-                try:
-                    response_data = json.loads(json_str)
-                except json.JSONDecodeError as json_err:
-                    raise exceptions.FailedToGenerateResponseError(f"\nError decoding JSON: {json_err}")
-                homeworkify_html = response_data.get("homeworkifyResponse", "")
-                if not homeworkify_html:
-                    raise exceptions.FailedToGenerateResponseError("\nNo 'homeworkifyResponse' found in the response.")
-                clean_text = homeworkify_html  # Removed html_to_terminal call
-                self.last_response.update(dict(text=clean_text))
-                self.conversation.update_chat_history(
-                    prompt, self.get_message(self.last_response)
-                )
-            except requests.exceptions.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
-
-        def for_non_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, data=form_data, files=files, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
+                response_data = response.json()
+            except json.JSONDecodeError as json_err:
+                raise exceptions.FailedToGenerateResponseError(f"Error decoding JSON: {json_err} - Response text: {response.text}")

-            # Parse the entire JSON response
-            response_data = response.json()
            homeworkify_html = response_data.get("homeworkifyResponse", "")
            if not homeworkify_html:
-
-
+                # Return empty if no content, consistent with original non-stream logic
+                clean_text = ""
+            else:
+                # Assuming the response is HTML that needs cleaning/parsing
+                # For now, just return the raw HTML content as text
+                clean_text = homeworkify_html

-
-
-
-
-
-
-
+            self.last_response = {"text": clean_text}
+            self.conversation.update_chat_history(prompt, clean_text)
+            return self.last_response  # Return the full response content
+
+        except CurlError as e:  # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+        except Exception as e:  # Catch other potential exceptions
+            # Include response text if available in HTTP errors
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}")
+        finally:
+            if file_handle:  # Ensure file is closed
+                file_handle.close()

-        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
-        stream: bool = False,
+        stream: bool = False,  # Keep stream param for interface consistency, though API might not support it
        optimizer: str = None,
        conversationally: bool = False,
        attachment_path: Optional[str] = None,
@@ -246,7 +238,33 @@ class TutorAI(Provider):
if __name__ == "__main__":
    from rich import print

-
-
-
-    print(
+    try:  # Add try-except block for testing
+        ai = TutorAI(timeout=120)  # Increased timeout for potential uploads
+        # Test without attachment first
+        print("[bold blue]Testing Text Prompt:[/bold blue]")
+        response_gen = ai.chat("hello buddy", stream=True)  # Test stream interface
+        full_response = ""
+        for chunk in response_gen:
+            print(chunk, end="", flush=True)
+            full_response += chunk
+        print("\n[bold green]Text Test Complete.[/bold green]\n")
+
+        # Optional: Test with attachment (replace with a valid image path)
+        # attachment_file = "path/to/your/image.png"
+        # if os.path.exists(attachment_file):
+        #     print(f"[bold blue]Testing with Attachment ({attachment_file}):[/bold blue]")
+        #     response_gen_attach = ai.chat("Describe this image", stream=True, attachment_path=attachment_file)
+        #     full_response_attach = ""
+        #     for chunk in response_gen_attach:
+        #         print(chunk, end="", flush=True)
+        #         full_response_attach += chunk
+        #     print("\n[bold green]Attachment Test Complete.[/bold green]")
+        # else:
+        #     print(f"[bold yellow]Skipping attachment test: File not found at {attachment_file}[/bold yellow]")
+
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except FileNotFoundError as e:
+        print(f"\n[bold red]File Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
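
The rewritten tutorai.py keeps the upload handle in a file_handle variable and closes it in a finally block. The same guarantee can be had with a context manager; a sketch assuming the attachment0 field name and image/png MIME type from the diff (post_with_attachment is a hypothetical helper, not part of webscout):

    import os
    from curl_cffi.requests import Session

    def post_with_attachment(session: Session, endpoint: str, form_data: dict,
                             attachment_path: str, timeout: int = 30) -> dict:
        # The with-block closes the handle even if the POST raises, replacing
        # the explicit file_handle/finally bookkeeping in the diff above.
        with open(attachment_path, "rb") as fh:
            files = {"attachment0": (os.path.basename(attachment_path), fh, "image/png")}
            response = session.post(endpoint, data=form_data, files=files,
                                    timeout=timeout, impersonate="chrome120")
        response.raise_for_status()
        return response.json()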
webscout/Provider/typefully.py
CHANGED
@@ -1,5 +1,4 @@
 from typing import Union, Any, Dict
-import requests
 import re
 from uuid import uuid4

@@ -9,6 +8,9 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+# Replace requests with curl_cffi
+from curl_cffi.requests import Session  # Import Session
+from curl_cffi import CurlError  # Import CurlError

 class TypefullyAI(Provider):
     """
@@ -63,7 +65,8 @@ class TypefullyAI(Provider):
         >>> print(ai.system_prompt)
         'You are a friendly assistant.'
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://typefully.com/tools/ai/api/completion"
@@ -96,7 +99,9 @@ class TypefullyAI(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies  # Use proxies directly, not session.proxies.update
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -108,7 +113,6 @@ class TypefullyAI(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies

     def ask(
         self,
@@ -156,31 +160,51 @@ class TypefullyAI(Provider):
         }

         def for_stream():
-
-
-
-
+            try:  # Add try block for CurlError
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120"  # Add impersonate
                 )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_response = ""
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        line = line_bytes.decode('utf-8')  # Decode bytes
+                        match = re.search(r'0:"(.*?)"', line)
+                        if match:
+                            # Decode potential unicode escapes
+                            content = match.group(1).encode().decode('unicode_escape')
+                            streaming_response += content
+                            # Yield dict or raw string
+                            yield content if raw else dict(text=content)
+                        elif line.startswith('e:') or line.startswith('d:'):
+                            # End of response
+                            break
+                # Update history and last response after stream finishes
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:  # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

         def for_non_stream():
+            # This function implicitly uses the updated for_stream
             for _ in for_stream():
                 pass
-
+            # Ensure last_response is updated by for_stream before returning
+            return self.last_response

         return for_stream() if stream else for_non_stream()

@@ -246,10 +270,19 @@ class TypefullyAI(Provider):
         'Why did the scarecrow win an award? Because he was outstanding in his field!'
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
-
+        # Handle potential unicode escapes in the final text
+        text = response.get("text", "")
+        try:
+            # Attempt to decode escapes, return original if fails
+            # Already decoded in ask method, just handle formatting
+            formatted_text = text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+            return formatted_text
+        except Exception:  # Catch potential errors during formatting
+            return text  # Return original text if formatting fails
+

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -261,20 +294,29 @@ if __name__ == "__main__":
     for model in TypefullyAI.AVAILABLE_MODELS:
         try:
             test_ai = TypefullyAI(model=model, timeout=60)
-
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-
+            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
-            print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)

             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                #
-
+                # Clean and truncate response
+                clean_text = response_text.strip()  # Already formatted in get_message
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
+
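
The new for_stream in typefully.py extracts text from lines shaped like 0:"..." and stops on e:/d: lines, a framing that resembles the Vercel AI SDK data-stream protocol. A self-contained sketch of just that parsing step, over fabricated sample lines:

    import re

    # Fabricated sample of the framing: text parts prefixed "0:",
    # end-of-stream events prefixed "e:" or "d:".
    sample_lines = [b'0:"Hello"', b'0:" world\\n"', b'e:{"finishReason":"stop"}']

    def parse_stream(lines) -> str:
        text = ""
        for line_bytes in lines:
            line = line_bytes.decode("utf-8")
            match = re.search(r'0:"(.*?)"', line)
            if match:
                # Escapes such as \n arrive literally; decode them the same
                # way the diff does with .encode().decode('unicode_escape')
                text += match.group(1).encode().decode("unicode_escape")
            elif line.startswith(("e:", "d:")):
                break
        return text

    print(repr(parse_stream(sample_lines)))  # 'Hello world\n'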
webscout/Provider/typegpt.py
CHANGED
@@ -1,7 +1,7 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator
-import requests.exceptions

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
@@ -35,7 +35,7 @@ class TypeGPT(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "gpt-4o",
+        model: str = "gpt-4o-mini-2024-07-18",
         system_prompt: str = "You are a helpful assistant.",
         temperature: float = 0.5,
         presence_penalty: int = 0,
@@ -46,7 +46,8 @@ class TypeGPT(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")

-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
@@ -82,6 +83,8 @@ class TypeGPT(Provider):
         )
         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
         self.session.proxies = proxies

     def ask(
@@ -120,12 +123,18 @@ class TypeGPT(Provider):

         def for_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120"
                 )
-            except
+            except CurlError as ce:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+                    f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce

             if not response.ok:
@@ -133,9 +142,10 @@ class TypeGPT(Provider):
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
             message_load = ""
-
-
-
+            # Iterate over bytes and decode manually
+            for line_bytes in response.iter_lines():
+                if line_bytes:
+                    line = line_bytes.decode("utf-8")
                     if line.startswith("data: "):
                         line = line[6:]  # Remove "data: " prefix
                         # Skip [DONE] message
@@ -151,26 +161,52 @@ class TypeGPT(Provider):
                             message_load += new_content
                             # Yield only the new content
                             yield dict(text=new_content) if not raw else new_content
-
+                            # Update last_response incrementally for potential non-stream use later
+                            self.last_response = dict(text=message_load)
                         except json.JSONDecodeError:
                             continue
-
+            # Update conversation history after stream finishes
+            if message_load:  # Only update if something was received
+                self.conversation.update_chat_history(prompt, message_load)
+

         def for_non_stream():
             try:
-
-
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    timeout=self.timeout,
+                    impersonate="chrome120"
+                )
+            except CurlError as ce:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
+                    f"Network connection failed (CurlError). Check your firewall or antivirus settings. Original error: {ce}"
                 ) from ce

             if not response.ok:
                 raise exceptions.FailedToGenerateResponseError(
                     f"Request failed - {response.status_code}: {response.text}"
                 )
-
-
-
+
+            try:
+                # curl_cffi response.json() handles decoding
+                response_data = response.json()
+                # Extract the message content for history and return value
+                if 'choices' in response_data and len(response_data['choices']) > 0:
+                    message = response_data['choices'][0].get('message', {})
+                    content = message.get('content', '')
+                    self.last_response = {"text": content}  # Store in expected format
+                    self.conversation.update_chat_history(prompt, content)
+                    return self.last_response
+                else:
+                    # Handle cases where response structure is unexpected
+                    self.last_response = {"text": ""}
+                    return self.last_response
+            except json.JSONDecodeError as je:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to decode JSON response: {je} - Response text: {response.text}")
+

         return for_stream() if stream else for_non_stream()

@@ -183,23 +219,36 @@ class TypeGPT(Provider):
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response string or stream."""
         if stream:
+            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True,
+                prompt, stream=True, raw=False,  # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
             )
-            for
-
+            for chunk_dict in gen:
+                # get_message expects a dict
+                yield self.get_message(chunk_dict)
         else:
-
+            # ask() returns a dict when not streaming
+            response_dict = self.ask(
+                prompt, stream=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_dict)

     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response."""
-        if isinstance(response, str):
-            return response
-        elif isinstance(response, dict):
+        if isinstance(response, dict):
             assert isinstance(response, dict), "Response should be of dict data-type only"
-
+            # Handle potential unicode escapes in the final text
+            text = response.get("text", "")
+            try:
+                # Attempt to decode escapes, return original if fails
+                return text.encode('utf-8').decode('unicode_escape')
+            except UnicodeDecodeError:
+                return text
         else:
-
+            # This case should ideally not be reached if ask() behaves as expected
+            raise TypeError(f"Invalid response type: {type(response)}. Expected dict.")

 if __name__ == "__main__":
     print("-" * 80)
@@ -213,20 +262,32 @@ if __name__ == "__main__":
     for model in TypeGPT.AVAILABLE_MODELS:
         try:
             test_ai = TypeGPT(model=model, timeout=60)
-
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-
+            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
-                print
+                # Optional: print chunks as they arrive for visual feedback
+                # print(chunk, end="", flush=True)

             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                #
-
+                # Clean and truncate response
+                clean_text = response_text.strip()  # Already decoded in get_message
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed, but stream test covers basic functionality
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
+
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-
+
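
typegpt.py's rewritten for_stream consumes an OpenAI-style SSE stream: each data: line carries a JSON chunk, and a [DONE] sentinel terminates the stream. A standalone sketch of the accumulation step over fabricated payloads, assuming the standard choices[0].delta.content chunk shape (the exact field layout is not visible in the diff):

    import json

    # Fabricated SSE lines in the OpenAI-compatible streaming format.
    sse_lines = [
        b'data: {"choices":[{"delta":{"content":"Hel"}}]}',
        b'data: {"choices":[{"delta":{"content":"lo"}}]}',
        b'data: [DONE]',
    ]

    def accumulate_deltas(lines) -> str:
        message = ""
        for line_bytes in lines:
            line = line_bytes.decode("utf-8")
            if not line.startswith("data: "):
                continue
            line = line[6:]              # strip the "data: " prefix
            if line == "[DONE]":         # sentinel ending the stream
                break
            try:
                delta = json.loads(line)["choices"][0].get("delta", {})
                message += delta.get("content") or ""
            except (json.JSONDecodeError, KeyError, IndexError):
                continue                 # tolerate malformed keep-alive chunks
        return message

    print(accumulate_deltas(sse_lines))  # Hello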