webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/turboseek.py
CHANGED
@@ -1,12 +1,13 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
 from webscout import exceptions
-from typing import Union, Any, AsyncGenerator, Dict
+from typing import Optional, Union, Any, AsyncGenerator, Dict
 from webscout.litagent import LitAgent

 class TurboSeek(Provider):
@@ -26,7 +27,7 @@ class TurboSeek(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "Llama 3.1 70B"
+        model: str = "Llama 3.1 70B" # Note: model parameter is not used by the API endpoint
     ):
         """Instantiates TurboSeek

@@ -41,7 +42,8 @@
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.chat_endpoint = "https://www.turboseek.io/api/getAnswer"
@@ -49,14 +51,9 @@
         self.timeout = timeout
         self.last_response = {}
         self.headers = {
-            "authority": "www.turboseek.io",
-            "method": "POST",
-            "path": "/api/getAnswer",
-            "scheme": "https",
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-length": "63",
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://www.turboseek.io",
@@ -76,7 +73,9 @@
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -88,7 +87,13 @@
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-
+
+    @staticmethod
+    def _turboseek_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from TurboSeek stream JSON objects."""
+        if isinstance(chunk, dict) and "text" in chunk:
+            return chunk.get("text") # json.loads already handles unicode escapes
+        return None

     def ask(
         self,
@@ -125,41 +130,69 @@
                 f"Optimizer is not one of {self.__available_optimizers}"
             )

-        self.session.headers.update(self.headers)
         payload = {
             "question": conversation_prompt,
             "sources": []
         }

         def for_stream():
-
-
-
-
-
-
+            try: # Add try block for CurlError
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.chat_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120", # Try a different impersonation profile
                 )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+
+                streaming_text = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._turboseek_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _turboseek_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        self.last_response.update(dict(text=streaming_text)) # Update last_response incrementally
+                        yield dict(text=content_chunk) if not raw else content_chunk # Yield dict or raw string
+
+                # Update conversation history after stream finishes
+                if streaming_text: # Only update if content was received
+                    self.conversation.update_chat_history(
+                        prompt, streaming_text # Use the fully aggregated text
+                    )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+

         def for_non_stream():
-
-
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    elif isinstance(chunk_data, str): # Handle case where raw=True was passed
+                        full_text += chunk_data
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {e}") from e
+            # last_response and history are updated within for_stream
+            # Ensure last_response reflects the complete aggregated text
+            self.last_response = {"text": full_text}
            return self.last_response

         return for_stream() if stream else for_non_stream()
@@ -209,11 +242,25 @@
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
+        # Unicode escapes are handled by json.loads within sanitize_stream
+        return response.get("text", "")
+
 if __name__ == '__main__':
+    # Ensure curl_cffi is installed
     from rich import print
-
-
-
-
+    try: # Add try-except block for testing
+        ai = TurboSeek(timeout=60)
+        print("[bold blue]Testing Stream:[/bold blue]")
+        response_stream = ai.chat("yooooooooooo", stream=True)
+        for chunk in response_stream:
+            print(chunk, end="", flush=True)
+        # Optional: Test non-stream
+        # print("[bold blue]Testing Non-Stream:[/bold blue]")
+        # response_non_stream = ai.chat("What is the capital of France?", stream=False)
+        # print(response_non_stream)
+        # print("[bold green]Non-Stream Test Complete.[/bold green]")

+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
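
The pattern above repeats across every provider touched in this release: `requests` is replaced by `curl_cffi` so the session presents a real browser TLS fingerprint via `impersonate`, and the raw byte stream is funneled through webscout's `sanitize_stream` helper with a per-provider `content_extractor` (the keyword arguments shown in the diff are internal to webscout). A minimal standalone sketch of the transport half of that pattern, reusing the endpoint and payload from the diff above with error handling trimmed; this is illustrative, not the package's own code:

from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"content-type": "application/json"})

try:
    # impersonate makes curl present a real Chrome TLS/HTTP2 fingerprint,
    # which is the main reason for moving off plain requests.
    response = session.post(
        "https://www.turboseek.io/api/getAnswer",
        json={"question": "hello", "sources": []},
        stream=True,
        timeout=30,
        impersonate="chrome120",
    )
    for chunk in response.iter_content(chunk_size=None):
        # each chunk is bytes; the provider feeds these into sanitize_stream
        print(chunk.decode(errors="ignore"), end="")
except CurlError as e:
    print(f"Transport error: {e}")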
webscout/Provider/tutorai.py
CHANGED
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import os
 from typing import Union, List, Optional
 from string import punctuation
@@ -28,8 +29,6 @@ class TutorAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        model: str = "gpt-4o"
     ):
         """
         Initializes the TutorAI.me API with given parameters.
@@ -47,26 +46,19 @@ class TutorAI(Provider):
             system_prompt (str, optional): System prompt for TutorAI.
                                 Defaults to "You are a helpful AI assistant.".
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://ai-tutor.ai/api/generate-homeworkify-response"
         self.stream_chunk_size = 1024
         self.timeout = timeout
         self.last_response = {}
-
+        # Remove Cookie header, curl_cffi doesn't use it directly like this
         self.headers = {
             "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "Cookie": (
-                "ARRAffinity=5ef5a1afbc0178c19fc7bc85047a2309cb69de3271923483302c69744e2b1d24; "
-                "ARRAffinitySameSite=5ef5a1afbc0178c19fc7bc85047a2309cb69de3271923483302c69744e2b1d24; "
-                "_ga=GA1.1.412867530.1726937399; "
-                "_clck=1kwy10j%7C2%7Cfpd%7C0%7C1725; "
-                "_clsk=1cqd2q1%7C1726937402133%7C1%7C1%7Cm.clarity.ms%2Fcollect; "
-                "_ga_0WF5W33HD7=GS1.1.1726937399.1.1.1726937459.0.0.0"
-            ),
             "DNT": "1",
             "Origin": "https://tutorai.me",
             "Priority": "u=1, i",
@@ -85,7 +77,9 @@ class TutorAI(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -97,12 +91,11 @@ class TutorAI(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies

     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # Note: API doesn't seem to truly stream text chunks
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -140,68 +133,67 @@ class TutorAI(Provider):
             "attachmentsCount": "1" if attachment_path else "0"
         }
         files = {}
+        file_handle = None # To ensure file is closed
         if attachment_path:
             if not os.path.isfile(attachment_path):
                 raise FileNotFoundError(f"Error: The file '{attachment_path}' does not exist.")
             try:
-
+                # Open file handle to pass to curl_cffi
+                file_handle = open(attachment_path, 'rb')
+                files["attachment0"] = (os.path.basename(attachment_path), file_handle, 'image/png') # Adjust mime type if needed
             except Exception as e:
+                if file_handle: file_handle.close() # Close if opened
                 raise exceptions.FailedToGenerateResponseError(f"Error opening the file: {e}")

-
+        # The API doesn't seem to support streaming text chunks based on the original code.
+        # Both stream=True and stream=False resulted in processing the full response.
+        # We will implement the non-stream logic for both cases.
+        try:
+            # Use curl_cffi session post with impersonate
+            # Pass data and files for multipart/form-data
+            response = self.session.post(
+                self.api_endpoint,
+                # headers are set on the session
+                data=form_data,
+                files=files,
+                timeout=self.timeout,
+                impersonate="chrome120", # Try a different impersonation profile
+            )
+            response.raise_for_status() # Check for HTTP errors
+
             try:
-
-
-
-                json_str = ''
-                for chunk in response.iter_content(chunk_size=self.stream_chunk_size, decode_unicode=True):
-                    if chunk:
-                        response_chunks.append(chunk)
-                        yield chunk if raw else dict(text=chunk)
-                json_str = ''.join(response_chunks)
-                try:
-                    response_data = json.loads(json_str)
-                except json.JSONDecodeError as json_err:
-                    raise exceptions.FailedToGenerateResponseError(f"\nError decoding JSON: {json_err}")
-                homeworkify_html = response_data.get("homeworkifyResponse", "")
-                if not homeworkify_html:
-                    raise exceptions.FailedToGenerateResponseError("\nNo 'homeworkifyResponse' found in the response.")
-                clean_text = homeworkify_html # Removed html_to_terminal call
-                self.last_response.update(dict(text=clean_text))
-                self.conversation.update_chat_history(
-                    prompt, self.get_message(self.last_response)
-                )
-            except requests.exceptions.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
-
-        def for_non_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, data=form_data, files=files, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
+                response_data = response.json()
+            except json.JSONDecodeError as json_err:
+                raise exceptions.FailedToGenerateResponseError(f"Error decoding JSON: {json_err} - Response text: {response.text}")

-            # Parse the entire JSON response
-            response_data = response.json()
             homeworkify_html = response_data.get("homeworkifyResponse", "")
             if not homeworkify_html:
-
-
+                # Return empty if no content, consistent with original non-stream logic
+                clean_text = ""
+            else:
+                # Assuming the response is HTML that needs cleaning/parsing
+                # For now, just return the raw HTML content as text
+                clean_text = homeworkify_html

-
-
-
-
-
-
-
+            self.last_response = {"text": clean_text}
+            self.conversation.update_chat_history(prompt, clean_text)
+            return self.last_response # Return the full response content
+
+        except CurlError as e: # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+        except Exception as e: # Catch other potential exceptions
+            # Include response text if available in HTTP errors
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}")
+        finally:
+            if file_handle: # Ensure file is closed
+                file_handle.close()

-        return for_stream() if stream else for_non_stream()

     def chat(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # Keep stream param for interface consistency, though API might not support it
         optimizer: str = None,
         conversationally: bool = False,
         attachment_path: Optional[str] = None,
@@ -246,7 +238,33 @@ class TutorAI(Provider):
 if __name__ == "__main__":
     from rich import print

-
-
-
-    print(
+    try: # Add try-except block for testing
+        ai = TutorAI(timeout=120) # Increased timeout for potential uploads
+        # Test without attachment first
+        print("[bold blue]Testing Text Prompt:[/bold blue]")
+        response_gen = ai.chat("hello buddy", stream=True) # Test stream interface
+        full_response = ""
+        for chunk in response_gen:
+            print(chunk, end="", flush=True)
+            full_response += chunk
+        print("\n[bold green]Text Test Complete.[/bold green]\n")
+
+        # Optional: Test with attachment (replace with a valid image path)
+        # attachment_file = "path/to/your/image.png"
+        # if os.path.exists(attachment_file):
+        #     print(f"[bold blue]Testing with Attachment ({attachment_file}):[/bold blue]")
+        #     response_gen_attach = ai.chat("Describe this image", stream=True, attachment_path=attachment_file)
+        #     full_response_attach = ""
+        #     for chunk in response_gen_attach:
+        #         print(chunk, end="", flush=True)
+        #         full_response_attach += chunk
+        #     print("\n[bold green]Attachment Test Complete.[/bold green]")
+        # else:
+        #     print(f"[bold yellow]Skipping attachment test: File not found at {attachment_file}[/bold yellow]")
+
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except FileNotFoundError as e:
+        print(f"\n[bold red]File Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
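
tutorai.py applies the same migration to a multipart endpoint: regular form fields travel in `data=` while the attachment goes in `files=` as a `(filename, file_handle, mime_type)` tuple, and the handle is closed in a `finally` block. A condensed sketch of that upload flow under the same assumptions (the function name and the generic `RuntimeError` are illustrative, not from the package):

import os
from curl_cffi.requests import Session
from curl_cffi import CurlError

def post_with_attachment(endpoint: str, form_data: dict, attachment_path: str) -> dict:
    session = Session()
    file_handle = None
    try:
        file_handle = open(attachment_path, "rb")
        # (filename, handle, mime type) tuple, as in the diff; adjust the
        # mime type to match the actual attachment.
        files = {"attachment0": (os.path.basename(attachment_path), file_handle, "image/png")}
        response = session.post(
            endpoint,
            data=form_data,   # plain form fields
            files=files,      # switches the request to multipart/form-data
            timeout=120,
            impersonate="chrome120",
        )
        response.raise_for_status()
        return response.json()
    except CurlError as e:
        raise RuntimeError(f"Request failed (CurlError): {e}")
    finally:
        if file_handle:
            file_handle.close()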
webscout/Provider/typefully.py
CHANGED
@@ -1,14 +1,16 @@
-from typing import Union, Any, Dict
-import requests
+from typing import Optional, Union, Any, Dict
 import re
 from uuid import uuid4

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+# Replace requests with curl_cffi
+from curl_cffi.requests import Session # Import Session
+from curl_cffi import CurlError # Import CurlError

 class TypefullyAI(Provider):
     """
@@ -63,7 +65,8 @@ class TypefullyAI(Provider):
         >>> print(ai.system_prompt)
         'You are a friendly assistant.'
         """
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://typefully.com/tools/ai/api/completion"
@@ -96,7 +99,9 @@ class TypefullyAI(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Use proxies directly, not session.proxies.update
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -108,7 +113,17 @@ class TypefullyAI(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-
+
+    @staticmethod
+    def _typefully_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Typefully stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None

     def ask(
         self,
@@ -156,31 +171,50 @@ class TypefullyAI(Provider):
         }

         def for_stream():
-
-
-
-
+            try: # Add try block for CurlError
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate
                 )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_text = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix
+                    to_json=False, # Content is not JSON
+                    content_extractor=self._typefully_extractor, # Use the specific extractor
+                    end_marker="e:", # Stop processing if "e:" line is encountered (adjust if needed)
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield content_chunk if raw else dict(text=content_chunk)
+                # Update history and last response after stream finishes
+                self.last_response.update(dict(text=streaming_text))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

         def for_non_stream():
+            # This function implicitly uses the updated for_stream
             for _ in for_stream():
                 pass
-
+            # Ensure last_response is updated by for_stream before returning
+            return self.last_response

         return for_stream() if stream else for_non_stream()

@@ -246,10 +280,18 @@ class TypefullyAI(Provider):
         'Why did the scarecrow win an award? Because he was outstanding in his field!'
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-
-
+        # Handle potential unicode escapes in the final text
+        # Formatting is now handled by the extractor
+        text = response.get("text", "")
+        try:
+            formatted_text = text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+            return formatted_text
+        except Exception: # Catch potential errors during newline replacement
+            return text # Return original text if formatting fails
+

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
@@ -261,20 +303,28 @@ if __name__ == "__main__":
     for model in TypefullyAI.AVAILABLE_MODELS:
         try:
             test_ai = TypefullyAI(model=model, timeout=60)
-
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
-
+            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
                 response_text += chunk
-                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)

             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
-                #
-
+                # Clean and truncate response
+                clean_text = response_text.strip() # Already formatted in get_message
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
             else:
-                status = "✗"
-                display_text = "Empty or invalid response"
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
             print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
         except Exception as e:
             print(f"\r{model:<50} {'✗':<10} {str(e)}")
-