webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
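The file listing below was produced by a registry diff service, but the same comparison can be reproduced locally. Here is a minimal sketch, assuming both wheels have already been downloaded (for example with `pip download webscout==8.2.3 --no-deps` and likewise for 8.2.5); the `wheel_text` helper is illustrative, not part of webscout:

import difflib
import zipfile

def wheel_text(path: str, member: str) -> list[str]:
    """Read one file out of a wheel (a zip archive) as a list of lines."""
    with zipfile.ZipFile(path) as wheel:
        return wheel.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)

# Compare one module across the two releases.
old = wheel_text("webscout-8.2.3-py3-none-any.whl", "webscout/Provider/TwoAI.py")
new = wheel_text("webscout-8.2.5-py3-none-any.whl", "webscout/Provider/TwoAI.py")
for line in difflib.unified_diff(old, new, "8.2.3/TwoAI.py", "8.2.5/TwoAI.py"):
    print(line, end="")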
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/TwoAI.py
CHANGED
@@ -1,21 +1,26 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
-import os
 from typing import Any, Dict, Optional, Generator, Union
+import re # Import re for parsing SSE

 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+
+
 class TwoAI(Provider):
     """
-    A class to interact with the Two AI API with LitAgent user-agent.
+    A class to interact with the Two AI API (v2) with LitAgent user-agent.
     """

     AVAILABLE_MODELS = [
-        "sutra-
+        "sutra-v2",
+        "sutra-r0"
+
     ]

     def __init__(
@@ -30,26 +35,27 @@ class TwoAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "sutra-
+        model: str = "sutra-v2", # Update default model
         temperature: float = 0.6,
         system_message: str = "You are a helpful assistant."
     ):
         """Initializes the TwoAI API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.url = "https://api.two.app/
+        self.url = "https://api.two.app/v2/chat/completions" # Update API endpoint
         self.headers = {
             'User-Agent': LitAgent().random(),
-            'Accept': 'application/json',
+            'Accept': 'application/json', # Keep application/json for request, response is text/event-stream
             'Content-Type': 'application/json',
             'X-Session-Token': api_key,
             'Origin': 'https://chat.two.ai',
             'Referer': 'https://api.two.app/'
         }
-
-
+
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.session.headers.update(self.headers)
-        self.session.proxies
+        self.session.proxies = proxies

         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -77,6 +83,19 @@ class TwoAI(Provider):
         )
         self.conversation.history_offset = history_offset

+    @staticmethod
+    def _twoai_extractor(chunk_json: Dict[str, Any]) -> Optional[str]:
+        """Extracts content from TwoAI v2 stream JSON objects."""
+        if not isinstance(chunk_json, dict) or "choices" not in chunk_json or not chunk_json["choices"]:
+            return None
+
+        delta = chunk_json["choices"][0].get("delta")
+        if not isinstance(delta, dict):
+            return None
+
+        content = delta.get("content")
+        return content if isinstance(content, str) else None
+
     def ask(
         self,
         prompt: str,
@@ -85,65 +104,102 @@ class TwoAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         online_search: bool = True,
-        reasoning_on: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

-        # Payload construction
         payload = {
             "messages": [
-                {"role": "system", "content": self.system_message},
+                *([{"role": "system", "content": self.system_message}] if self.system_message else []),
                 {"role": "user", "content": conversation_prompt},
             ],
             "model": self.model,
             "temperature": self.temperature,
             "max_tokens": self.max_tokens_to_sample,
-            "
-            [... 1 removed line (old 109) not rendered in the source view ...]
+            "extra_body": {
+                "online_search": online_search,
+            }
         }

         def for_stream():
+            streaming_text = "" # Initialize outside try block
             try:
-                [... 19 removed lines (old 114-132) not rendered in the source view ...]
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+
+                if response.status_code != 200:
+                    error_detail = response.text
+                    try:
+                        error_json = response.json()
+                        error_detail = error_json.get("error", {}).get("message", error_detail)
+                    except json.JSONDecodeError:
+                        pass
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code} - {error_detail}"
+                    )
+
+                # Use sanitize_stream for SSE processing
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._twoai_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _twoai_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+                # If stream completes successfully, update history
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except exceptions.FailedToGenerateResponseError:
+                raise # Re-raise specific exception
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
+            finally:
+                # Ensure history is updated even if stream ends abruptly but text was received
+                if streaming_text and not self.last_response: # Check if last_response wasn't set in the try block
                     self.last_response = {"text": streaming_text}
                     self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+

         def for_non_stream():
+            # Non-stream still uses the stream internally and aggregates
             streaming_text = ""
-            [... 4 removed lines (old 141-144) not rendered in the source view ...]
+            # We need to consume the generator from for_stream()
+            gen = for_stream()
+            try:
+                for chunk_data in gen:
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        streaming_text += chunk_data["text"]
+                    elif isinstance(chunk_data, str): # Handle raw=True case
+                        streaming_text += chunk_data
+            except exceptions.FailedToGenerateResponseError:
+                # If the underlying stream fails, re-raise the error
+                raise
+            # self.last_response and history are updated within for_stream's try/finally
+            return self.last_response # Return the final aggregated dict

-        [... 1 removed line (old 146) not rendered in the source view ...]
+        effective_stream = stream if stream is not None else True
+        return for_stream() if effective_stream else for_non_stream()

     def chat(
         self,
@@ -152,48 +208,73 @@ class TwoAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         online_search: bool = True,
-        reasoning_on: bool = False,
     ) -> str:
-        [... 5 removed lines (old 157-161) not rendered in the source view ...]
+        effective_stream = stream if stream is not None else True
+
+        def for_stream_chat():
+            # ask() yields dicts when raw=False (default for chat)
+            gen = self.ask(
+                prompt,
+                stream=True,
+                raw=False, # Ensure ask yields dicts
+                optimizer=optimizer,
                 conversationally=conversationally,
                 online_search=online_search,
-                reasoning_on=reasoning_on
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                    online_search=online_search,
-                    reasoning_on=reasoning_on
-                )
             )
-
-        [... 1 removed line (old 180) not rendered in the source view ...]
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns a dict when stream=False
+            response_dict = self.ask(
+                prompt,
+                stream=False, # Ensure ask returns dict
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+                online_search=online_search,
+            )
+            return self.get_message(response_dict) # get_message expects dict
+
+        return for_stream_chat() if effective_stream else for_non_stream_chat()

     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response
+        return response.get("text", "") # Use .get for safety
+

 if __name__ == "__main__":
     from rich import print
+    import os
+
+    api_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJzanl2OHJtZGxDZDFnQ2hQdGxzZHdxUlVteXkyIiwic291cmNlIjoiRmlyZWJhc2UiLCJpYXQiOjE3NDYxMDY0NjksImV4cCI6MTc0NjEwNzM2OX0.o3fprDgsUJwvwCsWr0HfqmVpSBUthHsxqnopfWhtiYc"
+
+    try:
+        ai = TwoAI(
+            api_key=api_key,
+            timeout=60,
+            model="sutra-r0",
+            system_message="You are an intelligent AI assistant. Be concise and helpful."
+        )
+
+        response_stream = ai.chat("write me a poem about AI", stream=True, online_search=True)
+        full_stream_response = ""
+        for chunk in response_stream:
+            print(chunk, end="", flush=True)
+            full_stream_response += chunk
+        print("\n[bold green]Stream Test Complete.[/bold green]\n")
+
+        # Optional: Test non-stream
+        # print("[bold blue]Testing Non-Stream:[/bold blue]")
+        # non_stream_response = ai.chat("What is the capital of France?", stream=False, online_search=False)
+        # print(non_stream_response)
+        # print("[bold green]Non-Stream Test Complete.[/bold green]\n")
+
+
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except ValueError as e:
+        print(f"\n[bold red]Configuration Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")

-    api_key = ""
-
-    ai = TwoAI(
-        api_key=api_key,
-        timeout=60,
-        system_message="You are an intelligent AI assistant. Be concise and helpful."
-    )
-
-    response = ai.chat("666+444=?", stream=True, reasoning_on=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
webscout/Provider/TypliAI.py
ADDED
@@ -0,0 +1,305 @@
+import re
+import json
+import random
+import string
+from typing import Optional, Union, Any, Dict, Generator
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+# from curl_cffi.const import CurlHttpVersion # Not strictly needed if using default
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+
+def generate_random_id(length=16):
+    """Generates a random alphanumeric string."""
+    characters = string.ascii_letters + string.digits
+    return ''.join(random.choice(characters) for i in range(length))
+
+class TypliAI(Provider):
+    """
+    A class to interact with the Typli.ai API.
+
+    Attributes:
+        system_prompt (str): The system prompt to define the assistant's role.
+
+    Examples:
+        >>> from lol import TypliAI
+        >>> ai = TypliAI()
+        >>> response = ai.chat("What's the weather today?")
+        >>> print(response)
+        'I don't have access to real-time weather information...'
+    """
+    AVAILABLE_MODELS = ["free-no-sign-up-chatgpt"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+        model: str = "free-no-sign-up-chatgpt"
+    ):
+        """
+        Initializes the TypliAI API with given parameters.
+
+        Args:
+            is_conversation (bool): Whether the provider is in conversation mode.
+            max_tokens (int): Maximum number of tokens to sample.
+            timeout (int): Timeout for API requests.
+            intro (str): Introduction message for the conversation.
+            filepath (str): Filepath for storing conversation history.
+            update_file (bool): Whether to update the conversation history file.
+            proxies (dict): Proxies for the API requests.
+            history_offset (int): Offset for conversation history.
+            act (str): Act for the conversation.
+            system_prompt (str): The system prompt to define the assistant's role.
+            model (str): The model to use for generation.
+        """
+        # Initialize curl_cffi Session instead of requests.Session
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://typli.ai/api/generators/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.model = model
+
+        # Initialize LitAgent for user agent generation if available
+
+        self.agent = LitAgent()
+        # user_agent = self.agent.random() # Let impersonate handle the user-agent
+        self.headers = {
+            'accept': '*/*', # Changed from '/' in example, but '*' is safer
+            'accept-language': 'en-US,en;q=0.9',
+            'content-type': 'application/json',
+            'origin': 'https://typli.ai',
+            'referer': 'https://typli.ai/free-no-sign-up-chatgpt',
+            # Let impersonate handle sec-ch-ua headers
+            # 'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+            # 'sec-ch-ua-mobile': '?0',
+            # 'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'dnt': '1',
+            # 'user-agent': user_agent, # Let impersonate handle this
+        }
+
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+
+    @staticmethod
+    def _typli_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Typli.ai stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"', chunk)
+            if match:
+                # Decode potential unicode escapes like \u00e9
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes and quotes
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]:
+        """
+        Sends a prompt to the Typli.ai API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]: The API response.
+        """
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+
+        payload = {
+            "id": generate_random_id(),
+            "messages": [
+                { # Add the system role message
+                    "role": "system",
+                    "content": self.system_prompt
+                },
+                {
+                    "role": "user",
+                    "content": conversation_prompt,
+                    "parts": [
+                        {
+                            "type": "text",
+                            "text": conversation_prompt
+                        }
+                    ]
+                }
+            ],
+            "slug": self.model
+        }
+
+        def for_stream():
+            try:
+                # Use curl_cffi session post with updated impersonate and http_version
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120", # Switch to a more common profile
+                    # http_version=CurlHttpVersion.V1_1 # Usually not needed
+                )
+                if not response.ok:
+                    error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    raise exceptions.FailedToGenerateResponseError(error_msg)
+
+                streaming_response = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix like 'data:'
+                    to_json=False, # Content is extracted as string, not JSON object per line
+                    content_extractor=self._typli_extractor, # Use the specific extractor
+                    skip_markers=["f:{", "e:{", "d:{", "8:[", "2:["] # Skip metadata lines based on observed format
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str): # Extractor returns string
+                        streaming_response += content_chunk
+                        yield content_chunk if raw else dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+
+            except CurlError as e: # Catch CurlError
+                error_msg = f"Request failed (CurlError): {e}"
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+            except Exception as e: # Catch other potential exceptions
+                # Include the original exception type in the message for clarity
+                error_msg = f"An unexpected error occurred ({type(e).__name__}): {e}"
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+
+        def for_non_stream():
+            # This function implicitly uses the updated for_stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generates a response from the Typli.ai API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Union[str, Generator[str, None, None]]: The API response.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        # Ensure text exists before processing
+        return response.get("text", "")
+
+
+
+if __name__ == "__main__":
+    from rich import print
+    try:
+        ai = TypliAI(timeout=60)
+        response = ai.chat("Write a short poem about AI", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")
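Unlike TwoAI, TypliAI's stream is not SSE carrying JSON: _typli_extractor pulls text out of lines shaped like 0:"..." while skip_markers drops the metadata lines (f:{, e:{, d:{, 8:[, 2:[). A self-contained sketch of that parsing, using illustrative sample lines rather than captured Typli output:

import re
from typing import Optional

def typli_extractor(chunk: str) -> Optional[str]:
    # Same regex logic as TypliAI._typli_extractor in the diff above.
    match = re.search(r'0:"(.*?)"', chunk)
    if match:
        content = match.group(1).encode().decode('unicode_escape')
        return content.replace('\\\\', '\\').replace('\\"', '"')
    return None

sample_lines = [
    'f:{"messageId":"abc123"}',   # metadata -> dropped by skip_markers
    '0:"Roses are red,"',         # content line
    '0:" circuits are blue"',     # content line
    'e:{"finishReason":"stop"}',  # metadata -> dropped by skip_markers
]

text = ""
for line in sample_lines:
    if any(line.startswith(m) for m in ("f:{", "e:{", "d:{", "8:[", "2:[")):
        continue
    piece = typli_extractor(line)
    if piece:
        text += piece
print(text)  # -> "Roses are red, circuits are blue"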