webscout-8.2.8-py3-none-any.whl → webscout-8.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
```diff
--- /dev/null
+++ b/webscout/Provider/OPENAI/TwoAI.py
@@ -0,0 +1,374 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import time
+import uuid
+import re
+import urllib.parse
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Extra.tempmail import get_random_email
+from webscout.litagent import LitAgent
+
+# Import base classes and utilities from OPENAI provider stack
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent for browser fingerprinting
+try:
+    from webscout.litagent import LitAgent
+except ImportError: # pragma: no cover - LitAgent optional
+    LitAgent = None
+
+
+class Completions(BaseCompletions):
+    """TwoAI chat completions compatible with OpenAI format."""
+
+    def __init__(self, client: 'TwoAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, Any]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """Create a chat completion using TwoAI."""
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
+        return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout if timeout is not None else self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+            response.raise_for_status()
+
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines():
+                if not line:
+                    continue
+                decoded = line.decode("utf-8").strip()
+                if not decoded.startswith("data: "):
+                    continue
+                json_str = decoded[6:]
+                if json_str == "[DONE]":
+                    break
+                try:
+                    data = json.loads(json_str)
+                except json.JSONDecodeError:
+                    continue
+
+                choice_data = data.get("choices", [{}])[0]
+                delta_data = choice_data.get("delta", {})
+                finish_reason = choice_data.get("finish_reason")
+
+                usage_data = data.get("usage", {})
+                if usage_data:
+                    prompt_tokens = usage_data.get("prompt_tokens", prompt_tokens)
+                    completion_tokens = usage_data.get(
+                        "completion_tokens", completion_tokens
+                    )
+                    total_tokens = usage_data.get("total_tokens", total_tokens)
+
+                delta = ChoiceDelta(
+                    content=delta_data.get("content"),
+                    role=delta_data.get("role"),
+                    tool_calls=delta_data.get("tool_calls"),
+                )
+
+                choice = Choice(
+                    index=choice_data.get("index", 0),
+                    delta=delta,
+                    finish_reason=finish_reason,
+                    logprobs=choice_data.get("logprobs"),
+                )
+
+                chunk = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=data.get("system_fingerprint"),
+                )
+
+                yield chunk
+        except Exception as e:
+            raise IOError(f"TwoAI request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                timeout=timeout if timeout is not None else self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+            response.raise_for_status()
+            data = response.json()
+
+            choices_data = data.get("choices", [])
+            usage_data = data.get("usage", {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get("message", {})
+                message = ChatCompletionMessage(
+                    role=message_d.get("role", "assistant"),
+                    content=message_d.get("content", ""),
+                    tool_calls=message_d.get("tool_calls"),
+                )
+                choice = Choice(
+                    index=choice_d.get("index", 0),
+                    message=message,
+                    finish_reason=choice_d.get("finish_reason", "stop"),
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get("prompt_tokens", 0),
+                completion_tokens=usage_data.get("completion_tokens", 0),
+                total_tokens=usage_data.get("total_tokens", 0),
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get("model", model),
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            raise IOError(f"TwoAI request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'TwoAI'):
+        self.completions = Completions(client)
+
+
+class TwoAI(OpenAICompatibleProvider):
+    """OpenAI-compatible client for the TwoAI API."""
+
+    AVAILABLE_MODELS = ["sutra-v2", "sutra-r0"]
+
+    @staticmethod
+    def generate_api_key() -> str:
+        """
+        Generate a new Two AI API key using a temporary email.
+        """
+        email, provider = get_random_email("tempmailio")
+        loops_url = "https://app.loops.so/api/newsletter-form/cm7i4o92h057auy1o74cxbhxo"
+
+        session = Session()
+        session.headers.update({
+            'User-Agent': LitAgent().random(),
+            'Content-Type': 'application/x-www-form-urlencoded',
+            'Origin': 'https://www.two.ai',
+            'Referer': 'https://app.loops.so/',
+        })
+
+        form_data = {
+            'email': email,
+            'userGroup': 'Via Framer',
+            'mailingLists': 'cm8ay9cic00x70kjv0bd34k66'
+        }
+
+        encoded_data = urllib.parse.urlencode(form_data)
+        response = session.post(loops_url, data=encoded_data, impersonate="chrome120")
+
+        if response.status_code != 200:
+            raise RuntimeError(f"Failed to register for Two AI: {response.status_code} - {response.text}")
+
+        max_attempts = 15
+        attempt = 0
+        api_key = None
+        wait_time = 2
+
+        while attempt < max_attempts and not api_key:
+            messages = provider.get_messages()
+            for message in messages:
+                subject = message.get('subject', '')
+                sender = ''
+                if 'from' in message:
+                    if isinstance(message['from'], dict):
+                        sender = message['from'].get('address', '')
+                    else:
+                        sender = str(message['from'])
+                elif 'sender' in message:
+                    if isinstance(message['sender'], dict):
+                        sender = message['sender'].get('address', '')
+                    else:
+                        sender = str(message['sender'])
+                subject_match = any(keyword in subject.lower() for keyword in
+                                    ['welcome', 'confirm', 'verify', 'api', 'key', 'sutra', 'two.ai', 'loops'])
+                sender_match = any(keyword in sender.lower() for keyword in
+                                   ['two.ai', 'sutra', 'loops.so', 'loops', 'no-reply', 'noreply'])
+                is_confirmation = subject_match or sender_match
+
+                content = None
+                if 'body' in message:
+                    content = message['body']
+                elif 'content' in message and 'text' in message['content']:
+                    content = message['content']['text']
+                elif 'html' in message:
+                    content = message['html']
+                elif 'text' in message:
+                    content = message['text']
+                if not content:
+                    continue
+
+                # Robust API key extraction with multiple regex patterns
+                patterns = [
+                    r'sutra_[A-Za-z0-9]{60,70}',
+                    r'sutra_[A-Za-z0-9]{30,}',
+                    r'sutra_\S+',
+                ]
+                api_key_match = None
+                for pat in patterns:
+                    api_key_match = re.search(pat, content)
+                    if api_key_match:
+                        break
+                # Also try to extract from labeled section
+                if not api_key_match:
+                    key_section_match = re.search(r'🔑 SUTRA API Key\s*([^\s]+)', content)
+                    if key_section_match:
+                        api_key_match = re.search(r'sutra_[A-Za-z0-9]+', key_section_match.group(1))
+                if api_key_match:
+                    api_key = api_key_match.group(0)
+                    break
+            if not api_key:
+                attempt += 1
+                time.sleep(wait_time)
+        if not api_key:
+            raise RuntimeError("Failed to get API key from confirmation email")
+        return api_key
+
+    def __init__(self, browser: str = "chrome"):
+        api_key = self.generate_api_key()
+        self.timeout = 30
+        self.base_url = "https://api.two.ai/v2/chat/completions"
+        self.api_key = api_key
+        self.session = Session()
+        self.session.proxies = {}
+
+        headers: Dict[str, str] = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {api_key}",
+        }
+
+        if LitAgent is not None:
+            try:
+                agent = LitAgent()
+                fingerprint = agent.generate_fingerprint(browser)
+                headers.update({
+                    "Accept": fingerprint["accept"],
+                    "Accept-Encoding": "gzip, deflate, br, zstd",
+                    "Accept-Language": fingerprint["accept_language"],
+                    "Cache-Control": "no-cache",
+                    "Connection": "keep-alive",
+                    "Origin": "https://chat.two.ai",
+                    "Pragma": "no-cache",
+                    "Referer": "https://chat.two.ai/",
+                    "Sec-Fetch-Dest": "empty",
+                    "Sec-Fetch-Mode": "cors",
+                    "Sec-Fetch-Site": "same-site",
+                    "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+                    "Sec-CH-UA-Mobile": "?0",
+                    "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+                    "User-Agent": fingerprint["user_agent"],
+                })
+            except Exception:
+                # Fallback minimal headers if fingerprinting fails
+                headers.update({
+                    "Accept": "application/json",
+                    "Accept-Encoding": "gzip, deflate, br",
+                    "Accept-Language": "en-US,en;q=0.9",
+                    "User-Agent": "Mozilla/5.0",
+                })
+        else:
+            headers.update({
+                "Accept": "application/json",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Accept-Language": "en-US,en;q=0.9",
+                "User-Agent": "Mozilla/5.0",
+            })
+
+        self.headers = headers
+        self.session.headers.update(headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    from rich import print
+    two_ai = TwoAI()
+    resp = two_ai.chat.completions.create(
+        model="sutra-v2",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        stream=True
+    )
+    for chunk in resp:
+        print(chunk, end="")
```
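The demo block at the end of the new module exercises streaming; for reference, here is a minimal non-streaming sketch against the same client. It assumes webscout 8.3 is installed and that the temp-mail key registration in `generate_api_key()` succeeds at construction time.

```python
# A sketch of non-streaming usage of the new TwoAI provider. TwoAI()
# self-provisions a sutra_ API key via a temporary email, so construction
# can fail if that registration flow is blocked.
from webscout.Provider.OPENAI.TwoAI import TwoAI

client = TwoAI()
completion = client.chat.completions.create(
    model="sutra-v2",  # from TwoAI.AVAILABLE_MODELS
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=False,
)
print(completion.choices[0].message.content)
```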
```diff
--- a/webscout/Provider/OPENAI/__init__.py
+++ b/webscout/Provider/OPENAI/__init__.py
@@ -33,4 +33,10 @@ from .c4ai import *
 from .flowith import *
 from .Cloudflare import *
 from .NEMOTRON import *
-from .BLACKBOXAI import *
+from .BLACKBOXAI import *
+from .copilot import * # Add Microsoft Copilot
+from .TwoAI import *
+from .oivscode import * # Add OnRender provider
+from .Qwen3 import *
+from .FalconH1 import *
+from .PI import * # Add PI.ai provider
```
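These star imports are what re-export the new clients at the subpackage level; a quick sanity check, assuming each module exposes its provider class through default star-import behavior:

```python
# Assumes `from .TwoAI import *` re-exports the TwoAI class (default
# `__all__`-less star-import behavior); the other new modules follow
# the same pattern.
from webscout.Provider.OPENAI import TwoAI

print(TwoAI.AVAILABLE_MODELS)  # ['sutra-v2', 'sutra-r0'] per the new module
```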
```diff
--- a/webscout/Provider/OPENAI/ai4chat.py
+++ b/webscout/Provider/OPENAI/ai4chat.py
@@ -8,7 +8,7 @@ from typing import List, Dict, Optional, Union, Generator, Any
 from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
+    ChatCompletionMessage, CompletionUsage, count_tokens
 )
 
 # --- AI4Chat Client ---
@@ -26,6 +26,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -48,57 +50,58 @@ class Completions(BaseCompletions):
 
         # AI4Chat doesn't support streaming, so we'll simulate it if requested
         if stream:
-            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
 
     def _create_stream(
         self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
+        conversation_prompt: str, country: str, user_id: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
-        """Simulate streaming by breaking up the full response."""
+        """Simulate streaming by breaking up the full response into fixed-size character chunks."""
         try:
             # Get the full response first
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
-
-            # Break it into chunks for simulated streaming
-            words = full_response.split()
-            chunk_size = max(1, len(words) // 10) # Divide into ~10 chunks
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
 
             # Track token usage
-            prompt_tokens =
+            prompt_tokens = count_tokens(conversation_prompt)
             completion_tokens = 0
 
-            # Stream chunks
-            [… 29 removed lines not preserved in the extracted diff …]
+            # Stream fixed-size character chunks (e.g., 48 chars)
+            buffer = full_response
+            chunk_size = 48
+            while buffer:
+                chunk_text = buffer[:chunk_size]
+                buffer = buffer[chunk_size:]
+                completion_tokens += count_tokens(chunk_text)
+
+                if chunk_text.strip():
+                    # Create the delta object
+                    delta = ChoiceDelta(
+                        content=chunk_text,
+                        role="assistant",
+                        tool_calls=None
+                    )
+
+                    # Create the choice object
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None,
+                        logprobs=None
+                    )
+
+                    # Create the chunk object
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                        system_fingerprint=None
+                    )
+
+                    yield chunk
 
             # Final chunk with finish_reason="stop"
             delta = ChoiceDelta(
@@ -133,16 +136,17 @@ class Completions(BaseCompletions):
 
     def _create_non_stream(
         self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
+        conversation_prompt: str, country: str, user_id: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
    ) -> ChatCompletion:
         """Get a complete response from AI4Chat."""
         try:
             # Get the full response
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
 
             # Estimate token counts
-            prompt_tokens =
-            completion_tokens =
+            prompt_tokens = count_tokens(conversation_prompt)
+            completion_tokens = count_tokens(full_response)
             total_tokens = prompt_tokens + completion_tokens
 
             # Create the message object
@@ -183,22 +187,31 @@ class Completions(BaseCompletions):
             print(f"Unexpected error during AI4Chat non-stream request: {e}")
             raise IOError(f"AI4Chat request failed: {e}") from e
 
-    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str
+    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str,
+                              timeout: Optional[int] = None, proxies: Optional[dict] = None) -> str:
         """Make the actual API request to AI4Chat."""
-        [… 4 removed lines not preserved in the extracted diff …]
+        timeout_val = timeout if timeout is not None else self._client.timeout
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+
+        try:
+            # URL encode parameters
+            encoded_text = urllib.parse.quote(prompt)
+            encoded_country = urllib.parse.quote(country)
+            encoded_user_id = urllib.parse.quote(user_id)
 
-        [… 2 removed lines not preserved in the extracted diff …]
+            # Construct the API URL
+            url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
 
-        [… 2 removed lines not preserved in the extracted diff …]
-            response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
+            # Make the request
+            response = self._client.session.get(url, headers=self._client.headers, timeout=timeout_val)
             response.raise_for_status()
         except RequestsError as e:
             raise IOError(f"Failed to generate response: {e}")
+        finally:
+            if proxies is not None:
+                self._client.session.proxies = original_proxies
 
         # Process the response text
         response_text = response.text
@@ -235,8 +248,6 @@ class AI4Chat(OpenAICompatibleProvider):
 
     def __init__(
         self,
-        timeout: int = 30,
-        proxies: dict = {},
         system_prompt: str = "You are a helpful and informative AI assistant.",
         country: str = "Asia",
         user_id: str = "usersmjb2oaz7y"
@@ -245,14 +256,11 @@ class AI4Chat(OpenAICompatibleProvider):
         Initialize the AI4Chat client.
 
         Args:
-            timeout: Request timeout in seconds
-            proxies: Optional proxy configuration
             system_prompt: System prompt to guide the AI's behavior
             country: Country parameter for API
             user_id: User ID for API
         """
-        self.timeout =
-        self.proxies = proxies
+        self.timeout = 30
         self.system_prompt = system_prompt
         self.country = country
         self.user_id = user_id
@@ -261,7 +269,9 @@ class AI4Chat(OpenAICompatibleProvider):
         self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
 
         # Initialize session
-        self.session = Session(
+        self.session = Session()
+        self.session.proxies = {}
+        # self.session.timeout = self.timeout # Timeout is per-request for curl_cffi
 
         # Set headers
         self.headers = {
@@ -290,4 +300,4 @@ class AI4Chat(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
```
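The streaming rewrite above drops the old word-based splitter in favor of fixed 48-character slices. A self-contained sketch of that technique (the function name and sample text here are illustrative, not part of the package):

```python
from typing import Generator

def simulate_stream(full_response: str, chunk_size: int = 48) -> Generator[str, None, None]:
    """Re-emit an already-complete response as fixed-size character chunks,
    mirroring the diff's loop: slice, advance the buffer, and skip
    whitespace-only chunks (note this drops purely-whitespace slices,
    exactly as the diff's strip() check does)."""
    buffer = full_response
    while buffer:
        chunk_text, buffer = buffer[:chunk_size], buffer[chunk_size:]
        if chunk_text.strip():
            yield chunk_text

# Prints the text in 48-character pieces, like the simulated SSE stream.
for piece in simulate_stream("AI4Chat returns the whole answer in one response body."):
    print(piece, end="", flush=True)
print()
```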