webscout-8.2.8-py3-none-any.whl → webscout-8.2.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +32 -14
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +153 -35
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +171 -81
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
- webscout/Provider/OPENAI/Cloudflare.py +7 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -5
- webscout/Provider/OPENAI/NEMOTRON.py +8 -20
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +5 -1
- webscout/Provider/OPENAI/ai4chat.py +40 -40
- webscout/Provider/OPENAI/api.py +808 -649
- webscout/Provider/OPENAI/c4ai.py +3 -3
- webscout/Provider/OPENAI/chatgpt.py +555 -555
- webscout/Provider/OPENAI/chatgptclone.py +493 -487
- webscout/Provider/OPENAI/chatsandbox.py +4 -3
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +5 -2
- webscout/Provider/OPENAI/e2b.py +63 -5
- webscout/Provider/OPENAI/exaai.py +416 -410
- webscout/Provider/OPENAI/exachat.py +444 -443
- webscout/Provider/OPENAI/freeaichat.py +2 -2
- webscout/Provider/OPENAI/glider.py +5 -2
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +308 -307
- webscout/Provider/OPENAI/mcpcore.py +8 -2
- webscout/Provider/OPENAI/multichat.py +4 -4
- webscout/Provider/OPENAI/netwrck.py +6 -5
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +15 -9
- webscout/Provider/OPENAI/sonus.py +304 -303
- webscout/Provider/OPENAI/standardinput.py +433 -433
- webscout/Provider/OPENAI/textpollinations.py +4 -4
- webscout/Provider/OPENAI/toolbaz.py +413 -413
- webscout/Provider/OPENAI/typefully.py +3 -3
- webscout/Provider/OPENAI/typegpt.py +11 -5
- webscout/Provider/OPENAI/uncovrAI.py +463 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +431 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +3 -3
- webscout/Provider/OPENAI/x0gpt.py +365 -378
- webscout/Provider/OPENAI/yep.py +39 -13
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -0
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/TwoAI.py (new file)
@@ -0,0 +1,357 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import time
+import uuid
+import re
+import urllib.parse
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Extra.tempmail import get_random_email
+from webscout.litagent import LitAgent
+
+# Import base classes and utilities from OPENAI provider stack
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent for browser fingerprinting
+try:
+    from webscout.litagent import LitAgent
+except ImportError: # pragma: no cover - LitAgent optional
+    LitAgent = None
+
+
+class Completions(BaseCompletions):
+    """TwoAI chat completions compatible with OpenAI format."""
+
+    def __init__(self, client: 'TwoAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, Any]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """Create a chat completion using TwoAI."""
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout,
+            )
+            response.raise_for_status()
+
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines():
+                if not line:
+                    continue
+                decoded = line.decode("utf-8").strip()
+                if not decoded.startswith("data: "):
+                    continue
+                json_str = decoded[6:]
+                if json_str == "[DONE]":
+                    break
+                try:
+                    data = json.loads(json_str)
+                except json.JSONDecodeError:
+                    continue
+
+                choice_data = data.get("choices", [{}])[0]
+                delta_data = choice_data.get("delta", {})
+                finish_reason = choice_data.get("finish_reason")
+
+                usage_data = data.get("usage", {})
+                if usage_data:
+                    prompt_tokens = usage_data.get("prompt_tokens", prompt_tokens)
+                    completion_tokens = usage_data.get(
+                        "completion_tokens", completion_tokens
+                    )
+                    total_tokens = usage_data.get("total_tokens", total_tokens)
+
+                delta = ChoiceDelta(
+                    content=delta_data.get("content"),
+                    role=delta_data.get("role"),
+                    tool_calls=delta_data.get("tool_calls"),
+                )
+
+                choice = Choice(
+                    index=choice_data.get("index", 0),
+                    delta=delta,
+                    finish_reason=finish_reason,
+                    logprobs=choice_data.get("logprobs"),
+                )
+
+                chunk = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=data.get("system_fingerprint"),
+                )
+
+                yield chunk
+        except CurlError as e:  # transport-level failure; narrowed so the handler below stays reachable
+            raise IOError(f"TwoAI request failed: {e}") from e
+        except Exception as e:
+            raise IOError(f"Error processing TwoAI stream: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                timeout=self._client.timeout,
+            )
+            response.raise_for_status()
+            data = response.json()
+
+            choices_data = data.get("choices", [])
+            usage_data = data.get("usage", {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get("message", {})
+                message = ChatCompletionMessage(
+                    role=message_d.get("role", "assistant"),
+                    content=message_d.get("content", ""),
+                    tool_calls=message_d.get("tool_calls"),
+                )
+                choice = Choice(
+                    index=choice_d.get("index", 0),
+                    message=message,
+                    finish_reason=choice_d.get("finish_reason", "stop"),
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get("prompt_tokens", 0),
+                completion_tokens=usage_data.get("completion_tokens", 0),
+                total_tokens=usage_data.get("total_tokens", 0),
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get("model", model),
+                usage=usage,
+            )
+            return completion
+        except CurlError as e:  # transport-level failure; narrowed so the handler below stays reachable
+            raise IOError(f"TwoAI request failed: {e}") from e
+        except Exception as e:
+            raise IOError(f"Error processing TwoAI response: {e}") from e
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'TwoAI'):
+        self.completions = Completions(client)
+
+
+class TwoAI(OpenAICompatibleProvider):
+    """OpenAI-compatible client for the TwoAI API."""
+
+    AVAILABLE_MODELS = ["sutra-v2", "sutra-r0"]
+
+    @staticmethod
+    def generate_api_key() -> str:
+        """
+        Generate a new Two AI API key using a temporary email.
+        """
+        email, provider = get_random_email("tempmailio")
+        loops_url = "https://app.loops.so/api/newsletter-form/cm7i4o92h057auy1o74cxbhxo"
+
+        session = Session()
+        session.headers.update({
+            'User-Agent': LitAgent().random(),
+            'Content-Type': 'application/x-www-form-urlencoded',
+            'Origin': 'https://www.two.ai',
+            'Referer': 'https://app.loops.so/',
+        })
+
+        form_data = {
+            'email': email,
+            'userGroup': 'Via Framer',
+            'mailingLists': 'cm8ay9cic00x70kjv0bd34k66'
+        }
+
+        encoded_data = urllib.parse.urlencode(form_data)
+        response = session.post(loops_url, data=encoded_data, impersonate="chrome120")
+
+        if response.status_code != 200:
+            raise RuntimeError(f"Failed to register for Two AI: {response.status_code} - {response.text}")
+
+        max_attempts = 15
+        attempt = 0
+        api_key = None
+        wait_time = 2
+
+        while attempt < max_attempts and not api_key:
+            messages = provider.get_messages()
+            for message in messages:
+                subject = message.get('subject', '')
+                sender = ''
+                if 'from' in message:
+                    if isinstance(message['from'], dict):
+                        sender = message['from'].get('address', '')
+                    else:
+                        sender = str(message['from'])
+                elif 'sender' in message:
+                    if isinstance(message['sender'], dict):
+                        sender = message['sender'].get('address', '')
+                    else:
+                        sender = str(message['sender'])
+                subject_match = any(keyword in subject.lower() for keyword in
+                                    ['welcome', 'confirm', 'verify', 'api', 'key', 'sutra', 'two.ai', 'loops'])
+                sender_match = any(keyword in sender.lower() for keyword in
+                                   ['two.ai', 'sutra', 'loops.so', 'loops', 'no-reply', 'noreply'])
+                is_confirmation = subject_match or sender_match
+
+                content = None
+                if 'body' in message:
+                    content = message['body']
+                elif 'content' in message and 'text' in message['content']:
+                    content = message['content']['text']
+                elif 'html' in message:
+                    content = message['html']
+                elif 'text' in message:
+                    content = message['text']
+                if not content:
+                    continue
+
+                # Robust API key extraction with multiple regex patterns
+                patterns = [
+                    r'sutra_[A-Za-z0-9]{60,70}',
+                    r'sutra_[A-Za-z0-9]{30,}',
+                    r'sutra_\S+',
+                ]
+                api_key_match = None
+                for pat in patterns:
+                    api_key_match = re.search(pat, content)
+                    if api_key_match:
+                        break
+                # Also try to extract from labeled section
+                if not api_key_match:
+                    key_section_match = re.search(r'🔑 SUTRA API Key\s*([^\s]+)', content)
+                    if key_section_match:
+                        api_key_match = re.search(r'sutra_[A-Za-z0-9]+', key_section_match.group(1))
+                if api_key_match:
+                    api_key = api_key_match.group(0)
+                    break
+            if not api_key:
+                attempt += 1
+                time.sleep(wait_time)
+        if not api_key:
+            raise RuntimeError("Failed to get API key from confirmation email")
+        return api_key
+
+    def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
+        api_key = self.generate_api_key()
+        self.timeout = timeout
+        self.base_url = "https://api.two.ai/v2/chat/completions"
+        self.api_key = api_key
+        self.session = Session()
+
+        headers: Dict[str, str] = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {api_key}",
+        }
+
+        if LitAgent is not None:
+            try:
+                agent = LitAgent()
+                fingerprint = agent.generate_fingerprint(browser)
+                headers.update({
+                    "Accept": fingerprint["accept"],
+                    "Accept-Encoding": "gzip, deflate, br, zstd",
+                    "Accept-Language": fingerprint["accept_language"],
+                    "Cache-Control": "no-cache",
+                    "Connection": "keep-alive",
+                    "Origin": "https://chat.two.ai",
+                    "Pragma": "no-cache",
+                    "Referer": "https://chat.two.ai/",
+                    "Sec-Fetch-Dest": "empty",
+                    "Sec-Fetch-Mode": "cors",
+                    "Sec-Fetch-Site": "same-site",
+                    "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+                    "Sec-CH-UA-Mobile": "?0",
+                    "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+                    "User-Agent": fingerprint["user_agent"],
+                })
+            except Exception:
+                # Fallback minimal headers if fingerprinting fails
+                headers.update({
+                    "Accept": "application/json",
+                    "Accept-Encoding": "gzip, deflate, br",
+                    "Accept-Language": "en-US,en;q=0.9",
+                    "User-Agent": "Mozilla/5.0",
+                })
+        else:
+            headers.update({
+                "Accept": "application/json",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Accept-Language": "en-US,en;q=0.9",
+                "User-Agent": "Mozilla/5.0",
+            })
+
+        self.headers = headers
+        self.session.headers.update(headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    from rich import print
+    two_ai = TwoAI()
+    resp = two_ai.chat.completions.create(
+        model="sutra-v2",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        stream=True
+    )
+    for chunk in resp:
+        print(chunk, end="")
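
The new TwoAI module plugs into the same OpenAI-style client surface as the rest of `webscout/Provider/OPENAI`. A minimal non-streaming sketch, assuming the 8.2.9 wheel is installed and outbound network access is available (the constructor registers a throwaway tempmail address and scrapes a `sutra_` API key before the first request):

```python
from webscout.Provider.OPENAI.TwoAI import TwoAI

client = TwoAI(timeout=30)   # generate_api_key() runs inside __init__
print(client.models.list())  # ['sutra-v2', 'sutra-r0']

completion = client.chat.completions.create(
    model="sutra-v2",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=False,
)
print(completion.choices[0].message.content)
```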
webscout/Provider/OPENAI/__init__.py
@@ -33,4 +33,8 @@ from .c4ai import *
 from .flowith import *
 from .Cloudflare import *
 from .NEMOTRON import *
-from .BLACKBOXAI import *
\ No newline at end of file
+from .BLACKBOXAI import *
+from .copilot import * # Add Microsoft Copilot
+from .TwoAI import *
+from .oivscode import * # Add OnRender provider
+from .Qwen3 import *
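
These wildcard re-exports surface the new providers at the subpackage root. A quick sketch (TwoAI is defined in the new module above; the other modules are assumed to export classes named after their files):

```python
# Re-exported via "from .TwoAI import *" in webscout/Provider/OPENAI/__init__.py
from webscout.Provider.OPENAI import TwoAI

client = TwoAI()
print(client.models.list())  # ['sutra-v2', 'sutra-r0']
```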
webscout/Provider/OPENAI/ai4chat.py
@@ -8,7 +8,7 @@ from typing import List, Dict, Optional, Union, Generator, Any
 from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
+    ChatCompletionMessage, CompletionUsage, count_tokens
 )
 
 # --- AI4Chat Client ---
@@ -56,49 +56,49 @@ class Completions(BaseCompletions):
         self, request_id: str, created_time: int, model: str,
         conversation_prompt: str, country: str, user_id: str
     ) -> Generator[ChatCompletionChunk, None, None]:
-        """Simulate streaming by breaking up the full response."""
+        """Simulate streaming by breaking up the full response into fixed-size character chunks."""
         try:
             # Get the full response first
             full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
 
-            # Break it into chunks for simulated streaming
-            words = full_response.split()
-            chunk_size = max(1, len(words) // 10) # Divide into ~10 chunks
-
             # Track token usage
-            prompt_tokens =
+            prompt_tokens = count_tokens(conversation_prompt)
             completion_tokens = 0
 
-            # Stream chunks
-            [old lines 73-101: the 8.2.8 word-chunk streaming loop, rendered blank by the diff viewer]
+            # Stream fixed-size character chunks (e.g., 48 chars)
+            buffer = full_response
+            chunk_size = 48
+            while buffer:
+                chunk_text = buffer[:chunk_size]
+                buffer = buffer[chunk_size:]
+                completion_tokens += count_tokens(chunk_text)
+
+                if chunk_text.strip():
+                    # Create the delta object
+                    delta = ChoiceDelta(
+                        content=chunk_text,
+                        role="assistant",
+                        tool_calls=None
+                    )
+
+                    # Create the choice object
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None,
+                        logprobs=None
+                    )
+
+                    # Create the chunk object
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                        system_fingerprint=None
+                    )
+
+                    yield chunk
 
             # Final chunk with finish_reason="stop"
             delta = ChoiceDelta(
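
The change in this hunk replaces 8.2.8's word-based chunking (roughly ten chunks per response) with fixed 48-character slices. A standalone sketch of the new slicing logic, with token accounting omitted since `count_tokens` lives in `.utils` and its body is not shown in this diff:

```python
def iter_sim_stream(full_response: str, chunk_size: int = 48):
    """Mirror the new ai4chat loop: fixed-size slices; whitespace-only slices are skipped."""
    buffer = full_response
    while buffer:
        chunk_text, buffer = buffer[:chunk_size], buffer[chunk_size:]
        if chunk_text.strip():  # same guard as the diff
            yield chunk_text

# A 120-character response yields slices of 48, 48, and 24 characters
for piece in iter_sim_stream("x" * 120):
    print(len(piece))
```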
@@ -141,8 +141,8 @@ class Completions(BaseCompletions):
             full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
 
             # Estimate token counts
-            prompt_tokens =
-            completion_tokens =
+            prompt_tokens = count_tokens(conversation_prompt)
+            completion_tokens = count_tokens(full_response)
             total_tokens = prompt_tokens + completion_tokens
 
             # Create the message object
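
The 8.2.8 side of this hunk shows truncated assignments (`prompt_tokens =` with no right-hand side), so the old token estimates were effectively broken; 8.2.9 routes both counts through the `count_tokens` helper now imported from `.utils`. Its body is not part of this diff; a purely hypothetical stand-in with the same call shape, for illustration only:

```python
def count_tokens(text: str) -> int:
    """Hypothetical stand-in; the real helper ships in webscout/Provider/OPENAI/utils.py."""
    # Crude ~4-characters-per-token heuristic (an assumption, not the library's method)
    return max(1, len(text) // 4)

prompt_tokens = count_tokens("Hello, how are you?")       # -> 4
completion_tokens = count_tokens("I'm fine, thank you!")  # -> 5
total_tokens = prompt_tokens + completion_tokens          # -> 9
```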
@@ -290,4 +290,4 @@ class AI4Chat(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
\ No newline at end of file
+        return _ModelList()