webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,467 +1,466 @@
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
import time
import uuid
import re
import urllib.parse
import os
import pickle
import tempfile
from typing import List, Dict, Optional, Union, Generator, Any

from webscout.Extra.tempmail import get_random_email
from webscout.litagent import LitAgent

# Import base classes and utilities from OPENAI provider stack
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage
)

# Attempt to import LitAgent for browser fingerprinting
try:
    from webscout.litagent import LitAgent
except ImportError:  # pragma: no cover - LitAgent optional
    LitAgent = None


class Completions(BaseCompletions):
    """TwoAI chat completions compatible with OpenAI format."""

    def __init__(self, client: 'TwoAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, Any]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """Create a chat completion using TwoAI."""
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p

        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
        return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        original_proxies = self._client.session.proxies.copy()
        if proxies is not None:
            self._client.session.proxies = proxies
        else:
            self._client.session.proxies = {}
        try:
            response = self._client.session.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout if timeout is not None else self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )
            response.raise_for_status()

            prompt_tokens = 0
            completion_tokens = 0
            total_tokens = 0

            for line in response.iter_lines():
                if not line:
                    continue
                decoded = line.decode("utf-8").strip()
                if not decoded.startswith("data: "):
                    continue
                json_str = decoded[6:]
                if json_str == "[DONE]":
                    break
                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    continue

                choice_data = data.get("choices", [{}])[0]
                delta_data = choice_data.get("delta", {})
                finish_reason = choice_data.get("finish_reason")

                usage_data = data.get("usage", {})
                if usage_data:
                    prompt_tokens = usage_data.get("prompt_tokens", prompt_tokens)
                    completion_tokens = usage_data.get(
                        "completion_tokens", completion_tokens
                    )
                    total_tokens = usage_data.get("total_tokens", total_tokens)

                delta = ChoiceDelta(
                    content=delta_data.get("content"),
                    role=delta_data.get("role"),
                    tool_calls=delta_data.get("tool_calls"),
                )

                choice = Choice(
                    index=choice_data.get("index", 0),
                    delta=delta,
                    finish_reason=finish_reason,
                    logprobs=choice_data.get("logprobs"),
                )

                chunk = ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=data.get("system_fingerprint"),
                )

                yield chunk
        except Exception as e:
            raise IOError(f"TwoAI request failed: {e}") from e
        finally:
            self._client.session.proxies = original_proxies

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        original_proxies = self._client.session.proxies.copy()
        if proxies is not None:
            self._client.session.proxies = proxies
        else:
            self._client.session.proxies = {}
        try:
            response = self._client.session.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                timeout=timeout if timeout is not None else self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )
            response.raise_for_status()
            data = response.json()

            choices_data = data.get("choices", [])
            usage_data = data.get("usage", {})

            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get("message", {})
                message = ChatCompletionMessage(
                    role=message_d.get("role", "assistant"),
                    content=message_d.get("content", ""),
                    tool_calls=message_d.get("tool_calls"),
                )
                choice = Choice(
                    index=choice_d.get("index", 0),
                    message=message,
                    finish_reason=choice_d.get("finish_reason", "stop"),
                )
                choices.append(choice)

            usage = CompletionUsage(
                prompt_tokens=usage_data.get("prompt_tokens", 0),
                completion_tokens=usage_data.get("completion_tokens", 0),
                total_tokens=usage_data.get("total_tokens", 0),
            )

            completion = ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                model=data.get("model", model),
                usage=usage,
            )
            return completion
        except Exception as e:
            raise IOError(f"TwoAI request failed: {e}") from e
        finally:
            self._client.session.proxies = original_proxies


class Chat(BaseChat):
    def __init__(self, client: 'TwoAI'):
        self.completions = Completions(client)


class TwoAI(OpenAICompatibleProvider):
    """OpenAI-compatible client for the TwoAI API."""

    AVAILABLE_MODELS = ["sutra-v2", "sutra-r0"]

    # Class-level cache for API keys
    _api_key_cache = None
    _cache_file = os.path.join(tempfile.gettempdir(), "webscout_twoai_openai_cache.pkl")

    @classmethod
    def _load_cached_api_key(cls) -> Optional[str]:
        """Load cached API key from file."""
        try:
            if os.path.exists(cls._cache_file):
                with open(cls._cache_file, 'rb') as f:
                    cache_data = pickle.load(f)
                    # Check if cache is not too old (24 hours)
                    if time.time() - cache_data.get('timestamp', 0) < 86400:
                        return cache_data.get('api_key')
        except Exception:
            # If cache is corrupted or unreadable, ignore and regenerate
            pass
        return None

    @classmethod
    def _save_cached_api_key(cls, api_key: str):
        """Save API key to cache file."""
        try:
            cache_data = {
                'api_key': api_key,
                'timestamp': time.time()
            }
            with open(cls._cache_file, 'wb') as f:
                pickle.dump(cache_data, f)
        except Exception:
            # If caching fails, continue without caching
            pass

    @classmethod
    def _validate_api_key(cls, api_key: str) -> bool:
        """Validate if an API key is still working."""
        try:
            session = Session()
            headers = {
                'User-Agent': LitAgent().random(),
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Authorization': f'Bearer {api_key}',
            }

            # Test with a simple request
            test_payload = {
                "messages": [{"role": "user", "content": "test"}],
                "model": "sutra-v2",
                "max_tokens": 1,
                "stream": False
            }

            response = session.post(
                "https://api.two.ai/v2/chat/completions",
                headers=headers,
                json=test_payload,
                timeout=10,
                impersonate="chrome120"
            )

            # If we get a 200 or 400 (bad request but auth worked), key is valid
            # If we get 401/403, key is invalid
            return response.status_code not in [401, 403]
        except Exception:
            # If validation fails, assume key is invalid
            return False

    @classmethod
    def get_cached_api_key(cls) -> str:
        """Get a cached API key or generate a new one if needed."""
        # First check class-level cache
        if cls._api_key_cache:
            if cls._validate_api_key(cls._api_key_cache):
                return cls._api_key_cache
            else:
                cls._api_key_cache = None

        # Then check file cache
        cached_key = cls._load_cached_api_key()
        if cached_key and cls._validate_api_key(cached_key):
            cls._api_key_cache = cached_key
            return cached_key

        # Generate new key if no valid cached key
        new_key = cls.generate_api_key()
        cls._api_key_cache = new_key
        cls._save_cached_api_key(new_key)
        return new_key

    @staticmethod
    def generate_api_key() -> str:
        """
        Generate a new Two AI API key using a temporary email.
        """
        email, provider = get_random_email("tempmailio")
        loops_url = "https://app.loops.so/api/newsletter-form/cm7i4o92h057auy1o74cxbhxo"

        session = Session()
        session.headers.update({
            'User-Agent': LitAgent().random(),
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://www.two.ai',
            'Referer': 'https://app.loops.so/',
        })

        form_data = {
            'email': email,
            'userGroup': 'Via Framer',
            'mailingLists': 'cm8ay9cic00x70kjv0bd34k66'
        }

        encoded_data = urllib.parse.urlencode(form_data)
        response = session.post(loops_url, data=encoded_data, impersonate="chrome120")

        if response.status_code != 200:
            raise RuntimeError(f"Failed to register for Two AI: {response.status_code} - {response.text}")

        max_attempts = 15
        attempt = 0
        api_key = None
        wait_time = 2

        while attempt < max_attempts and not api_key:
            messages = provider.get_messages()
            for message in messages:
                subject = message.get('subject', '')
                sender = ''
                if 'from' in message:
                    if isinstance(message['from'], dict):
                        sender = message['from'].get('address', '')
                    else:
                        sender = str(message['from'])
                elif 'sender' in message:
                    if isinstance(message['sender'], dict):
                        sender = message['sender'].get('address', '')
                    else:
                        sender = str(message['sender'])
                subject_match = any(keyword in subject.lower() for keyword in
                                    ['welcome', 'confirm', 'verify', 'api', 'key', 'sutra', 'two.ai', 'loops'])
                sender_match = any(keyword in sender.lower() for keyword in
                                   ['two.ai', 'sutra', 'loops.so', 'loops', 'no-reply', 'noreply'])
                is_confirmation = subject_match or sender_match

                content = None
                if 'body' in message:
                    content = message['body']
                elif 'content' in message and 'text' in message['content']:
                    content = message['content']['text']
                elif 'html' in message:
                    content = message['html']
                elif 'text' in message:
                    content = message['text']
                if not content:
                    continue

                # Robust API key extraction with multiple regex patterns
                patterns = [
                    r'sutra_[A-Za-z0-9]{60,70}',
                    r'sutra_[A-Za-z0-9]{30,}',
                    r'sutra_\S+',
                ]
                api_key_match = None
                for pat in patterns:
                    api_key_match = re.search(pat, content)
                    if api_key_match:
                        break
                # Also try to extract from labeled section
                if not api_key_match:
                    key_section_match = re.search(r'🔑 SUTRA API Key\s*([^\s]+)', content)
                    if key_section_match:
                        api_key_match = re.search(r'sutra_[A-Za-z0-9]+', key_section_match.group(1))
                if api_key_match:
                    api_key = api_key_match.group(0)
                    break
            if not api_key:
                attempt += 1
                time.sleep(wait_time)
        if not api_key:
            raise RuntimeError("Failed to get API key from confirmation email")
        return api_key

-    def __init__(self, browser: str = "chrome"):
-    (old lines 398-467 held the previous __init__ header setup and __main__ demo; the diff viewer truncated them to fragments such as "Accept", "Sec-Fetch-", "Sec-CH-UA", self.headers, and print(chunk, end=""))
+    def __init__(self, browser: str = "chrome", proxies: Optional[Dict[str, str]] = None):
+        super().__init__(proxies=proxies)
+        api_key = self.get_cached_api_key()
+        self.timeout = 30
+        self.base_url = "https://api.two.ai/v2/chat/completions"
+        self.api_key = api_key
+
+        headers: Dict[str, str] = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {api_key}",
+        }
+
+        if LitAgent is not None:
+            try:
+                agent = LitAgent()
+                fingerprint = agent.generate_fingerprint(browser)
+                headers.update({
+                    "Accept": fingerprint["accept"],
+                    "Accept-Encoding": "gzip, deflate, br, zstd",
+                    "Accept-Language": fingerprint["accept_language"],
+                    "Cache-Control": "no-cache",
+                    "Connection": "keep-alive",
+                    "Origin": "https://chat.two.ai",
+                    "Pragma": "no-cache",
+                    "Referer": "https://chat.two.ai/",
+                    "Sec-Fetch-Dest": "empty",
+                    "Sec-Fetch-Mode": "cors",
+                    "Sec-Fetch-Site": "same-site",
+                    "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+                    "Sec-CH-UA-Mobile": "?0",
+                    "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+                    "User-Agent": fingerprint["user_agent"],
+                })
+            except Exception:
+                # Fallback minimal headers if fingerprinting fails
+                headers.update({
+                    "Accept": "application/json",
+                    "Accept-Encoding": "gzip, deflate, br",
+                    "Accept-Language": "en-US,en;q=0.9",
+                    "User-Agent": "Mozilla/5.0",
+                })
+        else:
+            headers.update({
+                "Accept": "application/json",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Accept-Language": "en-US,en;q=0.9",
+                "User-Agent": "Mozilla/5.0",
+            })
+
+        self.headers = headers
+        self.session.headers.update(headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    from rich import print
+    two_ai = TwoAI()
+    resp = two_ai.chat.completions.create(
+        model="sutra-v2",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        stream=True
+    )
+    for chunk in resp:
+        print(chunk, end="")
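The rewritten module keeps the OpenAI-style chat.completions.create() surface, and the __main__ block above only exercises the streaming path. A minimal non-streaming sketch, assuming the file is importable as webscout.Provider.OPENAI.TwoAI (the import path is inferred from the changed-file list, not confirmed by this diff):

    # Hypothetical usage sketch; import path inferred from the file list above
    from webscout.Provider.OPENAI.TwoAI import TwoAI

    client = TwoAI()  # reuses a cached sutra_ API key or registers a temp email to obtain one
    completion = client.chat.completions.create(
        model="sutra-v2",
        messages=[{"role": "user", "content": "Say hello"}],
        stream=False,  # non-streaming path returns a ChatCompletion instead of a chunk generator
    )
    print(completion.choices[0].message.content)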