webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,475 @@
|
|
|
1
|
+
from curl_cffi.requests import Session
|
|
2
|
+
from curl_cffi import CurlError
|
|
3
|
+
import json
|
|
4
|
+
import base64
|
|
5
|
+
import time
|
|
6
|
+
from typing import Any, Dict, Optional, Generator, Union
|
|
7
|
+
import re # Import re for parsing SSE
|
|
8
|
+
import urllib.parse
|
|
9
|
+
|
|
10
|
+
from webscout.AIutel import Optimizers
|
|
11
|
+
from webscout.AIutel import Conversation
|
|
12
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
13
|
+
from webscout.AIbase import Provider
|
|
14
|
+
from webscout import exceptions
|
|
15
|
+
from webscout.litagent import LitAgent
|
|
16
|
+
from webscout.Extra.tempmail import get_random_email
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class TwoAI(Provider):
    """
    Client for the Two AI chat-completions API (v2), using a LitAgent user-agent.

    SUTRA is a family of large multi-lingual language models (LMLMs) developed by TWO AI.
    SUTRA's dual-transformer extends the power of both MoE and Dense AI language model
    architectures, delivering cost-efficient multilingual capabilities for over 50+ languages.

    API keys are generated automatically on construction via :meth:`generate_api_key`,
    which registers a temporary email with the Two AI newsletter form and extracts the
    key from the confirmation email.
    """

    AVAILABLE_MODELS = [
        "sutra-v2",  # Multilingual AI model for instruction execution and conversational intelligence
        "sutra-r0",  # Advanced reasoning model for complex problem-solving and deep contextual understanding
    ]

    @staticmethod
    def generate_api_key() -> str:
        """
        Generate a new Two AI API key using a temporary email.

        Steps:
        1. Creates a temporary email using webscout's tempmail module.
        2. Registers for Two AI via the Loops.so newsletter form.
        3. Polls the inbox and extracts the ``sutra_...`` key from the confirmation email.

        Returns:
            str: The generated API key.

        Raises:
            Exception: If registration fails or no key arrives within the retry window.
        """
        # Get a temporary email (provider object is used below to poll messages).
        email, provider = get_random_email("tempmailio")

        # Loops.so newsletter endpoint used by two.ai for sign-ups.
        loops_url = "https://app.loops.so/api/newsletter-form/cm7i4o92h057auy1o74cxbhxo"

        # Create a session with browser-like headers so the form accepts the POST.
        session = Session()
        session.headers.update({
            'User-Agent': LitAgent().random(),
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://www.two.ai',
            'Referer': 'https://app.loops.so/',
        })

        # Form fields observed from the two.ai sign-up page.
        form_data = {
            'email': email,
            'userGroup': 'Via Framer',
            'mailingLists': 'cm8ay9cic00x70kjv0bd34k66'
        }

        # Send the registration request.
        encoded_data = urllib.parse.urlencode(form_data)
        response = session.post(loops_url, data=encoded_data, impersonate="chrome120")

        if response.status_code != 200:
            raise Exception(f"Failed to register for Two AI: {response.status_code} - {response.text}")

        # Poll the temp inbox for the confirmation email and extract the API key.
        max_attempts = 5
        attempt = 0
        api_key = None
        wait_time = 2  # seconds between polls

        while attempt < max_attempts and not api_key:
            messages = provider.get_messages()

            # NOTE: every message is scanned for a key — subject/sender filtering is
            # deliberately not applied, since the key pattern itself is specific enough.
            for message in messages:
                # The message body may live under several field names depending on
                # the tempmail backend; try each known location in turn.
                content = None
                if 'body' in message:
                    content = message['body']
                elif 'content' in message and 'text' in message['content']:
                    content = message['content']['text']
                elif 'html' in message:
                    content = message['html']
                elif 'text' in message:
                    content = message['text']

                if not content:
                    continue

                # First, look for a full-length key directly.
                api_key_match = re.search(r'sutra_[A-Za-z0-9]{60,70}', content)

                # If not found, look for the key following its label in the email.
                if not api_key_match:
                    key_section_match = re.search(r'🔑 SUTRA API Key\s*([^\s]+)', content)
                    if key_section_match:
                        api_key_match = re.search(r'(sutra_[A-Za-z0-9]+)', key_section_match.group(1))

                # Last resort: any token starting with the key prefix.
                if not api_key_match:
                    api_key_match = re.search(r'sutra_\S+', content)

                if api_key_match:
                    api_key = api_key_match.group(0)
                    break

            if not api_key:
                attempt += 1
                time.sleep(wait_time)

        if not api_key:
            raise Exception("Failed to get API key from confirmation email")
        return api_key

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 1024,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "sutra-v2",  # Default model
        temperature: float = 0.6,
        system_message: str = "You are a helpful assistant."
    ):
        """
        Initializes the TwoAI API client.

        Args:
            is_conversation: Whether to maintain conversation history.
            max_tokens: Maximum number of tokens to generate.
            timeout: Request timeout in seconds.
            intro: Introduction text for the conversation.
            filepath: Path to save conversation history.
            update_file: Whether to update the conversation history file.
            proxies: Proxy configuration for requests.
            history_offset: Maximum history length in characters.
            act: Persona for the conversation.
            model: Model to use. Must be one of AVAILABLE_MODELS.
            temperature: Temperature for generation (0.0 to 1.0).
            system_message: System message to use for the conversation.

        Raises:
            ValueError: If ``model`` is not in AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Always auto-generate an API key (requires network access).
        api_key = self.generate_api_key()

        self.url = "https://api.two.ai/v2/chat/completions"  # API endpoint
        self.headers = {
            'User-Agent': LitAgent().random(),
            'Accept': 'text/event-stream',  # For streaming responses
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {api_key}',  # Using Bearer token authentication
            'Origin': 'https://chat.two.ai',
            'Referer': 'https://api.two.app/'
        }

        # Initialize curl_cffi Session shared by all requests.
        self.session = Session()
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.temperature = temperature
        self.system_message = system_message
        self.api_key = api_key

        # Names of the prompt-optimizer callables exposed by Optimizers.
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _twoai_extractor(chunk_json: Dict[str, Any]) -> Optional[str]:
        """Extract the delta content string from a TwoAI v2 SSE chunk, or None."""
        if not isinstance(chunk_json, dict) or "choices" not in chunk_json or not chunk_json["choices"]:
            return None

        delta = chunk_json["choices"][0].get("delta")
        if not isinstance(delta, dict):
            return None

        content = delta.get("content")
        return content if isinstance(content, str) else None

    def encode_image(self, image_path: str) -> str:
        """
        Encode an image file to a base64 string.

        Args:
            image_path: Path to the image file.

        Returns:
            Base64 encoded string of the image bytes.
        """
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        online_search: bool = True,
        image_path: str = None,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Send a prompt to the API.

        Args:
            prompt: User prompt text.
            stream: Yield chunks as they arrive (True) or return the full dict (False).
            raw: When streaming, yield plain strings instead of ``{"text": ...}`` dicts.
            optimizer: Name of an Optimizers method to apply to the prompt.
            conversationally: Apply the optimizer to the full conversation prompt.
            online_search: Enable the API's online-search extra.
            image_path: Optional image to attach (sent as a base64 data URL).

        Returns:
            A generator of chunks when streaming, otherwise the aggregated response dict.

        Raises:
            exceptions.FailedToGenerateResponseError: On transport or API errors.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the user message, attaching the image as a data URL if provided.
        if image_path:
            image_content = {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{self.encode_image(image_path)}"
                }
            }
            user_message = {
                "role": "user",
                "content": [
                    {"type": "text", "text": conversation_prompt},
                    image_content
                ]
            }
        else:
            # Text-only message
            user_message = {"role": "user", "content": conversation_prompt}

        # Prepare the payload (system message first, when configured).
        payload = {
            "messages": [
                *([{"role": "system", "content": self.system_message}] if self.system_message else []),
                user_message
            ],
            "model": self.model,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens_to_sample,
            "stream": stream,
            "extra_body": {
                "online_search": online_search,
            }
        }

        def for_stream():
            streaming_text = ""  # Accumulated text; initialized outside try so finally can see it
            history_saved = False  # True once last_response/history were written in the try body
            try:
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )

                if response.status_code != 200:
                    # Prefer the API's structured error message when the body is JSON.
                    error_detail = response.text
                    try:
                        error_json = response.json()
                        error_detail = error_json.get("error", {}).get("message", error_detail)
                    except json.JSONDecodeError:
                        pass
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code} - {error_detail}"
                    )

                # Use sanitize_stream for SSE processing of "data:" lines.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value="data:",
                    to_json=True,  # Stream sends JSON
                    skip_markers=["[DONE]"],
                    content_extractor=self._twoai_extractor,  # Use the specific extractor
                    yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by _twoai_extractor
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk

                # Stream completed successfully: record the response and history.
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)
                history_saved = True

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except exceptions.FailedToGenerateResponseError:
                raise  # Re-raise specific exception
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
            finally:
                # If the stream ended abruptly (consumer closed the generator, or an
                # error after partial output), still persist whatever text arrived.
                # A local flag is used here because checking self.last_response is
                # unreliable: it may hold stale text from a previous call.
                if streaming_text and not history_saved:
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            # Non-stream still uses the stream internally and aggregates the chunks.
            streaming_text = ""
            gen = for_stream()
            try:
                for chunk_data in gen:
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        streaming_text += chunk_data["text"]
                    elif isinstance(chunk_data, str):  # Handle raw=True case
                        streaming_text += chunk_data
            except exceptions.FailedToGenerateResponseError:
                # If the underlying stream fails, re-raise the error
                raise
            # self.last_response and history are updated within for_stream's try/finally
            return self.last_response  # Return the final aggregated dict

        effective_stream = stream if stream is not None else True
        return for_stream() if effective_stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
        online_search: bool = True,
        image_path: str = None,
    ) -> str:
        """
        Chat with the model and get plain-text output.

        Args:
            prompt: User prompt text.
            stream: Yield text chunks (True) or return the full string (False).
            optimizer: Name of an Optimizers method to apply to the prompt.
            conversationally: Apply the optimizer to the full conversation prompt.
            online_search: Enable the API's online-search extra.
            image_path: Optional image to attach.

        Returns:
            A generator of text chunks when streaming, otherwise the full response string.
        """
        effective_stream = stream if stream is not None else True

        def for_stream_chat():
            # ask() yields dicts when raw=False (default for chat)
            gen = self.ask(
                prompt,
                stream=True,
                raw=False,  # Ensure ask yields dicts
                optimizer=optimizer,
                conversationally=conversationally,
                online_search=online_search,
                image_path=image_path,
            )
            for response_dict in gen:
                yield self.get_message(response_dict)  # get_message expects dict

        def for_non_stream_chat():
            # ask() returns a dict when stream=False
            response_dict = self.ask(
                prompt,
                stream=False,  # Ensure ask returns dict
                raw=False,
                optimizer=optimizer,
                conversationally=conversationally,
                online_search=online_search,
                image_path=image_path,
            )
            return self.get_message(response_dict)  # get_message expects dict

        return for_stream_chat() if effective_stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        """Extract the text field from a response dict (empty string if absent)."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")  # Use .get for safety
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
if __name__ == "__main__":
    # Smoke-test each advertised model with a short streamed prompt and
    # print a one-line status report per model.
    divider = "-" * 80
    print(divider)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(divider)

    for model_name in TwoAI.AVAILABLE_MODELS:
        try:
            client = TwoAI(model=model_name, timeout=60)
            print(f"\r{model_name:<50} {'Streaming...':<10}", end="", flush=True)

            # Collect the streamed chunks, then join them into the full reply.
            pieces = []
            for part in client.chat("Say 'Hello' in one word", stream=True):
                pieces.append(part)
            reply = "".join(pieces)

            trimmed = reply.strip()
            if trimmed:
                status = "✓"
                # Truncate long replies so the table stays readable.
                shown = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
            else:
                status = "✗ (Stream)"
                shown = "Empty or invalid stream response"
            print(f"\r{model_name:<50} {status:<10} {shown}")
        except Exception as err:
            print(f"\r{model_name:<50} {'✗':<10} {str(err)}")
|