webscout 8.3.7-py3-none-any.whl → 2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/TwoAI.py
CHANGED
@@ -1,313 +1,313 @@

The hunk removes all 312 lines of the file and re-adds them with identical text (line 313, the final `print`, is the only common context line), so the change is limited to invisible characters such as line endings. The file content:

```python
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
import base64
from typing import Any, Dict, Optional, Generator, Union
import re  # Import re for parsing SSE

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent


class TwoAI(Provider):
    """
    A class to interact with the Two AI API (v2) with LitAgent user-agent.
    SUTRA is a family of large multi-lingual language models (LMLMs) developed by TWO AI.
    SUTRA's dual-transformer extends the power of both MoE and Dense AI language model architectures,
    delivering cost-efficient multilingual capabilities for over 50+ languages.

    API keys must be provided directly by the user.
    """

    required_auth = True
    AVAILABLE_MODELS = [
        "sutra-v2",  # Multilingual AI model for instruction execution and conversational intelligence
        "sutra-r0",  # Advanced reasoning model for complex problem-solving and deep contextual understanding
    ]

    def __init__(
        self,
        api_key: str,
        is_conversation: bool = True,
        max_tokens: int = 1024,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "sutra-v2",  # Default model
        temperature: float = 0.6,
        system_message: str = "You are a helpful assistant."
    ):
        """
        Initializes the TwoAI API client.

        Args:
            api_key: TwoAI API key (required).
            is_conversation: Whether to maintain conversation history.
            max_tokens: Maximum number of tokens to generate.
            timeout: Request timeout in seconds.
            intro: Introduction text for the conversation.
            filepath: Path to save conversation history.
            update_file: Whether to update the conversation history file.
            proxies: Proxy configuration for requests.
            history_offset: Maximum history length in characters.
            act: Persona for the conversation.
            model: Model to use. Must be one of AVAILABLE_MODELS.
            temperature: Temperature for generation (0.0 to 1.0).
            system_message: System message to use for the conversation.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        if not api_key:
            raise exceptions.AuthenticationError("TwoAI API key is required.")

        self.url = "https://chatsutra-server.account-2b0.workers.dev/v2/chat/completions"  # Correct API endpoint
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0',
            'Accept': 'application/json',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'Origin': 'https://chat.two.ai',
            'Referer': 'https://chatsutra-server.account-2b0.workers.dev/',
            'Sec-Ch-Ua': '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'cross-site',
            'Sec-Gpc': '1',
            'Dnt': '1',
            'X-Session-Token': api_key  # Using session token instead of Bearer auth
        }

        # Initialize curl_cffi Session
        self.session = Session()
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.temperature = temperature
        self.system_message = system_message
        self.api_key = api_key

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _twoai_extractor(chunk_json: Dict[str, Any]) -> Optional[str]:
        """Extracts content from TwoAI v2 stream JSON objects."""
        if not isinstance(chunk_json, dict) or "choices" not in chunk_json or not chunk_json["choices"]:
            return None

        delta = chunk_json["choices"][0].get("delta")
        if not isinstance(delta, dict):
            return None

        content = delta.get("content")
        return content if isinstance(content, str) else None

    def encode_image(self, image_path: str) -> str:
        """
        Encode an image file to base64 string.

        Args:
            image_path: Path to the image file

        Returns:
            Base64 encoded string of the image
        """
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        online_search: bool = True,
        image_path: str = None,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare messages with image if provided
        if image_path:
            # Create a message with image content
            image_content = {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{self.encode_image(image_path)}"
                }
            }
            user_message = {
                "role": "user",
                "content": [
                    {"type": "text", "text": conversation_prompt},
                    image_content
                ]
            }
        else:
            # Text-only message
            user_message = {"role": "user", "content": conversation_prompt}

        # Prepare the payload
        payload = {
            "messages": [
                *([{"role": "system", "content": self.system_message}] if self.system_message else []),
                user_message
            ],
            "model": self.model,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens_to_sample,
            "extra_body": {
                "online_search": online_search,
            }
        }

        def for_stream():
            streaming_text = ""  # Initialize outside try block
            try:
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout
                )

                if response.status_code != 200:
                    error_detail = response.text
                    try:
                        error_json = response.json()
                        error_detail = error_json.get("error", {}).get("message", error_detail)
                    except json.JSONDecodeError:
                        pass
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code} - {error_detail}"
                    )

                # Use sanitize_stream to process the SSE stream
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=self._twoai_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk

                # If stream completes successfully, update history
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except exceptions.FailedToGenerateResponseError:
                raise  # Re-raise specific exception
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
            finally:
                # Ensure history is updated even if stream ends abruptly but text was received
                if streaming_text and not self.last_response:  # Check if last_response wasn't set in the try block
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)


        def for_non_stream():
            # Non-stream still uses the stream internally and aggregates
            streaming_text = ""
            # We need to consume the generator from for_stream()
            gen = for_stream()
            try:
                for chunk_data in gen:
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        streaming_text += chunk_data["text"]
                    elif isinstance(chunk_data, str):  # Handle raw=True case
                        streaming_text += chunk_data
            except exceptions.FailedToGenerateResponseError:
                # If the underlying stream fails, re-raise the error
                raise
            # self.last_response and history are updated within for_stream's try/finally
            return self.last_response  # Return the final aggregated dict

        # The API uses SSE streaming for all requests, so we always use streaming
        return for_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
        online_search: bool = True,
        image_path: str = None,
    ) -> str:
        # The API uses SSE streaming for all requests, so we always aggregate
        aggregated_text = ""
        gen = self.ask(
            prompt,
            stream=True,
            raw=False,  # Ensure ask yields dicts
            optimizer=optimizer,
            conversationally=conversationally,
            online_search=online_search,
            image_path=image_path,
        )
        for response_dict in gen:
            if isinstance(response_dict, dict) and "text" in response_dict:
                aggregated_text += response_dict["text"]
            elif isinstance(response_dict, str):
                aggregated_text += response_dict

        return aggregated_text

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")  # Use .get for safety


if __name__ == "__main__":
    from rich import print
    ai = TwoAI(api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJzanl2OHJtZGxDZDFnQ2hQdGxzZHdxUlVteXkyIiwic291cmNlIjoiRmlyZWJhc2UiLCJpYXQiOjE3NTc4NTEyMzYsImV4cCI6MTc1Nzg1MjEzNn0.ilTYrHRdN3_cme6VW3knWWfbypY_n_gsUe9DeDhEwrM", model="sutra-v2", temperature=0.7)
    response = ai.chat("Write a poem about AI in the style of Shakespeare.")
    for chunk in response:
        print(chunk, end="", flush=True)
```
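For orientation, a minimal sketch of driving this provider follows. It assumes a valid ChatSutra session token is available to the caller; the `TWO_AI_API_KEY` environment variable is a hypothetical convention for this sketch, not something the class reads itself.

```python
# Minimal usage sketch. TWO_AI_API_KEY is a hypothetical variable name;
# TwoAI reads no environment variables and expects the token directly.
import os

from webscout.Provider.TwoAI import TwoAI

ai = TwoAI(api_key=os.environ["TWO_AI_API_KEY"], model="sutra-v2")

# chat() always consumes the SSE stream and returns the aggregated string.
print(ai.chat("Name three languages SUTRA supports."))

# ask() yields {"text": ...} dicts chunk by chunk (bare strings with raw=True);
# this is exactly what chat() aggregates internally.
for chunk in ai.ask("Hello!"):
    print(chunk["text"], end="", flush=True)
```

As configured above, `sanitize_stream` strips the `data:` prefix from each SSE line, skips the `[DONE]` marker, decodes the JSON, and passes each object to `_twoai_extractor`, which pulls out `choices[0].delta.content` from events shaped like `data: {"choices": [{"delta": {"content": "Hi"}}]}`.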