webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (304) hide show
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,569 +1,313 @@
1
- from curl_cffi.requests import Session
2
- from curl_cffi import CurlError
3
- import json
4
- import base64
5
- import time
6
- import os
7
- import pickle
8
- import tempfile
9
- from typing import Any, Dict, Optional, Generator, Union
10
- import re # Import re for parsing SSE
11
- import urllib.parse
12
-
13
- from webscout.AIutel import Optimizers
14
- from webscout.AIutel import Conversation
15
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
16
- from webscout.AIbase import Provider
17
- from webscout import exceptions
18
- from webscout.litagent import LitAgent
19
- from webscout.Extra.tempmail import get_random_email
20
-
21
-
22
- class TwoAI(Provider):
23
- """
24
- A class to interact with the Two AI API (v2) with LitAgent user-agent.
25
- SUTRA is a family of large multi-lingual language models (LMLMs) developed by TWO AI.
26
- SUTRA's dual-transformer extends the power of both MoE and Dense AI language model architectures,
27
- delivering cost-efficient multilingual capabilities for over 50+ languages.
28
-
29
- API keys can be generated using the generate_api_key() method, which uses a temporary email
30
- to register for the Two AI service and extract the API key from the confirmation email.
31
- API keys are cached to avoid regenerating them on every initialization.
32
- """
33
-
34
- AVAILABLE_MODELS = [
35
- "sutra-v2", # Multilingual AI model for instruction execution and conversational intelligence
36
- "sutra-r0", # Advanced reasoning model for complex problem-solving and deep contextual understanding
37
- ]
38
-
39
- # Class-level cache for API keys
40
- _api_key_cache = None
41
- _cache_file = os.path.join(tempfile.gettempdir(), "webscout_twoai_cache.pkl")
42
-
43
- @classmethod
44
- def _load_cached_api_key(cls) -> Optional[str]:
45
- """Load cached API key from file."""
46
- try:
47
- if os.path.exists(cls._cache_file):
48
- with open(cls._cache_file, 'rb') as f:
49
- cache_data = pickle.load(f)
50
- # Check if cache is not too old (24 hours)
51
- if time.time() - cache_data.get('timestamp', 0) < 86400:
52
- return cache_data.get('api_key')
53
- except Exception:
54
- # If cache is corrupted or unreadable, ignore and regenerate
55
- pass
56
- return None
57
-
58
- @classmethod
59
- def _save_cached_api_key(cls, api_key: str):
60
- """Save API key to cache file."""
61
- try:
62
- cache_data = {
63
- 'api_key': api_key,
64
- 'timestamp': time.time()
65
- }
66
- with open(cls._cache_file, 'wb') as f:
67
- pickle.dump(cache_data, f)
68
- except Exception:
69
- # If caching fails, continue without caching
70
- pass
71
-
72
- @classmethod
73
- def _validate_api_key(cls, api_key: str) -> bool:
74
- """Validate if an API key is still working."""
75
- try:
76
- session = Session()
77
- headers = {
78
- 'User-Agent': LitAgent().random(),
79
- 'Accept': 'application/json',
80
- 'Content-Type': 'application/json',
81
- 'Authorization': f'Bearer {api_key}',
82
- }
83
-
84
- # Test with a simple request
85
- test_payload = {
86
- "messages": [{"role": "user", "content": "test"}],
87
- "model": "sutra-v2",
88
- "max_tokens": 1,
89
- "stream": False
90
- }
91
-
92
- response = session.post(
93
- "https://api.two.ai/v2/chat/completions",
94
- headers=headers,
95
- json=test_payload,
96
- timeout=10,
97
- impersonate="chrome120"
98
- )
99
-
100
- # If we get a 200 or 400 (bad request but auth worked), key is valid
101
- # If we get 401/403, key is invalid
102
- return response.status_code not in [401, 403]
103
- except Exception:
104
- # If validation fails, assume key is invalid
105
- return False
106
-
107
- @classmethod
108
- def get_cached_api_key(cls) -> str:
109
- """Get a cached API key or generate a new one if needed."""
110
- # First check class-level cache
111
- if cls._api_key_cache:
112
- if cls._validate_api_key(cls._api_key_cache):
113
- return cls._api_key_cache
114
- else:
115
- cls._api_key_cache = None
116
-
117
- # Then check file cache
118
- cached_key = cls._load_cached_api_key()
119
- if cached_key and cls._validate_api_key(cached_key):
120
- cls._api_key_cache = cached_key
121
- return cached_key
122
-
123
- # Generate new key if no valid cached key
124
- new_key = cls.generate_api_key()
125
- cls._api_key_cache = new_key
126
- cls._save_cached_api_key(new_key)
127
- return new_key
128
-
129
- @staticmethod
130
- def generate_api_key() -> str:
131
- """
132
- Generate a new Two AI API key using a temporary email.
133
-
134
- This method:
135
- 1. Creates a temporary email using webscout's tempmail module
136
- 2. Registers for Two AI using the Loops.so newsletter form
137
- 3. Waits for and extracts the API key from the confirmation email
138
-
139
- Returns:
140
- str: The generated API key
141
-
142
- Raises:
143
- Exception: If the API key cannot be generated
144
- """
145
- # Get a temporary email
146
- email, provider = get_random_email("tempmailio")
147
-
148
- # Register for Two AI using the Loops.so newsletter form
149
- loops_url = "https://app.loops.so/api/newsletter-form/cm7i4o92h057auy1o74cxbhxo"
150
-
151
- # Create a session with appropriate headers
152
- session = Session()
153
- session.headers.update({
154
- 'User-Agent': LitAgent().random(),
155
- 'Content-Type': 'application/x-www-form-urlencoded',
156
- 'Origin': 'https://www.two.ai',
157
- 'Referer': 'https://app.loops.so/',
158
- })
159
-
160
- # Prepare form data
161
- form_data = {
162
- 'email': email,
163
- 'userGroup': 'Via Framer',
164
- 'mailingLists': 'cm8ay9cic00x70kjv0bd34k66'
165
- }
166
-
167
- # Send the registration request
168
- encoded_data = urllib.parse.urlencode(form_data)
169
- response = session.post(loops_url, data=encoded_data, impersonate="chrome120")
170
-
171
- if response.status_code != 200:
172
- raise Exception(f"Failed to register for Two AI: {response.status_code} - {response.text}")
173
-
174
- # Wait for the confirmation email and extract the API key
175
- max_attempts = 5
176
- attempt = 0
177
- api_key = None
178
- wait_time = 2
179
-
180
- while attempt < max_attempts and not api_key:
181
- messages = provider.get_messages()
182
-
183
- for message in messages:
184
- # Check if this is likely the confirmation email based on subject and sender
185
- subject = message.get('subject', '')
186
- sender = ''
187
-
188
- # Try to get the sender from different possible fields
189
- if 'from' in message:
190
- if isinstance(message['from'], dict):
191
- sender = message['from'].get('address', '')
192
- else:
193
- sender = str(message['from'])
194
- elif 'sender' in message:
195
- if isinstance(message['sender'], dict):
196
- sender = message['sender'].get('address', '')
197
- else:
198
- sender = str(message['sender'])
199
-
200
- # Look for keywords in the subject that indicate this is the confirmation email
201
- subject_match = any(keyword in subject.lower() for keyword in
202
- ['welcome', 'confirm', 'verify', 'api', 'key', 'sutra', 'two.ai', 'loops'])
203
-
204
- # Look for keywords in the sender that indicate this is from Two AI or Loops
205
- sender_match = any(keyword in sender.lower() for keyword in
206
- ['two.ai', 'sutra', 'loops.so', 'loops', 'no-reply', 'noreply'])
207
-
208
- is_confirmation = subject_match or sender_match
209
-
210
- if is_confirmation:
211
- pass
212
- # Try to get the message content from various possible fields
213
- content = None
214
-
215
- # Check for body field (seen in the debug output)
216
- if 'body' in message:
217
- content = message['body']
218
- # Check for content.text field
219
- elif 'content' in message and 'text' in message['content']:
220
- content = message['content']['text']
221
- # Check for html field
222
- elif 'html' in message:
223
- content = message['html']
224
- # Check for text field
225
- elif 'text' in message:
226
- content = message['text']
227
-
228
- if not content:
229
- continue
230
-
231
- # Look for the API key pattern in the email content
232
- # First, try to find the API key directly
233
- api_key_match = re.search(r'sutra_[A-Za-z0-9]{60,70}', content)
234
-
235
- # If not found, try looking for the key with the label
236
- if not api_key_match:
237
- key_section_match = re.search(r'🔑 SUTRA API Key\s*([^\s]+)', content)
238
- if key_section_match:
239
- api_key_match = re.search(r'(sutra_[A-Za-z0-9]+)', key_section_match.group(1))
240
-
241
- # If still not found, try a more general pattern
242
- if not api_key_match:
243
- api_key_match = re.search(r'sutra_\S+', content)
244
-
245
- if api_key_match:
246
- api_key = api_key_match.group(0)
247
- break
248
- if not api_key:
249
- attempt += 1
250
- time.sleep(wait_time)
251
- if not api_key:
252
- raise Exception("Failed to get API key from confirmation email")
253
- return api_key
254
-
255
- def __init__(
256
- self,
257
- is_conversation: bool = True,
258
- max_tokens: int = 1024,
259
- timeout: int = 30,
260
- intro: str = None,
261
- filepath: str = None,
262
- update_file: bool = True,
263
- proxies: dict = {},
264
- history_offset: int = 10250,
265
- act: str = None,
266
- model: str = "sutra-v2", # Default model
267
- temperature: float = 0.6,
268
- system_message: str = "You are a helpful assistant."
269
- ):
270
- """
271
- Initializes the TwoAI API client.
272
-
273
- Args:
274
- is_conversation: Whether to maintain conversation history.
275
- max_tokens: Maximum number of tokens to generate.
276
- timeout: Request timeout in seconds.
277
- intro: Introduction text for the conversation.
278
- filepath: Path to save conversation history.
279
- update_file: Whether to update the conversation history file.
280
- proxies: Proxy configuration for requests.
281
- history_offset: Maximum history length in characters.
282
- act: Persona for the conversation.
283
- model: Model to use. Must be one of AVAILABLE_MODELS.
284
- temperature: Temperature for generation (0.0 to 1.0).
285
- system_message: System message to use for the conversation.
286
- """
287
- if model not in self.AVAILABLE_MODELS:
288
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
289
-
290
- # Use cached API key or generate new one if needed
291
- api_key = self.get_cached_api_key()
292
-
293
- self.url = "https://api.two.ai/v2/chat/completions" # API endpoint
294
- self.headers = {
295
- 'User-Agent': LitAgent().random(),
296
- 'Accept': 'text/event-stream', # For streaming responses
297
- 'Content-Type': 'application/json',
298
- 'Authorization': f'Bearer {api_key}', # Using Bearer token authentication
299
- 'Origin': 'https://chat.two.ai',
300
- 'Referer': 'https://api.two.app/'
301
- }
302
-
303
- # Initialize curl_cffi Session
304
- self.session = Session()
305
- self.session.headers.update(self.headers)
306
- self.session.proxies = proxies
307
-
308
- self.is_conversation = is_conversation
309
- self.max_tokens_to_sample = max_tokens
310
- self.timeout = timeout
311
- self.last_response = {}
312
- self.model = model
313
- self.temperature = temperature
314
- self.system_message = system_message
315
- self.api_key = api_key
316
-
317
- self.__available_optimizers = (
318
- method
319
- for method in dir(Optimizers)
320
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
321
- )
322
- Conversation.intro = (
323
- AwesomePrompts().get_act(
324
- act, raise_not_found=True, default=None, case_insensitive=True
325
- )
326
- if act
327
- else intro or Conversation.intro
328
- )
329
-
330
- self.conversation = Conversation(
331
- is_conversation, self.max_tokens_to_sample, filepath, update_file
332
- )
333
- self.conversation.history_offset = history_offset
334
-
335
- @staticmethod
336
- def _twoai_extractor(chunk_json: Dict[str, Any]) -> Optional[str]:
337
- """Extracts content from TwoAI v2 stream JSON objects."""
338
- if not isinstance(chunk_json, dict) or "choices" not in chunk_json or not chunk_json["choices"]:
339
- return None
340
-
341
- delta = chunk_json["choices"][0].get("delta")
342
- if not isinstance(delta, dict):
343
- return None
344
-
345
- content = delta.get("content")
346
- return content if isinstance(content, str) else None
347
-
348
- def encode_image(self, image_path: str) -> str:
349
- """
350
- Encode an image file to base64 string.
351
-
352
- Args:
353
- image_path: Path to the image file
354
-
355
- Returns:
356
- Base64 encoded string of the image
357
- """
358
- with open(image_path, "rb") as image_file:
359
- return base64.b64encode(image_file.read()).decode('utf-8')
360
-
361
- def ask(
362
- self,
363
- prompt: str,
364
- stream: bool = True,
365
- raw: bool = False,
366
- optimizer: str = None,
367
- conversationally: bool = False,
368
- online_search: bool = True,
369
- image_path: str = None,
370
- ) -> Union[Dict[str, Any], Generator]:
371
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
372
- if optimizer:
373
- if optimizer in self.__available_optimizers:
374
- conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
375
- else:
376
- raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
377
-
378
- # Prepare messages with image if provided
379
- if image_path:
380
- # Create a message with image content
381
- image_content = {
382
- "type": "image_url",
383
- "image_url": {
384
- "url": f"data:image/jpeg;base64,{self.encode_image(image_path)}"
385
- }
386
- }
387
- user_message = {
388
- "role": "user",
389
- "content": [
390
- {"type": "text", "text": conversation_prompt},
391
- image_content
392
- ]
393
- }
394
- else:
395
- # Text-only message
396
- user_message = {"role": "user", "content": conversation_prompt}
397
-
398
- # Prepare the payload
399
- payload = {
400
- "messages": [
401
- *([{"role": "system", "content": self.system_message}] if self.system_message else []),
402
- user_message
403
- ],
404
- "model": self.model,
405
- "temperature": self.temperature,
406
- "max_tokens": self.max_tokens_to_sample,
407
- "stream": stream,
408
- "extra_body": {
409
- "online_search": online_search,
410
- }
411
- }
412
-
413
- def for_stream():
414
- streaming_text = "" # Initialize outside try block
415
- try:
416
- response = self.session.post(
417
- self.url,
418
- json=payload,
419
- stream=True,
420
- timeout=self.timeout,
421
- impersonate="chrome110"
422
- )
423
-
424
- if response.status_code != 200:
425
- error_detail = response.text
426
- try:
427
- error_json = response.json()
428
- error_detail = error_json.get("error", {}).get("message", error_detail)
429
- except json.JSONDecodeError:
430
- pass
431
- raise exceptions.FailedToGenerateResponseError(
432
- f"Request failed with status code {response.status_code} - {error_detail}"
433
- )
434
-
435
- # Use sanitize_stream for SSE processing
436
- processed_stream = sanitize_stream(
437
- data=response.iter_content(chunk_size=None), # Pass byte iterator
438
- intro_value="data:",
439
- to_json=True, # Stream sends JSON
440
- skip_markers=["[DONE]"],
441
- content_extractor=self._twoai_extractor, # Use the specific extractor
442
- yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
443
- )
444
-
445
- for content_chunk in processed_stream:
446
- # content_chunk is the string extracted by _twoai_extractor
447
- if content_chunk and isinstance(content_chunk, str):
448
- streaming_text += content_chunk
449
- resp = dict(text=content_chunk)
450
- yield resp if not raw else content_chunk
451
-
452
- # If stream completes successfully, update history
453
- self.last_response = {"text": streaming_text}
454
- self.conversation.update_chat_history(prompt, streaming_text)
455
-
456
- except CurlError as e:
457
- raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
458
- except exceptions.FailedToGenerateResponseError:
459
- raise # Re-raise specific exception
460
- except Exception as e:
461
- raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
462
- finally:
463
- # Ensure history is updated even if stream ends abruptly but text was received
464
- if streaming_text and not self.last_response: # Check if last_response wasn't set in the try block
465
- self.last_response = {"text": streaming_text}
466
- self.conversation.update_chat_history(prompt, streaming_text)
467
-
468
-
469
        def for_non_stream():
            """Consume the streaming generator and return the aggregated response dict."""
            # Non-stream still uses the stream internally and aggregates
            streaming_text = ""
            # We need to consume the generator from for_stream()
            gen = for_stream()
            try:
                for chunk_data in gen:
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        streaming_text += chunk_data["text"]
                    elif isinstance(chunk_data, str):  # Handle raw=True case
                        streaming_text += chunk_data
            except exceptions.FailedToGenerateResponseError:
                # If the underlying stream fails, re-raise the error
                raise
            # self.last_response and history are updated within for_stream's try/finally
            return self.last_response  # Return the final aggregated dict
485
-
486
- effective_stream = stream if stream is not None else True
487
- return for_stream() if effective_stream else for_non_stream()
488
-
489
    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
        online_search: bool = True,
        image_path: str = None,
    ) -> str:
        """Chat with the model.

        Returns a generator of decoded text chunks when ``stream`` is truthy,
        otherwise the complete reply as a single string.
        """
        effective_stream = stream if stream is not None else True

        def for_stream_chat():
            # ask() yields dicts when raw=False (default for chat)
            gen = self.ask(
                prompt,
                stream=True,
                raw=False,  # Ensure ask yields dicts
                optimizer=optimizer,
                conversationally=conversationally,
                online_search=online_search,
                image_path=image_path,
            )
            for response_dict in gen:
                yield self.get_message(response_dict)  # get_message expects dict

        def for_non_stream_chat():
            # ask() returns a dict when stream=False
            response_dict = self.ask(
                prompt,
                stream=False,  # Ensure ask returns dict
                raw=False,
                optimizer=optimizer,
                conversationally=conversationally,
                online_search=online_search,
                image_path=image_path,
            )
            return self.get_message(response_dict)  # get_message expects dict

        return for_stream_chat() if effective_stream else for_non_stream_chat()
528
-
529
    def get_message(self, response: dict) -> str:
        """Extract the text payload from an ``ask()`` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")  # Use .get for safety
532
-
533
-
534
if __name__ == "__main__":
    # Smoke-test every available model: stream a one-word reply and print a
    # per-model status line (check mark on non-empty output, cross otherwise).
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in TwoAI.AVAILABLE_MODELS:
        try:
            test_ai = TwoAI(model=model, timeout=60)
            # Test stream first
            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
            for chunk in response_stream:
                response_text += chunk
                # Optional: print chunks as they arrive for visual feedback
                # print(chunk, end="", flush=True)

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Clean and truncate response
                clean_text = response_text.strip()  # Already decoded in get_message
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗ (Stream)"
                display_text = "Empty or invalid stream response"
            print(f"\r{model:<50} {status:<10} {display_text}")

            # Optional: Add non-stream test if needed, but stream test covers basic functionality
            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
            # if not response_non_stream or len(response_non_stream.strip()) == 0:
            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")

        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
3
+ import json
4
+ import base64
5
+ from typing import Any, Dict, Optional, Generator, Union
6
+ import re # Import re for parsing SSE
7
+
8
+ from webscout.AIutel import Optimizers
9
+ from webscout.AIutel import Conversation
10
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
11
+ from webscout.AIbase import Provider
12
+ from webscout import exceptions
13
+ from webscout.litagent import LitAgent
14
+
15
+
16
class TwoAI(Provider):
    """
    A class to interact with the Two AI API (v2) with LitAgent user-agent.

    SUTRA is a family of large multi-lingual language models (LMLMs) developed
    by TWO AI. SUTRA's dual-transformer extends the power of both MoE and
    Dense AI language model architectures, delivering cost-efficient
    multilingual capabilities for over 50+ languages.

    API keys must be provided directly by the user.
    """

    required_auth = True
    AVAILABLE_MODELS = [
        "sutra-v2",  # Multilingual AI model for instruction execution and conversational intelligence
        "sutra-r0",  # Advanced reasoning model for complex problem-solving and deep contextual understanding
    ]

    def __init__(
        self,
        api_key: str,
        is_conversation: bool = True,
        max_tokens: int = 1024,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "sutra-v2",  # Default model
        temperature: float = 0.6,
        system_message: str = "You are a helpful assistant."
    ):
        """
        Initializes the TwoAI API client.

        Args:
            api_key: TwoAI API key (required).
            is_conversation: Whether to maintain conversation history.
            max_tokens: Maximum number of tokens to generate.
            timeout: Request timeout in seconds.
            intro: Introduction text for the conversation.
            filepath: Path to save conversation history.
            update_file: Whether to update the conversation history file.
            proxies: Proxy configuration for requests (None means no proxies).
            history_offset: Maximum history length in characters.
            act: Persona for the conversation.
            model: Model to use. Must be one of AVAILABLE_MODELS.
            temperature: Temperature for generation (0.0 to 1.0).
            system_message: System message to use for the conversation.

        Raises:
            ValueError: If ``model`` is not one of AVAILABLE_MODELS.
            exceptions.AuthenticationError: If ``api_key`` is empty.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        if not api_key:
            raise exceptions.AuthenticationError("TwoAI API key is required.")

        self.url = "https://chatsutra-server.account-2b0.workers.dev/v2/chat/completions"  # Correct API endpoint
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0',
            'Accept': 'application/json',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'Origin': 'https://chat.two.ai',
            'Referer': 'https://chatsutra-server.account-2b0.workers.dev/',
            'Sec-Ch-Ua': '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'cross-site',
            'Sec-Gpc': '1',
            'Dnt': '1',
            'X-Session-Token': api_key  # Using session token instead of Bearer auth
        }

        # Initialize curl_cffi Session
        self.session = Session()
        self.session.headers.update(self.headers)
        # FIX: avoid the mutable-default-argument pitfall for `proxies`.
        self.session.proxies = proxies if proxies is not None else {}

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.temperature = temperature
        self.system_message = system_message
        self.api_key = api_key

        # FIX: materialize as a tuple. A generator expression would be
        # exhausted by the first `optimizer in ...` membership test, silently
        # rejecting every optimizer on subsequent ask() calls.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _twoai_extractor(chunk_json: Dict[str, Any]) -> Optional[str]:
        """Extract the delta content string from one TwoAI v2 SSE JSON chunk, or None."""
        if not isinstance(chunk_json, dict) or "choices" not in chunk_json or not chunk_json["choices"]:
            return None

        delta = chunk_json["choices"][0].get("delta")
        if not isinstance(delta, dict):
            return None

        content = delta.get("content")
        return content if isinstance(content, str) else None

    def encode_image(self, image_path: str) -> str:
        """
        Encode an image file to a base64 string.

        Args:
            image_path: Path to the image file.

        Returns:
            Base64 encoded string of the image bytes.
        """
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        online_search: bool = True,
        image_path: str = None,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Send ``prompt`` to the TwoAI chat-completions endpoint.

        Args:
            prompt: The user prompt.
            stream: When truthy, return a generator of per-chunk results;
                otherwise consume the stream internally and return the
                aggregated ``{"text": ...}`` dict.
            raw: When True, streamed items are the raw content strings
                instead of ``{"text": chunk}`` dicts.
            optimizer: Name of an Optimizers method to rewrite the prompt.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than the bare prompt.
            online_search: Forwarded to the API's ``extra_body``.
            image_path: Optional image to attach (sent as a base64 data URL).

        Raises:
            exceptions.FailedToGenerateResponseError: On transport or HTTP errors.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Build the user message, optionally attaching an image part.
        if image_path:
            image_content = {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{self.encode_image(image_path)}"
                }
            }
            user_message = {
                "role": "user",
                "content": [
                    {"type": "text", "text": conversation_prompt},
                    image_content
                ]
            }
        else:
            # Text-only message
            user_message = {"role": "user", "content": conversation_prompt}

        payload = {
            "messages": [
                *([{"role": "system", "content": self.system_message}] if self.system_message else []),
                user_message
            ],
            "model": self.model,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens_to_sample,
            "extra_body": {
                "online_search": online_search,
            }
        }

        def for_stream():
            # Initialized outside `try` so the `finally` clause can see it.
            streaming_text = ""
            try:
                response = self.session.post(
                    self.url,
                    json=payload,
                    stream=True,
                    timeout=self.timeout
                )

                if response.status_code != 200:
                    error_detail = response.text
                    try:
                        error_json = response.json()
                        error_detail = error_json.get("error", {}).get("message", error_detail)
                    except json.JSONDecodeError:
                        pass
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code} - {error_detail}"
                    )

                # Use sanitize_stream to process the SSE stream
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=self._twoai_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk

                # If the stream completes successfully, update history
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except exceptions.FailedToGenerateResponseError:
                raise  # Re-raise specific exception
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
            finally:
                # Ensure history is updated even if the stream ends abruptly
                # but some text was already received.
                if streaming_text and not self.last_response:
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            # Non-stream still uses the stream internally: draining it lets
            # for_stream() populate self.last_response and the chat history.
            for _ in for_stream():
                pass
            return self.last_response

        # FIX: honor the `stream` flag. Previously this always returned
        # for_stream(), leaving `for_non_stream` as dead code and making
        # ask(stream=False) return a generator instead of a dict.
        effective_stream = stream if stream is not None else True
        return for_stream() if effective_stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
        online_search: bool = True,
        image_path: str = None,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Chat with the model.

        Returns a generator of decoded text chunks when ``stream`` is truthy,
        otherwise the complete reply as a single string.
        """
        def for_stream_chat():
            # ask() yields dicts when raw=False
            for response_dict in self.ask(
                prompt,
                stream=True,
                raw=False,  # Ensure ask yields dicts
                optimizer=optimizer,
                conversationally=conversationally,
                online_search=online_search,
                image_path=image_path,
            ):
                yield self.get_message(response_dict)

        def for_non_stream_chat():
            # ask() returns a dict when stream=False
            response_dict = self.ask(
                prompt,
                stream=False,
                raw=False,
                optimizer=optimizer,
                conversationally=conversationally,
                online_search=online_search,
                image_path=image_path,
            )
            return self.get_message(response_dict)

        # FIX: the `stream` parameter was previously accepted but ignored —
        # the reply was always aggregated into a single string.
        effective_stream = stream if stream is not None else True
        return for_stream_chat() if effective_stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        """Extract the text payload from an ``ask()`` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")  # Use .get for safety
306
+
307
+
308
if __name__ == "__main__":
    # Demo entry point: stream a reply and print it chunk by chunk.
    import os

    from rich import print

    # SECURITY FIX: never commit credentials. The previous revision embedded a
    # live JWT session token here; read it from the environment instead.
    api_key = os.environ.get("TWOAI_API_KEY")
    if not api_key:
        raise SystemExit("Set the TWOAI_API_KEY environment variable to run this demo.")

    ai = TwoAI(api_key=api_key, model="sutra-v2", temperature=0.7)
    response = ai.chat("Write a poem about AI in the style of Shakespeare.")
    for chunk in response:
        print(chunk, end="", flush=True)