webscout 8.3.6-py3-none-any.whl → 2025.10.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (304)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,426 +1,424 @@
1
- from curl_cffi.requests import Session
2
- from curl_cffi import CurlError
3
- import json
4
- import time
5
- import random
6
- import re
7
- import uuid
8
- from typing import Any, Dict, List, Optional, Union, Generator
9
-
10
- from webscout.AIutel import Conversation, sanitize_stream
11
- from webscout.AIbase import Provider # Import sanitize_stream
12
- from webscout import exceptions
13
- from webscout.litagent import LitAgent
14
-
15
- class LambdaChat(Provider):
16
- """
17
- A class to interact with the Lambda Chat API.
18
- Supports streaming responses.
19
- """
20
- url = "https://lambda.chat"
21
-
22
- AVAILABLE_MODELS = [
23
- "deepseek-llama3.3-70b",
24
- "apriel-5b-instruct",
25
- "deepseek-r1",
26
- "deepseek-v3-0324",
27
- "deepseek-r1-0528",
28
- "hermes-3-llama-3.1-405b-fp8",
29
- "llama3.1-nemotron-70b-instruct",
30
- "lfm-40b",
31
- "llama3.3-70b-instruct-fp8",
32
- "qwen25-coder-32b-instruct",
33
- "qwen3-32b-fp8",
34
- "llama-4-maverick-70b-128e-instruct-fp8",
35
- "llama-4-scout-17b-16e-instruct"
36
-
37
- ]
38
-
39
- def __init__(
40
- self,
41
- is_conversation: bool = True,
42
- max_tokens: int = 2000, # Note: max_tokens is not used by this API
43
- timeout: int = 60,
44
- filepath: str = None,
45
- update_file: bool = True,
46
- proxies: dict = {},
47
- model: str = "deepseek-llama3.3-70b",
48
- assistantId: str = None, # Note: assistantId is not used by this API
49
- system_prompt: str = "You are a helpful assistant. Please answer the following question.", # Note: system_prompt is not used by this API
50
- ):
51
- """Initialize the LambdaChat client."""
52
- if model not in self.AVAILABLE_MODELS:
53
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
54
-
55
- self.model = model
56
- # Initialize curl_cffi Session
57
- self.session = Session()
58
- self.assistantId = assistantId
59
- self.system_prompt = system_prompt
60
-
61
- # Set up headers for all requests
62
- self.headers = {
63
- "Content-Type": "application/json", # Keep Content-Type for JSON posts
64
- "Accept": "*/*", # Keep Accept
65
- # "User-Agent": LitAgent().random(), # Removed, handled by impersonate
66
- "Accept-Language": "en-US,en;q=0.9", # Keep Accept-Language
67
- "Origin": self.url, # Keep Origin
68
- "Referer": f"{self.url}/", # Keep Referer (will be updated per request)
69
- # "Sec-Ch-Ua": "\"Chromium\";v=\"120\"", # Removed, handled by impersonate
70
- # "Sec-Ch-Ua-Mobile": "?0", # Removed, handled by impersonate
71
- # "Sec-Ch-Ua-Platform": "\"Windows\"", # Removed, handled by impersonate
72
- "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-* headers
73
- "Sec-Fetch-Mode": "cors",
74
- "Sec-Fetch-Site": "same-origin",
75
- "DNT": "1", # Keep DNT
76
- "Priority": "u=1, i" # Keep Priority
77
- }
78
-
79
- # Provider settings
80
- self.is_conversation = is_conversation
81
- self.max_tokens_to_sample = max_tokens
82
- self.timeout = timeout
83
- self.last_response = {}
84
-
85
- # Initialize conversation history
86
- self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
87
-
88
- # Store conversation data for different models
89
- self._conversation_data = {}
90
-
91
- # Update curl_cffi session headers and proxies
92
- self.session.headers.update(self.headers)
93
- self.session.proxies = proxies # Assign proxies directly
94
-
95
- def create_conversation(self, model: str):
96
- """Create a new conversation with the specified model, using updated headers and cookies."""
97
- url = f"{self.url}/conversation"
98
- payload = {
99
- "model": model,
100
- "preprompt": self.system_prompt
101
- }
102
-
103
- # Update headers for this specific request
104
- headers = self.headers.copy()
105
- headers["Referer"] = f"{self.url}/"
106
- # Add browser-like headers for best compatibility
107
- headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
108
- headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
109
- headers["Sec-GPC"] = "1"
110
- headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
111
- headers["Sec-Ch-Ua-Mobile"] = "?0"
112
- headers["Sec-Ch-Ua-Platform"] = '"Windows"'
113
- headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
114
- headers["Origin"] = self.url
115
- # cookies are handled by curl_cffi session automatically
116
-
117
- try:
118
- response = self.session.post(
119
- url,
120
- json=payload,
121
- headers=headers,
122
- impersonate="chrome110"
123
- )
124
- if response.status_code == 401:
125
- raise exceptions.AuthenticationError("Authentication failed.")
126
- if response.status_code != 200:
127
- return None
128
- data = response.json()
129
- conversation_id = data.get("conversationId")
130
- if model not in self._conversation_data:
131
- self._conversation_data[model] = {
132
- "conversationId": conversation_id,
133
- "messageId": str(uuid.uuid4())
134
- }
135
- return conversation_id
136
- except CurlError:
137
- return None
138
- except Exception:
139
- return None
140
-
141
- def fetch_message_id(self, conversation_id: str) -> str:
142
- """Fetch the latest message ID for a conversation."""
143
- try:
144
- url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
145
- response = self.session.get(
146
- url,
147
- headers=self.headers, # Use base headers
148
- impersonate="chrome110" # Use a common impersonation profile
149
- )
150
- response.raise_for_status()
151
-
152
- # Parse the JSON data from the response
153
- json_data = None
154
- for line in response.text.split('\n'):
155
- if line.strip():
156
- try:
157
- parsed = json.loads(line)
158
- if isinstance(parsed, dict) and "nodes" in parsed:
159
- json_data = parsed
160
- break
161
- except json.JSONDecodeError:
162
- continue
163
-
164
- if not json_data:
165
- # Fall back to a UUID if we can't parse the response
166
- return str(uuid.uuid4())
167
-
168
- # Extract message ID using the same pattern as in the example
169
- if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
170
- return str(uuid.uuid4())
171
-
172
- data = json_data["nodes"][1]["data"]
173
- keys = data[data[0]["messages"]]
174
- message_keys = data[keys[-1]]
175
- message_id = data[message_keys["id"]]
176
-
177
- return message_id
178
-
179
- except CurlError: # Catch CurlError
180
- return str(uuid.uuid4()) # Fallback on CurlError
181
- except Exception: # Catch other potential exceptions
182
- # Fall back to a UUID if there's an error
183
- return str(uuid.uuid4())
184
-
185
- def generate_boundary(self):
186
- """Generate a random boundary for multipart/form-data requests"""
187
- boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
188
- boundary = "----WebKitFormBoundary"
189
- boundary += "".join(random.choice(boundary_chars) for _ in range(16))
190
- return boundary
191
-
192
- @staticmethod
193
- def _lambdachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
194
- """Extracts content from LambdaChat stream JSON objects."""
195
- if not isinstance(chunk, dict) or "type" not in chunk:
196
- return None
197
-
198
- reasoning_text = ""
199
- if chunk["type"] == "stream" and "token" in chunk:
200
- return chunk["token"].replace("\u0000", "")
201
- # elif chunk["type"] == "finalAnswer":
202
- # return chunk.get("text")
203
- elif chunk["type"] == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
204
- # Prepend reasoning with <think> tags? Or handle separately? For now, just return token.
205
- return chunk["token"] # Or potentially format as f"<think>{chunk['token']}</think>"
206
- return None
207
-
208
- def ask(
209
- self,
210
- prompt: str,
211
- stream: bool = False, # API supports streaming
212
- raw: bool = False,
213
- optimizer: str = None, # Note: optimizer is not used by this API
214
- conversationally: bool = False, # Note: conversationally is not used by this API
215
- web_search: bool = False,
216
- ) -> Union[Dict[str, Any], Generator]:
217
- """Send a message to the Lambda Chat API"""
218
- model = self.model
219
-
220
- # Check if we have a conversation for this model
221
- if model not in self._conversation_data:
222
- conversation_id = self.create_conversation(model)
223
- if not conversation_id:
224
- raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
225
- else:
226
- conversation_id = self._conversation_data[model]["conversationId"]
227
- # Refresh message ID
228
- self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
229
-
230
- url = f"{self.url}/conversation/{conversation_id}"
231
- message_id = self._conversation_data[model]["messageId"]
232
-
233
- # Data to send (tools should be empty list by default)
234
- request_data = {
235
- "inputs": prompt,
236
- "id": message_id,
237
- "is_retry": False,
238
- "is_continue": False,
239
- "web_search": web_search,
240
- "tools": []
241
- }
242
-
243
- # Update headers for this specific request
244
- headers = self.headers.copy()
245
- headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
246
- headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
247
- headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
248
- headers["Sec-GPC"] = "1"
249
- headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
250
- headers["Sec-Ch-Ua-Mobile"] = "?0"
251
- headers["Sec-Ch-Ua-Platform"] = '"Windows"'
252
- headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
253
- headers["Origin"] = self.url
254
-
255
- # Create multipart form data
256
- boundary = self.generate_boundary()
257
- multipart_headers = headers.copy()
258
- multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
259
-
260
- # Serialize the data to JSON
261
- data_json = json.dumps(request_data, separators=(',', ':'))
262
-
263
- # Create the multipart form data body
264
- body = f"--{boundary}\r\n"
265
- body += f'Content-Disposition: form-data; name="data"\r\n'
266
- body += f"\r\n"
267
- body += f"{data_json}\r\n"
268
- body += f"--{boundary}--\r\n"
269
-
270
- multipart_headers["Content-Length"] = str(len(body))
271
-
272
- def for_stream():
273
- streaming_text = "" # Initialize for history
274
- try:
275
- # Try with multipart/form-data first
276
- response = None
277
- try:
278
- # Use curl_cffi session post with impersonate
279
- response = self.session.post(
280
- url,
281
- data=body,
282
- headers=multipart_headers, # Use multipart headers
283
- stream=True,
284
- timeout=self.timeout,
285
- impersonate="chrome110" # Use a common impersonation profile
286
- )
287
- response.raise_for_status() # Check status after potential error
288
- except (CurlError, exceptions.FailedToGenerateResponseError, Exception): # Catch potential errors
289
- response = None # Ensure response is None if multipart fails
290
-
291
- # If multipart fails or returns error, try with regular JSON
292
- if not response or response.status_code != 200:
293
- # Use curl_cffi session post with impersonate
294
- response = self.session.post(
295
- url,
296
- json=request_data, # Use JSON payload
297
- headers=headers, # Use regular headers
298
- stream=True,
299
- timeout=self.timeout,
300
- impersonate="chrome110" # Use a common impersonation profile
301
- )
302
-
303
- response.raise_for_status() # Check status after potential fallback
304
-
305
- # Use sanitize_stream
306
- processed_stream = sanitize_stream(
307
- data=response.iter_content(chunk_size=None), # Pass byte iterator
308
- intro_value=None, # No prefix
309
- to_json=True, # Stream sends JSON lines
310
- content_extractor=self._lambdachat_extractor, # Use the specific extractor
311
- yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
312
- )
313
-
314
- for content_chunk in processed_stream:
315
- # content_chunk is the string extracted by _lambdachat_extractor
316
- if content_chunk and isinstance(content_chunk, str):
317
- streaming_text += content_chunk # Aggregate text for history
318
- resp = {"text": content_chunk}
319
- yield resp if not raw else content_chunk
320
-
321
- except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
322
- # Handle specific exceptions if needed
323
- if isinstance(e, CurlError):
324
- # Log or handle CurlError specifically
325
- pass
326
-
327
- # Try another model if current one fails
328
- if len(self.AVAILABLE_MODELS) > 1:
329
- current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
330
- next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
331
- self.model = self.AVAILABLE_MODELS[next_model_index]
332
-
333
- # Create new conversation with the alternate model
334
- conversation_id = self.create_conversation(self.model)
335
- if conversation_id:
336
- # Try again with the new model
337
- yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
338
- conversationally=conversationally, web_search=web_search)
339
- return
340
-
341
- # If we get here, all models failed
342
- raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e
343
-
344
- # Update history after stream finishes
345
- if streaming_text and self.conversation.file:
346
- self.last_response = {"text": streaming_text}
347
- self.conversation.update_chat_history(prompt, streaming_text)
348
-
349
- def for_non_stream():
350
- # Aggregate the stream using the updated for_stream logic
351
- response_text = ""
352
- try:
353
- # Ensure raw=False so for_stream yields dicts
354
- for chunk_data in for_stream():
355
- if isinstance(chunk_data, dict) and "text" in chunk_data:
356
- response_text += chunk_data["text"]
357
- # Handle raw string case if raw=True was passed
358
- elif raw and isinstance(chunk_data, str):
359
- response_text += chunk_data
360
- except Exception as e:
361
- # If aggregation fails but some text was received, use it. Otherwise, re-raise.
362
- if not response_text:
363
- raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
364
-
365
- # last_response and history are updated within process_response called by for_stream
366
- # Return the final aggregated response dict or raw string
367
- return response_text if raw else {"text": response_text} # Return dict for consistency
368
-
369
-
370
- return for_stream() if stream else for_non_stream()
371
-
372
- def chat(
373
- self,
374
- prompt: str,
375
- stream: bool = False,
376
- optimizer: str = None, # Note: optimizer is not used by this API
377
- conversationally: bool = False, # Note: conversationally is not used by this API
378
- web_search: bool = False
379
- ) -> Union[str, Generator]:
380
- """Generate a response to a prompt"""
381
- def for_stream_chat():
382
- # ask() yields dicts or strings when streaming
383
- gen = self.ask(
384
- prompt, stream=True, raw=False, # Ensure ask yields dicts
385
- optimizer=optimizer, conversationally=conversationally, web_search=web_search
386
- )
387
- for response_dict in gen:
388
- yield self.get_message(response_dict) # get_message expects dict
389
-
390
- def for_non_stream_chat():
391
- # ask() returns dict or str when not streaming
392
- response_data = self.ask(
393
- prompt, stream=False, raw=False, # Ensure ask returns dict
394
- optimizer=optimizer, conversationally=conversationally, web_search=web_search
395
- )
396
- return self.get_message(response_data) # get_message expects dict
397
-
398
- return for_stream_chat() if stream else for_non_stream_chat()
399
-
400
- def get_message(self, response: dict) -> str:
401
- """Extract message text from response"""
402
- assert isinstance(response, dict), "Response should be of dict data-type only"
403
- return response.get("text", "")
404
-
405
- if __name__ == "__main__":
406
- # Ensure curl_cffi is installed
407
- print("-" * 80)
408
- print(f"{'Model':<50} {'Status':<10} {'Response'}")
409
- print("-" * 80)
410
-
411
- for model in LambdaChat.AVAILABLE_MODELS:
412
- try:
413
- test_ai = LambdaChat(model=model, timeout=60)
414
- response = test_ai.chat("Say 'Hello' in one word")
415
- response_text = response
416
-
417
- if response_text and len(response_text.strip()) > 0:
418
- status = ""
419
- # Truncate response if too long
420
- display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
421
- else:
422
- status = "✗"
423
- display_text = "Empty or invalid response"
424
- print(f"{model:<50} {status:<10} {display_text}")
425
- except Exception as e:
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
3
+ import json
4
+ import random
5
+ import uuid
6
+ from typing import Any, Dict, Optional, Union, Generator
7
+
8
+ from webscout.AIutel import Conversation, sanitize_stream
9
+ from webscout.AIbase import Provider # Import sanitize_stream
10
+ from webscout import exceptions
11
+ from webscout.litagent import LitAgent
12
+
13
+ class LambdaChat(Provider):
14
+ """
15
+ A class to interact with the Lambda Chat API.
16
+ Supports streaming responses.
17
+ """
18
+ url = "https://lambda.chat"
19
+ required_auth = False
20
+ AVAILABLE_MODELS = [
21
+ "deepseek-llama3.3-70b",
22
+ "apriel-5b-instruct",
23
+ "deepseek-r1",
24
+ "deepseek-v3-0324",
25
+ "deepseek-r1-0528",
26
+ "hermes-3-llama-3.1-405b-fp8",
27
+ "llama3.1-nemotron-70b-instruct",
28
+ "lfm-40b",
29
+ "llama3.3-70b-instruct-fp8",
30
+ "qwen25-coder-32b-instruct",
31
+ "qwen3-32b-fp8",
32
+ "llama-4-maverick-70b-128e-instruct-fp8",
33
+ "llama-4-scout-17b-16e-instruct"
34
+
35
+ ]
36
+
37
+ def __init__(
38
+ self,
39
+ is_conversation: bool = True,
40
+ max_tokens: int = 2000, # Note: max_tokens is not used by this API
41
+ timeout: int = 60,
42
+ filepath: str = None,
43
+ update_file: bool = True,
44
+ proxies: dict = {},
45
+ model: str = "deepseek-llama3.3-70b",
46
+ assistantId: str = None, # Note: assistantId is not used by this API
47
+ system_prompt: str = "You are a helpful assistant. Please answer the following question.", # Note: system_prompt is not used by this API
48
+ ):
49
+ """Initialize the LambdaChat client."""
50
+ if model not in self.AVAILABLE_MODELS:
51
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
52
+
53
+ self.model = model
54
+ # Initialize curl_cffi Session
55
+ self.session = Session()
56
+ self.assistantId = assistantId
57
+ self.system_prompt = system_prompt
58
+
59
+ # Set up headers for all requests
60
+ self.headers = {
61
+ "Content-Type": "application/json", # Keep Content-Type for JSON posts
62
+ "Accept": "*/*", # Keep Accept
63
+ # "User-Agent": LitAgent().random(), # Removed, handled by impersonate
64
+ "Accept-Language": "en-US,en;q=0.9", # Keep Accept-Language
65
+ "Origin": self.url, # Keep Origin
66
+ "Referer": f"{self.url}/", # Keep Referer (will be updated per request)
67
+ # "Sec-Ch-Ua": "\"Chromium\";v=\"120\"", # Removed, handled by impersonate
68
+ # "Sec-Ch-Ua-Mobile": "?0", # Removed, handled by impersonate
69
+ # "Sec-Ch-Ua-Platform": "\"Windows\"", # Removed, handled by impersonate
70
+ "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-* headers
71
+ "Sec-Fetch-Mode": "cors",
72
+ "Sec-Fetch-Site": "same-origin",
73
+ "DNT": "1", # Keep DNT
74
+ "Priority": "u=1, i" # Keep Priority
75
+ }
76
+
77
+ # Provider settings
78
+ self.is_conversation = is_conversation
79
+ self.max_tokens_to_sample = max_tokens
80
+ self.timeout = timeout
81
+ self.last_response = {}
82
+
83
+ # Initialize conversation history
84
+ self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
85
+
86
+ # Store conversation data for different models
87
+ self._conversation_data = {}
88
+
89
+ # Update curl_cffi session headers and proxies
90
+ self.session.headers.update(self.headers)
91
+ self.session.proxies = proxies # Assign proxies directly
92
+
93
+ def create_conversation(self, model: str):
94
+ """Create a new conversation with the specified model, using updated headers and cookies."""
95
+ url = f"{self.url}/conversation"
96
+ payload = {
97
+ "model": model,
98
+ "preprompt": self.system_prompt
99
+ }
100
+
101
+ # Update headers for this specific request
102
+ headers = self.headers.copy()
103
+ headers["Referer"] = f"{self.url}/"
104
+ # Add browser-like headers for best compatibility
105
+ headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
106
+ headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
107
+ headers["Sec-GPC"] = "1"
108
+ headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
109
+ headers["Sec-Ch-Ua-Mobile"] = "?0"
110
+ headers["Sec-Ch-Ua-Platform"] = '"Windows"'
111
+ headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
112
+ headers["Origin"] = self.url
113
+ # cookies are handled by curl_cffi session automatically
114
+
115
+ try:
116
+ response = self.session.post(
117
+ url,
118
+ json=payload,
119
+ headers=headers,
120
+ impersonate="chrome110"
121
+ )
122
+ if response.status_code == 401:
123
+ raise exceptions.AuthenticationError("Authentication failed.")
124
+ if response.status_code != 200:
125
+ return None
126
+ data = response.json()
127
+ conversation_id = data.get("conversationId")
128
+ if model not in self._conversation_data:
129
+ self._conversation_data[model] = {
130
+ "conversationId": conversation_id,
131
+ "messageId": str(uuid.uuid4())
132
+ }
133
+ return conversation_id
134
+ except CurlError:
135
+ return None
136
+ except Exception:
137
+ return None
138
+
139
+ def fetch_message_id(self, conversation_id: str) -> str:
140
+ """Fetch the latest message ID for a conversation."""
141
+ try:
142
+ url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
143
+ response = self.session.get(
144
+ url,
145
+ headers=self.headers, # Use base headers
146
+ impersonate="chrome110" # Use a common impersonation profile
147
+ )
148
+ response.raise_for_status()
149
+
150
+ # Parse the JSON data from the response
151
+ json_data = None
152
+ for line in response.text.split('\n'):
153
+ if line.strip():
154
+ try:
155
+ parsed = json.loads(line)
156
+ if isinstance(parsed, dict) and "nodes" in parsed:
157
+ json_data = parsed
158
+ break
159
+ except json.JSONDecodeError:
160
+ continue
161
+
162
+ if not json_data:
163
+ # Fall back to a UUID if we can't parse the response
164
+ return str(uuid.uuid4())
165
+
166
+ # Extract message ID using the same pattern as in the example
167
+ if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
168
+ return str(uuid.uuid4())
169
+
170
+ data = json_data["nodes"][1]["data"]
171
+ keys = data[data[0]["messages"]]
172
+ message_keys = data[keys[-1]]
173
+ message_id = data[message_keys["id"]]
174
+
175
+ return message_id
176
+
177
+ except CurlError: # Catch CurlError
178
+ return str(uuid.uuid4()) # Fallback on CurlError
179
+ except Exception: # Catch other potential exceptions
180
+ # Fall back to a UUID if there's an error
181
+ return str(uuid.uuid4())
182
+
183
+ def generate_boundary(self):
184
+ """Generate a random boundary for multipart/form-data requests"""
185
+ boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
186
+ boundary = "----WebKitFormBoundary"
187
+ boundary += "".join(random.choice(boundary_chars) for _ in range(16))
188
+ return boundary
189
+
190
+ @staticmethod
191
+ def _lambdachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
192
+ """Extracts content from LambdaChat stream JSON objects."""
193
+ if not isinstance(chunk, dict) or "type" not in chunk:
194
+ return None
195
+
196
+ reasoning_text = ""
197
+ if chunk["type"] == "stream" and "token" in chunk:
198
+ return chunk["token"].replace("\u0000", "")
199
+ # elif chunk["type"] == "finalAnswer":
200
+ # return chunk.get("text")
201
+ elif chunk["type"] == "reasoning" and chunk.get("subtype") == "stream" and "token" in chunk:
202
+ # Prepend reasoning with <think> tags? Or handle separately? For now, just return token.
203
+ return chunk["token"] # Or potentially format as f"<think>{chunk['token']}</think>"
204
+ return None
205
+
206
+ def ask(
207
+ self,
208
+ prompt: str,
209
+ stream: bool = False, # API supports streaming
210
+ raw: bool = False,
211
+ optimizer: str = None, # Note: optimizer is not used by this API
212
+ conversationally: bool = False, # Note: conversationally is not used by this API
213
+ web_search: bool = False,
214
+ ) -> Union[Dict[str, Any], Generator]:
215
+ """Send a message to the Lambda Chat API"""
216
+ model = self.model
217
+
218
+ # Check if we have a conversation for this model
219
+ if model not in self._conversation_data:
220
+ conversation_id = self.create_conversation(model)
221
+ if not conversation_id:
222
+ raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
223
+ else:
224
+ conversation_id = self._conversation_data[model]["conversationId"]
225
+ # Refresh message ID
226
+ self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
227
+
228
+ url = f"{self.url}/conversation/{conversation_id}"
229
+ message_id = self._conversation_data[model]["messageId"]
230
+
231
+ # Data to send (tools should be empty list by default)
232
+ request_data = {
233
+ "inputs": prompt,
234
+ "id": message_id,
235
+ "is_retry": False,
236
+ "is_continue": False,
237
+ "web_search": web_search,
238
+ "tools": []
239
+ }
240
+
241
+ # Update headers for this specific request
242
+ headers = self.headers.copy()
243
+ headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
244
+ headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
245
+ headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
246
+ headers["Sec-GPC"] = "1"
247
+ headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
248
+ headers["Sec-Ch-Ua-Mobile"] = "?0"
249
+ headers["Sec-Ch-Ua-Platform"] = '"Windows"'
250
+ headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
251
+ headers["Origin"] = self.url
252
+
253
+ # Create multipart form data
254
+ boundary = self.generate_boundary()
255
+ multipart_headers = headers.copy()
256
+ multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
257
+
258
+ # Serialize the data to JSON
259
+ data_json = json.dumps(request_data, separators=(',', ':'))
260
+
261
+ # Create the multipart form data body
262
+ body = f"--{boundary}\r\n"
263
+ body += f'Content-Disposition: form-data; name="data"\r\n'
264
+ body += f"\r\n"
265
+ body += f"{data_json}\r\n"
266
+ body += f"--{boundary}--\r\n"
267
+
268
+ multipart_headers["Content-Length"] = str(len(body))
269
+
270
+ def for_stream():
271
+ streaming_text = "" # Initialize for history
272
+ try:
273
+ # Try with multipart/form-data first
274
+ response = None
275
+ try:
276
+ # Use curl_cffi session post with impersonate
277
+ response = self.session.post(
278
+ url,
279
+ data=body,
280
+ headers=multipart_headers, # Use multipart headers
281
+ stream=True,
282
+ timeout=self.timeout,
283
+ impersonate="chrome110" # Use a common impersonation profile
284
+ )
285
+ response.raise_for_status() # Check status after potential error
286
+ except (CurlError, exceptions.FailedToGenerateResponseError, Exception): # Catch potential errors
287
+ response = None # Ensure response is None if multipart fails
288
+
289
+ # If multipart fails or returns error, try with regular JSON
290
+ if not response or response.status_code != 200:
291
+ # Use curl_cffi session post with impersonate
292
+ response = self.session.post(
293
+ url,
294
+ json=request_data, # Use JSON payload
295
+ headers=headers, # Use regular headers
296
+ stream=True,
297
+ timeout=self.timeout,
298
+ impersonate="chrome110" # Use a common impersonation profile
299
+ )
300
+
301
+ response.raise_for_status() # Check status after potential fallback
302
+
303
+ # Use sanitize_stream
304
+ processed_stream = sanitize_stream(
305
+ data=response.iter_content(chunk_size=None), # Pass byte iterator
306
+ intro_value=None, # No prefix
307
+ to_json=True, # Stream sends JSON lines
308
+ content_extractor=self._lambdachat_extractor, # Use the specific extractor
309
+ yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
310
+ )
311
+
312
+ for content_chunk in processed_stream:
313
+ # content_chunk is the string extracted by _lambdachat_extractor
314
+ if content_chunk and isinstance(content_chunk, str):
315
+ streaming_text += content_chunk # Aggregate text for history
316
+ resp = {"text": content_chunk}
317
+ yield resp if not raw else content_chunk
318
+
319
+ except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch errors from both attempts
320
+ # Handle specific exceptions if needed
321
+ if isinstance(e, CurlError):
322
+ # Log or handle CurlError specifically
323
+ pass
324
+
325
+ # Try another model if current one fails
326
+ if len(self.AVAILABLE_MODELS) > 1:
327
+ current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
328
+ next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
329
+ self.model = self.AVAILABLE_MODELS[next_model_index]
330
+
331
+ # Create new conversation with the alternate model
332
+ conversation_id = self.create_conversation(self.model)
333
+ if conversation_id:
334
+ # Try again with the new model
335
+ yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
336
+ conversationally=conversationally, web_search=web_search)
337
+ return
338
+
339
+ # If we get here, all models failed
340
+ raise exceptions.FailedToGenerateResponseError(f"Request failed after trying fallback: {str(e)}") from e
341
+
342
+ # Update history after stream finishes
343
+ if streaming_text and self.conversation.file:
344
+ self.last_response = {"text": streaming_text}
345
+ self.conversation.update_chat_history(prompt, streaming_text)
346
+
347
+ def for_non_stream():
348
+ # Aggregate the stream using the updated for_stream logic
349
+ response_text = ""
350
+ try:
351
+ # Ensure raw=False so for_stream yields dicts
352
+ for chunk_data in for_stream():
353
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
354
+ response_text += chunk_data["text"]
355
+ # Handle raw string case if raw=True was passed
356
+ elif raw and isinstance(chunk_data, str):
357
+ response_text += chunk_data
358
+ except Exception as e:
359
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
360
+ if not response_text:
361
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
362
+
363
+ # last_response and history are updated within process_response called by for_stream
364
+ # Return the final aggregated response dict or raw string
365
+ return response_text if raw else {"text": response_text} # Return dict for consistency
366
+
367
+
368
+ return for_stream() if stream else for_non_stream()
369
+
370
+ def chat(
371
+ self,
372
+ prompt: str,
373
+ stream: bool = False,
374
+ optimizer: str = None, # Note: optimizer is not used by this API
375
+ conversationally: bool = False, # Note: conversationally is not used by this API
376
+ web_search: bool = False
377
+ ) -> Union[str, Generator]:
378
+ """Generate a response to a prompt"""
379
+ def for_stream_chat():
380
+ # ask() yields dicts or strings when streaming
381
+ gen = self.ask(
382
+ prompt, stream=True, raw=False, # Ensure ask yields dicts
383
+ optimizer=optimizer, conversationally=conversationally, web_search=web_search
384
+ )
385
+ for response_dict in gen:
386
+ yield self.get_message(response_dict) # get_message expects dict
387
+
388
+ def for_non_stream_chat():
389
+ # ask() returns dict or str when not streaming
390
+ response_data = self.ask(
391
+ prompt, stream=False, raw=False, # Ensure ask returns dict
392
+ optimizer=optimizer, conversationally=conversationally, web_search=web_search
393
+ )
394
+ return self.get_message(response_data) # get_message expects dict
395
+
396
+ return for_stream_chat() if stream else for_non_stream_chat()
397
+
398
+ def get_message(self, response: dict) -> str:
399
+ """Extract message text from response"""
400
+ assert isinstance(response, dict), "Response should be of dict data-type only"
401
+ return response.get("text", "")
402
+
403
+ if __name__ == "__main__":
404
+ # Ensure curl_cffi is installed
405
+ print("-" * 80)
406
+ print(f"{'Model':<50} {'Status':<10} {'Response'}")
407
+ print("-" * 80)
408
+
409
+ for model in LambdaChat.AVAILABLE_MODELS:
410
+ try:
411
+ test_ai = LambdaChat(model=model, timeout=60)
412
+ response = test_ai.chat("Say 'Hello' in one word")
413
+ response_text = response
414
+
415
+ if response_text and len(response_text.strip()) > 0:
416
+ status = "✓"
417
+ # Truncate response if too long
418
+ display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
419
+ else:
420
+ status = ""
421
+ display_text = "Empty or invalid response"
422
+ print(f"{model:<50} {status:<10} {display_text}")
423
+ except Exception as e:
426
424
  print(f"{model:<50} {'✗':<10} {str(e)}")