webscout 8.2.9-py3-none-any.whl → 2026.1.19-py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
Files changed (413)
  1. webscout/AIauto.py +524 -251
  2. webscout/AIbase.py +247 -319
  3. webscout/AIutel.py +68 -703
  4. webscout/Bard.py +1072 -1026
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -118
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +403 -232
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -684
  37. webscout/Extra/tempmail/README.md +487 -487
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +292 -333
  49. webscout/Provider/AISEARCH/README.md +106 -279
  50. webscout/Provider/AISEARCH/__init__.py +16 -9
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +357 -410
  53. webscout/Provider/AISEARCH/monica_search.py +200 -220
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -255
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -342
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +467 -340
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +175 -169
  67. webscout/Provider/GithubChat.py +385 -369
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -801
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -375
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -291
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -384
  77. webscout/Provider/Netwrck.py +273 -270
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -952
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -357
  85. webscout/Provider/OPENAI/__init__.py +148 -40
  86. webscout/Provider/OPENAI/ai4chat.py +348 -293
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/{exachat.py → ayle.py} +365 -444
  90. webscout/Provider/OPENAI/base.py +253 -249
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +870 -556
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -173
  94. webscout/Provider/OPENAI/deepinfra.py +403 -322
  95. webscout/Provider/OPENAI/e2b.py +2370 -1414
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +452 -417
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -364
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +333 -308
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -335
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +374 -357
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -287
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +305 -304
  115. webscout/Provider/OPENAI/textpollinations.py +370 -339
  116. webscout/Provider/OPENAI/toolbaz.py +375 -413
  117. webscout/Provider/OPENAI/typefully.py +419 -355
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -318
  120. webscout/Provider/OPENAI/wisecat.py +359 -387
  121. webscout/Provider/OPENAI/writecream.py +185 -163
  122. webscout/Provider/OPENAI/x0gpt.py +462 -365
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -429
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -82
  133. webscout/Provider/TTI/__init__.py +37 -7
  134. webscout/Provider/TTI/base.py +147 -64
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -201
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -221
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -11
  141. webscout/Provider/TTS/README.md +186 -192
  142. webscout/Provider/TTS/__init__.py +43 -10
  143. webscout/Provider/TTS/base.py +523 -159
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -129
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -580
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +331 -308
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -475
  158. webscout/Provider/TypliAI.py +311 -305
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -209
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +331 -326
  161. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +300 -295
  162. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +218 -198
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/{MCPCore.py → UNFINISHED/XenAI.py} +330 -315
  165. webscout/Provider/UNFINISHED/Youchat.py +347 -330
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -263
  170. webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +231 -224
  171. webscout/Provider/WiseCat.py +256 -233
  172. webscout/Provider/WrDoChat.py +390 -370
  173. webscout/Provider/__init__.py +115 -174
  174. webscout/Provider/ai4chat.py +181 -174
  175. webscout/Provider/akashgpt.py +330 -335
  176. webscout/Provider/cerebras.py +397 -290
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -283
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -325
  182. webscout/Provider/llama3mitril.py +230 -215
  183. webscout/Provider/llmchat.py +308 -258
  184. webscout/Provider/llmchatco.py +321 -306
  185. webscout/Provider/meta.py +996 -801
  186. webscout/Provider/oivscode.py +332 -309
  187. webscout/Provider/searchchat.py +316 -292
  188. webscout/Provider/sonus.py +264 -258
  189. webscout/Provider/toolbaz.py +359 -353
  190. webscout/Provider/turboseek.py +332 -266
  191. webscout/Provider/typefully.py +262 -202
  192. webscout/Provider/x0gpt.py +332 -299
  193. webscout/__init__.py +31 -39
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -524
  196. webscout/client.py +1497 -70
  197. webscout/conversation.py +140 -436
  198. webscout/exceptions.py +383 -362
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +74 -420
  204. webscout/prompt_manager.py +376 -288
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -404
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -210
  210. webscout/scout/core/scout.py +800 -607
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -478
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -95
  284. webscout/swiftcli/core/__init__.py +7 -7
  285. webscout/swiftcli/core/cli.py +574 -297
  286. webscout/swiftcli/core/context.py +98 -104
  287. webscout/swiftcli/core/group.py +268 -241
  288. webscout/swiftcli/decorators/__init__.py +28 -28
  289. webscout/swiftcli/decorators/command.py +243 -221
  290. webscout/swiftcli/decorators/options.py +247 -220
  291. webscout/swiftcli/decorators/output.py +392 -252
  292. webscout/swiftcli/exceptions.py +21 -21
  293. webscout/swiftcli/plugins/__init__.py +9 -9
  294. webscout/swiftcli/plugins/base.py +134 -135
  295. webscout/swiftcli/plugins/manager.py +269 -269
  296. webscout/swiftcli/utils/__init__.py +58 -59
  297. webscout/swiftcli/utils/formatting.py +251 -252
  298. webscout/swiftcli/utils/parsing.py +368 -267
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -135
  304. webscout/zeroart/base.py +70 -66
  305. webscout/zeroart/effects.py +155 -101
  306. webscout/zeroart/fonts.py +1799 -1239
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/entry_points.txt +1 -1
  311. webscout/DWEBS.py +0 -520
  312. webscout/Extra/Act.md +0 -309
  313. webscout/Extra/GitToolkit/gitapi/README.md +0 -110
  314. webscout/Extra/autocoder/__init__.py +0 -9
  315. webscout/Extra/autocoder/autocoder.py +0 -1105
  316. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  317. webscout/Extra/gguf.md +0 -430
  318. webscout/Extra/weather.md +0 -281
  319. webscout/Litlogger/README.md +0 -10
  320. webscout/Litlogger/__init__.py +0 -15
  321. webscout/Litlogger/formats.py +0 -4
  322. webscout/Litlogger/handlers.py +0 -103
  323. webscout/Litlogger/levels.py +0 -13
  324. webscout/Litlogger/logger.py +0 -92
  325. webscout/Provider/AI21.py +0 -177
  326. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  327. webscout/Provider/AISEARCH/felo_search.py +0 -202
  328. webscout/Provider/AISEARCH/genspark_search.py +0 -324
  329. webscout/Provider/AISEARCH/hika_search.py +0 -186
  330. webscout/Provider/AISEARCH/scira_search.py +0 -298
  331. webscout/Provider/Aitopia.py +0 -316
  332. webscout/Provider/AllenAI.py +0 -440
  333. webscout/Provider/Blackboxai.py +0 -791
  334. webscout/Provider/ChatGPTClone.py +0 -237
  335. webscout/Provider/ChatGPTGratis.py +0 -194
  336. webscout/Provider/Cloudflare.py +0 -324
  337. webscout/Provider/ExaChat.py +0 -358
  338. webscout/Provider/Flowith.py +0 -217
  339. webscout/Provider/FreeGemini.py +0 -250
  340. webscout/Provider/Glider.py +0 -225
  341. webscout/Provider/HF_space/__init__.py +0 -0
  342. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  343. webscout/Provider/HuggingFaceChat.py +0 -469
  344. webscout/Provider/Hunyuan.py +0 -283
  345. webscout/Provider/LambdaChat.py +0 -411
  346. webscout/Provider/Llama3.py +0 -259
  347. webscout/Provider/Nemotron.py +0 -218
  348. webscout/Provider/OLLAMA.py +0 -396
  349. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -766
  350. webscout/Provider/OPENAI/Cloudflare.py +0 -378
  351. webscout/Provider/OPENAI/FreeGemini.py +0 -283
  352. webscout/Provider/OPENAI/NEMOTRON.py +0 -232
  353. webscout/Provider/OPENAI/Qwen3.py +0 -283
  354. webscout/Provider/OPENAI/api.py +0 -969
  355. webscout/Provider/OPENAI/c4ai.py +0 -373
  356. webscout/Provider/OPENAI/chatgptclone.py +0 -494
  357. webscout/Provider/OPENAI/copilot.py +0 -242
  358. webscout/Provider/OPENAI/flowith.py +0 -162
  359. webscout/Provider/OPENAI/freeaichat.py +0 -359
  360. webscout/Provider/OPENAI/mcpcore.py +0 -389
  361. webscout/Provider/OPENAI/multichat.py +0 -376
  362. webscout/Provider/OPENAI/opkfc.py +0 -496
  363. webscout/Provider/OPENAI/scirachat.py +0 -477
  364. webscout/Provider/OPENAI/standardinput.py +0 -433
  365. webscout/Provider/OPENAI/typegpt.py +0 -364
  366. webscout/Provider/OPENAI/uncovrAI.py +0 -463
  367. webscout/Provider/OPENAI/venice.py +0 -431
  368. webscout/Provider/OPENAI/yep.py +0 -382
  369. webscout/Provider/OpenGPT.py +0 -209
  370. webscout/Provider/Perplexitylabs.py +0 -415
  371. webscout/Provider/Reka.py +0 -214
  372. webscout/Provider/StandardInput.py +0 -290
  373. webscout/Provider/TTI/aiarta.py +0 -365
  374. webscout/Provider/TTI/artbit.py +0 -0
  375. webscout/Provider/TTI/fastflux.py +0 -200
  376. webscout/Provider/TTI/piclumen.py +0 -203
  377. webscout/Provider/TTI/pixelmuse.py +0 -225
  378. webscout/Provider/TTS/gesserit.py +0 -128
  379. webscout/Provider/TTS/sthir.py +0 -94
  380. webscout/Provider/TeachAnything.py +0 -229
  381. webscout/Provider/UNFINISHED/puterjs.py +0 -635
  382. webscout/Provider/UNFINISHED/test_lmarena.py +0 -119
  383. webscout/Provider/Venice.py +0 -258
  384. webscout/Provider/VercelAI.py +0 -253
  385. webscout/Provider/Writecream.py +0 -246
  386. webscout/Provider/WritingMate.py +0 -269
  387. webscout/Provider/asksteve.py +0 -220
  388. webscout/Provider/chatglm.py +0 -215
  389. webscout/Provider/copilot.py +0 -425
  390. webscout/Provider/freeaichat.py +0 -285
  391. webscout/Provider/granite.py +0 -235
  392. webscout/Provider/hermes.py +0 -266
  393. webscout/Provider/koala.py +0 -170
  394. webscout/Provider/lmarena.py +0 -198
  395. webscout/Provider/multichat.py +0 -364
  396. webscout/Provider/scira_chat.py +0 -299
  397. webscout/Provider/scnet.py +0 -243
  398. webscout/Provider/talkai.py +0 -194
  399. webscout/Provider/typegpt.py +0 -289
  400. webscout/Provider/uncovr.py +0 -368
  401. webscout/Provider/yep.py +0 -389
  402. webscout/litagent/Readme.md +0 -276
  403. webscout/litprinter/__init__.py +0 -59
  404. webscout/swiftcli/Readme.md +0 -323
  405. webscout/tempid.py +0 -128
  406. webscout/webscout_search.py +0 -1184
  407. webscout/webscout_search_async.py +0 -654
  408. webscout/yep_search.py +0 -347
  409. webscout/zeroart/README.md +0 -89
  410. webscout-8.2.9.dist-info/METADATA +0 -1033
  411. webscout-8.2.9.dist-info/RECORD +0 -289
  412. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/licenses/LICENSE.md +0 -0
  413. {webscout-8.2.9.dist-info → webscout-2026.1.19.dist-info}/top_level.txt +0 -0
webscout/Provider/Groq.py CHANGED
@@ -1,801 +1,556 @@
1
- from typing import Any, AsyncGenerator, Dict, Optional, Callable, List, Union
2
-
3
- import httpx
4
- import json
5
-
6
- # Import curl_cffi for improved request handling
7
- from curl_cffi.requests import Session
8
- from curl_cffi import CurlError
9
-
10
- from webscout.AIutel import Optimizers
11
- from webscout.AIutel import Conversation
12
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
13
- from webscout.AIbase import Provider, AsyncProvider
14
- from webscout import exceptions
15
-
16
- class GROQ(Provider):
17
- """
18
- A class to interact with the GROQ AI API.
19
- """
20
-
21
- # Default models list (will be updated dynamically)
22
- AVAILABLE_MODELS = [
23
- "distil-whisper-large-v3-en",
24
- "gemma2-9b-it",
25
- "llama-3.3-70b-versatile",
26
- "llama-3.1-8b-instant",
27
- "llama-guard-3-8b",
28
- "llama3-70b-8192",
29
- "llama3-8b-8192",
30
- "whisper-large-v3",
31
- "whisper-large-v3-turbo",
32
- "meta-llama/llama-4-scout-17b-16e-instruct",
33
- "meta-llama/llama-4-maverick-17b-128e-instruct",
34
- "playai-tts",
35
- "playai-tts-arabic",
36
- "qwen-qwq-32b",
37
- "mistral-saba-24b",
38
- "qwen-2.5-coder-32b",
39
- "qwen-2.5-32b",
40
- "deepseek-r1-distill-qwen-32b",
41
- "deepseek-r1-distill-llama-70b",
42
- "llama-3.3-70b-specdec",
43
- "llama-3.2-1b-preview",
44
- "llama-3.2-3b-preview",
45
- "llama-3.2-11b-vision-preview",
46
- "llama-3.2-90b-vision-preview",
47
- "mixtral-8x7b-32768"
48
- ]
49
-
50
- @classmethod
51
- def get_models(cls, api_key: str = None):
52
- """Fetch available models from Groq API.
53
-
54
- Args:
55
- api_key (str, optional): Groq API key. If not provided, returns default models.
56
-
57
- Returns:
58
- list: List of available model IDs
59
- """
60
- if not api_key:
61
- return cls.AVAILABLE_MODELS
62
-
63
- try:
64
- # Use a temporary curl_cffi session for this class method
65
- temp_session = Session()
66
- headers = {
67
- "Content-Type": "application/json",
68
- "Authorization": f"Bearer {api_key}",
69
- }
70
-
71
- response = temp_session.get(
72
- "https://api.groq.com/openai/v1/models",
73
- headers=headers,
74
- impersonate="chrome110" # Use impersonate for fetching
75
- )
76
-
77
- if response.status_code != 200:
78
- return cls.AVAILABLE_MODELS
79
-
80
- data = response.json()
81
- if "data" in data and isinstance(data["data"], list):
82
- return [model["id"] for model in data["data"]]
83
- return cls.AVAILABLE_MODELS
84
-
85
- except (CurlError, Exception):
86
- # Fallback to default models list if fetching fails
87
- return cls.AVAILABLE_MODELS
88
-
89
- def __init__(
90
- self,
91
- api_key: str,
92
- is_conversation: bool = True,
93
- max_tokens: int = 600,
94
- temperature: float = 1,
95
- presence_penalty: int = 0,
96
- frequency_penalty: int = 0,
97
- top_p: float = 1,
98
- model: str = "mixtral-8x7b-32768",
99
- timeout: int = 30,
100
- intro: str = None,
101
- filepath: str = None,
102
- update_file: bool = True,
103
- proxies: dict = {},
104
- history_offset: int = 10250,
105
- act: str = None,
106
- system_prompt: Optional[str] = None,
107
- ):
108
- """Instantiates GROQ
109
-
110
- Args:
111
- api_key (key): GROQ's API key.
112
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
113
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
114
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
115
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
116
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
117
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
118
- model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
119
- timeout (int, optional): Http request timeout. Defaults to 30.
120
- intro (str, optional): Conversation introductory prompt. Defaults to None.
121
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
122
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
123
- proxies (dict, optional): Http request proxies. Defaults to {}.
124
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
125
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
126
- system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
127
- """
128
- # Update available models from API
129
- self.update_available_models(api_key)
130
-
131
- # Validate model after updating available models
132
- if model not in self.AVAILABLE_MODELS:
133
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
134
-
135
- # Initialize curl_cffi Session
136
- self.session = Session()
137
- self.is_conversation = is_conversation
138
- self.max_tokens_to_sample = max_tokens
139
- self.api_key = api_key
140
- self.model = model
141
- self.temperature = temperature
142
- self.presence_penalty = presence_penalty
143
- self.frequency_penalty = frequency_penalty
144
- self.top_p = top_p
145
- self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
146
- self.stream_chunk_size = 64
147
- self.timeout = timeout
148
- self.last_response = {}
149
- self.system_prompt = system_prompt
150
- self.available_functions: Dict[str, Callable] = {} # Store available functions
151
- self.headers = {
152
- "Content-Type": "application/json",
153
- "Authorization": f"Bearer {self.api_key}",
154
- }
155
-
156
- self.__available_optimizers = (
157
- method
158
- for method in dir(Optimizers)
159
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
160
- )
161
-
162
- # Update curl_cffi session headers
163
- self.session.headers.update(self.headers)
164
-
165
- # Set up conversation
166
- Conversation.intro = (
167
- AwesomePrompts().get_act(
168
- act, raise_not_found=True, default=None, case_insensitive=True
169
- )
170
- if act
171
- else intro or Conversation.intro
172
- )
173
- self.conversation = Conversation(
174
- is_conversation, self.max_tokens_to_sample, filepath, update_file
175
- )
176
- self.conversation.history_offset = history_offset
177
-
178
- # Set proxies for curl_cffi session
179
- self.session.proxies = proxies
180
-
181
- @staticmethod
182
- def _groq_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[Dict]:
183
- """Extracts the 'delta' object from Groq stream JSON chunks."""
184
- if isinstance(chunk, dict):
185
- # Return the delta object itself, or None if not found
186
- return chunk.get("choices", [{}])[0].get("delta")
187
- return None
188
-
189
- @classmethod
190
- def update_available_models(cls, api_key=None):
191
- """Update the available models list from Groq API"""
192
- try:
193
- models = cls.get_models(api_key)
194
- if models and len(models) > 0:
195
- cls.AVAILABLE_MODELS = models
196
- except Exception:
197
- # Fallback to default models list if fetching fails
198
- pass
199
-
200
- def add_function(self, function_name: str, function: Callable):
201
- """Add a function to the available functions dictionary.
202
-
203
- Args:
204
- function_name (str): The name of the function to be used in the prompt.
205
- function (Callable): The function itself.
206
- """
207
- self.available_functions[function_name] = function
208
-
209
- def ask(
210
- self,
211
- prompt: str,
212
- stream: bool = False,
213
- raw: bool = False,
214
- optimizer: str = None,
215
- conversationally: bool = False,
216
- tools: Optional[List[Dict[str, Any]]] = None, # Add tools parameter
217
- ) -> dict:
218
- """Chat with AI
219
-
220
- Args:
221
- prompt (str): Prompt to be send.
222
- stream (bool, optional): Flag for streaming response. Defaults to False.
223
- raw (bool, optional): Stream back raw response as received. Defaults to False.
224
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
225
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
226
- tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
227
-
228
- Returns:
229
- dict : {}
230
- """
231
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
232
- if optimizer:
233
- if optimizer in self.__available_optimizers:
234
- conversation_prompt = getattr(Optimizers, optimizer)(
235
- conversation_prompt if conversationally else prompt
236
- )
237
- else:
238
- raise Exception(
239
- f"Optimizer is not one of {self.__available_optimizers}"
240
- )
241
-
242
- messages = [{"content": conversation_prompt, "role": "user"}]
243
- if self.system_prompt:
244
- messages.insert(0, {"role": "system", "content": self.system_prompt})
245
-
246
- self.session.headers.update(self.headers)
247
- payload = {
248
- "frequency_penalty": self.frequency_penalty,
249
- "messages": messages,
250
- "model": self.model,
251
- "presence_penalty": self.presence_penalty,
252
- "stream": stream,
253
- "temperature": self.temperature,
254
- "top_p": self.top_p,
255
- "tools": tools # Include tools in the payload
256
- }
257
-
258
- def for_stream():
259
- try:
260
- response = self.session.post(
261
- self.chat_endpoint,
262
- json=payload,
263
- stream=True,
264
- timeout=self.timeout,
265
- impersonate="chrome110" # Use impersonate for better compatibility
266
- )
267
- if not response.status_code == 200:
268
- raise exceptions.FailedToGenerateResponseError(
269
- # Removed response.reason_phrase
270
- f"Failed to generate response - ({response.status_code}) - {response.text}"
271
- )
272
-
273
- streaming_text = ""
274
- # Use sanitize_stream
275
- processed_stream = sanitize_stream(
276
- data=response.iter_content(chunk_size=None), # Pass byte iterator
277
- intro_value="data:",
278
- to_json=True, # Stream sends JSON
279
- content_extractor=self._groq_extractor, # Use the delta extractor
280
- yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
281
- )
282
-
283
- for delta in processed_stream:
284
- # delta is the extracted 'delta' object or None
285
- if delta and isinstance(delta, dict):
286
- content = delta.get("content")
287
- if content:
288
- streaming_text += content
289
- resp = {"text": content} # Yield only the new chunk text
290
- self.last_response = {"choices": [{"delta": {"content": streaming_text}}]} # Update last_response structure
291
- yield resp if not raw else content # Yield dict or raw string chunk
292
- # Note: Tool calls in streaming delta are less common in OpenAI format, usually in final message
293
-
294
- except CurlError as e:
295
- raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
296
- except Exception as e:
297
- raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
298
-
299
- # Handle tool calls if any
300
- if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
301
- tool_calls = self.last_response['choices'][0]['message']['tool_calls']
302
- for tool_call in tool_calls:
303
- function_name = tool_call.get('function', {}).get('name')
304
- arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
305
- if function_name in self.available_functions:
306
- tool_response = self.available_functions[function_name](**arguments)
307
- messages.append({
308
- "tool_call_id": tool_call['id'],
309
- "role": "tool",
310
- "name": function_name,
311
- "content": tool_response
312
- })
313
- payload['messages'] = messages
314
- # Make a second call to get the final response
315
- try:
316
- second_response = self.session.post(
317
- self.chat_endpoint,
318
- json=payload,
319
- timeout=self.timeout,
320
- impersonate="chrome110" # Use impersonate for better compatibility
321
- )
322
- if second_response.status_code == 200:
323
- self.last_response = second_response.json()
324
- else:
325
- raise exceptions.FailedToGenerateResponseError(
326
- f"Failed to execute tool - {second_response.text}"
327
- )
328
- except CurlError as e:
329
- raise exceptions.FailedToGenerateResponseError(f"CurlError during tool execution: {str(e)}")
330
- except Exception as e:
331
- raise exceptions.FailedToGenerateResponseError(f"Error during tool execution: {str(e)}")
332
-
333
- self.conversation.update_chat_history(
334
- prompt, self.get_message(self.last_response)
335
- )
336
-
337
- def for_non_stream():
338
- try:
339
- response = self.session.post(
340
- self.chat_endpoint,
341
- json=payload,
342
- stream=False,
343
- timeout=self.timeout,
344
- impersonate="chrome110" # Use impersonate for better compatibility
345
- )
346
- if (
347
- not response.status_code == 200
348
- ):
349
- raise exceptions.FailedToGenerateResponseError(
350
- # Removed response.reason_phrase
351
- f"Failed to generate response - ({response.status_code}) - {response.text}"
352
- )
353
-
354
- response_text = response.text # Get raw text
355
-
356
- # Use sanitize_stream to parse the non-streaming JSON response
357
- processed_stream = sanitize_stream(
358
- data=response_text,
359
- to_json=True, # Parse the whole text as JSON
360
- intro_value=None,
361
- # Extractor for non-stream structure (returns the whole parsed dict)
362
- content_extractor=lambda chunk: chunk if isinstance(chunk, dict) else None,
363
- yield_raw_on_error=False
364
- )
365
-
366
- # Extract the single result (the parsed JSON dictionary)
367
- resp = next(processed_stream, None)
368
- if resp is None:
369
- raise exceptions.FailedToGenerateResponseError("Failed to parse non-stream JSON response")
370
-
371
- except CurlError as e:
372
- raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
373
- except Exception as e:
374
- # Catch the original AttributeError here if it happens before the raise
375
- if isinstance(e, AttributeError) and 'reason_phrase' in str(e):
376
- raise exceptions.FailedToGenerateResponseError(
377
- f"Failed to generate response - ({response.status_code}) - {response.text}"
378
- )
379
- raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
380
-
381
- # Handle tool calls if any
382
- if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
383
- tool_calls = resp['choices'][0]['message']['tool_calls']
384
- for tool_call in tool_calls:
385
- function_name = tool_call.get('function', {}).get('name')
386
- arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
387
- if function_name in self.available_functions:
388
- tool_response = self.available_functions[function_name](**arguments)
389
- messages.append({
390
- "tool_call_id": tool_call['id'],
391
- "role": "tool",
392
- "name": function_name,
393
- "content": tool_response
394
- })
395
- payload['messages'] = messages
396
- # Make a second call to get the final response
397
- try:
398
- second_response = self.session.post(
399
- self.chat_endpoint,
400
- json=payload,
401
- timeout=self.timeout,
402
- impersonate="chrome110" # Use impersonate for better compatibility
403
- )
404
- if second_response.status_code == 200:
405
- resp = second_response.json()
406
- else:
407
- raise exceptions.FailedToGenerateResponseError(
408
- f"Failed to execute tool - {second_response.text}"
409
- )
410
- except CurlError as e:
411
- raise exceptions.FailedToGenerateResponseError(f"CurlError during tool execution: {str(e)}")
412
- except Exception as e:
413
- raise exceptions.FailedToGenerateResponseError(f"Error during tool execution: {str(e)}")
414
-
415
- self.last_response.update(resp)
416
- self.conversation.update_chat_history(
417
- prompt, self.get_message(self.last_response)
418
- )
419
- return resp
420
-
421
- return for_stream() if stream else for_non_stream()
422
-
423
- def chat(
424
- self,
425
- prompt: str,
426
- stream: bool = False,
427
- optimizer: str = None,
428
- conversationally: bool = False,
429
- tools: Optional[List[Dict[str, Any]]] = None,
430
- ) -> str:
431
- """Generate response `str`
432
- Args:
433
- prompt (str): Prompt to be send.
434
- stream (bool, optional): Flag for streaming response. Defaults to False.
435
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
436
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
437
- tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
438
- Returns:
439
- str: Response generated
440
- """
441
-
442
- def for_stream():
443
- for response in self.ask(
444
- prompt, True, optimizer=optimizer, conversationally=conversationally, tools=tools
445
- ):
446
- yield self.get_message(response)
447
-
448
- def for_non_stream():
449
- return self.get_message(
450
- self.ask(
451
- prompt,
452
- False,
453
- optimizer=optimizer,
454
- conversationally=conversationally,
455
- tools=tools
456
- )
457
- )
458
-
459
- return for_stream() if stream else for_non_stream()
460
-
461
- def get_message(self, response: dict) -> str:
462
- """Retrieves message only from response
463
-
464
- Args:
465
- response (dict): Response generated by `self.ask`
466
-
467
- Returns:
468
- str: Message extracted
469
- """
470
- assert isinstance(response, dict), "Response should be of dict data-type only"
471
- try:
472
- # Check delta first for streaming
473
- if response.get("choices") and response["choices"][0].get("delta") and response["choices"][0]["delta"].get("content"):
474
- return response["choices"][0]["delta"]["content"]
475
- # Check message content for non-streaming or final message
476
- if response.get("choices") and response["choices"][0].get("message") and response["choices"][0]["message"].get("content"):
477
- return response["choices"][0]["message"]["content"]
478
- except (KeyError, IndexError, TypeError):
479
- # Handle cases where the structure might be different or content is null/missing
480
- pass
481
- return "" # Return empty string if no content found
482
-
483
-
484
- class AsyncGROQ(AsyncProvider):
485
- """
486
- An asynchronous class to interact with the GROQ AI API.
487
- """
488
-
489
- # Use the same model list as the synchronous class
490
- AVAILABLE_MODELS = GROQ.AVAILABLE_MODELS
491
-
492
- def __init__(
493
- self,
494
- api_key: str,
495
- is_conversation: bool = True,
496
- max_tokens: int = 600,
497
- temperature: float = 1,
498
- presence_penalty: int = 0,
499
- frequency_penalty: int = 0,
500
- top_p: float = 1,
501
- model: str = "mixtral-8x7b-32768",
502
- timeout: int = 30,
503
- intro: str = None,
504
- filepath: str = None,
505
- update_file: bool = True,
506
- proxies: dict = {},
507
- history_offset: int = 10250,
508
- act: str = None,
509
- system_prompt: Optional[str] = None,
510
- ):
511
- """Instantiates AsyncGROQ
512
-
513
- Args:
514
- api_key (key): GROQ's API key.
515
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
516
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
517
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
518
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
519
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
520
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
521
- model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
522
- timeout (int, optional): Http request timeout. Defaults to 30.
523
- intro (str, optional): Conversation introductory prompt. Defaults to None.
524
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
525
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
526
- proxies (dict, optional): Http request proxies. Defaults to {}.
527
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
528
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
529
- system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
530
- """
531
- # Update available models from API
532
- GROQ.update_available_models(api_key)
533
-
534
- # Validate model after updating available models
535
- if model not in self.AVAILABLE_MODELS:
536
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
537
-
538
- self.is_conversation = is_conversation
539
- self.max_tokens_to_sample = max_tokens
540
- self.api_key = api_key
541
- self.model = model
542
- self.temperature = temperature
543
- self.presence_penalty = presence_penalty
544
- self.frequency_penalty = frequency_penalty
545
- self.top_p = top_p
546
- self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
547
- self.stream_chunk_size = 64
548
- self.timeout = timeout
549
- self.last_response = {}
550
- self.system_prompt = system_prompt
551
- self.available_functions: Dict[str, Callable] = {} # Store available functions
552
- self.headers = {
553
- "Content-Type": "application/json",
554
- "Authorization": f"Bearer {self.api_key}",
555
- }
556
-
557
- self.__available_optimizers = (
558
- method
559
- for method in dir(Optimizers)
560
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
561
- )
562
- Conversation.intro = (
563
- AwesomePrompts().get_act(
564
- act, raise_not_found=True, default=None, case_insensitive=True
565
- )
566
- if act
567
- else intro or Conversation.intro
568
- )
569
- self.conversation = Conversation(
570
- is_conversation, self.max_tokens_to_sample, filepath, update_file
571
- )
572
- self.conversation.history_offset = history_offset
573
- self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
574
-
575
- def add_function(self, function_name: str, function: Callable):
576
- """Add a function to the available functions dictionary.
577
-
578
- Args:
579
- function_name (str): The name of the function to be used in the prompt.
580
- function (Callable): The function itself.
581
- """
582
- self.available_functions[function_name] = function
583
-
584
- async def ask(
585
- self,
586
- prompt: str,
587
- stream: bool = False,
588
- raw: bool = False,
589
- optimizer: str = None,
590
- conversationally: bool = False,
591
- tools: Optional[List[Dict[str, Any]]] = None,
592
- ) -> Union[dict, AsyncGenerator]:
593
- """Chat with AI asynchronously.
594
-
595
- Args:
596
- prompt (str): Prompt to be send.
597
- stream (bool, optional): Flag for streaming response. Defaults to False.
598
- raw (bool, optional): Stream back raw response as received. Defaults to False.
599
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
600
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
601
- tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
602
- Returns:
603
- dict|AsyncGenerator : ai content
604
- """
605
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
606
- if optimizer:
607
- if optimizer in self.__available_optimizers:
608
- conversation_prompt = getattr(Optimizers, optimizer)(
609
- conversation_prompt if conversationally else prompt
610
- )
611
- else:
612
- raise Exception(
613
- f"Optimizer is not one of {self.__available_optimizers}"
614
- )
615
-
616
- messages = [{"content": conversation_prompt, "role": "user"}]
617
- if self.system_prompt:
618
- messages.insert(0, {"role": "system", "content": self.system_prompt})
619
-
620
- payload = {
621
- "frequency_penalty": self.frequency_penalty,
622
- "messages": messages,
623
- "model": self.model,
624
- "presence_penalty": self.presence_penalty,
625
- "stream": stream,
626
- "temperature": self.temperature,
627
- "top_p": self.top_p,
628
- "tools": tools
629
- }
630
-
631
- async def for_stream():
632
- async with self.session.stream(
633
- "POST", self.chat_endpoint, json=payload, timeout=self.timeout
634
- ) as response:
635
- if not response.is_success:
636
- raise exceptions.FailedToGenerateResponseError(
637
- # Removed response.reason_phrase (not available in httpx response)
638
- f"Failed to generate response - ({response.status_code})"
639
- )
640
-
641
- message_load = ""
642
- intro_value = "data:"
643
- async for value in response.aiter_lines():
644
- try:
645
- if value.startswith(intro_value):
646
- value = value[len(intro_value) :]
647
- resp = json.loads(value)
648
- incomplete_message = await self.get_message(resp)
649
- if incomplete_message:
650
- message_load += incomplete_message
651
- resp["choices"][0]["delta"]["content"] = message_load
652
- self.last_response.update(resp)
653
- yield value if raw else resp
654
- elif raw:
655
- yield value
656
- except json.decoder.JSONDecodeError:
657
- pass
658
-
659
- # Handle tool calls if any (in streaming mode)
660
- if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
661
- tool_calls = self.last_response['choices'][0]['message']['tool_calls']
662
- for tool_call in tool_calls:
663
- function_name = tool_call.get('function', {}).get('name')
664
- arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
665
- if function_name in self.available_functions:
666
- tool_response = self.available_functions[function_name](**arguments)
667
- messages.append({
668
- "tool_call_id": tool_call['id'],
669
- "role": "tool",
670
- "name": function_name,
671
- "content": tool_response
672
- })
673
- payload['messages'] = messages
674
- # Make a second call to get the final response
675
- second_response = await self.session.post(
676
- self.chat_endpoint, json=payload, timeout=self.timeout
677
- )
678
- if second_response.is_success:
679
- self.last_response = second_response.json()
680
- else:
681
- raise exceptions.FailedToGenerateResponseError(
682
- f"Failed to execute tool - {second_response.text}"
683
- )
684
-
685
- self.conversation.update_chat_history(
686
- prompt, await self.get_message(self.last_response)
687
- )
688
-
689
- async def for_non_stream():
690
- response = await self.session.post(
691
- self.chat_endpoint, json=payload, timeout=self.timeout
692
- )
693
- if not response.is_success:
694
- raise exceptions.FailedToGenerateResponseError(
695
- # Removed response.reason_phrase (not available in httpx response)
696
- f"Failed to generate response - ({response.status_code})"
697
- )
698
- resp = response.json()
699
-
700
- # Handle tool calls if any (in non-streaming mode)
701
- if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
702
- tool_calls = resp['choices'][0]['message']['tool_calls']
703
- for tool_call in tool_calls:
704
- function_name = tool_call.get('function', {}).get('name')
705
- arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
706
- if function_name in self.available_functions:
707
- tool_response = self.available_functions[function_name](**arguments)
708
- messages.append({
709
- "tool_call_id": tool_call['id'],
710
- "role": "tool",
711
- "name": function_name,
712
- "content": tool_response
713
- })
714
- payload['messages'] = messages
715
- # Make a second call to get the final response
716
- second_response = await self.session.post(
717
- self.chat_endpoint, json=payload, timeout=self.timeout
718
- )
719
- if second_response.is_success:
720
- resp = second_response.json()
721
- else:
722
- raise exceptions.FailedToGenerateResponseError(
723
- f"Failed to execute tool - {second_response.text}"
724
- )
725
-
726
- self.last_response.update(resp)
727
- self.conversation.update_chat_history(
728
- prompt, await self.get_message(self.last_response)
729
- )
730
- return resp
731
-
732
- return for_stream() if stream else await for_non_stream()
733
-
734
- async def chat(
735
- self,
736
- prompt: str,
737
- stream: bool = False,
738
- optimizer: str = None,
739
- conversationally: bool = False,
740
- tools: Optional[List[Dict[str, Any]]] = None,
741
- ) -> Union[str, AsyncGenerator]:
742
- """Generate response `str` asynchronously.
743
- Args:
744
- prompt (str): Prompt to be send.
745
- stream (bool, optional): Flag for streaming response. Defaults to False.
746
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
747
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
748
- tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
749
- Returns:
750
- str|AsyncGenerator: Response generated
751
- """
752
-
753
- async def for_stream():
754
- async_ask = await self.ask(
755
- prompt, True, optimizer=optimizer, conversationally=conversationally, tools=tools
756
- )
757
- async for response in async_ask:
758
- yield await self.get_message(response)
759
-
760
- async def for_non_stream():
761
- return await self.get_message(
762
- await self.ask(
763
- prompt,
764
- False,
765
- optimizer=optimizer,
766
- conversationally=conversationally,
767
- tools=tools
768
- )
769
- )
770
-
771
- return for_stream() if stream else await for_non_stream()
772
-
773
- async def get_message(self, response: dict) -> str:
774
- """Retrieves message only from response
775
-
776
- Args:
777
- response (dict): Response generated by `self.ask`
778
-
779
- Returns:
780
- str: Message extracted
781
- """
782
- assert isinstance(response, dict), "Response should be of dict data-type only"
783
- try:
784
- # Check delta first for streaming
785
- if response.get("choices") and response["choices"][0].get("delta") and response["choices"][0]["delta"].get("content"):
786
- return response["choices"][0]["delta"]["content"]
787
- # Check message content for non-streaming or final message
788
- if response.get("choices") and response["choices"][0].get("message") and response["choices"][0]["message"].get("content"):
789
- return response["choices"][0]["message"]["content"]
790
- except (KeyError, IndexError, TypeError):
791
- # Handle cases where the structure might be different or content is null/missing
792
- pass
793
- return "" # Return empty string if no content found
794
-
795
- if __name__ == "__main__":
796
- # Example usage
797
- api_key = "gsk_*******************************"
798
- groq = GROQ(api_key=api_key, model="compound-beta")
799
- prompt = "What is the capital of France?"
800
- response = groq.chat(prompt)
801
- print(response)
1
+ import json
2
+ from typing import Any, Callable, Dict, Generator, List, Optional, Union, cast
3
+
4
+ from curl_cffi import CurlError
5
+
6
+ # Import curl_cffi for improved request handling
7
+ from curl_cffi.requests import Session
8
+
9
+ from webscout import exceptions
10
+ from webscout.AIbase import Provider, Response
11
+ from webscout.AIutel import ( # Import sanitize_stream
12
+ AwesomePrompts,
13
+ Conversation,
14
+ Optimizers,
15
+ sanitize_stream,
16
+ )
17
+
18
+
19
+ class GROQ(Provider):
20
+ """
21
+ A class to interact with the GROQ AI API.
22
+ """
23
+
24
+ required_auth = True
25
+ # Default models list (will be updated dynamically)
26
+ AVAILABLE_MODELS = [
27
+ "distil-whisper-large-v3-en",
28
+ "gemma2-9b-it",
29
+ "llama-3.3-70b-versatile",
30
+ "llama-3.1-8b-instant",
31
+ "llama-guard-3-8b",
32
+ "llama3-70b-8192",
33
+ "llama3-8b-8192",
34
+ "whisper-large-v3",
35
+ "whisper-large-v3-turbo",
36
+ "meta-llama/llama-4-scout-17b-16e-instruct",
37
+ "meta-llama/llama-4-maverick-17b-128e-instruct",
38
+ "playai-tts",
39
+ "playai-tts-arabic",
40
+ "qwen-qwq-32b",
41
+ "mistral-saba-24b",
42
+ "qwen-2.5-coder-32b",
43
+ "qwen-2.5-32b",
44
+ "deepseek-r1-distill-qwen-32b",
45
+ "deepseek-r1-distill-llama-70b",
46
+ "llama-3.3-70b-specdec",
47
+ "llama-3.2-1b-preview",
48
+ "llama-3.2-3b-preview",
49
+ "llama-3.2-11b-vision-preview",
50
+ "llama-3.2-90b-vision-preview",
51
+ "mixtral-8x7b-32768",
52
+ ]
53
+
54
+ @classmethod
55
+ def get_models(cls, api_key: Optional[str] = None):
56
+ """Fetch available models from Groq API.
57
+
58
+ Args:
59
+ api_key (str, optional): Groq API key. If not provided, returns default models.
60
+
61
+ Returns:
62
+ list: List of available model IDs
63
+ """
64
+ if not api_key:
65
+ return cls.AVAILABLE_MODELS
66
+
67
+ try:
68
+ # Use a temporary curl_cffi session for this class method
69
+ temp_session = Session()
70
+ headers = {
71
+ "Content-Type": "application/json",
72
+ "Authorization": f"Bearer {api_key}",
73
+ }
74
+
75
+ response = temp_session.get(
76
+ "https://api.groq.com/openai/v1/models",
77
+ headers=headers,
78
+ impersonate="chrome110", # Use impersonate for fetching
79
+ )
80
+
81
+ if response.status_code != 200:
82
+ return cls.AVAILABLE_MODELS
83
+
84
+ data = response.json()
85
+ if "data" in data and isinstance(data["data"], list):
86
+ return [model["id"] for model in data["data"]]
87
+ return cls.AVAILABLE_MODELS
88
+
89
+ except (CurlError, Exception):
90
+ # Fallback to default models list if fetching fails
91
+ return cls.AVAILABLE_MODELS
92
+
93
+     def __init__(
+         self,
+         api_key: str,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 1,
+         presence_penalty: int = 0,
+         frequency_penalty: int = 0,
+         top_p: float = 1,
+         model: str = "mixtral-8x7b-32768",
+         timeout: int = 30,
+         intro: Optional[str] = None,
+         filepath: Optional[str] = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+     ):
+         """Instantiates GROQ
+
+         Args:
+             api_key (str): GROQ's API key.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             temperature (float, optional): Controls the randomness of the generated text. Defaults to 1.
+             presence_penalty (int, optional): Penalty for repeating topics. Defaults to 0.
+             frequency_penalty (int, optional): Penalty for repeating words. Defaults to 0.
+             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
+             model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
+             timeout (int, optional): HTTP request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index (used as intro). Defaults to None.
+             system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
+         """
+         # Update available models from the API
+         self.update_available_models(api_key)
+
+         # Validate the model after updating available models
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_key = api_key
+         self.model = model
+         self.temperature = temperature
+         self.presence_penalty = presence_penalty
+         self.frequency_penalty = frequency_penalty
+         self.top_p = top_p
+         self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.available_functions: Dict[str, Callable] = {}  # Store available functions
+         self.headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {self.api_key}",
+         }
+
+         # Use a tuple (not a generator) so membership checks work on every call
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         # Update curl_cffi session headers
+         self.session.headers.update(self.headers)
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+         if act:
+             self.conversation.intro = (
+                 AwesomePrompts().get_act(
+                     cast(Union[str, int], act),
+                     default=self.conversation.intro,
+                     case_insensitive=True,
+                 )
+                 or self.conversation.intro
+             )
+         elif intro:
+             self.conversation.intro = intro
+
+         # Set proxies for the curl_cffi session created above
+         if proxies:
+             self.session.proxies.update(proxies)
+
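A minimal construction sketch (the key is a placeholder and the proxy URL is hypothetical):

ai = GROQ(
    api_key="gsk_...",                           # placeholder key
    model="mixtral-8x7b-32768",
    system_prompt="You are a concise assistant.",
    proxies={"https": "http://127.0.0.1:8080"},  # hypothetical proxy
)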
+     @staticmethod
+     def _groq_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[Dict]:
+         """Extracts the 'delta' object from Groq stream JSON chunks."""
+         if isinstance(chunk, dict):
+             # Return the delta object itself, or None if not found
+             return chunk.get("choices", [{}])[0].get("delta")
+         return None
+
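For reference, the extractor targets OpenAI-style stream chunks shaped roughly like this (illustrative values):

chunk = {"choices": [{"delta": {"content": "Paris"}, "index": 0}]}
assert GROQ._groq_extractor(chunk) == {"content": "Paris"}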
+     @classmethod
+     def update_available_models(cls, api_key=None):
+         """Update the available models list from the Groq API."""
+         try:
+             models = cls.get_models(api_key)
+             if models:
+                 cls.AVAILABLE_MODELS = models
+         except Exception:
+             # Fall back to the default models list if fetching fails
+             pass
+
+     def add_function(self, function_name: str, function: Callable):
+         """Add a function to the available functions dictionary.
+
+         Args:
+             function_name (str): The name of the function to be used in the prompt.
+             function (Callable): The function itself.
+         """
+         self.available_functions[function_name] = function
+
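A sketch of wiring up tool calling: `get_weather` and its schema are hypothetical, and the `tools` list is forwarded to the API through the `tools` kwarg of `ask`/`chat`:

def get_weather(city: str) -> str:
    # Hypothetical local implementation the model can invoke
    return f"Sunny in {city}"

ai.add_function("get_weather", get_weather)
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
print(ai.chat("What's the weather in Paris?", tools=tools))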
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+         **kwargs: Any,
+     ) -> Response:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+             **kwargs: Additional parameters such as `tools`.
+
+         Returns:
+             dict: The response payload.
+         """
+         tools = kwargs.get("tools")
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         messages = [{"content": conversation_prompt, "role": "user"}]
+         if self.system_prompt:
+             messages.insert(0, {"role": "system", "content": self.system_prompt})
+
+         self.session.headers.update(self.headers)
+         payload = {
+             "frequency_penalty": self.frequency_penalty,
+             "messages": messages,
+             "model": self.model,
+             "presence_penalty": self.presence_penalty,
+             "stream": stream,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+             "tools": tools,  # Include tools in the payload
+         }
+
+         def for_stream():
+             resp = {}
+             try:
+                 response = self.session.post(
+                     self.chat_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110",  # Impersonate a browser for better compatibility
+                 )
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}) - {response.text}"
+                     )
+
+                 streaming_text = ""
+                 # Use sanitize_stream to parse the SSE byte stream
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True,  # Stream sends JSON
+                     content_extractor=self._groq_extractor,  # Use the delta extractor
+                     yield_raw_on_error=False,  # Skip non-JSON lines or lines where the extractor fails
+                     raw=raw,
+                 )
+
+                 for delta in processed_stream:
+                     # delta is the extracted 'delta' object or None
+                     if raw:
+                         yield delta
+                     elif delta and isinstance(delta, dict):
+                         content = delta.get("content")
+                         if content:
+                             streaming_text += content
+                             resp = {"text": content}  # Yield only the new chunk text
+                             self.last_response = {
+                                 "choices": [{"delta": {"content": streaming_text}}]
+                             }  # Keep the accumulated text in last_response
+                             yield resp
+                     # Note: tool calls rarely appear in streaming deltas in the OpenAI format;
+                     # they usually arrive in the final message
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
+
+             # Handle tool calls if any
+             first_choice = self.last_response.get("choices", [{}])[0]
+             message = first_choice.get("message", {})
+             if "tool_calls" in message:
+                 tool_calls = message.get("tool_calls", [])
+                 for tool_call in tool_calls:
+                     if not isinstance(tool_call, dict):
+                         continue
+                     function_name = tool_call.get("function", {}).get("name")
+                     arguments = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
+                     if function_name in self.available_functions:
+                         tool_response = self.available_functions[function_name](**arguments)
+                         messages.append(
+                             {
+                                 "tool_call_id": tool_call.get("id"),
+                                 "role": "tool",
+                                 "name": function_name,
+                                 "content": tool_response,
+                             }
+                         )
+                 payload["messages"] = messages
+                 # Make a second call to get the final response
+                 try:
+                     second_response = self.session.post(
+                         self.chat_endpoint,
+                         json=payload,
+                         timeout=self.timeout,
+                         impersonate="chrome110",
+                     )
+                     if second_response.status_code == 200:
+                         self.last_response = second_response.json()
+                     else:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Failed to execute tool - {second_response.text}"
+                         )
+                 except CurlError as e:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"CurlError during tool execution: {str(e)}"
+                     )
+                 except Exception as e:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Error during tool execution: {str(e)}"
+                     )
+
+             self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+         def for_non_stream():
+             resp = {}
+             response = None
+             try:
+                 response = self.session.post(
+                     self.chat_endpoint,
+                     json=payload,
+                     stream=False,
+                     timeout=self.timeout,
+                     impersonate="chrome110",  # Impersonate a browser for better compatibility
+                 )
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}) - {response.text}"
+                     )
+
+                 response_text = response.text  # Get the raw text
+
+                 # Use sanitize_stream to parse the non-streaming JSON response
+                 processed_stream = sanitize_stream(
+                     data=response_text,
+                     to_json=True,  # Parse the whole text as JSON
+                     intro_value=None,
+                     # Extractor for the non-stream structure (returns the whole parsed dict)
+                     content_extractor=lambda chunk: chunk if isinstance(chunk, dict) else None,
+                     yield_raw_on_error=False,
+                     raw=raw,
+                 )
+
+                 # Extract the single result (the parsed JSON dictionary)
+                 resp = next(processed_stream, None)
+                 if raw:
+                     return resp
+                 if resp is None:
+                     raise exceptions.FailedToGenerateResponseError(
+                         "Failed to parse non-stream JSON response"
+                     )
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
+             except Exception as e:
+                 # Surface a clearer message if an AttributeError fired while a response object exists
+                 if (
+                     isinstance(e, AttributeError)
+                     and "reason_phrase" in str(e)
+                     and response is not None
+                 ):
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}) - {response.text}"
+                     )
+                 raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
+
+             # Handle tool calls if any
+             first_choice = resp.get("choices", [{}])[0]
+             message = first_choice.get("message", {})
+             if "tool_calls" in message:
+                 tool_calls = message.get("tool_calls", [])
+                 for tool_call in tool_calls:
+                     if not isinstance(tool_call, dict):
+                         continue
+                     function_name = tool_call.get("function", {}).get("name")
+                     arguments = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
+                     if function_name in self.available_functions:
+                         tool_response = self.available_functions[function_name](**arguments)
+                         messages.append(
+                             {
+                                 "tool_call_id": tool_call.get("id"),
+                                 "role": "tool",
+                                 "name": function_name,
+                                 "content": tool_response,
+                             }
+                         )
+                 payload["messages"] = messages
+                 # Make a second call to get the final response
+                 try:
+                     second_response = self.session.post(
+                         self.chat_endpoint,
+                         json=payload,
+                         timeout=self.timeout,
+                         impersonate="chrome110",
+                     )
+                     if second_response.status_code == 200:
+                         resp = second_response.json()
+                     else:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Failed to execute tool - {second_response.text}"
+                         )
+                 except CurlError as e:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"CurlError during tool execution: {str(e)}"
+                     )
+                 except Exception as e:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Error during tool execution: {str(e)}"
+                     )
+
+             self.last_response.update(resp)
+             self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+             return resp
+
+         return for_stream() if stream else for_non_stream()
+
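Calling `ask` directly without streaming returns the parsed OpenAI-style payload, for example:

data = ai.ask("What is 2 + 2?")
print(data["choices"][0]["message"]["content"])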
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+         **kwargs: Any,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate a response as `str`
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+             **kwargs: Additional parameters such as `tools`.
+
+         Returns:
+             str: Response generated
+         """
+         raw = kwargs.pop("raw", False)  # pop so `raw` isn't passed twice to ask()
+         if stream:
+
+             def for_stream():
+                 gen = self.ask(
+                     prompt,
+                     True,
+                     raw=raw,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                     **kwargs,
+                 )
+                 if hasattr(gen, "__iter__"):
+                     for response in gen:
+                         if raw:
+                             yield cast(str, response)
+                         else:
+                             yield self.get_message(response)
+
+             return for_stream()
+         else:
+             result = self.ask(
+                 prompt,
+                 False,
+                 raw=raw,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 **kwargs,
+             )
+             if raw:
+                 return cast(str, result)
+             return self.get_message(result)
+
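Streaming through `chat` yields plain text chunks, e.g.:

for piece in ai.chat("Tell me a one-line joke.", stream=True):
    print(piece, end="", flush=True)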
+     def get_message(self, response: Response) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (Response): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         if not isinstance(response, dict):
+             return str(response)
+
+         resp_dict = cast(Dict[str, Any], response)
+         try:
+             # Streaming chunks from `ask` carry the new text under the "text" key
+             if "text" in resp_dict:
+                 return resp_dict["text"]
+             # Check delta first for streaming
+             if (
+                 resp_dict.get("choices")
+                 and resp_dict["choices"][0].get("delta")
+                 and resp_dict["choices"][0]["delta"].get("content")
+             ):
+                 return resp_dict["choices"][0]["delta"]["content"]
+             # Check message content for non-streaming or the final message
+             if (
+                 resp_dict.get("choices")
+                 and resp_dict["choices"][0].get("message")
+                 and resp_dict["choices"][0]["message"].get("content")
+             ):
+                 return resp_dict["choices"][0]["message"]["content"]
+         except (KeyError, IndexError, TypeError):
+             # Handle cases where the structure differs or content is null/missing
+             pass
+         return ""  # Return an empty string if no content is found
+
+
+ if __name__ == "__main__":
+     # Example usage
+     api_key = "gsk_*******************************"
+     groq = GROQ(api_key=api_key, model="compound-beta")
+     prompt = "What is the capital of France?"
+     response = groq.chat(prompt)
+     print(response)