webscout 8.2.2__py3-none-any.whl → 2026.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (483) hide show
  1. webscout/AIauto.py +524 -143
  2. webscout/AIbase.py +247 -123
  3. webscout/AIutel.py +68 -132
  4. webscout/Bard.py +1072 -535
  5. webscout/Extra/GitToolkit/__init__.py +2 -2
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +20 -12
  7. webscout/Extra/GitToolkit/gitapi/gist.py +142 -0
  8. webscout/Extra/GitToolkit/gitapi/organization.py +91 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +308 -195
  10. webscout/Extra/GitToolkit/gitapi/search.py +162 -0
  11. webscout/Extra/GitToolkit/gitapi/trending.py +236 -0
  12. webscout/Extra/GitToolkit/gitapi/user.py +128 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +82 -62
  14. webscout/Extra/YTToolkit/README.md +443 -0
  15. webscout/Extra/YTToolkit/YTdownloader.py +953 -957
  16. webscout/Extra/YTToolkit/__init__.py +3 -3
  17. webscout/Extra/YTToolkit/transcriber.py +595 -476
  18. webscout/Extra/YTToolkit/ytapi/README.md +230 -0
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +22 -6
  20. webscout/Extra/YTToolkit/ytapi/captions.py +190 -0
  21. webscout/Extra/YTToolkit/ytapi/channel.py +302 -307
  22. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  23. webscout/Extra/YTToolkit/ytapi/extras.py +178 -45
  24. webscout/Extra/YTToolkit/ytapi/hashtag.py +120 -0
  25. webscout/Extra/YTToolkit/ytapi/https.py +89 -88
  26. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  27. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -59
  28. webscout/Extra/YTToolkit/ytapi/pool.py +8 -8
  29. webscout/Extra/YTToolkit/ytapi/query.py +143 -40
  30. webscout/Extra/YTToolkit/ytapi/shorts.py +122 -0
  31. webscout/Extra/YTToolkit/ytapi/stream.py +68 -63
  32. webscout/Extra/YTToolkit/ytapi/suggestions.py +97 -0
  33. webscout/Extra/YTToolkit/ytapi/utils.py +66 -62
  34. webscout/Extra/YTToolkit/ytapi/video.py +189 -18
  35. webscout/Extra/__init__.py +2 -3
  36. webscout/Extra/gguf.py +1298 -682
  37. webscout/Extra/tempmail/README.md +488 -0
  38. webscout/Extra/tempmail/__init__.py +28 -28
  39. webscout/Extra/tempmail/async_utils.py +143 -141
  40. webscout/Extra/tempmail/base.py +172 -161
  41. webscout/Extra/tempmail/cli.py +191 -187
  42. webscout/Extra/tempmail/emailnator.py +88 -84
  43. webscout/Extra/tempmail/mail_tm.py +378 -361
  44. webscout/Extra/tempmail/temp_mail_io.py +304 -292
  45. webscout/Extra/weather.py +196 -194
  46. webscout/Extra/weather_ascii.py +17 -15
  47. webscout/Provider/AISEARCH/PERPLEXED_search.py +175 -0
  48. webscout/Provider/AISEARCH/Perplexity.py +237 -304
  49. webscout/Provider/AISEARCH/README.md +106 -0
  50. webscout/Provider/AISEARCH/__init__.py +16 -10
  51. webscout/Provider/AISEARCH/brave_search.py +298 -0
  52. webscout/Provider/AISEARCH/iask_search.py +130 -209
  53. webscout/Provider/AISEARCH/monica_search.py +200 -246
  54. webscout/Provider/AISEARCH/webpilotai_search.py +242 -281
  55. webscout/Provider/Algion.py +413 -0
  56. webscout/Provider/Andi.py +74 -69
  57. webscout/Provider/Apriel.py +313 -0
  58. webscout/Provider/Ayle.py +323 -0
  59. webscout/Provider/ChatSandbox.py +329 -0
  60. webscout/Provider/ClaudeOnline.py +365 -0
  61. webscout/Provider/Cohere.py +232 -208
  62. webscout/Provider/DeepAI.py +367 -0
  63. webscout/Provider/Deepinfra.py +343 -173
  64. webscout/Provider/EssentialAI.py +217 -0
  65. webscout/Provider/ExaAI.py +274 -261
  66. webscout/Provider/Gemini.py +60 -54
  67. webscout/Provider/GithubChat.py +385 -367
  68. webscout/Provider/Gradient.py +286 -0
  69. webscout/Provider/Groq.py +556 -670
  70. webscout/Provider/HadadXYZ.py +323 -0
  71. webscout/Provider/HeckAI.py +392 -233
  72. webscout/Provider/HuggingFace.py +387 -0
  73. webscout/Provider/IBM.py +340 -0
  74. webscout/Provider/Jadve.py +317 -266
  75. webscout/Provider/K2Think.py +306 -0
  76. webscout/Provider/Koboldai.py +221 -381
  77. webscout/Provider/Netwrck.py +273 -228
  78. webscout/Provider/Nvidia.py +310 -0
  79. webscout/Provider/OPENAI/DeepAI.py +489 -0
  80. webscout/Provider/OPENAI/K2Think.py +423 -0
  81. webscout/Provider/OPENAI/PI.py +463 -0
  82. webscout/Provider/OPENAI/README.md +890 -0
  83. webscout/Provider/OPENAI/TogetherAI.py +405 -0
  84. webscout/Provider/OPENAI/TwoAI.py +255 -0
  85. webscout/Provider/OPENAI/__init__.py +148 -25
  86. webscout/Provider/OPENAI/ai4chat.py +348 -0
  87. webscout/Provider/OPENAI/akashgpt.py +436 -0
  88. webscout/Provider/OPENAI/algion.py +303 -0
  89. webscout/Provider/OPENAI/ayle.py +365 -0
  90. webscout/Provider/OPENAI/base.py +253 -46
  91. webscout/Provider/OPENAI/cerebras.py +296 -0
  92. webscout/Provider/OPENAI/chatgpt.py +514 -193
  93. webscout/Provider/OPENAI/chatsandbox.py +233 -0
  94. webscout/Provider/OPENAI/deepinfra.py +403 -272
  95. webscout/Provider/OPENAI/e2b.py +2370 -1350
  96. webscout/Provider/OPENAI/elmo.py +278 -0
  97. webscout/Provider/OPENAI/exaai.py +186 -138
  98. webscout/Provider/OPENAI/freeassist.py +446 -0
  99. webscout/Provider/OPENAI/gradient.py +448 -0
  100. webscout/Provider/OPENAI/groq.py +380 -0
  101. webscout/Provider/OPENAI/hadadxyz.py +292 -0
  102. webscout/Provider/OPENAI/heckai.py +100 -104
  103. webscout/Provider/OPENAI/huggingface.py +321 -0
  104. webscout/Provider/OPENAI/ibm.py +425 -0
  105. webscout/Provider/OPENAI/llmchat.py +253 -0
  106. webscout/Provider/OPENAI/llmchatco.py +378 -327
  107. webscout/Provider/OPENAI/meta.py +541 -0
  108. webscout/Provider/OPENAI/netwrck.py +110 -84
  109. webscout/Provider/OPENAI/nvidia.py +317 -0
  110. webscout/Provider/OPENAI/oivscode.py +348 -0
  111. webscout/Provider/OPENAI/openrouter.py +328 -0
  112. webscout/Provider/OPENAI/pydantic_imports.py +1 -0
  113. webscout/Provider/OPENAI/sambanova.py +397 -0
  114. webscout/Provider/OPENAI/sonus.py +126 -115
  115. webscout/Provider/OPENAI/textpollinations.py +218 -133
  116. webscout/Provider/OPENAI/toolbaz.py +136 -166
  117. webscout/Provider/OPENAI/typefully.py +419 -0
  118. webscout/Provider/OPENAI/typliai.py +279 -0
  119. webscout/Provider/OPENAI/utils.py +314 -211
  120. webscout/Provider/OPENAI/wisecat.py +103 -125
  121. webscout/Provider/OPENAI/writecream.py +185 -156
  122. webscout/Provider/OPENAI/x0gpt.py +227 -136
  123. webscout/Provider/OPENAI/zenmux.py +380 -0
  124. webscout/Provider/OpenRouter.py +386 -0
  125. webscout/Provider/Openai.py +337 -496
  126. webscout/Provider/PI.py +443 -344
  127. webscout/Provider/QwenLM.py +346 -254
  128. webscout/Provider/STT/__init__.py +28 -0
  129. webscout/Provider/STT/base.py +303 -0
  130. webscout/Provider/STT/elevenlabs.py +264 -0
  131. webscout/Provider/Sambanova.py +317 -0
  132. webscout/Provider/TTI/README.md +69 -0
  133. webscout/Provider/TTI/__init__.py +37 -12
  134. webscout/Provider/TTI/base.py +147 -0
  135. webscout/Provider/TTI/claudeonline.py +393 -0
  136. webscout/Provider/TTI/magicstudio.py +292 -0
  137. webscout/Provider/TTI/miragic.py +180 -0
  138. webscout/Provider/TTI/pollinations.py +331 -0
  139. webscout/Provider/TTI/together.py +334 -0
  140. webscout/Provider/TTI/utils.py +14 -0
  141. webscout/Provider/TTS/README.md +186 -0
  142. webscout/Provider/TTS/__init__.py +43 -7
  143. webscout/Provider/TTS/base.py +523 -0
  144. webscout/Provider/TTS/deepgram.py +286 -156
  145. webscout/Provider/TTS/elevenlabs.py +189 -111
  146. webscout/Provider/TTS/freetts.py +218 -0
  147. webscout/Provider/TTS/murfai.py +288 -113
  148. webscout/Provider/TTS/openai_fm.py +364 -0
  149. webscout/Provider/TTS/parler.py +203 -111
  150. webscout/Provider/TTS/qwen.py +334 -0
  151. webscout/Provider/TTS/sherpa.py +286 -0
  152. webscout/Provider/TTS/speechma.py +693 -180
  153. webscout/Provider/TTS/streamElements.py +275 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TextPollinationsAI.py +221 -121
  156. webscout/Provider/TogetherAI.py +450 -0
  157. webscout/Provider/TwoAI.py +309 -199
  158. webscout/Provider/TypliAI.py +311 -0
  159. webscout/Provider/UNFINISHED/ChatHub.py +219 -0
  160. webscout/Provider/{OPENAI/glider.py → UNFINISHED/ChutesAI.py} +160 -145
  161. webscout/Provider/UNFINISHED/GizAI.py +300 -0
  162. webscout/Provider/UNFINISHED/Marcus.py +218 -0
  163. webscout/Provider/UNFINISHED/Qodo.py +481 -0
  164. webscout/Provider/UNFINISHED/XenAI.py +330 -0
  165. webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +64 -47
  166. webscout/Provider/UNFINISHED/aihumanizer.py +41 -0
  167. webscout/Provider/UNFINISHED/grammerchecker.py +37 -0
  168. webscout/Provider/UNFINISHED/liner.py +342 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +246 -0
  170. webscout/Provider/UNFINISHED/samurai.py +231 -0
  171. webscout/Provider/WiseCat.py +256 -196
  172. webscout/Provider/WrDoChat.py +390 -0
  173. webscout/Provider/__init__.py +115 -198
  174. webscout/Provider/ai4chat.py +181 -202
  175. webscout/Provider/akashgpt.py +330 -342
  176. webscout/Provider/cerebras.py +397 -242
  177. webscout/Provider/cleeai.py +236 -213
  178. webscout/Provider/elmo.py +291 -234
  179. webscout/Provider/geminiapi.py +343 -208
  180. webscout/Provider/julius.py +245 -223
  181. webscout/Provider/learnfastai.py +333 -266
  182. webscout/Provider/llama3mitril.py +230 -180
  183. webscout/Provider/llmchat.py +308 -213
  184. webscout/Provider/llmchatco.py +321 -311
  185. webscout/Provider/meta.py +996 -794
  186. webscout/Provider/oivscode.py +332 -0
  187. webscout/Provider/searchchat.py +316 -293
  188. webscout/Provider/sonus.py +264 -208
  189. webscout/Provider/toolbaz.py +359 -320
  190. webscout/Provider/turboseek.py +332 -219
  191. webscout/Provider/typefully.py +262 -280
  192. webscout/Provider/x0gpt.py +332 -256
  193. webscout/__init__.py +31 -38
  194. webscout/__main__.py +5 -5
  195. webscout/cli.py +585 -293
  196. webscout/client.py +1497 -0
  197. webscout/conversation.py +140 -565
  198. webscout/exceptions.py +383 -339
  199. webscout/litagent/__init__.py +29 -29
  200. webscout/litagent/agent.py +492 -455
  201. webscout/litagent/constants.py +60 -60
  202. webscout/models.py +505 -181
  203. webscout/optimizers.py +32 -378
  204. webscout/prompt_manager.py +376 -274
  205. webscout/sanitize.py +1514 -0
  206. webscout/scout/README.md +452 -0
  207. webscout/scout/__init__.py +8 -8
  208. webscout/scout/core/__init__.py +7 -7
  209. webscout/scout/core/crawler.py +330 -140
  210. webscout/scout/core/scout.py +800 -568
  211. webscout/scout/core/search_result.py +51 -96
  212. webscout/scout/core/text_analyzer.py +64 -63
  213. webscout/scout/core/text_utils.py +412 -277
  214. webscout/scout/core/web_analyzer.py +54 -52
  215. webscout/scout/element.py +872 -460
  216. webscout/scout/parsers/__init__.py +70 -69
  217. webscout/scout/parsers/html5lib_parser.py +182 -172
  218. webscout/scout/parsers/html_parser.py +238 -236
  219. webscout/scout/parsers/lxml_parser.py +203 -178
  220. webscout/scout/utils.py +38 -37
  221. webscout/search/__init__.py +47 -0
  222. webscout/search/base.py +201 -0
  223. webscout/search/bing_main.py +45 -0
  224. webscout/search/brave_main.py +92 -0
  225. webscout/search/duckduckgo_main.py +57 -0
  226. webscout/search/engines/__init__.py +127 -0
  227. webscout/search/engines/bing/__init__.py +15 -0
  228. webscout/search/engines/bing/base.py +35 -0
  229. webscout/search/engines/bing/images.py +114 -0
  230. webscout/search/engines/bing/news.py +96 -0
  231. webscout/search/engines/bing/suggestions.py +36 -0
  232. webscout/search/engines/bing/text.py +109 -0
  233. webscout/search/engines/brave/__init__.py +19 -0
  234. webscout/search/engines/brave/base.py +47 -0
  235. webscout/search/engines/brave/images.py +213 -0
  236. webscout/search/engines/brave/news.py +353 -0
  237. webscout/search/engines/brave/suggestions.py +318 -0
  238. webscout/search/engines/brave/text.py +167 -0
  239. webscout/search/engines/brave/videos.py +364 -0
  240. webscout/search/engines/duckduckgo/__init__.py +25 -0
  241. webscout/search/engines/duckduckgo/answers.py +80 -0
  242. webscout/search/engines/duckduckgo/base.py +189 -0
  243. webscout/search/engines/duckduckgo/images.py +100 -0
  244. webscout/search/engines/duckduckgo/maps.py +183 -0
  245. webscout/search/engines/duckduckgo/news.py +70 -0
  246. webscout/search/engines/duckduckgo/suggestions.py +22 -0
  247. webscout/search/engines/duckduckgo/text.py +221 -0
  248. webscout/search/engines/duckduckgo/translate.py +48 -0
  249. webscout/search/engines/duckduckgo/videos.py +80 -0
  250. webscout/search/engines/duckduckgo/weather.py +84 -0
  251. webscout/search/engines/mojeek.py +61 -0
  252. webscout/search/engines/wikipedia.py +77 -0
  253. webscout/search/engines/yahoo/__init__.py +41 -0
  254. webscout/search/engines/yahoo/answers.py +19 -0
  255. webscout/search/engines/yahoo/base.py +34 -0
  256. webscout/search/engines/yahoo/images.py +323 -0
  257. webscout/search/engines/yahoo/maps.py +19 -0
  258. webscout/search/engines/yahoo/news.py +258 -0
  259. webscout/search/engines/yahoo/suggestions.py +140 -0
  260. webscout/search/engines/yahoo/text.py +273 -0
  261. webscout/search/engines/yahoo/translate.py +19 -0
  262. webscout/search/engines/yahoo/videos.py +302 -0
  263. webscout/search/engines/yahoo/weather.py +220 -0
  264. webscout/search/engines/yandex.py +67 -0
  265. webscout/search/engines/yep/__init__.py +13 -0
  266. webscout/search/engines/yep/base.py +34 -0
  267. webscout/search/engines/yep/images.py +101 -0
  268. webscout/search/engines/yep/suggestions.py +38 -0
  269. webscout/search/engines/yep/text.py +99 -0
  270. webscout/search/http_client.py +172 -0
  271. webscout/search/results.py +141 -0
  272. webscout/search/yahoo_main.py +57 -0
  273. webscout/search/yep_main.py +48 -0
  274. webscout/server/__init__.py +48 -0
  275. webscout/server/config.py +78 -0
  276. webscout/server/exceptions.py +69 -0
  277. webscout/server/providers.py +286 -0
  278. webscout/server/request_models.py +131 -0
  279. webscout/server/request_processing.py +404 -0
  280. webscout/server/routes.py +642 -0
  281. webscout/server/server.py +351 -0
  282. webscout/server/ui_templates.py +1171 -0
  283. webscout/swiftcli/__init__.py +79 -809
  284. webscout/swiftcli/core/__init__.py +7 -0
  285. webscout/swiftcli/core/cli.py +574 -0
  286. webscout/swiftcli/core/context.py +98 -0
  287. webscout/swiftcli/core/group.py +268 -0
  288. webscout/swiftcli/decorators/__init__.py +28 -0
  289. webscout/swiftcli/decorators/command.py +243 -0
  290. webscout/swiftcli/decorators/options.py +247 -0
  291. webscout/swiftcli/decorators/output.py +392 -0
  292. webscout/swiftcli/exceptions.py +21 -0
  293. webscout/swiftcli/plugins/__init__.py +9 -0
  294. webscout/swiftcli/plugins/base.py +134 -0
  295. webscout/swiftcli/plugins/manager.py +269 -0
  296. webscout/swiftcli/utils/__init__.py +58 -0
  297. webscout/swiftcli/utils/formatting.py +251 -0
  298. webscout/swiftcli/utils/parsing.py +368 -0
  299. webscout/update_checker.py +280 -136
  300. webscout/utils.py +28 -14
  301. webscout/version.py +2 -1
  302. webscout/version.py.bak +3 -0
  303. webscout/zeroart/__init__.py +218 -55
  304. webscout/zeroart/base.py +70 -60
  305. webscout/zeroart/effects.py +155 -99
  306. webscout/zeroart/fonts.py +1799 -816
  307. webscout-2026.1.19.dist-info/METADATA +638 -0
  308. webscout-2026.1.19.dist-info/RECORD +312 -0
  309. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info}/WHEEL +1 -1
  310. webscout-2026.1.19.dist-info/entry_points.txt +4 -0
  311. webscout-2026.1.19.dist-info/top_level.txt +1 -0
  312. inferno/__init__.py +0 -6
  313. inferno/__main__.py +0 -9
  314. inferno/cli.py +0 -6
  315. webscout/DWEBS.py +0 -477
  316. webscout/Extra/autocoder/__init__.py +0 -9
  317. webscout/Extra/autocoder/autocoder.py +0 -849
  318. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  319. webscout/LLM.py +0 -442
  320. webscout/Litlogger/__init__.py +0 -67
  321. webscout/Litlogger/core/__init__.py +0 -6
  322. webscout/Litlogger/core/level.py +0 -23
  323. webscout/Litlogger/core/logger.py +0 -165
  324. webscout/Litlogger/handlers/__init__.py +0 -12
  325. webscout/Litlogger/handlers/console.py +0 -33
  326. webscout/Litlogger/handlers/file.py +0 -143
  327. webscout/Litlogger/handlers/network.py +0 -173
  328. webscout/Litlogger/styles/__init__.py +0 -7
  329. webscout/Litlogger/styles/colors.py +0 -249
  330. webscout/Litlogger/styles/formats.py +0 -458
  331. webscout/Litlogger/styles/text.py +0 -87
  332. webscout/Litlogger/utils/__init__.py +0 -6
  333. webscout/Litlogger/utils/detectors.py +0 -153
  334. webscout/Litlogger/utils/formatters.py +0 -200
  335. webscout/Local/__init__.py +0 -12
  336. webscout/Local/__main__.py +0 -9
  337. webscout/Local/api.py +0 -576
  338. webscout/Local/cli.py +0 -516
  339. webscout/Local/config.py +0 -75
  340. webscout/Local/llm.py +0 -287
  341. webscout/Local/model_manager.py +0 -253
  342. webscout/Local/server.py +0 -721
  343. webscout/Local/utils.py +0 -93
  344. webscout/Provider/AI21.py +0 -177
  345. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  346. webscout/Provider/AISEARCH/ISou.py +0 -256
  347. webscout/Provider/AISEARCH/felo_search.py +0 -228
  348. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  349. webscout/Provider/AISEARCH/hika_search.py +0 -194
  350. webscout/Provider/AISEARCH/scira_search.py +0 -324
  351. webscout/Provider/Aitopia.py +0 -292
  352. webscout/Provider/AllenAI.py +0 -413
  353. webscout/Provider/Blackboxai.py +0 -229
  354. webscout/Provider/C4ai.py +0 -432
  355. webscout/Provider/ChatGPTClone.py +0 -226
  356. webscout/Provider/ChatGPTES.py +0 -237
  357. webscout/Provider/ChatGPTGratis.py +0 -194
  358. webscout/Provider/Chatify.py +0 -175
  359. webscout/Provider/Cloudflare.py +0 -273
  360. webscout/Provider/DeepSeek.py +0 -196
  361. webscout/Provider/ElectronHub.py +0 -709
  362. webscout/Provider/ExaChat.py +0 -342
  363. webscout/Provider/Free2GPT.py +0 -241
  364. webscout/Provider/GPTWeb.py +0 -193
  365. webscout/Provider/Glider.py +0 -211
  366. webscout/Provider/HF_space/__init__.py +0 -0
  367. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  368. webscout/Provider/HuggingFaceChat.py +0 -462
  369. webscout/Provider/Hunyuan.py +0 -272
  370. webscout/Provider/LambdaChat.py +0 -392
  371. webscout/Provider/Llama.py +0 -200
  372. webscout/Provider/Llama3.py +0 -204
  373. webscout/Provider/Marcus.py +0 -148
  374. webscout/Provider/OLLAMA.py +0 -396
  375. webscout/Provider/OPENAI/c4ai.py +0 -367
  376. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  377. webscout/Provider/OPENAI/exachat.py +0 -433
  378. webscout/Provider/OPENAI/freeaichat.py +0 -352
  379. webscout/Provider/OPENAI/opkfc.py +0 -488
  380. webscout/Provider/OPENAI/scirachat.py +0 -463
  381. webscout/Provider/OPENAI/standardinput.py +0 -425
  382. webscout/Provider/OPENAI/typegpt.py +0 -346
  383. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  384. webscout/Provider/OPENAI/venice.py +0 -413
  385. webscout/Provider/OPENAI/yep.py +0 -327
  386. webscout/Provider/OpenGPT.py +0 -199
  387. webscout/Provider/Perplexitylabs.py +0 -415
  388. webscout/Provider/Phind.py +0 -535
  389. webscout/Provider/PizzaGPT.py +0 -198
  390. webscout/Provider/Reka.py +0 -214
  391. webscout/Provider/StandardInput.py +0 -278
  392. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  393. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  394. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  395. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  396. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  397. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  398. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  399. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  400. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  401. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  402. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  403. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  404. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  405. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  406. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  407. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  408. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  409. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  410. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  411. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  412. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  413. webscout/Provider/TTI/artbit/__init__.py +0 -22
  414. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  415. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  416. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  417. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  418. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  419. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  420. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  421. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  422. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  423. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  424. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  425. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  426. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  427. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  428. webscout/Provider/TTI/talkai/__init__.py +0 -4
  429. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  430. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  431. webscout/Provider/TTS/gesserit.py +0 -127
  432. webscout/Provider/TeachAnything.py +0 -187
  433. webscout/Provider/Venice.py +0 -219
  434. webscout/Provider/VercelAI.py +0 -234
  435. webscout/Provider/WebSim.py +0 -228
  436. webscout/Provider/Writecream.py +0 -211
  437. webscout/Provider/WritingMate.py +0 -197
  438. webscout/Provider/aimathgpt.py +0 -189
  439. webscout/Provider/askmyai.py +0 -158
  440. webscout/Provider/asksteve.py +0 -203
  441. webscout/Provider/bagoodex.py +0 -145
  442. webscout/Provider/chatglm.py +0 -205
  443. webscout/Provider/copilot.py +0 -428
  444. webscout/Provider/freeaichat.py +0 -271
  445. webscout/Provider/gaurish.py +0 -244
  446. webscout/Provider/geminiprorealtime.py +0 -160
  447. webscout/Provider/granite.py +0 -187
  448. webscout/Provider/hermes.py +0 -219
  449. webscout/Provider/koala.py +0 -268
  450. webscout/Provider/labyrinth.py +0 -340
  451. webscout/Provider/lepton.py +0 -194
  452. webscout/Provider/llamatutor.py +0 -192
  453. webscout/Provider/multichat.py +0 -325
  454. webscout/Provider/promptrefine.py +0 -193
  455. webscout/Provider/scira_chat.py +0 -277
  456. webscout/Provider/scnet.py +0 -187
  457. webscout/Provider/talkai.py +0 -194
  458. webscout/Provider/tutorai.py +0 -252
  459. webscout/Provider/typegpt.py +0 -232
  460. webscout/Provider/uncovr.py +0 -312
  461. webscout/Provider/yep.py +0 -376
  462. webscout/litprinter/__init__.py +0 -59
  463. webscout/scout/core.py +0 -881
  464. webscout/tempid.py +0 -128
  465. webscout/webscout_search.py +0 -1346
  466. webscout/webscout_search_async.py +0 -877
  467. webscout/yep_search.py +0 -297
  468. webscout-8.2.2.dist-info/METADATA +0 -734
  469. webscout-8.2.2.dist-info/RECORD +0 -309
  470. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  471. webscout-8.2.2.dist-info/top_level.txt +0 -3
  472. webstoken/__init__.py +0 -30
  473. webstoken/classifier.py +0 -189
  474. webstoken/keywords.py +0 -216
  475. webstoken/language.py +0 -128
  476. webstoken/ner.py +0 -164
  477. webstoken/normalizer.py +0 -35
  478. webstoken/processor.py +0 -77
  479. webstoken/sentiment.py +0 -206
  480. webstoken/stemmer.py +0 -73
  481. webstoken/tagger.py +0 -60
  482. webstoken/tokenizer.py +0 -158
  483. {webscout-8.2.2.dist-info → webscout-2026.1.19.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/Groq.py CHANGED
@@ -1,670 +1,556 @@
1
- from typing import Any, AsyncGenerator, Dict, Optional, Callable, List, Union
2
-
3
- import httpx
4
- import requests
5
- import json
6
-
7
- from webscout.AIutel import Optimizers
8
- from webscout.AIutel import Conversation
9
- from webscout.AIutel import AwesomePrompts, sanitize_stream
10
- from webscout.AIbase import Provider, AsyncProvider
11
- from webscout import exceptions
12
-
13
- class GROQ(Provider):
14
- """
15
- A class to interact with the GROQ AI API.
16
- """
17
-
18
- AVAILABLE_MODELS = [
19
- "distil-whisper-large-v3-en",
20
- "gemma2-9b-it",
21
- "llama-3.3-70b-versatile",
22
- "llama-3.1-8b-instant",
23
- "llama-guard-3-8b",
24
- "llama3-70b-8192",
25
- "llama3-8b-8192",
26
- "whisper-large-v3",
27
- "whisper-large-v3-turbo",
28
- "meta-llama/llama-4-scout-17b-16e-instruct",
29
- "meta-llama/llama-4-maverick-17b-128e-instruct",
30
- "playai-tts",
31
- "playai-tts-arabic",
32
- "qwen-qwq-32b",
33
- "mistral-saba-24b",
34
- "qwen-2.5-coder-32b",
35
- "qwen-2.5-32b",
36
- "deepseek-r1-distill-qwen-32b",
37
- "deepseek-r1-distill-llama-70b",
38
- "llama-3.3-70b-specdec",
39
- "llama-3.2-1b-preview",
40
- "llama-3.2-3b-preview",
41
- "llama-3.2-11b-vision-preview",
42
- "llama-3.2-90b-vision-preview",
43
- "mixtral-8x7b-32768"
44
- ]
45
-
46
- def __init__(
47
- self,
48
- api_key: str,
49
- is_conversation: bool = True,
50
- max_tokens: int = 600,
51
- temperature: float = 1,
52
- presence_penalty: int = 0,
53
- frequency_penalty: int = 0,
54
- top_p: float = 1,
55
- model: str = "mixtral-8x7b-32768",
56
- timeout: int = 30,
57
- intro: str = None,
58
- filepath: str = None,
59
- update_file: bool = True,
60
- proxies: dict = {},
61
- history_offset: int = 10250,
62
- act: str = None,
63
- system_prompt: Optional[str] = None,
64
- ):
65
- """Instantiates GROQ
66
-
67
- Args:
68
- api_key (key): GROQ's API key.
69
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
70
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
71
- temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
72
- presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
73
- frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
74
- top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
75
- model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
76
- timeout (int, optional): Http request timeout. Defaults to 30.
77
- intro (str, optional): Conversation introductory prompt. Defaults to None.
78
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
79
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
80
- proxies (dict, optional): Http request proxies. Defaults to {}.
81
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
82
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
83
- system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
84
- """
85
- if model not in self.AVAILABLE_MODELS:
86
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
87
-
88
- self.session = requests.Session()
89
- self.is_conversation = is_conversation
90
- self.max_tokens_to_sample = max_tokens
91
- self.api_key = api_key
92
- self.model = model
93
- self.temperature = temperature
94
- self.presence_penalty = presence_penalty
95
- self.frequency_penalty = frequency_penalty
96
- self.top_p = top_p
97
- self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
98
- self.stream_chunk_size = 64
99
- self.timeout = timeout
100
- self.last_response = {}
101
- self.system_prompt = system_prompt
102
- self.available_functions: Dict[str, Callable] = {} # Store available functions
103
- self.headers = {
104
- "Content-Type": "application/json",
105
- "Authorization": f"Bearer {self.api_key}",
106
- }
107
-
108
- self.__available_optimizers = (
109
- method
110
- for method in dir(Optimizers)
111
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
112
- )
113
- self.session.headers.update(self.headers)
114
- Conversation.intro = (
115
- AwesomePrompts().get_act(
116
- act, raise_not_found=True, default=None, case_insensitive=True
117
- )
118
- if act
119
- else intro or Conversation.intro
120
- )
121
- self.conversation = Conversation(
122
- is_conversation, self.max_tokens_to_sample, filepath, update_file
123
- )
124
- self.conversation.history_offset = history_offset
125
- self.session.proxies = proxies
126
-
127
- def add_function(self, function_name: str, function: Callable):
128
- """Add a function to the available functions dictionary.
129
-
130
- Args:
131
- function_name (str): The name of the function to be used in the prompt.
132
- function (Callable): The function itself.
133
- """
134
- self.available_functions[function_name] = function
135
-
136
- def ask(
137
- self,
138
- prompt: str,
139
- stream: bool = False,
140
- raw: bool = False,
141
- optimizer: str = None,
142
- conversationally: bool = False,
143
- tools: Optional[List[Dict[str, Any]]] = None, # Add tools parameter
144
- ) -> dict:
145
- """Chat with AI
146
-
147
- Args:
148
- prompt (str): Prompt to be send.
149
- stream (bool, optional): Flag for streaming response. Defaults to False.
150
- raw (bool, optional): Stream back raw response as received. Defaults to False.
151
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
152
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
153
- tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
154
-
155
- Returns:
156
- dict : {}
157
- """
158
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
159
- if optimizer:
160
- if optimizer in self.__available_optimizers:
161
- conversation_prompt = getattr(Optimizers, optimizer)(
162
- conversation_prompt if conversationally else prompt
163
- )
164
- else:
165
- raise Exception(
166
- f"Optimizer is not one of {self.__available_optimizers}"
167
- )
168
-
169
- messages = [{"content": conversation_prompt, "role": "user"}]
170
- if self.system_prompt:
171
- messages.insert(0, {"role": "system", "content": self.system_prompt})
172
-
173
- self.session.headers.update(self.headers)
174
- payload = {
175
- "frequency_penalty": self.frequency_penalty,
176
- "messages": messages,
177
- "model": self.model,
178
- "presence_penalty": self.presence_penalty,
179
- "stream": stream,
180
- "temperature": self.temperature,
181
- "top_p": self.top_p,
182
- "tools": tools # Include tools in the payload
183
- }
184
-
185
- def for_stream():
186
- response = self.session.post(
187
- self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
188
- )
189
- if not response.ok:
190
- raise exceptions.FailedToGenerateResponseError(
191
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
192
- )
193
-
194
- message_load = ""
195
- for value in response.iter_lines(
196
- decode_unicode=True,
197
- delimiter="" if raw else "data:",
198
- chunk_size=self.stream_chunk_size,
199
- ):
200
- try:
201
- resp = json.loads(value)
202
- incomplete_message = self.get_message(resp)
203
- if incomplete_message:
204
- message_load += incomplete_message
205
- resp["choices"][0]["delta"]["content"] = message_load
206
- self.last_response.update(resp)
207
- yield value if raw else resp
208
- elif raw:
209
- yield value
210
- except json.decoder.JSONDecodeError:
211
- pass
212
-
213
- # Handle tool calls if any
214
- if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
215
- tool_calls = self.last_response['choices'][0]['message']['tool_calls']
216
- for tool_call in tool_calls:
217
- function_name = tool_call.get('function', {}).get('name')
218
- arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
219
- if function_name in self.available_functions:
220
- tool_response = self.available_functions[function_name](**arguments)
221
- messages.append({
222
- "tool_call_id": tool_call['id'],
223
- "role": "tool",
224
- "name": function_name,
225
- "content": tool_response
226
- })
227
- payload['messages'] = messages
228
- # Make a second call to get the final response
229
- second_response = self.session.post(
230
- self.chat_endpoint, json=payload, timeout=self.timeout
231
- )
232
- if second_response.ok:
233
- self.last_response = second_response.json()
234
- else:
235
- raise exceptions.FailedToGenerateResponseError(
236
- f"Failed to execute tool - {second_response.text}"
237
- )
238
-
239
- self.conversation.update_chat_history(
240
- prompt, self.get_message(self.last_response)
241
- )
242
-
243
- def for_non_stream():
244
- response = self.session.post(
245
- self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
246
- )
247
- if (
248
- not response.ok
249
- or not response.headers.get("Content-Type", "") == "application/json"
250
- ):
251
- raise exceptions.FailedToGenerateResponseError(
252
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
253
- )
254
- resp = response.json()
255
-
256
- # Handle tool calls if any
257
- if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
258
- tool_calls = resp['choices'][0]['message']['tool_calls']
259
- for tool_call in tool_calls:
260
- function_name = tool_call.get('function', {}).get('name')
261
- arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
262
- if function_name in self.available_functions:
263
- tool_response = self.available_functions[function_name](**arguments)
264
- messages.append({
265
- "tool_call_id": tool_call['id'],
266
- "role": "tool",
267
- "name": function_name,
268
- "content": tool_response
269
- })
270
- payload['messages'] = messages
271
- # Make a second call to get the final response
272
- second_response = self.session.post(
273
- self.chat_endpoint, json=payload, timeout=self.timeout
274
- )
275
- if second_response.ok:
276
- resp = second_response.json()
277
- else:
278
- raise exceptions.FailedToGenerateResponseError(
279
- f"Failed to execute tool - {second_response.text}"
280
- )
281
-
282
- self.last_response.update(resp)
283
- self.conversation.update_chat_history(
284
- prompt, self.get_message(self.last_response)
285
- )
286
- return resp
287
-
288
- return for_stream() if stream else for_non_stream()
289
-
290
-
291
def chat(
    self,
    prompt: str,
    stream: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
    tools: Optional[List[Dict[str, Any]]] = None,
) -> str:
    """Generate a text response for *prompt*.

    Args:
        prompt (str): Prompt to be send.
        stream (bool, optional): Flag for streaming response. Defaults to False.
        optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
        conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        tools (List[Dict[str, Any]], optional): List of tool definitions. Defaults to None.

    Returns:
        str: Response generated (or a generator of str chunks when streaming).
    """

    def _streamed():
        # Delegate to ask() in streaming mode and surface only the text.
        chunks = self.ask(
            prompt,
            True,
            optimizer=optimizer,
            conversationally=conversationally,
            tools=tools,
        )
        for chunk in chunks:
            yield self.get_message(chunk)

    if stream:
        return _streamed()
    return self.get_message(
        self.ask(
            prompt,
            False,
            optimizer=optimizer,
            conversationally=conversationally,
            tools=tools,
        )
    )
328
-
329
def get_message(self, response: dict) -> str:
    """Extract the assistant text from a response produced by `self.ask`.

    Handles both streaming chunks (``choices[0].delta``) and full
    completions (``choices[0].message``).

    Args:
        response (dict): Response generated by `self.ask`

    Returns:
        str: Message extracted; "" when no text is present.
    """
    assert isinstance(response, dict), "Response should be of dict data-type only"
    try:
        choice = response["choices"][0]
    except (KeyError, IndexError):
        # FIX: the original only caught KeyError, so an empty "choices"
        # list raised IndexError instead of returning "".
        return ""
    delta = choice.get("delta")
    if delta:
        # Streaming chunks may carry "content": None (e.g. role-only first
        # chunk); normalize to "" to honor the -> str contract.
        return delta.get("content") or ""
    message = choice.get("message") or {}
    return message.get("content") or ""
345
-
346
-
347
class AsyncGROQ(AsyncProvider):
    """
    An asynchronous class to interact with the GROQ AI API.

    Mirrors the synchronous GROQ provider but performs all HTTP traffic
    through an ``httpx.AsyncClient``.
    """

    AVAILABLE_MODELS = [
        "distil-whisper-large-v3-en",
        "gemma2-9b-it",
        "llama-3.3-70b-versatile",
        "llama-3.1-8b-instant",
        "llama-guard-3-8b",
        "llama3-70b-8192",
        "llama3-8b-8192",
        "whisper-large-v3",
        "whisper-large-v3-turbo",
        "meta-llama/llama-4-scout-17b-16e-instruct",
        "meta-llama/llama-4-maverick-17b-128e-instruct",
        "playai-tts",
        "playai-tts-arabic",
        "qwen-qwq-32b",
        "mistral-saba-24b",
        "qwen-2.5-coder-32b",
        "qwen-2.5-32b",
        "deepseek-r1-distill-qwen-32b",
        "deepseek-r1-distill-llama-70b",
        "llama-3.3-70b-specdec",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-11b-vision-preview",
        "llama-3.2-90b-vision-preview",
        "mixtral-8x7b-32768"
    ]

    def __init__(
        self,
        api_key: str,
        is_conversation: bool = True,
        max_tokens: int = 600,
        temperature: float = 1,
        presence_penalty: int = 0,
        frequency_penalty: int = 0,
        top_p: float = 1,
        model: str = "mixtral-8x7b-32768",
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        system_prompt: Optional[str] = None,
    ):
        """Instantiates AsyncGROQ

        Args:
            api_key (str): GROQ's API key.
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
            top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
            model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to None (no proxies).
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_key = api_key
        self.model = model
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p
        self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.available_functions: Dict[str, Callable] = {}  # Store available functions
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        # FIX: materialized as a tuple. The original used a generator
        # expression, which is exhausted by the first `in` membership test
        # in ask(), making every later optimizer validation fail silently.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        # FIX: mutable default `proxies={}` replaced with None; httpx
        # treats both as "no proxies".
        self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)

    def add_function(self, function_name: str, function: Callable):
        """Add a function to the available functions dictionary.

        Args:
            function_name (str): The name of the function to be used in the prompt.
            function (Callable): The function itself.
        """
        self.available_functions[function_name] = function

    async def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Union[dict, AsyncGenerator]:
        """Chat with AI asynchronously.

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
            tools (List[Dict[str, Any]], optional): List of tool definitions. Defaults to None.

        Returns:
            dict|AsyncGenerator : ai content
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        messages = [{"content": conversation_prompt, "role": "user"}]
        if self.system_prompt:
            messages.insert(0, {"role": "system", "content": self.system_prompt})

        payload = {
            "frequency_penalty": self.frequency_penalty,
            "messages": messages,
            "model": self.model,
            "presence_penalty": self.presence_penalty,
            "stream": stream,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "tools": tools
        }

        async def for_stream():
            async with self.session.stream(
                "POST", self.chat_endpoint, json=payload, timeout=self.timeout
            ) as response:
                if not response.is_success:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
                    )

                message_load = ""
                intro_value = "data:"
                async for value in response.aiter_lines():
                    try:
                        if value.startswith(intro_value):
                            value = value[len(intro_value):]
                        resp = json.loads(value)
                        incomplete_message = await self.get_message(resp)
                        if incomplete_message:
                            message_load += incomplete_message
                            # Re-write the delta so consumers always see the
                            # accumulated text so far.
                            resp["choices"][0]["delta"]["content"] = message_load
                            self.last_response.update(resp)
                            yield value if raw else resp
                        elif raw:
                            yield value
                    except json.decoder.JSONDecodeError:
                        # Skip keep-alive / non-JSON SSE lines.
                        pass

            # Handle tool calls if any (in streaming mode)
            if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
                tool_calls = self.last_response['choices'][0]['message']['tool_calls']
                for tool_call in tool_calls:
                    function_name = tool_call.get('function', {}).get('name')
                    arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
                    if function_name in self.available_functions:
                        tool_response = self.available_functions[function_name](**arguments)
                        messages.append({
                            "tool_call_id": tool_call['id'],
                            "role": "tool",
                            "name": function_name,
                            "content": tool_response
                        })
                payload['messages'] = messages
                # Make a second call to get the final response
                second_response = await self.session.post(
                    self.chat_endpoint, json=payload, timeout=self.timeout
                )
                if second_response.is_success:
                    self.last_response = second_response.json()
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to execute tool - {second_response.text}"
                    )

            self.conversation.update_chat_history(
                prompt, await self.get_message(self.last_response)
            )

        async def for_non_stream():
            response = await self.session.post(
                self.chat_endpoint, json=payload, timeout=self.timeout
            )
            if not response.is_success:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
                )
            resp = response.json()

            # Handle tool calls if any (in non-streaming mode)
            if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
                tool_calls = resp['choices'][0]['message']['tool_calls']
                for tool_call in tool_calls:
                    function_name = tool_call.get('function', {}).get('name')
                    arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
                    if function_name in self.available_functions:
                        tool_response = self.available_functions[function_name](**arguments)
                        messages.append({
                            "tool_call_id": tool_call['id'],
                            "role": "tool",
                            "name": function_name,
                            "content": tool_response
                        })
                payload['messages'] = messages
                # Make a second call to get the final response
                second_response = await self.session.post(
                    self.chat_endpoint, json=payload, timeout=self.timeout
                )
                if second_response.is_success:
                    resp = second_response.json()
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to execute tool - {second_response.text}"
                    )

            self.last_response.update(resp)
            self.conversation.update_chat_history(
                prompt, await self.get_message(self.last_response)
            )
            return resp

        return for_stream() if stream else await for_non_stream()

    async def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Union[str, AsyncGenerator]:
        """Generate response `str` asynchronously.

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
            tools (List[Dict[str, Any]], optional): List of tool definitions. Defaults to None.

        Returns:
            str|AsyncGenerator: Response generated
        """

        async def for_stream():
            # Awaiting ask() yields the async generator it returns.
            async_ask = await self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally, tools=tools
            )
            async for response in async_ask:
                yield await self.get_message(response)

        async def for_non_stream():
            return await self.get_message(
                await self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                    tools=tools
                )
            )

        return for_stream() if stream else await for_non_stream()

    async def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        try:
            if response["choices"][0].get("delta"):
                return response["choices"][0]["delta"]["content"]
            return response["choices"][0]["message"]["content"]
        except KeyError:
            return ""
1
+ import json
2
+ from typing import Any, Callable, Dict, Generator, List, Optional, Union, cast
3
+
4
+ from curl_cffi import CurlError
5
+
6
+ # Import curl_cffi for improved request handling
7
+ from curl_cffi.requests import Session
8
+
9
+ from webscout import exceptions
10
+ from webscout.AIbase import Provider, Response
11
+ from webscout.AIutel import ( # Import sanitize_stream
12
+ AwesomePrompts,
13
+ Conversation,
14
+ Optimizers,
15
+ sanitize_stream,
16
+ )
17
+
18
+
19
+ class GROQ(Provider):
20
+ """
21
+ A class to interact with the GROQ AI API.
22
+ """
23
+
24
+ required_auth = True
25
+ # Default models list (will be updated dynamically)
26
+ AVAILABLE_MODELS = [
27
+ "distil-whisper-large-v3-en",
28
+ "gemma2-9b-it",
29
+ "llama-3.3-70b-versatile",
30
+ "llama-3.1-8b-instant",
31
+ "llama-guard-3-8b",
32
+ "llama3-70b-8192",
33
+ "llama3-8b-8192",
34
+ "whisper-large-v3",
35
+ "whisper-large-v3-turbo",
36
+ "meta-llama/llama-4-scout-17b-16e-instruct",
37
+ "meta-llama/llama-4-maverick-17b-128e-instruct",
38
+ "playai-tts",
39
+ "playai-tts-arabic",
40
+ "qwen-qwq-32b",
41
+ "mistral-saba-24b",
42
+ "qwen-2.5-coder-32b",
43
+ "qwen-2.5-32b",
44
+ "deepseek-r1-distill-qwen-32b",
45
+ "deepseek-r1-distill-llama-70b",
46
+ "llama-3.3-70b-specdec",
47
+ "llama-3.2-1b-preview",
48
+ "llama-3.2-3b-preview",
49
+ "llama-3.2-11b-vision-preview",
50
+ "llama-3.2-90b-vision-preview",
51
+ "mixtral-8x7b-32768",
52
+ ]
53
+
54
@classmethod
def get_models(cls, api_key: Optional[str] = None):
    """Fetch available model IDs from the Groq API.

    Args:
        api_key (str, optional): Groq API key. If not provided, the cached
            default list is returned without any network call.

    Returns:
        list: List of available model IDs (falls back to
        ``cls.AVAILABLE_MODELS`` on any failure).
    """
    if not api_key:
        return cls.AVAILABLE_MODELS

    temp_session = None
    try:
        # Use a temporary curl_cffi session for this class method.
        temp_session = Session()
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }

        response = temp_session.get(
            "https://api.groq.com/openai/v1/models",
            headers=headers,
            impersonate="chrome110",  # Use impersonate for fetching
        )

        if response.status_code != 200:
            return cls.AVAILABLE_MODELS

        data = response.json()
        if "data" in data and isinstance(data["data"], list):
            return [model["id"] for model in data["data"]]
        return cls.AVAILABLE_MODELS

    except Exception:
        # FIX: `except (CurlError, Exception)` was redundant — Exception
        # already covers CurlError. Any failure falls back to defaults.
        return cls.AVAILABLE_MODELS
    finally:
        # FIX: the throwaway session was never closed (connection leak).
        if temp_session is not None:
            try:
                temp_session.close()
            except Exception:
                pass
92
+
93
def __init__(
    self,
    api_key: str,
    is_conversation: bool = True,
    max_tokens: int = 600,
    temperature: float = 1,
    presence_penalty: int = 0,
    frequency_penalty: int = 0,
    top_p: float = 1,
    model: str = "mixtral-8x7b-32768",
    timeout: int = 30,
    intro: Optional[str] = None,
    filepath: Optional[str] = None,
    update_file: bool = True,
    proxies: Optional[dict] = None,
    history_offset: int = 10250,
    act: Optional[str] = None,
    system_prompt: Optional[str] = None,
):
    """Instantiates GROQ

    Args:
        api_key (str): GROQ's API key.
        is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
        max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
        temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
        presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
        frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
        top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
        model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
        timeout (int, optional): Http request timeout. Defaults to 30.
        intro (str, optional): Conversation introductory prompt. Defaults to None.
        filepath (str, optional): Path to file containing conversation history. Defaults to None.
        update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
        proxies (dict, optional): Http request proxies. Defaults to None (no proxies).
        history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
        act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
        system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
    """
    # Update available models from API (best-effort; falls back to the
    # bundled defaults on any failure).
    self.update_available_models(api_key)

    # Validate model after updating available models
    if model not in self.AVAILABLE_MODELS:
        raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

    # FIX: create the curl_cffi Session exactly once. The previous version
    # instantiated a second Session near the end of __init__, silently
    # discarding the first instance and its configured headers.
    self.session = Session()
    self.is_conversation = is_conversation
    self.max_tokens_to_sample = max_tokens
    self.api_key = api_key
    self.model = model
    self.temperature = temperature
    self.presence_penalty = presence_penalty
    self.frequency_penalty = frequency_penalty
    self.top_p = top_p
    self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
    self.stream_chunk_size = 64
    self.timeout = timeout
    self.last_response = {}
    self.system_prompt = system_prompt
    self.available_functions: Dict[str, Callable] = {}  # Store available functions
    self.headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.api_key}",
    }

    # FIX: materialized as a tuple. A generator expression here is
    # exhausted by the first `in` membership test in ask(), so any later
    # optimizer validation would silently fail.
    self.__available_optimizers = tuple(
        method
        for method in dir(Optimizers)
        if callable(getattr(Optimizers, method)) and not method.startswith("__")
    )

    # Update curl_cffi session headers
    self.session.headers.update(self.headers)

    self.conversation = Conversation(
        is_conversation, self.max_tokens_to_sample, filepath, update_file
    )
    self.conversation.history_offset = history_offset

    if act:
        self.conversation.intro = (
            AwesomePrompts().get_act(
                cast(Union[str, int], act),
                default=self.conversation.intro,
                case_insensitive=True,
            )
            or self.conversation.intro
        )
    elif intro:
        self.conversation.intro = intro

    # Set proxies for curl_cffi session
    if proxies:
        self.session.proxies.update(proxies)
191
+
192
+ @staticmethod
193
+ def _groq_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[Dict]:
194
+ """Extracts the 'delta' object from Groq stream JSON chunks."""
195
+ if isinstance(chunk, dict):
196
+ # Return the delta object itself, or None if not found
197
+ return chunk.get("choices", [{}])[0].get("delta")
198
+ return None
199
+
200
@classmethod
def update_available_models(cls, api_key=None):
    """Refresh ``cls.AVAILABLE_MODELS`` from the Groq API.

    Best-effort: on any failure, or when the API returns nothing, the
    current class-level list is left untouched.
    """
    try:
        fetched = cls.get_models(api_key)
    except Exception:
        # Keep the existing list if the lookup fails for any reason.
        return
    if fetched:
        cls.AVAILABLE_MODELS = fetched
210
+
211
def add_function(self, function_name: str, function: Callable):
    """Register a callable for tool execution.

    Args:
        function_name (str): Name the model will use to refer to the tool.
        function (Callable): Implementation invoked when the model emits a
            matching tool call.
    """
    self.available_functions[function_name] = function
219
+
220
def ask(
    self,
    prompt: str,
    stream: bool = False,
    raw: bool = False,
    optimizer: Optional[str] = None,
    conversationally: bool = False,
    **kwargs: Any,
) -> Response:
    """Chat with AI

    Sends the (optionally optimized) prompt to the Groq chat-completions
    endpoint, handling both streaming and non-streaming modes, and runs a
    second request when the model responds with tool calls that match
    functions registered via ``add_function``.

    Args:
        prompt (str): Prompt to be send.
        stream (bool, optional): Flag for streaming response. Defaults to False.
        raw (bool, optional): Stream back raw response as received. Defaults to False.
        optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
        conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        **kwargs: Additional parameters like tools.

    Returns:
        dict : {}
    """
    tools = kwargs.get("tools")
    conversation_prompt = self.conversation.gen_complete_prompt(prompt)
    if optimizer:
        if optimizer in self.__available_optimizers:
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )
        else:
            raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

    messages = [{"content": conversation_prompt, "role": "user"}]
    if self.system_prompt:
        messages.insert(0, {"role": "system", "content": self.system_prompt})

    self.session.headers.update(self.headers)
    payload = {
        "frequency_penalty": self.frequency_penalty,
        "messages": messages,
        "model": self.model,
        "presence_penalty": self.presence_penalty,
        "stream": stream,
        "temperature": self.temperature,
        "top_p": self.top_p,
        "tools": tools,  # Include tools in the payload
    }

    def for_stream():
        # Streams SSE chunks, yielding per-chunk text (or raw deltas) and
        # accumulating the full text into self.last_response.
        resp = {}
        try:
            response = self.session.post(
                self.chat_endpoint,
                json=payload,
                stream=True,
                timeout=self.timeout,
                impersonate="chrome110",  # Use impersonate for better compatibility
            )
            if not response.status_code == 200:
                raise exceptions.FailedToGenerateResponseError(
                    # Removed response.reason_phrase
                    f"Failed to generate response - ({response.status_code}) - {response.text}"
                )

            streaming_text = ""
            # Use sanitize_stream
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),  # Pass byte iterator
                intro_value="data:",
                to_json=True,  # Stream sends JSON
                content_extractor=self._groq_extractor,  # Use the delta extractor
                yield_raw_on_error=False,  # Skip non-JSON lines or lines where extractor fails
                raw=raw,
            )

            for delta in processed_stream:
                # delta is the extracted 'delta' object or None
                if raw:
                    yield delta
                else:
                    if delta and isinstance(delta, dict):
                        content = delta.get("content")
                        if content:
                            streaming_text += content
                            resp = {"text": content}  # Yield only the new chunk text
                            # last_response mirrors an OpenAI-style chunk but
                            # holds the *accumulated* text so far.
                            self.last_response = {
                                "choices": [{"delta": {"content": streaming_text}}]
                            }  # Update last_response structure
                            # NOTE(review): `raw` is False on this branch, so
                            # this always yields `resp`; the `else content`
                            # arm looks unreachable — confirm intent.
                            yield resp if not raw else content  # Yield dict or raw string chunk
            # Note: Tool calls in streaming delta are less common in OpenAI format, usually in final message

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")

        # Handle tool calls if any
        # NOTE(review): last_response built above only contains "delta", never
        # "message", so this branch appears dead in streaming mode — confirm.
        first_choice = self.last_response.get("choices", [{}])[0]
        message = first_choice.get("message", {})
        if "tool_calls" in message:
            tool_calls = message.get("tool_calls", [])
            for tool_call in tool_calls:
                if not isinstance(tool_call, dict):
                    continue
                function_name = tool_call.get("function", {}).get("name")
                arguments = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
                if function_name in self.available_functions:
                    # Execute the registered tool and append its result for
                    # the follow-up completion request.
                    tool_response = self.available_functions[function_name](**arguments)
                    messages.append(
                        {
                            "tool_call_id": tool_call.get("id"),
                            "role": "tool",
                            "name": function_name,
                            "content": tool_response,
                        }
                    )
            payload["messages"] = messages
            # Make a second call to get the final response
            try:
                second_response = self.session.post(
                    self.chat_endpoint,
                    json=payload,
                    timeout=self.timeout,
                    impersonate="chrome110",  # Use impersonate for better compatibility
                )
                if second_response.status_code == 200:
                    self.last_response = second_response.json()
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to execute tool - {second_response.text}"
                    )
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(
                    f"CurlError during tool execution: {str(e)}"
                )
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(
                    f"Error during tool execution: {str(e)}"
                )

        self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

    def for_non_stream():
        # Single-request path: parse the full JSON body, resolve tool calls
        # if present, then record history and return the parsed dict.
        resp = {}
        response = None
        try:
            response = self.session.post(
                self.chat_endpoint,
                json=payload,
                stream=False,
                timeout=self.timeout,
                impersonate="chrome110",  # Use impersonate for better compatibility
            )
            if not response.status_code == 200:
                raise exceptions.FailedToGenerateResponseError(
                    # Removed response.reason_phrase
                    f"Failed to generate response - ({response.status_code}) - {response.text}"
                )

            response_text = response.text  # Get raw text

            # Use sanitize_stream to parse the non-streaming JSON response
            processed_stream = sanitize_stream(
                data=response_text,
                to_json=True,  # Parse the whole text as JSON
                intro_value=None,
                # Extractor for non-stream structure (returns the whole parsed dict)
                content_extractor=lambda chunk: chunk if isinstance(chunk, dict) else None,
                yield_raw_on_error=False,
                raw=raw,
            )

            # Extract the single result (the parsed JSON dictionary)
            resp = next(processed_stream, None)
            if raw:
                return resp
            if resp is None:
                raise exceptions.FailedToGenerateResponseError(
                    "Failed to parse non-stream JSON response"
                )

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
        except Exception as e:
            # Catch the original AttributeError here if it happens before the raise
            if (
                isinstance(e, AttributeError)
                and "reason_phrase" in str(e)
                and response is not None
            ):
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}) - {response.text}"
                )
            raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")

        # Handle tool calls if any
        first_choice = resp.get("choices", [{}])[0]
        message = first_choice.get("message", {})
        if "tool_calls" in message:
            tool_calls = message.get("tool_calls", [])
            for tool_call in tool_calls:
                if not isinstance(tool_call, dict):
                    continue
                function_name = tool_call.get("function", {}).get("name")
                arguments = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
                if function_name in self.available_functions:
                    # Execute the registered tool and feed its output back
                    # to the model for the final answer.
                    tool_response = self.available_functions[function_name](**arguments)
                    messages.append(
                        {
                            "tool_call_id": tool_call.get("id"),
                            "role": "tool",
                            "name": function_name,
                            "content": tool_response,
                        }
                    )
            payload["messages"] = messages
            # Make a second call to get the final response
            try:
                second_response = self.session.post(
                    self.chat_endpoint,
                    json=payload,
                    timeout=self.timeout,
                    impersonate="chrome110",  # Use impersonate for better compatibility
                )
                if second_response.status_code == 200:
                    resp = second_response.json()
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to execute tool - {second_response.text}"
                    )
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(
                    f"CurlError during tool execution: {str(e)}"
                )
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(
                    f"Error during tool execution: {str(e)}"
                )

        self.last_response.update(resp)
        self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
        return resp

    return for_stream() if stream else for_non_stream()
464
+
465
+ def chat(
466
+ self,
467
+ prompt: str,
468
+ stream: bool = False,
469
+ optimizer: Optional[str] = None,
470
+ conversationally: bool = False,
471
+ **kwargs: Any,
472
+ ) -> Union[str, Generator[str, None, None]]:
473
+ """Generate response `str`
474
+ Args:
475
+ prompt (str): Prompt to be send.
476
+ stream (bool, optional): Flag for streaming response. Defaults to False.
477
+ optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
478
+ conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
479
+ **kwargs: Additional parameters like tools.
480
+ Returns:
481
+ str: Response generated
482
+ """
483
+ raw = kwargs.get("raw", False)
484
+ if stream:
485
+
486
+ def for_stream():
487
+ gen = self.ask(
488
+ prompt,
489
+ True,
490
+ raw=raw,
491
+ optimizer=optimizer,
492
+ conversationally=conversationally,
493
+ **kwargs,
494
+ )
495
+ if hasattr(gen, "__iter__"):
496
+ for response in gen:
497
+ if raw:
498
+ yield cast(str, response)
499
+ else:
500
+ yield self.get_message(response)
501
+
502
+ return for_stream()
503
+ else:
504
+ result = self.ask(
505
+ prompt,
506
+ False,
507
+ raw=raw,
508
+ optimizer=optimizer,
509
+ conversationally=conversationally,
510
+ **kwargs,
511
+ )
512
+ if raw:
513
+ return cast(str, result)
514
+ return self.get_message(result)
515
+
516
+ def get_message(self, response: Response) -> str:
517
+ """Retrieves message only from response
518
+
519
+ Args:
520
+ response (Response): Response generated by `self.ask`
521
+
522
+ Returns:
523
+ str: Message extracted
524
+ """
525
+ if not isinstance(response, dict):
526
+ return str(response)
527
+
528
+ resp_dict = cast(Dict[str, Any], response)
529
+ try:
530
+ # Check delta first for streaming
531
+ if (
532
+ resp_dict.get("choices")
533
+ and resp_dict["choices"][0].get("delta")
534
+ and resp_dict["choices"][0]["delta"].get("content")
535
+ ):
536
+ return resp_dict["choices"][0]["delta"]["content"]
537
+ # Check message content for non-streaming or final message
538
+ if (
539
+ resp_dict.get("choices")
540
+ and resp_dict["choices"][0].get("message")
541
+ and resp_dict["choices"][0]["message"].get("content")
542
+ ):
543
+ return resp_dict["choices"][0]["message"]["content"]
544
+ except (KeyError, IndexError, TypeError):
545
+ # Handle cases where the structure might be different or content is null/missing
546
+ pass
547
+ return "" # Return empty string if no content found
548
+
549
+
550
+ if __name__ == "__main__":
551
+ # Example usage
552
+ api_key = "gsk_*******************************"
553
+ groq = GROQ(api_key=api_key, model="compound-beta")
554
+ prompt = "What is the capital of France?"
555
+ response = groq.chat(prompt)
556
+ print(response)