webscout-8.2.2-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (306)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIbase.py +144 -7
  3. webscout/AIutel.py +249 -131
  4. webscout/Bard.py +579 -206
  5. webscout/DWEBS.py +78 -35
  6. webscout/__init__.py +0 -1
  7. webscout/cli.py +256 -0
  8. webscout/conversation.py +307 -436
  9. webscout/exceptions.py +23 -0
  10. webscout/prompt_manager.py +56 -42
  11. webscout/version.py +1 -1
  12. webscout/webscout_search.py +65 -47
  13. webscout/webscout_search_async.py +81 -126
  14. webscout/yep_search.py +93 -43
  15. {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
  16. webscout-8.2.7.dist-info/RECORD +26 -0
  17. {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
  18. webscout-8.2.7.dist-info/entry_points.txt +3 -0
  19. webscout-8.2.7.dist-info/top_level.txt +1 -0
  20. inferno/__init__.py +0 -6
  21. inferno/__main__.py +0 -9
  22. inferno/cli.py +0 -6
  23. webscout/Extra/GitToolkit/__init__.py +0 -10
  24. webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
  25. webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
  26. webscout/Extra/GitToolkit/gitapi/user.py +0 -96
  27. webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
  28. webscout/Extra/YTToolkit/YTdownloader.py +0 -957
  29. webscout/Extra/YTToolkit/__init__.py +0 -3
  30. webscout/Extra/YTToolkit/transcriber.py +0 -476
  31. webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
  32. webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
  33. webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
  34. webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
  35. webscout/Extra/YTToolkit/ytapi/https.py +0 -88
  36. webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
  37. webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
  38. webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
  39. webscout/Extra/YTToolkit/ytapi/query.py +0 -40
  40. webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
  41. webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
  42. webscout/Extra/YTToolkit/ytapi/video.py +0 -232
  43. webscout/Extra/__init__.py +0 -7
  44. webscout/Extra/autocoder/__init__.py +0 -9
  45. webscout/Extra/autocoder/autocoder.py +0 -849
  46. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  47. webscout/Extra/gguf.py +0 -682
  48. webscout/Extra/tempmail/__init__.py +0 -28
  49. webscout/Extra/tempmail/async_utils.py +0 -141
  50. webscout/Extra/tempmail/base.py +0 -161
  51. webscout/Extra/tempmail/cli.py +0 -187
  52. webscout/Extra/tempmail/emailnator.py +0 -84
  53. webscout/Extra/tempmail/mail_tm.py +0 -361
  54. webscout/Extra/tempmail/temp_mail_io.py +0 -292
  55. webscout/Extra/weather.py +0 -194
  56. webscout/Extra/weather_ascii.py +0 -76
  57. webscout/LLM.py +0 -442
  58. webscout/Litlogger/__init__.py +0 -67
  59. webscout/Litlogger/core/__init__.py +0 -6
  60. webscout/Litlogger/core/level.py +0 -23
  61. webscout/Litlogger/core/logger.py +0 -165
  62. webscout/Litlogger/handlers/__init__.py +0 -12
  63. webscout/Litlogger/handlers/console.py +0 -33
  64. webscout/Litlogger/handlers/file.py +0 -143
  65. webscout/Litlogger/handlers/network.py +0 -173
  66. webscout/Litlogger/styles/__init__.py +0 -7
  67. webscout/Litlogger/styles/colors.py +0 -249
  68. webscout/Litlogger/styles/formats.py +0 -458
  69. webscout/Litlogger/styles/text.py +0 -87
  70. webscout/Litlogger/utils/__init__.py +0 -6
  71. webscout/Litlogger/utils/detectors.py +0 -153
  72. webscout/Litlogger/utils/formatters.py +0 -200
  73. webscout/Local/__init__.py +0 -12
  74. webscout/Local/__main__.py +0 -9
  75. webscout/Local/api.py +0 -576
  76. webscout/Local/cli.py +0 -516
  77. webscout/Local/config.py +0 -75
  78. webscout/Local/llm.py +0 -287
  79. webscout/Local/model_manager.py +0 -253
  80. webscout/Local/server.py +0 -721
  81. webscout/Local/utils.py +0 -93
  82. webscout/Provider/AI21.py +0 -177
  83. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  84. webscout/Provider/AISEARCH/ISou.py +0 -256
  85. webscout/Provider/AISEARCH/Perplexity.py +0 -359
  86. webscout/Provider/AISEARCH/__init__.py +0 -10
  87. webscout/Provider/AISEARCH/felo_search.py +0 -228
  88. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  89. webscout/Provider/AISEARCH/hika_search.py +0 -194
  90. webscout/Provider/AISEARCH/iask_search.py +0 -436
  91. webscout/Provider/AISEARCH/monica_search.py +0 -246
  92. webscout/Provider/AISEARCH/scira_search.py +0 -324
  93. webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
  94. webscout/Provider/Aitopia.py +0 -292
  95. webscout/Provider/AllenAI.py +0 -413
  96. webscout/Provider/Andi.py +0 -228
  97. webscout/Provider/Blackboxai.py +0 -229
  98. webscout/Provider/C4ai.py +0 -432
  99. webscout/Provider/ChatGPTClone.py +0 -226
  100. webscout/Provider/ChatGPTES.py +0 -237
  101. webscout/Provider/ChatGPTGratis.py +0 -194
  102. webscout/Provider/Chatify.py +0 -175
  103. webscout/Provider/Cloudflare.py +0 -273
  104. webscout/Provider/Cohere.py +0 -208
  105. webscout/Provider/DeepSeek.py +0 -196
  106. webscout/Provider/Deepinfra.py +0 -297
  107. webscout/Provider/ElectronHub.py +0 -709
  108. webscout/Provider/ExaAI.py +0 -261
  109. webscout/Provider/ExaChat.py +0 -342
  110. webscout/Provider/Free2GPT.py +0 -241
  111. webscout/Provider/GPTWeb.py +0 -193
  112. webscout/Provider/Gemini.py +0 -169
  113. webscout/Provider/GithubChat.py +0 -367
  114. webscout/Provider/Glider.py +0 -211
  115. webscout/Provider/Groq.py +0 -670
  116. webscout/Provider/HF_space/__init__.py +0 -0
  117. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  118. webscout/Provider/HeckAI.py +0 -233
  119. webscout/Provider/HuggingFaceChat.py +0 -462
  120. webscout/Provider/Hunyuan.py +0 -272
  121. webscout/Provider/Jadve.py +0 -266
  122. webscout/Provider/Koboldai.py +0 -381
  123. webscout/Provider/LambdaChat.py +0 -392
  124. webscout/Provider/Llama.py +0 -200
  125. webscout/Provider/Llama3.py +0 -204
  126. webscout/Provider/Marcus.py +0 -148
  127. webscout/Provider/Netwrck.py +0 -228
  128. webscout/Provider/OLLAMA.py +0 -396
  129. webscout/Provider/OPENAI/__init__.py +0 -25
  130. webscout/Provider/OPENAI/base.py +0 -46
  131. webscout/Provider/OPENAI/c4ai.py +0 -367
  132. webscout/Provider/OPENAI/chatgpt.py +0 -549
  133. webscout/Provider/OPENAI/chatgptclone.py +0 -460
  134. webscout/Provider/OPENAI/deepinfra.py +0 -272
  135. webscout/Provider/OPENAI/e2b.py +0 -1350
  136. webscout/Provider/OPENAI/exaai.py +0 -404
  137. webscout/Provider/OPENAI/exachat.py +0 -433
  138. webscout/Provider/OPENAI/freeaichat.py +0 -352
  139. webscout/Provider/OPENAI/glider.py +0 -316
  140. webscout/Provider/OPENAI/heckai.py +0 -337
  141. webscout/Provider/OPENAI/llmchatco.py +0 -327
  142. webscout/Provider/OPENAI/netwrck.py +0 -348
  143. webscout/Provider/OPENAI/opkfc.py +0 -488
  144. webscout/Provider/OPENAI/scirachat.py +0 -463
  145. webscout/Provider/OPENAI/sonus.py +0 -294
  146. webscout/Provider/OPENAI/standardinput.py +0 -425
  147. webscout/Provider/OPENAI/textpollinations.py +0 -285
  148. webscout/Provider/OPENAI/toolbaz.py +0 -405
  149. webscout/Provider/OPENAI/typegpt.py +0 -346
  150. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  151. webscout/Provider/OPENAI/utils.py +0 -211
  152. webscout/Provider/OPENAI/venice.py +0 -413
  153. webscout/Provider/OPENAI/wisecat.py +0 -381
  154. webscout/Provider/OPENAI/writecream.py +0 -156
  155. webscout/Provider/OPENAI/x0gpt.py +0 -371
  156. webscout/Provider/OPENAI/yep.py +0 -327
  157. webscout/Provider/OpenGPT.py +0 -199
  158. webscout/Provider/Openai.py +0 -496
  159. webscout/Provider/PI.py +0 -344
  160. webscout/Provider/Perplexitylabs.py +0 -415
  161. webscout/Provider/Phind.py +0 -535
  162. webscout/Provider/PizzaGPT.py +0 -198
  163. webscout/Provider/QwenLM.py +0 -254
  164. webscout/Provider/Reka.py +0 -214
  165. webscout/Provider/StandardInput.py +0 -278
  166. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  167. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  168. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  169. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  170. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  171. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  172. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  173. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  174. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  175. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  176. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  177. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  178. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  179. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  180. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  181. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  182. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  183. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  184. webscout/Provider/TTI/__init__.py +0 -12
  185. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  186. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  187. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  188. webscout/Provider/TTI/artbit/__init__.py +0 -22
  189. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  190. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  191. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  192. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  193. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  194. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  195. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  196. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  197. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  198. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  199. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  200. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  203. webscout/Provider/TTI/talkai/__init__.py +0 -4
  204. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  205. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  206. webscout/Provider/TTS/__init__.py +0 -7
  207. webscout/Provider/TTS/deepgram.py +0 -156
  208. webscout/Provider/TTS/elevenlabs.py +0 -111
  209. webscout/Provider/TTS/gesserit.py +0 -127
  210. webscout/Provider/TTS/murfai.py +0 -113
  211. webscout/Provider/TTS/parler.py +0 -111
  212. webscout/Provider/TTS/speechma.py +0 -180
  213. webscout/Provider/TTS/streamElements.py +0 -333
  214. webscout/Provider/TTS/utils.py +0 -280
  215. webscout/Provider/TeachAnything.py +0 -187
  216. webscout/Provider/TextPollinationsAI.py +0 -231
  217. webscout/Provider/TwoAI.py +0 -199
  218. webscout/Provider/Venice.py +0 -219
  219. webscout/Provider/VercelAI.py +0 -234
  220. webscout/Provider/WebSim.py +0 -228
  221. webscout/Provider/WiseCat.py +0 -196
  222. webscout/Provider/Writecream.py +0 -211
  223. webscout/Provider/WritingMate.py +0 -197
  224. webscout/Provider/Youchat.py +0 -330
  225. webscout/Provider/__init__.py +0 -198
  226. webscout/Provider/ai4chat.py +0 -202
  227. webscout/Provider/aimathgpt.py +0 -189
  228. webscout/Provider/akashgpt.py +0 -342
  229. webscout/Provider/askmyai.py +0 -158
  230. webscout/Provider/asksteve.py +0 -203
  231. webscout/Provider/bagoodex.py +0 -145
  232. webscout/Provider/cerebras.py +0 -242
  233. webscout/Provider/chatglm.py +0 -205
  234. webscout/Provider/cleeai.py +0 -213
  235. webscout/Provider/copilot.py +0 -428
  236. webscout/Provider/elmo.py +0 -234
  237. webscout/Provider/freeaichat.py +0 -271
  238. webscout/Provider/gaurish.py +0 -244
  239. webscout/Provider/geminiapi.py +0 -208
  240. webscout/Provider/geminiprorealtime.py +0 -160
  241. webscout/Provider/granite.py +0 -187
  242. webscout/Provider/hermes.py +0 -219
  243. webscout/Provider/julius.py +0 -223
  244. webscout/Provider/koala.py +0 -268
  245. webscout/Provider/labyrinth.py +0 -340
  246. webscout/Provider/learnfastai.py +0 -266
  247. webscout/Provider/lepton.py +0 -194
  248. webscout/Provider/llama3mitril.py +0 -180
  249. webscout/Provider/llamatutor.py +0 -192
  250. webscout/Provider/llmchat.py +0 -213
  251. webscout/Provider/llmchatco.py +0 -311
  252. webscout/Provider/meta.py +0 -794
  253. webscout/Provider/multichat.py +0 -325
  254. webscout/Provider/promptrefine.py +0 -193
  255. webscout/Provider/scira_chat.py +0 -277
  256. webscout/Provider/scnet.py +0 -187
  257. webscout/Provider/searchchat.py +0 -293
  258. webscout/Provider/sonus.py +0 -208
  259. webscout/Provider/talkai.py +0 -194
  260. webscout/Provider/toolbaz.py +0 -320
  261. webscout/Provider/turboseek.py +0 -219
  262. webscout/Provider/tutorai.py +0 -252
  263. webscout/Provider/typefully.py +0 -280
  264. webscout/Provider/typegpt.py +0 -232
  265. webscout/Provider/uncovr.py +0 -312
  266. webscout/Provider/x0gpt.py +0 -256
  267. webscout/Provider/yep.py +0 -376
  268. webscout/litagent/__init__.py +0 -29
  269. webscout/litagent/agent.py +0 -455
  270. webscout/litagent/constants.py +0 -60
  271. webscout/litprinter/__init__.py +0 -59
  272. webscout/scout/__init__.py +0 -8
  273. webscout/scout/core/__init__.py +0 -7
  274. webscout/scout/core/crawler.py +0 -140
  275. webscout/scout/core/scout.py +0 -568
  276. webscout/scout/core/search_result.py +0 -96
  277. webscout/scout/core/text_analyzer.py +0 -63
  278. webscout/scout/core/text_utils.py +0 -277
  279. webscout/scout/core/web_analyzer.py +0 -52
  280. webscout/scout/core.py +0 -881
  281. webscout/scout/element.py +0 -460
  282. webscout/scout/parsers/__init__.py +0 -69
  283. webscout/scout/parsers/html5lib_parser.py +0 -172
  284. webscout/scout/parsers/html_parser.py +0 -236
  285. webscout/scout/parsers/lxml_parser.py +0 -178
  286. webscout/scout/utils.py +0 -37
  287. webscout/swiftcli/__init__.py +0 -809
  288. webscout/zeroart/__init__.py +0 -55
  289. webscout/zeroart/base.py +0 -60
  290. webscout/zeroart/effects.py +0 -99
  291. webscout/zeroart/fonts.py +0 -816
  292. webscout-8.2.2.dist-info/RECORD +0 -309
  293. webscout-8.2.2.dist-info/entry_points.txt +0 -5
  294. webscout-8.2.2.dist-info/top_level.txt +0 -3
  295. webstoken/__init__.py +0 -30
  296. webstoken/classifier.py +0 -189
  297. webstoken/keywords.py +0 -216
  298. webstoken/language.py +0 -128
  299. webstoken/ner.py +0 -164
  300. webstoken/normalizer.py +0 -35
  301. webstoken/processor.py +0 -77
  302. webstoken/sentiment.py +0 -206
  303. webstoken/stemmer.py +0 -73
  304. webstoken/tagger.py +0 -60
  305. webstoken/tokenizer.py +0 -158
  306. {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/Groq.py DELETED
@@ -1,670 +0,0 @@
-from typing import Any, AsyncGenerator, Dict, Optional, Callable, List, Union
-
-import httpx
-import requests
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
-
-class GROQ(Provider):
-    """
-    A class to interact with the GROQ AI API.
-    """
-
-    AVAILABLE_MODELS = [
-        "distil-whisper-large-v3-en",
-        "gemma2-9b-it",
-        "llama-3.3-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b",
-        "llama3-70b-8192",
-        "llama3-8b-8192",
-        "whisper-large-v3",
-        "whisper-large-v3-turbo",
-        "meta-llama/llama-4-scout-17b-16e-instruct",
-        "meta-llama/llama-4-maverick-17b-128e-instruct",
-        "playai-tts",
-        "playai-tts-arabic",
-        "qwen-qwq-32b",
-        "mistral-saba-24b",
-        "qwen-2.5-coder-32b",
-        "qwen-2.5-32b",
-        "deepseek-r1-distill-qwen-32b",
-        "deepseek-r1-distill-llama-70b",
-        "llama-3.3-70b-specdec",
-        "llama-3.2-1b-preview",
-        "llama-3.2-3b-preview",
-        "llama-3.2-11b-vision-preview",
-        "llama-3.2-90b-vision-preview",
-        "mixtral-8x7b-32768"
-    ]
-
-    def __init__(
-        self,
-        api_key: str,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        temperature: float = 1,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 1,
-        model: str = "mixtral-8x7b-32768",
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: Optional[str] = None,
-    ):
-        """Instantiates GROQ
-
-        Args:
-            api_key (key): GROQ's API key.
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
-            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
-            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-            model (str, optional): LLM model name. Defaults to "mixtral-8x7b-32768".
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_key = api_key
-        self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-        self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-        self.available_functions: Dict[str, Callable] = {}  # Store available functions
-        self.headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {self.api_key}",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def add_function(self, function_name: str, function: Callable):
-        """Add a function to the available functions dictionary.
-
-        Args:
-            function_name (str): The name of the function to be used in the prompt.
-            function (Callable): The function itself.
-        """
-        self.available_functions[function_name] = function
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        tools: Optional[List[Dict[str, Any]]] = None,  # Add tools parameter
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
-
-        Returns:
-            dict : {}
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        messages = [{"content": conversation_prompt, "role": "user"}]
-        if self.system_prompt:
-            messages.insert(0, {"role": "system", "content": self.system_prompt})
-
-        self.session.headers.update(self.headers)
-        payload = {
-            "frequency_penalty": self.frequency_penalty,
-            "messages": messages,
-            "model": self.model,
-            "presence_penalty": self.presence_penalty,
-            "stream": stream,
-            "temperature": self.temperature,
-            "top_p": self.top_p,
-            "tools": tools  # Include tools in the payload
-        }
-
-        def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            message_load = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                delimiter="" if raw else "data:",
-                chunk_size=self.stream_chunk_size,
-            ):
-                try:
-                    resp = json.loads(value)
-                    incomplete_message = self.get_message(resp)
-                    if incomplete_message:
-                        message_load += incomplete_message
-                        resp["choices"][0]["delta"]["content"] = message_load
-                        self.last_response.update(resp)
-                        yield value if raw else resp
-                    elif raw:
-                        yield value
-                except json.decoder.JSONDecodeError:
-                    pass
-
-            # Handle tool calls if any
-            if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
-                tool_calls = self.last_response['choices'][0]['message']['tool_calls']
-                for tool_call in tool_calls:
-                    function_name = tool_call.get('function', {}).get('name')
-                    arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
-                    if function_name in self.available_functions:
-                        tool_response = self.available_functions[function_name](**arguments)
-                        messages.append({
-                            "tool_call_id": tool_call['id'],
-                            "role": "tool",
-                            "name": function_name,
-                            "content": tool_response
-                        })
-                payload['messages'] = messages
-                # Make a second call to get the final response
-                second_response = self.session.post(
-                    self.chat_endpoint, json=payload, timeout=self.timeout
-                )
-                if second_response.ok:
-                    self.last_response = second_response.json()
-                else:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to execute tool - {second_response.text}"
-                    )
-
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
-            )
-            if (
-                not response.ok
-                or not response.headers.get("Content-Type", "") == "application/json"
-            ):
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-            resp = response.json()
-
-            # Handle tool calls if any
-            if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
-                tool_calls = resp['choices'][0]['message']['tool_calls']
-                for tool_call in tool_calls:
-                    function_name = tool_call.get('function', {}).get('name')
-                    arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
-                    if function_name in self.available_functions:
-                        tool_response = self.available_functions[function_name](**arguments)
-                        messages.append({
-                            "tool_call_id": tool_call['id'],
-                            "role": "tool",
-                            "name": function_name,
-                            "content": tool_response
-                        })
-                payload['messages'] = messages
-                # Make a second call to get the final response
-                second_response = self.session.post(
-                    self.chat_endpoint, json=payload, timeout=self.timeout
-                )
-                if second_response.ok:
-                    resp = second_response.json()
-                else:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to execute tool - {second_response.text}"
-                    )
-
-            self.last_response.update(resp)
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-            return resp
-
-        return for_stream() if stream else for_non_stream()
-
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally, tools=tools
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                    tools=tools
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        try:
-            if response["choices"][0].get("delta"):
-                return response["choices"][0]["delta"]["content"]
-            return response["choices"][0]["message"]["content"]
-        except KeyError:
-            return ""
-
-
-class AsyncGROQ(AsyncProvider):
-    """
-    An asynchronous class to interact with the GROQ AI API.
-    """
-
-    AVAILABLE_MODELS = [
-        "distil-whisper-large-v3-en",
-        "gemma2-9b-it",
-        "llama-3.3-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b",
-        "llama3-70b-8192",
-        "llama3-8b-8192",
-        "whisper-large-v3",
-        "whisper-large-v3-turbo",
-        "meta-llama/llama-4-scout-17b-16e-instruct",
-        "meta-llama/llama-4-maverick-17b-128e-instruct",
-        "playai-tts",
-        "playai-tts-arabic",
-        "qwen-qwq-32b",
-        "mistral-saba-24b",
-        "qwen-2.5-coder-32b",
-        "qwen-2.5-32b",
-        "deepseek-r1-distill-qwen-32b",
-        "deepseek-r1-distill-llama-70b",
-        "llama-3.3-70b-specdec",
-        "llama-3.2-1b-preview",
-        "llama-3.2-3b-preview",
-        "llama-3.2-11b-vision-preview",
-        "llama-3.2-90b-vision-preview",
-        "mixtral-8x7b-32768"
-    ]
-
-    def __init__(
-        self,
-        api_key: str,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        temperature: float = 1,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 1,
-        model: str = "mixtral-8x7b-32768",
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: Optional[str] = None,
-    ):
-        """Instantiates AsyncGROQ
-
-        Args:
-            api_key (key): GROQ's API key.
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
-            presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-            frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0.
-            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999.
-            model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo".
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt to guide the conversation. Defaults to None.
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_key = api_key
-        self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-        self.chat_endpoint = "https://api.groq.com/openai/v1/chat/completions"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-        self.available_functions: Dict[str, Callable] = {}  # Store available functions
-        self.headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {self.api_key}",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-    def add_function(self, function_name: str, function: Callable):
-        """Add a function to the available functions dictionary.
-
-        Args:
-            function_name (str): The name of the function to be used in the prompt.
-            function (Callable): The function itself.
-        """
-        self.available_functions[function_name] = function
-
-    async def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[dict, AsyncGenerator]:
-        """Chat with AI asynchronously.
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
-        Returns:
-            dict|AsyncGenerator : ai content
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        messages = [{"content": conversation_prompt, "role": "user"}]
-        if self.system_prompt:
-            messages.insert(0, {"role": "system", "content": self.system_prompt})
-
-        payload = {
-            "frequency_penalty": self.frequency_penalty,
-            "messages": messages,
-            "model": self.model,
-            "presence_penalty": self.presence_penalty,
-            "stream": stream,
-            "temperature": self.temperature,
-            "top_p": self.top_p,
-            "tools": tools
-        }
-
-        async def for_stream():
-            async with self.session.stream(
-                "POST", self.chat_endpoint, json=payload, timeout=self.timeout
-            ) as response:
-                if not response.is_success:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                    )
-
-                message_load = ""
-                intro_value = "data:"
-                async for value in response.aiter_lines():
-                    try:
-                        if value.startswith(intro_value):
-                            value = value[len(intro_value) :]
-                        resp = json.loads(value)
-                        incomplete_message = await self.get_message(resp)
-                        if incomplete_message:
-                            message_load += incomplete_message
-                            resp["choices"][0]["delta"]["content"] = message_load
-                            self.last_response.update(resp)
-                            yield value if raw else resp
-                        elif raw:
-                            yield value
-                    except json.decoder.JSONDecodeError:
-                        pass
-
-            # Handle tool calls if any (in streaming mode)
-            if 'tool_calls' in self.last_response.get('choices', [{}])[0].get('message', {}):
-                tool_calls = self.last_response['choices'][0]['message']['tool_calls']
-                for tool_call in tool_calls:
-                    function_name = tool_call.get('function', {}).get('name')
-                    arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
-                    if function_name in self.available_functions:
-                        tool_response = self.available_functions[function_name](**arguments)
-                        messages.append({
-                            "tool_call_id": tool_call['id'],
-                            "role": "tool",
-                            "name": function_name,
-                            "content": tool_response
-                        })
-                payload['messages'] = messages
-                # Make a second call to get the final response
-                second_response = await self.session.post(
-                    self.chat_endpoint, json=payload, timeout=self.timeout
-                )
-                if second_response.is_success:
-                    self.last_response = second_response.json()
-                else:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to execute tool - {second_response.text}"
-                    )
-
-            self.conversation.update_chat_history(
-                prompt, await self.get_message(self.last_response)
-            )
-
-        async def for_non_stream():
-            response = await self.session.post(
-                self.chat_endpoint, json=payload, timeout=self.timeout
-            )
-            if not response.is_success:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
-                )
-            resp = response.json()
-
-            # Handle tool calls if any (in non-streaming mode)
-            if 'tool_calls' in resp.get('choices', [{}])[0].get('message', {}):
-                tool_calls = resp['choices'][0]['message']['tool_calls']
-                for tool_call in tool_calls:
-                    function_name = tool_call.get('function', {}).get('name')
-                    arguments = json.loads(tool_call.get('function', {}).get('arguments', "{}"))
-                    if function_name in self.available_functions:
-                        tool_response = self.available_functions[function_name](**arguments)
-                        messages.append({
-                            "tool_call_id": tool_call['id'],
-                            "role": "tool",
-                            "name": function_name,
-                            "content": tool_response
-                        })
-                payload['messages'] = messages
-                # Make a second call to get the final response
-                second_response = await self.session.post(
-                    self.chat_endpoint, json=payload, timeout=self.timeout
-                )
-                if second_response.is_success:
-                    resp = second_response.json()
-                else:
-                    raise exceptions.FailedToGenerateResponseError(
-                        f"Failed to execute tool - {second_response.text}"
-                    )
-
-            self.last_response.update(resp)
-            self.conversation.update_chat_history(
-                prompt, await self.get_message(self.last_response)
-            )
-            return resp
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[str, AsyncGenerator]:
-        """Generate response `str` asynchronously.
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            tools (List[Dict[str, Any]], optional): List of tool definitions. See example in class docstring. Defaults to None.
-        Returns:
-            str|AsyncGenerator: Response generated
-        """
-
-        async def for_stream():
-            async_ask = await self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally, tools=tools
-            )
-            async for response in async_ask:
-                yield await self.get_message(response)
-
-        async def for_non_stream():
-            return await self.get_message(
-                await self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                    tools=tools
-                )
-            )
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        try:
-            if response["choices"][0].get("delta"):
-                return response["choices"][0]["delta"]["content"]
-            return response["choices"][0]["message"]["content"]
-        except KeyError:
-            return ""
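For context on what is being removed, here is a minimal usage sketch reconstructed from the signatures in the deleted Groq.py above. It assumes webscout 8.2.2, the last release shipping this module; the API key value and the get_current_time tool are hypothetical placeholders, and the tools schema follows the OpenAI-style format the payload forwards verbatim.

# Runs only against webscout <= 8.2.2, where webscout/Provider/Groq.py still exists.
from webscout.Provider.Groq import GROQ

ai = GROQ(
    api_key="gsk_...",                # placeholder; a real GROQ API key is required
    model="llama-3.3-70b-versatile",  # must be one of GROQ.AVAILABLE_MODELS
    system_prompt="Answer concisely.",
)

# Non-streaming: chat() returns the extracted message string.
print(ai.chat("What is the capital of France?"))

# Streaming: each yielded chunk is the accumulated text so far, because
# ask() rewrites the delta content with the running message_load buffer.
for text_so_far in ai.chat("Explain HTTP keep-alive.", stream=True):
    print(text_so_far)

# Tool calling: register a local callable under the name the model will emit,
# then pass an OpenAI-style tools schema; ask() executes matching tool_calls
# and issues a second request for the final answer.
def get_current_time(timezone: str) -> str:  # hypothetical example tool
    return "12:00 " + timezone

ai.add_function("get_current_time", get_current_time)
tools = [{
    "type": "function",
    "function": {
        "name": "get_current_time",
        "description": "Get the current time in a timezone.",
        "parameters": {
            "type": "object",
            "properties": {"timezone": {"type": "string"}},
            "required": ["timezone"],
        },
    },
}]
print(ai.chat("What time is it in UTC?", tools=tools))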