webscout 8.2.7-py3-none-any.whl → 8.2.8-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in those registries.

This version of webscout has been flagged as potentially problematic.
Files changed (323)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -249
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/__init__.py +10 -0
  5. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  7. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  8. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  9. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  10. webscout/Extra/YTToolkit/README.md +375 -0
  11. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  12. webscout/Extra/YTToolkit/__init__.py +3 -0
  13. webscout/Extra/YTToolkit/transcriber.py +476 -0
  14. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  15. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  16. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  17. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  18. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  19. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  20. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  21. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  22. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  23. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  24. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  25. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  26. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  27. webscout/Extra/__init__.py +7 -0
  28. webscout/Extra/autocoder/__init__.py +9 -0
  29. webscout/Extra/autocoder/autocoder.py +1105 -0
  30. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  31. webscout/Extra/gguf.md +430 -0
  32. webscout/Extra/gguf.py +684 -0
  33. webscout/Extra/tempmail/README.md +488 -0
  34. webscout/Extra/tempmail/__init__.py +28 -0
  35. webscout/Extra/tempmail/async_utils.py +141 -0
  36. webscout/Extra/tempmail/base.py +161 -0
  37. webscout/Extra/tempmail/cli.py +187 -0
  38. webscout/Extra/tempmail/emailnator.py +84 -0
  39. webscout/Extra/tempmail/mail_tm.py +361 -0
  40. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  41. webscout/Extra/weather.md +281 -0
  42. webscout/Extra/weather.py +194 -0
  43. webscout/Extra/weather_ascii.py +76 -0
  44. webscout/Litlogger/Readme.md +175 -0
  45. webscout/Litlogger/__init__.py +67 -0
  46. webscout/Litlogger/core/__init__.py +6 -0
  47. webscout/Litlogger/core/level.py +23 -0
  48. webscout/Litlogger/core/logger.py +165 -0
  49. webscout/Litlogger/handlers/__init__.py +12 -0
  50. webscout/Litlogger/handlers/console.py +33 -0
  51. webscout/Litlogger/handlers/file.py +143 -0
  52. webscout/Litlogger/handlers/network.py +173 -0
  53. webscout/Litlogger/styles/__init__.py +7 -0
  54. webscout/Litlogger/styles/colors.py +249 -0
  55. webscout/Litlogger/styles/formats.py +458 -0
  56. webscout/Litlogger/styles/text.py +87 -0
  57. webscout/Litlogger/utils/__init__.py +6 -0
  58. webscout/Litlogger/utils/detectors.py +153 -0
  59. webscout/Litlogger/utils/formatters.py +200 -0
  60. webscout/Provider/AI21.py +177 -0
  61. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  62. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  63. webscout/Provider/AISEARCH/README.md +279 -0
  64. webscout/Provider/AISEARCH/__init__.py +9 -0
  65. webscout/Provider/AISEARCH/felo_search.py +228 -0
  66. webscout/Provider/AISEARCH/genspark_search.py +350 -0
  67. webscout/Provider/AISEARCH/hika_search.py +198 -0
  68. webscout/Provider/AISEARCH/iask_search.py +436 -0
  69. webscout/Provider/AISEARCH/monica_search.py +246 -0
  70. webscout/Provider/AISEARCH/scira_search.py +324 -0
  71. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  72. webscout/Provider/Aitopia.py +316 -0
  73. webscout/Provider/AllenAI.py +440 -0
  74. webscout/Provider/Andi.py +228 -0
  75. webscout/Provider/Blackboxai.py +673 -0
  76. webscout/Provider/ChatGPTClone.py +237 -0
  77. webscout/Provider/ChatGPTGratis.py +194 -0
  78. webscout/Provider/ChatSandbox.py +342 -0
  79. webscout/Provider/Cloudflare.py +324 -0
  80. webscout/Provider/Cohere.py +208 -0
  81. webscout/Provider/Deepinfra.py +340 -0
  82. webscout/Provider/ExaAI.py +261 -0
  83. webscout/Provider/ExaChat.py +358 -0
  84. webscout/Provider/Flowith.py +217 -0
  85. webscout/Provider/FreeGemini.py +250 -0
  86. webscout/Provider/Gemini.py +169 -0
  87. webscout/Provider/GithubChat.py +370 -0
  88. webscout/Provider/GizAI.py +295 -0
  89. webscout/Provider/Glider.py +225 -0
  90. webscout/Provider/Groq.py +801 -0
  91. webscout/Provider/HF_space/__init__.py +0 -0
  92. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  93. webscout/Provider/HeckAI.py +285 -0
  94. webscout/Provider/HuggingFaceChat.py +469 -0
  95. webscout/Provider/Hunyuan.py +283 -0
  96. webscout/Provider/Jadve.py +291 -0
  97. webscout/Provider/Koboldai.py +384 -0
  98. webscout/Provider/LambdaChat.py +411 -0
  99. webscout/Provider/Llama3.py +259 -0
  100. webscout/Provider/MCPCore.py +315 -0
  101. webscout/Provider/Marcus.py +198 -0
  102. webscout/Provider/Nemotron.py +218 -0
  103. webscout/Provider/Netwrck.py +270 -0
  104. webscout/Provider/OLLAMA.py +396 -0
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  106. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  107. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  108. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  109. webscout/Provider/OPENAI/README.md +1253 -0
  110. webscout/Provider/OPENAI/__init__.py +36 -0
  111. webscout/Provider/OPENAI/ai4chat.py +293 -0
  112. webscout/Provider/OPENAI/api.py +810 -0
  113. webscout/Provider/OPENAI/base.py +249 -0
  114. webscout/Provider/OPENAI/c4ai.py +373 -0
  115. webscout/Provider/OPENAI/chatgpt.py +556 -0
  116. webscout/Provider/OPENAI/chatgptclone.py +488 -0
  117. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  118. webscout/Provider/OPENAI/deepinfra.py +319 -0
  119. webscout/Provider/OPENAI/e2b.py +1356 -0
  120. webscout/Provider/OPENAI/exaai.py +411 -0
  121. webscout/Provider/OPENAI/exachat.py +443 -0
  122. webscout/Provider/OPENAI/flowith.py +162 -0
  123. webscout/Provider/OPENAI/freeaichat.py +359 -0
  124. webscout/Provider/OPENAI/glider.py +323 -0
  125. webscout/Provider/OPENAI/groq.py +361 -0
  126. webscout/Provider/OPENAI/heckai.py +307 -0
  127. webscout/Provider/OPENAI/llmchatco.py +335 -0
  128. webscout/Provider/OPENAI/mcpcore.py +383 -0
  129. webscout/Provider/OPENAI/multichat.py +376 -0
  130. webscout/Provider/OPENAI/netwrck.py +356 -0
  131. webscout/Provider/OPENAI/opkfc.py +496 -0
  132. webscout/Provider/OPENAI/scirachat.py +471 -0
  133. webscout/Provider/OPENAI/sonus.py +303 -0
  134. webscout/Provider/OPENAI/standardinput.py +433 -0
  135. webscout/Provider/OPENAI/textpollinations.py +339 -0
  136. webscout/Provider/OPENAI/toolbaz.py +413 -0
  137. webscout/Provider/OPENAI/typefully.py +355 -0
  138. webscout/Provider/OPENAI/typegpt.py +358 -0
  139. webscout/Provider/OPENAI/uncovrAI.py +462 -0
  140. webscout/Provider/OPENAI/utils.py +307 -0
  141. webscout/Provider/OPENAI/venice.py +425 -0
  142. webscout/Provider/OPENAI/wisecat.py +381 -0
  143. webscout/Provider/OPENAI/writecream.py +163 -0
  144. webscout/Provider/OPENAI/x0gpt.py +378 -0
  145. webscout/Provider/OPENAI/yep.py +356 -0
  146. webscout/Provider/OpenGPT.py +209 -0
  147. webscout/Provider/Openai.py +496 -0
  148. webscout/Provider/PI.py +429 -0
  149. webscout/Provider/Perplexitylabs.py +415 -0
  150. webscout/Provider/QwenLM.py +254 -0
  151. webscout/Provider/Reka.py +214 -0
  152. webscout/Provider/StandardInput.py +290 -0
  153. webscout/Provider/TTI/AiForce/README.md +159 -0
  154. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  155. webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
  156. webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
  157. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  158. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  159. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
  160. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
  161. webscout/Provider/TTI/ImgSys/README.md +174 -0
  162. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  163. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  164. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  165. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  166. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  167. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  168. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  169. webscout/Provider/TTI/Nexra/README.md +155 -0
  170. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  171. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  172. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  173. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  174. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  175. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
  176. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
  177. webscout/Provider/TTI/README.md +128 -0
  178. webscout/Provider/TTI/__init__.py +12 -0
  179. webscout/Provider/TTI/aiarta/README.md +134 -0
  180. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  181. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  182. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  183. webscout/Provider/TTI/artbit/README.md +100 -0
  184. webscout/Provider/TTI/artbit/__init__.py +22 -0
  185. webscout/Provider/TTI/artbit/async_artbit.py +155 -0
  186. webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
  187. webscout/Provider/TTI/fastflux/README.md +129 -0
  188. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  189. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  190. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  191. webscout/Provider/TTI/huggingface/README.md +114 -0
  192. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  193. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  194. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  195. webscout/Provider/TTI/piclumen/README.md +161 -0
  196. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  197. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  198. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  199. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  200. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  203. webscout/Provider/TTI/talkai/README.md +139 -0
  204. webscout/Provider/TTI/talkai/__init__.py +4 -0
  205. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  206. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  207. webscout/Provider/TTS/README.md +192 -0
  208. webscout/Provider/TTS/__init__.py +9 -0
  209. webscout/Provider/TTS/base.py +159 -0
  210. webscout/Provider/TTS/deepgram.py +156 -0
  211. webscout/Provider/TTS/elevenlabs.py +111 -0
  212. webscout/Provider/TTS/gesserit.py +128 -0
  213. webscout/Provider/TTS/murfai.py +113 -0
  214. webscout/Provider/TTS/parler.py +111 -0
  215. webscout/Provider/TTS/speechma.py +580 -0
  216. webscout/Provider/TTS/sthir.py +94 -0
  217. webscout/Provider/TTS/streamElements.py +333 -0
  218. webscout/Provider/TTS/utils.py +280 -0
  219. webscout/Provider/TeachAnything.py +229 -0
  220. webscout/Provider/TextPollinationsAI.py +308 -0
  221. webscout/Provider/TwoAI.py +280 -0
  222. webscout/Provider/TypliAI.py +305 -0
  223. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  224. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  225. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  226. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  227. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  228. webscout/Provider/Venice.py +258 -0
  229. webscout/Provider/VercelAI.py +253 -0
  230. webscout/Provider/WiseCat.py +233 -0
  231. webscout/Provider/WrDoChat.py +370 -0
  232. webscout/Provider/Writecream.py +246 -0
  233. webscout/Provider/WritingMate.py +269 -0
  234. webscout/Provider/__init__.py +172 -0
  235. webscout/Provider/ai4chat.py +149 -0
  236. webscout/Provider/akashgpt.py +335 -0
  237. webscout/Provider/asksteve.py +220 -0
  238. webscout/Provider/cerebras.py +290 -0
  239. webscout/Provider/chatglm.py +215 -0
  240. webscout/Provider/cleeai.py +213 -0
  241. webscout/Provider/copilot.py +425 -0
  242. webscout/Provider/elmo.py +283 -0
  243. webscout/Provider/freeaichat.py +285 -0
  244. webscout/Provider/geminiapi.py +208 -0
  245. webscout/Provider/granite.py +235 -0
  246. webscout/Provider/hermes.py +266 -0
  247. webscout/Provider/julius.py +223 -0
  248. webscout/Provider/koala.py +170 -0
  249. webscout/Provider/learnfastai.py +325 -0
  250. webscout/Provider/llama3mitril.py +215 -0
  251. webscout/Provider/llmchat.py +258 -0
  252. webscout/Provider/llmchatco.py +306 -0
  253. webscout/Provider/lmarena.py +198 -0
  254. webscout/Provider/meta.py +801 -0
  255. webscout/Provider/multichat.py +364 -0
  256. webscout/Provider/samurai.py +223 -0
  257. webscout/Provider/scira_chat.py +299 -0
  258. webscout/Provider/scnet.py +243 -0
  259. webscout/Provider/searchchat.py +292 -0
  260. webscout/Provider/sonus.py +258 -0
  261. webscout/Provider/talkai.py +194 -0
  262. webscout/Provider/toolbaz.py +353 -0
  263. webscout/Provider/turboseek.py +266 -0
  264. webscout/Provider/typefully.py +202 -0
  265. webscout/Provider/typegpt.py +289 -0
  266. webscout/Provider/uncovr.py +368 -0
  267. webscout/Provider/x0gpt.py +299 -0
  268. webscout/Provider/yep.py +389 -0
  269. webscout/__init__.py +4 -2
  270. webscout/cli.py +3 -28
  271. webscout/conversation.py +35 -35
  272. webscout/litagent/Readme.md +276 -0
  273. webscout/litagent/__init__.py +29 -0
  274. webscout/litagent/agent.py +455 -0
  275. webscout/litagent/constants.py +60 -0
  276. webscout/litprinter/__init__.py +59 -0
  277. webscout/scout/README.md +402 -0
  278. webscout/scout/__init__.py +8 -0
  279. webscout/scout/core/__init__.py +7 -0
  280. webscout/scout/core/crawler.py +140 -0
  281. webscout/scout/core/scout.py +568 -0
  282. webscout/scout/core/search_result.py +96 -0
  283. webscout/scout/core/text_analyzer.py +63 -0
  284. webscout/scout/core/text_utils.py +277 -0
  285. webscout/scout/core/web_analyzer.py +52 -0
  286. webscout/scout/element.py +460 -0
  287. webscout/scout/parsers/__init__.py +69 -0
  288. webscout/scout/parsers/html5lib_parser.py +172 -0
  289. webscout/scout/parsers/html_parser.py +236 -0
  290. webscout/scout/parsers/lxml_parser.py +178 -0
  291. webscout/scout/utils.py +37 -0
  292. webscout/swiftcli/Readme.md +323 -0
  293. webscout/swiftcli/__init__.py +95 -0
  294. webscout/swiftcli/core/__init__.py +7 -0
  295. webscout/swiftcli/core/cli.py +297 -0
  296. webscout/swiftcli/core/context.py +104 -0
  297. webscout/swiftcli/core/group.py +241 -0
  298. webscout/swiftcli/decorators/__init__.py +28 -0
  299. webscout/swiftcli/decorators/command.py +221 -0
  300. webscout/swiftcli/decorators/options.py +220 -0
  301. webscout/swiftcli/decorators/output.py +252 -0
  302. webscout/swiftcli/exceptions.py +21 -0
  303. webscout/swiftcli/plugins/__init__.py +9 -0
  304. webscout/swiftcli/plugins/base.py +135 -0
  305. webscout/swiftcli/plugins/manager.py +262 -0
  306. webscout/swiftcli/utils/__init__.py +59 -0
  307. webscout/swiftcli/utils/formatting.py +252 -0
  308. webscout/swiftcli/utils/parsing.py +267 -0
  309. webscout/version.py +1 -1
  310. webscout/webscout_search.py +2 -182
  311. webscout/webscout_search_async.py +1 -179
  312. webscout/zeroart/README.md +89 -0
  313. webscout/zeroart/__init__.py +135 -0
  314. webscout/zeroart/base.py +66 -0
  315. webscout/zeroart/effects.py +101 -0
  316. webscout/zeroart/fonts.py +1239 -0
  317. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
  318. webscout-8.2.8.dist-info/RECORD +334 -0
  319. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  320. webscout-8.2.7.dist-info/RECORD +0 -26
  321. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
  322. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
  323. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/hermes.py
@@ -0,0 +1,266 @@
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import json
+ from typing import Union, Any, Dict, Generator, Optional
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation, sanitize_stream  # Import sanitize_stream
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class NousHermes(Provider):
+     """
+     A class to interact with the Hermes API.
+     """
+
+     AVAILABLE_MODELS = ["Hermes-3-Llama-3.1-70B", "Hermes-3-Llama-3.1-8B"]
+
+     def __init__(
+         self,
+         cookies_path: str,
+         is_conversation: bool = True,
+         max_tokens: int = 8000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Hermes-3-Llama-3.1-70B",
+         system_prompt: str = "You are a helpful AI assistant.",
+         temperature: float = 0.7,
+         top_p: float = 0.9,
+     ):
+         """Initializes the Hermes API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+             )
+
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.api_endpoint = "https://hermes.nousresearch.com/api/chat"
+         self.temperature = temperature
+         self.top_p = top_p
+         self.cookies_path = cookies_path
+         self.cookies_dict = self._load_cookies()
+
+         self.headers = {
+             'accept': '*/*',
+             'accept-language': 'en-US,en;q=0.9',
+             'content-type': 'application/json',
+             'origin': 'https://hermes.nousresearch.com',
+             'referer': 'https://hermes.nousresearch.com/',
+         }
+
+         # A tuple (not a generator) so membership checks work on every call
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         # Update curl_cffi session proxies
+         self.session.proxies = proxies
+
+         # Apply cookies to the curl_cffi session
+         if self.cookies_dict:
+             for name, value in self.cookies_dict.items():
+                 self.session.cookies.set(name, value, domain="hermes.nousresearch.com")
+
+     def _load_cookies(self) -> Optional[Dict[str, str]]:
+         """Load cookies from a JSON file and return them as a dictionary."""
+         try:
+             with open(self.cookies_path, 'r') as f:
+                 cookies_data = json.load(f)
+             # Convert a list of cookie objects to a name -> value dictionary
+             return {cookie['name']: cookie['value'] for cookie in cookies_data if 'name' in cookie and 'value' in cookie}
+         except FileNotFoundError:
+             print(f"Warning: Cookies file not found at {self.cookies_path}")
+             return None
+         except json.JSONDecodeError:
+             print(f"Warning: Invalid JSON format in cookies file at {self.cookies_path}")
+             return None
+         except Exception as e:
+             print(f"Warning: Error loading cookies: {e}")
+             return None
+
+     @staticmethod
+     def _hermes_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts content from Hermes stream JSON objects."""
+         if isinstance(chunk, dict) and chunk.get('type') == 'llm_response':
+             return chunk.get('content')
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+         """Chat with AI
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict|Generator : ai content
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "messages": [{"role": "system", "content": self.system_prompt}, {"role": "user", "content": conversation_prompt}],
+             "model": self.model,
+             "max_tokens": self.max_tokens_to_sample,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+         }
+
+         def for_stream():
+             streaming_text = ""  # Initialize outside the try block
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"  # Keep impersonate
+                 )
+                 response.raise_for_status()
+
+                 # Use sanitize_stream to strip the "data:" prefix, parse JSON,
+                 # and extract content via _hermes_extractor
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                     intro_value="data:",
+                     to_json=True,  # Stream sends JSON
+                     content_extractor=self._hermes_extractor,  # Use the specific extractor
+                     yield_raw_on_error=False  # Skip non-JSON lines or lines where the extractor fails
+                 )
+
+                 for content_chunk in processed_stream:
+                     # content_chunk is the string extracted by _hermes_extractor
+                     if content_chunk and isinstance(content_chunk, str):
+                         streaming_text += content_chunk
+                         resp = dict(text=content_chunk)
+                         yield resp if not raw else content_chunk
+
+                 self.last_response = dict(text=streaming_text)
+                 self.conversation.update_chat_history(prompt, streaming_text)
+
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+
+         def for_non_stream():
+             collected_text = ""
+             try:
+                 for chunk_data in for_stream():
+                     if isinstance(chunk_data, dict) and "text" in chunk_data:
+                         collected_text += chunk_data["text"]
+                     elif raw and isinstance(chunk_data, str):
+                         collected_text += chunk_data
+             except Exception as e:
+                 if not collected_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+             return collected_text if raw else self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt, stream=True, raw=False,
+                 optimizer=optimizer, conversationally=conversationally
+             )
+             for response_dict in gen:
+                 yield self.get_message(response_dict)
+
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt,
+                 stream=False,
+                 raw=False,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+             return self.get_message(response_data)
+
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = NousHermes(cookies_path="cookies.json")
+     response = ai.chat(input(">>> "), stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
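
For readers unfamiliar with `sanitize_stream` (imported from `webscout.AIutel`), the following is a minimal illustrative stand-in matching the keyword arguments used above: strip the SSE-style `data:` prefix, parse each piece as JSON, and hand the object to the extractor. This is a sketch of the processing shape only, not the library's actual implementation, which also handles byte decoding of split chunks and other edge cases.

# Illustrative stand-in for webscout.AIutel.sanitize_stream (sketch only)
import json
from typing import Callable, Iterable, Iterator, Optional

def sanitize_stream_sketch(
    data: Iterable,
    intro_value: str = "data:",
    to_json: bool = True,
    content_extractor: Optional[Callable] = None,
    yield_raw_on_error: bool = False,
) -> Iterator:
    for raw in data:
        # Decode bytes coming from response.iter_content()
        line = raw.decode("utf-8", errors="ignore") if isinstance(raw, bytes) else str(raw)
        line = line.strip()
        if not line:
            continue
        if line.startswith(intro_value):
            # Drop the SSE-style "data:" prefix before parsing
            line = line[len(intro_value):].strip()
        if to_json:
            try:
                obj = json.loads(line)
            except json.JSONDecodeError:
                if yield_raw_on_error:
                    yield line
                continue
        else:
            obj = line
        # The extractor (e.g. _hermes_extractor) decides what text to keep
        content = content_extractor(obj) if content_extractor else obj
        if content is not None:
            yield content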
webscout/Provider/julius.py
@@ -0,0 +1,223 @@
+ import uuid
+
+ import requests
+ import json
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Union, Any, AsyncGenerator, Dict
+
+
+ class Julius(Provider):
+     AVAILABLE_MODELS = [
+         "Llama 3",
+         "GPT-4o",
+         "GPT-3.5",
+         "Command R",
+         "Gemini Flash",
+         "Gemini 1.5",
+         "Claude Sonnet",
+         "Claude Opus",
+         "Claude Haiku",
+         "GPT-4",
+         "GPT-4o mini",
+         "Command R+",
+         "o1-mini",
+         "o1-preview",
+     ]
+
+     def __init__(
+         self,
+         api_key: str,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Gemini Flash",
+     ):
+         """Instantiates Julius
+
+         Args:
+             api_key (str): Julius API key.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model to use for generating text. Defaults to "Gemini Flash".
+                 Choose from AVAILABLE_MODELS.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://api.julius.ai/api/chat/message"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.api_key = api_key
+         self.headers = {
+             "authorization": f"Bearer {self.api_key}",
+             "content-type": "application/json",
+             "conversation-id": str(uuid.uuid4()),
+             "interactive-charts": "true",
+             "is-native": "false",
+             "orient-split": "true",
+             "request-id": str(uuid.uuid4()),
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+             "visitor-id": str(uuid.uuid4())
+         }
+
+         # A tuple (not a generator) so membership checks work on every call
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "message": {"content": conversation_prompt, "role": "user"},
+             "provider": "default",
+             "chat_mode": "auto",
+             "client_version": "20240130",
+             "theme": "dark",
+             "new_images": None,
+             "new_attachments": None,
+             "dataframe_format": "json",
+             "selectedModels": [self.model]  # Choose the model here
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     try:
+                         json_line = json.loads(line)
+                         content = json_line['content']
+                         streaming_response += content
+                         yield content if raw else dict(text=content)
+                     except (json.JSONDecodeError, KeyError, TypeError):
+                         # Skip malformed lines and lines without a "content" field
+                         continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             # Drain the streaming generator; it populates self.last_response
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == '__main__':
+     from rich import print
+     ai = Julius(api_key="", timeout=5000)
+     response = ai.chat("write a poem about AI", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
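
Unlike the SSE streams used elsewhere in this release, the Julius endpoint (as `for_stream` parses it) emits plain line-delimited JSON: every non-empty line is a standalone object, and only its `content` field is consumed. A minimal illustration of that parsing loop follows; the sample lines are hypothetical, since only the `content` key is assumed by the code above.

import json

# Hypothetical lines as Julius.ask's for_stream would see them; only
# "content" is read, and malformed lines are skipped.
lines = [
    '{"content": "Circuits hum softly, "}',
    'not-json-noise',
    '{"content": "dreams in silicon."}',
]
text = ""
for line in lines:
    try:
        text += json.loads(line)["content"]
    except (json.JSONDecodeError, KeyError):
        continue
print(text)  # -> Circuits hum softly, dreams in silicon.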
webscout/Provider/koala.py
@@ -0,0 +1,170 @@
+ import requests
+ import re
+ from typing import Optional, Union, Any, Dict, Generator
+ from uuid import uuid4
+
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+
+ class KOALA(Provider):
+     """
+     A class to interact with the Koala.sh API, X0GPT-style, without sanitize_stream.
+     """
+     AVAILABLE_MODELS = [
+         "gpt-4.1-mini",
+         "gpt-4.1",
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-4.1",
+         web_search: bool = True,
+     ):
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://koala.sh/api/gpt/"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "accept": "text/event-stream",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "flag-real-time-data": "true" if web_search else "false",
+             "origin": "https://koala.sh",
+             "referer": "https://koala.sh/chat",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+         }
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     @staticmethod
+     def _koala_extractor(line: str) -> Optional[str]:
+         # Koala returns SSE lines like: data: "Hello"
+         # Note: JSON string escapes (e.g. \" or \n) are passed through verbatim.
+         match = re.match(r'data:\s*"(.*)"', line)
+         if match:
+             return match.group(1)
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if hasattr(Optimizers, optimizer):
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(f"Optimizer is not valid: {optimizer}")
+         payload = {
+             "input": conversation_prompt,
+             "inputHistory": [],
+             "outputHistory": [],
+             "model": self.model
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if not line:
+                     continue
+                 # Only process lines starting with data:
+                 if line.startswith("data:"):
+                     content = self._koala_extractor(line)
+                     if content and content.strip():
+                         streaming_response += content
+                         yield dict(text=content) if not raw else content
+             # Only update chat history if the response is not empty
+             if streaming_response.strip():
+                 self.last_response = dict(text=streaming_response)
+                 self.conversation.update_chat_history(
+                     prompt, self.get_message(self.last_response)
+                 )
+
+         def for_non_stream():
+             # Drain the streaming generator; it already records chat history
+             # and populates self.last_response (updating again here would
+             # duplicate the exchange in the conversation history).
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = KOALA(timeout=60)
+     response = ai.chat("Say 'Hello' in one word", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
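
As a quick sanity check of the extraction regex used by `_koala_extractor`, here is the same pattern reproduced standalone so it can be exercised without hitting the Koala.sh API; the sample inputs are hypothetical but follow the line shape documented in the code above.

import re

def koala_extract(line):
    # Same pattern as KOALA._koala_extractor
    match = re.match(r'data:\s*"(.*)"', line)
    return match.group(1) if match else None

print(koala_extract('data: "Hello"'))  # -> Hello
print(koala_extract('data: ""'))       # -> empty string (falsy, so the stream loop skips it)
print(koala_extract('event: done'))    # -> None (non-data lines are ignored)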