webscout-8.2.6-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (292)
  1. webscout/AIutel.py +97 -87
  2. webscout/version.py +1 -1
  3. {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/METADATA +2 -15
  4. webscout-8.2.7.dist-info/RECORD +26 -0
  5. {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
  6. webscout-8.2.7.dist-info/entry_points.txt +3 -0
  7. webscout-8.2.7.dist-info/top_level.txt +1 -0
  8. webscout/Extra/GitToolkit/__init__.py +0 -10
  9. webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
  10. webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
  11. webscout/Extra/GitToolkit/gitapi/user.py +0 -96
  12. webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
  13. webscout/Extra/YTToolkit/YTdownloader.py +0 -957
  14. webscout/Extra/YTToolkit/__init__.py +0 -3
  15. webscout/Extra/YTToolkit/transcriber.py +0 -476
  16. webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
  17. webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
  18. webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
  19. webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
  20. webscout/Extra/YTToolkit/ytapi/https.py +0 -88
  21. webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
  22. webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
  23. webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
  24. webscout/Extra/YTToolkit/ytapi/query.py +0 -40
  25. webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
  26. webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
  27. webscout/Extra/YTToolkit/ytapi/video.py +0 -232
  28. webscout/Extra/__init__.py +0 -7
  29. webscout/Extra/autocoder/__init__.py +0 -9
  30. webscout/Extra/autocoder/autocoder.py +0 -910
  31. webscout/Extra/autocoder/autocoder_utiles.py +0 -332
  32. webscout/Extra/gguf.py +0 -684
  33. webscout/Extra/tempmail/__init__.py +0 -28
  34. webscout/Extra/tempmail/async_utils.py +0 -141
  35. webscout/Extra/tempmail/base.py +0 -161
  36. webscout/Extra/tempmail/cli.py +0 -187
  37. webscout/Extra/tempmail/emailnator.py +0 -84
  38. webscout/Extra/tempmail/mail_tm.py +0 -361
  39. webscout/Extra/tempmail/temp_mail_io.py +0 -292
  40. webscout/Extra/weather.py +0 -194
  41. webscout/Extra/weather_ascii.py +0 -76
  42. webscout/Litlogger/__init__.py +0 -67
  43. webscout/Litlogger/core/__init__.py +0 -6
  44. webscout/Litlogger/core/level.py +0 -23
  45. webscout/Litlogger/core/logger.py +0 -165
  46. webscout/Litlogger/handlers/__init__.py +0 -12
  47. webscout/Litlogger/handlers/console.py +0 -33
  48. webscout/Litlogger/handlers/file.py +0 -143
  49. webscout/Litlogger/handlers/network.py +0 -173
  50. webscout/Litlogger/styles/__init__.py +0 -7
  51. webscout/Litlogger/styles/colors.py +0 -249
  52. webscout/Litlogger/styles/formats.py +0 -458
  53. webscout/Litlogger/styles/text.py +0 -87
  54. webscout/Litlogger/utils/__init__.py +0 -6
  55. webscout/Litlogger/utils/detectors.py +0 -153
  56. webscout/Litlogger/utils/formatters.py +0 -200
  57. webscout/Provider/AI21.py +0 -177
  58. webscout/Provider/AISEARCH/DeepFind.py +0 -250
  59. webscout/Provider/AISEARCH/ISou.py +0 -256
  60. webscout/Provider/AISEARCH/Perplexity.py +0 -359
  61. webscout/Provider/AISEARCH/__init__.py +0 -10
  62. webscout/Provider/AISEARCH/felo_search.py +0 -228
  63. webscout/Provider/AISEARCH/genspark_search.py +0 -208
  64. webscout/Provider/AISEARCH/hika_search.py +0 -198
  65. webscout/Provider/AISEARCH/iask_search.py +0 -436
  66. webscout/Provider/AISEARCH/monica_search.py +0 -246
  67. webscout/Provider/AISEARCH/scira_search.py +0 -322
  68. webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
  69. webscout/Provider/Aitopia.py +0 -316
  70. webscout/Provider/AllenAI.py +0 -447
  71. webscout/Provider/Andi.py +0 -228
  72. webscout/Provider/Blackboxai.py +0 -229
  73. webscout/Provider/ChatGPTClone.py +0 -237
  74. webscout/Provider/ChatGPTGratis.py +0 -194
  75. webscout/Provider/ChatSandbox.py +0 -342
  76. webscout/Provider/Cloudflare.py +0 -325
  77. webscout/Provider/Cohere.py +0 -208
  78. webscout/Provider/Deepinfra.py +0 -338
  79. webscout/Provider/ElectronHub.py +0 -773
  80. webscout/Provider/ExaAI.py +0 -261
  81. webscout/Provider/ExaChat.py +0 -358
  82. webscout/Provider/Free2GPT.py +0 -241
  83. webscout/Provider/GPTWeb.py +0 -249
  84. webscout/Provider/Gemini.py +0 -169
  85. webscout/Provider/GithubChat.py +0 -370
  86. webscout/Provider/GizAI.py +0 -285
  87. webscout/Provider/Glider.py +0 -222
  88. webscout/Provider/Groq.py +0 -801
  89. webscout/Provider/HF_space/__init__.py +0 -0
  90. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  91. webscout/Provider/HeckAI.py +0 -257
  92. webscout/Provider/HuggingFaceChat.py +0 -469
  93. webscout/Provider/Hunyuan.py +0 -283
  94. webscout/Provider/Jadve.py +0 -291
  95. webscout/Provider/Koboldai.py +0 -381
  96. webscout/Provider/LambdaChat.py +0 -411
  97. webscout/Provider/Llama3.py +0 -259
  98. webscout/Provider/MCPCore.py +0 -315
  99. webscout/Provider/Marcus.py +0 -206
  100. webscout/Provider/Nemotron.py +0 -218
  101. webscout/Provider/Netwrck.py +0 -270
  102. webscout/Provider/OLLAMA.py +0 -396
  103. webscout/Provider/OPENAI/__init__.py +0 -28
  104. webscout/Provider/OPENAI/ai4chat.py +0 -286
  105. webscout/Provider/OPENAI/base.py +0 -46
  106. webscout/Provider/OPENAI/c4ai.py +0 -367
  107. webscout/Provider/OPENAI/chatgpt.py +0 -549
  108. webscout/Provider/OPENAI/chatgptclone.py +0 -481
  109. webscout/Provider/OPENAI/deepinfra.py +0 -309
  110. webscout/Provider/OPENAI/e2b.py +0 -1350
  111. webscout/Provider/OPENAI/exaai.py +0 -404
  112. webscout/Provider/OPENAI/exachat.py +0 -437
  113. webscout/Provider/OPENAI/freeaichat.py +0 -352
  114. webscout/Provider/OPENAI/glider.py +0 -316
  115. webscout/Provider/OPENAI/groq.py +0 -354
  116. webscout/Provider/OPENAI/heckai.py +0 -341
  117. webscout/Provider/OPENAI/llmchatco.py +0 -327
  118. webscout/Provider/OPENAI/mcpcore.py +0 -376
  119. webscout/Provider/OPENAI/multichat.py +0 -368
  120. webscout/Provider/OPENAI/netwrck.py +0 -350
  121. webscout/Provider/OPENAI/opkfc.py +0 -488
  122. webscout/Provider/OPENAI/scirachat.py +0 -462
  123. webscout/Provider/OPENAI/sonus.py +0 -294
  124. webscout/Provider/OPENAI/standardinput.py +0 -425
  125. webscout/Provider/OPENAI/textpollinations.py +0 -329
  126. webscout/Provider/OPENAI/toolbaz.py +0 -406
  127. webscout/Provider/OPENAI/typegpt.py +0 -346
  128. webscout/Provider/OPENAI/uncovrAI.py +0 -455
  129. webscout/Provider/OPENAI/utils.py +0 -211
  130. webscout/Provider/OPENAI/venice.py +0 -413
  131. webscout/Provider/OPENAI/wisecat.py +0 -381
  132. webscout/Provider/OPENAI/writecream.py +0 -156
  133. webscout/Provider/OPENAI/x0gpt.py +0 -371
  134. webscout/Provider/OPENAI/yep.py +0 -327
  135. webscout/Provider/OpenGPT.py +0 -209
  136. webscout/Provider/Openai.py +0 -496
  137. webscout/Provider/PI.py +0 -429
  138. webscout/Provider/Perplexitylabs.py +0 -415
  139. webscout/Provider/QwenLM.py +0 -254
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/StandardInput.py +0 -290
  142. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  143. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  144. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  145. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  146. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  147. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  148. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  149. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  150. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  151. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  152. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  153. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  154. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  155. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  156. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  157. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  158. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  159. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  160. webscout/Provider/TTI/__init__.py +0 -12
  161. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  162. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  163. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  164. webscout/Provider/TTI/artbit/__init__.py +0 -22
  165. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  166. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  167. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  168. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  169. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  170. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  171. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  172. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  173. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  174. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  175. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  176. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  177. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  178. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/TTS/__init__.py +0 -8
  183. webscout/Provider/TTS/base.py +0 -159
  184. webscout/Provider/TTS/deepgram.py +0 -156
  185. webscout/Provider/TTS/elevenlabs.py +0 -111
  186. webscout/Provider/TTS/gesserit.py +0 -128
  187. webscout/Provider/TTS/murfai.py +0 -113
  188. webscout/Provider/TTS/parler.py +0 -111
  189. webscout/Provider/TTS/speechma.py +0 -180
  190. webscout/Provider/TTS/streamElements.py +0 -333
  191. webscout/Provider/TTS/utils.py +0 -280
  192. webscout/Provider/TeachAnything.py +0 -233
  193. webscout/Provider/TextPollinationsAI.py +0 -306
  194. webscout/Provider/TwoAI.py +0 -280
  195. webscout/Provider/TypliAI.py +0 -305
  196. webscout/Provider/Venice.py +0 -258
  197. webscout/Provider/VercelAI.py +0 -253
  198. webscout/Provider/WiseCat.py +0 -233
  199. webscout/Provider/WrDoChat.py +0 -370
  200. webscout/Provider/Writecream.py +0 -237
  201. webscout/Provider/WritingMate.py +0 -269
  202. webscout/Provider/Youchat.py +0 -330
  203. webscout/Provider/__init__.py +0 -178
  204. webscout/Provider/ai4chat.py +0 -203
  205. webscout/Provider/aimathgpt.py +0 -189
  206. webscout/Provider/akashgpt.py +0 -335
  207. webscout/Provider/asksteve.py +0 -212
  208. webscout/Provider/bagoodex.py +0 -145
  209. webscout/Provider/cerebras.py +0 -288
  210. webscout/Provider/chatglm.py +0 -215
  211. webscout/Provider/cleeai.py +0 -213
  212. webscout/Provider/copilot.py +0 -425
  213. webscout/Provider/elmo.py +0 -283
  214. webscout/Provider/freeaichat.py +0 -285
  215. webscout/Provider/geminiapi.py +0 -208
  216. webscout/Provider/geminiprorealtime.py +0 -160
  217. webscout/Provider/granite.py +0 -235
  218. webscout/Provider/hermes.py +0 -266
  219. webscout/Provider/julius.py +0 -223
  220. webscout/Provider/koala.py +0 -268
  221. webscout/Provider/learnfastai.py +0 -325
  222. webscout/Provider/llama3mitril.py +0 -215
  223. webscout/Provider/llmchat.py +0 -255
  224. webscout/Provider/llmchatco.py +0 -306
  225. webscout/Provider/meta.py +0 -798
  226. webscout/Provider/multichat.py +0 -364
  227. webscout/Provider/scira_chat.py +0 -297
  228. webscout/Provider/scnet.py +0 -243
  229. webscout/Provider/searchchat.py +0 -292
  230. webscout/Provider/sonus.py +0 -258
  231. webscout/Provider/talkai.py +0 -194
  232. webscout/Provider/toolbaz.py +0 -353
  233. webscout/Provider/turboseek.py +0 -266
  234. webscout/Provider/typefully.py +0 -330
  235. webscout/Provider/typegpt.py +0 -289
  236. webscout/Provider/uncovr.py +0 -368
  237. webscout/Provider/x0gpt.py +0 -299
  238. webscout/Provider/yep.py +0 -389
  239. webscout/litagent/__init__.py +0 -29
  240. webscout/litagent/agent.py +0 -455
  241. webscout/litagent/constants.py +0 -60
  242. webscout/litprinter/__init__.py +0 -59
  243. webscout/scout/__init__.py +0 -8
  244. webscout/scout/core/__init__.py +0 -7
  245. webscout/scout/core/crawler.py +0 -140
  246. webscout/scout/core/scout.py +0 -568
  247. webscout/scout/core/search_result.py +0 -96
  248. webscout/scout/core/text_analyzer.py +0 -63
  249. webscout/scout/core/text_utils.py +0 -277
  250. webscout/scout/core/web_analyzer.py +0 -52
  251. webscout/scout/core.py +0 -881
  252. webscout/scout/element.py +0 -460
  253. webscout/scout/parsers/__init__.py +0 -69
  254. webscout/scout/parsers/html5lib_parser.py +0 -172
  255. webscout/scout/parsers/html_parser.py +0 -236
  256. webscout/scout/parsers/lxml_parser.py +0 -178
  257. webscout/scout/utils.py +0 -37
  258. webscout/swiftcli/__init__.py +0 -95
  259. webscout/swiftcli/core/__init__.py +0 -7
  260. webscout/swiftcli/core/cli.py +0 -297
  261. webscout/swiftcli/core/context.py +0 -104
  262. webscout/swiftcli/core/group.py +0 -241
  263. webscout/swiftcli/decorators/__init__.py +0 -28
  264. webscout/swiftcli/decorators/command.py +0 -221
  265. webscout/swiftcli/decorators/options.py +0 -220
  266. webscout/swiftcli/decorators/output.py +0 -252
  267. webscout/swiftcli/exceptions.py +0 -21
  268. webscout/swiftcli/plugins/__init__.py +0 -9
  269. webscout/swiftcli/plugins/base.py +0 -135
  270. webscout/swiftcli/plugins/manager.py +0 -262
  271. webscout/swiftcli/utils/__init__.py +0 -59
  272. webscout/swiftcli/utils/formatting.py +0 -252
  273. webscout/swiftcli/utils/parsing.py +0 -267
  274. webscout/zeroart/__init__.py +0 -55
  275. webscout/zeroart/base.py +0 -60
  276. webscout/zeroart/effects.py +0 -99
  277. webscout/zeroart/fonts.py +0 -816
  278. webscout-8.2.6.dist-info/RECORD +0 -307
  279. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  280. webscout-8.2.6.dist-info/top_level.txt +0 -2
  281. webstoken/__init__.py +0 -30
  282. webstoken/classifier.py +0 -189
  283. webstoken/keywords.py +0 -216
  284. webstoken/language.py +0 -128
  285. webstoken/ner.py +0 -164
  286. webstoken/normalizer.py +0 -35
  287. webstoken/processor.py +0 -77
  288. webstoken/sentiment.py +0 -206
  289. webstoken/stemmer.py +0 -73
  290. webstoken/tagger.py +0 -60
  291. webstoken/tokenizer.py +0 -158
  292. {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/licenses/LICENSE.md +0 -0
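
A listing like the one above can be reproduced locally by downloading both wheels and comparing them file by file. Below is a minimal sketch using only the Python standard library; it assumes the two wheel files have already been fetched into the working directory (for example with pip download webscout==8.2.6 --no-deps, and the same for 8.2.7).

import difflib
import zipfile

# Assumed local filenames; fetch them first, e.g.:
#   pip download webscout==8.2.6 --no-deps
#   pip download webscout==8.2.7 --no-deps
OLD = "webscout-8.2.6-py3-none-any.whl"
NEW = "webscout-8.2.7-py3-none-any.whl"

with zipfile.ZipFile(OLD) as old, zipfile.ZipFile(NEW) as new:
    old_names, new_names = set(old.namelist()), set(new.namelist())
    print(f"deleted: {len(old_names - new_names)}, added: {len(new_names - old_names)}")
    # Unified diff for every file present in both wheels
    for name in sorted(old_names & new_names):
        a = old.read(name).decode("utf-8", "replace").splitlines(keepends=True)
        b = new.read(name).decode("utf-8", "replace").splitlines(keepends=True)
        for line in difflib.unified_diff(a, b, fromfile=name, tofile=name):
            print(line, end="")

Files present in only one wheel correspond to the added and deleted entries in the list above; files present in both produce unified-diff hunks like the two shown below.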
webscout/Provider/llama3mitril.py (deleted)
@@ -1,215 +0,0 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- from typing import Union, Any, Dict, Generator
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
-
-
- class Llama3Mitril(Provider):
-     """
-     A class to interact with the Llama3 Mitril API. Implements the WebScout provider interface.
-     """
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2048,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         system_prompt: str = "You are a helpful, respectful and honest assistant.",
-         temperature: float = 0.8,
-     ):
-         """Initializes the Llama3Mitril API."""
-         self.session = Session()
-         self.is_conversation = is_conversation
-         self.max_tokens = max_tokens
-         self.temperature = temperature
-         self.api_endpoint = "https://llama3.mithrilsecurity.io/generate_stream"
-         self.timeout = timeout
-         self.last_response = {}
-         self.system_prompt = system_prompt
-         self.headers = {
-             "Content-Type": "application/json",
-             "DNT": "1",
-         }
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies
-
-     def _format_prompt(self, prompt: str) -> str:
-         """Format the prompt for the Llama3 model"""
-         return (
-             f"<|begin_of_text|>"
-             f"<|start_header_id|>system<|end_header_id|>{self.system_prompt}<|eot_id|>"
-             f"<|start_header_id|>user<|end_header_id|>{prompt}<|eot_id|>"
-             f"<|start_header_id|>assistant<|end_header_id|><|eot_id|>"
-             f"<|start_header_id|>assistant<|end_header_id|>"
-         )
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = True,  # API supports streaming
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-         """Sends a prompt to the Llama3 Mitril API and returns the response."""
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         data = {
-             "inputs": self._format_prompt(conversation_prompt),
-             "parameters": {
-                 "max_new_tokens": self.max_tokens,
-                 "temperature": self.temperature,
-                 "return_full_text": False
-             }
-         }
-
-         def for_stream():
-             streaming_response = ""  # Initialize outside try block
-             try:
-                 # Use curl_cffi session post with impersonate
-                 response = self.session.post(
-                     self.api_endpoint,
-                     # headers are set on the session
-                     json=data,
-                     stream=True,
-                     timeout=self.timeout,
-                     # proxies are set on the session
-                     impersonate="chrome110"  # Use a common impersonation profile
-                 )
-                 response.raise_for_status()  # Check for HTTP errors
-
-                 # Iterate over bytes and decode manually
-                 for line_bytes in response.iter_lines():
-                     if line_bytes:
-                         try:
-                             line = line_bytes.decode('utf-8')
-                             if line.startswith('data: '):
-                                 chunk_str = line.split('data: ', 1)[1]
-                                 chunk = json.loads(chunk_str)
-                                 if token_text := chunk.get('token', {}).get('text'):
-                                     if '<|eot_id|>' not in token_text:
-                                         streaming_response += token_text
-                                         resp = {"text": token_text}
-                                         # Yield dict or raw string chunk
-                                         yield resp if not raw else token_text
-                         except (json.JSONDecodeError, IndexError, UnicodeDecodeError) as e:
-                             # Ignore errors in parsing specific lines
-                             continue
-
-                 # Update history after stream finishes
-                 self.last_response = {"text": streaming_response}
-                 self.conversation.update_chat_history(
-                     prompt, streaming_response
-                 )
-
-             except CurlError as e:  # Catch CurlError
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-             except Exception as e:  # Catch other potential exceptions (like HTTPError)
-                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                 raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
-
-         def for_non_stream():
-             # Aggregate the stream using the updated for_stream logic
-             full_response_text = ""
-             try:
-                 # Ensure raw=False so for_stream yields dicts
-                 for chunk_data in for_stream():
-                     if isinstance(chunk_data, dict) and "text" in chunk_data:
-                         full_response_text += chunk_data["text"]
-                     # Handle raw string case if raw=True was passed
-                     elif raw and isinstance(chunk_data, str):
-                         full_response_text += chunk_data
-             except Exception as e:
-                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                 if not full_response_text:
-                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-             # last_response and history are updated within for_stream
-             # Return the final aggregated response dict or raw string
-             return full_response_text if raw else self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = True,  # Default to True as API supports it
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Generates a response from the Llama3 Mitril API."""
-
-         def for_stream_chat():
-             # ask() yields dicts or strings when streaming
-             gen = self.ask(
-                 prompt, stream=True, raw=False,  # Ensure ask yields dicts
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             for response_dict in gen:
-                 yield self.get_message(response_dict)  # get_message expects dict
-
-         def for_non_stream_chat():
-             # ask() returns dict or str when not streaming
-             response_data = self.ask(
-                 prompt, stream=False, raw=False,  # Ensure ask returns dict
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             return self.get_message(response_data)  # get_message expects dict
-
-         return for_stream_chat() if stream else for_non_stream_chat()
-
-     def get_message(self, response: Dict[str, Any]) -> str:
-         """Extracts the message from the API response."""
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
-
- if __name__ == "__main__":
-     # Ensure curl_cffi is installed
-     from rich import print
-
-     ai = Llama3Mitril(
-         max_tokens=2048,
-         temperature=0.8,
-         timeout=30
-     )
-
-     for response in ai.chat("Hello", stream=True):
-         print(response, end="", flush=True)
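
The deleted provider above hard-codes the Llama 3 instruct chat template in _format_prompt. As a standalone illustration, here is a minimal sketch of that formatting; the function name is hypothetical, and only the special tokens are taken from the removed code. Note that the removed version emits an extra empty assistant turn (an assistant header immediately closed by <|eot_id|>) before the final open header, which looks redundant; the sketch uses the canonical single open turn.

def format_llama3_prompt(system_prompt: str, user_prompt: str) -> str:
    """Builds a Llama 3 instruct prompt: a system turn, a user turn,
    then an open assistant header for the model to complete."""
    return (
        "<|begin_of_text|>"
        f"<|start_header_id|>system<|end_header_id|>{system_prompt}<|eot_id|>"
        f"<|start_header_id|>user<|end_header_id|>{user_prompt}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>"
    )

print(format_llama3_prompt("You are a helpful assistant.", "Hello"))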
webscout/Provider/llmchat.py (deleted)
@@ -1,255 +0,0 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- from typing import Union, Any, Dict, Optional, Generator, List
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent as Lit
-
- class LLMChat(Provider):
-     """
-     A class to interact with the LLMChat API
-     """
-
-     AVAILABLE_MODELS = [
-         "@cf/meta/llama-3.1-70b-instruct",
-         "@cf/meta/llama-3.1-8b-instruct",
-         "@cf/meta/llama-3.2-3b-instruct",
-         "@cf/meta/llama-3.2-1b-instruct",
-         "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
-         "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2048,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "@cf/meta/llama-3.1-70b-instruct",
-         system_prompt: str = "You are a helpful assistant."
-     ):
-         """
-         Initializes the LLMChat API with given parameters.
-         """
-
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://llmchat.in/inference/stream"
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
-
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "*/*",
-             "Origin": "https://llmchat.in",
-             "Referer": "https://llmchat.in/"
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies  # Assign proxies directly
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:  # Corrected return type hint
-         """Chat with LLMChat with logging capabilities"""
-
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         url = f"{self.api_endpoint}?model={self.model}"
-         payload = {
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt}
-             ],
-             "max_tokens": self.max_tokens_to_sample,
-             "stream": True  # API seems to always stream based on endpoint name
-         }
-
-         def for_stream():
-             full_response = ""  # Initialize outside try block
-             try:
-                 # Use curl_cffi session post with impersonate
-                 response = self.session.post(
-                     url,
-                     json=payload,
-                     stream=True,
-                     timeout=self.timeout,
-                     impersonate="chrome110"  # Use a common impersonation profile
-                 )
-                 response.raise_for_status()  # Check for HTTP errors
-
-                 # Iterate over bytes and decode manually
-                 for line_bytes in response.iter_lines():
-                     if line_bytes:
-                         try:
-                             line = line_bytes.decode('utf-8')
-                             if line.startswith('data: '):
-                                 data_str = line[6:]
-                                 if data_str == '[DONE]':
-                                     break
-                                 try:
-                                     data = json.loads(data_str)
-                                     if data.get('response'):
-                                         response_text = data['response']
-                                         full_response += response_text
-                                         resp = dict(text=response_text)
-                                         # Yield dict or raw string chunk
-                                         yield resp if not raw else response_text
-                                 except json.JSONDecodeError:
-                                     continue  # Ignore invalid JSON data
-                         except UnicodeDecodeError:
-                             continue  # Ignore decoding errors
-
-                 # Update history after stream finishes
-                 self.last_response = dict(text=full_response)
-                 self.conversation.update_chat_history(
-                     prompt, full_response
-                 )
-
-             except CurlError as e:  # Catch CurlError
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-             except Exception as e:  # Catch other potential exceptions (like HTTPError)
-                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
-
-         def for_non_stream():
-             # Aggregate the stream using the updated for_stream logic
-             full_response_text = ""
-             try:
-                 # Ensure raw=False so for_stream yields dicts
-                 for chunk_data in for_stream():
-                     if isinstance(chunk_data, dict) and "text" in chunk_data:
-                         full_response_text += chunk_data["text"]
-                     # Handle raw string case if raw=True was passed
-                     elif raw and isinstance(chunk_data, str):
-                         full_response_text += chunk_data
-             except Exception as e:
-                 # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                 if not full_response_text:
-                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-             # last_response and history are updated within for_stream
-             # Return the final aggregated response dict or raw string
-             return full_response_text if raw else self.last_response
-
-
-         # Since the API endpoint suggests streaming, always call the stream generator.
-         # The non-stream wrapper will handle aggregation if stream=False.
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Generate response with logging capabilities"""
-
-         def for_stream_chat():
-             # ask() yields dicts or strings when streaming
-             gen = self.ask(
-                 prompt, stream=True, raw=False,  # Ensure ask yields dicts
-                 optimizer=optimizer, conversationally=conversationally
-             )
-             for response_dict in gen:
-                 yield self.get_message(response_dict)  # get_message expects dict
-
-         def for_non_stream_chat():
-             # ask() returns dict or str when not streaming
-             response_data = self.ask(
-                 prompt,
-                 stream=False,
-                 raw=False,  # Ensure ask returns dict
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-             )
-             return self.get_message(response_data)  # get_message expects dict
-
-         return for_stream_chat() if stream else for_non_stream_chat()
-
-     def get_message(self, response: Dict[str, Any]) -> str:
-         """Retrieves message from response with validation"""
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == "__main__":
-     # Ensure curl_cffi is installed
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     # Test all available models
-     working = 0
-     total = len(LLMChat.AVAILABLE_MODELS)
-
-     for model in LLMChat.AVAILABLE_MODELS:
-         try:
-             test_ai = LLMChat(model=model, timeout=60)
-             response = test_ai.chat("Say 'Hello' in one word", stream=True)
-             response_text = ""
-             for chunk in response:
-                 response_text += chunk
-                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"\r{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"\r{model:<50} {'✗':<10} {str(e)}")