webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281)
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Extra/gguf.py ADDED
@@ -0,0 +1,684 @@
+ """
+ Convert Hugging Face models to GGUF format with advanced features.
+ 
+ For detailed documentation, see: webscout/Extra/gguf.md
+ 
+ >>> python -m webscout.Extra.gguf convert -m "OEvortex/HelpingAI-Lite-1.5T" -q "q4_k_m,q5_k_m"
+ >>> # With upload options:
+ >>> python -m webscout.Extra.gguf convert -m "your-model" -u "username" -t "token" -q "q4_k_m"
+ >>> # With imatrix quantization:
+ >>> python -m webscout.Extra.gguf convert -m "your-model" -i -q "iq4_nl" --train-data "train_data.txt"
+ >>> # With model splitting:
+ >>> python -m webscout.Extra.gguf convert -m "your-model" -s --split-max-tensors 256
+ """
+ 
+ import subprocess
+ import os
+ import sys
+ import signal
+ import shutil
+ import tempfile
+ import platform
+ from pathlib import Path
+ from typing import Optional, Dict, List, TypedDict
+ 
+ from huggingface_hub import HfApi
+ from webscout.zeroart import figlet_format
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.table import Table
+ from ..swiftcli import CLI, option
+ 
+ console = Console()
+ 
+ class ConversionError(Exception):
+     """Custom exception for when things don't go as planned! ⚠️"""
+     pass
+ 
+ class QuantizationMethod(TypedDict):
+     """Type definition for quantization method descriptions."""
+     description: str
+ 
+ class ModelConverter:
+     """Handles the conversion of Hugging Face models to GGUF format."""
+ 
+     VALID_METHODS: Dict[str, str] = {
+         "fp16": "16-bit floating point - maximum accuracy, largest size",
+         "q2_k": "2-bit quantization (smallest size, lowest accuracy)",
+         "q3_k_l": "3-bit quantization (large) - balanced for size/accuracy",
+         "q3_k_m": "3-bit quantization (medium) - good balance for most use cases",
+         "q3_k_s": "3-bit quantization (small) - optimized for speed",
+         "q4_0": "4-bit quantization (version 0) - standard 4-bit compression",
+         "q4_1": "4-bit quantization (version 1) - improved accuracy over q4_0",
+         "q4_k_m": "4-bit quantization (medium) - balanced for most models",
+         "q4_k_s": "4-bit quantization (small) - optimized for speed",
+         "q5_0": "5-bit quantization (version 0) - high accuracy, larger size",
+         "q5_1": "5-bit quantization (version 1) - improved accuracy over q5_0",
+         "q5_k_m": "5-bit quantization (medium) - best balance for quality/size",
+         "q5_k_s": "5-bit quantization (small) - optimized for speed",
+         "q6_k": "6-bit quantization - highest accuracy, largest size",
+         "q8_0": "8-bit quantization - maximum accuracy, largest size"
+     }
+ 
+     VALID_IMATRIX_METHODS: Dict[str, str] = {
+         "iq3_m": "3-bit imatrix quantization (medium) - balanced importance-based",
+         "iq3_xxs": "3-bit imatrix quantization (extra extra small) - maximum compression",
+         "q4_k_m": "4-bit imatrix quantization (medium) - balanced importance-based",
+         "q4_k_s": "4-bit imatrix quantization (small) - optimized for speed",
+         "iq4_nl": "4-bit imatrix quantization (non-linear) - best accuracy for 4-bit",
+         "iq4_xs": "4-bit imatrix quantization (extra small) - maximum compression",
+         "q5_k_m": "5-bit imatrix quantization (medium) - balanced importance-based",
+         "q5_k_s": "5-bit imatrix quantization (small) - optimized for speed"
+     }
+ 
+     def __init__(
+         self,
+         model_id: str,
+         username: Optional[str] = None,
+         token: Optional[str] = None,
+         quantization_methods: str = "q4_k_m",
+         use_imatrix: bool = False,
+         train_data_file: Optional[str] = None,
+         split_model: bool = False,
+         split_max_tensors: int = 256,
+         split_max_size: Optional[str] = None
+     ) -> None:
+         self.model_id = model_id
+         self.username = username
+         self.token = token
+         self.quantization_methods = quantization_methods.split(',')
+         self.model_name = model_id.split('/')[-1]
+         self.workspace = Path(os.getcwd())
+         self.use_imatrix = use_imatrix
+         self.train_data_file = train_data_file
+         self.split_model = split_model
+         self.split_max_tensors = split_max_tensors
+         self.split_max_size = split_max_size
+         self.fp16_only = "fp16" in self.quantization_methods and len(self.quantization_methods) == 1
+ 
+     def validate_inputs(self) -> None:
+         """Validates all input parameters."""
+         if '/' not in self.model_id:
+             raise ValueError("Invalid model ID format. Expected format: 'organization/model-name'")
+ 
+         if self.use_imatrix:
+             invalid_methods = [m for m in self.quantization_methods if m not in self.VALID_IMATRIX_METHODS]
+             if invalid_methods:
+                 raise ValueError(
+                     f"Invalid imatrix quantization methods: {', '.join(invalid_methods)}.\n"
+                     f"Valid methods are: {', '.join(self.VALID_IMATRIX_METHODS.keys())}"
+                 )
+             if not self.train_data_file and not os.path.exists("llama.cpp/groups_merged.txt"):
+                 raise ValueError("Training data file is required for imatrix quantization")
+         else:
+             invalid_methods = [m for m in self.quantization_methods if m not in self.VALID_METHODS]
+             if invalid_methods:
+                 raise ValueError(
+                     f"Invalid quantization methods: {', '.join(invalid_methods)}.\n"
+                     f"Valid methods are: {', '.join(self.VALID_METHODS.keys())}"
+                 )
+ 
+         if bool(self.username) != bool(self.token):
+             raise ValueError("Both username and token must be provided for upload, or neither.")
+ 
+         if self.split_model and self.split_max_size:
+             try:
+                 size = int(self.split_max_size[:-1])
+                 unit = self.split_max_size[-1].upper()
+                 if unit not in ['M', 'G']:
+                     raise ValueError("Split max size must end with M or G")
+             except ValueError:
+                 raise ValueError("Invalid split max size format. Use format like '256M' or '5G'")
+ 
+     @staticmethod
+     def check_dependencies() -> Dict[str, bool]:
+         """Check if all required dependencies are installed."""
+         dependencies: Dict[str, str] = {
+             'git': 'Git version control',
+             'pip3': 'Python package installer',
+             'huggingface-cli': 'Hugging Face CLI',
+             'cmake': 'CMake build system',
+             'ninja': 'Ninja build system (optional)'
+         }
+ 
+         # shutil.which performs a portable PATH lookup for each executable
+         status: Dict[str, bool] = {}
+         for cmd in dependencies:
+             status[cmd] = shutil.which(cmd) is not None
+ 
+         return status
+ 
+     def detect_hardware(self) -> Dict[str, bool]:
+         """Detect available hardware acceleration."""
+         hardware: Dict[str, bool] = {
+             'cuda': False,
+             'metal': False,
+             'opencl': False,
+             'vulkan': False,
+             'rocm': False
+         }
+ 
+         # Check CUDA
+         try:
+             if subprocess.run(['nvcc', '--version'], capture_output=True).returncode == 0:
+                 hardware['cuda'] = True
+         except FileNotFoundError:
+             pass
+ 
+         # Check Metal (macOS)
+         if platform.system() == 'Darwin':
+             try:
+                 if subprocess.run(['xcrun', '--show-sdk-path'], capture_output=True).returncode == 0:
+                     hardware['metal'] = True
+             except FileNotFoundError:
+                 pass
+ 
+         # Check OpenCL
+         try:
+             if subprocess.run(['clinfo'], capture_output=True).returncode == 0:
+                 hardware['opencl'] = True
+         except FileNotFoundError:
+             pass
+ 
+         # Check Vulkan
+         try:
+             if subprocess.run(['vulkaninfo'], capture_output=True).returncode == 0:
+                 hardware['vulkan'] = True
+         except FileNotFoundError:
+             pass
+ 
+         # Check ROCm
+         try:
+             if subprocess.run(['rocm-smi'], capture_output=True).returncode == 0:
+                 hardware['rocm'] = True
+         except FileNotFoundError:
+             pass
+ 
+         return hardware
+ 
+     def setup_llama_cpp(self) -> None:
+         """Sets up and builds llama.cpp repository."""
+         llama_path = self.workspace / "llama.cpp"
+ 
+         with console.status("[bold green]Setting up llama.cpp...") as status:
+             # Clone llama.cpp if not exists
+             if not llama_path.exists():
+                 subprocess.run(['git', 'clone', 'https://github.com/ggerganov/llama.cpp'], check=True)
+ 
+             os.chdir(llama_path)
+ 
+             # Check if we're in a Nix environment
+             is_nix = platform.system() == "Linux" and os.path.exists("/nix/store")
+ 
+             if is_nix:
+                 console.print("[yellow]Detected Nix environment. Using system Python packages...")
+                 # In Nix, we need to use the system Python packages
+                 try:
+                     # Try to import required packages to check if they're available
+                     import torch  # type: ignore
+                     import numpy  # type: ignore
+                     import sentencepiece  # type: ignore
+                     import transformers  # type: ignore
+                     console.print("[green]Required Python packages are already installed.")
+                 except ImportError:
+                     console.print("[red]Missing required Python packages in Nix environment.")
+                     console.print("[yellow]Please install them using:")
+                     console.print("nix-shell -p python3Packages.torch python3Packages.numpy python3Packages.sentencepiece python3Packages.transformers")
+                     raise ConversionError("Missing required Python packages in Nix environment")
+             else:
+                 # In non-Nix environments, install requirements
+                 try:
+                     subprocess.run(['pip3', 'install', '-r', 'requirements.txt'],
+                                    check=True, capture_output=True, text=True)
+                 except subprocess.CalledProcessError as e:
+                     # pip reports externally managed environments on stderr
+                     if "externally-managed-environment" in (e.stderr or ""):
+                         console.print("[yellow]Detected externally managed Python environment.")
+                         console.print("[yellow]Please install the required packages manually:")
+                         console.print("pip install torch numpy sentencepiece transformers")
+                         raise ConversionError("Failed to install requirements in externally managed environment")
+                     raise
+ 
+             # Detect available hardware
+             hardware = self.detect_hardware()
+             console.print("[bold green]Detected hardware acceleration:")
+             for hw, available in hardware.items():
+                 console.print(f"  {'✓' if available else '✗'} {hw.upper()}")
+ 
+             # Configure CMake build
+             cmake_args: List[str] = ['cmake', '-B', 'build']
+ 
+             # Add hardware acceleration options
+             if hardware['cuda']:
+                 cmake_args.extend(['-DLLAMA_CUBLAS=ON'])
+             if hardware['metal']:
+                 cmake_args.extend(['-DLLAMA_METAL=ON'])
+             if hardware['opencl']:
+                 cmake_args.extend(['-DLLAMA_CLBLAST=ON'])
+             if hardware['vulkan']:
+                 cmake_args.extend(['-DLLAMA_VULKAN=ON'])
+             if hardware['rocm']:
+                 cmake_args.extend(['-DLLAMA_HIPBLAS=ON'])
+ 
+             # Use Ninja if available
+             if shutil.which('ninja'):
+                 cmake_args.extend(['-G', 'Ninja'])
+ 
+             # Configure the build
+             subprocess.run(cmake_args, check=True)
+ 
+             # Build the project
+             if any(hardware.values()):
+                 status.update("[bold green]Building with hardware acceleration...")
+             else:
+                 status.update("[bold yellow]Building for CPU only...")
+ 
+             subprocess.run(['cmake', '--build', 'build', '-j', str(os.cpu_count() or 1)], check=True)
+ 
+         os.chdir(self.workspace)
+ 
+     def display_config(self) -> None:
+         """Displays the current configuration in a formatted table."""
+         table = Table(title="Configuration", show_header=True, header_style="bold magenta")
+         table.add_column("Setting", style="cyan")
+         table.add_column("Value", style="green")
+ 
+         table.add_row("Model ID", self.model_id)
+         table.add_row("Model Name", self.model_name)
+         table.add_row("Username", self.username or "Not provided")
+         table.add_row("Token", "****" if self.token else "Not provided")
+         # Use the description table that matches the quantization mode, so
+         # imatrix methods such as iq4_nl do not raise a KeyError here
+         methods = self.VALID_IMATRIX_METHODS if self.use_imatrix else self.VALID_METHODS
+         table.add_row("Quantization Methods", "\n".join(
+             f"{method} ({methods.get(method, 'unknown method')})"
+             for method in self.quantization_methods
+         ))
+ 
+         console.print(Panel(table))
+ 
+     def generate_importance_matrix(self, model_path: str, train_data_path: str, output_path: str) -> None:
+         """Generates importance matrix for quantization."""
+         imatrix_command: List[str] = [
+             "./llama.cpp/build/bin/llama-imatrix",
+             "-m", model_path,
+             "-f", train_data_path,
+             "-ngl", "99",
+             "--output-frequency", "10",
+             "-o", output_path,
+         ]
+ 
+         if not os.path.isfile(model_path):
+             raise ConversionError(f"Model file not found: {model_path}")
+ 
+         console.print("[bold green]Generating importance matrix...")
+         process = subprocess.Popen(imatrix_command, shell=False)
+ 
+         try:
+             process.wait(timeout=60)
+         except subprocess.TimeoutExpired:
+             console.print("[yellow]Imatrix computation timed out. Sending SIGINT...")
+             process.send_signal(signal.SIGINT)
+             try:
+                 process.wait(timeout=5)
+             except subprocess.TimeoutExpired:
+                 console.print("[red]Imatrix process still running. Force terminating...")
+                 process.kill()
+ 
+         if process.returncode != 0:
+             raise ConversionError("Failed to generate importance matrix")
+ 
+         console.print("[green]Importance matrix generation completed.")
+ 
+     def split_model_file(self, model_path: str, outdir: str) -> List[str]:
+         """Splits the model into smaller chunks."""
+         split_cmd: List[str] = [
+             "./llama.cpp/build/bin/llama-gguf-split",
+             "--split",
+         ]
+ 
+         if self.split_max_size:
+             split_cmd.extend(["--split-max-size", self.split_max_size])
+         else:
+             split_cmd.extend(["--split-max-tensors", str(self.split_max_tensors)])
+ 
+         model_path_prefix = '.'.join(model_path.split('.')[:-1])
+         split_cmd.extend([model_path, model_path_prefix])
+ 
+         console.print(f"[bold green]Splitting model with command: {' '.join(split_cmd)}")
+ 
+         result = subprocess.run(split_cmd, shell=False, capture_output=True, text=True)
+ 
+         if result.returncode != 0:
+             raise ConversionError(f"Error splitting model: {result.stderr}")
+ 
+         console.print("[green]Model split successfully!")
+ 
+         # Get list of split files
+         model_file_prefix = model_path_prefix.split('/')[-1]
+         split_files = [f for f in os.listdir(outdir)
+                        if f.startswith(model_file_prefix) and f.endswith(".gguf")]
+ 
+         if not split_files:
+             raise ConversionError("No split files found")
+ 
+         return split_files
+ 
+     def upload_split_files(self, split_files: List[str], outdir: str, repo_id: str) -> None:
+         """Uploads split model files to Hugging Face."""
+         api = HfApi(token=self.token)
+ 
+         for file in split_files:
+             file_path = os.path.join(outdir, file)
+             console.print(f"[bold green]Uploading file: {file}")
+             try:
+                 api.upload_file(
+                     path_or_fileobj=file_path,
+                     path_in_repo=file,
+                     repo_id=repo_id,
+                 )
+             except Exception as e:
+                 raise ConversionError(f"Error uploading file {file}: {e}")
+ 
+     def generate_readme(self, quantized_files: List[str]) -> str:
+         """Generate a README.md file for the Hugging Face Hub."""
+         readme = f"""# {self.model_name} GGUF
+ 
+ This repository contains GGUF quantized versions of [{self.model_id}](https://huggingface.co/{self.model_id}).
+ 
+ ## About
+ 
+ This model was converted using [Webscout](https://github.com/Webscout/webscout).
+ 
+ ## Quantization Methods
+ 
+ The following quantization methods were used:
+ 
+ """
+         # Add quantization method descriptions
+         for method in self.quantization_methods:
+             if self.use_imatrix:
+                 readme += f"- `{method}`: {self.VALID_IMATRIX_METHODS[method]}\n"
+             else:
+                 readme += f"- `{method}`: {self.VALID_METHODS[method]}\n"
+ 
+         readme += """
+ ## Available Files
+ 
+ The following quantized files are available:
+ 
+ """
+         # Add file information
+         for file in quantized_files:
+             readme += f"- `{file}`\n"
+ 
+         if self.use_imatrix:
+             readme += """
+ ## Importance Matrix
+ 
+ This model was quantized using importance matrix quantization. The `imatrix.dat` file contains the importance matrix used for quantization.
+ 
+ """
+ 
+         # f-string, so the command below is rendered with the actual options used
+         readme += f"""
+ ## Usage
+ 
+ These GGUF files can be used with [llama.cpp](https://github.com/ggerganov/llama.cpp) and compatible tools.
+ 
+ Example usage:
+ ```bash
+ ./main -m model.gguf -n 1024 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt
+ ```
+ 
+ ## Conversion Process
+ 
+ This model was converted using the following command:
+ ```bash
+ python -m webscout.Extra.gguf convert \\
+ -m "{self.model_id}" \\
+ -q "{','.join(self.quantization_methods)}" \\
+ {'-i' if self.use_imatrix else ''} \\
+ {f'--train-data "{self.train_data_file}"' if self.train_data_file else ''} \\
+ {'-s' if self.split_model else ''} \\
+ {f'--split-max-tensors {self.split_max_tensors}' if self.split_model else ''} \\
+ {f'--split-max-size {self.split_max_size}' if self.split_max_size else ''}
+ ```
+ 
+ ## License
+ 
+ This repository is licensed under the same terms as the original model.
+ """
+         return readme
+ 
+     def upload_readme(self, readme_content: str, repo_id: str) -> None:
+         """Upload README.md to Hugging Face Hub."""
+         api = HfApi(token=self.token)
+         try:
+             api.upload_file(
+                 path_or_fileobj=readme_content.encode(),
+                 path_in_repo="README.md",
+                 repo_id=repo_id,
+             )
+             console.print("[green]README.md uploaded successfully!")
+         except Exception as e:
+             console.print(f"[yellow]Warning: Failed to upload README.md: {e}")
+ 
+     def convert(self) -> None:
+         """Performs the model conversion process."""
+         try:
+             # Display banner and configuration
+             console.print(f"[bold green]{figlet_format('GGUF Converter')}")
+             self.display_config()
+ 
+             # Validate inputs
+             self.validate_inputs()
+ 
+             # Check dependencies
+             deps = self.check_dependencies()
+             missing = [name for name, installed in deps.items() if not installed and name != 'ninja']
+             if missing:
+                 raise ConversionError(f"Missing required dependencies: {', '.join(missing)}")
+ 
+             # Setup llama.cpp
+             self.setup_llama_cpp()
+ 
+             # Determine if we need temporary directories (only for uploads)
+             needs_temp = bool(self.username and self.token)
+ 
+             if needs_temp:
+                 # Use temporary directories for upload case
+                 with tempfile.TemporaryDirectory() as outdir:
+                     with tempfile.TemporaryDirectory() as tmpdir:
+                         self._convert_with_dirs(tmpdir, outdir)
+             else:
+                 # Use current directory for local output
+                 outdir = os.getcwd()
+                 tmpdir = os.path.join(outdir, "temp_download")
+                 os.makedirs(tmpdir, exist_ok=True)
+                 try:
+                     self._convert_with_dirs(tmpdir, outdir)
+                 finally:
+                     # Clean up temporary download directory
+                     shutil.rmtree(tmpdir, ignore_errors=True)
+ 
+             # Display success message
+             console.print(Panel.fit(
+                 "[bold green]✓[/] Conversion completed successfully!\n\n"
+                 f"[cyan]Output files can be found in: {self.workspace / self.model_name}[/]",
+                 title="Success",
+                 border_style="green"
+             ))
+ 
+         except Exception as e:
+             console.print(Panel.fit(
+                 f"[bold red]✗[/] {str(e)}",
+                 title="Error",
+                 border_style="red"
+             ))
+             raise
+ 
+     def _convert_with_dirs(self, tmpdir: str, outdir: str) -> None:
+         """Helper method to perform conversion with given directories."""
+         fp16 = str(Path(outdir)/f"{self.model_name}.fp16.gguf")
+ 
+         # Download model
+         local_dir = Path(tmpdir)/self.model_name
+         console.print("[bold green]Downloading model...")
+         api = HfApi(token=self.token)
+         api.snapshot_download(
+             repo_id=self.model_id,
+             local_dir=local_dir,
+             local_dir_use_symlinks=False
+         )
+ 
+         # Convert to fp16
+         console.print("[bold green]Converting to fp16...")
+         result = subprocess.run([
+             "python", "llama.cpp/convert_hf_to_gguf.py",
+             str(local_dir),
+             "--outtype", "f16",
+             "--outfile", fp16
+         ], capture_output=True, text=True)
+ 
+         if result.returncode != 0:
+             raise ConversionError(f"Error converting to fp16: {result.stderr}")
+ 
+         # If fp16_only is True, we're done after fp16 conversion
+         if self.fp16_only:
+             quantized_files = [f"{self.model_name}.fp16.gguf"]
+             if self.username and self.token:
+                 api.upload_file(
+                     path_or_fileobj=fp16,
+                     path_in_repo=f"{self.model_name}.fp16.gguf",
+                     repo_id=f"{self.username}/{self.model_name}-GGUF"
+                 )
+             return
+ 
+         # Generate importance matrix if needed
+         imatrix_path: Optional[str] = None
+         if self.use_imatrix:
+             train_data_path = self.train_data_file if self.train_data_file else "llama.cpp/groups_merged.txt"
+             imatrix_path = str(Path(outdir)/"imatrix.dat")
+             self.generate_importance_matrix(fp16, train_data_path, imatrix_path)
+ 
+         # Quantize model
+         console.print("[bold green]Quantizing model...")
+         quantized_files: List[str] = []
+         for method in self.quantization_methods:
+             quantized_name = f"{self.model_name.lower()}-{method.lower()}"
+             if self.use_imatrix:
+                 quantized_name += "-imat"
+             quantized_path = str(Path(outdir)/f"{quantized_name}.gguf")
+ 
+             if self.use_imatrix:
+                 quantize_cmd: List[str] = [
+                     "./llama.cpp/build/bin/llama-quantize",
+                     "--imatrix", imatrix_path,
+                     fp16, quantized_path, method
+                 ]
+             else:
+                 quantize_cmd = [
+                     "./llama.cpp/build/bin/llama-quantize",
+                     fp16, quantized_path, method
+                 ]
+ 
+             result = subprocess.run(quantize_cmd, capture_output=True, text=True)
+             if result.returncode != 0:
+                 raise ConversionError(f"Error quantizing with {method}: {result.stderr}")
+ 
+             quantized_files.append(f"{quantized_name}.gguf")
+ 
+         # Split model if requested
+         if self.split_model:
+             split_files = self.split_model_file(quantized_path, outdir)
+             if self.username and self.token:
+                 self.upload_split_files(split_files, outdir, f"{self.username}/{self.model_name}-GGUF")
+         else:
+             # Upload single file if credentials provided
+             if self.username and self.token:
+                 api.upload_file(
+                     path_or_fileobj=quantized_path,
+                     path_in_repo=f"{self.model_name.lower()}-{self.quantization_methods[0].lower()}.gguf",
+                     repo_id=f"{self.username}/{self.model_name}-GGUF"
+                 )
+ 
+         # Upload imatrix if generated and credentials provided
+         if imatrix_path and self.username and self.token:
+             api.upload_file(
+                 path_or_fileobj=imatrix_path,
+                 path_in_repo="imatrix.dat",
+                 repo_id=f"{self.username}/{self.model_name}-GGUF"
+             )
+ 
+         # Generate and upload README if credentials provided
+         if self.username and self.token:
+             readme_content = self.generate_readme(quantized_files)
+             self.upload_readme(readme_content, f"{self.username}/{self.model_name}-GGUF")
+ 
+ # Initialize CLI with HAI vibes
+ app = CLI(
+     name="gguf",
+     help="Convert HuggingFace models to GGUF format with style! 🔥",
+     version="1.0.0"
+ )
+ 
+ @app.command(name="convert")
+ @option("-m", "--model-id", help="The HuggingFace model ID (e.g., 'OEvortex/HelpingAI-Lite-1.5T')", required=True)
+ @option("-u", "--username", help="Your HuggingFace username for uploads", default=None)
+ @option("-t", "--token", help="Your HuggingFace API token for uploads", default=None)
+ @option("-q", "--quantization", help="Comma-separated quantization methods", default="q4_k_m")
+ @option("-i", "--use-imatrix", help="Use importance matrix for quantization", is_flag=True)
+ @option("--train-data", help="Training data file for imatrix quantization", default=None)
+ @option("-s", "--split-model", help="Split the model into smaller chunks", is_flag=True)
+ @option("--split-max-tensors", help="Maximum number of tensors per file when splitting", default=256)
+ @option("--split-max-size", help="Maximum file size when splitting (e.g., '256M', '5G')", default=None)
+ def convert_command(
+     model_id: str,
+     username: Optional[str] = None,
+     token: Optional[str] = None,
+     quantization: str = "q4_k_m",
+     use_imatrix: bool = False,
+     train_data: Optional[str] = None,
+     split_model: bool = False,
+     split_max_tensors: int = 256,
+     split_max_size: Optional[str] = None
+ ) -> None:
+     """
+     Convert and quantize HuggingFace models to GGUF format! 🚀
+ 
+     Args:
+         model_id (str): Your model's HF ID (like 'OEvortex/HelpingAI-Lite-1.5T') 🎯
+         username (str, optional): Your HF username for uploads 👤
+         token (str, optional): Your HF API token 🔑
+         quantization (str): Quantization methods (default: q4_k_m) 🎮
+         use_imatrix (bool): Use importance matrix for quantization 🔍
+         train_data (str, optional): Training data file for imatrix quantization 📚
+         split_model (bool): Split the model into smaller chunks 🔪
+         split_max_tensors (int): Max tensors per file when splitting (default: 256) 📊
+         split_max_size (str, optional): Max file size when splitting (e.g., '256M', '5G') 📏
+ 
+     Example:
+         >>> python -m webscout.Extra.gguf convert \\
+         ...     -m "OEvortex/HelpingAI-Lite-1.5T" \\
+         ...     -q "q4_k_m,q5_k_m"
+     """
+     try:
+         converter = ModelConverter(
+             model_id=model_id,
+             username=username,
+             token=token,
+             quantization_methods=quantization,
+             use_imatrix=use_imatrix,
+             train_data_file=train_data,
+             split_model=split_model,
+             split_max_tensors=split_max_tensors,
+             split_max_size=split_max_size
+         )
+         converter.convert()
+     except (ConversionError, ValueError) as e:
+         console.print(f"[red]Error: {str(e)}")
+         sys.exit(1)
+     except Exception as e:
+         console.print(f"[red]Unexpected error: {str(e)}")
+         sys.exit(1)
+ 
+ def main() -> None:
+     """Fire up the GGUF converter! 🚀"""
+     app.run()
+ 
+ if __name__ == "__main__":
+     main()
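
Because gguf.py exposes `ModelConverter` as an ordinary class, the converter can also be driven from Python rather than through the CLI entry point. Below is a minimal sketch, assuming the class as added above and a Unix machine with git and cmake on the PATH (the converter clones and builds llama.cpp in the working directory on first run); without a username/token pair the quantized files are written to the current directory. The driver script itself is hypothetical, not part of the package.

```python
# Hypothetical driver script -- it only uses the ModelConverter API
# defined in webscout/Extra/gguf.py above.
from webscout.Extra.gguf import ConversionError, ModelConverter

converter = ModelConverter(
    model_id="OEvortex/HelpingAI-Lite-1.5T",  # any HF repo in 'org/model-name' form
    quantization_methods="q4_k_m,q5_k_m",     # comma-separated, checked against VALID_METHODS
)

try:
    # Downloads the repo, converts to fp16, then runs llama-quantize per method.
    converter.convert()
except (ConversionError, ValueError) as exc:
    print(f"Conversion failed: {exc}")
```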