webscout-7.3-py3-none-any.whl → webscout-7.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (62)
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 7.3
3
+ Version: 7.5
4
4
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
@@ -27,10 +27,12 @@ License-File: LICENSE.md
27
27
  Requires-Dist: setuptools
28
28
  Requires-Dist: wheel
29
29
  Requires-Dist: pip
30
+ Requires-Dist: nodriver
30
31
  Requires-Dist: mistune
31
32
  Requires-Dist: tenacity
32
33
  Requires-Dist: curl-cffi
33
34
  Requires-Dist: nest-asyncio
35
+ Requires-Dist: websocket-client
34
36
  Requires-Dist: rich
35
37
  Requires-Dist: markdownify
36
38
  Requires-Dist: requests
@@ -41,7 +43,7 @@ Requires-Dist: orjson
41
43
  Requires-Dist: PyYAML
42
44
  Requires-Dist: tls-client
43
45
  Requires-Dist: clipman
44
- Requires-Dist: playsound==1.2.2
46
+ Requires-Dist: playsound==1.3.0
45
47
  Requires-Dist: ollama
46
48
  Requires-Dist: pillow
47
49
  Requires-Dist: bson
@@ -55,25 +57,16 @@ Requires-Dist: primp
55
57
  Requires-Dist: pyreqwest-impersonate
56
58
  Requires-Dist: gradio-client
57
59
  Requires-Dist: psutil
58
- Requires-Dist: pygetwindow
59
60
  Requires-Dist: aiohttp
60
61
  Provides-Extra: dev
61
62
  Requires-Dist: ruff>=0.1.6; extra == "dev"
62
63
  Requires-Dist: pytest>=7.4.2; extra == "dev"
63
- Provides-Extra: local
64
- Requires-Dist: llama-cpp-python; extra == "local"
65
- Requires-Dist: colorama; extra == "local"
66
- Requires-Dist: numpy; extra == "local"
67
- Requires-Dist: huggingface-hub[cli]; extra == "local"
68
- Requires-Dist: unicorn; extra == "local"
69
64
 
70
-
71
- [![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/official_helpingai)
65
+ [![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/PyscoutAI)
72
66
  [![Instagram](https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white)](https://www.instagram.com/oevortex/)
73
67
  [![LinkedIn](https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/oe-vortex-29a407265/)
74
68
  [![Buy Me A Coffee](https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black)](https://buymeacoffee.com/oevortex)
75
69
 
76
-
77
70
  <div align="center">
78
71
  <a href="https://youtube.com/@OEvortex">▶️ Vortex’s YouTube Channel</a> &bull;
79
72
  <a href="https://youtube.com/@devsdocode">▶️ Devs Do Code’s YouTube Channel</a> &bull;
@@ -85,7 +78,7 @@ Requires-Dist: unicorn; extra == "local"
85
78
  <p align="center">
86
79
  <strong>Webscout</strong> is the all-in-one search and AI toolkit you need.
87
80
  <br>
88
- Discover insights with Yep.com, DuckDuckGo, and Phind; access cutting-edge AI models; transcribe YouTube videos; generate temporary emails and phone numbers; perform text-to-speech conversions; run offline language models; and much more!
81
+ Discover insights with Yep.com, DuckDuckGo, and Phind; access cutting-edge AI models; transcribe YouTube videos; generate temporary emails and phone numbers; perform text-to-speech conversions; and much more!
89
82
  </p>
90
83
 
91
84
  <div align="center">
@@ -97,14 +90,13 @@ Requires-Dist: unicorn; extra == "local"
97
90
  ---
98
91
 
99
92
  ## 🚀 Features
93
+
100
94
  * **Comprehensive Search:** Leverage Google, DuckDuckGo for diverse search results.
101
95
  * **AI Powerhouse:** Access and interact with various AI models, including OpenAI, Cohere, and more.
102
96
  * **[YouTube Toolkit](webscout/Extra/YTToolkit):** Advanced YouTube video and transcript management with multi-language support, versatile downloading, and intelligent data extraction
97
+ * **[GitAPI](webscout/Extra/GitToolkit/gitapi):** Powerful GitHub data extraction toolkit for seamless repository and user information retrieval, featuring commit tracking, issue management, and comprehensive user analytics - all without authentication requirements for public data
103
98
  * **Tempmail & Temp Number:** Generate temporary email addresses and phone numbers for enhanced privacy.
104
99
  * **[Text-to-Speech (TTS)](webscout/Provider/TTS/README.md):** Convert text into natural-sounding speech using multiple AI-powered providers like ElevenLabs, StreamElements, and Voicepods.
105
- * **Offline LLMs:** Utilize powerful language models offline with GGUF support.
106
- * **Extensive Provider Ecosystem:** Explore a vast collection of AI providers
107
- * **Local LLM Execution:** Run GGUF models locally with minimal configuration.
108
100
  * **GGUF Conversion & Quantization:** Convert and quantize Hugging Face models to GGUF format.
109
101
  * **Autollama:** Download Hugging Face models and automatically convert them for Ollama compatibility.
110
102
  * **[SwiftCLI](webscout/swiftcli/Readme.md):** A powerful and elegant CLI framework that makes it easy to create beautiful command-line interfaces.
@@ -113,8 +105,10 @@ Requires-Dist: unicorn; extra == "local"
113
105
  * **[LitAgent](webscout/litagent/Readme.md):** Powerful and modern user agent generator that keeps your requests fresh and undetectable
114
106
  * **[Text-to-Image](webscout/Provider/TTI/README.md):** Generate high-quality images using a wide range of AI art providers
115
107
  * **[Scout](webscout/scout/README.md):** Advanced web parsing and crawling library with intelligent HTML/XML parsing, web crawling, and Markdown conversion
108
+ * **[AISearch](webscout/Provider/AISEARCH/README.md):** AI Search Providers offer powerful and flexible AI-powered search engines
116
109
 
117
110
  ## ⚙️ Installation
111
+
118
112
  ```python
119
113
  pip install -U webscout
120
114
  ```
@@ -134,12 +128,13 @@ python -m webscout --help
134
128
  | python -m webscout suggestions -k Text | CLI function to perform a suggestions search using Webscout. |
135
129
  | python -m webscout text -k Text | CLI function to perform a text search using Webscout. |
136
130
  | python -m webscout translate -k Text | CLI function to perform translate using Webscout. |
137
- | python -m webscout version | A command-line interface command that prints and returns the version of the program. |
131
+ | python -m webscout version | A command-line interface command that prints and returns the version of the program. |
138
132
  | python -m webscout videos -k Text | CLI function to perform a videos search using DuckDuckGo API. |
139
133
 
140
- [Go To TOP](#webscout-️)
134
+ [Go To TOP](#webscout-️)
141
135
 
142
136
  ## 🌍 Regions
137
+
143
138
  <details>
144
139
  <summary>Expand</summary>
145
140
 
@@ -212,17 +207,14 @@ python -m webscout --help
212
207
  vn-vi for Vietnam
213
208
  wt-wt for No region
214
209
 
215
-
216
210
  </details>
217
211
 
218
-
219
212
  [Go To TOP](#webscout-️)
220
213
 
221
-
222
-
223
214
  ## ☀️ Weather
224
215
 
225
- ### 1. Weather
216
+ ### 1. Weather
217
+
226
218
  ```python
227
219
  from webscout import weather as w
228
220
  weather = w.get("Qazigund")
@@ -230,6 +222,7 @@ print(weather)
230
222
  ```
231
223
 
232
224
  ### 2. Weather ASCII
225
+
233
226
  ```python
234
227
  from webscout import weather_ascii as w
235
228
  weather = w.get("Qazigund")
@@ -323,7 +316,6 @@ for result in results:
323
316
  print(result)
324
317
  ```
325
318
 
326
-
327
319
  ## 🦆 WEBS and AsyncWEBS
328
320
 
329
321
  The `WEBS` and `AsyncWEBS` classes are used to retrieve search results from DuckDuckGo.com.
@@ -393,7 +385,7 @@ await main()
393
385
 
394
386
  ## 💻 Usage of WEBS
395
387
 
396
- ### 1. `text()` - Text Search by DuckDuckGo.com
388
+ ### 1. `text()` - Text Search by DuckDuckGo.com
397
389
 
398
390
  ```python
399
391
  from webscout import WEBS
@@ -407,7 +399,7 @@ with WEBS() as WEBS:
407
399
  print(r)
408
400
  ```
409
401
 
410
- ### 2. `answers()` - Instant Answers by DuckDuckGo.com
402
+ ### 2. `answers()` - Instant Answers by DuckDuckGo.com
411
403
 
412
404
  ```python
413
405
  from webscout import WEBS
@@ -418,7 +410,7 @@ with WEBS() as WEBS:
418
410
  print(r)
419
411
  ```
420
412
 
421
- ### 3. `images()` - Image Search by DuckDuckGo.com
413
+ ### 3. `images()` - Image Search by DuckDuckGo.com
422
414
 
423
415
  ```python
424
416
  from webscout import WEBS
@@ -440,7 +432,7 @@ with WEBS() as WEBS:
440
432
  print(r)
441
433
  ```
442
434
 
443
- ### 4. `videos()` - Video Search by DuckDuckGo.com
435
+ ### 4. `videos()` - Video Search by DuckDuckGo.com
444
436
 
445
437
  ```python
446
438
  from webscout import WEBS
@@ -461,7 +453,7 @@ with WEBS() as WEBS:
461
453
  print(r)
462
454
  ```
463
455
 
464
- ### 5. `news()` - News Search by DuckDuckGo.com
456
+ ### 5. `news()` - News Search by DuckDuckGo.com
465
457
 
466
458
  ```python
467
459
  from webscout import WEBS
@@ -554,19 +546,17 @@ with WEBS() as webs:
554
546
 
555
547
  ```
556
548
 
557
-
558
-
559
- ## ALL Acts
549
+ ## ALL Acts
560
550
 
561
551
  <details>
562
552
  <summary>Expand</summary>
563
553
 
564
- ## Webscout Supported Acts:
554
+ ## Webscout Supported Acts
565
555
 
566
556
  1. Free-mode
567
557
  2. Linux Terminal
568
558
  3. English Translator and Improver
569
- 4. `position` Interviewer
559
+ 4. `position` Interviewer
570
560
  5. JavaScript Console
571
561
  6. Excel Sheet
572
562
  7. English Pronunciation Helper
@@ -724,7 +714,7 @@ with WEBS() as webs:
724
714
  159. Wikipedia page
725
715
  160. Japanese Kanji quiz machine
726
716
  161. note-taking assistant
727
- 162. `language` Literary Critic
717
+ 162. `language` Literary Critic
728
718
  163. Cheap Travel Ticket Advisor
729
719
  164. DALL-E
730
720
  165. MathBot
@@ -749,7 +739,7 @@ with WEBS() as webs:
749
739
  184. Hypothetical response
750
740
  185. BH
751
741
  186. Text Continuation
752
- 187. Dude v3
742
+ 187. Dude v3
753
743
  188. SDA (Superior DAN)
754
744
  189. AntiGPT
755
745
  190. BasedGPT v2
@@ -798,19 +788,19 @@ with WEBS() as webs:
798
788
  233. LiveGPT
799
789
  234. DAN Jailbreak
800
790
  235. Cooper
801
- 236. Steve
791
+ 236. Steve
802
792
  237. DAN 5.0
803
793
  238. Axies
804
794
  239. OMNI
805
795
  240. Burple
806
- 241. JOHN
796
+ 241. JOHN
807
797
  242. An Ethereum Developer
808
798
  243. SEO Prompt
809
799
  244. Prompt Enhancer
810
800
  245. Data Scientist
811
801
  246. League of Legends Player
812
802
 
813
- **Note:** Some "acts" use placeholders like `position` or `language` which should be replaced with a specific value when using the prompt.
803
+ **Note:** Some "acts" use placeholders like `position` or `language` which should be replaced with a specific value when using the prompt.
814
804
  ___
815
805
  </details>
816
806
 
@@ -904,7 +894,7 @@ for chunk in response:
904
894
 
905
895
  ```
906
896
 
907
- ### ⬛ `BlackBox` - Search/Chat with BlackBox
897
+ ### ⬛ `BlackBox` - Search/Chat with BlackBox
908
898
 
909
899
  ```python
910
900
  from webscout import BLACKBOXAI
@@ -931,8 +921,7 @@ r = ai.chat(prompt)
931
921
  print(r)
932
922
  ```
933
923
 
934
-
935
- ### 🤖 `Meta AI` - Chat with Meta AI
924
+ ### 🤖 `Meta AI` - Chat with Meta AI
936
925
 
937
926
  ```python
938
927
  from webscout import Meta
@@ -966,7 +955,7 @@ for media in response["media"]:
966
955
 
967
956
  ```
968
957
 
969
- ### `KOBOLDAI`
958
+ ### `KOBOLDAI`
970
959
 
971
960
  ```python
972
961
  from webscout import KOBOLDAI
@@ -986,7 +975,7 @@ print(message)
986
975
 
987
976
  ```
988
977
 
989
- ### `Reka` - Chat with Reka
978
+ ### `Reka` - Chat with Reka
990
979
 
991
980
  ```python
992
981
  from webscout import REKA
@@ -998,7 +987,7 @@ response_str = a.chat(prompt)
998
987
  print(response_str)
999
988
  ```
1000
989
 
1001
- ### `Cohere` - Chat with Cohere
990
+ ### `Cohere` - Chat with Cohere
1002
991
 
1003
992
  ```python
1004
993
  from webscout import Cohere
@@ -1010,7 +999,7 @@ response_str = a.chat(prompt)
1010
999
  print(response_str)
1011
1000
  ```
1012
1001
 
1013
- ### `Deepinfra`
1002
+ ### `Deepinfra`
1014
1003
 
1015
1004
  ```python
1016
1005
  from webscout import DeepInfra
@@ -1037,8 +1026,7 @@ message = ai.get_message(response)
1037
1026
  print(message)
1038
1027
  ```
1039
1028
 
1040
-
1041
- ### `GROQ`
1029
+ ### `GROQ`
1042
1030
 
1043
1031
  ```python
1044
1032
  from webscout import GROQ
@@ -1125,7 +1113,7 @@ print(response_search)
1125
1113
 
1126
1114
  ```
1127
1115
 
1128
- ### `LLama 70b` - Chat with Meta's Llama 3 70b
1116
+ ### `LLama 70b` - Chat with Meta's Llama 3 70b
1129
1117
 
1130
1118
  ```python
1131
1119
 
@@ -1137,7 +1125,7 @@ r = llama.chat("What is the meaning of life?")
1137
1125
  print(r)
1138
1126
  ```
1139
1127
 
1140
- ### `AndiSearch`
1128
+ ### `AndiSearch`
1141
1129
 
1142
1130
  ```python
1143
1131
  from webscout import AndiSearch
@@ -1145,8 +1133,7 @@ a = AndiSearch()
1145
1133
  print(a.chat("HelpingAI-9B"))
1146
1134
  ```
1147
1135
 
1148
-
1149
- ### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek, Editee, AI21, Chatify, Cerebras, X0GPT, Lepton, GEMINIAPI, Cleeai, Elmo, Free2GPT, Bing, DiscordRocks, GPTWeb, LlamaTutor, PromptRefine, TutorAI, ChatGPTES, Bagoodex, ChatHub, AmigoChat, AIMathGPT, GaurishCerebras, NinjaChat, GeminiPro, Talkai, LLMChat, AskMyAI, Llama3Mitril, Marcus, TypeGPT, Mhystical, Netwrck, MultiChatAI, JadveOpenAI, ChatGLM, Deepfind, NousHermes, TextPollinationsAI, GliderAI, DGAFAI, ChatGPTGratis, QwenLM, IBMGranite, WiseCat, DeepSeek
1136
+ ### `LLAMA`, `C4ai`, `Venice`, `Copilot`, `HuggingFaceChat`, `TwoAI`, `HeckAI`, `AllenAI`, `PerplexityLabs`, `AkashGPT`, `DeepSeek`, `WiseCat`, `IBMGranite`, `QwenLM`, `ChatGPTGratis`, `TextPollinationsAI`, `GliderAI`, `Cohere`, `REKA`, `GROQ`, `AsyncGROQ`, `OPENAI`, `AsyncOPENAI`, `KOBOLDAI`, `AsyncKOBOLDAI`, `BLACKBOXAI`, `PhindSearch`, `GEMINI`, `DeepInfra`, `AI4Chat`, `Phindv2`, `OLLAMA`, `AndiSearch`, `PIZZAGPT`, `Sambanova`, `DARKAI`, `KOALA`, `Meta`, `AskMyAI`, `DiscordRocks`, `PiAI`, `Julius`, `YouChat`, `YEPCHAT`, `Cloudflare`, `TurboSeek`, `Editee`, `TeachAnything`, `AI21`, `Chatify`, `X0GPT`, `Cerebras`, `Lepton`, `GEMINIAPI`, `Cleeai`, `Elmo`, `Free2GPT`, `Bing`, `GPTWeb`, `Netwrck`, `LlamaTutor`, `PromptRefine`, `TutorAI`, `ChatGPTES`, `AmigoChat`, `Bagoodex`, `AIMathGPT`, `GaurishCerebras`, `GeminiPro`, `LLMChat`, `Talkai`, `Llama3Mitril`, `Marcus`, `TypeGPT`, `Netwrck`, `MultiChatAI`, `JadveOpenAI`, `ChatGLM`, `NousHermes`, `FreeAIChat`, `ElectronHub`, `GithubChat`, `Flowith`
1150
1137
 
1151
1138
  Code is similar to other providers.
1152
1139
 
@@ -1170,119 +1157,7 @@ response = vlm.chat([{
1170
1157
  }])
1171
1158
  ```
1172
1159
 
1173
- ## 💻 Local-LLM
1174
-
1175
- Webscout can now run GGUF models locally. You can download and run your favorite models with minimal configuration.
1176
-
1177
- **Example:**
1178
-
1179
- ```python
1180
- from webscout.Local import *
1181
- model_path = download_model("Qwen/Qwen2.5-0.5B-Instruct-GGUF", "qwen2.5-0.5b-instruct-q2_k.gguf", token=None)
1182
- model = Model(model_path, n_gpu_layers=0, context_length=2048)
1183
- thread = Thread(model, format=chatml)
1184
- # print(thread.send("hi")) #send a single msg to ai
1185
-
1186
- # thread.interact() # interact with the model in terminal
1187
- # start webui
1188
- # webui = WebUI(thread)
1189
- # webui.start(host="0.0.0.0", port=8080, ssl=True) #Use ssl=True and make cert and key for https
1190
- ```
1191
-
1192
- ## 🐶 Local-rawdog
1193
-
1194
- Webscout's local raw-dog feature allows you to run Python scripts within your terminal prompt.
1195
-
1196
- **Example:**
1197
-
1198
- ```python
1199
- import webscout.Local as ws
1200
- from webscout.Local.rawdog import RawDog
1201
- from webscout.Local.samplers import DefaultSampling
1202
- from webscout.Local.formats import chatml, AdvancedFormat
1203
- from webscout.Local.utils import download_model
1204
- import datetime
1205
- import sys
1206
- import os
1207
-
1208
- repo_id = "YorkieOH10/granite-8b-code-instruct-Q8_0-GGUF"
1209
- filename = "granite-8b-code-instruct.Q8_0.gguf"
1210
- model_path = download_model(repo_id, filename, token='')
1211
-
1212
- # Load the model using the downloaded path
1213
- model = ws.Model(model_path, n_gpu_layers=10)
1214
-
1215
- rawdog = RawDog()
1216
-
1217
- # Create an AdvancedFormat and modify the system content
1218
- # Use a lambda to generate the prompt dynamically:
1219
- chat_format = AdvancedFormat(chatml)
1220
- # **Pre-format the intro_prompt string:**
1221
- system_content = f"""
1222
- You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.
1223
-
1224
- A typical interaction goes like this:
1225
- 1. The user gives you a natural language PROMPT.
1226
- 2. You:
1227
- i. Determine what needs to be done
1228
- ii. Write a short Python SCRIPT to do it
1229
- iii. Communicate back to the user by printing to the console in that SCRIPT
1230
- 3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
1231
- it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
1232
- 4. In case of exception, regenerate error free script.
1233
-
1234
- If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
1235
- This can be useful for summarizing documents or technical readouts, reading instructions before
1236
- deciding what to do, or other tasks that require multi-step reasoning.
1237
- A typical 'CONTINUE' interaction looks like this:
1238
- 1. The user gives you a natural language PROMPT.
1239
- 2. You:
1240
- i. Determine what needs to be done
1241
- ii. Determine that you need to see the output of some subprocess call to complete the task
1242
- iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
1243
- 3. The compiler
1244
- i. Checks and runs your SCRIPT
1245
- ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
1246
- iii. Finds the word "CONTINUE" and sends control back to you
1247
- 4. You again:
1248
- i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
1249
- ii. Write a short Python SCRIPT to do it
1250
- iii. Communicate back to the user by printing to the console in that SCRIPT
1251
- 5. The compiler...
1252
-
1253
- Please follow these conventions carefully:
1254
- - Decline any tasks that seem dangerous, irreversible, or that you don't understand.
1255
- - Always review the full conversation prior to answering and maintain continuity.
1256
- - If asked for information, just print the information clearly and concisely.
1257
- - If asked to do something, print a concise summary of what you've done as confirmation.
1258
- - If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
1259
- - If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
1260
- - Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
1261
- - Actively clean up any temporary processes or files you use.
1262
- - When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
1263
- - You can plot anything with matplotlib.
1264
- - ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.
1265
- """
1266
- chat_format.override('system_content', lambda: system_content)
1267
-
1268
- thread = ws.Thread(model, format=chat_format, sampler=DefaultSampling)
1269
-
1270
- while True:
1271
- prompt = input(">: ")
1272
- if prompt.lower() == "q":
1273
- break
1274
-
1275
- response = thread.send(prompt)
1276
-
1277
- # Process the response using RawDog
1278
- script_output = rawdog.main(response)
1279
-
1280
- if script_output:
1281
- print(script_output)
1282
-
1283
- ```
1284
-
1285
- ## GGUF
1160
+ ## GGUF
1286
1161
 
1287
1162
  Webscout provides tools to convert and quantize Hugging Face models into the GGUF format for use with offline LLMs.
1288
1163
 
@@ -1321,21 +1196,22 @@ autollama.main(model_path, gguf_file)
1321
1196
  **Command Line Usage:**
1322
1197
 
1323
1198
  * **GGUF Conversion:**
1199
+
1324
1200
  ```bash
1325
1201
  python -m webscout.Extra.gguf -m "OEvortex/HelpingAI-Lite-1.5T" -u "your_username" -t "your_hf_token" -q "q4_k_m,q5_k_m"
1326
1202
  ```
1327
1203
 
1328
1204
  * **Autollama:**
1205
+
1329
1206
  ```bash
1330
1207
  python -m webscout.Extra.autollama -m "OEvortex/HelpingAI-Lite-1.5T" -g "HelpingAI-Lite-1.5T.q4_k_m.gguf"
1331
1208
  ```
1332
1209
 
1333
- **Note:**
1210
+ **Note:**
1334
1211
 
1335
1212
  * Replace `"your_username"` and `"your_hf_token"` with your actual Hugging Face credentials.
1336
1213
  * The `model_path` in `autollama` is the Hugging Face model ID, and `gguf_file` is the GGUF file ID.
1337
1214
 
1338
-
1339
1215
  <div align="center">
1340
1216
  <!-- Replace `#` with your actual links -->
1341
1217
  <a href="https://t.me/official_helpingai"><img alt="Telegram" src="https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
@@ -1346,13 +1222,13 @@ autollama.main(model_path, gguf_file)
1346
1222
 
1347
1223
  <div align="center">
1348
1224
  <!-- Replace `#` with your actual links -->
1349
- <a href="https://youtube.com/@OEvortex">▶️ Vortex's YouTube Channel</a>
1225
+ <a href="https://youtube.com/@OEvortex">▶️ Vortex's YouTube Channel</a>
1350
1226
  </div>
1351
1227
  <div align="center">
1352
- <a href="https://youtube.com/@devsdocode">▶️ Devs Do Code's YouTube Channel</a>
1228
+ <a href="https://youtube.com/@devsdocode">▶️ Devs Do Code's YouTube Channel</a>
1353
1229
  </div>
1354
1230
  <div align="center">
1355
- <a href="https://t.me/ANONYMOUS_56788">📢 Anonymous Coder's Telegram</a>
1231
+ <a href="https://t.me/ANONYMOUS_56788">📢 Anonymous Coder's Telegram</a>
1356
1232
  </div>
1357
1233
 
1358
1234
  ## 🤝 Contributing
@@ -1365,9 +1241,7 @@ Contributions are welcome! If you'd like to contribute to Webscout, please follo
1365
1241
  4. Push your branch to your forked repository.
1366
1242
  5. Submit a pull request to the main repository.
1367
1243
 
1368
-
1369
1244
  ## 🙏 Acknowledgments
1370
1245
 
1371
1246
  * All the amazing developers who have contributed to the project!
1372
1247
  * The open-source community for their support and inspiration.
1373
-