webscout 6.0-py3-none-any.whl → 6.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Files changed (63)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/Onlinesearcher.py +22 -10
  3. webscout/Agents/functioncall.py +2 -2
  4. webscout/Bard.py +21 -21
  5. webscout/Extra/autollama.py +37 -20
  6. webscout/Local/__init__.py +6 -7
  7. webscout/Local/formats.py +406 -194
  8. webscout/Local/model.py +1074 -477
  9. webscout/Local/samplers.py +108 -144
  10. webscout/Local/thread.py +251 -410
  11. webscout/Local/ui.py +401 -0
  12. webscout/Local/utils.py +338 -136
  13. webscout/Provider/Amigo.py +51 -38
  14. webscout/Provider/Deepseek.py +7 -6
  15. webscout/Provider/EDITEE.py +2 -2
  16. webscout/Provider/GPTWeb.py +1 -1
  17. webscout/Provider/Llama3.py +1 -1
  18. webscout/Provider/NinjaChat.py +200 -0
  19. webscout/Provider/OLLAMA.py +1 -1
  20. webscout/Provider/Perplexity.py +1 -1
  21. webscout/Provider/Reka.py +12 -5
  22. webscout/Provider/TTI/AIuncensored.py +103 -0
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +4 -2
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/TTI/talkai.py +116 -0
  28. webscout/Provider/TeachAnything.py +0 -3
  29. webscout/Provider/Youchat.py +1 -1
  30. webscout/Provider/__init__.py +16 -12
  31. webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
  32. webscout/Provider/cerebras.py +143 -123
  33. webscout/Provider/cleeai.py +1 -1
  34. webscout/Provider/felo_search.py +1 -1
  35. webscout/Provider/gaurish.py +207 -0
  36. webscout/Provider/geminiprorealtime.py +160 -0
  37. webscout/Provider/genspark.py +1 -1
  38. webscout/Provider/julius.py +8 -3
  39. webscout/Provider/learnfastai.py +1 -1
  40. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  41. webscout/Provider/promptrefine.py +3 -1
  42. webscout/Provider/talkai.py +196 -0
  43. webscout/Provider/turboseek.py +3 -8
  44. webscout/Provider/tutorai.py +1 -1
  45. webscout/__init__.py +2 -43
  46. webscout/exceptions.py +5 -1
  47. webscout/tempid.py +4 -73
  48. webscout/utils.py +3 -0
  49. webscout/version.py +1 -1
  50. webscout/webai.py +1 -1
  51. webscout/webscout_search.py +154 -123
  52. {webscout-6.0.dist-info → webscout-6.2.dist-info}/METADATA +164 -245
  53. {webscout-6.0.dist-info → webscout-6.2.dist-info}/RECORD +57 -55
  54. webscout/Local/rawdog.py +0 -946
  55. webscout/Provider/BasedGPT.py +0 -214
  56. webscout/Provider/TTI/amigo.py +0 -148
  57. webscout/Provider/bixin.py +0 -264
  58. webscout/Provider/xdash.py +0 -182
  59. webscout/websx_search.py +0 -19
  60. {webscout-6.0.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  61. {webscout-6.0.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  62. {webscout-6.0.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  63. {webscout-6.0.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
@@ -1,13 +1,12 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 6.0
+ Version: 6.2
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
  License: HelpingAI
- Project-URL: Documentation, https://github.com/OE-LUCIFER/Webscout/wiki
- Project-URL: Source, https://github.com/OE-LUCIFER/Webscout
- Project-URL: Tracker, https://github.com/OE-LUCIFER/Webscout/issues
+ Project-URL: Source, https://github.com/HelpingAI/Webscout
+ Project-URL: Tracker, https://github.com/HelpingAI/Webscout/issues
  Project-URL: YouTube, https://youtube.com/@OEvortex
  Classifier: Development Status :: 5 - Production/Stable
  Classifier: Intended Audience :: Developers
@@ -22,6 +21,7 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: Implementation :: CPython
  Classifier: Topic :: Internet :: WWW/HTTP :: Indexing/Search
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.7
  Description-Content-Type: text/markdown
  License-File: LICENSE.md
  Requires-Dist: docstring-inheritance
@@ -53,6 +53,14 @@ Requires-Dist: emoji
  Requires-Dist: openai
  Requires-Dist: prompt-toolkit
  Requires-Dist: fake-useragent
+ Requires-Dist: primp
+ Requires-Dist: pyreqwest-impersonate
+ Requires-Dist: lxml-html-clean
+ Requires-Dist: gradio-client
+ Requires-Dist: psutil
+ Requires-Dist: colorlog
+ Requires-Dist: yaspin
+ Requires-Dist: cerebras-cloud-sdk
  Provides-Extra: dev
  Requires-Dist: ruff>=0.1.6; extra == "dev"
  Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -61,12 +69,13 @@ Requires-Dist: llama-cpp-python; extra == "local"
  Requires-Dist: colorama; extra == "local"
  Requires-Dist: numpy; extra == "local"
  Requires-Dist: huggingface-hub[cli]; extra == "local"
+ Requires-Dist: unicorn; extra == "local"

  <div align="center">
  <!-- Replace `#` with your actual links -->
- <a href="https://t.me/devsdocode"><img alt="Telegram" src="https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
- <a href="https://www.instagram.com/sree.shades_/"><img alt="Instagram" src="https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white"></a>
- <a href="https://www.linkedin.com/in/developer-sreejan/"><img alt="LinkedIn" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"></a>
+ <a href="https://t.me/official_helpingai"><img alt="Telegram" src="https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+ <a href="https://www.instagram.com/oevortex/"><img alt="Instagram" src="https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white"></a>
+ <a href="https://www.linkedin.com/in/oe-vortex-29a407265/"><img alt="LinkedIn" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"></a>
  <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
  </div>

@@ -257,90 +266,41 @@ weather = w.get("Qazigund")
  print(weather)
  ```

- ## ✉️ Tempmail and 📞 Temp Number
+ ## ✉️ TempMail and VNEngine

- ### Temp Number
- ```python
- from rich.console import Console
- from webscout import tempid
-
- def main():
-     console = Console()
-     phone = tempid.TemporaryPhoneNumber()
-
-     try:
-         # Get a temporary phone number for a specific country (or random)
-         number = phone.get_number(country="Finland")
-         console.print(f"Your temporary phone number: [bold cyan]{number}[/bold cyan]")
-
-         # Pause execution briefly (replace with your actual logic)
-         # import time module
-         import time
-         time.sleep(30) # Adjust the waiting time as needed
-
-         # Retrieve and print messages
-         messages = phone.get_messages(number)
-         if messages:
-             # Access individual messages using indexing:
-             console.print(f"[bold green]{messages[0].frm}:[/] {messages[0].content}")
-             # (Add more lines if you expect multiple messages)
-         else:
-             console.print("No messages received.")
-
-     except Exception as e:
-         console.print(f"[bold red]An error occurred: {e}")
-
- if __name__ == "__main__":
-     main()
-
- ```
-
- ### Tempmail
  ```python
+ import json
  import asyncio
- from rich.console import Console
- from rich.table import Table
- from rich.text import Text
- from webscout import tempid
-
- async def main() -> None:
-     console = Console()
-     client = tempid.Client()
+ from webscout import VNEngine
+ from webscout import TempMail
+
+ async def main():
+     vn = VNEngine()
+     countries = vn.get_online_countries()
+     if countries:
+         country = countries[0]['country']
+         numbers = vn.get_country_numbers(country)
+         if numbers:
+             number = numbers[0]['full_number']
+             inbox = vn.get_number_inbox(country, number)
+
+             # Serialize inbox data to JSON string
+             json_data = json.dumps(inbox, ensure_ascii=False, indent=4)
+
+             # Print with UTF-8 encoding
+             print(json_data)

-     try:
+     async with TempMail() as client:
          domains = await client.get_domains()
-         if not domains:
-             console.print("[bold red]No domains available. Please try again later.")
-             return
-
-         email = await client.create_email(domain=domains[0].name)
-         console.print(f"Your temporary email: [bold cyan]{email.email}[/bold cyan]")
-         console.print(f"Token for accessing the email: [bold cyan]{email.token}[/bold cyan]")
-
-         while True:
-             messages = await client.get_messages(email.email)
-             if messages is not None:
-                 break
-
-         if messages:
-             table = Table(show_header=True, header_style="bold magenta")
-             table.add_column("From", style="bold cyan")
-             table.add_column("Subject", style="bold yellow")
-             table.add_column("Body", style="bold green")
-             for message in messages:
-                 body_preview = Text(message.body_text if message.body_text else "No body")
-                 table.add_row(message.email_from or "Unknown", message.subject or "No Subject", body_preview)
-             console.print(table)
-         else:
-             console.print("No messages found.")
-
-     except Exception as e:
-         console.print(f"[bold red]An error occurred: {e}")
-
-     finally:
-         await client.close()
+         print("Available Domains:", domains)
+         email_response = await client.create_email(alias="testuser")
+         print("Created Email:", email_response)
+         messages = await client.get_messages(email_response.email)
+         print("Messages:", messages)
+         await client.delete_email(email_response.email, email_response.token)
+         print("Email Deleted")

- if __name__ == '__main__':
+ if __name__ == "__main__":
      asyncio.run(main())
  ```

@@ -599,16 +559,6 @@ with WEBS() as WEBS:
  print(r)
  ```

- ## 🌐 WEBSX - Another Web Search Tool
-
- ```python
- from webscout import WEBSX
- s = "Python development tools"
-
- result = WEBSX(s)
-
- print(result)
- ```

  ## 🎭 ALL Acts

@@ -868,7 +818,7 @@ print(result)
868
818
  ___
869
819
  </details>
870
820
 
871
- ### 🖼️ Text to Images - DeepInfraImager, PollinationsAI, BlackboxAIImager, AiForceimagger, NexraImager, HFimager, ArtbitImager
821
+ ### 🖼️ Text to Images - DeepInfraImager, PollinationsAI, BlackboxAIImager, AiForceimager, NexraImager, HFimager, ArtbitImager, NinjaImager, WebSimAI, AIUncensoredImager, TalkaiImager
872
822
 
873
823
  **Every TTI provider has the same usage code, you just need to change the import.**
874
824
 
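For orientation, here is a minimal sketch of the shared TTI pattern referenced above, shown with one imager from the updated list. The top-level import path, the constructor call, and the `generate`/`save` method names follow the usage this README describes elsewhere but are assumptions, not something confirmed by this diff; swap the import for any other listed imager.

```python
# Hedged sketch of the common text-to-image flow; import path, constructor
# arguments, and the generate()/save() method names are assumptions.
from webscout import NexraImager

imager = NexraImager()
images = imager.generate("a watercolor lighthouse at dusk", amount=1)  # prompt -> image data
imager.save(images)  # write the generated image(s) to disk
```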
@@ -897,7 +847,7 @@ voicepods.play_audio(audio_file)

  ```python
  from webscout import WEBS as w
- R = w().chat("Who are you", model='gpt-4o-mini') # GPT-3.5 Turbo, mixtral-8x7b, llama-3-70b, claude-3-haiku, gpt-4o-mini
+ R = w().chat("Who are you", model='gpt-4o-mini') # mixtral-8x7b, llama-3.1-70b, claude-3-haiku, gpt-4o-mini
  print(R)
  ```

@@ -1125,29 +1075,6 @@ response_str = a.chat(prompt)
  print(response_str)
  ```

- ### `BasedGPT` - Chat with GPT
-
- ```python
- from webscout import BasedGPT
-
- # Initialize the BasedGPT provider
- basedgpt = BasedGPT(
-     is_conversation=True, # Chat conversationally
-     max_tokens=600, # Maximum tokens to generate
-     timeout=30, # HTTP request timeout
-     intro="You are a helpful and friendly AI.", # Introductory prompt
-     filepath="chat_history.txt", # File to store conversation history
-     update_file=True, # Update the chat history file
- )
-
- # Send a prompt to the AI
- prompt = "What is the meaning of life?"
- response = basedgpt.chat(prompt)
-
- # Print the AI's response
- print(response)
- ```
-
  ### `DeepSeek` - Chat with DeepSeek

  ```python
@@ -1316,117 +1243,120 @@ print(a.chat("HelpingAI-9B"))
  ```python
  import json
  import logging
- from webscout import LLAMA3, WEBS
+ from webscout import Julius, WEBS
  from webscout.Agents.functioncall import FunctionCallingAgent
+ from rich import print

- # Define tools that the agent can use
- tools = [
- {
- "type": "function",
- "function": {
- "name": "UserDetail",
- "parameters": {
- "type": "object",
- "title": "UserDetail",
- "properties": {
- "name": {
- "title": "Name",
- "type": "string"
+ class FunctionExecutor:
+ def __init__(self, llama):
+ self.llama = llama
+
+ def execute_web_search(self, arguments):
+ query = arguments.get("query")
+ if not query:
+ return "Please provide a search query."
+ with WEBS() as webs:
+ search_results = webs.text(query, max_results=5)
+ prompt = (
+ f"Based on the following search results:\n\n{search_results}\n\n"
+ f"Question: {query}\n\n"
+ "Please provide a comprehensive answer to the question based on the search results above. "
+ "Include relevant webpage URLs in your answer when appropriate. "
+ "If the search results don't contain relevant information, please state that and provide the best answer you can based on your general knowledge."
+ )
+ return self.llama.chat(prompt)
+
+ def execute_general_ai(self, arguments):
+ question = arguments.get("question")
+ if not question:
+ return "Please provide a question."
+ return self.llama.chat(question)
+
+ def execute_UserDetail(self, arguments):
+ name = arguments.get("name")
+ age = arguments.get("age")
+ return f"User details - Name: {name}, Age: {age}"
+
+ def main():
+ tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "UserDetail",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "name": {"title": "Name", "type": "string"},
+ "age": {"title": "Age", "type": "integer"}
  },
- "age": {
- "title": "Age",
- "type": "integer"
- }
- },
- "required": ["name", "age"]
+ "required": ["name", "age"]
+ }
  }
- }
- },
- {
- "type": "function",
- "function": {
- "name": "web_search",
- "description": "Search query on google",
- "parameters": {
- "type": "object",
- "properties": {
- "query": {
- "type": "string",
- "description": "web search query"
- }
- },
- "required": ["query"]
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "web_search",
+ "description": "Search the web for information using Google Search.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "The search query to be executed."
+ }
+ },
+ "required": ["query"]
+ }
  }
- }
- },
- { # New general AI tool
- "type": "function",
- "function": {
- "name": "general_ai",
- "description": "Use general AI knowledge to answer the question",
- "parameters": {
- "type": "object",
- "properties": {
- "question": {
- "type": "string",
- "description": "The question to answer"
- }
- },
- "required": ["question"]
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "general_ai",
+ "description": "Use general AI knowledge to answer the question",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "question": {"type": "string", "description": "The question to answer"}
+ },
+ "required": ["question"]
+ }
  }
  }
- }
- ]
+ ]

- # Initialize the FunctionCallingAgent with the specified tools
- agent = FunctionCallingAgent(tools=tools)
- llama = LLAMA3()
- from rich import print
- # Input message from the user
- user = input(">>> ")
- message = user
- function_call_data = agent.function_call_handler(message)
- print(f"Function Call Data: {function_call_data}")
-
- # Check for errors in the function call data
- if "error" not in function_call_data:
- function_name = function_call_data.get("tool_name") # Use 'tool_name' instead of 'name'
- if function_name == "web_search":
- arguments = function_call_data.get("tool_input", {}) # Get tool input arguments
- query = arguments.get("query")
- if query:
- with WEBS() as webs:
- search_results = webs.text(query, max_results=5)
- prompt = (
- f"Based on the following search results:\n\n{search_results}\n\n"
- f"Question: {user}\n\n"
- "Please provide a comprehensive answer to the question based on the search results above. "
- "Include relevant webpage URLs in your answer when appropriate. "
- "If the search results don't contain relevant information, please state that and provide the best answer you can based on your general knowledge."
- )
- response = llama.chat(prompt)
- for c in response:
- print(c, end="", flush=True)
+ agent = FunctionCallingAgent(tools=tools)
+ llama = Julius()
+ function_executor = FunctionExecutor(llama)

+ user_input = input(">>> ")
+ function_call_data = agent.function_call_handler(user_input)
+ print(f"Function Call Data: {function_call_data}")
+
+ try:
+ if "error" not in function_call_data:
+ function_name = function_call_data.get("tool_name")
+ arguments = function_call_data.get("tool_input", {})
+
+ execute_function = getattr(function_executor, f"execute_{function_name}", None)
+ if execute_function:
+ result = execute_function(arguments)
+ print("Function Execution Result:")
+ for c in result:
+ print(c, end="", flush=True)
+ else:
+ print(f"Unknown function: {function_name}")
  else:
- print("Please provide a search query.")
- elif function_name == "general_ai": # Handle general AI tool
- arguments = function_call_data.get("tool_input", {})
- question = arguments.get("question")
- if question:
- response = llama.chat(question) # Use LLM directly
- for c in response:
- print(c, end="", flush=True)
- else:
- print("Please provide a question.")
- else:
- result = agent.execute_function(function_call_data)
- print(f"Function Execution Result: {result}")
- else:
- print(f"Error: {function_call_data['error']}")
+ print(f"Error: {function_call_data['error']}")
+ except Exception as e:
+ print(f"An error occurred: {str(e)}")
+
+ if __name__ == "__main__":
+ main()
  ```

- ### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, XDASH, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek, Editee, AI21, Chatify, Cerebras, X0GPT, Lepton, GEMINIAPI, Cleeai, Elmo, Genspark, Upstage, Free2GPT, Bing, DiscordRocks, GPTWeb, AIGameIO, LlamaTutor, PromptRefine, AIUncensored, TutorAI, Bixin, ChatGPTES, Bagoodex, ChatHub, AmigoChat
+ ### LLAMA3, pizzagpt, RUBIKSAI, Koala, Darkai, AI4Chat, Farfalle, PIAI, Felo, Julius, YouChat, YEPCHAT, Cloudflare, TurboSeek, Editee, AI21, Chatify, Cerebras, X0GPT, Lepton, GEMINIAPI, Cleeai, Elmo, Genspark, Upstage, Free2GPT, Bing, DiscordRocks, GPTWeb, LlamaTutor, PromptRefine, AIUncensored, TutorAI, ChatGPTES, Bagoodex, ChatHub, AmigoChat, AIMathGPT, GaurishCerebras, NinjaChat, GeminiPro, Talkai, LLMChat

  Code is similar to other providers.

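For context on what "similar" means here, a minimal sketch of the shared provider interface, shown with `LLMChat` (one of the providers added in 6.2). The `chat()` call mirrors the other provider examples in this README; the argument-free constructor is an assumption.

```python
# Hedged sketch of the common provider pattern; constructor arguments for
# LLMChat are assumed, and chat() mirrors the other provider examples above.
from webscout import LLMChat

ai = LLMChat()
response = ai.chat("What is the capital of France?")
print(response)
```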
@@ -1465,24 +1395,16 @@ Webscout can now run GGUF models locally. You can download and run your favorite
  **Example:**

  ```python
- from webscout.Local.utils import download_model
- from webscout.Local.model import Model
- from webscout.Local.thread import Thread
- from webscout.Local import formats
-
- # 1. Download the model
- repo_id = "microsoft/Phi-3-mini-4k-instruct-gguf" # Replace with the desired Hugging Face repo
- filename = "Phi-3-mini-4k-instruct-q4.gguf" # Replace with the correct filename
- model_path = download_model(repo_id, filename, token="")
-
- # 2. Load the model
- model = Model(model_path, n_gpu_layers=4)
-
- # 3. Create a Thread for conversation
- thread = Thread(model, formats.phi3)
-
- # 4. Start interacting with the model
- thread.interact()
+ from webscout.Local import *
+ model_path = download_model("Qwen/Qwen2.5-0.5B-Instruct-GGUF", "qwen2.5-0.5b-instruct-q2_k.gguf", token=None)
+ model = Model(model_path, n_gpu_layers=0, context_length=2048)
+ thread = Thread(model, format=chatml)
+ # print(thread.send("hi")) #send a single msg to ai
+
+ # thread.interact() # interact with the model in terminal
+ # start webui
+ # webui = WebUI(thread)
+ # webui.start(host="0.0.0.0", port=8080, ssl=True) #Use ssl=True and make cert and key for https
  ```

  ## 🐶 Local-rawdog
@@ -1585,7 +1507,7 @@ Webscout provides tools to convert and quantize Hugging Face models into the GGU
  **Example:**

  ```python
- from webscout import gguf
+ from webscout.Extra import gguf
  """
  Valid quantization methods:
  "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
@@ -1606,7 +1528,7 @@ gguf.convert(
  Webscout's `autollama` utility downloads a model from Hugging Face and then automatically makes it Ollama-ready.

  ```python
- from webscout import autollama
+ from webscout.Extra import autollama

  model_path = "Vortex4ai/Jarvis-0.5B"
  gguf_file = "test2-q4_k_m.gguf"
@@ -1640,9 +1562,9 @@ python -m webscout.webai webai --provider "phind" --rawdog

  <div align="center">
  <!-- Replace `#` with your actual links -->
- <a href="https://t.me/devsdocode"><img alt="Telegram" src="https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
- <a href="https://www.instagram.com/sree.shades_/"><img alt="Instagram" src="https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white"></a>
- <a href="https://www.linkedin.com/in/developer-sreejan/"><img alt="LinkedIn" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"></a>
+ <a href="https://t.me/official_helpingai"><img alt="Telegram" src="https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
+ <a href="https://www.instagram.com/oevortex/"><img alt="Instagram" src="https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white"></a>
+ <a href="https://www.linkedin.com/in/oe-vortex-29a407265/"><img alt="LinkedIn" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"></a>
  <a href="https://buymeacoffee.com/oevortex"><img alt="Buy Me A Coffee" src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-FFDD00?style=for-the-badge&logo=buymeacoffee&logoColor=black"></a>
  </div>

@@ -1667,9 +1589,6 @@ Contributions are welcome! If you'd like to contribute to Webscout, please follo
  4. Push your branch to your forked repository.
  5. Submit a pull request to the main repository.

- ## 📜 License
-
- This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

  ## 🙏 Acknowledgments