webscout 8.1-py3-none-any.whl → 8.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (60)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  14. webscout/Provider/AISEARCH/__init__.py +2 -1
  15. webscout/Provider/AISEARCH/scira_search.py +8 -4
  16. webscout/Provider/ExaChat.py +18 -8
  17. webscout/Provider/GithubChat.py +5 -1
  18. webscout/Provider/Glider.py +4 -2
  19. webscout/Provider/OPENAI/__init__.py +9 -1
  20. webscout/Provider/OPENAI/c4ai.py +22 -2
  21. webscout/Provider/OPENAI/chatgpt.py +549 -0
  22. webscout/Provider/OPENAI/deepinfra.py +1 -13
  23. webscout/Provider/OPENAI/e2b.py +1192 -0
  24. webscout/Provider/OPENAI/exaai.py +1 -16
  25. webscout/Provider/OPENAI/exachat.py +20 -8
  26. webscout/Provider/OPENAI/freeaichat.py +1 -4
  27. webscout/Provider/OPENAI/glider.py +3 -1
  28. webscout/Provider/OPENAI/llmchatco.py +3 -1
  29. webscout/Provider/OPENAI/opkfc.py +488 -0
  30. webscout/Provider/OPENAI/scirachat.py +11 -7
  31. webscout/Provider/OPENAI/standardinput.py +425 -0
  32. webscout/Provider/OPENAI/textpollinations.py +285 -0
  33. webscout/Provider/OPENAI/toolbaz.py +405 -0
  34. webscout/Provider/OPENAI/typegpt.py +1 -16
  35. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  36. webscout/Provider/OPENAI/venice.py +1 -16
  37. webscout/Provider/OPENAI/writecream.py +156 -0
  38. webscout/Provider/OPENAI/x0gpt.py +2 -20
  39. webscout/Provider/OPENAI/yep.py +2 -4
  40. webscout/Provider/StandardInput.py +278 -0
  41. webscout/Provider/TextPollinationsAI.py +27 -28
  42. webscout/Provider/Writecream.py +211 -0
  43. webscout/Provider/WritingMate.py +197 -0
  44. webscout/Provider/Youchat.py +30 -26
  45. webscout/Provider/__init__.py +10 -2
  46. webscout/Provider/koala.py +2 -2
  47. webscout/Provider/llmchatco.py +5 -0
  48. webscout/Provider/scira_chat.py +5 -2
  49. webscout/Provider/scnet.py +187 -0
  50. webscout/Provider/toolbaz.py +320 -0
  51. webscout/Provider/uncovr.py +3 -3
  52. webscout/conversation.py +32 -32
  53. webscout/version.py +1 -1
  54. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/METADATA +54 -3
  55. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/RECORD +59 -33
  56. webscout-8.2.1.dist-info/entry_points.txt +5 -0
  57. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/top_level.txt +1 -0
  58. webscout-8.1.dist-info/entry_points.txt +0 -3
  59. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/LICENSE.md +0 -0
  60. {webscout-8.1.dist-info → webscout-8.2.1.dist-info}/WHEEL +0 -0
webscout/Provider/toolbaz.py CHANGED
@@ -0,0 +1,320 @@
+ import re
+ import requests
+ import uuid
+ import base64
+ import json
+ import random
+ import string
+ import time
+ from datetime import datetime
+ from typing import Any, Dict, Optional, Generator, Union, List
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+
+ class Toolbaz(Provider):
+     """
+     A class to interact with the Toolbaz API. Supports streaming responses.
+     """
+
+     AVAILABLE_MODELS = [
+         "gemini-2.0-flash-thinking",
+         "gemini-2.0-flash",
+         "gemini-1.5-flash",
+         "gpt-4o-latest",
+         "gpt-4o-mini",
+         "gpt-4o",
+         "deepseek-r1",
+         "Llama-3.3-70B",
+         "Llama-3.1-405B",
+         "Llama-3.1-70B",
+         "Qwen2.5-72B",
+         "Qwen2-72B",
+         "grok-2-1212",
+         "grok-beta",
+         "toolbaz_v3.5_pro",
+         "toolbaz_v3",
+         "mixtral_8x22b",
+         "L3-70B-Euryale-v2.1",
+         "midnight-rose",
+         "unity",
+         "unfiltered_x"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gemini-2.0-flash",
+         system_prompt: str = "You are a helpful AI assistant."
+     ):
+         """
+         Initializes the Toolbaz API with given parameters.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.model = model
+         self.proxies = proxies
+
+         # Set up headers
+         self.session.headers.update({
+             "user-agent": "Mozilla/5.0 (Linux; Android 10)",
+             "accept": "*/*",
+             "accept-language": "en-US",
+             "cache-control": "no-cache",
+             "connection": "keep-alive",
+             "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
+             "origin": "https://toolbaz.com",
+             "pragma": "no-cache",
+             "referer": "https://toolbaz.com/",
+             "sec-fetch-mode": "cors"
+         })
+
+         # Initialize conversation history
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def random_string(self, length):
+         return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
+
+     def generate_token(self):
+         payload = {
+             "bR6wF": {
+                 "nV5kP": "Mozilla/5.0 (Linux; Android 10)",
+                 "lQ9jX": "en-US",
+                 "sD2zR": "431x958",
+                 "tY4hL": time.tzname[0] if time.tzname else "UTC",
+                 "pL8mC": "Linux armv81",
+                 "cQ3vD": datetime.now().year,
+                 "hK7jN": datetime.now().hour
+             },
+             "uT4bX": {
+                 "mM9wZ": [],
+                 "kP8jY": []
+             },
+             "tuTcS": int(time.time()),
+             "tDfxy": None,
+             "RtyJt": str(uuid.uuid4())
+         }
+         return "d8TW0v" + base64.b64encode(json.dumps(payload).encode()).decode()
+
+     def get_auth(self):
+         try:
+             session_id = self.random_string(36)
+             token = self.generate_token()
+             data = {
+                 "session_id": session_id,
+                 "token": token
+             }
+             resp = self.session.post("https://data.toolbaz.com/token.php", data=data)
+             resp.raise_for_status()
+             result = resp.json()
+             if result.get("success"):
+                 return {"token": result["token"], "session_id": session_id}
+             return None
+         except Exception:
+             return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,  # Kept for compatibility with other providers
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """Sends a prompt to the Toolbaz API and returns the response."""
+         if optimizer and optimizer not in self.__available_optimizers:
+             raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {self.__available_optimizers}")
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             conversation_prompt = getattr(Optimizers, optimizer)(
+                 conversation_prompt if conversationally else prompt
+             )
+
+         auth = self.get_auth()
+         if not auth:
+             raise exceptions.ProviderConnectionError("Failed to authenticate with Toolbaz API")
+
+         data = {
+             "text": conversation_prompt,
+             "capcha": auth["token"],
+             "model": self.model,
+             "session_id": auth["session_id"]
+         }
+
+         def for_stream():
+             try:
+                 resp = self.session.post(
+                     "https://data.toolbaz.com/writing.php",
+                     data=data,
+                     stream=True,
+                     proxies=self.proxies,
+                     timeout=self.timeout
+                 )
+                 resp.raise_for_status()
+
+                 buffer = ""
+                 tag_start = "[model:"
+                 streaming_text = ""
+
+                 for chunk in resp.iter_content(chunk_size=1):
+                     if chunk:
+                         text = chunk.decode(errors="ignore")
+                         buffer += text
+                         # Remove all complete [model: ...] tags in buffer
+                         while True:
+                             match = re.search(r"\[model:.*?\]", buffer)
+                             if not match:
+                                 break
+                             buffer = buffer[:match.start()] + buffer[match.end():]
+                         # Only yield up to the last possible start of a tag
+                         last_tag = buffer.rfind(tag_start)
+                         if last_tag == -1 or last_tag + len(tag_start) > len(buffer):
+                             if buffer:
+                                 streaming_text += buffer
+                                 yield {"text": buffer}
+                             buffer = ""
+                         else:
+                             if buffer[:last_tag]:
+                                 streaming_text += buffer[:last_tag]
+                                 yield {"text": buffer[:last_tag]}
+                             buffer = buffer[last_tag:]
+
+                 # Remove any remaining [model: ...] tag in the buffer
+                 buffer = re.sub(r"\[model:.*?\]", "", buffer)
+                 if buffer:
+                     streaming_text += buffer
+                     yield {"text": buffer}
+
+                 self.last_response = {"text": streaming_text}
+                 self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.ProviderConnectionError(f"Network error: {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.ProviderConnectionError(f"Unexpected error: {str(e)}") from e
+
+         def for_non_stream():
+             try:
+                 resp = self.session.post(
+                     "https://data.toolbaz.com/writing.php",
+                     data=data,
+                     proxies=self.proxies,
+                     timeout=self.timeout
+                 )
+                 resp.raise_for_status()
+
+                 text = resp.text
+                 # Remove [model: ...] tags
+                 text = re.sub(r"\[model:.*?\]", "", text)
+
+                 self.last_response = {"text": text}
+                 self.conversation.update_chat_history(prompt, text)
+
+                 return self.last_response
+
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: Optional[str] = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generates a response from the Toolbaz API."""
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 stream=True,
+                 optimizer=optimizer,
+                 conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     stream=False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Extract the message from the response.
+
+         Args:
+             response: Response dictionary
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ # Example usage
+ if __name__ == "__main__":
+     # Test the provider with different models
+     for model in Toolbaz.AVAILABLE_MODELS:
+         try:
+             test_ai = Toolbaz(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/uncovr.py CHANGED
@@ -20,14 +20,14 @@ class UncovrAI(Provider):
          "gpt-4o-mini",
          "gemini-2-flash",
          "gemini-2-flash-lite",
-         "groq-llama-3-1-8b"
+         "groq-llama-3-1-8b",
+         "o3-mini",
+         "deepseek-r1-distill-qwen-32b",
          # The following models are not available in the free plan:
-         # "o3-mini",
          # "claude-3-7-sonnet",
          # "gpt-4o",
          # "claude-3-5-sonnet-v2",
          # "deepseek-r1-distill-llama-70b",
-         # "deepseek-r1-distill-qwen-32b",
          # "gemini-2-flash-lite-preview",
          # "qwen-qwq-32b"
      ]
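
This hunk promotes `o3-mini` and `deepseek-r1-distill-qwen-32b` from the commented-out "not available in the free plan" block into the active `AVAILABLE_MODELS` list, so they can now be selected by name. A hedged sketch, assuming `UncovrAI` exposes the same `model=` keyword and `chat()` method as the other providers in this release:

```python
# Illustrative only: assumes UncovrAI follows the shared Provider interface
# (model= keyword plus chat()), as Toolbaz above does.
from webscout.Provider.uncovr import UncovrAI

ai = UncovrAI(model="o3-mini")  # selectable without a ValueError after this change
print(ai.chat("Hello"))
```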
webscout/conversation.py CHANGED
@@ -281,11 +281,11 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
              if not os.path.exists(self.file):
                  with open(self.file, "w", encoding="utf-8") as fh:
                      fh.write(self.intro + "\n")
-
+
              # Append new history
              with open(self.file, "a", encoding="utf-8") as fh:
                  fh.write(new_history)
-
+
          self.chat_history += new_history
          # logger.info(f"Chat history updated with prompt: {prompt}")
 
@@ -317,21 +317,21 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
              "tool": tool_name,
              "result": tool_result
          }
-
+
          if self.file and self.update_file:
              # Create file if it doesn't exist
              if not os.path.exists(self.file):
                  with open(self.file, "w", encoding="utf-8") as fh:
                      fh.write(self.intro + "\n")
-
+
              # Append new history
              with open(self.file, "a", encoding="utf-8") as fh:
                  fh.write(new_history)
-
+
          self.chat_history += new_history
 
      def add_message(self, role: str, content: str) -> None:
-         """Add a new message to the chat - simple and clean!
+         """Add a new message to the chat - simple and clean!
 
          This method:
          - Validates the message role
@@ -379,10 +379,10 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
 
      def _parse_function_call(self, response: str) -> FunctionCallData:
          """Parse a function call from the LLM's response.
-
+
          Args:
              response (str): The LLM's response containing a function call
-
+
          Returns:
              FunctionCallData: Parsed function call data or error
          """
@@ -399,13 +399,13 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
          end_tag = "</tool_call>"
          start_idx = response.find(start_tag)
          end_idx = response.rfind(end_tag)
-
+
          if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
              raise ValueError("No valid <tool_call> JSON structure found in the response.")
-
+
          # Extract JSON content - for the format without brackets
          json_str: str = response[start_idx + len(start_tag):end_idx].strip()
-
+
          # Try to parse the JSON directly
          try:
              parsed_response: Any = json.loads(json_str)
@@ -425,7 +425,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
          # Extract JSON content - for the format with brackets
          json_str: str = response[start_idx + len(start_tag):end_idx].strip()
          parsed_response: Any = json.loads(json_str)
-
+
          if isinstance(parsed_response, list):
              return {"tool_calls": parsed_response}
          elif isinstance(parsed_response, dict):
@@ -439,10 +439,10 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
 
      def execute_function(self, function_call_data: FunctionCallData) -> str:
          """Execute a function call and return the result.
-
+
          Args:
              function_call_data (FunctionCallData): The function call data
-
+
          Returns:
              str: Result of the function execution
          """
@@ -450,7 +450,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
 
          if not tool_calls or not isinstance(tool_calls, list):
              return "Invalid tool_calls format."
-
+
          results: List[str] = []
          for tool_call in tool_calls:
              function_name: str = tool_call.get("name")
@@ -465,19 +465,19 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
                  results.append(f"Executed {function_name} with arguments {arguments}")
 
          return "; ".join(results)
-
+
      def _convert_fns_to_tools(self, fns: Optional[List[Fn]]) -> List[ToolDefinition]:
          """Convert functions to tool definitions for the LLM.
-
+
          Args:
              fns (Optional[List[Fn]]): List of function definitions
-
+
          Returns:
              List[ToolDefinition]: List of tool definitions
          """
          if not fns:
              return []
-
+
          tools: List[ToolDefinition] = []
          for fn in fns:
              tool: ToolDefinition = {
@@ -499,55 +499,55 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
              }
              tools.append(tool)
          return tools
-
+
      def get_tools_description(self) -> str:
          """Get a formatted string of available tools for the intro prompt.
-
+
          Returns:
              str: Formatted tools description
          """
          if not self.tools:
              return ""
-
+
          tools_desc = []
          for fn in self.tools:
              params_desc = ", ".join([f"{name}: {typ}" for name, typ in fn.parameters.items()])
              tools_desc.append(f"- {fn.name}: {fn.description} (Parameters: {params_desc})")
-
+
          return "\n".join(tools_desc)
 
      def handle_tool_response(self, response: str) -> Dict[str, Any]:
          """Process a response that might contain a tool call.
-
+
          This method:
          - Checks if the response contains a tool call
          - Parses and executes the tool call if present
          - Returns the appropriate result
-
+
          Args:
              response (str): The LLM's response
-
+
          Returns:
              Dict[str, Any]: Result containing 'is_tool_call', 'result', and 'original_response'
          """
          # Check if response contains a tool call
          if "<tool_call>" in response:
              function_call_data = self._parse_function_call(response)
-
+
              if "error" in function_call_data:
                  return {
-                     "is_tool_call": True,
+                     "is_tool_call": True,
                      "success": False,
                      "result": function_call_data["error"],
                      "original_response": response
                  }
-
+
              # Execute the function call
              result = self.execute_function(function_call_data)
-
+
              # Add the result to chat history as a tool message
              self.add_message("tool", result)
-
+
              return {
                  "is_tool_call": True,
                  "success": True,
@@ -555,7 +555,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
                  "tool_calls": function_call_data.get("tool_calls", []),
                  "original_response": response
              }
-
+
          return {
              "is_tool_call": False,
              "result": response,
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "8.1"
+ __version__ = "8.2.1"
  __prog__ = "webscout"
{webscout-8.1.dist-info → webscout-8.2.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 8.1
+ Version: 8.2.1
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -8,10 +8,17 @@ License: HelpingAI
  Project-URL: Source, https://github.com/OE-LUCIFER/Webscout
  Project-URL: Tracker, https://github.com/OE-LUCIFER/Webscout/issues
  Project-URL: YouTube, https://youtube.com/@OEvortex
+ Keywords: search,ai,chatbot,llm,language-model,gpt,openai,gemini,claude,llama,search-engine,text-to-speech,tts,text-to-image,tti,weather,youtube,toolkit,utilities,web-search,duckduckgo,google,yep
  Classifier: Development Status :: 5 - Production/Stable
  Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: End Users/Desktop
+ Classifier: Intended Audience :: Science/Research
  Classifier: License :: Other/Proprietary License
+ Classifier: Natural Language :: English
  Classifier: Operating System :: OS Independent
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Operating System :: POSIX :: Linux
+ Classifier: Operating System :: MacOS :: MacOS X
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
@@ -21,6 +28,10 @@ Classifier: Programming Language :: Python :: 3.13
  Classifier: Programming Language :: Python :: Implementation :: CPython
  Classifier: Topic :: Internet :: WWW/HTTP :: Indexing/Search
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: Text Processing :: Linguistic
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Communications
+ Classifier: Topic :: Utilities
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE.md
@@ -55,6 +66,15 @@ Requires-Dist: pyreqwest-impersonate
  Requires-Dist: gradio-client
  Requires-Dist: psutil
  Requires-Dist: aiohttp
+ Provides-Extra: local
+ Requires-Dist: llama-cpp-python; extra == "local"
+ Requires-Dist: fastapi; extra == "local"
+ Requires-Dist: uvicorn; extra == "local"
+ Requires-Dist: rich; extra == "local"
+ Requires-Dist: typer; extra == "local"
+ Requires-Dist: huggingface-hub; extra == "local"
+ Requires-Dist: pydantic; extra == "local"
+ Requires-Dist: requests; extra == "local"
  Provides-Extra: dev
  Requires-Dist: ruff>=0.1.6; extra == "dev"
  Requires-Dist: pytest>=7.4.2; extra == "dev"
@@ -82,9 +102,10 @@ Requires-Dist: pytest>=7.4.2; extra == "dev"
  </div>
 
  > [!IMPORTANT]
- > Webscout supports two types of compatibility:
+ > Webscout supports three types of compatibility:
  > - **Native Compatibility:** Webscout's own native API for maximum flexibility
  > - **OpenAI Compatibility:** Use providers with OpenAI-compatible interfaces
+ > - **Local LLM Compatibility:** Run local models with [Inferno](webscout/Local/README.md), an OpenAI-compatible server
  >
  > Choose the approach that best fits your needs! For OpenAI compatibility, check the [OpenAI Providers README](webscout/Provider/OPENAI/README.md).
 
@@ -108,9 +129,10 @@ Requires-Dist: pytest>=7.4.2; extra == "dev"
 
  ### Search & AI
  * **Comprehensive Search:** Leverage Google, DuckDuckGo, and Yep for diverse search results
- * **AI Powerhouse:** Access and interact with various AI models through two compatibility options:
+ * **AI Powerhouse:** Access and interact with various AI models through three compatibility options:
    * **Native API:** Use Webscout's native interfaces for providers like OpenAI, Cohere, Gemini, and many more
    * **[OpenAI-Compatible Providers](webscout/Provider/OPENAI/README.md):** Seamlessly integrate with various AI providers using standardized OpenAI-compatible interfaces
+   * **[Local LLMs with Inferno](webscout/Local/README.md):** Run local models with an OpenAI-compatible server
    * **[AI Search](webscout/Provider/AISEARCH/README.md):** AI-powered search engines with advanced capabilities
 
  ### Media & Content Tools
@@ -126,6 +148,7 @@ Requires-Dist: pytest>=7.4.2; extra == "dev"
  * **[LitLogger](webscout/litlogger/Readme.md):** Simplified logging with customizable formats and color schemes
  * **[LitAgent](webscout/litagent/Readme.md):** Modern user agent generator that keeps your requests undetectable
  * **[Scout](webscout/scout/README.md):** Advanced web parsing and crawling library with intelligent HTML/XML parsing
+ * **[Inferno](webscout/Local/README.md):** Run local LLMs with an OpenAI-compatible API and interactive CLI
  * **GGUF Conversion:** Convert and quantize Hugging Face models to GGUF format
 
  ### Privacy & Utilities
@@ -148,6 +171,8 @@ Webscout provides a powerful command-line interface for quick access to its feat
  python -m webscout --help
  ```
 
+ ### Web Search Commands
+
  | Command | Description |
  |---------|-------------|
  | `python -m webscout answers -k "query"` | Perform an answers search |
@@ -162,6 +187,32 @@ python -m webscout --help
  | `python -m webscout videos -k "query"` | Search for videos |
  | `python -m webscout weather -l "location"` | Get weather information |
 
+ ### Inferno LLM Commands
+
+ Inferno provides commands for managing and using local LLMs:
+
+ ```bash
+ python -m inferno --help
+ ```
+
+ | Command | Description |
+ |---------|-------------|
+ | `python -m inferno pull <model>` | Download a model from Hugging Face |
+ | `python -m inferno list` | List downloaded models |
+ | `python -m inferno serve <model>` | Start a model server with OpenAI-compatible API |
+ | `python -m inferno run <model>` | Chat with a model interactively |
+ | `python -m inferno remove <model>` | Remove a downloaded model |
+ | `python -m inferno version` | Show version information |
+
+ > [!NOTE]
+ > Hardware requirements for running models:
+ > - Around 2 GB of RAM for 1B models
+ > - Around 4 GB of RAM for 3B models
+ > - At least 8 GB of RAM for 7B models
+ > - 16 GB of RAM for 13B models
+ > - 32 GB of RAM for 33B models
+ > - GPU acceleration is recommended for better performance
+
 
 
  ## 🔍 Search Engines
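
The new `Provides-Extra: local` block in METADATA means the Inferno dependencies (llama-cpp-python, fastapi, uvicorn, and so on) install through pip's standard extras syntax, after which the `inferno` commands tabled above become available. For example, keeping the README's `<model>` placeholder:

```bash
# Install the 8.2.1 release with the new "local" extra from the METADATA diff.
pip install "webscout[local]==8.2.1"

# Inferno commands from the README table above:
python -m inferno pull <model>    # download a model from Hugging Face
python -m inferno serve <model>   # start an OpenAI-compatible server
```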