webscout 2.8__tar.gz → 3.0b0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (72)
  1. {webscout-2.8 → webscout-3.0b0}/PKG-INFO +2 -1
  2. {webscout-2.8 → webscout-3.0b0}/setup.py +2 -1
  3. {webscout-2.8 → webscout-3.0b0}/webscout/AIutel.py +1 -0
  4. {webscout-2.8 → webscout-3.0b0}/webscout/Local/_version.py +1 -1
  5. {webscout-2.8 → webscout-3.0b0}/webscout/Local/model.py +75 -4
  6. {webscout-2.8 → webscout-3.0b0}/webscout/Local/thread.py +13 -2
  7. {webscout-2.8 → webscout-3.0b0}/webscout/Local/utils.py +3 -2
  8. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/__init__.py +3 -2
  9. {webscout-2.8 → webscout-3.0b0}/webscout/__init__.py +1 -0
  10. webscout-3.0b0/webscout/version.py +2 -0
  11. {webscout-2.8 → webscout-3.0b0}/webscout.egg-info/PKG-INFO +2 -1
  12. {webscout-2.8 → webscout-3.0b0}/webscout.egg-info/requires.txt +1 -0
  13. webscout-2.8/webscout/version.py +0 -2
  14. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/__init__.py +0 -0
  15. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/documents/__init__.py +0 -0
  16. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/documents/query_results_extractor.py +0 -0
  17. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/documents/webpage_content_extractor.py +0 -0
  18. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/networks/__init__.py +0 -0
  19. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/networks/filepath_converter.py +0 -0
  20. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/networks/google_searcher.py +0 -0
  21. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/networks/network_configs.py +0 -0
  22. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/networks/webpage_fetcher.py +0 -0
  23. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/utilsdw/__init__.py +0 -0
  24. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/utilsdw/enver.py +0 -0
  25. {webscout-2.8 → webscout-3.0b0}/DeepWEBS/utilsdw/logger.py +0 -0
  26. {webscout-2.8 → webscout-3.0b0}/LICENSE.md +0 -0
  27. {webscout-2.8 → webscout-3.0b0}/README.md +0 -0
  28. {webscout-2.8 → webscout-3.0b0}/setup.cfg +0 -0
  29. {webscout-2.8 → webscout-3.0b0}/webscout/AIauto.py +0 -0
  30. {webscout-2.8 → webscout-3.0b0}/webscout/AIbase.py +0 -0
  31. {webscout-2.8 → webscout-3.0b0}/webscout/DWEBS.py +0 -0
  32. {webscout-2.8 → webscout-3.0b0}/webscout/LLM.py +0 -0
  33. {webscout-2.8 → webscout-3.0b0}/webscout/Local/__init__.py +0 -0
  34. {webscout-2.8 → webscout-3.0b0}/webscout/Local/formats.py +0 -0
  35. {webscout-2.8 → webscout-3.0b0}/webscout/Local/samplers.py +0 -0
  36. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/BasedGPT.py +0 -0
  37. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Berlin4h.py +0 -0
  38. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Blackboxai.py +0 -0
  39. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/ChatGPTUK.py +0 -0
  40. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Cohere.py +0 -0
  41. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Gemini.py +0 -0
  42. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Groq.py +0 -0
  43. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Koboldai.py +0 -0
  44. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Leo.py +0 -0
  45. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Llama2.py +0 -0
  46. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/OpenGPT.py +0 -0
  47. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Openai.py +0 -0
  48. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Perplexity.py +0 -0
  49. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Phind.py +0 -0
  50. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Poe.py +0 -0
  51. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Reka.py +0 -0
  52. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/ThinkAnyAI.py +0 -0
  53. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Xjai.py +0 -0
  54. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Yepchat.py +0 -0
  55. {webscout-2.8 → webscout-3.0b0}/webscout/Provider/Youchat.py +0 -0
  56. {webscout-2.8 → webscout-3.0b0}/webscout/__main__.py +0 -0
  57. {webscout-2.8 → webscout-3.0b0}/webscout/async_providers.py +0 -0
  58. {webscout-2.8 → webscout-3.0b0}/webscout/cli.py +0 -0
  59. {webscout-2.8 → webscout-3.0b0}/webscout/exceptions.py +0 -0
  60. {webscout-2.8 → webscout-3.0b0}/webscout/g4f.py +0 -0
  61. {webscout-2.8 → webscout-3.0b0}/webscout/models.py +0 -0
  62. {webscout-2.8 → webscout-3.0b0}/webscout/tempid.py +0 -0
  63. {webscout-2.8 → webscout-3.0b0}/webscout/transcriber.py +0 -0
  64. {webscout-2.8 → webscout-3.0b0}/webscout/utils.py +0 -0
  65. {webscout-2.8 → webscout-3.0b0}/webscout/voice.py +0 -0
  66. {webscout-2.8 → webscout-3.0b0}/webscout/webai.py +0 -0
  67. {webscout-2.8 → webscout-3.0b0}/webscout/webscout_search.py +0 -0
  68. {webscout-2.8 → webscout-3.0b0}/webscout/webscout_search_async.py +0 -0
  69. {webscout-2.8 → webscout-3.0b0}/webscout.egg-info/SOURCES.txt +0 -0
  70. {webscout-2.8 → webscout-3.0b0}/webscout.egg-info/dependency_links.txt +0 -0
  71. {webscout-2.8 → webscout-3.0b0}/webscout.egg-info/entry_points.txt +0 -0
  72. {webscout-2.8 → webscout-3.0b0}/webscout.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 2.8
3
+ Version: 3.0b0
4
4
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
@@ -61,6 +61,7 @@ Provides-Extra: local
61
61
  Requires-Dist: llama-cpp-python; extra == "local"
62
62
  Requires-Dist: colorama; extra == "local"
63
63
  Requires-Dist: numpy; extra == "local"
64
+ Requires-Dist: huggingface_hub; extra == "local"
64
65
 
65
66
  <div align="center">
66
67
  <!-- Replace `#` with your actual links -->
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
5
5
 
6
6
  setup(
7
7
  name="webscout",
8
- version="2.8",
8
+ version="3.0-beta",
9
9
  description="Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs",
10
10
  long_description=README,
11
11
  long_description_content_type="text/markdown",
@@ -74,6 +74,7 @@ setup(
74
74
  'llama-cpp-python',
75
75
  'colorama',
76
76
  'numpy',
77
+ 'huggingface_hub',
77
78
  ],
78
79
  },
79
80
  license="HelpingAI Simplified Universal License",
@@ -46,6 +46,7 @@ webai = [
46
46
  "chatgptuk",
47
47
  "auto",
48
48
  "poe",
49
+ "basedgpt",
49
50
  ]
50
51
  gpt4free_providers = [
51
52
  provider.__name__ for provider in g4f.Provider.__providers__ # if provider.working
@@ -1,3 +1,3 @@
1
1
  from llama_cpp import __version__ as __llama_cpp_version__
2
2
 
3
- __version__ = '2.7'
3
+ __version__ = '2.9'
@@ -1,3 +1,4 @@
1
+ import json
1
2
  from ._version import __version__, __llama_cpp_version__
2
3
 
3
4
  """Submodule containing the Model class to work with language models"""
@@ -15,7 +16,7 @@ from .utils import (
15
16
 
16
17
  from .samplers import SamplerSettings, DefaultSampling
17
18
  from llama_cpp import Llama, StoppingCriteriaList
18
- from typing import Generator, Optional, Union
19
+ from typing import Callable, Generator, Optional, Union
19
20
  from os.path import isdir, exists
20
21
  from heapq import nlargest
21
22
 
@@ -68,7 +69,7 @@ class Model:
68
69
  n_gpu_layers: int = 0,
69
70
  offload_kqv: bool = True,
70
71
  flash_attn: bool = False,
71
- verbose: bool = False
72
+ verbose: bool = False,
72
73
  ):
73
74
  """
74
75
  Given the path to a GGUF file, construct a Model instance.
@@ -105,7 +106,7 @@ class Model:
105
106
  self._offload_kqv = offload_kqv
106
107
  self._flash_attn = flash_attn
107
108
  self._verbose = self.verbose = verbose
108
-
109
+ self.tools = {}
109
110
  # if context_length <= 0, use n_ctx_train
110
111
  if isinstance(context_length, int) and context_length <= 0:
111
112
  context_length = None
@@ -269,7 +270,77 @@ class Model:
269
270
  print_verbose(f"param: self.context_length == {self.context_length}")
270
271
  print_verbose(f" gguf: rope_freq_base_train == {rope_freq_base_train}")
271
272
  print_verbose(f"param: rope_freq_base == {rope_freq_base}")
272
-
273
+ def register_tool(self, name: str, function: Callable):
274
+ """
275
+ Registers a tool for function calling.
276
+
277
+ Args:
278
+ name: The name of the tool.
279
+ function: The Python function to execute when the tool is called.
280
+ """
281
+ self.tools[name] = function
282
+
283
+ def _extract_tool_code(self, text: str) -> dict:
284
+ """
285
+ Extracts tool code from the model's output using the chatml-function-calling format.
286
+
287
+ Args:
288
+ text: The model's generated text.
289
+
290
+ Returns:
291
+ A dictionary containing the tool name and arguments, or None if no tool call is found.
292
+ """
293
+ try:
294
+ # Assuming tool code is enclosed in ```tool_code\n...\n```tool_code```
295
+ start = text.find("```tool_code\n") + len("```tool_code\n")
296
+ end = text.find("\n```tool_code```")
297
+ tool_code_json = text[start:end]
298
+ tool_code = json.loads(tool_code_json)
299
+ return tool_code
300
+ except (ValueError, json.JSONDecodeError):
301
+ return None
302
+
303
+ def generate(
304
+ self,
305
+ prompt: Union[str, list[int]],
306
+ stops: list[Union[str, int]] = [],
307
+ sampler: SamplerSettings = DefaultSampling
308
+ ) -> str:
309
+ """
310
+ Given a prompt, return a generated string, potentially calling and executing tools.
311
+
312
+ Args:
313
+ prompt: The text from which to generate.
314
+ stops: A list of strings and/or token IDs at which to end the generation early.
315
+ sampler: The SamplerSettings object used to control text generation.
316
+
317
+ Returns:
318
+ The generated string.
319
+ """
320
+ assert_model_is_loaded(self)
321
+ response_text = self.llama.create_completion(
322
+ prompt,
323
+ max_tokens=sampler.max_len_tokens,
324
+ temperature=sampler.temp,
325
+ top_p=sampler.top_p,
326
+ min_p=sampler.min_p,
327
+ frequency_penalty=sampler.frequency_penalty,
328
+ presence_penalty=sampler.presence_penalty,
329
+ repeat_penalty=sampler.repeat_penalty,
330
+ top_k=sampler.top_k,
331
+ stop=stops
332
+ )['choices'][0]['text']
333
+
334
+ tool_code = self._extract_tool_code(response_text)
335
+ if tool_code:
336
+ # Execute the tool and get its output
337
+ tool_name = tool_code.get("function", {}).get("name")
338
+ arguments = tool_code.get("function", {}).get("arguments", "")
339
+ if tool_name and arguments and tool_name in self.tools:
340
+ tool_output = self.tools[tool_name](**json.loads(arguments))
341
+ # Append the tool output to the response
342
+ response_text += f"\n{tool_output}"
343
+ return response_text
273
344
  def __repr__(self) -> str:
274
345
  return \
275
346
  f"Model({repr(self._model_path)}, " + \
@@ -1,3 +1,4 @@
1
+ import json
1
2
  from ._version import __version__, __llama_cpp_version__
2
3
 
3
4
  """Submodule containing the Thread class, used for interaction with a Model"""
@@ -80,7 +81,9 @@ class Thread:
80
81
  format: Union[dict, AdvancedFormat],
81
82
  sampler: SamplerSettings = DefaultSampling,
82
83
  messages: Optional[list[Message]] = None,
84
+
83
85
  ):
86
+
84
87
  """
85
88
  Given a Model and a format, construct a Thread instance.
86
89
 
@@ -141,7 +144,7 @@ class Thread:
141
144
  self.create_message("system", self.format['system_content'])
142
145
  ] if self._messages is None else self._messages
143
146
  self.sampler: SamplerSettings = sampler
144
-
147
+ self.tools = []
145
148
  if self.model.verbose:
146
149
  print_verbose("new Thread instance with the following attributes:")
147
150
  print_verbose(f"model == {self.model}")
@@ -162,8 +165,16 @@ class Thread:
162
165
  print_verbose(f"sampler.presence_penalty == {self.sampler.presence_penalty}")
163
166
  print_verbose(f"sampler.repeat_penalty == {self.sampler.repeat_penalty}")
164
167
  print_verbose(f"sampler.top_k == {self.sampler.top_k}")
165
-
168
+ def add_tool(self, tool: dict):
169
+ """
170
+ Adds a tool to the Thread for function calling.
171
+ ... (Rest of your add_tool docstring)
172
+ """
173
+ self.tools.append(tool)
174
+ self.model.register_tool(tool['function']['name'], tool['function'].get('execute', None))
166
175
 
176
+ # Include tool information in the system message
177
+ self.messages[0]['content'] += f"\nYou have access to the following tool:\n```tool_code\n{json.dumps(tool)}\n```tool_code"
167
178
  def __repr__(self) -> str:
168
179
  return \
169
180
  f"Thread({repr(self.model)}, {repr(self.format)}, " + \
@@ -25,18 +25,19 @@ class _ArrayLike(Iterable):
25
25
  class _SupportsWriteAndFlush(TextIO):
26
26
  pass
27
27
 
28
- def download_model(repo_id: str, filename: str, cache_dir: str = ".cache") -> str:
28
+ def download_model(repo_id: str, filename: str, token: str, cache_dir: str = ".cache") -> str:
29
29
  """
30
30
  Downloads a GGUF model file from Hugging Face Hub.
31
31
 
32
32
  repo_id: The Hugging Face repository ID (e.g., 'facebook/bart-large-cnn').
33
33
  filename: The name of the GGUF file within the repository (e.g., 'model.gguf').
34
+ token: The Hugging Face token for authentication.
34
35
  cache_dir: The directory where the downloaded file should be stored.
35
36
 
36
37
  Returns: The path to the downloaded file.
37
38
  """
38
39
  url = hf_hub_url(repo_id, filename)
39
- filepath = cached_download(url, cache_dir=cache_dir, force_filename=filename)
40
+ filepath = cached_download(url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token)
40
41
  return filepath
41
42
 
42
43
  class GGUFReader:
@@ -28,7 +28,7 @@ from .Gemini import GEMINI
28
28
  from .Berlin4h import Berlin4h
29
29
  from .ChatGPTUK import ChatGPTUK
30
30
  from .Poe import POE
31
- from .BasedGPT import *
31
+ from .BasedGPT import BasedGPT
32
32
  __all__ = [
33
33
  'ThinkAnyAI',
34
34
  'Xjai',
@@ -57,5 +57,6 @@ __all__ = [
57
57
  'GEMINI',
58
58
  'Berlin4h',
59
59
  'ChatGPTUK',
60
- 'POE'
60
+ 'POE',
61
+ 'BasedGPT',
61
62
  ]
@@ -34,6 +34,7 @@ webai = [
34
34
  "chatgptuk",
35
35
  "auto",
36
36
  "poe",
37
+ "basedgpt",
37
38
  ]
38
39
 
39
40
  gpt4free_providers = [
@@ -0,0 +1,2 @@
1
+ __version__ = "2.9"
2
+
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: webscout
3
- Version: 2.8
3
+ Version: 3.0b0
4
4
  Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs
5
5
  Author: OEvortex
6
6
  Author-email: helpingai5@gmail.com
@@ -61,6 +61,7 @@ Provides-Extra: local
61
61
  Requires-Dist: llama-cpp-python; extra == "local"
62
62
  Requires-Dist: colorama; extra == "local"
63
63
  Requires-Dist: numpy; extra == "local"
64
+ Requires-Dist: huggingface_hub; extra == "local"
64
65
 
65
66
  <div align="center">
66
67
  <!-- Replace `#` with your actual links -->
@@ -37,3 +37,4 @@ pytest>=7.4.2
37
37
  llama-cpp-python
38
38
  colorama
39
39
  numpy
40
+ huggingface_hub
@@ -1,2 +0,0 @@
1
- __version__ = "2.7"
2
-
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes