webscout 6.1-py3-none-any.whl → 6.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (48)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Extra/autollama.py +37 -20
  4. webscout/Local/formats.py +4 -2
  5. webscout/Local/utils.py +37 -12
  6. webscout/Provider/Amigo.py +50 -37
  7. webscout/Provider/Deepseek.py +7 -6
  8. webscout/Provider/EDITEE.py +2 -2
  9. webscout/Provider/GPTWeb.py +1 -1
  10. webscout/Provider/Llama3.py +1 -1
  11. webscout/Provider/NinjaChat.py +2 -2
  12. webscout/Provider/OLLAMA.py +1 -1
  13. webscout/Provider/Perplexity.py +1 -1
  14. webscout/Provider/Reka.py +12 -5
  15. webscout/Provider/TTI/AIuncensored.py +103 -0
  16. webscout/Provider/TTI/__init__.py +3 -2
  17. webscout/Provider/TTI/talkai.py +116 -0
  18. webscout/Provider/TeachAnything.py +0 -3
  19. webscout/Provider/__init__.py +8 -11
  20. webscout/Provider/cerebras.py +143 -123
  21. webscout/Provider/cleeai.py +1 -1
  22. webscout/Provider/felo_search.py +1 -1
  23. webscout/Provider/gaurish.py +41 -2
  24. webscout/Provider/geminiprorealtime.py +1 -1
  25. webscout/Provider/genspark.py +1 -1
  26. webscout/Provider/julius.py +4 -3
  27. webscout/Provider/learnfastai.py +1 -1
  28. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  29. webscout/Provider/promptrefine.py +3 -1
  30. webscout/Provider/talkai.py +196 -0
  31. webscout/Provider/turboseek.py +3 -8
  32. webscout/Provider/tutorai.py +1 -1
  33. webscout/__init__.py +2 -43
  34. webscout/tempid.py +4 -73
  35. webscout/version.py +1 -1
  36. webscout/webai.py +1 -1
  37. {webscout-6.1.dist-info → webscout-6.2.dist-info}/METADATA +44 -128
  38. {webscout-6.1.dist-info → webscout-6.2.dist-info}/RECORD +42 -45
  39. webscout/Provider/BasedGPT.py +0 -214
  40. webscout/Provider/ChatHub.py +0 -209
  41. webscout/Provider/TTI/amigo.py +0 -148
  42. webscout/Provider/bixin.py +0 -264
  43. webscout/Provider/xdash.py +0 -182
  44. webscout/websx_search.py +0 -19
  45. {webscout-6.1.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  46. {webscout-6.1.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  47. {webscout-6.1.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  48. {webscout-6.1.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
webscout/AIauto.py CHANGED
@@ -1,123 +1,34 @@
- from webscout.AIbase import Provider, AsyncProvider
- from webscout.Provider.ThinkAnyAI import ThinkAnyAI
- from webscout.Provider.Llama import LLAMA
-
- from webscout.Provider.Koboldai import KOBOLDAI
- from webscout.Provider.Koboldai import AsyncKOBOLDAI
-
- from webscout.Provider.Perplexity import Perplexity
- from webscout.Provider.Blackboxai import BLACKBOXAI
- from webscout.Provider.Blackboxai import AsyncBLACKBOXAI
- from webscout.Provider.Phind import PhindSearch
- from webscout.Provider.Phind import Phindv2
- from webscout.Provider.yep import YEPCHAT
- from webscout.Provider.Poe import POE
- from webscout.Provider.BasedGPT import BasedGPT
- from webscout.Provider.Deepseek import DeepSeek
- from webscout.Provider.Deepinfra import DeepInfra, VLM, AsyncDeepInfra
- from webscout.Provider.OLLAMA import OLLAMA
- from webscout.Provider.Andi import AndiSearch
- from webscout.Provider.Llama3 import LLAMA3
- from webscout.Provider.DARKAI import DARKAI
- from webscout.Provider.koala import KOALA
- from webscout.Provider.RUBIKSAI import RUBIKSAI
- from webscout.Provider.meta import Meta
-
- from webscout.Provider.DiscordRocks import DiscordRocks
- from webscout.Provider.felo_search import Felo
- from webscout.Provider.xdash import XDASH
- from webscout.Provider.julius import Julius
- from webscout.Provider.Youchat import YouChat
- from webscout.Provider.Cloudflare import Cloudflare
- from webscout.Provider.turboseek import TurboSeek
- from webscout.Provider.NetFly import NetFly
- from webscout.Provider.EDITEE import Editee
- from webscout.Provider.Chatify import Chatify
- from webscout.Provider.PI import PiAI
- from webscout.g4f import GPT4FREE, AsyncGPT4FREE
- from webscout.g4f import TestProviders
+ from webscout.AIbase import Provider
+ from webscout.g4f import GPT4FREE, TestProviders
  from webscout.exceptions import AllProvidersFailure
- from typing import AsyncGenerator
-
- from typing import Union
- from typing import Any
+ from typing import Union, Any, Dict, Generator
+ import importlib
+ import pkgutil
  import logging
-
-
- provider_map: dict[
-     str,
-     Union[
-         ThinkAnyAI,
-         LLAMA,
-         KOBOLDAI,
-         Perplexity,
-         BLACKBOXAI,
-         PhindSearch,
-         Phindv2,
-         YEPCHAT,
-         POE,
-         BasedGPT,
-         DeepSeek,
-         DeepInfra,
-         VLM,
-         GPT4FREE,
-         OLLAMA,
-         AndiSearch,
-         LLAMA3,
-         DARKAI,
-         KOALA,
-         RUBIKSAI,
-         Meta,
-
-         DiscordRocks,
-         Felo,
-         XDASH,
-         Julius,
-         YouChat,
-         Cloudflare,
-         TurboSeek,
-         NetFly,
-         Editee,
-         Chatify,
-         PiAI,
-     ],
- ] = {
-     "ThinkAnyAI": ThinkAnyAI,
-     "LLAMA2": LLAMA,
-     "KOBOLDAI": KOBOLDAI,
-     "PERPLEXITY": Perplexity,
-     "BLACKBOXAI": BLACKBOXAI,
-     "PhindSearch": PhindSearch,
-     "Phindv2": Phindv2,
-     "YEPCHAT": YEPCHAT,
-
-     "POE": POE,
-     "BasedGPT": BasedGPT,
-     "DeepSeek": DeepSeek,
-     "DeepInfra": DeepInfra,
-     "VLM": VLM,
-     "gpt4free": GPT4FREE,
-     "ollama": OLLAMA,
-     "andi": AndiSearch,
-     "llama3": LLAMA3,
-     "darkai": DARKAI,
-     "koala": KOALA,
-     "rubiksai": RUBIKSAI,
-     "meta": Meta,
-
-     "discordrocks": DiscordRocks,
-     "felo": Felo,
-     "xdash": XDASH,
-     "julius": Julius,
-     "you": YouChat,
-     "cloudflare": Cloudflare,
-     "turboseek": TurboSeek,
-     "netfly": NetFly,
-     "editee": Editee,
-     # "chatify": Chatify,
-     "pi": PiAI,
- }
-
+ import random
+ import inspect
+
+ def load_providers():
+     provider_map = {}
+     api_key_providers = set()
+     provider_package = importlib.import_module("webscout.Provider")
+
+     for _, module_name, _ in pkgutil.iter_modules(provider_package.__path__):
+         try:
+             module = importlib.import_module(f"webscout.Provider.{module_name}")
+             for attr_name in dir(module):
+                 attr = getattr(module, attr_name)
+                 if isinstance(attr, type) and issubclass(attr, Provider) and attr != Provider:
+                     provider_map[attr_name.upper()] = attr
+                     # Check if the provider needs an API key
+                     if 'api_key' in inspect.signature(attr.__init__).parameters:
+                         api_key_providers.add(attr_name.upper())
+         except Exception as e:
+             logging.warning(f"Failed to load provider {module_name}: {e}")
+
+     return provider_map, api_key_providers
+
+ provider_map, api_key_providers = load_providers()

  class AUTO(Provider):
      def __init__(
@@ -133,57 +44,8 @@ class AUTO(Provider):
          act: str = None,
          exclude: list[str] = [],
      ):
-         """Instantiates AUTO
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             exclude(list[str], optional): List of providers to be excluded. Defaults to [].
-         """
-         self.provider: Union[
-             ThinkAnyAI,
-             LLAMA,
-             KOBOLDAI,
-             Perplexity,
-             BLACKBOXAI,
-             PhindSearch,
-             Phindv2,
-             YEPCHAT,
-
-             POE,
-             BasedGPT,
-             DeepSeek,
-             DeepInfra,
-             VLM,
-             GPT4FREE,
-             OLLAMA,
-             AndiSearch,
-             LLAMA3,
-             DARKAI,
-             KOALA,
-             RUBIKSAI,
-             Meta,
-
-             DiscordRocks,
-             Felo,
-             XDASH,
-             Julius,
-             YouChat,
-             Cloudflare,
-             TurboSeek,
-             NetFly,
-             Editee,
-             # Chatify,
-             PiAI,
-         ] = None
-         self.provider_name: str = None
+         self.provider = None
+         self.provider_name = None
          self.is_conversation = is_conversation
          self.max_tokens = max_tokens
          self.timeout = timeout
@@ -193,15 +55,15 @@ class AUTO(Provider):
          self.proxies = proxies
          self.history_offset = history_offset
          self.act = act
-         self.exclude = exclude
+         self.exclude = [e.upper() for e in exclude]

      @property
      def last_response(self) -> dict[str, Any]:
-         return self.provider.last_response
+         return self.provider.last_response if self.provider else {}

      @property
      def conversation(self) -> object:
-         return self.provider.conversation
+         return self.provider.conversation if self.provider else None

      def ask(
          self,
@@ -211,20 +73,8 @@ class AUTO(Provider):
          optimizer: str = None,
          conversationally: bool = False,
          run_new_test: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-             run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
-         Returns:
-             dict : {}
-         """
-         ask_kwargs: dict[str, Union[str, bool]] = {
+     ) -> Union[Dict, Generator]:
+         ask_kwargs = {
              "prompt": prompt,
              "stream": stream,
              "raw": raw,
@@ -232,14 +82,20 @@ class AUTO(Provider):
              "conversationally": conversationally,
          }

-         # webscout-based providers
-         for provider_name, provider_obj in provider_map.items():
-             # continue
-             if provider_name in self.exclude:
-                 continue
+         # Filter out API key required providers and excluded providers
+         available_providers = [
+             (name, cls) for name, cls in provider_map.items()
+             if name not in api_key_providers and name not in self.exclude
+         ]
+
+         # Shuffle the list of available providers
+         random.shuffle(available_providers)
+
+         # Try webscout-based providers
+         for provider_name, provider_class in available_providers:
              try:
                  self.provider_name = f"webscout-{provider_name}"
-                 self.provider = provider_obj(
+                 self.provider = provider_class(
                      is_conversation=self.is_conversation,
                      max_tokens=self.max_tokens,
                      timeout=self.timeout,
@@ -251,26 +107,19 @@ class AUTO(Provider):
                      act=self.act,
                  )

-                 def for_stream():
-                     for chunk in self.provider.ask(**ask_kwargs):
-                         yield chunk
-
-                 def for_non_stream():
-                     return self.provider.ask(**ask_kwargs)
-
-                 return for_stream() if stream else for_non_stream()
+                 return self.provider.ask(**ask_kwargs)

              except Exception as e:
                  logging.debug(
                      f"Failed to generate response using provider {provider_name} - {e}"
                  )

-         # g4f-based providers
-
-         for provider_info in TestProviders(timeout=self.timeout).get_results(
-             run=run_new_test
-         ):
-             if provider_info["name"] in self.exclude:
+         # Try GPT4FREE providers
+         gpt4free_providers = TestProviders(timeout=self.timeout).get_results(run=run_new_test)
+         random.shuffle(gpt4free_providers)
+
+         for provider_info in gpt4free_providers:
+             if provider_info["name"].upper() in self.exclude:
                  continue
              try:
                  self.provider_name = f"g4f-{provider_info['name']}"
@@ -286,23 +135,15 @@ class AUTO(Provider):
                      act=self.act,
                  )

-                 def for_stream():
-                     for chunk in self.provider.ask(**ask_kwargs):
-                         yield chunk
-
-                 def for_non_stream():
-                     return self.provider.ask(**ask_kwargs)
-
-                 return for_stream() if stream else for_non_stream()
+                 print(f"Using provider: {self.provider_name}")
+                 return self.provider.ask(**ask_kwargs)

              except Exception as e:
                  logging.debug(
-                     f"Failed to generate response using GPT4FREE-base provider {provider_name} - {e}"
+                     f"Failed to generate response using GPT4FREE-based provider {provider_info['name']} - {e}"
                  )

-         raise AllProvidersFailure(
-             "None of the providers generated response successfully."
-         )
+         raise AllProvidersFailure("None of the providers generated response successfully.")

      def chat(
          self,
@@ -311,48 +152,25 @@ class AUTO(Provider):
          optimizer: str = None,
          conversationally: bool = False,
          run_new_test: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-             run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt,
-                 True,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-                 run_new_test=run_new_test,
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             ask_response = self.ask(
-                 prompt,
-                 False,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-                 run_new_test=run_new_test,
-             )
-             return self.get_message(ask_response)
-
-         return for_stream() if stream else for_non_stream()
+     ) -> Union[str, Generator[str, None, None]]:
+         response = self.ask(
+             prompt,
+             stream,
+             optimizer=optimizer,
+             conversationally=conversationally,
+             run_new_test=run_new_test,
+         )
+
+         if stream:
+             return (self.get_message(chunk) for chunk in response)
+         else:
+             return self.get_message(response)

      def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
          assert self.provider is not None, "Chat with AI first"
          return self.provider.get_message(response)
+ if __name__ == "__main__":
+     auto = AUTO()
+
+     response = auto.chat("Hello, how are you?")
+     print(response)
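
Note on the AIauto.py change: 6.2 drops the hand-maintained provider_map in favor of runtime discovery (pkgutil.iter_modules over webscout.Provider, registering every Provider subclass), skips providers whose __init__ takes an api_key, shuffles the candidates, and uppercases names so that exclude matching is case-insensitive. A minimal usage sketch against the refactored class; the prompts and the excluded name are illustrative, and actual results depend on which providers are reachable:

    from webscout.AIauto import AUTO

    auto = AUTO(timeout=60, exclude=["perplexity"])  # exclude is case-insensitive in 6.2

    # Non-streaming: chat() returns the extracted message string.
    print(auto.chat("Summarize what a context manager does in Python."))

    # Streaming: chat() now returns a generator of message chunks.
    for chunk in auto.chat("Tell me a short story.", stream=True):
        print(chunk, end="", flush=True)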
webscout/Agents/functioncall.py CHANGED
@@ -4,12 +4,12 @@ import json
  import time
  from typing import Any, Dict, Optional
  import requests
- from webscout import WEBS, Julius
+ from webscout import WEBS, GEMINIAPI

  class FunctionCallingAgent:
      def __init__(self,
                   tools: list = None):
-         self.ai = Julius(timeout=300, intro=None)
+         self.ai = GEMINIAPI(api_key="Gemini api key", timeout=300, intro=None)
          self.tools = tools if tools is not None else []
          self.knowledge_cutoff = "September 2022"

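The agent now defaults to GEMINIAPI instead of Julius, but the new code ships with a hardcoded "Gemini api key" placeholder. A safer instantiation sketch using the constructor arguments shown in the diff; the GEMINI_API_KEY variable name is illustrative:

    import os
    from webscout import GEMINIAPI

    # Read the key from the environment rather than hardcoding the placeholder.
    ai = GEMINIAPI(api_key=os.environ["GEMINI_API_KEY"], timeout=300, intro=None)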
webscout/Extra/autollama.py CHANGED
@@ -5,10 +5,10 @@ import sys
  import subprocess
  import logging
  import psutil
- from huggingface_hub import hf_hub_url, cached_download
+ from huggingface_hub import hf_hub_download  # Updated import
  import colorlog
- import ollama  # Import ollama for interactive chat
- import argparse  # Import argparse for command-line arguments
+ import ollama
+ import argparse

  # Suppress specific warnings
  warnings.filterwarnings(
@@ -36,7 +36,6 @@ if not logger.hasHandlers():
      logger.addHandler(handler)
      logger.setLevel(logging.INFO)

- # Redirect warnings to the logger but avoid duplication
  logging.captureWarnings(True)
  py_warnings_logger = logging.getLogger("py.warnings")
  if not py_warnings_logger.hasHandlers():
@@ -76,11 +75,37 @@ def is_model_created(model_name):


  def download_model(repo_id, filename, token, cache_dir="downloads"):
-     url = hf_hub_url(repo_id, filename)
-     filepath = cached_download(
-         url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token
-     )
-     return filepath
+     """
+     Downloads a model file from the Hugging Face Hub using hf_hub_download.
+     """
+     try:
+         os.makedirs(cache_dir, exist_ok=True)
+
+         # Download using hf_hub_download
+         filepath = hf_hub_download(
+             repo_id=repo_id,
+             filename=filename,
+             token=token,
+             cache_dir=cache_dir,
+             resume_download=True,
+             force_download=False,
+             local_files_only=False
+         )
+
+         # Ensure file is in the expected location
+         expected_path = os.path.join(cache_dir, filename)
+         if filepath != expected_path:
+             os.makedirs(os.path.dirname(expected_path), exist_ok=True)
+             if not os.path.exists(expected_path):
+                 import shutil
+                 shutil.copy2(filepath, expected_path)
+             filepath = expected_path
+
+         return filepath
+
+     except Exception as e:
+         logger.error(f"Error downloading model: {str(e)}")
+         raise


  def is_ollama_running():
@@ -90,16 +115,14 @@ def is_ollama_running():
      return False


- def main(model_path=None, gguf_file=None):  # Modified to handle both CLI and non-CLI
+ def main(model_path=None, gguf_file=None):
      show_art()

-     # Parse command-line arguments if provided
      parser = argparse.ArgumentParser(description="Download and create an Ollama model")
      parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
      parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
      args = parser.parse_args()

-     # Use arguments from command line or function parameters
      model_path = args.model_path if args.model_path else model_path
      gguf_file = args.gguf_file if args.gguf_file else gguf_file

@@ -112,12 +135,10 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and non-CLI
      download_log = "downloaded_models.log"
      logging_name = f"{model_path}_{model_name}"

-     # Ensure the log file exists
      if not os.path.exists(download_log):
          with open(download_log, 'w') as f:
              pass

-     # Check if huggingface-hub is installed, and install it if not
      try:
          subprocess.check_output(['pip', 'show', 'huggingface-hub'])
      except subprocess.CalledProcessError:
@@ -126,7 +147,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and non-CLI
      else:
          logger.info("huggingface-hub is already installed.")

-     # Check if the model has already been downloaded
      if is_model_downloaded(logging_name, download_log):
          logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
      else:
@@ -134,13 +154,11 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and non-CLI
          token = os.getenv('HUGGINGFACE_TOKEN', None)
          if not token:
              logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
-             token = None
-
+
          filepath = download_model(model_path, gguf_file, token)
          log_downloaded_model(logging_name, download_log)
          logger.info(f"Model {logging_name} downloaded and logged.")

-     # Check if Ollama is installed, and install it if not
      try:
          subprocess.check_output(['ollama', '--version'])
      except subprocess.CalledProcessError:
@@ -149,7 +167,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and non-CLI
      else:
          logger.info("Ollama is already installed.")

-     # Check if Ollama is already running
      if is_ollama_running():
          logger.info("Ollama is already running. Skipping the start.")
      else:
@@ -162,7 +179,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and non-CLI

      logger.info("Ollama has started.")

-     # Check if the model has already been created
      if is_model_created(model_name):
          logger.info(f"Model {model_name} is already created. Skipping creation.")
      else:
@@ -175,5 +191,6 @@ def main(model_path=None, gguf_file=None): # Modified to handle both CLI and non-CLI
      logger.info(f"model name is > {model_name}")
      logger.info(f"Use Ollama run {model_name}")

+
  if __name__ == "__main__":
      main()
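
Both autollama.py above and Local/utils.py later in this diff migrate from the two-step hf_hub_url + cached_download flow (deprecated and later removed from huggingface_hub) to the single-call hf_hub_download. The core of the change, reduced to a standalone sketch; the repo and file names are illustrative:

    from huggingface_hub import hf_hub_download

    # Old, removed API:
    #   url = hf_hub_url(repo_id, filename)
    #   path = cached_download(url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token)

    # New API: one call resolves, downloads, and caches the file.
    path = hf_hub_download(
        repo_id="TheBloke/Llama-2-7B-GGUF",  # illustrative repo id
        filename="llama-2-7b.Q4_K_M.gguf",   # illustrative file name
        cache_dir="downloads",
        token=None,  # or a Hugging Face token for gated/private repos
    )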
webscout/Local/formats.py CHANGED
@@ -291,7 +291,7 @@ llama2chat: dict[str, str | list] = {
  # https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/
  llama3: dict[str, str | list] = {
      "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
-     "system_prompt": 'You are a helpful AI assistant called "Llama 3".',
+     "system_prompt": 'You are a helpful AI assistant.',
      "system_suffix": "<|eot_id|>\n",
      "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
      "user_suffix": "<|eot_id|>\n",
@@ -628,6 +628,8 @@ airoboros = llama2chat.copy()
  airoboros['system_prompt'] = \
      "You are a helpful, unbiased, uncensored assistant."
  tess = synthia.copy()
+ helpingai2 = llama3.copy()
+ helpingai2['system_prompt'] = "You are HelpingAI, an emotional AI. Always answer my questions in the HelpingAI style."
  tess['system_prompt'] = ''  # Tess can use any system prompt, or none
  alpaca_strict = alpaca.copy()  # Alpaca with more stopping strings
  alpaca_strict['stops'] = [
@@ -651,7 +653,7 @@ def _llama3_suffix_with_timestamp():

  Llama3WithTimestamps = AdvancedFormat({
      "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
-     "system_prompt": 'You are a helpful AI assistant called "Llama 3".',
+     "system_prompt": 'You are a helpful AI assistant.',
      "system_suffix": _llama3_suffix_with_timestamp,
      "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
      "user_suffix": _llama3_suffix_with_timestamp,
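
The helpingai2 addition follows the module's existing copy-and-override idiom for deriving new prompt formats. The same pattern in isolation; the my_format name and prompt text are illustrative:

    my_format = llama3.copy()
    my_format["system_prompt"] = "You are a terse assistant. Answer in one sentence."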
webscout/Local/utils.py CHANGED
@@ -5,7 +5,7 @@ from enum import IntEnum
  from io import BufferedReader
  from typing import Dict, Iterable, TextIO, Optional, Union, Tuple, Generator, Any

- from huggingface_hub import hf_hub_url, cached_download
+ from huggingface_hub import hf_hub_download
  import numpy as np

  from ._version import __version__, __llama_cpp_version__
@@ -42,25 +42,50 @@ class UnreachableException(Exception):
          "https://github.com/ddh0/easy-llama/issues/new/choose"
      )

- def download_model(repo_id: str, filename: str, token: str, cache_dir: str = ".cache") -> str:
+ def download_model(
+     repo_id: str,
+     filename: str,
+     token: Optional[str] = None,
+     cache_dir: str = ".cache",
+     revision: str = "main"
+ ) -> str:
      """
-     Downloads a GGUF model file from the Hugging Face Hub.
+     Downloads a model file from the Hugging Face Hub.

      Args:
-         repo_id (str): Hugging Face repository ID (e.g., 'facebook/bart-large-cnn').
-         filename (str): Name of the GGUF file (e.g., 'model.gguf').
-         token (str): Hugging Face API token.
+         repo_id (str): Hugging Face repository ID (e.g., 'facebook/bart-large-cnn')
+         filename (str): Name of the file to download (e.g., 'model.bin', 'tokenizer.json')
+         token (str, optional): Hugging Face API token for private repos. Defaults to None.
          cache_dir (str, optional): Local directory for storing downloaded files.
-             Defaults to ".cache".
+             Defaults to ".cache".
+         revision (str, optional): The specific model version to use. Defaults to "main".

      Returns:
          str: Path to the downloaded file.
+
+     Raises:
+         ValueError: If the repository or file is not found
+         Exception: For other download-related errors
      """
-     url = hf_hub_url(repo_id, filename)
-     filepath = cached_download(
-         url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token
-     )
-     return filepath
+     try:
+         # Create cache directory if it doesn't exist
+         os.makedirs(cache_dir, exist_ok=True)
+
+         # Download the file
+         downloaded_path = hf_hub_download(
+             repo_id=repo_id,
+             filename=filename,
+             token=token,
+             cache_dir=cache_dir,
+             revision=revision,
+             resume_download=True,  # Resume interrupted downloads
+             force_download=False   # Use cached version if available
+         )
+
+         return downloaded_path
+
+     except Exception as e:
+         raise Exception(f"Error downloading model from {repo_id}: {str(e)}")

  def softmax(z: _ArrayLike, T: Optional[float] = None, dtype: Optional[np.dtype] = None) -> np.ndarray:
      """
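
With the new signature, token and revision are optional keyword arguments. A hypothetical call against the updated helper; the repo and file names are illustrative:

    path = download_model(
        repo_id="ddh0/some-gguf-repo",
        filename="model.Q4_K_M.gguf",
        cache_dir=".cache",
        revision="main",
    )
    print(path)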