webscout 3.5__py3-none-any.whl → 3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

webscout/AIutel.py CHANGED
@@ -648,14 +648,14 @@ Current Datetime : {datetime.datetime.now()}
648
648
  else:
649
649
  logging.info(message)
650
650
 
651
- def main(self, response: str) -> None:
651
+ def main(self, response: str):
652
652
  """Exec code in response accordingly
653
653
 
654
654
  Args:
655
- response (str): AI response
655
+ response: AI response
656
656
 
657
657
  Returns:
658
- None|str: None if script executed successfully else stdout data
658
+ Optional[str]: None if script executed successfully else stdout data
659
659
  """
660
660
  code_blocks = re.findall(r"```python.*?```", response, re.DOTALL)
661
661
  if len(code_blocks) != 1:
@@ -691,6 +691,7 @@ Current Datetime : {datetime.datetime.now()}
691
691
  self.log("Returning success feedback")
692
692
  return f"LAST SCRIPT OUTPUT:\n{proc.stdout}"
693
693
  else:
694
+
694
695
  self.log("Returning error feedback", "error")
695
696
  return f"PREVIOUS SCRIPT EXCEPTION:\n{proc.stderr}"
696
697
  else:
@@ -701,12 +702,14 @@ Current Datetime : {datetime.datetime.now()}
701
702
  self.log("Executing script internally")
702
703
  exec(raw_code_plus)
703
704
  except Exception as e:
705
+ error_message = str(e)
704
706
  self.log(
705
- "Exception occurred while executing script. Responding with error: "
706
- f"{e.args[1] if len(e.args)>1 else str(e)}",
707
- "error",
707
+ f"Exception occurred while executing script. Responding with error: {error_message}",
708
+ "error"
708
709
  )
709
- return f"PREVIOUS SCRIPT EXCEPTION:\n{str(e)}"
710
+ # Return the exact error message
711
+ return f"PREVIOUS SCRIPT EXCEPTION:\n{error_message}"
712
+
710
713
  class Audio:
711
714
  # Request headers
712
715
  headers: dict[str, str] = {
@@ -0,0 +1,2 @@
1
+ from .gguf import *
2
+ from .autollama import *
@@ -0,0 +1,47 @@
1
+ import subprocess
2
+ import argparse
3
+ import os
4
+
5
def autollama(model_path, gguf_file):
    """Manage models with Ollama using the bundled autollama.sh script.

    Runs ``bash autollama.sh -m <model_path> -g <gguf_file>`` from the
    directory containing this module and streams the script's output to
    stdout as it is produced.

    Args:
        model_path (str): The path to the Hugging Face model.
        gguf_file (str): The name of the GGUF file.

    Returns:
        None. The script's combined stdout/stderr is printed; the exit
        status is not surfaced (matching the original best-effort behavior).
    """
    # Resolve autollama.sh relative to this module so the helper works
    # regardless of the caller's current working directory.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    script_path = os.path.join(script_dir, "autollama.sh")

    command = ["bash", script_path, "-m", model_path, "-g", gguf_file]

    # Merge stderr into stdout: draining two separate PIPEs one after the
    # other can deadlock once the untouched stderr buffer fills up, and
    # stderr would otherwise only appear after stdout was exhausted
    # (i.e. never "in real-time").
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )

    # Stream the single merged pipe line-by-line as the script runs.
    for line in process.stdout:
        print(line, end='')

    process.wait()
33
+
34
def main():
    """CLI entry point: parse the arguments and delegate to autollama()."""
    arg_parser = argparse.ArgumentParser(
        description='Run autollama.sh to manage models with Ollama'
    )
    arg_parser.add_argument(
        '-m', '--model_path',
        required=True,
        help='Set the path to the Hugging Face model',
    )
    arg_parser.add_argument(
        '-g', '--gguf_file',
        required=True,
        help='Set the GGUF file name',
    )
    parsed = arg_parser.parse_args()

    try:
        autollama(parsed.model_path, parsed.gguf_file)
    except Exception as err:
        # Surface the failure on stdout and exit non-zero, as before.
        print(f"Error: {err}")
        exit(1)


if __name__ == "__main__":
    main()
webscout/Extra/gguf.py ADDED
@@ -0,0 +1,80 @@
1
+ import subprocess
2
+ import argparse
3
+ import os
4
+
5
def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5_k_m"):
    """Converts and quantizes a Hugging Face model to GGUF format.

    Runs the bundled ``gguf.sh`` helper script and streams its output to
    stdout as it is produced.

    Args:
        model_id (str): The Hugging Face model ID (e.g., 'google/flan-t5-xl').
        username (str, optional): Your Hugging Face username. Required for uploads.
        token (str, optional): Your Hugging Face API token. Required for uploads.
        quantization_methods (str, optional): Comma-separated quantization methods.
            Defaults to "q4_k_m,q5_k_m".

    Raises:
        ValueError: If an invalid quantization method is provided.
    """
    # Closed set of quantization methods that gguf.sh understands.
    valid_methods = [
        "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
        "q4_0", "q4_1", "q4_k_m", "q4_k_s",
        "q5_0", "q5_1", "q5_k_m", "q5_k_s",
        "q6_k", "q8_0"
    ]

    # Validate before spawning any subprocess so bad input fails fast.
    selected_methods_list = quantization_methods.split(',')
    for method in selected_methods_list:
        if method not in valid_methods:
            raise ValueError(f"Invalid method: {method}. Please select from the available methods: {', '.join(valid_methods)}")

    # Resolve gguf.sh relative to this module so the helper works
    # regardless of the caller's current working directory.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    script_path = os.path.join(script_dir, "gguf.sh")

    command = ["bash", script_path, "-m", model_id]

    if username:
        command.extend(["-u", username])

    if token:
        command.extend(["-t", token])

    if quantization_methods:
        command.extend(["-q", quantization_methods])

    # Merge stderr into stdout: draining two separate PIPEs one after the
    # other can deadlock once the untouched stderr buffer fills up, and
    # stderr would otherwise only appear after stdout was exhausted
    # (i.e. never "in real-time").
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )

    # Stream the single merged pipe line-by-line as the script runs.
    for line in process.stdout:
        print(line, end='')

    process.wait()
62
+
63
def main():
    """CLI entry point: parse the arguments and delegate to convert()."""
    arg_parser = argparse.ArgumentParser(
        description='Convert and quantize model using gguf.sh'
    )
    arg_parser.add_argument(
        '-m', '--model_id',
        required=True,
        help='Set the HF model ID (e.g., "google/flan-t5-xl")',
    )
    arg_parser.add_argument(
        '-u', '--username',
        help='Set your Hugging Face username (required for uploads)',
    )
    arg_parser.add_argument(
        '-t', '--token',
        help='Set your Hugging Face API token (required for uploads)',
    )
    arg_parser.add_argument(
        '-q', '--quantization_methods',
        default="q4_k_m,q5_k_m",
        help='Comma-separated quantization methods (default: q4_k_m,q5_k_m). Valid methods: q2_k, q3_k_l, q3_k_m, q3_k_s, q4_0, q4_1, q4_k_m, q4_k_s, q5_0, q5_1, q5_k_m, q5_k_s, q6_k, q8_0',
    )

    parsed = arg_parser.parse_args()

    try:
        convert(parsed.model_id, parsed.username, parsed.token, parsed.quantization_methods)
    except ValueError as err:
        # Report the validation failure and exit non-zero, as before.
        print(err)
        exit(1)


if __name__ == "__main__":
    main()
@@ -1,3 +1,3 @@
1
1
  from llama_cpp import __version__ as __llama_cpp_version__
2
2
 
3
- __version__ = '3.4'
3
+ __version__ = '3.7'