webscout-5.0-py3-none-any.whl → webscout-5.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

@@ -1,209 +1,179 @@
- import subprocess
- import argparse
+ import warnings
+ from datetime import time
  import os
- from rich.console import Console
- from rich.panel import Panel
- from rich.progress import track
- from yaspin import yaspin
- from pyfiglet import figlet_format
- import time
-
- console = Console()
-
- def autollama(model_path, gguf_file):
-     """Manages models with Ollama using the autollama.sh script.
-
-     Args:
-         model_path (str): The path to the Hugging Face model.
-         gguf_file (str): The name of the GGUF file.
-     """
-     console.print(f"[bold green]{figlet_format('Autollama')}[/]\n", justify="center")
-
-     # Check if autollama.sh exists in the current working directory
-     script_path = os.path.join(os.getcwd(), "autollama.sh")
-     if not os.path.exists(script_path):
-         # Create autollama.sh with the content provided
-         with open(script_path, "w") as f:
-             f.write("""
- function show_art() {
-     cat << "EOF"
- Made with love in India
- EOF
- }
-
- show_art
-
- # Initialize default values
- MODEL_PATH=""
- GGUF_FILE=""
-
- # Display help/usage information
- usage() {
-     echo "Usage: $0 -m <model_path> -g <gguf_file>"
-     echo
-     echo "Options:"
-     echo "  -m <model_path>  Set the path to the model"
-     echo "  -g <gguf_file>   Set the GGUF file name"
-     echo "  -h               Display this help and exit"
-     echo
- }
-
- # Parse command-line options
- while getopts ":m:g:h" opt; do
-     case ${opt} in
-         m )
-             MODEL_PATH=$OPTARG
-             ;;
-         g )
-             GGUF_FILE=$OPTARG
-             ;;
-         h )
-             usage
-             exit 0
-             ;;
-         \? )
-             echo "Invalid Option: -$OPTARG" 1>&2
-             usage
-             exit 1
-             ;;
-         : )
-             echo "Invalid Option: -$OPTARG requires an argument" 1>&2
-             usage
-             exit 1
-             ;;
-     esac
- done
-
- # Check required parameters
- if [ -z "$MODEL_PATH" ] || [ -z "$GGUF_FILE" ]; then
-     echo "Error: -m (model_path) and -g (gguf_file) are required."
-     usage
-     exit 1
- fi
-
- # Derive MODEL_NAME
- MODEL_NAME=$(echo $GGUF_FILE | sed 's/\(.*\)\.Q4.*/\\1/')
-
- # Log file where downloaded models are recorded
- DOWNLOAD_LOG="downloaded_models.log"
-
- # Composite logging name
- LOGGING_NAME="${MODEL_PATH}_${MODEL_NAME}"
-
- # Check if the model has been downloaded
- function is_model_downloaded {
-     grep -qxF "$LOGGING_NAME" "$DOWNLOAD_LOG" && return 0 || return 1
- }
-
- # Log the downloaded model
- function log_downloaded_model {
-     echo "$LOGGING_NAME" >> "$DOWNLOAD_LOG"
- }
-
- # Function to check if the model has already been created
- function is_model_created {
-     # 'ollama list' lists all models
-     ollama list | grep -q "$MODEL_NAME" && return 0 || return 1
- }
-
- # Check if huggingface-hub is installed, and install it if not
- if ! pip show huggingface-hub > /dev/null; then
-     echo "Installing huggingface-hub..."
-     pip install -U "huggingface_hub[cli]"
- else
-     echo "huggingface-hub is already installed."
- fi
-
- # Check if the model has already been downloaded
- if is_model_downloaded; then
-     echo "Model $LOGGING_NAME has already been downloaded. Skipping download."
- else
-     echo "Downloading model $LOGGING_NAME..."
-     # Download the model
-     huggingface-cli download $MODEL_PATH $GGUF_FILE --local-dir downloads --local-dir-use-symlinks False
-
-     # Log the downloaded model
-     log_downloaded_model
-     echo "Model $LOGGING_NAME downloaded and logged."
- fi
-
- # Check if Ollama is installed, and install it if not
- if ! command -v ollama &> /dev/null; then
-     echo "Installing Ollama..."
-     curl -fsSL https://ollama.com/install.sh | sh
- else
-     echo "Ollama is already installed."
- fi
-
- # Check if Ollama is already running
- if pgrep -f 'ollama serve' > /dev/null; then
-     echo "Ollama is already running. Skipping the start."
- else
-     echo "Starting Ollama..."
-     # Start Ollama in the background
-     ollama serve &
-
-     # Wait for Ollama to start
-     while true; do
-         if pgrep -f 'ollama serve' > /dev/null; then
-             echo "Ollama has started."
-             sleep 60
-             break
-         else
-             echo "Waiting for Ollama to start..."
-             sleep 1  # Wait for 1 second before checking again
-         fi
-     done
- fi
-
- # Check if the model has already been created
- if is_model_created; then
-     echo "Model $MODEL_NAME is already created. Skipping creation."
- else
-     echo "Creating model $MODEL_NAME..."
-     # Create the model in Ollama
-     # Prepare Modelfile with the downloaded path
-     echo "FROM ./downloads/$GGUF_FILE" > Modelfile
-     ollama create $MODEL_NAME -f Modelfile
-     echo "Model $MODEL_NAME created."
- fi
-
-
- echo "model name is > $MODEL_NAME"
- echo "Use Ollama run $MODEL_NAME"
- """)
-     # Make autollama.sh executable (using chmod)
-     os.chmod(script_path, 0o755)
-
-     # Initialize command list
-     command = ["bash", script_path, "-m", model_path, "-g", gguf_file]
-
-     # Execute the command
-     process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-
-     for line in iter(process.stdout.readline, ''):
-         console.print(Panel(line.strip(), title="Autollama Output", expand=False))
-
-     for line in iter(process.stderr.readline, ''):
-         console.print(Panel(line.strip(), title="Autollama Errors (if any)", expand=False))
-
-     process.wait()
-     console.print("[green]Model is ready![/]")
-
- def main():
-     parser = argparse.ArgumentParser(description='Automatically create and run an Ollama model in Ollama')
-     parser.add_argument('-m', '--model_path', required=True, help='Set the huggingface model id to the Hugging Face model')
-     parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
+ import sys
+ import subprocess
+ import logging
+ import psutil
+ from huggingface_hub import hf_hub_url, cached_download
+ import colorlog
+ import ollama  # Import ollama for interactive chat
+ import argparse  # Import argparse for command-line arguments
+
+ # Suppress specific warnings
+ warnings.filterwarnings(
+     "ignore", category=FutureWarning, module="huggingface_hub.file_download"
+ )
+
+ # Configure logging with colors
+ handler = colorlog.StreamHandler()
+ handler.setFormatter(
+     colorlog.ColoredFormatter(
+         "%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
+         datefmt="%Y-%m-%d %H:%M:%S",
+         log_colors={
+             "DEBUG": "cyan",
+             "INFO": "green",
+             "WARNING": "yellow",
+             "ERROR": "red",
+             "CRITICAL": "red,bg_white",
+         },
+     )
+ )
+
+ logger = colorlog.getLogger(__name__)
+ if not logger.hasHandlers():
+     logger.addHandler(handler)
+ logger.setLevel(logging.INFO)
+
+ # Redirect warnings to the logger but avoid duplication
+ logging.captureWarnings(True)
+ py_warnings_logger = logging.getLogger("py.warnings")
+ if not py_warnings_logger.hasHandlers():
+     py_warnings_logger.addHandler(handler)
+
+
+ def show_art():
+     logger.info("Made with love in India")
+
+
+ def usage():
+     logger.info("Usage: python script.py -m <model_path> -g <gguf_file>")
+     logger.info("Options:")
+     logger.info("  -m <model_path>  Set the path to the model")
+     logger.info("  -g <gguf_file>   Set the GGUF file name")
+     logger.info("  -h               Display this help and exit")
+
+
+ def is_model_downloaded(logging_name, download_log):
+     if not os.path.exists(download_log):
+         return False
+     with open(download_log, "r") as f:
+         for line in f:
+             if line.strip() == logging_name:
+                 return True
+     return False
+
+
+ def log_downloaded_model(logging_name, download_log):
+     with open(download_log, "a") as f:
+         f.write(logging_name + "\n")
+
+
+ def is_model_created(model_name):
+     result = subprocess.run(["ollama", "list"], stdout=subprocess.PIPE)
+     return model_name in result.stdout.decode("utf-8")
+
+
+ def download_model(repo_id, filename, token, cache_dir="downloads"):
+     url = hf_hub_url(repo_id, filename)
+     filepath = cached_download(
+         url, cache_dir=cache_dir, force_filename=filename, use_auth_token=token
+     )
+     return filepath
+
+
+ def is_ollama_running():
+     for proc in psutil.process_iter(["name"]):
+         if proc.info["name"] in ["ollama", "ollama.exe"]:
+             return True
+     return False
+
+
+ def main(model_path=None, gguf_file=None):  # Modified to handle both CLI and non-CLI
+     show_art()
+
+     # Parse command-line arguments if provided
+     parser = argparse.ArgumentParser(description="Download and create an Ollama model")
+     parser.add_argument("-m", "--model_path", help="Path to the model on Hugging Face Hub")
+     parser.add_argument("-g", "--gguf_file", help="Name of the GGUF file")
      args = parser.parse_args()

+     # Use arguments from command line or function parameters
+     model_path = args.model_path if args.model_path else model_path
+     gguf_file = args.gguf_file if args.gguf_file else gguf_file
+
+     if not model_path or not gguf_file:
+         logger.error("Error: model_path and gguf_file are required.")
+         usage()
+         sys.exit(2)
+
+     model_name = gguf_file.split(".Q4")[0]
+     download_log = "downloaded_models.log"
+     logging_name = f"{model_path}_{model_name}"
+
+     # Ensure the log file exists
+     if not os.path.exists(download_log):
+         with open(download_log, 'w') as f:
+             pass
+
+     # Check if huggingface-hub is installed, and install it if not
      try:
-         with yaspin(text="Processing...") as spinner:
-             autollama(args.model_path, args.gguf_file)
-             spinner.ok("Done!")
-     except Exception as e:
-         console.print(f"[red]Error: {e}[/]")
-         exit(1)
+         subprocess.check_output(['pip', 'show', 'huggingface-hub'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing huggingface-hub...")
+         subprocess.check_call(['pip', 'install', '-U', 'huggingface_hub[cli]'])
+     else:
+         logger.info("huggingface-hub is already installed.")
+
+     # Check if the model has already been downloaded
+     if is_model_downloaded(logging_name, download_log):
+         logger.info(f"Model {logging_name} has already been downloaded. Skipping download.")
+     else:
+         logger.info(f"Downloading model {logging_name}...")
+         token = os.getenv('HUGGINGFACE_TOKEN', None)
+         if not token:
+             logger.warning("Warning: HUGGINGFACE_TOKEN environment variable is not set. Using None.")
+             token = None
+
+         filepath = download_model(model_path, gguf_file, token)
+         log_downloaded_model(logging_name, download_log)
+         logger.info(f"Model {logging_name} downloaded and logged.")
+
+     # Check if Ollama is installed, and install it if not
+     try:
+         subprocess.check_output(['ollama', '--version'])
+     except subprocess.CalledProcessError:
+         logger.info("Installing Ollama...")
+         subprocess.check_call(['curl', '-fsSL', 'https://ollama.com/install.sh', '|', 'sh'])
+     else:
+         logger.info("Ollama is already installed.")
+
+     # Check if Ollama is already running
+     if is_ollama_running():
+         logger.info("Ollama is already running. Skipping the start.")
+     else:
+         logger.info("Starting Ollama...")
+         subprocess.Popen(['ollama', 'serve'])
+
+         while not is_ollama_running():
+             logger.info("Waiting for Ollama to start...")
+             time.sleep(1)
+
+         logger.info("Ollama has started.")
+
+     # Check if the model has already been created
+     if is_model_created(model_name):
+         logger.info(f"Model {model_name} is already created. Skipping creation.")
+     else:
+         logger.info(f"Creating model {model_name}...")
+         with open('Modelfile', 'w') as f:
+             f.write(f"FROM ./downloads/{gguf_file}")
+         subprocess.check_call(['ollama', 'create', model_name, '-f', 'Modelfile'])
+         logger.info(f"Model {model_name} created.")
+
+     logger.info(f"model name is > {model_name}")
+     logger.info(f"Use Ollama run {model_name}")

  if __name__ == "__main__":
-     main()
-
+     main()
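
The 5.1 rewrite above drops the generated `autollama.sh` wrapper: `main(model_path=None, gguf_file=None)` now merges `-m`/`-g` command-line flags with its function parameters, downloads the GGUF through `huggingface_hub`, and drives `ollama` directly. A minimal sketch of the programmatic path, reusing the README example that appears later in this diff (the token line is optional — `download_model()` reads `HUGGINGFACE_TOKEN` — and the repo/file names are the README's own examples):

```python
import os

from webscout import autollama  # import path shown in the README excerpt below

# Optional: download_model() forwards this token to huggingface_hub;
# only gated repos need a real value ("hf_..." is a placeholder).
os.environ["HUGGINGFACE_TOKEN"] = "hf_..."

# Downloads the GGUF into ./downloads, records it in downloaded_models.log,
# ensures Ollama is installed and running, then runs `ollama create`.
autollama.main(
    model_path="Vortex4ai/Jarvis-0.5B",  # Hugging Face model ID (README example)
    gguf_file="test2-q4_k_m.gguf",       # GGUF file name (README example)
)
```

Note that `main()` still calls `parser.parse_args()` unconditionally, so unrecognized flags on the host script's command line will make the embedded parser exit.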
@@ -21,7 +21,7 @@ class LLAMA3(Provider):
          history_offset: int = 10250,
          act: str = None,
          model: str = "llama3-8b",
-         system: str = "Answer as concisely as possible.",
+         system: str = "GPT syle",
      ):
          """Instantiates Snova

@@ -36,11 +36,12 @@ class OLLAMA(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
+         system_prompt: str = "You are a helpful and friendly AI assistant.",
      ):
          """Instantiates Ollama

          Args:
-             model (str, optional): Model name. Defaults to 'llama2'.
+             model (str, optional): Model name. Defaults to 'qwen2:0.5b'.
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
              max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
              timeout (int, optional): Http request timeout. Defaults to 30.
@@ -50,12 +51,14 @@ class OLLAMA(Provider):
              proxies (dict, optional): Http request proxies. Defaults to {}.
              history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
          """
          self.model = model
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
          self.timeout = timeout
          self.last_response = {}
+         self.system_prompt = system_prompt

          self.__available_optimizers = (
              method
@@ -110,21 +113,19 @@ class OLLAMA(Provider):
          )

          def for_stream():
+             # Correctly call ollama.chat with stream=True
              stream = ollama.chat(model=self.model, messages=[
+                 {'role': 'system', 'content': self.system_prompt},
                  {'role': 'user', 'content': conversation_prompt}
              ], stream=True)

-             message_load = ""
+             # Yield each chunk directly
              for chunk in stream:
-                 message_load += chunk['message']['content']
-                 yield chunk['message']['content'] if raw else dict(text=message_load)
-             self.last_response.update(dict(text=message_load))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
+                 yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])

          def for_non_stream():
              response = ollama.chat(model=self.model, messages=[
+                 {'role': 'system', 'content': self.system_prompt},  # Add system message
                  {'role': 'user', 'content': conversation_prompt}
              ])
              self.last_response.update(dict(text=response['message']['content']))
@@ -183,6 +184,6 @@ class OLLAMA(Provider):
          return response["text"]
  if __name__ == "__main__":
      ollama_provider = OLLAMA(model="qwen:0.5b")
-     response = ollama_provider.chat("hi")
+     response = ollama_provider.chat("hi", stream=True)
      for r in response:
-         print(r, end="", flush=True)
+         print(r, end="", flush=True)
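
Two behavioral changes are visible in the OLLAMA hunks above: both code paths now prepend the configurable `system_prompt` as a leading system message, and `for_stream()` yields each chunk's own text instead of the accumulated transcript (it also no longer updates `last_response` or the conversation history while streaming). A short sketch of consuming the 5.1 streaming API, modeled on the `__main__` block; the import path follows the `webscout/Provider/OLLAMA.py` entry in RECORD, and the custom prompt string is illustrative:

```python
from webscout.Provider.OLLAMA import OLLAMA

# system_prompt is new in 5.1; it defaults to
# "You are a helpful and friendly AI assistant."
bot = OLLAMA(model="qwen:0.5b", system_prompt="Reply in one sentence.")

# With stream=True, each yielded value carries only the newest fragment,
# so printing without a newline reproduces the incremental output.
for chunk in bot.chat("hi", stream=True):
    print(chunk, end="", flush=True)
```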
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 5.0
+ Version: 5.1
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -66,6 +66,8 @@ Requires-Dist: requests-html
  Requires-Dist: bson
  Requires-Dist: cloudscraper
  Requires-Dist: emoji
+ Requires-Dist: colorlog
+ Requires-Dist: openai
  Provides-Extra: dev
  Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
  Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
@@ -1645,15 +1647,13 @@ gguf.convert(

  Webscout's `autollama` utility download model from huggingface and then automatically makes it ollama ready

- **Example:**
-
  ```python
  from webscout import autollama

- autollama(
-     model_path="OEvortex/HelpingAI-Lite-1.5T", # Hugging Face model ID
-     gguf_file="HelpingAI-Lite-1.5T.q4_k_m.gguf" # GGUF file ID
- )
+ model_path = "Vortex4ai/Jarvis-0.5B"
+ gguf_file = "test2-q4_k_m.gguf"
+
+ autollama.main(model_path, gguf_file)
  ```

  **Command Line Usage:**
@@ -26,7 +26,7 @@ webscout/Agents/Onlinesearcher.py,sha256=GzF2JcMfj07d74mxQEoaxwtxahgLHl3b_ugTbXj
  webscout/Agents/__init__.py,sha256=VbGyW5pulh3LRqbVTv54n5TwWsrTqOANRioG18xtdJ0,58
  webscout/Agents/functioncall.py,sha256=qH1Tofi4h5CK5RhXaLQhXu8swEUmcyK9R5xpS6jMLrs,5784
  webscout/Extra/__init__.py,sha256=GG1qUwS-HspT4TeeAIT4qFpM8PaO1ZdQhpelctaM7Rs,99
- webscout/Extra/autollama.py,sha256=8lyodIWAgJABzlMMHytlolPCgvUKh8ynkZD6MMEltXs,5970
+ webscout/Extra/autollama.py,sha256=qM8alxlWzg10BGIYKZBUtIEAXrkvEOWBwSxdPp3zq9I,6226
  webscout/Extra/gguf.py,sha256=RvSp7xuaD6epAA9iAzthUnAQ3HA5N-svMyKUadAVnw8,7009
  webscout/Extra/weather.py,sha256=wdSrQxZRpbNfyaux0BeLdaDWyde5KwxZjSUM13820X0,2460
  webscout/Extra/weather_ascii.py,sha256=Aed-_EUzvTEjBXbOpNRxkJBLa6fXsclknXP06HnQD18,808
@@ -53,8 +53,8 @@ webscout/Provider/Gemini.py,sha256=V79nIi5vhPfvjlGYg5XuH6RfY7AyNnBqnJM-OBK99hE,8
  webscout/Provider/Groq.py,sha256=h_dPKwqXRwmgvmEmkDYKdXwrlI4Zm2vZuCnSMItoa2w,28662
  webscout/Provider/Koboldai.py,sha256=KwWx2yPlvT9BGx37iNvSbgzWkJ9I8kSOmeg7sL1hb0M,15806
  webscout/Provider/Llama.py,sha256=pqjpB09MFufImzTav1PwTWsukSCr3QiB-yFGJIIBAu8,8293
- webscout/Provider/Llama3.py,sha256=qO5R5mNznrobi7eKZR8opb_UekmO0_PUEOkPTnNw9nE,7583
- webscout/Provider/OLLAMA.py,sha256=Modmkp_WiZaBYsv4-_5y7fHpqBJY20zWxyZZwtSfqVs,7117
+ webscout/Provider/Llama3.py,sha256=UnSWyBMSkp4WAxU4zNI9VNsZY0aAOHvT7AK0xJlJW90,7559
+ webscout/Provider/OLLAMA.py,sha256=g8ejBjEZN0zya-10-v_njADZ796Uxu4Nbj_gaNrlj5I,7374
  webscout/Provider/Openai.py,sha256=SjfVOwY94unVnXhvN0Fkome-q2-wi4mPJk_vCGq5Fjc,20617
  webscout/Provider/PI.py,sha256=IodVvGR_RIZpGJ0ypFF4U6NBMZAZ5O1BlRFMelot8so,8364
  webscout/Provider/Perplexity.py,sha256=gUnXyVNbl6tWAqirwHEoPkjCzxpORcKxL77aoFKepBk,21485
@@ -75,9 +75,9 @@ webscout/Provider/meta.py,sha256=3iBylmAk9d673Axvw6hFi0-0x_Fq7ZgtH_1j2_rcDwY,307
  webscout/Provider/turboseek.py,sha256=BNx_urbs6Ixr7SEOgL4Uo1iZdjYC7CxoefJcsN4LK6I,9138
  webscout/Provider/xdash.py,sha256=KUDTEX8I0z72bIDi-w5Se7xmB_lbmaX7KlCmIl2ad4c,7925
  webscout/Provider/yep.py,sha256=RbEBzHeEFxgfdnHXHuBny6NKHcYYYNA6bvTggvAzoLk,10399
- webscout-5.0.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
- webscout-5.0.dist-info/METADATA,sha256=anlsD-HmXJT4_UV8LyrT5mxdnEKznprEDn2oPcf-Ucg,50819
- webscout-5.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
- webscout-5.0.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
- webscout-5.0.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
- webscout-5.0.dist-info/RECORD,,
+ webscout-5.1.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+ webscout-5.1.dist-info/METADATA,sha256=5QksBKkY8zwt12vphkMh6GFj6SiwZrQXYdetRMd0HMY,50812
+ webscout-5.1.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+ webscout-5.1.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+ webscout-5.1.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+ webscout-5.1.dist-info/RECORD,,
File without changes