webscout 5.7__py3-none-any.whl → 5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -0,0 +1,354 @@
+ import requests
+ import os
+ import time
+ from typing import List, Optional
+ from string import punctuation
+ from random import choice
+ from requests.exceptions import RequestException
+ import json
+ from html.parser import HTMLParser
+ import re
+ import html.entities
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class TerminalFormatter(HTMLParser):
+     """
+     A custom HTML parser that converts HTML content to terminal-friendly formatted text
+     using ANSI escape codes.
+     """
+
+     def __init__(self):
+         super().__init__()
+         self.output = []
+         self.list_stack = []
+         self.ol_counters = []
+         self.bold = False
+         self.italic = False
+
+     def handle_starttag(self, tag, attrs):
+         if tag in ["strong", "b"]:
+             self.output.append("\033[1m")  # Bold
+             self.bold = True
+         elif tag in ["em", "i"]:
+             self.output.append("\033[3m")  # Italic
+             self.italic = True
+         elif tag == "br":
+             self.output.append("\n")
+         elif tag in ["p", "div", "h1", "h2", "h3", "h4", "h5", "h6"]:
+             self.output.append("\n")
+         elif tag == "ul":
+             self.list_stack.append("ul")
+             self.output.append("\n")
+         elif tag == "ol":
+             self.list_stack.append("ol")
+             self.ol_counters.append(1)
+             self.output.append("\n")
+         elif tag == "li":
+             if self.list_stack:
+                 if self.list_stack[-1] == "ul":
+                     self.output.append("• ")  # Bullet point
+                 elif self.list_stack[-1] == "ol":
+                     number = self.ol_counters[-1]
+                     self.output.append(f"{number}. ")
+                     self.ol_counters[-1] += 1
+
+     def handle_endtag(self, tag):
+         if tag in ["strong", "b"]:
+             self.output.append("\033[0m")  # Reset
+             self.bold = False
+         elif tag in ["em", "i"]:
+             self.output.append("\033[0m")  # Reset
+             self.italic = False
+         elif tag in ["p", "div", "h1", "h2", "h3", "h4", "h5", "h6"]:
+             self.output.append("\n")
+         elif tag == "ul":
+             if self.list_stack and self.list_stack[-1] == "ul":
+                 self.list_stack.pop()
+             self.output.append("\n")
+         elif tag == "ol":
+             if self.list_stack and self.list_stack[-1] == "ol":
+                 self.list_stack.pop()
+                 self.ol_counters.pop()
+             self.output.append("\n")
+         elif tag == "li":
+             self.output.append("\n")
+
+     def handle_data(self, data):
+         # Remove ANSI escape codes from the data
+         data = re.sub(r'\033\[[0-9;]*m', '', data)
+         data = re.sub(r"\s+", " ", data)
+         self.output.append(data)
+
+     def handle_entityref(self, name):
+         # Look up the named entity; fall back to emitting it verbatim if
+         # unknown (chr() on the fallback string would raise TypeError).
+         codepoint = html.entities.name2codepoint.get(name)
+         if codepoint is not None:
+             self.output.append(chr(codepoint))
+         else:
+             self.output.append(f"&{name};")
+
+     def handle_charref(self, name):
+         try:
+             if name.startswith("x") or name.startswith("X"):
+                 char = chr(int(name[1:], 16))
+             else:
+                 char = chr(int(name))
+             self.output.append(char)
+         except ValueError:
+             self.output.append(f"&#{name};")
+
+     def get_text(self):
+         return "".join(self.output).strip()
+
+
+ def html_to_terminal(html_content):
+     parser = TerminalFormatter()
+     parser.feed(html_content)
+     return parser.get_text()
+
+
+ class TutorAI(Provider):
+     """
+     A class to interact with the TutorAI.me API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         system_prompt: str = "You are a helpful AI assistant.",
+     ):
+         """
+         Initializes the TutorAI.me API with the given parameters.
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): HTTP request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to a file containing the conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index (used as intro). Defaults to None.
+             system_prompt (str, optional): System prompt for TutorAI.
+                 Defaults to "You are a helpful AI assistant.".
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://tutorai.me/api/generate-homeworkify-response"
+         self.stream_chunk_size = 1024
+         self.timeout = timeout
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.headers = {
+             "Accept": "*/*",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "Cookie": (
+                 "ARRAffinity=5ef5a1afbc0178c19fc7bc85047a2309cb69de3271923483302c69744e2b1d24; "
+                 "ARRAffinitySameSite=5ef5a1afbc0178c19fc7bc85047a2309cb69de3271923483302c69744e2b1d24; "
+                 "_ga=GA1.1.412867530.1726937399; "
+                 "_clck=1kwy10j%7C2%7Cfpd%7C0%7C1725; "
+                 "_clsk=1cqd2q1%7C1726937402133%7C1%7C1%7Cm.clarity.ms%2Fcollect; "
+                 "_ga_0WF5W33HD7=GS1.1.1726937399.1.1.1726937459.0.0.0"
+             ),
+             "DNT": "1",
+             "Origin": "https://tutorai.me",
+             "Priority": "u=1, i",
+             "Referer": "https://tutorai.me/homeworkify?ref=taaft&utm_source=taaft&utm_medium=referral",
+             "Sec-Ch-Ua": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+             "Sec-Ch-Ua-Mobile": "?0",
+             "Sec-Ch-Ua-Platform": '"Windows"',
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "User-Agent": (
+                 "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                 "AppleWebKit/537.36 (KHTML, like Gecko) "
+                 "Chrome/129.0.0.0 Safari/537.36 Edg/128.0.0.0"
+             ),
+         }
+
+         # Materialized as a tuple: a bare generator expression would be
+         # exhausted after the first membership test in ask().
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         attachment_path: Optional[str] = None
+     ) -> dict:
+         """Chat with TutorAI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+             attachment_path (str, optional): Path to attachment file. Defaults to None.
+
+         Returns:
+             generator: yields response chunks as dicts, e.g.
+             ```json
+             {
+                 "text" : "How may I assist you today?"
+             }
+             ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         form_data = {
+             "inputMessage": conversation_prompt,
+             "attachmentsCount": "1" if attachment_path else "0"
+         }
+         files = {}
+         if attachment_path:
+             if not os.path.isfile(attachment_path):
+                 raise FileNotFoundError(f"Error: The file '{attachment_path}' does not exist.")
+             try:
+                 files["attachment0"] = (os.path.basename(attachment_path), open(attachment_path, 'rb'), 'image/png')
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error opening the file: {e}")
+
+         def for_stream():
+             try:
+                 with requests.post(self.api_endpoint, headers=self.headers, data=form_data, files=files, stream=True, timeout=self.timeout) as response:
+                     response.raise_for_status()
+                     response_chunks = []
+                     json_str = ''
+                     for chunk in response.iter_content(chunk_size=self.stream_chunk_size, decode_unicode=True):
+                         if chunk:
+                             response_chunks.append(chunk)
+                             yield chunk if raw else dict(text=chunk)
+                     json_str = ''.join(response_chunks)
+                     try:
+                         response_data = json.loads(json_str)
+                     except json.JSONDecodeError as json_err:
+                         raise exceptions.FailedToGenerateResponseError(f"\nError decoding JSON: {json_err}")
+                     homeworkify_html = response_data.get("homeworkifyResponse", "")
+                     if not homeworkify_html:
+                         raise exceptions.FailedToGenerateResponseError("\nNo 'homeworkifyResponse' found in the response.")
+                     clean_text = html_to_terminal(homeworkify_html)
+                     self.last_response.update(dict(text=clean_text))
+                     self.conversation.update_chat_history(
+                         prompt, self.get_message(self.last_response)
+                     )
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
+
+         def for_non_stream():
+             response = self.session.post(self.api_endpoint, headers=self.headers, data=form_data, files=files, timeout=self.timeout)
+             if not response.ok:
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             # Parse the entire JSON response
+             response_data = response.json()
+             homeworkify_html = response_data.get("homeworkifyResponse", "")
+             if not homeworkify_html:
+                 # This function is a generator, so the fallback must be
+                 # yielded; a bare return would end the iteration without
+                 # ever delivering the message.
+                 yield {"text": "No content found in the response"}
+                 return
+             clean_text = html_to_terminal(homeworkify_html)
+
+             # Simulate streaming by yielding chunks of the content
+             chunk_size = self.stream_chunk_size
+             for i in range(0, len(clean_text), chunk_size):
+                 chunk = clean_text[i:i + chunk_size]
+                 self.last_response.update(dict(text=chunk))
+                 yield chunk if raw else dict(text=chunk)
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         attachment_path: Optional[str] = None,
+     ) -> str:
+         """Generate a response as a generator of `str` chunks
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+             attachment_path (str, optional): Path to attachment file. Defaults to None.
+
+         Returns:
+             generator: yields response text chunks
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally, attachment_path=attachment_path,
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             for response in self.ask(
+                 prompt, False, optimizer=optimizer, conversationally=conversationally, attachment_path=attachment_path,
+             ):
+                 yield self.get_message(response)
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves the message only from a response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+
+     ai = TutorAI()
+     response = ai.chat(input(">>> "), attachment_path='photo_2024-07-06_22-19-42.jpg')
+     for chunk in response:
+         print(chunk, end="", flush=True)
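The `html_to_terminal` helper in the hunk above is self-contained, so its HTML-to-ANSI conversion can be sanity-checked without calling the TutorAI endpoint. A minimal sketch, assuming `TerminalFormatter` and `html_to_terminal` are in scope as defined above; the sample HTML and expected output are invented for illustration:

```python
# Sketch: exercise the HTML-to-terminal conversion added in this release.
# The sample HTML below is an invented example, not from the package.
sample = (
    "<h3>Result</h3>"
    "<p>The answer is <strong>42</strong>.</p>"
    "<ol><li>parse</li><li>compute</li></ol>"
)

print(html_to_terminal(sample))
# Expected shape (ANSI escapes shown symbolically):
#   Result
#   The answer is [bold]42[reset].
#   1. parse
#   2. compute
```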
@@ -0,0 +1,260 @@
+ import requests
+ import json
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class AIUncensored(Provider):
+     """
+     A class to interact with the AIUncensored.info API.
+     """
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         system_prompt: str = "You are a helpful AI assistant.",
+     ):
+         """
+         Initializes the AIUncensored.info API with the given parameters.
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): HTTP request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to a file containing the conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): HTTP request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index (used as intro). Defaults to None.
+             system_prompt (str, optional): System prompt for AIUncensored.
+                 Defaults to "You are a helpful AI assistant.".
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = 'https://twitterclone-i0wr.onrender.com/api/chat'
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.headers = {
+             "authority": "twitterclone-i0wr.onrender.com",
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://www.aiuncensored.info",
+             "priority": "u=1, i",
+             "referer": "https://www.aiuncensored.info/",
+             "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "cross-site",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+         }
+
+         self.__available_optimizers = [
+             method for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         ]
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ):
+         """
+         Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+         Returns:
+             dict or generator:
+                 If stream is False, returns a dict:
+                 ```json
+                 {
+                     "text" : "How may I assist you today?"
+                 }
+                 ```
+                 If stream is True, yields dicts with incremental text.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "messages": [
+                 {
+                     "role": "system",
+                     "content": self.system_prompt
+                 },
+                 {
+                     "role": "user",
+                     "content": conversation_prompt
+                 }
+             ]
+         }
+
+         def for_stream():
+             try:
+                 with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                     if response.status_code == 200:
+                         full_content = ''
+                         for line in response.iter_lines():
+                             decoded_line = line.decode('utf-8').strip()
+                             if not decoded_line:
+                                 continue
+                             if decoded_line == "data: [DONE]":
+                                 break
+                             if decoded_line.startswith("data: "):
+                                 data_str = decoded_line[len("data: "):]
+                                 try:
+                                     data_json = json.loads(data_str)
+                                     content = data_json.get("data", "")
+                                     if content:
+                                         full_content += content
+                                         # Yield only the newly received fragment,
+                                         # as documented, so callers can simply
+                                         # concatenate chunks as they arrive.
+                                         yield content if raw else {"text": content}
+                                 except json.JSONDecodeError:
+                                     # Skip malformed fragments rather than
+                                     # aborting the whole stream.
+                                     continue
+                     else:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code: {response.status_code}"
+                         )
+                 self.last_response = {"text": full_content}
+                 self.conversation.update_chat_history(
+                     prompt, self.get_message(self.last_response)
+                 )
+             except requests.exceptions.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
+
+         def for_non_stream():
+             # Drain the stream, then return the accumulated response.
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ):
+         """
+         Generate a response
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+         Returns:
+             str or generator:
+                 If stream is False, returns a string.
+                 If stream is True, yields incremental strings.
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             response = self.ask(
+                 prompt,
+                 False,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+             return self.get_message(response)
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves the message only from a response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = AIUncensored()
+     user_input = 'hi'
+     response = ai.chat(user_input, stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
+     print()  # For a newline after streaming completes
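The AIUncensored provider consumes a server-sent-events-style body in which each `data: {...}` line carries a JSON fragment and `data: [DONE]` terminates the stream. The parsing step can be checked in isolation; a self-contained sketch, with the sample lines invented for illustration:

```python
import json

# Sketch of the "data: ..." line protocol parsed by the provider above.
# The sample lines are invented; a real response arrives over HTTP.
lines = [
    'data: {"data": "Hello"}',
    'data: {"data": ", world"}',
    'data: [DONE]',
]

full_content = ''
for decoded_line in (l.strip() for l in lines):
    if decoded_line == "data: [DONE]":
        break  # end-of-stream sentinel
    if decoded_line.startswith("data: "):
        try:
            content = json.loads(decoded_line[len("data: "):]).get("data", "")
        except json.JSONDecodeError:
            continue  # skip malformed fragments
        full_content += content

print(full_content)  # -> "Hello, world"
```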
webscout/__init__.py CHANGED
@@ -18,6 +18,7 @@ from .Extra import gguf
  from .Extra import autollama
  from .Extra import weather_ascii, weather
  from .Agents import *
+
  __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
  webai = [
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "5.7"
+ __version__ = "5.9"
  __prog__ = "webscout"