webscout 1.3.1__py3-none-any.whl → 1.3.2__py3-none-any.whl

This diff shows the published contents of two versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects only the changes between those released versions.

Potentially problematic release: this version of webscout might be problematic.

webscout/AIutel.py CHANGED
@@ -224,7 +224,7 @@ class Conversation:
 
 class AwesomePrompts:
     awesome_prompt_url = (
-        "https://github.com/Simatwa/gpt-cli/blob/main/assets/all-acts.json?raw=true"
+        "https://raw.githubusercontent.com/OE-LUCIFER/prompts/main/prompt.json"
     )
     awesome_prompt_path = os.path.join(default_path, "all-acts.json")
 
@@ -363,9 +363,9 @@ class AwesomePrompts:
 
 
 class Updates:
-    """Pytgpt latest release info"""
+    """Webscout latest release info"""
 
-    url = "https://api.github.com/repos/Simatwa/python-tgpt/releases/latest"
+    url = "https://api.github.com/repos/OE-LUCIFER/Webscout/releases/latest"
 
     @property
     def latest_version(self):
@@ -385,7 +385,7 @@ class Updates:
                 return entry.get("url")
 
     def latest(self, whole: bool = False, version: bool = False) -> dict:
-        """Check pytgpt latest version info
+        """Check Webscout latest version info
 
         Args:
             whole (bool, optional): Return whole json response. Defaults to False.
@@ -508,7 +508,7 @@ print("The essay is about...")
                 f"{self.interpreter} --version",
                 exit_on_error=True,
                 stdout_error=True,
-                help="If you're using pytgpt-cli, use the flag '--internal-exec'",
+                help="If you're using Webscout-cli, use the flag '--internal-exec'",
             )[1].stdout.split(" ")[1]
         )
 
@@ -588,7 +588,7 @@ Current Datetime : {datetime.datetime.now()}
         if self.quiet:
             return
 
-        message = "[PYTGPT] - " + message
+        message = "[Webscout] - " + message
         if category == "error":
             logging.error(message)
         else:
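
Beyond the pytgpt-to-Webscout rebranding, this file repoints Updates.url at the standard GitHub "latest release" REST endpoint for the OE-LUCIFER/Webscout repository. The latest_version property declared above presumably reduces to a single GET against that URL; here is a minimal standalone sketch of that lookup (only the URL comes from the diff; the requests call and the "tag_name" field follow the public GitHub releases API, not code shown here):

import requests

# Endpoint taken verbatim from the hunk above.
URL = "https://api.github.com/repos/OE-LUCIFER/Webscout/releases/latest"

def latest_version() -> str:
    # Illustrative helper, not the package's own code: "tag_name" is the
    # release tag per the GitHub releases API.
    response = requests.get(URL, timeout=10)
    response.raise_for_status()
    return response.json()["tag_name"]

print(latest_version())  # e.g. "v1.3.2" once that release is tagged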
webscout/__init__.py CHANGED
@@ -3,7 +3,7 @@
 Search for words, documents, images, videos, news, maps and text translation
 using the Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc Also containes AI models
 """
-
+import g4f
 import logging
 from .webscout_search import WEBS
 from .webscout_search_async import AsyncWEBS
@@ -13,6 +13,26 @@ from .transcriber import transcriber
 from .voice import play_audio
 from .LLM import LLM
 
+__repo__ = "https://github.com/OE-LUCIFER/Webscout"
+
+webai = [
+    "leo",
+    "openai",
+    "opengpt",
+    "koboldai",
+    "gemini",
+    "phind",
+    "blackboxai",
+    "g4fauto",
+    "perplexity",
+    "sean",
+]
+
+gpt4free_providers = [
+    provider.__name__ for provider in g4f.Provider.__providers__  # if provider.working
+]
+
+available_providers = webai + gpt4free_providers
 
 __all__ = ["WEBS", "AsyncWEBS", "__version__", "cli"]
 
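
With this change, importing webscout now eagerly imports g4f and publishes available_providers: the hard-coded webai names concatenated with the class name of every g4f provider (the `if provider.working` filter is left commented out, so non-working providers are included as well). A quick inspection sketch, assuming g4f is installed:

import webscout

print(len(webscout.available_providers))
print("phind" in webscout.available_providers)   # True: hard-coded webai entry
print("Koala" in webscout.available_providers)   # likely True: a g4f provider class name

Note that none of the new names are added to __all__, so `from webscout import *` does not pick them up.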
webscout/g4f.py ADDED
@@ -0,0 +1,474 @@
+import g4f
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import Provider
+from webscout.AIutel import available_providers
+
+
+g4f.debug.version_check = False
+
+working_providers = available_providers
+
+completion_allowed_models = [
+    "code-davinci-002",
+    "text-ada-001",
+    "text-babbage-001",
+    "text-curie-001",
+    "text-davinci-002",
+    "text-davinci-003",
+]
+
+default_models = {
+    "completion": "text-davinci-003",
+    "chat_completion": "gpt-3.5-turbo",
+}
+
+default_provider = "Koala"
+
+
+class GPT4FREE(Provider):
+    def __init__(
+        self,
+        provider: str = default_provider,
+        is_conversation: bool = True,
+        auth: str = None,
+        max_tokens: int = 600,
+        model: str = None,
+        chat_completion: bool = False,
+        ignore_working: bool = False,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initialies GPT4FREE
+
+        Args:
+            provider (str, optional): gpt4free based provider name. Defaults to Koala.
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            auth (str, optional): Authentication value for the provider incase it needs. Defaults to None.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            model (str, optional): LLM model name. Defaults to text-davinci-003|gpt-3.5-turbo.
+            chat_completion(bool, optional): Provide native auto-contexting (conversationally). Defaults to False.
+            ignore_working (bool, optional): Ignore working status of the provider. Defaults to False.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        assert provider in available_providers, (
+            f"Provider '{provider}' is not yet supported. "
+            f"Try others like {', '.join(available_providers)}"
+        )
+        if model is None:
+            model = (
+                default_models["chat_completion"]
+                if chat_completion
+                else default_models["completion"]
+            )
+
+        elif not chat_completion:
+            assert model in completion_allowed_models, (
+                f"Model '{model}' is not yet supported for completion. "
+                f"Try other models like {', '.join(completion_allowed_models)}"
+            )
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            False if chat_completion else is_conversation,
+            self.max_tokens_to_sample,
+            filepath,
+            update_file,
+        )
+        self.conversation.history_offset = history_offset
+        self.model = model
+        self.provider = provider
+        self.chat_completion = chat_completion
+        self.ignore_working = ignore_working
+        self.auth = auth
+        self.proxy = None if not proxies else list(proxies.values())[0]
+        self.__chat_class = g4f.ChatCompletion if chat_completion else g4f.Completion
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I help you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        def payload():
+            if self.chat_completion:
+                return dict(
+                    model=self.model,
+                    provider=self.provider,  # g4f.Provider.Aichat,
+                    messages=[{"role": "user", "content": conversation_prompt}],
+                    stream=stream,
+                    ignore_working=self.ignore_working,
+                    auth=self.auth,
+                    proxy=self.proxy,
+                    timeout=self.timeout,
+                )
+
+            else:
+                return dict(
+                    model=self.model,
+                    prompt=conversation_prompt,
+                    provider=self.provider,
+                    stream=stream,
+                    ignore_working=self.ignore_working,
+                    auth=self.auth,
+                    proxy=self.proxy,
+                    timeout=self.timeout,
+                )
+
+        def format_response(response):
+            return dict(text=response)
+
+        def for_stream():
+            previous_chunks = ""
+            response = self.__chat_class.create(**payload())
+
+            for chunk in response:
+                previous_chunks += chunk
+                formatted_resp = format_response(previous_chunks)
+                self.last_response.update(formatted_resp)
+                yield previous_chunks if raw else formatted_resp
+
+            self.conversation.update_chat_history(
+                prompt,
+                previous_chunks,
+            )
+
+        def for_non_stream():
+            response = self.__chat_class.create(**payload())
+            formatted_resp = format_response(response)
+
+            self.last_response.update(formatted_resp)
+            self.conversation.update_chat_history(prompt, response)
+
+            return response if raw else formatted_resp
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+from pathlib import Path
+from webscout.AIutel import default_path
+from json import dump, load
+from time import time
+from threading import Thread as thr
+from functools import wraps
+from rich.progress import Progress
+import logging
+
+results_path = Path(default_path) / "provider_test.json"
+
+
+def exception_handler(func):
+
+    @wraps(func)
+    def decorator(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception as e:
+            pass
+
+    return decorator
+
+
+@exception_handler
+def is_working(provider: str) -> bool:
+    """Test working status of a provider
+
+    Args:
+        provider (str): Provider name
+
+    Returns:
+        bool: is_working status
+    """
+    bot = GPT4FREE(provider=provider, is_conversation=False)
+    text = bot.chat("hello")
+    assert isinstance(text, str)
+    assert bool(text.strip())
+    assert "</" not in text
+    assert ":" not in text
+    assert len(text) > 2
+    return True
+
+
+class TestProviders:
+
+    def __init__(
+        self,
+        test_at_once: int = 5,
+        quiet: bool = False,
+        timeout: int = 20,
+        selenium: bool = False,
+        do_log: bool = True,
+    ):
+        """Constructor
+
+        Args:
+            test_at_once (int, optional): Test n providers at once. Defaults to 5.
+            quiet (bool, optinal): Disable stdout. Defaults to False.
+            timout (int, optional): Thread timeout for each provider. Defaults to 20.
+            selenium (bool, optional): Test even selenium dependent providers. Defaults to False.
+            do_log (bool, optional): Flag to control logging. Defaults to True.
+        """
+        self.test_at_once: int = test_at_once
+        self.quiet = quiet
+        self.timeout = timeout
+        self.do_log = do_log
+        self.__logger = logging.getLogger(__name__)
+        self.working_providers: list = [
+            provider.__name__
+            for provider in g4f.Provider.__providers__
+            if provider.working
+        ]
+
+        if not selenium:
+            import g4f.Provider.selenium as selenium_based
+            from g4f import webdriver
+
+            webdriver.has_requirements = False
+            selenium_based_providers: list = dir(selenium_based)
+            for provider in self.working_providers:
+                try:
+                    selenium_based_providers.index(provider)
+                except ValueError:
+                    pass
+                else:
+                    self.__log(
+                        10, f"Dropping provider - {provider} - [Selenium dependent]"
+                    )
+                    self.working_providers.remove(provider)
+
+        self.results_path: Path = results_path
+        self.__create_empty_file(ignore_if_found=True)
+        self.results_file_is_empty: bool = False
+
+    def __log(
+        self,
+        level: int,
+        message: str,
+    ):
+        """class logger"""
+        if self.do_log:
+            self.__logger.log(level, message)
+        else:
+            pass
+
+    def __create_empty_file(self, ignore_if_found: bool = False):
+        if ignore_if_found and self.results_path.is_file():
+            return
+        with self.results_path.open("w") as fh:
+            dump({"results": []}, fh)
+        self.results_file_is_empty = True
+
+    def test_provider(self, name: str):
+        """Test each provider and save successful ones
+
+        Args:
+            name (str): Provider name
+        """
+
+        try:
+            bot = GPT4FREE(provider=name, is_conversation=False)
+            start_time = time()
+            text = bot.chat("hello there")
+            assert isinstance(text, str), "Non-string response returned"
+            assert bool(text.strip()), "Empty string"
+            assert "</" not in text, "Html code returned."
+            assert ":" not in text, "Json formatted response returned"
+            assert len(text) > 2
+        except Exception as e:
+            pass
+        else:
+            self.results_file_is_empty = False
+            with self.results_path.open() as fh:
+                current_results = load(fh)
+            new_result = dict(time=time() - start_time, name=name)
+            current_results["results"].append(new_result)
+            self.__log(20, f"Test result - {new_result['name']} - {new_result['time']}")
+
+            with self.results_path.open("w") as fh:
+                dump(current_results, fh)
+
+    @exception_handler
+    def main(
+        self,
+    ):
+        self.__create_empty_file()
+        threads = []
+        # Create a progress bar
+        total = len(self.working_providers)
+        with Progress() as progress:
+            self.__log(20, f"Testing {total} providers : {self.working_providers}")
+            task = progress.add_task(
+                f"[cyan]Testing...[{self.test_at_once}]",
+                total=total,
+                visible=self.quiet == False,
+            )
+            while not progress.finished:
+                for count, provider in enumerate(self.working_providers, start=1):
+                    t1 = thr(
+                        target=self.test_provider,
+                        args=(provider,),
+                    )
+                    t1.start()
+                    if count % self.test_at_once == 0 or count == len(provider):
+                        for t in threads:
+                            try:
+                                t.join(self.timeout)
+                            except Exception as e:
+                                pass
+                        threads.clear()
+                    else:
+                        threads.append(t1)
+                    progress.update(task, advance=1)
+
+    def get_results(self, run: bool = False, best: bool = False) -> list[dict]:
+        """Get test results
+
+        Args:
+            run (bool, optional): Run the test first. Defaults to False.
+            best (bool, optional): Return name of the best provider. Defaults to False.
+
+        Returns:
+            list[dict]|str: Test results.
+        """
+        if run or self.results_file_is_empty:
+            self.main()
+
+        with self.results_path.open() as fh:
+            results: dict = load(fh)
+
+        results = results["results"]
+        if not results:
+            if run:
+                raise Exception("Unable to find working g4f provider")
+            else:
+                self.__log(30, "Hunting down working g4f providers.")
+                return self.get_results(run=True, best=best)
+
+        time_list = []
+
+        sorted_list = []
+        for entry in results:
+            time_list.append(entry["time"])
+
+        time_list.sort()
+
+        for time_value in time_list:
+            for entry in results:
+                if entry["time"] == time_value:
+                    sorted_list.append(entry)
+        return sorted_list[0]["name"] if best else sorted_list
+
+    @property
+    def best(self):
+        """Fastest provider overally"""
+        return self.get_results(run=False, best=True)
+
+    @property
+    def auto(self):
+        """Best working provider"""
+        for result in self.get_results(run=False, best=False):
+            self.__log(20, "Confirming working status of provider : " + result["name"])
+            if is_working(result["name"]):
+                return result["name"]
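
The new module exposes two entry points: GPT4FREE, a Provider that routes prompts through a named g4f backend, and TestProviders, which times each working provider, caches results in provider_test.json, and ranks them by speed. A minimal usage sketch based only on the signatures shown above (whether any given provider actually responds at runtime is not guaranteed):

from webscout.g4f import GPT4FREE, TestProviders

# Chat through the default provider ("Koala"); chat() returns a plain str.
bot = GPT4FREE(chat_completion=True)
print(bot.chat("Say hello in one word"))

# Streaming note: for_stream() above yields the accumulated text so far,
# not deltas, so keep only the last value.
text = ""
for text in bot.chat("Tell me a short joke", stream=True):
    pass
print(text)

# Rank providers by response time; .best returns the fastest name,
# .auto re-verifies candidates with is_working() before returning one.
tester = TestProviders(test_at_once=5, timeout=20)
print(tester.best)

One design consequence worth knowing: exception_handler swallows every exception and returns None, so is_working() returns True or None (never False), and a fully failed test run surfaces only as an empty results list.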
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.3.1"
+__version__ = "1.3.2"
 