webscout-6.3-py3-none-any.whl → webscout-6.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (131)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +0 -197
  3. webscout/AIutel.py +441 -1130
  4. webscout/DWEBS.py +189 -35
  5. webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
  6. webscout/Extra/YTToolkit/__init__.py +3 -0
  7. webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +479 -551
  8. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  10. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  11. webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
  12. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  13. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  14. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  15. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  16. webscout/Extra/YTToolkit/ytapi/query.py +37 -0
  17. webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
  18. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  19. webscout/Extra/YTToolkit/ytapi/video.py +102 -0
  20. webscout/Extra/__init__.py +3 -1
  21. webscout/Extra/autocoder/__init__.py +9 -0
  22. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  23. webscout/Extra/autocoder/rawdog.py +680 -0
  24. webscout/Extra/autollama.py +246 -195
  25. webscout/Extra/gguf.py +81 -56
  26. webscout/Extra/markdownlite/__init__.py +862 -0
  27. webscout/Extra/weather_ascii.py +2 -2
  28. webscout/LLM.py +206 -43
  29. webscout/Litlogger/__init__.py +681 -0
  30. webscout/Provider/DARKAI.py +1 -1
  31. webscout/Provider/EDITEE.py +1 -1
  32. webscout/Provider/NinjaChat.py +1 -1
  33. webscout/Provider/PI.py +120 -35
  34. webscout/Provider/Perplexity.py +590 -598
  35. webscout/Provider/Reka.py +0 -1
  36. webscout/Provider/RoboCoders.py +206 -0
  37. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  38. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  39. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  40. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  41. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  42. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  43. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  44. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  45. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  46. webscout/Provider/TTI/__init__.py +2 -4
  47. webscout/Provider/TTI/artbit/__init__.py +22 -0
  48. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  49. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  50. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  51. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  52. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  53. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  54. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  55. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  56. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  57. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  58. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  59. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  60. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  61. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  62. webscout/Provider/TTI/talkai/__init__.py +4 -0
  63. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  65. webscout/Provider/TTS/__init__.py +5 -1
  66. webscout/Provider/TTS/deepgram.py +183 -0
  67. webscout/Provider/TTS/elevenlabs.py +137 -0
  68. webscout/Provider/TTS/gesserit.py +151 -0
  69. webscout/Provider/TTS/murfai.py +139 -0
  70. webscout/Provider/TTS/parler.py +134 -107
  71. webscout/Provider/TTS/streamElements.py +360 -275
  72. webscout/Provider/TTS/utils.py +280 -0
  73. webscout/Provider/TTS/voicepod.py +116 -116
  74. webscout/Provider/__init__.py +8 -1
  75. webscout/Provider/askmyai.py +2 -2
  76. webscout/Provider/cerebras.py +227 -219
  77. webscout/Provider/llama3mitril.py +0 -1
  78. webscout/Provider/meta.py +794 -779
  79. webscout/Provider/mhystical.py +176 -0
  80. webscout/Provider/perplexitylabs.py +265 -0
  81. webscout/Provider/twitterclone.py +251 -245
  82. webscout/Provider/typegpt.py +358 -0
  83. webscout/__init__.py +9 -8
  84. webscout/__main__.py +5 -5
  85. webscout/cli.py +252 -280
  86. webscout/conversation.py +227 -0
  87. webscout/exceptions.py +161 -29
  88. webscout/litagent/__init__.py +172 -0
  89. webscout/litprinter/__init__.py +832 -0
  90. webscout/optimizers.py +270 -0
  91. webscout/prompt_manager.py +279 -0
  92. webscout/scout/__init__.py +11 -0
  93. webscout/scout/core.py +884 -0
  94. webscout/scout/element.py +459 -0
  95. webscout/scout/parsers/__init__.py +69 -0
  96. webscout/scout/parsers/html5lib_parser.py +172 -0
  97. webscout/scout/parsers/html_parser.py +236 -0
  98. webscout/scout/parsers/lxml_parser.py +178 -0
  99. webscout/scout/utils.py +38 -0
  100. webscout/swiftcli/__init__.py +810 -0
  101. webscout/update_checker.py +125 -0
  102. webscout/version.py +1 -1
  103. webscout/zeroart/__init__.py +55 -0
  104. webscout/zeroart/base.py +61 -0
  105. webscout/zeroart/effects.py +99 -0
  106. webscout/zeroart/fonts.py +816 -0
  107. webscout/zerodir/__init__.py +225 -0
  108. {webscout-6.3.dist-info → webscout-6.5.dist-info}/METADATA +37 -112
  109. webscout-6.5.dist-info/RECORD +179 -0
  110. webscout/Agents/Onlinesearcher.py +0 -182
  111. webscout/Agents/__init__.py +0 -2
  112. webscout/Agents/functioncall.py +0 -248
  113. webscout/Bing_search.py +0 -154
  114. webscout/Provider/TTI/AIuncensoredimage.py +0 -103
  115. webscout/Provider/TTI/Nexra.py +0 -120
  116. webscout/Provider/TTI/PollinationsAI.py +0 -138
  117. webscout/Provider/TTI/WebSimAI.py +0 -142
  118. webscout/Provider/TTI/aiforce.py +0 -160
  119. webscout/Provider/TTI/artbit.py +0 -141
  120. webscout/Provider/TTI/deepinfra.py +0 -148
  121. webscout/Provider/TTI/huggingface.py +0 -155
  122. webscout/Provider/TTI/talkai.py +0 -116
  123. webscout/g4f.py +0 -666
  124. webscout/models.py +0 -23
  125. webscout/requestsHTMLfix.py +0 -775
  126. webscout/webai.py +0 -2590
  127. webscout-6.3.dist-info/RECORD +0 -124
  128. {webscout-6.3.dist-info → webscout-6.5.dist-info}/LICENSE.md +0 -0
  129. {webscout-6.3.dist-info → webscout-6.5.dist-info}/WHEEL +0 -0
  130. {webscout-6.3.dist-info → webscout-6.5.dist-info}/entry_points.txt +0 -0
  131. {webscout-6.3.dist-info → webscout-6.5.dist-info}/top_level.txt +0 -0
webscout/webai.py DELETED
@@ -1,2590 +0,0 @@
1
- import webscout
2
- import click
3
- import cmd
4
- import logging
5
- import os
6
- import sys
7
- import clipman
8
- import re
9
- import rich
10
- import getpass
11
- import json
12
- import re
13
- import sys
14
- import datetime
15
- import time
16
- import subprocess
17
- from threading import Thread as thr
18
- from functools import wraps
19
- from rich.panel import Panel
20
- from rich.style import Style
21
- from rich.markdown import Markdown
22
- from rich.console import Console
23
- from rich.live import Live
24
- from rich.table import Table
25
- from rich.prompt import Prompt
26
- from rich.progress import Progress
27
- from typing import Iterator
28
- from .AIutel import Optimizers
29
- from .AIutel import default_path
30
- from .AIutel import AwesomePrompts
31
- from .AIutel import RawDog
32
- from .AIutel import Audio
33
- from .AIutel import available_providers
34
- from colorama import Fore
35
- from colorama import init as init_colorama
36
- from dotenv import load_dotenv
37
- import g4f
38
- import webscout
39
- import webscout.AIutel
40
- from pyfiglet import figlet_format
41
-
42
- init_colorama(autoreset=True)
43
-
44
- load_dotenv() # loads .env variables
45
-
46
- console = Console()
47
- logging.basicConfig(
48
- format="%(asctime)s - %(levelname)s : %(message)s ",
49
- datefmt="%H:%M:%S",
50
- level=logging.INFO,
51
- )
52
-
53
- try:
54
- clipman.init()
55
- except Exception as e:
56
- logging.debug(f"Dropping clipman in favor of pyperclip - {(e)}")
57
- import pyperclip
58
-
59
- clipman.set = pyperclip.copy
60
- clipman.get = pyperclip.paste
61
-
62
-
63
- class this:
64
- """Console's common variables"""
65
-
66
- rich_code_themes = ["monokai", "paraiso-dark", "igor", "vs", "fruity", "xcode"]
67
-
68
- default_provider = "phind"
69
-
70
- getExc = lambda e: e.args[1] if len(e.args) > 1 else str(e)
71
-
72
- context_settings = dict(auto_envvar_prefix="Webscout")
73
-
74
- """Console utils"""
75
-
76
- @staticmethod
77
- def run_system_command(
78
- command: str, exit_on_error: bool = True, stdout_error: bool = True
79
- ):
80
- """Run commands against system
81
- Args:
82
- command (str): shell command
83
- exit_on_error (bool, optional): Exit on error. Defaults to True.
84
- stdout_error (bool, optional): Print out the error. Defaults to True.
85
-
86
- Returns:
87
- tuple : (is_successfull, object[Exception|Subprocess.run])
88
- """
89
- try:
90
- # Run the command and capture the output
91
- result = subprocess.run(
92
- command,
93
- shell=True,
94
- check=True,
95
- text=True,
96
- stdout=subprocess.PIPE,
97
- stderr=subprocess.PIPE,
98
- )
99
- return (True, result)
100
- except subprocess.CalledProcessError as e:
101
- # Handle error if the command returns a non-zero exit code
102
- if stdout_error:
103
- click.secho(f"Error Occurred: while running '{command}'", fg="yellow")
104
- click.secho(e.stderr, fg="red")
105
- sys.exit(e.returncode) if exit_on_error else None
106
- return (False, e)
107
-
108
- def g4f_providers_in_dict(
109
- url=True,
110
- working=True,
111
- stream=False,
112
- context=False,
113
- gpt35=False,
114
- gpt4=False,
115
- selenium=False,
116
- ):
117
- from webscout.g4f import GPT4FREE
118
- import g4f.Provider.selenium as selenium_based
119
-
120
- selenium_based_providers: list = dir(selenium_based)
121
- hunted_providers = []
122
- required_attrs = (
123
- "url",
124
- "working",
125
- "supports_gpt_35_turbo",
126
- "supports_gpt_4",
127
- "supports_stream",
128
- "supports_message_history",
129
- )
130
-
131
- def sanitize_provider(provider: object):
132
- for attr in required_attrs:
133
- if not hasattr(provider, attr):
134
- setattr(provider, attr, False)
135
-
136
- return provider
137
-
138
- for provider_name, provider_class in g4f.Provider.__map__.items():
139
- provider = sanitize_provider(provider_class)
140
- provider_meta = dict(name=provider_name)
141
- if url:
142
- provider_meta["url"] = provider.url
143
- if working:
144
- provider_meta["working"] = provider.working
145
- if stream:
146
- provider_meta["stream"] = provider.supports_stream
147
- if context:
148
- provider_meta["context"] = provider.supports_message_history
149
- if gpt35:
150
- provider_meta["gpt35_turbo"] = provider.supports_gpt_35_turbo
151
- if gpt4:
152
- provider_meta["gpt4"] = provider.supports_gpt_4
153
- if selenium:
154
- try:
155
- selenium_based_providers.index(provider_meta["name"])
156
- value = True
157
- except ValueError:
158
- value = False
159
- provider_meta["non_selenium"] = value
160
-
161
- hunted_providers.append(provider_meta)
162
-
163
- return hunted_providers
164
-
165
- @staticmethod
166
- def stream_output(
167
- iterable: Iterator,
168
- title: str = "",
169
- is_markdown: bool = True,
170
- style: object = Style(),
171
- transient: bool = False,
172
- title_generator: object = None,
173
- title_generator_params: dict = {},
174
- code_theme: str = "monokai",
175
- vertical_overflow: str = "ellipsis",
176
- ) -> None:
177
- """Stdout streaming response
178
-
179
- Args:
180
- iterable (Iterator): Iterator containing contents to be stdout
181
- title (str, optional): Content title. Defaults to ''.
182
- is_markdown (bool, optional): Flag for markdown content. Defaults to True.
183
- style (object, optional): `rich.style` instance. Defaults to Style().
184
- transient (bool, optional): Flag for transient. Defaults to False.
185
- title_generator (object, optional): Function for generating title. Defaults to None.
186
- title_generator_params (dict, optional): Kwargs for `title_generator` function. Defaults to {}.
187
- code_theme (str, optional): Theme for styling codes. Defaults to `monokai`
188
- vertical_overflow (str, optional): Vertical overflow behaviour on content display. Defaultss to ellipsis.
189
- """
190
- render_this = ""
191
- with Live(
192
- render_this,
193
- transient=transient,
194
- refresh_per_second=8,
195
- vertical_overflow=vertical_overflow,
196
- ) as live:
197
- for entry in iterable:
198
- render_this += entry
199
- live.update(
200
- Panel(
201
- (
202
- Markdown(entry, code_theme=code_theme)
203
- if is_markdown
204
- else entry
205
- ),
206
- title=title,
207
- style=style,
208
- )
209
- )
210
- if title_generator:
211
- title = title_generator(**title_generator_params)
212
- live.update(
213
- Panel(
214
- Markdown(entry, code_theme=code_theme) if is_markdown else entry,
215
- title=title,
216
- style=style,
217
- )
218
- )
219
-
220
- @staticmethod
221
- def clear_history_file(file_path, is_true):
222
- """When --new flag is True"""
223
- if is_true and os.path.isfile(file_path):
224
- try:
225
- os.remove(file_path)
226
- except Exception as e:
227
- logging.error(
228
- f"Failed to clear previous chat history - {this.getExc(e)}"
229
- )
230
-
231
- @staticmethod
232
- def handle_exception(func):
233
- """Safely handles cli-based exceptions and exit status-codes"""
234
-
235
- @wraps(func)
236
- def decorator(*args, **kwargs):
237
- try:
238
- exit_status = func(*args, **kwargs)
239
- except Exception as e:
240
- exit_status = False
241
- logging.error(this.getExc(e))
242
- finally:
243
- sys.exit(0 if exit_status not in (False, "") else 1)
244
-
245
- return decorator
246
-
247
-
248
- class busy_bar:
249
- querying = None
250
- __spinner = (
251
- (),
252
- ("-", "\\", "|", "/"),
253
- (
254
- "█■■■■",
255
- "■█■■■",
256
- "■■█■■",
257
- "■■■█■",
258
- "■■■■█",
259
- ),
260
- ("⣾ ", "⣽ ", "⣻ ", "⢿ ", "⡿ ", "⣟ ", "⣯ ", "⣷ "),
261
- )
262
- spin_index = 0
263
- sleep_time = 0.1
264
-
265
- @classmethod
266
- def __action(
267
- cls,
268
- ):
269
- while cls.querying:
270
- for spin in cls.__spinner[cls.spin_index]:
271
- print(" " + spin, end="\r", flush=True)
272
- if not cls.querying:
273
- break
274
- time.sleep(cls.sleep_time)
275
-
276
- @classmethod
277
- def start_spinning(
278
- cls,
279
- ):
280
- try:
281
- cls.querying = True
282
- t1 = thr(
283
- target=cls.__action,
284
- args=(),
285
- )
286
- t1.start()
287
- except Exception as e:
288
- cls.querying = False
289
- logging.debug(this.getExc(e))
290
- t1.join()
291
-
292
- @classmethod
293
- def stop_spinning(cls):
294
- """Stop displaying busy-bar"""
295
- if cls.querying:
296
- cls.querying = False
297
- time.sleep(cls.sleep_time)
298
-
299
- @classmethod
300
- def run(cls, help: str = "Exception", index: int = None, immediate: bool = False):
301
- """Handle function exceptions safely why showing busy bar
302
-
303
- Args:
304
- help (str, optional): Message to be shown incase of an exception. Defaults to ''.
305
- index (int, optional): Busy bars spin index. Defaults to `default`.
306
- immediate (bool, optional): Start the spinning immediately. Defaults to False.
307
- """
308
- if isinstance(index, int):
309
- cls.spin_index = index
310
-
311
- def decorator(func):
312
- @wraps(func) # Preserves function metadata
313
- def main(*args, **kwargs):
314
- try:
315
- if immediate:
316
- cls.start_spinning()
317
- return func(*args, **kwargs)
318
- except KeyboardInterrupt:
319
- cls.stop_spinning()
320
- return
321
- except EOFError:
322
- cls.querying = False
323
- sys.exit(logging.info("Stopping program"))
324
- except Exception as e:
325
- logging.error(f"{help} - {this.getExc(e)}")
326
- finally:
327
- cls.stop_spinning()
328
-
329
- return main
330
-
331
- return decorator
332
-
333
-
334
- class Main(cmd.Cmd):
335
- intro = (
336
- "Welcome to webai Chat in terminal. "
337
- "Type 'help' or 'h' for usage info.\n"
338
- )
339
-
340
- def __init__(
341
- self,
342
- max_tokens,
343
- temperature,
344
- top_k,
345
- top_p,
346
- model,
347
- auth,
348
- timeout,
349
- disable_conversation,
350
- filepath,
351
- update_file,
352
- intro,
353
- history_offset,
354
- awesome_prompt,
355
- proxy_path,
356
- provider,
357
- quiet=False,
358
- chat_completion=False,
359
- ignore_working=False,
360
- rawdog=False,
361
- internal_exec=False,
362
- confirm_script=False,
363
- interpreter="python",
364
- *args,
365
- **kwargs,
366
- ):
367
- super().__init__(*args, **kwargs)
368
- if proxy_path:
369
- with open(proxy_path) as fh:
370
- proxies = json.load(fh)
371
- else:
372
- proxies = {}
373
-
374
- try:
375
- getOr = lambda option, default: option if option else default
376
-
377
- if rawdog:
378
-
379
- self.RawDog = RawDog(
380
- quiet=quiet,
381
- internal_exec=internal_exec,
382
- confirm_script=confirm_script,
383
- interpreter=interpreter,
384
- prettify=True,
385
- )
386
- intro = self.RawDog.intro_prompt
387
- getpass.getuser = lambda: "RawDog"
388
-
389
- if provider == "g4fauto":
390
- from webscout.g4f import TestProviders
391
-
392
- test = TestProviders(quiet=quiet, timeout=timeout)
393
- g4fauto = test.best if ignore_working else test.auto
394
- if isinstance(g4fauto, str):
395
- provider = "g4fauto+" + g4fauto
396
- from webscout.g4f import GPT4FREE
397
-
398
- self.bot = GPT4FREE(
399
- provider=g4fauto,
400
- auth=auth,
401
- max_tokens=max_tokens,
402
- model=model,
403
- chat_completion=chat_completion,
404
- ignore_working=ignore_working,
405
- timeout=timeout,
406
- intro=intro,
407
- filepath=filepath,
408
- update_file=update_file,
409
- proxies=proxies,
410
- history_offset=history_offset,
411
- act=awesome_prompt,
412
- )
413
- else:
414
- raise Exception(
415
- "No working g4f provider found. "
416
- "Consider running 'webscout gpt4free test -y' first"
417
- )
418
- elif provider == "poe":
419
- assert auth, (
420
- "Path to poe.com.cookies.json file or 'p-b' cookie-value is required. "
421
- "Use the flag `--key` or `-k`"
422
- )
423
- from webscout import POE
424
-
425
- self.bot = POE(
426
- cookie=auth,
427
- model=getOr(model, "Assistant"),
428
- proxy=bool(proxies),
429
- timeout=timeout,
430
- filepath=filepath,
431
- update_file=update_file,
432
- intro=intro,
433
- act=awesome_prompt,
434
- )
435
- elif provider == "openai":
436
- assert auth, (
437
- "OpenAI's API-key is required. " "Use the flag `--key` or `-k`"
438
- )
439
- from webscout import OPENAI
440
-
441
- self.bot = OPENAI(
442
- api_key=auth,
443
- is_conversation=disable_conversation,
444
- max_tokens=max_tokens,
445
- temperature=temperature,
446
- presence_penalty=top_p,
447
- frequency_penalty=top_k,
448
- top_p=top_p,
449
- model=getOr(model, model),
450
- timeout=timeout,
451
- intro=intro,
452
- filepath=filepath,
453
- update_file=update_file,
454
- proxies=proxies,
455
- history_offset=history_offset,
456
- act=awesome_prompt,
457
- )
458
- if provider == "auto":
459
- from webscout import AUTO
460
-
461
- self.bot = AUTO(
462
- is_conversation=disable_conversation,
463
- max_tokens=max_tokens,
464
- timeout=timeout,
465
- intro=intro,
466
- filepath=filepath,
467
- update_file=update_file,
468
- proxies=proxies,
469
- history_offset=history_offset,
470
- act=awesome_prompt,
471
- )
472
- elif provider == "opengpt":
473
- from webscout import OPENGPT
474
-
475
- self.bot = OPENGPT(
476
- is_conversation=disable_conversation,
477
- max_tokens=max_tokens,
478
- timeout=timeout,
479
- intro=intro,
480
- filepath=filepath,
481
- update_file=update_file,
482
- proxies=proxies,
483
- history_offset=history_offset,
484
- act=awesome_prompt,
485
- assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88"
486
- )
487
- elif provider == "thinkany":
488
- from webscout import ThinkAnyAI
489
-
490
- self.bot = ThinkAnyAI(
491
- is_conversation=disable_conversation,
492
- max_tokens=max_tokens,
493
- timeout=timeout,
494
- intro=intro,
495
- filepath=filepath,
496
- update_file=update_file,
497
- proxies=proxies,
498
- history_offset=history_offset,
499
- act=awesome_prompt,
500
- )
501
- elif provider == "llama3":
502
- from webscout import LLAMA3
503
- self.bot = LLAMA3(
504
- is_conversation=disable_conversation,
505
- max_tokens=max_tokens,
506
- timeout=timeout,
507
- intro=intro,
508
- filepath=filepath,
509
- update_file=update_file,
510
- proxies=proxies,
511
- history_offset=history_offset,
512
- act=awesome_prompt,
513
- model=getOr(model, "llama3-8b"),
514
- )
515
- elif provider == "berlin4h":
516
- from webscout import Berlin4h
517
-
518
- self.bot = Berlin4h(
519
- is_conversation=disable_conversation,
520
- max_tokens=max_tokens,
521
- timeout=timeout,
522
- intro=intro,
523
- filepath=filepath,
524
- update_file=update_file,
525
- proxies=proxies,
526
- history_offset=history_offset,
527
- act=awesome_prompt,
528
- )
529
- elif provider == "yepchat":
530
- from webscout import YEPCHAT
531
-
532
- self.bot = YEPCHAT(
533
- is_conversation=disable_conversation,
534
- max_tokens=max_tokens,
535
- temperature=temperature,
536
- presence_penalty=top_p,
537
- frequency_penalty=top_k,
538
- top_p=top_p,
539
- model=getOr(model, "Mixtral-8x7B-Instruct-v0.1"),
540
- timeout=timeout,
541
- intro=intro,
542
- filepath=filepath,
543
- update_file=update_file,
544
- proxies=proxies,
545
- history_offset=history_offset,
546
- act=awesome_prompt,
547
- )
548
- elif provider == "groq":
549
- assert auth, (
550
- "GROQ's API-key is required. " "Use the flag `--key` or `-k`"
551
- )
552
- from webscout import GROQ
553
-
554
-
555
- self.bot = GROQ(
556
- api_key=auth,
557
- is_conversation=disable_conversation,
558
- max_tokens=max_tokens,
559
- temperature=temperature,
560
- presence_penalty=top_p,
561
- frequency_penalty=top_k,
562
- top_p=top_p,
563
- model=getOr(model, "mixtral-8x7b-32768"),
564
- timeout=timeout,
565
- intro=intro,
566
- filepath=filepath,
567
- update_file=update_file,
568
- proxies=proxies,
569
- history_offset=history_offset,
570
- act=awesome_prompt,
571
- )
572
- elif provider == "cohere":
573
- assert auth, (
574
- "Cohere's API-key is required. Use the flag `--key` or `-k`"
575
- )
576
- from webscout import Cohere
577
- self.bot = Cohere(
578
- api_key=auth,
579
- is_conversation=disable_conversation,
580
- max_tokens=max_tokens,
581
- temperature=temperature,
582
- top_k=top_k,
583
- top_p=top_p,
584
- model=getOr(model, "command-r-plus"),
585
- timeout=timeout,
586
- intro=intro,
587
- filepath=filepath,
588
- update_file=update_file,
589
- proxies=proxies,
590
- history_offset=history_offset,
591
- act=awesome_prompt,
592
- )
593
- elif provider == "reka":
594
- from webscout import REKA
595
-
596
- self.bot = REKA(
597
- api_key=auth,
598
- is_conversation=disable_conversation,
599
- max_tokens=max_tokens,
600
- timeout=timeout,
601
- intro=intro,
602
- filepath=filepath,
603
- update_file=update_file,
604
- proxies=proxies,
605
- history_offset=history_offset,
606
- act=awesome_prompt,
607
- model=getOr(model, "reka-core"),
608
- # quiet=quiet,
609
- )
610
- elif provider == "deepseek":
611
- from webscout import DeepSeek
612
-
613
- self.bot = DeepSeek(
614
- api_key=auth,
615
- is_conversation=disable_conversation,
616
- max_tokens=max_tokens,
617
- timeout=timeout,
618
- intro=intro,
619
- filepath=filepath,
620
- update_file=update_file,
621
- proxies=proxies,
622
- history_offset=history_offset,
623
- act=awesome_prompt,
624
- model=getOr(model, "deepseek_chat"),
625
- # quiet=quiet,
626
- )
627
- elif provider == "koboldai":
628
- from webscout import KOBOLDAI
629
-
630
- self.bot = KOBOLDAI(
631
- is_conversation=disable_conversation,
632
- max_tokens=max_tokens,
633
- temperature=temperature,
634
- top_p=top_p,
635
- timeout=timeout,
636
- intro=intro,
637
- filepath=filepath,
638
- update_file=update_file,
639
- proxies=proxies,
640
- history_offset=history_offset,
641
- act=awesome_prompt,
642
- )
643
- elif provider == "deepinfra":
644
- from webscout import DeepInfra
645
-
646
- self.bot = DeepInfra(
647
- is_conversation=disable_conversation,
648
- max_tokens=max_tokens,
649
- timeout=timeout,
650
- intro=intro,
651
- filepath=filepath,
652
- update_file=update_file,
653
- proxies=proxies,
654
- model=getOr(model, "Qwen/Qwen2-72B-Instruct"),
655
- history_offset=history_offset,
656
- act=awesome_prompt,
657
- )
658
- elif provider == "xjai":
659
- from webscout import Xjai
660
-
661
- self.bot = Xjai(
662
- is_conversation=disable_conversation,
663
- max_tokens=max_tokens,
664
- temperature=temperature,
665
- top_p=top_p,
666
- timeout=timeout,
667
- intro=intro,
668
- filepath=filepath,
669
- update_file=update_file,
670
- proxies=proxies,
671
- history_offset=history_offset,
672
- act=awesome_prompt,
673
- )
674
-
675
- elif provider == "vtlchat":
676
- from webscout import VTLchat
677
-
678
- self.bot = VTLchat(
679
- is_conversation=disable_conversation,
680
- max_tokens=max_tokens,
681
- temperature=temperature,
682
- top_p=top_p,
683
- timeout=timeout,
684
- intro=intro,
685
- filepath=filepath,
686
- update_file=update_file,
687
- proxies=proxies,
688
- history_offset=history_offset,
689
- act=awesome_prompt,
690
- )
691
- elif provider == "gemini":
692
- from webscout import GEMINI
693
-
694
- assert auth, (
695
- "Path to gemini.google.com.cookies.json file is required. "
696
- "Use the flag `--key` or `-k`"
697
- )
698
- self.bot = GEMINI(
699
- cookie_file=auth,
700
- proxy=proxies,
701
- timeout=timeout,
702
- )
703
-
704
- elif provider == "phind":
705
- from webscout import PhindSearch
706
-
707
- self.bot = PhindSearch(
708
- is_conversation=disable_conversation,
709
- max_tokens=max_tokens,
710
- timeout=timeout,
711
- intro=intro,
712
- filepath=filepath,
713
- update_file=update_file,
714
- proxies=proxies,
715
- history_offset=history_offset,
716
- act=awesome_prompt,
717
- model=getOr(model, "Phind Model"),
718
- quiet=quiet,
719
- )
720
- elif provider == "andi":
721
- from webscout import AndiSearch
722
-
723
- self.bot = AndiSearch(
724
- is_conversation=disable_conversation,
725
- max_tokens=max_tokens,
726
- timeout=timeout,
727
- intro=intro,
728
- filepath=filepath,
729
- update_file=update_file,
730
- proxies=proxies,
731
- history_offset=history_offset,
732
- act=awesome_prompt,
733
- )
734
- elif provider == "blackboxai":
735
-
736
- from webscout import BLACKBOXAI
737
-
738
- self.bot = BLACKBOXAI(
739
- is_conversation=disable_conversation,
740
- max_tokens=max_tokens,
741
- timeout=timeout,
742
- intro=intro,
743
- filepath=filepath,
744
- update_file=update_file,
745
- proxies=proxies,
746
- history_offset=history_offset,
747
- act=awesome_prompt,
748
- )
749
- elif provider == "you":
750
-
751
- from webscout import YouChat
752
-
753
- self.bot = YouChat(
754
- is_conversation=disable_conversation,
755
- max_tokens=max_tokens,
756
- timeout=timeout,
757
- intro=intro,
758
- filepath=filepath,
759
- update_file=update_file,
760
- proxies=proxies,
761
- history_offset=history_offset,
762
- act=awesome_prompt,
763
- )
764
-
765
- elif provider in webscout.gpt4free_providers:
766
- from webscout.g4f import GPT4FREE
767
-
768
- self.bot = GPT4FREE(
769
- provider=provider,
770
- is_conversation=disable_conversation,
771
- auth=auth,
772
- max_tokens=max_tokens,
773
- model=model,
774
- chat_completion=chat_completion,
775
- ignore_working=ignore_working,
776
- timeout=timeout,
777
- intro=intro,
778
- filepath=filepath,
779
- update_file=update_file,
780
- proxies=proxies,
781
- history_offset=history_offset,
782
- act=awesome_prompt,
783
- )
784
-
785
-
786
- elif provider == "perplexity":
787
- from webscout import Perplexity
788
-
789
- self.bot = Perplexity(
790
- is_conversation=disable_conversation,
791
- max_tokens=max_tokens,
792
- timeout=timeout,
793
- intro=intro,
794
- filepath=filepath,
795
- update_file=update_file,
796
- proxies=proxies,
797
- history_offset=history_offset,
798
- act=awesome_prompt,
799
- quiet=quiet,
800
- )
801
- elif provider == "ollama":
802
- from webscout import OLLAMA
803
-
804
- self.bot = OLLAMA(
805
- is_conversation=disable_conversation,
806
- max_tokens=max_tokens,
807
- timeout=timeout,
808
- intro=intro,
809
- filepath=filepath,
810
- update_file=update_file,
811
- proxies=proxies,
812
- history_offset=history_offset,
813
- act=awesome_prompt,
814
- model=getOr(model, "qwen2:0.5b")
815
- )
816
- else:
817
- raise NotImplementedError(
818
- f"The provider `{provider}` is not yet implemented."
819
- )
820
-
821
- except Exception as e:
822
- logging.error(this.getExc(e))
823
- click.secho("Quitting", fg="red")
824
- sys.exit(1)
825
- self.prettify = True
826
- self.color = "cyan"
827
- self.code_theme = "monokai"
828
- self.quiet = quiet
829
- self.vertical_overflow = "ellipsis"
830
- self.disable_stream = False
831
- self.provider = provider
832
- self.disable_coloring = False
833
- self.internal_exec = internal_exec
834
- self.confirm_script = confirm_script
835
- self.interpreter = interpreter
836
- self.rawdog = rawdog
837
- self.read_aloud = False
838
- self.read_aloud_voice = "Brian"
839
- self.path_to_last_response_audio = None
840
- self.__init_time = time.time()
841
- self.__start_time = time.time()
842
- self.__end_time = time.time()
843
-
844
- @property
845
- def get_provider(self):
846
- if self.provider == "auto" and self.bot.provider_name is not None:
847
- return self.bot.provider_name
848
- else:
849
- return self.provider
850
- @property
851
- def prompt(self):
852
- current_time = datetime.datetime.now().strftime("%H:%M:%S")
853
-
854
- def find_range(start, end, hms: bool = False):
855
- in_seconds = round(end - start, 1)
856
- return (
857
- str(datetime.timedelta(seconds=in_seconds)).split(".")[0].zfill(8)
858
- if hms
859
- else in_seconds
860
- )
861
- if not self.disable_coloring:
862
- cmd_prompt = (
863
- f"╭─[`{Fore.GREEN}{getpass.getuser().capitalize()}@webai]`"
864
- f"(`{Fore.YELLOW}{self.get_provider})`"
865
- f"~[`{Fore.LIGHTWHITE_EX}⏰{Fore.MAGENTA}{current_time}-`"
866
- f"{Fore.LIGHTWHITE_EX}💻{Fore.BLUE}{find_range(self.__init_time, time.time(), True)}-`"
867
- f"{Fore.LIGHTWHITE_EX}⚡️{Fore.RED}{find_range(self.__start_time, self.__end_time)}s]`"
868
- f"\n╰─>"
869
- )
870
- whitelist = ["[", "]", "~", "-", "(", ")"]
871
- for character in whitelist:
872
- cmd_prompt = cmd_prompt.replace(character + "`", Fore.RESET + character)
873
- return cmd_prompt
874
-
875
- else:
876
- return (
877
- f"╭─[{getpass.getuser().capitalize()}@webscout]({self.get_provider})"
878
- f"~[⏰{current_time}"
879
- f"-💻{find_range(self.__init_time, time.time(), True)}"
880
- f"-⚡️{find_range(self.__start_time, self.__end_time)}s]"
881
- f"~[⏰{current_time}"
882
- f"-💻{find_range(self.__init_time, time.time(), True)}"
883
- f"-⚡️{find_range(self.__start_time, self.__end_time)}s]"
884
- "\n╰─>"
885
- )
886
-
887
- def output_bond(
888
- self,
889
- title: str,
890
- text: str,
891
- color: str = "cyan",
892
- frame: bool = True,
893
- is_json: bool = False,
894
- ):
895
- """Print prettified output
896
-
897
- Args:
898
- title (str): Title
899
- text (str): Info to be printed
900
- color (str, optional): Output color. Defaults to "cyan".
901
- frame (bool, optional): Add frame. Defaults to True.
902
- """
903
- if is_json:
904
- text = f"""
905
- ```json
906
- {json.dumps(text,indent=4)}
907
- ```
908
- """
909
- rich.print(
910
- Panel(
911
- Markdown(text, code_theme=self.code_theme),
912
- title=title.title(),
913
- style=Style(
914
- color=color,
915
- frame=frame,
916
- ),
917
- ),
918
- )
919
- if is_json and click.confirm("Do you wish to save this"):
920
- default_path = title + ".json"
921
- save_to = click.prompt(
922
- "Enter path to save to", default=default_path, type=click.STRING
923
- )
924
- with open(save_to, "a") as fh:
925
- json.dump(text, fh, indent=4)
926
- click.secho(f"Successfuly saved to `{save_to}`", fg="green")
927
-
928
- def do_h(self, line):
929
- """Show help info in tabular form"""
930
- table = Table(
931
- title="Help info",
932
- show_lines=True,
933
- )
934
- table.add_column("No.", style="white", justify="center")
935
- table.add_column("Command", style="yellow", justify="left")
936
- table.add_column("Function", style="cyan")
937
- command_methods = [
938
- getattr(self, method)
939
- for method in dir(self)
940
- if callable(getattr(self, method)) and method.startswith("do_")
941
- ]
942
- command_methods.append(self.default)
943
- command_methods.reverse()
944
- for no, method in enumerate(command_methods):
945
- table.add_row(
946
- str(no + 1),
947
- method.__name__[3:] if not method == self.default else method.__name__,
948
- method.__doc__,
949
- )
950
- Console().print(table)
951
-
952
- @busy_bar.run("Settings saved")
953
- def do_settings(self, line):
954
- """Configure settings"""
955
- self.prettify = click.confirm(
956
- "\nPrettify markdown response", default=self.prettify
957
- )
958
- busy_bar.spin_index = click.prompt(
959
- "Spin bar index [0: None, 1:/, 2:■█■■■, 3:⣻]",
960
- default=busy_bar.spin_index,
961
- type=click.IntRange(0, 3),
962
- )
963
- self.color = click.prompt(
964
- "Response stdout font color", default=self.color or "white"
965
- )
966
- self.code_theme = Prompt.ask(
967
- "Enter code_theme", choices=this.rich_code_themes, default=self.code_theme
968
- )
969
- self.vertical_overflow = Prompt.ask(
970
- "\nVertical overflow behaviour",
971
- choices=["ellipsis", "visible", "crop"],
972
- default=self.vertical_overflow,
973
- )
974
- self.bot.max_tokens_to_sample = click.prompt(
975
- "\nMaximum tokens to sample",
976
- type=click.INT,
977
- default=self.bot.max_tokens_to_sample,
978
- )
979
- self.bot.temperature = click.prompt(
980
- "Temperature", type=click.FLOAT, default=self.bot.temperature
981
- )
982
- self.bot.top_k = click.prompt(
983
- "Chance of topic being repeated, top_k",
984
- type=click.FLOAT,
985
- default=self.bot.top_k,
986
- )
987
- self.bot.top_p = click.prompt(
988
- "Sampling threshold during inference time, top_p",
989
- type=click.FLOAT,
990
- default=self.bot.top_p,
991
- )
992
- self.bot.model = click.prompt(
993
- "Model name", type=click.STRING, default=self.bot.model
994
- )
995
-
996
- @busy_bar.run(help="System error")
997
- def do_copy_this(self, line):
998
- """Copy last response
999
- Usage:
1000
- copy_this:
1001
- text-copied = {whole last-response}
1002
- copy_this code:
1003
- text-copied = {All codes in last response}
1004
- """
1005
- if self.bot.last_response:
1006
- global last_response
1007
- last_response = self.bot.get_message(self.bot.last_response)
1008
- if not "code" in line:
1009
- clipman.set(last_response)
1010
- click.secho("Last response copied successfully!", fg="cyan")
1011
- return
1012
-
1013
- # Copies just code
1014
- sanitized_codes = []
1015
- code_blocks = re.findall(r"```.*?```", last_response, re.DOTALL)
1016
- for code_block in code_blocks:
1017
- new_code_block = re.sub(
1018
- "^```.*$", "", code_block.strip(), flags=re.MULTILINE
1019
- )
1020
- if bool(new_code_block.strip()):
1021
- sanitized_codes.append(new_code_block)
1022
- if sanitized_codes:
1023
- if len(sanitized_codes) > 1:
1024
- if not click.confirm("Do you wish to copy all codes"):
1025
- for index, code in enumerate(sanitized_codes):
1026
- rich.print(
1027
- Panel(
1028
- Markdown(
1029
- code_blocks[index], code_theme=self.code_theme
1030
- ),
1031
- title=f"Index : {index}",
1032
- title_align="left",
1033
- )
1034
- )
1035
-
1036
- clipman.set(
1037
- sanitized_codes[
1038
- click.prompt(
1039
- "Enter code index",
1040
- type=click.IntRange(0, len(sanitized_codes) - 1),
1041
- )
1042
- ]
1043
- )
1044
- click.secho("Code copied successfully", fg="cyan")
1045
- else:
1046
- clipman.set("\n\n".join(sanitized_codes))
1047
- click.secho(
1048
- f"All {len(sanitized_codes)} codes copied successfully!",
1049
- fg="cyan",
1050
- )
1051
- else:
1052
- clipman.set(sanitized_codes[0])
1053
- click.secho("Code copied successfully!", fg="cyan")
1054
- else:
1055
- click.secho("No code found in the last response!", fg="red")
1056
- else:
1057
- click.secho("Chat with AI first.", fg="yellow")
1058
-
1059
- @busy_bar.run()
1060
- def do_with_copied(self, line):
1061
- """Attach last copied text to the prompt
1062
- Usage:
1063
- from_copied:
1064
- prompt = {text-copied}
1065
- from_copied Debug this code:
1066
- prompt = Debug this code {newline} {text-copied}
1067
- """
1068
- issued_prompt = (
1069
- f"{line}\n{clipman.get()}" if bool(line.strip()) else clipman.get()
1070
- )
1071
- click.secho(issued_prompt, fg="yellow")
1072
- if click.confirm("Do you wish to proceed"):
1073
- self.default(issued_prompt)
1074
-
1075
- @busy_bar.run()
1076
- def do_code(self, line):
1077
- """Enhance prompt for code generation
1078
- usage :
1079
- code <Code description>
1080
- """
1081
- self.default(Optimizers.code(line))
1082
-
1083
- @busy_bar.run()
1084
- def do_shell(self, line):
1085
- """Enhance prompt for system command (shell) generation
1086
- Usage:
1087
- shell <Action to be accomplished>
1088
- """
1089
- self.default(Optimizers.shell_command(line))
1090
- if click.confirm("Do you wish to run the command(s) generated in your system"):
1091
- self.do_sys(self.bot.get_message(self.bot.last_response))
1092
-
1093
- @busy_bar.run("While changing directory")
1094
- def do_cd(self, line):
1095
- """Change directory
1096
- Usage :
1097
- cd <path-to-directory>
1098
- """
1099
- assert line, "File path is required"
1100
- os.chdir(line)
1101
-
1102
- def do_clear(self, line):
1103
- """Clear console"""
1104
- sys.stdout.write("\u001b[2J\u001b[H")
1105
- sys.stdout.flush()
1106
-
1107
- @busy_bar.run("While handling history")
1108
- def do_history(self, line):
1109
- """Show current conversation history"""
1110
- history = self.bot.conversation.chat_history
1111
- formatted_history = re.sub(
1112
- "\nLLM :",
1113
- "\n\n**LLM** :",
1114
- re.sub("\nUser :", "\n\n**User** :", history),
1115
- )
1116
- self.output_bond("Chat History", formatted_history, self.color)
1117
- if click.confirm("Do you wish to save this chat"):
1118
- save_to = click.prompt(
1119
- "Enter path/file-name", default=f"{self.provider}-chat.txt"
1120
- )
1121
- with open(save_to, "a") as fh:
1122
- fh.write(history)
1123
- click.secho(f"Conversation saved successfully to '{save_to}'", fg="cyan")
1124
-
1125
- @busy_bar.run("while resetting conversation")
1126
- def do_reset(self, line):
1127
- """Start new conversation thread"""
1128
- self.bot.conversation.chat_history = click.prompt(
1129
- "Introductory prompt", default=self.bot.conversation.intro
1130
- )
1131
- if hasattr(self.bot, "reset"):
1132
- self.bot.reset()
1133
- click.secho("Conversation reset successfully. New one created.", fg="cyan")
1134
-
1135
- @busy_bar.run("while loading conversation")
1136
- def do_load(self, line):
1137
- """Load conversation history from file"""
1138
- history_file = click.prompt("Enter path to history path", default=line)
1139
- if not os.path.isfile(history_file):
1140
- click.secho(f"Path `{history_file}` does not exist!", fg="red")
1141
- return
1142
- with open(history_file) as fh:
1143
- self.bot.conversation.chat_history = fh.read()
1144
- click.secho("Conversation loaded successfully.", fg="cyan")
1145
-
1146
- def do_last_response(self, line):
1147
- """Show whole last response in json format"""
1148
- self.output_bond(
1149
- "Last Response",
1150
- self.bot.last_response,
1151
- is_json=True,
1152
- )
1153
- @busy_bar.run(help="While rereading aloud", index=3, immediate=True)
1154
- def do_reread(self, line):
1155
- """Reread aloud last ai response"""
1156
- if not self.path_to_last_response_audio:
1157
- raise Exception("Path to last response audio is null")
1158
- Audio.play(self.path_to_last_response_audio)
1159
-
1160
- @busy_bar.run()
1161
- def do_exec(self, line):
1162
- """Exec python code in last response with RawDog"""
1163
- last_response = self.bot.get_message(self.bot.last_response)
1164
- assert last_response, "Last response is null"
1165
- assert "```python" in last_response, "Last response has no python code"
1166
- if self.rawdog:
1167
- self.RawDog.main(last_response)
1168
- else:
1169
- rawdog = RawDog(
1170
- quiet=self.quiet,
1171
- internal_exec=self.internal_exec,
1172
- confirm_script=self.confirm_script,
1173
- interpreter=self.interpreter,
1174
- prettify=self.prettify,
1175
- )
1176
- rawdog.main(last_response)
1177
-
1178
- @busy_bar.run()
1179
- def do_rawdog(self, line):
1180
- """Repeat executing last rawdog's python code"""
1181
- assert self.rawdog, "Session not in rawdog mode. Restart with --rawdog"
1182
- self.default(self.bot.get_message(self.bot.last_response))
1183
-
1184
- @busy_bar.run()
1185
- def default(self, line, exit_on_error: bool = False, normal_stdout: bool = False):
1186
- """Chat with LLM"""
1187
- if not bool(line):
1188
- return
1189
- if line.startswith("./"):
1190
- os.system(line[2:])
1191
-
1192
- elif self.rawdog:
1193
- self.__start_time = time.time()
1194
- busy_bar.start_spinning()
1195
- ai_response = self.bot.chat(line, stream=False)
1196
- busy_bar.stop_spinning()
1197
- is_feedback = self.RawDog.main(ai_response)
1198
- if is_feedback:
1199
- return self.default(is_feedback)
1200
- self.__end_time = time.time()
1201
-
1202
- else:
1203
- self.__start_time = time.time()
1204
- try:
1205
-
1206
- def generate_response():
1207
- # Ensure response is yielded
1208
- def for_stream():
1209
- return self.bot.chat(line, stream=True)
1210
-
1211
- def for_non_stream():
1212
- yield self.bot.chat(line, stream=False)
1213
-
1214
- return for_non_stream() if self.disable_stream else for_stream()
1215
-
1216
- busy_bar.start_spinning()
1217
- generated_response = generate_response()
1218
-
1219
- if normal_stdout or not self.prettify and not self.disable_stream:
1220
- cached_response: str = ""
1221
- if not normal_stdout:
1222
- busy_bar.stop_spinning()
1223
- for response in generated_response:
1224
- offset = len(cached_response)
1225
- print(response[offset:], end="")
1226
- cached_response = response
1227
- if not normal_stdout:
1228
- print("")
1229
- return
1230
-
1231
- if self.quiet:
1232
- busy_bar.stop_spinning()
1233
- console_ = Console()
1234
- with Live(
1235
- console=console_,
1236
- refresh_per_second=16,
1237
- vertical_overflow=self.vertical_overflow,
1238
- ) as live:
1239
- for response in generated_response:
1240
- live.update(
1241
- Markdown(response, code_theme=self.code_theme)
1242
- if self.prettify
1243
- else response
1244
- )
1245
- else:
1246
- busy_bar.stop_spinning()
1247
- this.stream_output(
1248
- generated_response,
1249
- title="Webscout",
1250
- is_markdown=self.prettify,
1251
- style=Style(
1252
- color=self.color,
1253
- ),
1254
- code_theme=self.code_theme,
1255
- vertical_overflow=self.vertical_overflow,
1256
- )
1257
- except (KeyboardInterrupt, EOFError):
1258
- busy_bar.stop_spinning()
1259
- print("")
1260
- return False # Exit cmd
1261
-
1262
- except Exception as e:
1263
- # logging.exception(e)
1264
- busy_bar.stop_spinning()
1265
- logging.error(this.getExc(e))
1266
- if exit_on_error:
1267
- sys.exit(1)
1268
-
1269
- else:
1270
- self.post_default()
1271
-
1272
- finally:
1273
- self.__end_time = time.time()
1274
- @busy_bar.run(help="While reading aloud", immediate=True, index=3)
1275
- def post_default(self):
1276
- """Actions to be taken after upon successfull complete response generation triggered by `default` function"""
1277
- last_text: str = self.bot.get_message(self.bot.last_response)
1278
- if self.read_aloud and last_text is not None:
1279
- # Talk back to user
1280
- self.path_to_last_response_audio = Audio.text_to_audio(
1281
- last_text, voice=self.read_aloud_voice, auto=True
1282
- )
1283
- Audio.play(self.path_to_last_response_audio)
1284
- def do_sys(self, line):
1285
- """Execute system commands
1286
- shortcut [./<command>]
1287
- Usage:
1288
- sys <System command>
1289
- or
1290
- ./<System command>
1291
- """
1292
- os.system(line)
1293
-
1294
- def do_exit(self, line):
1295
- """Quit this program"""
1296
- if click.confirm("Are you sure to exit"):
1297
- click.secho("Okay Goodbye!", fg="yellow")
1298
- return True
1299
-
1300
-
1301
- class EntryGroup:
1302
- """Entry commands"""
1303
-
1304
- # @staticmethod
1305
- @click.group()
1306
- @click.version_option(
1307
- webscout.__version__, "-v", "--version", package_name="webscout"
1308
- )
1309
- @click.help_option("-h", "--help")
1310
- def webai_():
1311
- pass
1312
-
1313
- @staticmethod
1314
- @webai_.group()
1315
- @click.help_option("-h", "--help")
1316
- def utils():
1317
- """Utility endpoint for webscout"""
1318
- pass
1319
-
1320
- @staticmethod
1321
- @webai_.group()
1322
- @click.help_option("-h", "--help")
1323
- def gpt4free():
1324
- """Discover gpt4free models, providers etc"""
1325
- pass
1326
-
1327
- @staticmethod
1328
- @webai_.group()
1329
- @click.help_option("-h", "--help")
1330
- def awesome():
1331
- """Perform CRUD operations on awesome-prompts"""
1332
- pass
1333
-
1334
-
1335
- import webscout
1336
- class Chatwebai:
1337
- """webai command"""
1338
-
1339
- @staticmethod
1340
- @click.command(context_settings=this.context_settings)
1341
- @click.option(
1342
- "-m",
1343
- "--model",
1344
- help="Model name for text-generation", # default="llama-2-13b-chat"
1345
- )
1346
- @click.option(
1347
- "-t",
1348
- "--temperature",
1349
- help="Charge of the generated text's randomness",
1350
- type=click.FloatRange(0, 1),
1351
- default=0.2,
1352
- )
1353
- @click.option(
1354
- "-mt",
1355
- "--max-tokens",
1356
- help="Maximum number of tokens to be generated upon completion",
1357
- type=click.INT,
1358
- default=600,
1359
- )
1360
- @click.option(
1361
- "-tp",
1362
- "--top-p",
1363
- help="Sampling threshold during inference time",
1364
- type=click.FLOAT,
1365
- default=0.999,
1366
- )
1367
- @click.option(
1368
- "-tk",
1369
- "--top-k",
1370
- help="Chance of topic being repeated",
1371
- type=click.FLOAT,
1372
- default=0,
1373
- )
1374
- @click.option(
1375
- "-k",
1376
- "--key",
1377
- help="LLM API access key or auth value or path to LLM with provider.",
1378
- )
1379
- @click.option(
1380
- "-ct",
1381
- "--code-theme",
1382
- help="Theme for displaying codes in response",
1383
- type=click.Choice(this.rich_code_themes),
1384
- default="monokai",
1385
- )
1386
- @click.option(
1387
- "-bi",
1388
- "--busy-bar-index",
1389
- help="Index of busy bar icon : [0: None, 1:/, 2:■█■■■, 3:⣻]",
1390
- type=click.IntRange(0, 3),
1391
- default=3,
1392
- )
1393
- @click.option("-fc", "--font-color", help="Stdout font color")
1394
- @click.option(
1395
- "-to", "--timeout", help="Http requesting timeout", type=click.INT, default=30
1396
- )
1397
- @click.argument("prompt", required=False)
1398
- @click.option(
1399
- "--prettify/--raw",
1400
- help="Flag for prettifying markdowned response",
1401
- default=True,
1402
- )
1403
- @click.option(
1404
- "-dc",
1405
- "--disable-conversation",
1406
- is_flag=True,
1407
- default=True, # is_conversation = True
1408
- help="Disable chatting conversationally (Stable)",
1409
- )
1410
- @click.option(
1411
- "-fp",
1412
- "--filepath",
1413
- type=click.Path(),
1414
- default=os.path.join(default_path, "chat-history.txt"),
1415
- help="Path to chat history - new will be created incase doesn't exist",
1416
- )
1417
- @click.option(
1418
- "--update-file/--retain-file",
1419
- help="Controls updating chat history in file",
1420
- default=True,
1421
- )
1422
- @click.option(
1423
- "-i",
1424
- "--intro",
1425
- help="Conversation introductory prompt",
1426
- )
1427
- @click.option(
1428
- "-ho",
1429
- "--history-offset",
1430
- help="Limit conversation history to this number of last texts",
1431
- type=click.IntRange(100, 16000),
1432
- default=10250,
1433
- )
1434
- @click.option(
1435
- "-ap",
1436
- "--awesome-prompt",
1437
- default="0",
1438
- callback=lambda ctx, param, value: (
1439
- int(value) if str(value).isdigit() else value
1440
- ),
1441
- help="Awesome prompt key or index. Alt. to intro",
1442
- )
1443
- @click.option(
1444
- "-pp",
1445
- "--proxy-path",
1446
- type=click.Path(exists=True),
1447
- help="Path to .json file containing proxies",
1448
- )
1449
- @click.option(
1450
- "-p",
1451
- "--provider",
1452
- type=click.Choice(available_providers),
1453
- default=this.default_provider,
1454
- help="Name of LLM provider.",
1455
- metavar=(
1456
- f"[{'|'.join(webscout.webai)}] etc, "
1457
- "run 'webscout gpt4free list providers -w' to "
1458
- "view more providers and 'webscout gpt4free test -y' "
1459
- "for advanced g4f providers test"
1460
- ),
1461
- )
1462
- @click.option(
1463
- "-vo",
1464
- "--vertical-overflow",
1465
- help="Vertical overflow behaviour on content display",
1466
- type=click.Choice(["visible", "crop", "ellipsis"]),
1467
- default="ellipsis",
1468
- )
1469
- @click.option(
1470
- "-w",
1471
- "--whole",
1472
- is_flag=True,
1473
- default=False,
1474
- help="Disable streaming response",
1475
- )
1476
- @click.option(
1477
- "-q",
1478
- "--quiet",
1479
- is_flag=True,
1480
- help="Flag for controlling response-framing and response verbosity",
1481
- default=False,
1482
- )
1483
- @click.option(
1484
- "-n",
1485
- "--new",
1486
- help="Overwrite the filepath contents",
1487
- is_flag=True,
1488
- )
1489
- @click.option(
1490
- "-wc",
1491
- "--with-copied",
1492
- is_flag=True,
1493
- help="Postfix prompt with last copied text",
1494
- )
1495
- @click.option(
1496
- "-nc", "--no-coloring", is_flag=True, help="Disable intro prompt font-coloring"
1497
- )
1498
- @click.option(
1499
- "-cc",
1500
- "--chat-completion",
1501
- is_flag=True,
1502
- help="Provide native context for gpt4free providers",
1503
- )
1504
- @click.option(
1505
- "-iw",
1506
- "--ignore-working",
1507
- is_flag=True,
1508
- help="Ignore working status of the provider",
1509
- )
1510
- @click.option(
1511
- "-rd",
1512
- "--rawdog",
1513
- is_flag=True,
1514
- help="Generate and auto-execute Python scripts - (experimental)",
1515
- )
1516
- @click.option(
1517
- "-ix",
1518
- "--internal-exec",
1519
- is_flag=True,
1520
- help="RawDog : Execute scripts with exec function instead of out-of-script interpreter",
1521
- )
1522
- @click.option(
1523
- "-cs",
1524
- "--confirm-script",
1525
- is_flag=True,
1526
- help="RawDog : Give consent to generated scripts prior to execution",
1527
- )
1528
- @click.option(
1529
- "-int",
1530
- "--interpreter",
1531
- default="python",
1532
- help="RawDog : Python's interpreter name",
1533
- )
1534
- @click.option(
1535
- "-ttm",
1536
- "--talk-to-me",
1537
- is_flag=True,
1538
- help="Audiolize responses upon complete generation",
1539
- )
1540
- @click.option(
1541
- "-ttmv",
1542
- "--talk-to-me-voice",
1543
- help="The voice to use for speech synthesis",
1544
- type=click.Choice(Audio.all_voices),
1545
- metavar="|".join(Audio.all_voices[:8]),
1546
- default="Brian",
1547
- )
1548
- @click.help_option("-h", "--help")
1549
- def webai(
1550
- model,
1551
- temperature,
1552
- max_tokens,
1553
- top_p,
1554
- top_k,
1555
- key,
1556
- code_theme,
1557
- busy_bar_index,
1558
- font_color,
1559
- timeout,
1560
- prompt,
1561
- prettify,
1562
- disable_conversation,
1563
- filepath,
1564
- update_file,
1565
- intro,
1566
- history_offset,
1567
- awesome_prompt,
1568
- proxy_path,
1569
- provider,
1570
- vertical_overflow,
1571
- whole,
1572
- quiet,
1573
- new,
1574
- with_copied,
1575
- no_coloring,
1576
- chat_completion,
1577
- ignore_working,
1578
- rawdog,
1579
- internal_exec,
1580
- confirm_script,
1581
- interpreter,
1582
- talk_to_me,
1583
- talk_to_me_voice,
1584
- ):
1585
- """Chat with AI webaily (Default)"""
1586
- this.clear_history_file(filepath, new)
1587
- bot = Main(
1588
- max_tokens,
1589
- temperature,
1590
- top_k,
1591
- top_p,
1592
- model,
1593
- key,
1594
- timeout,
1595
- disable_conversation,
1596
- filepath,
1597
- update_file,
1598
- intro,
1599
- history_offset,
1600
- awesome_prompt,
1601
- proxy_path,
1602
- provider,
1603
- quiet,
1604
- chat_completion,
1605
- ignore_working,
1606
- rawdog=rawdog,
1607
- internal_exec=internal_exec,
1608
- confirm_script=confirm_script,
1609
- interpreter=interpreter,
1610
- )
1611
- busy_bar.spin_index = busy_bar_index
1612
- bot.code_theme = code_theme
1613
- bot.color = font_color
1614
- bot.disable_coloring = no_coloring
1615
- bot.prettify = prettify
1616
- bot.vertical_overflow = vertical_overflow
1617
- bot.disable_stream = whole
1618
- bot.read_aloud = talk_to_me
1619
- bot.read_aloud_voice = talk_to_me_voice
1620
- if prompt:
1621
- if with_copied:
1622
- prompt = prompt + "\n" + clipman.get()
1623
- bot.default(prompt)
1624
- bot.cmdloop()
1625
-
1626
-
1627
- class ChatGenerate:
1628
- """Generate command"""
1629
-
1630
- @staticmethod
1631
- @click.command(context_settings=this.context_settings)
1632
- @click.option(
1633
- "-m",
1634
- "--model",
1635
- help="Model name for text-generation",
1636
- )
1637
- @click.option(
1638
- "-t",
1639
- "--temperature",
1640
- help="Charge of the generated text's randomness",
1641
- type=click.FloatRange(0, 1),
1642
- default=0.2,
1643
- )
1644
- @click.option(
1645
- "-mt",
1646
- "--max-tokens",
1647
- help="Maximum number of tokens to be generated upon completion",
1648
- type=click.INT,
1649
- default=600,
1650
- )
1651
- @click.option(
1652
- "-tp",
1653
- "--top-p",
1654
- help="Sampling threshold during inference time",
1655
- type=click.FLOAT,
1656
- default=0.999,
1657
- )
1658
- @click.option(
1659
- "-tk",
1660
- "--top-k",
1661
- help="Chance of topic being repeated",
1662
- type=click.FLOAT,
1663
- default=0,
1664
- )
1665
- @click.option(
1666
- "-k",
1667
- "--key",
1668
- help="LLM API access key or auth value or path to LLM with provider.",
1669
- )
1670
- @click.option(
1671
- "-ct",
1672
- "--code-theme",
1673
- help="Theme for displaying codes in response",
1674
- type=click.Choice(this.rich_code_themes),
1675
- default="monokai",
1676
- )
1677
- @click.option(
1678
- "-bi",
1679
- "--busy-bar-index",
1680
- help="Index of busy bar icon : [0: None, 1:/, 2:■█■■■, 3:⣻]",
1681
- type=click.IntRange(0, 3),
1682
- default=3,
1683
- )
1684
- @click.option(
1685
- "-fc",
1686
- "--font-color",
1687
- help="Stdout font color",
1688
- )
1689
- @click.option(
1690
- "-to", "--timeout", help="Http requesting timeout", type=click.INT, default=30
1691
- )
1692
- @click.argument("prompt", required=False)
1693
- @click.option(
1694
- "--prettify/--raw",
1695
- help="Flag for prettifying markdowned response",
1696
- default=True,
1697
- )
1698
- @click.option(
1699
- "-w",
1700
- "--whole",
1701
- is_flag=True,
1702
- default=False,
1703
- help="Disable streaming response",
1704
- )
1705
- @click.option(
1706
- "-c",
1707
- "--code",
1708
- is_flag=True,
1709
- default=False,
1710
- help="Optimize prompt for code generation",
1711
- )
1712
- @click.option(
1713
- "-s",
1714
- "--shell",
1715
- is_flag=True,
1716
- default=False,
1717
- help="Optimize prompt for shell command generation",
1718
- )
1719
- @click.option(
1720
- "-dc",
1721
- "--disable-conversation",
1722
- is_flag=True,
1723
- default=True, # is_conversation = True
1724
- help="Disable chatting conversationally (Stable)",
1725
- )
1726
- @click.option(
1727
- "-fp",
1728
- "--filepath",
1729
- type=click.Path(),
1730
- default=os.path.join(default_path, "chat-history.txt"),
1731
- help="Path to chat history - new will be created incase doesn't exist",
1732
- )
1733
- @click.option(
1734
- "--update-file/--retain-file",
1735
- help="Controls updating chat history in file",
1736
- default=True,
1737
- )
1738
- @click.option(
1739
- "-i",
1740
- "--intro",
1741
- help="Conversation introductory prompt",
1742
- )
1743
- @click.option(
1744
- "-ho",
1745
- "--history-offset",
1746
- help="Limit conversation history to this number of last texts",
1747
- type=click.IntRange(100, 16000),
1748
- default=10250,
1749
- )
1750
- @click.option(
1751
- "-ap",
1752
- "--awesome-prompt",
1753
- default="0",
1754
- callback=lambda ctx, param, value: (
1755
- int(value) if str(value).isdigit() else value
1756
- ),
1757
- help="Awesome prompt key or index. Alt. to intro",
1758
- )
1759
- @click.option(
1760
- "-pp",
1761
- "--proxy-path",
1762
- type=click.Path(exists=True),
1763
- help="Path to .json file containing proxies",
1764
- )
1765
- @click.option(
1766
- "-p",
1767
- "--provider",
1768
- type=click.Choice(webscout.available_providers),
1769
- default=this.default_provider,
1770
- help="Name of LLM provider.",
1771
- metavar=(
1772
- f"[{'|'.join(webscout.webai)}] etc, "
1773
- "run 'webscout gpt4free list providers -w' to "
1774
- "view more providers and 'webscout gpt4free test -y' "
1775
- "for advanced g4f providers test"
1776
- ),
1777
- )
1778
- @click.option(
1779
- "-vo",
1780
- "--vertical-overflow",
1781
- help="Vertical overflow behaviour on content display",
1782
- type=click.Choice(["visible", "crop", "ellipsis"]),
1783
- default="ellipsis",
1784
- )
1785
- @click.option(
1786
- "-q",
1787
- "--quiet",
1788
- is_flag=True,
1789
- help="Flag for controlling response-framing and response verbosity",
1790
- default=False,
1791
- )
1792
- @click.option(
1793
- "-n",
1794
- "--new",
1795
- help="Override the filepath contents",
1796
- is_flag=True,
1797
- )
1798
- @click.option(
1799
- "-wc",
1800
- "--with-copied",
1801
- is_flag=True,
1802
- help="Postfix prompt with last copied text",
1803
- )
1804
- @click.option(
1805
- "-iw",
1806
- "--ignore-working",
1807
- is_flag=True,
1808
- help="Ignore working status of the provider",
1809
- )
1810
- @click.option(
1811
- "-rd",
1812
- "--rawdog",
1813
- is_flag=True,
1814
- help="Generate and auto-execute Python scripts - (experimental)",
1815
- )
1816
- @click.option(
1817
- "-ix",
1818
- "--internal-exec",
1819
- is_flag=True,
1820
- help="RawDog : Execute scripts with exec function instead of out-of-script interpreter",
1821
- )
1822
- @click.option(
1823
- "-cs",
1824
- "--confirm-script",
1825
- is_flag=True,
1826
- help="RawDog : Give consent to generated scripts prior to execution",
1827
- )
1828
- @click.option(
1829
- "-int",
1830
- "--interpreter",
1831
- default="python",
1832
- help="RawDog : Python's interpreter name",
1833
- )
1834
- @click.option(
1835
- "-ttm",
1836
- "--talk-to-me",
1837
- is_flag=True,
1838
- help="Audiolize responses upon complete generation",
1839
- )
1840
- @click.option(
1841
- "-ttmv",
1842
- "--talk-to-me-voice",
1843
- help="The voice to use for speech synthesis",
1844
- type=click.Choice(Audio.all_voices),
1845
- metavar="|".join(Audio.all_voices[:8]),
1846
- default="Brian",
1847
- )
1848
- @click.help_option("-h", "--help")
1849
- def generate(
1850
- model,
1851
- temperature,
1852
- max_tokens,
1853
- top_p,
1854
- top_k,
1855
- key,
1856
- code_theme,
1857
- busy_bar_index,
1858
- font_color,
1859
- timeout,
1860
- prompt,
1861
- prettify,
1862
- whole,
1863
- code,
1864
- shell,
1865
- disable_conversation,
1866
- filepath,
1867
- update_file,
1868
- intro,
1869
- history_offset,
1870
- awesome_prompt,
1871
- proxy_path,
1872
- provider,
1873
- vertical_overflow,
1874
- quiet,
1875
- new,
1876
- with_copied,
1877
- ignore_working,
1878
- rawdog,
1879
- internal_exec,
1880
- confirm_script,
1881
- interpreter,
1882
- talk_to_me,
1883
- talk_to_me_voice,
1884
- ):
1885
- """Generate a quick response with AI"""
1886
- this.clear_history_file(filepath, new)
1887
- bot = Main(
1888
- max_tokens,
1889
- temperature,
1890
- top_k,
1891
- top_p,
1892
- model,
1893
- key,
1894
- timeout,
1895
- disable_conversation,
1896
- filepath,
1897
- update_file,
1898
- intro,
1899
- history_offset,
1900
- awesome_prompt,
1901
- proxy_path,
1902
- provider,
1903
- quiet,
1904
- ignore_working=ignore_working,
1905
- rawdog=rawdog,
1906
- internal_exec=internal_exec,
1907
- confirm_script=confirm_script,
1908
- interpreter=interpreter,
1909
- )
1910
- prompt = prompt if prompt else ""
1911
- copied_placeholder = "{{copied}}"
1912
- stream_placeholder = "{{stream}}"
1913
-
1914
- if with_copied or copied_placeholder in prompt:
1915
- last_copied_text = clipman.get()
1916
- assert last_copied_text, "No copied text found, issue prompt"
1917
-
1918
- if copied_placeholder in prompt:
1919
- prompt = prompt.replace(copied_placeholder, last_copied_text)
1920
-
1921
- else:
1922
- sep = "\n" if prompt else ""
1923
- prompt = prompt + sep + last_copied_text
1924
-
1925
- if not prompt and sys.stdin.isatty(): # No prompt issued and no piped input
1926
- help_info = (
1927
- "Usage: webscout generate [OPTIONS] PROMPT\n"
1928
- "Try 'webscout generate --help' for help.\n"
1929
- "Error: Missing argument 'PROMPT'."
1930
- )
1931
- click.secho(
1932
- help_info
1933
- ) # Mimic click's missing-argument help message
1934
- sys.exit(1)
1935
-
1936
- if not sys.stdin.isatty(): # Piped input detected - True
1937
- # Let's try to read piped input
1938
- stream_text = click.get_text_stream("stdin").read()
1939
- if stream_placeholder in prompt:
1940
- prompt = prompt.replace(stream_placeholder, stream_text)
1941
- else:
1942
- prompt = prompt + "\n" + stream_text if prompt else stream_text
1943
-
1944
- assert stream_placeholder not in prompt, (
1945
- "No piped input detected ~ " + stream_placeholder
1946
- )
1947
- assert copied_placeholder not in prompt, (
1948
- "No copied text found ~ " + copied_placeholder
1949
- )
1950
-
1951
- prompt = Optimizers.code(prompt) if code else prompt
1952
- prompt = Optimizers.shell_command(prompt) if shell else prompt
1953
- busy_bar.spin_index = (
1954
- 0 if any([quiet, sys.stdout.isatty() == False]) else busy_bar_index
1955
- )
1956
- bot.code_theme = code_theme
1957
- bot.color = font_color
1958
- bot.prettify = prettify
1959
- bot.vertical_overflow = vertical_overflow
1960
- bot.disable_stream = whole
1961
- bot.read_aloud = talk_to_me
1962
- bot.read_aloud_voice = talk_to_me_voice
1963
- bot.default(prompt, True, normal_stdout=(sys.stdout.isatty() == False))
1964
-
1965
-
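Note: the removed `generate` command assembles its prompt from up to three sources - the positional argument, the last clipboard entry (`--with-copied` / `{{copied}}`) and piped stdin (`{{stream}}`). A minimal, self-contained sketch of that substitution logic follows; the function and argument names are hypothetical, and the real command pulls the clipboard via clipman and stdin via click.

```python
# Illustrative only: mirrors the placeholder handling seen in the removed
# `generate` command. Names here are hypothetical stand-ins.
COPIED = "{{copied}}"
STREAM = "{{stream}}"

def build_prompt(prompt: str, clipboard_text: str = "", stdin_text: str = "") -> str:
    """Substitute clipboard/piped text into the prompt, or append it."""
    if clipboard_text:
        if COPIED in prompt:
            prompt = prompt.replace(COPIED, clipboard_text)
        else:
            prompt = (prompt + "\n" + clipboard_text) if prompt else clipboard_text
    if stdin_text:
        if STREAM in prompt:
            prompt = prompt.replace(STREAM, stdin_text)
        else:
            prompt = (prompt + "\n" + stdin_text) if prompt else stdin_text
    return prompt

print(build_prompt("Summarise this: {{stream}}", stdin_text="piped text"))
```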
1966
- class Awesome:
1967
- """Awesome commands"""
1968
-
1969
- @staticmethod
1970
- @click.command(context_settings=this.context_settings)
1971
- @click.option(
1972
- "-r",
1973
- "--remote",
1974
- help="Remote source to update from",
1975
- default=AwesomePrompts.awesome_prompt_url,
1976
- )
1977
- @click.option(
1978
- "-o",
1979
- "--output",
1980
- help="Path to save the prompts",
1981
- default=AwesomePrompts.awesome_prompt_path,
1982
- )
1983
- @click.option(
1984
- "-n", "--new", is_flag=True, help="Override the existing contents in path"
1985
- )
1986
- @click.help_option("-h", "--help")
1987
- @this.handle_exception
1988
- def update(remote, output, new):
1989
- """Update awesome-prompts from remote source."""
1990
- AwesomePrompts.awesome_prompt_url = remote
1991
- AwesomePrompts.awesome_prompt_path = output
1992
- AwesomePrompts().update_prompts_from_online(new)
1993
- click.secho(
1994
- f"Prompts saved to - '{AwesomePrompts.awesome_prompt_path}'", fg="cyan"
1995
- )
1996
-
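Note: `Awesome.update` delegates the actual work to `AwesomePrompts.update_prompts_from_online()`. As a rough sketch of what such an update amounts to, assuming the remote source serves a JSON mapping of prompt names to prompt text (the function name and merge behaviour below are illustrative, not the package's implementation):

```python
import json
import requests

def update_prompts(remote_url: str, output_path: str, overwrite: bool = False) -> None:
    """Fetch a prompts mapping from a remote JSON source and save it locally."""
    prompts = requests.get(remote_url, timeout=20).json()
    if not overwrite:
        try:
            with open(output_path) as fh:
                existing = json.load(fh)
            existing.update(prompts)   # keep existing entries, prefer remote ones
            prompts = existing
        except FileNotFoundError:
            pass
    with open(output_path, "w") as fh:
        json.dump(prompts, fh, indent=4)
```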
1997
- @staticmethod
1998
- @click.command(context_settings=this.context_settings)
1999
- @click.argument(
2000
- "key",
2001
- required=True,
2002
- type=click.STRING,
2003
- )
2004
- @click.option(
2005
- "-d", "--default", help="Return this value if not found", default=None
2006
- )
2007
- @click.option(
2008
- "-c",
2009
- "--case-sensitive",
2010
- default=True,
2011
- flag_value=False,
2012
- help="Perform case-sensitive search",
2013
- )
2014
- @click.option(
2015
- "-f",
2016
- "--file",
2017
- type=click.Path(exists=True),
2018
- help="Path to existing prompts",
2019
- default=AwesomePrompts.awesome_prompt_path,
2020
- )
2021
- @click.help_option("-h", "--help")
2022
- @this.handle_exception
2023
- def search(
2024
- key,
2025
- default,
2026
- case_sensitive,
2027
- file,
2028
- ):
2029
- """Search for a particular awesome-prompt by key or index"""
2030
- AwesomePrompts.awesome_prompt_path = file
2031
- resp = AwesomePrompts().get_act(
2032
- key,
2033
- default=default,
2034
- case_insensitive=case_sensitive,
2035
- )
2036
- if resp:
2037
- click.secho(resp)
2038
- return resp != default
2039
-
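Note: the `search` command resolves a prompt through `AwesomePrompts.get_act()`, which accepts either a name or a numeric index and can fall back to a default. A hypothetical stand-in showing that lookup pattern:

```python
def get_act(prompts: dict, key, default=None, case_insensitive: bool = True):
    """Look a prompt up by name or numeric index, falling back to a default."""
    if isinstance(key, int):  # index-based lookup
        acts = list(prompts.values())
        return acts[key] if 0 <= key < len(acts) else default
    if case_insensitive:
        lowered = {name.lower(): value for name, value in prompts.items()}
        return lowered.get(str(key).lower(), default)
    return prompts.get(key, default)

sample = {"Linux Terminal": "Act as a Linux terminal..."}
print(get_act(sample, "linux terminal"))
```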
2040
- @staticmethod
2041
- @click.command(context_settings=this.context_settings)
2042
- @click.option("-n", "--name", required=True, help="Prompt name")
2043
- @click.option("-p", "--prompt", required=True, help="Prompt value")
2044
- @click.option(
2045
- "-f",
2046
- "--file",
2047
- type=click.Path(exists=True),
2048
- help="Path to existing prompts",
2049
- default=AwesomePrompts.awesome_prompt_path,
2050
- )
2051
- @click.help_option("-h", "--help")
2052
- @this.handle_exception
2053
- def add(name, prompt, file):
2054
- """Add new prompt to awesome-prompt list"""
2055
- AwesomePrompts.awesome_prompt_path = file
2056
- return AwesomePrompts().add_prompt(name, prompt)
2057
-
2058
- @staticmethod
2059
- @click.command(context_settings=this.context_settings)
2060
- @click.argument("name")
2061
- @click.option(
2062
- "--case-sensitive",
2063
- is_flag=True,
2064
- flag_value=False,
2065
- default=True,
2066
- help="Perform name case-sensitive search",
2067
- )
2068
- @click.option(
2069
- "-f",
2070
- "--file",
2071
- type=click.Path(exists=True),
2072
- help="Path to existing prompts",
2073
- default=AwesomePrompts.awesome_prompt_path,
2074
- )
2075
- @click.help_option("-h", "--help")
2076
- @this.handle_exception
2077
- def delete(name, case_sensitive, file):
2078
- """Delete a specific awesome-prompt"""
2079
- AwesomePrompts.awesome_prompt_path = file
2080
- return AwesomePrompts().delete_prompt(name, case_sensitive)
2081
-
2082
- @staticmethod
2083
- @click.command(context_settings=this.context_settings)
2084
- @click.option(
2085
- "-j",
2086
- "--json",
2087
- is_flag=True,
2088
- help="Display prompts in json format",
2089
- )
2090
- @click.option(
2091
- "-i",
2092
- "--indent",
2093
- type=click.IntRange(1, 20),
2094
- help="Json format indentation level",
2095
- default=4,
2096
- )
2097
- @click.option(
2098
- "-x",
2099
- "--index",
2100
- is_flag=True,
2101
- help="Display prompts with their corresponding indexes",
2102
- )
2103
- @click.option("-c", "--color", help="Prompts stdout font color")
2104
- @click.option("-o", "--output", type=click.Path(), help="Path to save the prompts")
2105
- @click.help_option("-h", "--help")
2106
- def whole(json, indent, index, color, output):
2107
- """Stdout all awesome prompts"""
2108
- ap = AwesomePrompts()
2109
- awesome_prompts = ap.all_acts if index else ap.get_acts()
2110
-
2111
- if json:
2112
- # click.secho(formatted_awesome_prompts, fg=color)
2113
- rich.print_json(data=awesome_prompts, indent=indent)
2114
-
2115
- else:
2116
- awesome_table = Table(show_lines=True, title="All Awesome-Prompts")
2117
- awesome_table.add_column("index", justify="center", style="yellow")
2118
- awesome_table.add_column("Act Name/Index", justify="left", style="cyan")
2119
- awesome_table.add_column(
2120
- "Prompt",
2121
- style=color,
2122
- )
2123
- for index, key_value in enumerate(awesome_prompts.items()):
2124
- awesome_table.add_row(str(index), str(key_value[0]), key_value[1])
2125
- rich.print(awesome_table)
2126
-
2127
- if output:
2128
- from json import dump
2129
-
2130
- with open(output, "w") as fh:
2131
- dump(awesome_prompts, fh, indent=4)
2132
-
2133
-
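Note: the `whole` command renders the same dict of prompts either as JSON or as a rich table. A condensed sketch of that display logic (the sample data is made up):

```python
import rich
from rich.table import Table

def show_prompts(prompts: dict, as_json: bool = False, indent: int = 4) -> None:
    """Print prompts either as JSON or as a numbered rich table."""
    if as_json:
        rich.print_json(data=prompts, indent=indent)
        return
    table = Table(show_lines=True, title="All Awesome-Prompts")
    table.add_column("index", justify="center", style="yellow")
    table.add_column("Act Name", justify="left", style="cyan")
    table.add_column("Prompt")
    for position, (name, prompt) in enumerate(prompts.items()):
        table.add_row(str(position), str(name), prompt)
    rich.print(table)

show_prompts({"Linux Terminal": "Act as a Linux terminal..."})
```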
2134
- class Gpt4free:
2135
- """Commands for gpt4free"""
2136
-
2137
- @staticmethod
2138
- @click.command(context_settings=this.context_settings)
2139
- @busy_bar.run(index=1, immediate=True)
2140
- @click.help_option("-h", "--help")
2141
- def version():
2142
- """Check current installed version of gpt4free"""
2143
- version_string = this.run_system_command("pip show g4f")[1].stdout.split("\n")[
2144
- 1
2145
- ]
2146
- click.secho(version_string, fg="cyan")
2147
-
2148
- @staticmethod
2149
- @click.command(context_settings=this.context_settings)
2150
- @click.help_option("-h", "--help")
2151
- @click.option(
2152
- "-e",
2153
- "--extra",
2154
- help="Extra required dependencies category",
2155
- multiple=True,
2156
- type=click.Choice(
2157
- ["all", "image", "webdriver", "openai", "api", "gui", "none"]
2158
- ),
2159
- default=["all"],
2160
- )
2161
- @click.option("-l", "--log", is_flag=True, help="Stdout installation logs")
2162
- @click.option(
2163
- "-s",
2164
- "--sudo",
2165
- is_flag=True,
2166
- flag_value="sudo ",
2167
- help="Install with sudo privileges",
2168
- )
2169
- @busy_bar.run(index=1, immediate=True)
2170
- def update(extra, log, sudo):
2171
- """Update GPT4FREE package (Models, Providers etc)"""
2172
- if "none" in extra:
2173
- command = f"{sudo or ''}pip install --upgrade g4f"
2174
- else:
2175
- command = f"{sudo or ''}pip install --upgrade g4f[{','.join(extra)}]"
2176
- is_successful, response = this.run_system_command(command)
2177
- if log and is_successful:
2178
- click.echo(response.stdout)
2179
- version_string = this.run_system_command("pip show g4f")[1].stdout.split("\n")[
2180
- 1
2181
- ]
2182
- click.secho(f"GPT4FREE updated successfully - {version_string}", fg="cyan")
2183
-
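Note: the gpt4free `update` command shells out to pip, optionally with extras and a sudo prefix, then reads the installed version back from `pip show`. A sketch of that plumbing using subprocess directly (the real code routes this through `this.run_system_command`):

```python
import subprocess

def upgrade_g4f(extras=("all",), sudo: bool = False) -> str:
    """Upgrade g4f with the requested extras and return its version line."""
    prefix = "sudo " if sudo else ""
    target = "g4f" if "none" in extras else f"g4f[{','.join(extras)}]"
    subprocess.run(f"{prefix}pip install --upgrade {target}", shell=True, check=True)
    shown = subprocess.run("pip show g4f", shell=True, capture_output=True, text=True)
    return shown.stdout.splitlines()[1]  # the "Version: x.y.z" line
```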
2184
- @staticmethod
2185
- @click.command("list", context_settings=this.context_settings)
2186
- @click.argument("target")
2187
- @click.option("-w", "--working", is_flag=True, help="Restrict to working providers")
2188
- @click.option("-u", "--url", is_flag=True, help="Restrict to providers with url")
2189
- @click.option(
2190
- "-s", "--stream", is_flag=True, help="Restrict to providers supporting stream"
2191
- )
2192
- @click.option(
2193
- "-c",
2194
- "--context",
2195
- is_flag=True,
2196
- help="Restrict to providers supporing context natively",
2197
- )
2198
- @click.option(
2199
- "-35",
2200
- "--gpt35",
2201
- is_flag=True,
2202
- help="Restrict to providers supporting gpt3.5_turbo model",
2203
- )
2204
- @click.option(
2205
- "-4", "--gpt4", is_flag=True, help="Restrict to providers supporting gpt4 model"
2206
- )
2207
- @click.option(
2208
- "-se",
2209
- "--selenium",
2210
- is_flag=True,
2211
- help="Restrict to selenium dependent providers",
2212
- )
2213
- @click.option("-j", "--json", is_flag=True, help="Format output in json")
2214
- @click.help_option("-h", "--help")
2215
- def show(target, working, url, stream, context, gpt35, gpt4, selenium, json):
2216
- """List available models and providers"""
2217
- available_targets = ["models", "providers"]
2218
- assert (
2219
- target in available_targets
2220
- ), f"Target must be one of [{', '.join(available_targets)}]"
2221
- if target == "providers":
2222
- hunted_providers = list(
2223
- set(
2224
- map(
2225
- lambda provider: (
2226
- provider["name"] if all(list(provider.values())) else None
2227
- ),
2228
- this.g4f_providers_in_dict(
2229
- url=url,
2230
- working=working,
2231
- stream=stream,
2232
- context=context,
2233
- gpt35=gpt35,
2234
- gpt4=gpt4,
2235
- selenium=selenium,
2236
- ),
2237
- )
2238
- )
2239
- )
2240
- while None in hunted_providers:
2241
- hunted_providers.remove(None)
2242
-
2243
- hunted_providers.sort()
2244
- if json:
2245
- rich.print_json(data=dict(providers=hunted_providers), indent=4)
2246
-
2247
- else:
2248
- table = Table(show_lines=True)
2249
- table.add_column("No.", style="yellow", justify="center")
2250
- table.add_column("Provider", style="cyan")
2251
- for no, provider in enumerate(hunted_providers):
2252
- table.add_row(str(no), provider)
2253
- rich.print(table)
2254
- else:
2255
- models = dict(
2256
- Bard=[
2257
- "palm",
2258
- ],
2259
- HuggingFace=[
2260
- "h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
2261
- "h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
2262
- "h2ogpt-gm-oasst1-en-2048-open-llama-13b",
2263
- "gpt-neox-20b",
2264
- "oasst-sft-1-pythia-12b",
2265
- "oasst-sft-4-pythia-12b-epoch-3.5",
2266
- "santacoder",
2267
- "bloom",
2268
- "flan-t5-xxl",
2269
- ],
2270
- Anthropic=[
2271
- "claude-instant-v1",
2272
- "claude-v1",
2273
- "claude-v2",
2274
- ],
2275
- Cohere=[
2276
- "command-light-nightly",
2277
- "command-nightly",
2278
- ],
2279
- OpenAI=[
2280
- "code-davinci-002",
2281
- "text-ada-001",
2282
- "text-babbage-001",
2283
- "text-curie-001",
2284
- "text-davinci-002",
2285
- "text-davinci-003",
2286
- "gpt-3.5-turbo-16k",
2287
- "gpt-3.5-turbo-16k-0613",
2288
- "gpt-4-0613",
2289
- ],
2290
- Replicate=[
2291
- "llama13b-v2-chat",
2292
- "llama7b-v2-chat",
2293
- ],
2294
- )
2295
- for provider in webscout.g4f.Provider.__providers__:
2296
- if hasattr(provider, "models"):
2297
- models[provider.__name__] = provider.models
2298
- if json:
2299
- for key, value in models.items():
2300
- while None in value:
2301
- value.remove(None)
2302
- value.sort()
2303
- models[key] = value
2304
-
2305
- rich.print_json(data=models, indent=4)
2306
- else:
2307
- table = Table(show_lines=True)
2308
- table.add_column("No.", justify="center", style="white")
2309
- table.add_column("Base Provider", style="cyan")
2310
- table.add_column("Model(s)", style="yellow")
2311
- for count, provider_models in enumerate(models.items()):
2312
- models = provider_models[1]
2313
- models.sort()
2314
- table.add_row(str(count), provider_models[0], "\n".join(models))
2315
- rich.print(table)
2316
-
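Note: for the `providers` target, `show` keeps only entries whose requested attributes are all truthy, deduplicates the names and sorts them. Reduced to its essence with made-up provider records:

```python
providers = [
    {"name": "ProviderA", "working": True, "url": "https://a.example"},
    {"name": "ProviderB", "working": False, "url": "https://b.example"},
]

def hunt(provider_records: list) -> list:
    """Return the sorted names of providers whose attributes are all truthy."""
    names = {record["name"] for record in provider_records if all(record.values())}
    return sorted(names)

print(hunt(providers))  # ['ProviderA']
```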
2317
- @staticmethod
2318
- @click.command(context_settings=this.context_settings)
2319
- @click.argument("port", type=click.INT, required=False)
2320
- @click.option(
2321
- "-a", "--address", help="Host on this particular address", default="127.0.0.1"
2322
- )
2323
- @click.option("-d", "--debug", is_flag=True, help="Start server in debug mode")
2324
- @click.option(
2325
- "-o", "--open", is_flag=True, help="Proceed to the interface immediately"
2326
- )
2327
- @click.help_option("-h", "--help")
2328
- def gui(port, address, debug, open):
2329
- """Launch gpt4free web interface"""
2330
- from g4f.gui import run_gui
2331
-
2332
- port = port or 8000
2333
- t1 = thr(
2334
- target=run_gui,
2335
- args=(
2336
- address,
2337
- port,
2338
- debug,
2339
- ),
2340
- )
2341
- # run_gui(host=address, port=port, debug=debug)
2342
- t1.start()
2343
- if open:
2344
- click.launch(f"http://{address}:{port}")
2345
- t1.join()
2346
-
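Note: `gui` starts the blocking web UI in a worker thread so the browser can be opened from the main thread before joining. The same pattern with a placeholder `serve` function standing in for `g4f.gui.run_gui`:

```python
import threading
import webbrowser

def serve(host: str, port: int) -> None:
    """Placeholder for run_gui: a blocking file server on host:port."""
    import http.server, socketserver
    with socketserver.TCPServer((host, port), http.server.SimpleHTTPRequestHandler) as srv:
        srv.serve_forever()

def launch(host: str = "127.0.0.1", port: int = 8000, open_browser: bool = True) -> None:
    worker = threading.Thread(target=serve, args=(host, port), daemon=True)
    worker.start()
    if open_browser:
        webbrowser.open(f"http://{host}:{port}")
    worker.join()  # blocks, just like t1.join() in the removed command
```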
2347
- @staticmethod
2348
- @click.command(context_settings=this.context_settings)
2349
- @click.option(
2350
- "-t",
2351
- "--timeout",
2352
- type=click.INT,
2353
- help="Provider's response generation timeout",
2354
- default=20,
2355
- )
2356
- @click.option(
2357
- "-r",
2358
- "--thread",
2359
- type=click.INT,
2360
- help="Test n amount of providers at once",
2361
- default=5,
2362
- )
2363
- @click.option("-q", "--quiet", is_flag=True, help="Suppress progress bar")
2364
- @click.option(
2365
- "-j", "--json", is_flag=True, help="Stdout test results in json format"
2366
- )
2367
- @click.option("-d", "--dry-test", is_flag=True, help="Return previous test results")
2368
- @click.option(
2369
- "-b", "--best", is_flag=True, help="Stdout the fastest provider <name only>"
2370
- )
2371
- @click.option(
2372
- "-se",
2373
- "--selenium",
2374
- help="Test even selenium dependent providers",
2375
- is_flag=True,
2376
- )
2377
- @click.option(
2378
- "-dl",
2379
- "--disable-logging",
2380
- is_flag=True,
2381
- help="Disable logging",
2382
- )
2383
- @click.option("-y", "--yes", is_flag=True, help="Okay to all confirmations")
2384
- @click.help_option("-h", "--help")
2385
- def test(
2386
- timeout, thread, quiet, json, dry_test, best, selenium, disable_logging, yes
2387
- ):
2388
- """Test and save working providers"""
2389
- from webscout.g4f import TestProviders
2390
-
2391
- test = TestProviders(
2392
- test_at_once=thread,
2393
- quiet=quiet,
2394
- timeout=timeout,
2395
- selenium=selenium,
2396
- do_log=not disable_logging,
2397
- )
2398
- if best:
2399
- click.secho(test.best)
2400
- return
2401
- elif dry_test:
2402
- results = test.get_results(
2403
- run=False,
2404
- )
2405
- else:
2406
- if (
2407
- yes
2408
- or os.path.isfile(webscout.AIutel.results_path)
2409
- and click.confirm("Are you sure to run new test")
2410
- ):
2411
- results = test.get_results(run=True)
2412
- else:
2413
- results = test.get_results(
2414
- run=False,
2415
- )
2416
- if json:
2417
- rich.print_json(data=dict(results=results))
2418
- else:
2419
- table = Table(
2420
- title="G4f Providers Test Results",
2421
- show_lines=True,
2422
- )
2423
- table.add_column("No.", style="white", justify="center")
2424
- table.add_column("Provider", style="yellow", justify="left")
2425
- table.add_column("Response Time(s)", style="cyan")
2426
-
2427
- for no, provider in enumerate(results, start=1):
2428
- table.add_row(
2429
- str(no), provider["name"], str(round(provider["time"], 2))
2430
- )
2431
- rich.print(table)
2432
-
2433
-
2434
-
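Note: `test` times each provider and can report only the fastest one via `--best`. Stripped of the networking, the selection and ranking step looks like this (timings are made up):

```python
results = [
    {"name": "ProviderA", "time": 1.23},
    {"name": "ProviderB", "time": 0.87},
    {"name": "ProviderC", "time": 2.50},
]

fastest = min(results, key=lambda entry: entry["time"])
ranked = sorted(results, key=lambda entry: entry["time"])

print(fastest["name"])  # ProviderB
for no, entry in enumerate(ranked, start=1):
    print(no, entry["name"], round(entry["time"], 2))
```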
2435
- @staticmethod
2436
- @click.command(context_settings=this.context_settings)
2437
- @click.argument("prompt")
2438
- @click.option(
2439
- "-d",
2440
- "--directory",
2441
- type=click.Path(exists=True),
2442
- help="Folder for saving the images",
2443
- default=os.getcwd(),
2444
- )
2445
- @click.option(
2446
- "-a",
2447
- "--amount",
2448
- type=click.IntRange(1, 100),
2449
- help="Total images to be generated",
2450
- default=1,
2451
- )
2452
- @click.option("-n", "--name", help="Name for the generated images")
2453
- @click.option(
2454
- "-t",
2455
- "--timeout",
2456
- type=click.IntRange(5, 300),
2457
- help="Http request timeout in seconds",
2458
- )
2459
- @click.option("-p", "--proxy", help="Http request proxy")
2460
- @click.option(
2461
- "-nd",
2462
- "--no-additives",
2463
- is_flag=True,
2464
- help="Disable prompt altering for effective image generation",
2465
- )
2466
- @click.option("-q", "--quiet", is_flag=True, help="Suppress progress bar")
2467
- @click.help_option("-h", "--help")
2468
- def generate_image(
2469
- prompt, directory, amount, name, timeout, proxy, no_additives, quiet
2470
- ):
2471
- """Generate images with pollinations.ai"""
2472
- with Progress() as progress:
2473
- task = progress.add_task(
2474
- f"[cyan]Generating ...[{amount}]",
2475
- total=amount,
2476
- visible=quiet == False,
2477
- )
2478
-
2479
-
2480
-
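Note: the `generate_image` body is only partially visible in this hunk, but its progress reporting follows the standard rich pattern: add a task sized to `amount`, then advance it once per image. A runnable sketch with a dummy sleep standing in for the actual image request:

```python
import time
from rich.progress import Progress

amount, quiet = 3, False
with Progress() as progress:
    task = progress.add_task(
        f"[cyan]Generating {amount} image(s)", total=amount, visible=not quiet
    )
    for _ in range(amount):
        time.sleep(0.2)  # placeholder for the image download step
        progress.advance(task)
```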
2481
- class Utils:
2482
- """Utilities command"""
2483
-
2484
- @staticmethod
2485
- @click.command(context_settings=this.context_settings)
2486
- @click.argument("source", required=False)
2487
- @click.option(
2488
- "-d", "--dev", is_flag=True, help="Update from version control (development)"
2489
- )
2490
- @click.option(
2491
- "-s",
2492
- "--sudo",
2493
- is_flag=True,
2494
- flag_value="sudo ",
2495
- help="Install with sudo privileges",
2496
- )
2497
- @click.help_option("-h", "--help")
2498
- @busy_bar.run(index=1, immediate=True)
2499
- def update(source, dev, sudo):
2500
- """Install latest version of webscout"""
2501
- if dev:
2502
- source = "git+" + webscout.__repo__ + ".git"
2503
- source = "webscout" if source is None else source
2504
- assert (
2505
- "webscout" in source or source == "."
2506
- ), f"Cannot update webscout from the source '{source}'"
2507
- click.secho(
2508
- f"[*] Updating from '{'pip' if source=='webscout' else source}'",
2509
- fg="yellow",
2510
- )
2511
- this.run_system_command(f"{sudo or ''}pip install --upgrade {source}")
2512
- response = this.run_system_command("pip show webscout")[1]
2513
- click.secho(response.stdout)
2514
- click.secho("Congratulations! webscout updated successfully.", fg="cyan")
2515
-
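Note: `Utils.update` picks its pip target from the `--dev` flag (git source) or the PyPI name, sanity-checks it, then upgrades in place. A sketch of that selection logic with a placeholder repository URL:

```python
import subprocess

def self_update(source=None, dev=False, repo="https://github.com/example/webscout"):
    """Upgrade webscout from PyPI, a git source, or the current directory."""
    if dev:
        source = "git+" + repo + ".git"
    source = "webscout" if source is None else source
    assert (
        "webscout" in source or source == "."
    ), f"Cannot update webscout from the source '{source}'"
    subprocess.run(f"pip install --upgrade {source}", shell=True, check=True)
```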
2516
- @staticmethod
2517
- @click.command(context_settings=this.context_settings)
2518
- @click.option("-w", "--whole", is_flag=True, help="Stdout whole json info")
2519
- @click.option(
2520
- "-v", "--version", is_flag=True, help="Stdout latest version name only"
2521
- )
2522
- @click.option("-b", "--body", is_flag=True, help="Stdout changelog info only")
2523
- @click.option(
2524
- "-e", "--executable", is_flag=True, help="Stdout url to binary for your system"
2525
- )
2526
- @click.help_option("-h", "--help")
2527
- def latest(whole, version, body, executable):
2528
- """Check webscout latest version info"""
2529
- from webscout.utils import Updates
2530
-
2531
- update = Updates()
2532
- if whole:
2533
- rich.print_json(data=update.latest(whole=True))
2534
-
2535
- elif version:
2536
- rich.print(update.latest_version)
2537
- elif body:
2538
- rich.print(Markdown(update.latest()["body"]))
2539
- elif executable:
2540
- rich.print(update.executable())
2541
- else:
2542
- rich.print_json(data=update.latest())
2543
-
2544
-
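Note: `latest` defers to `webscout.utils.Updates`; the general shape of such a check against GitHub's releases API looks like the following (the repository path is a placeholder, not the package's actual endpoint):

```python
import requests

def latest_release(repo: str = "example/webscout") -> dict:
    """Return the latest release tag and changelog body for a GitHub repo."""
    response = requests.get(
        f"https://api.github.com/repos/{repo}/releases/latest", timeout=20
    )
    response.raise_for_status()
    data = response.json()
    return {"version": data["tag_name"], "body": data["body"]}
```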
2545
- def make_commands():
2546
- """Make webscout chained commands"""
2547
-
2548
- # generate
2549
- EntryGroup.webai_.add_command(ChatGenerate.generate)
2550
-
2551
- # webai
2552
- EntryGroup.webai_.add_command(Chatwebai.webai)
2553
-
2554
- # utils
2555
- EntryGroup.utils.add_command(Utils.update)
2556
- EntryGroup.utils.add_command(Utils.latest)
2557
-
2558
- # gpt4free
2559
- EntryGroup.gpt4free.add_command(Gpt4free.version)
2560
- EntryGroup.gpt4free.add_command(Gpt4free.update)
2561
- EntryGroup.gpt4free.add_command(Gpt4free.show)
2562
- EntryGroup.gpt4free.add_command(Gpt4free.gui)
2563
- EntryGroup.gpt4free.add_command(Gpt4free.test)
2564
-
2565
- # Awesome
2566
- EntryGroup.awesome.add_command(Awesome.add)
2567
- EntryGroup.awesome.add_command(Awesome.delete)
2568
- EntryGroup.awesome.add_command(Awesome.search)
2569
- EntryGroup.awesome.add_command(Awesome.update)
2570
- EntryGroup.awesome.add_command(Awesome.whole)
2571
-
2572
-
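Note: `make_commands()` simply attaches the command functions defined above to their click groups. A minimal self-contained version of that wiring (toy names, not the package's entry points):

```python
import click

@click.group()
def cli():
    """Toy entry group standing in for EntryGroup.webai_."""

@click.command()
@click.argument("prompt")
def generate(prompt):
    """Toy stand-in for ChatGenerate.generate."""
    click.echo(f"would generate a response for: {prompt}")

cli.add_command(generate)

if __name__ == "__main__":
    cli()
```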
2573
- # @this.handle_exception
2574
- def main(*args):
2575
- """Fireup console programmically"""
2576
- console.print(f"[bold green]{figlet_format('WebAI')}[/]\n", justify="center")
2577
- sys.argv += list(args)
2578
- args = sys.argv
2579
- if len(args) == 1:
2580
- sys.argv.insert(1, "webai") # Just a hack to make default command
2581
- try:
2582
- make_commands()
2583
- return EntryGroup.webai_()
2584
- except Exception as e:
2585
- logging.error(this.getExc(e))
2586
- sys.exit(1)
2587
-
2588
-
2589
- if __name__ == "__main__":
2590
- main()