webscout-1.3.6-py3-none-any.whl → webscout-1.3.8-py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.

Potentially problematic release.

This version of webscout might be problematic.

webscout/webai.py CHANGED
@@ -1,2361 +1,2420 @@
1
- import webscout
2
- import click
3
- import cmd
4
- import logging
5
- import os
6
- import sys
7
- import clipman
8
- import re
9
- import rich
10
- import getpass
11
- import json
12
- import re
13
- import sys
14
- import datetime
15
- import time
16
- import subprocess
17
- from threading import Thread as thr
18
- from functools import wraps
19
- from rich.panel import Panel
20
- from rich.style import Style
21
- from rich.markdown import Markdown
22
- from rich.console import Console
23
- from rich.live import Live
24
- from rich.table import Table
25
- from rich.prompt import Prompt
26
- from rich.progress import Progress
27
- from typing import Iterator
28
- from webscout.AIutel import Optimizers
29
- from webscout.AIutel import default_path
30
- from webscout.AIutel import AwesomePrompts
31
- from webscout.AIutel import RawDog
32
- from webscout import available_providers
33
- from colorama import Fore
34
- from colorama import init as init_colorama
35
- from dotenv import load_dotenv
36
- import g4f
37
-
38
- import webscout.AIutel
39
-
40
- init_colorama(autoreset=True)
41
-
42
- load_dotenv() # loads .env variables
43
-
44
- logging.basicConfig(
45
- format="%(asctime)s - %(levelname)s : %(message)s ",
46
- datefmt="%H:%M:%S",
47
- level=logging.INFO,
48
- )
49
-
50
- try:
51
- clipman.init()
52
- except Exception as e:
53
- logging.debug(f"Dropping clipman in favor of pyperclip - {(e)}")
54
- import pyperclip
55
-
56
- clipman.set = pyperclip.copy
57
- clipman.get = pyperclip.paste
58
-
59
-
60
- class this:
61
- """Console's common variables"""
62
-
63
- rich_code_themes = ["monokai", "paraiso-dark", "igor", "vs", "fruity", "xcode"]
64
-
65
- default_provider = "phind"
66
-
67
- getExc = lambda e: e.args[1] if len(e.args) > 1 else str(e)
68
-
69
- context_settings = dict(auto_envvar_prefix="Webscout")
70
-
71
- """Console utils"""
72
-
73
- @staticmethod
74
- def run_system_command(
75
- command: str, exit_on_error: bool = True, stdout_error: bool = True
76
- ):
77
- """Run commands against system
78
- Args:
79
- command (str): shell command
80
- exit_on_error (bool, optional): Exit on error. Defaults to True.
81
- stdout_error (bool, optional): Print out the error. Defaults to True.
82
-
83
- Returns:
84
- tuple : (is_successfull, object[Exception|Subprocess.run])
85
- """
86
- try:
87
- # Run the command and capture the output
88
- result = subprocess.run(
89
- command,
90
- shell=True,
91
- check=True,
92
- text=True,
93
- stdout=subprocess.PIPE,
94
- stderr=subprocess.PIPE,
95
- )
96
- return (True, result)
97
- except subprocess.CalledProcessError as e:
98
- # Handle error if the command returns a non-zero exit code
99
- if stdout_error:
100
- click.secho(f"Error Occurred: while running '{command}'", fg="yellow")
101
- click.secho(e.stderr, fg="red")
102
- sys.exit(e.returncode) if exit_on_error else None
103
- return (False, e)
104
-
105
- def g4f_providers_in_dict(
106
- url=True,
107
- working=True,
108
- stream=False,
109
- context=False,
110
- gpt35=False,
111
- gpt4=False,
112
- selenium=False,
113
- ):
114
- from webscout.g4f import GPT4FREE
115
- import g4f.Provider.selenium as selenium_based
116
-
117
- selenium_based_providers: list = dir(selenium_based)
118
- hunted_providers = []
119
- required_attrs = (
120
- "url",
121
- "working",
122
- "supports_gpt_35_turbo",
123
- "supports_gpt_4",
124
- "supports_stream",
125
- "supports_message_history",
126
- )
127
-
128
- def sanitize_provider(provider: object):
129
- for attr in required_attrs:
130
- if not hasattr(provider, attr):
131
- setattr(provider, attr, False)
132
-
133
- return provider
134
-
135
- for provider_name, provider_class in g4f.Provider.__map__.items():
136
- provider = sanitize_provider(provider_class)
137
- provider_meta = dict(name=provider_name)
138
- if url:
139
- provider_meta["url"] = provider.url
140
- if working:
141
- provider_meta["working"] = provider.working
142
- if stream:
143
- provider_meta["stream"] = provider.supports_stream
144
- if context:
145
- provider_meta["context"] = provider.supports_message_history
146
- if gpt35:
147
- provider_meta["gpt35_turbo"] = provider.supports_gpt_35_turbo
148
- if gpt4:
149
- provider_meta["gpt4"] = provider.supports_gpt_4
150
- if selenium:
151
- try:
152
- selenium_based_providers.index(provider_meta["name"])
153
- value = True
154
- except ValueError:
155
- value = False
156
- provider_meta["non_selenium"] = value
157
-
158
- hunted_providers.append(provider_meta)
159
-
160
- return hunted_providers
161
-
162
- @staticmethod
163
- def stream_output(
164
- iterable: Iterator,
165
- title: str = "",
166
- is_markdown: bool = True,
167
- style: object = Style(),
168
- transient: bool = False,
169
- title_generator: object = None,
170
- title_generator_params: dict = {},
171
- code_theme: str = "monokai",
172
- vertical_overflow: str = "ellipsis",
173
- ) -> None:
174
- """Stdout streaming response
175
-
176
- Args:
177
- iterable (Iterator): Iterator containing contents to be stdout
178
- title (str, optional): Content title. Defaults to ''.
179
- is_markdown (bool, optional): Flag for markdown content. Defaults to True.
180
- style (object, optional): `rich.style` instance. Defaults to Style().
181
- transient (bool, optional): Flag for transient. Defaults to False.
182
- title_generator (object, optional): Function for generating title. Defaults to None.
183
- title_generator_params (dict, optional): Kwargs for `title_generator` function. Defaults to {}.
184
- code_theme (str, optional): Theme for styling codes. Defaults to `monokai`
185
- vertical_overflow (str, optional): Vertical overflow behaviour on content display. Defaultss to ellipsis.
186
- """
187
- render_this = ""
188
- with Live(
189
- render_this,
190
- transient=transient,
191
- refresh_per_second=8,
192
- vertical_overflow=vertical_overflow,
193
- ) as live:
194
- for entry in iterable:
195
- render_this += entry
196
- live.update(
197
- Panel(
198
- (
199
- Markdown(entry, code_theme=code_theme)
200
- if is_markdown
201
- else entry
202
- ),
203
- title=title,
204
- style=style,
205
- )
206
- )
207
- if title_generator:
208
- title = title_generator(**title_generator_params)
209
- live.update(
210
- Panel(
211
- Markdown(entry, code_theme=code_theme) if is_markdown else entry,
212
- title=title,
213
- style=style,
214
- )
215
- )
216
-
217
- @staticmethod
218
- def clear_history_file(file_path, is_true):
219
- """When --new flag is True"""
220
- if is_true and os.path.isfile(file_path):
221
- try:
222
- os.remove(file_path)
223
- except Exception as e:
224
- logging.error(
225
- f"Failed to clear previous chat history - {this.getExc(e)}"
226
- )
227
-
228
- @staticmethod
229
- def handle_exception(func):
230
- """Safely handles cli-based exceptions and exit status-codes"""
231
-
232
- @wraps(func)
233
- def decorator(*args, **kwargs):
234
- try:
235
- exit_status = func(*args, **kwargs)
236
- except Exception as e:
237
- exit_status = False
238
- logging.error(this.getExc(e))
239
- finally:
240
- sys.exit(0 if exit_status not in (False, "") else 1)
241
-
242
- return decorator
243
-
244
-
245
- class busy_bar:
246
- querying = None
247
- __spinner = (
248
- (),
249
- ("-", "\\", "|", "/"),
250
- (
251
- "█■■■■",
252
- "■█■■■",
253
- "■■█■■",
254
- "■■■█■",
255
- "■■■■█",
256
- ),
257
- ("⣾ ", "⣽ ", "⣻ ", "⢿ ", "⡿ ", "⣟ ", "⣯ ", "⣷ "),
258
- )
259
- spin_index = 0
260
- sleep_time = 0.1
261
-
262
- @classmethod
263
- def __action(
264
- cls,
265
- ):
266
- while cls.querying:
267
- for spin in cls.__spinner[cls.spin_index]:
268
- print(" " + spin, end="\r", flush=True)
269
- if not cls.querying:
270
- break
271
- time.sleep(cls.sleep_time)
272
-
273
- @classmethod
274
- def start_spinning(
275
- cls,
276
- ):
277
- try:
278
- cls.querying = True
279
- t1 = thr(
280
- target=cls.__action,
281
- args=(),
282
- )
283
- t1.start()
284
- except Exception as e:
285
- cls.querying = False
286
- logging.debug(this.getExc(e))
287
- t1.join()
288
-
289
- @classmethod
290
- def stop_spinning(cls):
291
- """Stop displaying busy-bar"""
292
- if cls.querying:
293
- cls.querying = False
294
- time.sleep(cls.sleep_time)
295
-
296
- @classmethod
297
- def run(cls, help: str = "Exception", index: int = None, immediate: bool = False):
298
- """Handle function exceptions safely why showing busy bar
299
-
300
- Args:
301
- help (str, optional): Message to be shown incase of an exception. Defaults to ''.
302
- index (int, optional): Busy bars spin index. Defaults to `default`.
303
- immediate (bool, optional): Start the spinning immediately. Defaults to False.
304
- """
305
- if isinstance(index, int):
306
- cls.spin_index = index
307
-
308
- def decorator(func):
309
- @wraps(func) # Preserves function metadata
310
- def main(*args, **kwargs):
311
- try:
312
- if immediate:
313
- cls.start_spinning()
314
- return func(*args, **kwargs)
315
- except KeyboardInterrupt:
316
- cls.stop_spinning()
317
- return
318
- except EOFError:
319
- cls.querying = False
320
- sys.exit(logging.info("Stopping program"))
321
- except Exception as e:
322
- logging.error(f"{help} - {this.getExc(e)}")
323
- finally:
324
- cls.stop_spinning()
325
-
326
- return main
327
-
328
- return decorator
329
-
330
-
331
- class Main(cmd.Cmd):
332
- intro = (
333
- "Welcome to webai Chat in terminal. "
334
- "Type 'help' or 'h' for usage info.\n"
335
- )
336
-
337
- def __init__(
338
- self,
339
- max_tokens,
340
- temperature,
341
- top_k,
342
- top_p,
343
- model,
344
- auth,
345
- timeout,
346
- disable_conversation,
347
- filepath,
348
- update_file,
349
- intro,
350
- history_offset,
351
- awesome_prompt,
352
- proxy_path,
353
- provider,
354
- quiet=False,
355
- chat_completion=False,
356
- ignore_working=False,
357
- rawdog=False,
358
- internal_exec=False,
359
- confirm_script=False,
360
- interpreter="python",
361
- *args,
362
- **kwargs,
363
- ):
364
- super().__init__(*args, **kwargs)
365
- if proxy_path:
366
- with open(proxy_path) as fh:
367
- proxies = json.load(fh)
368
- else:
369
- proxies = {}
370
-
371
- try:
372
- getOr = lambda option, default: option if option else default
373
-
374
- if rawdog:
375
-
376
- self.RawDog = RawDog(
377
- quiet=quiet,
378
- internal_exec=internal_exec,
379
- confirm_script=confirm_script,
380
- interpreter=interpreter,
381
- prettify=True,
382
- )
383
- intro = self.RawDog.intro_prompt
384
- getpass.getuser = lambda: "RawDog"
385
-
386
- if provider == "g4fauto":
387
- from webscout.g4f import TestProviders
388
-
389
- test = TestProviders(quiet=quiet, timeout=timeout)
390
- g4fauto = test.best if ignore_working else test.auto
391
- if isinstance(g4fauto, str):
392
- provider = "g4fauto+" + g4fauto
393
- from webscout.g4f import GPT4FREE
394
-
395
- self.bot = GPT4FREE(
396
- provider=g4fauto,
397
- auth=auth,
398
- max_tokens=max_tokens,
399
- model=model,
400
- chat_completion=chat_completion,
401
- ignore_working=ignore_working,
402
- timeout=timeout,
403
- intro=intro,
404
- filepath=filepath,
405
- update_file=update_file,
406
- proxies=proxies,
407
- history_offset=history_offset,
408
- act=awesome_prompt,
409
- )
410
- else:
411
- raise Exception(
412
- "No working g4f provider found. "
413
- "Consider running 'webscout gpt4free test -y' first"
414
- )
415
-
416
- elif provider == "leo":
417
- from webscout.AI import LEO
418
-
419
- self.bot = LEO(
420
- is_conversation=disable_conversation,
421
- max_tokens=max_tokens,
422
- temperature=temperature,
423
- top_k=top_k,
424
- top_p=top_p,
425
- model=getOr(model, "llama-2-13b-chat"),
426
- brave_key=getOr(auth, "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"),
427
- timeout=timeout,
428
- intro=intro,
429
- filepath=filepath,
430
- update_file=update_file,
431
- proxies=proxies,
432
- history_offset=history_offset,
433
- act=awesome_prompt,
434
- )
435
-
436
- elif provider == "openai":
437
- assert auth, (
438
- "OpenAI's API-key is required. " "Use the flag `--key` or `-k`"
439
- )
440
- from webscout.AI import OPENAI
441
-
442
- self.bot = OPENAI(
443
- api_key=auth,
444
- is_conversation=disable_conversation,
445
- max_tokens=max_tokens,
446
- temperature=temperature,
447
- presence_penalty=top_p,
448
- frequency_penalty=top_k,
449
- top_p=top_p,
450
- model=getOr(model, model),
451
- timeout=timeout,
452
- intro=intro,
453
- filepath=filepath,
454
- update_file=update_file,
455
- proxies=proxies,
456
- history_offset=history_offset,
457
- act=awesome_prompt,
458
- )
459
-
460
- elif provider == "opengpt":
461
- from webscout.AI import OPENGPT
462
-
463
- self.bot = OPENGPT(
464
- is_conversation=disable_conversation,
465
- max_tokens=max_tokens,
466
- timeout=timeout,
467
- intro=intro,
468
- filepath=filepath,
469
- update_file=update_file,
470
- proxies=proxies,
471
- history_offset=history_offset,
472
- act=awesome_prompt,
473
- )
474
- elif provider == "groq":
475
- assert auth, (
476
- "GROQ's API-key is required. " "Use the flag `--key` or `-k`"
477
- )
478
- from webscout.AI import GROQ
479
-
480
-
481
- self.bot = GROQ(
482
- api_key=auth,
483
- is_conversation=disable_conversation,
484
- max_tokens=max_tokens,
485
- temperature=temperature,
486
- presence_penalty=top_p,
487
- frequency_penalty=top_k,
488
- top_p=top_p,
489
- model=getOr(model, "mixtral-8x7b-32768"),
490
- timeout=timeout,
491
- intro=intro,
492
- filepath=filepath,
493
- update_file=update_file,
494
- proxies=proxies,
495
- history_offset=history_offset,
496
- act=awesome_prompt,
497
- )
498
- elif provider == "sean":
499
- from webscout.AI import Sean
500
-
501
- self.bot = Sean(
502
- is_conversation=disable_conversation,
503
- max_tokens=max_tokens,
504
- timeout=timeout,
505
- intro=intro,
506
- filepath=filepath,
507
- update_file=update_file,
508
- proxies=proxies,
509
- history_offset=history_offset,
510
- act=awesome_prompt,
511
- )
512
- elif provider == "cohere":
513
- assert auth, (
514
- "Cohere's API-key is required. Use the flag `--key` or `-k`"
515
- )
516
- from webscout.AI import Cohere
517
- self.bot = Cohere(
518
- api_key=auth,
519
- is_conversation=disable_conversation,
520
- max_tokens=max_tokens,
521
- temperature=temperature,
522
- top_k=top_k,
523
- top_p=top_p,
524
- model=getOr(model, "command-r-plus"),
525
- timeout=timeout,
526
- intro=intro,
527
- filepath=filepath,
528
- update_file=update_file,
529
- proxies=proxies,
530
- history_offset=history_offset,
531
- act=awesome_prompt,
532
- )
533
- elif provider == "reka":
534
- from webscout.AI import REKA
535
-
536
- self.bot = REKA(
537
- api_key=auth,
538
- is_conversation=disable_conversation,
539
- max_tokens=max_tokens,
540
- timeout=timeout,
541
- intro=intro,
542
- filepath=filepath,
543
- update_file=update_file,
544
- proxies=proxies,
545
- history_offset=history_offset,
546
- act=awesome_prompt,
547
- model=getOr(model, "reka-core"),
548
- # quiet=quiet,
549
- )
550
-
551
- elif provider == "koboldai":
552
- from webscout.AI import KOBOLDAI
553
-
554
- self.bot = KOBOLDAI(
555
- is_conversation=disable_conversation,
556
- max_tokens=max_tokens,
557
- temperature=temperature,
558
- top_p=top_p,
559
- timeout=timeout,
560
- intro=intro,
561
- filepath=filepath,
562
- update_file=update_file,
563
- proxies=proxies,
564
- history_offset=history_offset,
565
- act=awesome_prompt,
566
- )
567
-
568
- elif provider == "gemini":
569
- from webscout.AI import GEMINI
570
-
571
- assert auth, (
572
- "Path to gemini.google.com.cookies.json file is required. "
573
- "Use the flag `--key` or `-k`"
574
- )
575
- self.bot = GEMINI(
576
- cookie_file=auth,
577
- proxy=proxies,
578
- timeout=timeout,
579
- )
580
-
581
- elif provider == "phind":
582
- from webscout.AI import PhindSearch
583
-
584
- self.bot = PhindSearch(
585
- is_conversation=disable_conversation,
586
- max_tokens=max_tokens,
587
- timeout=timeout,
588
- intro=intro,
589
- filepath=filepath,
590
- update_file=update_file,
591
- proxies=proxies,
592
- history_offset=history_offset,
593
- act=awesome_prompt,
594
- model=getOr(model, "Phind Model"),
595
- quiet=quiet,
596
- )
597
-
598
- elif provider == "blackboxai":
599
-
600
- from webscout.AI import BLACKBOXAI
601
-
602
- self.bot = BLACKBOXAI(
603
- is_conversation=disable_conversation,
604
- max_tokens=max_tokens,
605
- timeout=timeout,
606
- intro=intro,
607
- filepath=filepath,
608
- update_file=update_file,
609
- proxies=proxies,
610
- history_offset=history_offset,
611
- act=awesome_prompt,
612
- )
613
-
614
-
615
- elif provider in webscout.gpt4free_providers:
616
- from webscout.g4f import GPT4FREE
617
-
618
- self.bot = GPT4FREE(
619
- provider=provider,
620
- is_conversation=disable_conversation,
621
- auth=auth,
622
- max_tokens=max_tokens,
623
- model=model,
624
- chat_completion=chat_completion,
625
- ignore_working=ignore_working,
626
- timeout=timeout,
627
- intro=intro,
628
- filepath=filepath,
629
- update_file=update_file,
630
- proxies=proxies,
631
- history_offset=history_offset,
632
- act=awesome_prompt,
633
- )
634
-
635
-
636
- elif provider == "perplexity":
637
- from webscout.AI import PERPLEXITY
638
-
639
- self.bot = PERPLEXITY(
640
- is_conversation=disable_conversation,
641
- max_tokens=max_tokens,
642
- timeout=timeout,
643
- intro=intro,
644
- filepath=filepath,
645
- update_file=update_file,
646
- proxies=proxies,
647
- history_offset=history_offset,
648
- act=awesome_prompt,
649
- quiet=quiet,
650
- )
651
-
652
- else:
653
- raise NotImplementedError(
654
- f"The provider `{provider}` is not yet implemented."
655
- )
656
-
657
- except Exception as e:
658
- logging.error(this.getExc(e))
659
- click.secho("Quitting", fg="red")
660
- sys.exit(1)
661
- self.prettify = True
662
- self.color = "cyan"
663
- self.code_theme = "monokai"
664
- self.quiet = quiet
665
- self.vertical_overflow = "ellipsis"
666
- self.disable_stream = False
667
- self.provider = provider
668
- self.disable_coloring = False
669
- self.internal_exec = internal_exec
670
- self.confirm_script = confirm_script
671
- self.interpreter = interpreter
672
- self.rawdog = rawdog
673
- self.__init_time = time.time()
674
- self.__start_time = time.time()
675
- self.__end_time = time.time()
676
-
677
- @property
678
- def prompt(self):
679
- current_time = datetime.datetime.now().strftime("%H:%M:%S")
680
-
681
- def find_range(start, end, hms: bool = False):
682
- in_seconds = round(end - start, 1)
683
- return (
684
- str(datetime.timedelta(seconds=in_seconds)).split(".")[0].zfill(8)
685
- if hms
686
- else in_seconds
687
- )
688
- if not self.disable_coloring:
689
- cmd_prompt = (
690
- f"╭─[`{Fore.GREEN}{getpass.getuser().capitalize()}@webai]`"
691
- f"(`{Fore.YELLOW}{self.provider})`"
692
- f"~[`{Fore.LIGHTWHITE_EX}⏰{Fore.MAGENTA}{current_time}-`"
693
- f"{Fore.LIGHTWHITE_EX}💻{Fore.BLUE}{find_range(self.__init_time, time.time(), True)}-`"
694
- f"{Fore.LIGHTWHITE_EX}⚡️{Fore.RED}{find_range(self.__start_time, self.__end_time)}s]`"
695
- f"\n╰─>"
696
- )
697
- whitelist = ["[", "]", "~", "-", "(", ")"]
698
- for character in whitelist:
699
- cmd_prompt = cmd_prompt.replace(character + "`", Fore.RESET + character)
700
- return cmd_prompt
701
-
702
- else:
703
- return (
704
- f"╭─[{getpass.getuser().capitalize()}@webscout]({self.provider})"
705
- f"~[⏰{current_time}"
706
- f"-💻{find_range(self.__init_time, time.time(), True)}"
707
- f"-⚡️{find_range(self.__start_time, self.__end_time)}s]"
708
- f"~[{current_time}"
709
- f"-💻{find_range(self.__init_time, time.time(), True)}"
710
- f"-⚡️{find_range(self.__start_time, self.__end_time)}s]"
711
- "\n╰─>"
712
- )
713
-
714
- def output_bond(
715
- self,
716
- title: str,
717
- text: str,
718
- color: str = "cyan",
719
- frame: bool = True,
720
- is_json: bool = False,
721
- ):
722
- """Print prettified output
723
-
724
- Args:
725
- title (str): Title
726
- text (str): Info to be printed
727
- color (str, optional): Output color. Defaults to "cyan".
728
- frame (bool, optional): Add frame. Defaults to True.
729
- """
730
- if is_json:
731
- text = f"""
732
- ```json
733
- {json.dumps(text,indent=4)}
734
- ```
735
- """
736
- rich.print(
737
- Panel(
738
- Markdown(text, code_theme=self.code_theme),
739
- title=title.title(),
740
- style=Style(
741
- color=color,
742
- frame=frame,
743
- ),
744
- ),
745
- )
746
- if is_json and click.confirm("Do you wish to save this"):
747
- default_path = title + ".json"
748
- save_to = click.prompt(
749
- "Enter path to save to", default=default_path, type=click.STRING
750
- )
751
- with open(save_to, "a") as fh:
752
- json.dump(text, fh, indent=4)
753
- click.secho(f"Successfuly saved to `{save_to}`", fg="green")
754
-
755
- def do_h(self, line):
756
- """Show help info in tabular form"""
757
- table = Table(
758
- title="Help info",
759
- show_lines=True,
760
- )
761
- table.add_column("No.", style="white", justify="center")
762
- table.add_column("Command", style="yellow", justify="left")
763
- table.add_column("Function", style="cyan")
764
- command_methods = [
765
- getattr(self, method)
766
- for method in dir(self)
767
- if callable(getattr(self, method)) and method.startswith("do_")
768
- ]
769
- command_methods.append(self.default)
770
- command_methods.reverse()
771
- for no, method in enumerate(command_methods):
772
- table.add_row(
773
- str(no + 1),
774
- method.__name__[3:] if not method == self.default else method.__name__,
775
- method.__doc__,
776
- )
777
- Console().print(table)
778
-
779
- @busy_bar.run("Settings saved")
780
- def do_settings(self, line):
781
- """Configure settings"""
782
- self.prettify = click.confirm(
783
- "\nPrettify markdown response", default=self.prettify
784
- )
785
- busy_bar.spin_index = click.prompt(
786
- "Spin bar index [0: None, 1:/, 2:■█■■■, 3:⣻]",
787
- default=busy_bar.spin_index,
788
- type=click.IntRange(0, 3),
789
- )
790
- self.color = click.prompt(
791
- "Response stdout font color", default=self.color or "white"
792
- )
793
- self.code_theme = Prompt.ask(
794
- "Enter code_theme", choices=this.rich_code_themes, default=self.code_theme
795
- )
796
- self.vertical_overflow = Prompt.ask(
797
- "\nVertical overflow behaviour",
798
- choices=["ellipsis", "visible", "crop"],
799
- default=self.vertical_overflow,
800
- )
801
- self.bot.max_tokens_to_sample = click.prompt(
802
- "\nMaximum tokens to sample",
803
- type=click.INT,
804
- default=self.bot.max_tokens_to_sample,
805
- )
806
- self.bot.temperature = click.prompt(
807
- "Temperature", type=click.FLOAT, default=self.bot.temperature
808
- )
809
- self.bot.top_k = click.prompt(
810
- "Chance of topic being repeated, top_k",
811
- type=click.FLOAT,
812
- default=self.bot.top_k,
813
- )
814
- self.bot.top_p = click.prompt(
815
- "Sampling threshold during inference time, top_p",
816
- type=click.FLOAT,
817
- default=self.bot.top_p,
818
- )
819
- self.bot.model = click.prompt(
820
- "Model name", type=click.STRING, default=self.bot.model
821
- )
822
-
823
- @busy_bar.run(help="System error")
824
- def do_copy_this(self, line):
825
- """Copy last response
826
- Usage:
827
- copy_this:
828
- text-copied = {whole last-response}
829
- copy_this code:
830
- text-copied = {All codes in last response}
831
- """
832
- if self.bot.last_response:
833
- global last_response
834
- last_response = self.bot.get_message(self.bot.last_response)
835
- if not "code" in line:
836
- clipman.set(last_response)
837
- click.secho("Last response copied successfully!", fg="cyan")
838
- return
839
-
840
- # Copies just code
841
- sanitized_codes = []
842
- code_blocks = re.findall(r"```.*?```", last_response, re.DOTALL)
843
- for code_block in code_blocks:
844
- new_code_block = re.sub(
845
- "^```.*$", "", code_block.strip(), flags=re.MULTILINE
846
- )
847
- if bool(new_code_block.strip()):
848
- sanitized_codes.append(new_code_block)
849
- if sanitized_codes:
850
- if len(sanitized_codes) > 1:
851
- if not click.confirm("Do you wish to copy all codes"):
852
- for index, code in enumerate(sanitized_codes):
853
- rich.print(
854
- Panel(
855
- Markdown(
856
- code_blocks[index], code_theme=self.code_theme
857
- ),
858
- title=f"Index : {index}",
859
- title_align="left",
860
- )
861
- )
862
-
863
- clipman.set(
864
- sanitized_codes[
865
- click.prompt(
866
- "Enter code index",
867
- type=click.IntRange(0, len(sanitized_codes) - 1),
868
- )
869
- ]
870
- )
871
- click.secho("Code copied successfully", fg="cyan")
872
- else:
873
- clipman.set("\n\n".join(sanitized_codes))
874
- click.secho(
875
- f"All {len(sanitized_codes)} codes copied successfully!",
876
- fg="cyan",
877
- )
878
- else:
879
- clipman.set(sanitized_codes[0])
880
- click.secho("Code copied successfully!", fg="cyan")
881
- else:
882
- click.secho("No code found in the last response!", fg="red")
883
- else:
884
- click.secho("Chat with AI first.", fg="yellow")
885
-
886
- @busy_bar.run()
887
- def do_with_copied(self, line):
888
- """Attach last copied text to the prompt
889
- Usage:
890
- from_copied:
891
- prompt = {text-copied}
892
- from_copied Debug this code:
893
- prompt = Debug this code {newline} {text-copied}
894
- """
895
- issued_prompt = (
896
- f"{line}\n{clipman.get()}" if bool(line.strip()) else clipman.get()
897
- )
898
- click.secho(issued_prompt, fg="yellow")
899
- if click.confirm("Do you wish to proceed"):
900
- self.default(issued_prompt)
901
-
902
- @busy_bar.run()
903
- def do_code(self, line):
904
- """Enhance prompt for code generation
905
- usage :
906
- code <Code description>
907
- """
908
- self.default(Optimizers.code(line))
909
-
910
- @busy_bar.run()
911
- def do_shell(self, line):
912
- """Enhance prompt for system command (shell) generation
913
- Usage:
914
- shell <Action to be accomplished>
915
- """
916
- self.default(Optimizers.shell_command(line))
917
- if click.confirm("Do you wish to run the command(s) generated in your system"):
918
- self.do_sys(self.bot.get_message(self.bot.last_response))
919
-
920
- @busy_bar.run("While changing directory")
921
- def do_cd(self, line):
922
- """Change directory
923
- Usage :
924
- cd <path-to-directory>
925
- """
926
- assert line, "File path is required"
927
- os.chdir(line)
928
-
929
- def do_clear(self, line):
930
- """Clear console"""
931
- sys.stdout.write("\u001b[2J\u001b[H")
932
- sys.stdout.flush()
933
-
934
- @busy_bar.run("While handling history")
935
- def do_history(self, line):
936
- """Show current conversation history"""
937
- history = self.bot.conversation.chat_history
938
- formatted_history = re.sub(
939
- "\nLLM :",
940
- "\n\n**LLM** :",
941
- re.sub("\nUser :", "\n\n**User** :", history),
942
- )
943
- self.output_bond("Chat History", formatted_history, self.color)
944
- if click.confirm("Do you wish to save this chat"):
945
- save_to = click.prompt(
946
- "Enter path/file-name", default="llama-conversation.txt"
947
- )
948
- with open(save_to, "a") as fh:
949
- fh.write(history)
950
- click.secho(f"Conversation saved successfully to '{save_to}'", fg="cyan")
951
-
952
- @busy_bar.run("while resetting conversation")
953
- def do_reset(self, line):
954
- """Start new conversation thread"""
955
- self.bot.conversation.chat_history = click.prompt(
956
- "Introductory prompt", default=self.bot.conversation.intro
957
- )
958
- if hasattr(self.bot, "reset"):
959
- self.bot.reset()
960
- click.secho("Conversation reset successfully. New one created.", fg="cyan")
961
-
962
- @busy_bar.run("while loading conversation")
963
- def do_load(self, line):
964
- """Load conversation history from file"""
965
- history_file = click.prompt("Enter path to history path", default=line)
966
- if not os.path.isfile(history_file):
967
- click.secho(f"Path `{history_file}` does not exist!", fg="red")
968
- return
969
- with open(history_file) as fh:
970
- self.bot.conversation.chat_history = fh.read()
971
- click.secho("Conversation loaded successfully.", fg="cyan")
972
-
973
- def do_last_response(self, line):
974
- """Show whole last response in json format"""
975
- self.output_bond(
976
- "Last Response",
977
- self.bot.last_response,
978
- is_json=True,
979
- )
980
-
981
- @busy_bar.run()
982
- def do_exec(self, line):
983
- """Exec python code in last response with RawDog"""
984
- last_response = self.bot.get_message(self.bot.last_response)
985
- assert last_response, "Last response is null"
986
- assert "```python" in last_response, "Last response has no python code"
987
- if self.rawdog:
988
- self.RawDog.main(last_response)
989
- else:
990
- rawdog = RawDog(
991
- quiet=self.quiet,
992
- internal_exec=self.internal_exec,
993
- confirm_script=self.confirm_script,
994
- interpreter=self.interpreter,
995
- prettify=self.prettify,
996
- )
997
- rawdog.main(last_response)
998
-
999
- @busy_bar.run()
1000
- def do_rawdog(self, line):
1001
- """Repeat executing last rawdog's python code"""
1002
- assert self.rawdog, "Session not in rawdog mode. Restart with --rawdog"
1003
- self.default(self.bot.get_message(self.bot.last_response))
1004
-
1005
- @busy_bar.run()
1006
- def default(self, line, exit_on_error: bool = False, normal_stdout: bool = False):
1007
- """Chat with LLM"""
1008
- if not bool(line):
1009
- return
1010
- if line.startswith("./"):
1011
- os.system(line[2:])
1012
-
1013
- elif self.rawdog:
1014
- self.__start_time = time.time()
1015
- busy_bar.start_spinning()
1016
- ai_response = self.bot.chat(line, stream=False)
1017
- busy_bar.stop_spinning()
1018
- is_feedback = self.RawDog.main(ai_response)
1019
- if is_feedback:
1020
- return self.default(is_feedback)
1021
- self.__end_time = time.time()
1022
-
1023
- else:
1024
- self.__start_time = time.time()
1025
- try:
1026
-
1027
- def generate_response():
1028
- # Ensure response is yielded
1029
- def for_stream():
1030
- return self.bot.chat(line, stream=True)
1031
-
1032
- def for_non_stream():
1033
- yield self.bot.chat(line, stream=False)
1034
-
1035
- return for_non_stream() if self.disable_stream else for_stream()
1036
-
1037
- busy_bar.start_spinning()
1038
- generated_response = generate_response()
1039
-
1040
- if normal_stdout or not self.prettify and not self.disable_stream:
1041
- cached_response: str = ""
1042
- if not normal_stdout:
1043
- busy_bar.stop_spinning()
1044
- for response in generated_response:
1045
- offset = len(cached_response)
1046
- print(response[offset:], end="")
1047
- cached_response = response
1048
- if not normal_stdout:
1049
- print("")
1050
- return
1051
-
1052
- if self.quiet:
1053
- busy_bar.stop_spinning()
1054
- console_ = Console()
1055
- with Live(
1056
- console=console_,
1057
- refresh_per_second=16,
1058
- vertical_overflow=self.vertical_overflow,
1059
- ) as live:
1060
- for response in generated_response:
1061
- live.update(
1062
- Markdown(response, code_theme=self.code_theme)
1063
- if self.prettify
1064
- else response
1065
- )
1066
- else:
1067
- busy_bar.stop_spinning()
1068
- this.stream_output(
1069
- generated_response,
1070
- title="AI Response",
1071
- is_markdown=self.prettify,
1072
- style=Style(
1073
- color=self.color,
1074
- ),
1075
- code_theme=self.code_theme,
1076
- vertical_overflow=self.vertical_overflow,
1077
- )
1078
- except (KeyboardInterrupt, EOFError):
1079
- busy_bar.stop_spinning()
1080
- print("")
1081
- return False # Exit cmd
1082
-
1083
- except Exception as e:
1084
- # logging.exception(e)
1085
- busy_bar.stop_spinning()
1086
- logging.error(this.getExc(e))
1087
- if exit_on_error:
1088
- sys.exit(1)
1089
- finally:
1090
- self.__end_time = time.time()
1091
-
1092
- def do_sys(self, line):
1093
- """Execute system commands
1094
- shortcut [./<command>]
1095
- Usage:
1096
- sys <System command>
1097
- or
1098
- ./<System command>
1099
- """
1100
- os.system(line)
1101
-
1102
- def do_exit(self, line):
1103
- """Quit this program"""
1104
- if click.confirm("Are you sure to exit"):
1105
- click.secho("Okay Goodbye!", fg="yellow")
1106
- return True
1107
-
1108
-
1109
- class EntryGroup:
1110
- """Entry commands"""
1111
-
1112
- # @staticmethod
1113
- @click.group()
1114
- @click.version_option(
1115
- webscout.__version__, "-v", "--version", package_name="Webscout"
1116
- )
1117
- @click.help_option("-h", "--help")
1118
- def webai_():
1119
- pass
1120
-
1121
- @staticmethod
1122
- @webai_.group()
1123
- @click.help_option("-h", "--help")
1124
- def utils():
1125
- """Utility endpoint for webscout"""
1126
- pass
1127
-
1128
- @staticmethod
1129
- @webai_.group()
1130
- @click.help_option("-h", "--help")
1131
- def gpt4free():
1132
- """Discover gpt4free models, providers etc"""
1133
- pass
1134
-
1135
- @staticmethod
1136
- @webai_.group()
1137
- @click.help_option("-h", "--help")
1138
- def awesome():
1139
- """Perform CRUD operations on awesome-prompts"""
1140
- pass
1141
-
1142
-
1143
- import webscout
1144
- class Chatwebai:
1145
- """webai command"""
1146
-
1147
- @staticmethod
1148
- @click.command(context_settings=this.context_settings)
1149
- @click.option(
1150
- "-m",
1151
- "--model",
1152
- help="Model name for text-generation", # default="llama-2-13b-chat"
1153
- )
1154
- @click.option(
1155
- "-t",
1156
- "--temperature",
1157
- help="Charge of the generated text's randomness",
1158
- type=click.FloatRange(0, 1),
1159
- default=0.2,
1160
- )
1161
- @click.option(
1162
- "-mt",
1163
- "--max-tokens",
1164
- help="Maximum number of tokens to be generated upon completion",
1165
- type=click.INT,
1166
- default=600,
1167
- )
1168
- @click.option(
1169
- "-tp",
1170
- "--top-p",
1171
- help="Sampling threshold during inference time",
1172
- type=click.FLOAT,
1173
- default=0.999,
1174
- )
1175
- @click.option(
1176
- "-tk",
1177
- "--top-k",
1178
- help="Chance of topic being repeated",
1179
- type=click.FLOAT,
1180
- default=0,
1181
- )
1182
- @click.option(
1183
- "-k",
1184
- "--key",
1185
- help="LLM API access key or auth value or path to LLM with provider.",
1186
- )
1187
- @click.option(
1188
- "-ct",
1189
- "--code-theme",
1190
- help="Theme for displaying codes in response",
1191
- type=click.Choice(this.rich_code_themes),
1192
- default="monokai",
1193
- )
1194
- @click.option(
1195
- "-bi",
1196
- "--busy-bar-index",
1197
- help="Index of busy bar icon : [0: None, 1:/, 2:■█■■■, 3:⣻]",
1198
- type=click.IntRange(0, 3),
1199
- default=3,
1200
- )
1201
- @click.option("-fc", "--font-color", help="Stdout font color")
1202
- @click.option(
1203
- "-to", "--timeout", help="Http requesting timeout", type=click.INT, default=30
1204
- )
1205
- @click.argument("prompt", required=False)
1206
- @click.option(
1207
- "--prettify/--raw",
1208
- help="Flag for prettifying markdowned response",
1209
- default=True,
1210
- )
1211
- @click.option(
1212
- "-dc",
1213
- "--disable-conversation",
1214
- is_flag=True,
1215
- default=True, # is_conversation = True
1216
- help="Disable chatting conversationally (Stable)",
1217
- )
1218
- @click.option(
1219
- "-fp",
1220
- "--filepath",
1221
- type=click.Path(),
1222
- default=os.path.join(default_path, "chat-history.txt"),
1223
- help="Path to chat history - new will be created incase doesn't exist",
1224
- )
1225
- @click.option(
1226
- "--update-file/--retain-file",
1227
- help="Controls updating chat history in file",
1228
- default=True,
1229
- )
1230
- @click.option(
1231
- "-i",
1232
- "--intro",
1233
- help="Conversation introductory prompt",
1234
- )
1235
- @click.option(
1236
- "-ho",
1237
- "--history-offset",
1238
- help="Limit conversation history to this number of last texts",
1239
- type=click.IntRange(100, 16000),
1240
- default=10250,
1241
- )
1242
- @click.option(
1243
- "-ap",
1244
- "--awesome-prompt",
1245
- default="0",
1246
- callback=lambda ctx, param, value: (
1247
- int(value) if str(value).isdigit() else value
1248
- ),
1249
- help="Awesome prompt key or index. Alt. to intro",
1250
- )
1251
- @click.option(
1252
- "-pp",
1253
- "--proxy-path",
1254
- type=click.Path(exists=True),
1255
- help="Path to .json file containing proxies",
1256
- )
1257
- @click.option(
1258
- "-p",
1259
- "--provider",
1260
- type=click.Choice(available_providers),
1261
- default=this.default_provider,
1262
- help="Name of LLM provider.",
1263
- metavar=(
1264
- f"[{'|'.join(webscout.webai)}] etc, "
1265
- "run 'webscout gpt4free list providers -w' to "
1266
- "view more providers and 'webscout gpt4free test -y' "
1267
- "for advanced g4f providers test"
1268
- ),
1269
- )
1270
- @click.option(
1271
- "-vo",
1272
- "--vertical-overflow",
1273
- help="Vertical overflow behaviour on content display",
1274
- type=click.Choice(["visible", "crop", "ellipsis"]),
1275
- default="ellipsis",
1276
- )
1277
- @click.option(
1278
- "-w",
1279
- "--whole",
1280
- is_flag=True,
1281
- default=False,
1282
- help="Disable streaming response",
1283
- )
1284
- @click.option(
1285
- "-q",
1286
- "--quiet",
1287
- is_flag=True,
1288
- help="Flag for controlling response-framing and response verbosity",
1289
- default=False,
1290
- )
1291
- @click.option(
1292
- "-n",
1293
- "--new",
1294
- help="Overwrite the filepath contents",
1295
- is_flag=True,
1296
- )
1297
- @click.option(
1298
- "-wc",
1299
- "--with-copied",
1300
- is_flag=True,
1301
- help="Postfix prompt with last copied text",
1302
- )
1303
- @click.option(
1304
- "-nc", "--no-coloring", is_flag=True, help="Disable intro prompt font-coloring"
1305
- )
1306
- @click.option(
1307
- "-cc",
1308
- "--chat-completion",
1309
- is_flag=True,
1310
- help="Provide native context for gpt4free providers",
1311
- )
1312
- @click.option(
1313
- "-iw",
1314
- "--ignore-working",
1315
- is_flag=True,
1316
- help="Ignore working status of the provider",
1317
- )
1318
- @click.option(
1319
- "-rd",
1320
- "--rawdog",
1321
- is_flag=True,
1322
- help="Generate and auto-execute Python scripts - (experimental)",
1323
- )
1324
- @click.option(
1325
- "-ix",
1326
- "--internal-exec",
1327
- is_flag=True,
1328
- help="RawDog : Execute scripts with exec function instead of out-of-script interpreter",
1329
- )
1330
- @click.option(
1331
- "-cs",
1332
- "--confirm-script",
1333
- is_flag=True,
1334
- help="RawDog : Give consent to generated scripts prior to execution",
1335
- )
1336
- @click.option(
1337
- "-int",
1338
- "--interpreter",
1339
- default="python",
1340
- help="RawDog : Python's interpreter name",
1341
- )
1342
- @click.help_option("-h", "--help")
1343
- def webai(
1344
- model,
1345
- temperature,
1346
- max_tokens,
1347
- top_p,
1348
- top_k,
1349
- key,
1350
- code_theme,
1351
- busy_bar_index,
1352
- font_color,
1353
- timeout,
1354
- prompt,
1355
- prettify,
1356
- disable_conversation,
1357
- filepath,
1358
- update_file,
1359
- intro,
1360
- history_offset,
1361
- awesome_prompt,
1362
- proxy_path,
1363
- provider,
1364
- vertical_overflow,
1365
- whole,
1366
- quiet,
1367
- new,
1368
- with_copied,
1369
- no_coloring,
1370
- chat_completion,
1371
- ignore_working,
1372
- rawdog,
1373
- internal_exec,
1374
- confirm_script,
1375
- interpreter,
1376
- ):
1377
- """Chat with AI webaily (Default)"""
1378
- this.clear_history_file(filepath, new)
1379
- bot = Main(
1380
- max_tokens,
1381
- temperature,
1382
- top_k,
1383
- top_p,
1384
- model,
1385
- key,
1386
- timeout,
1387
- disable_conversation,
1388
- filepath,
1389
- update_file,
1390
- intro,
1391
- history_offset,
1392
- awesome_prompt,
1393
- proxy_path,
1394
- provider,
1395
- quiet,
1396
- chat_completion,
1397
- ignore_working,
1398
- rawdog=rawdog,
1399
- internal_exec=internal_exec,
1400
- confirm_script=confirm_script,
1401
- interpreter=interpreter,
1402
- )
1403
- busy_bar.spin_index = busy_bar_index
1404
- bot.code_theme = code_theme
1405
- bot.color = font_color
1406
- bot.disable_coloring = no_coloring
1407
- bot.prettify = prettify
1408
- bot.vertical_overflow = vertical_overflow
1409
- bot.disable_stream = whole
1410
- if prompt:
1411
- if with_copied:
1412
- prompt = prompt + "\n" + clipman.get()
1413
- bot.default(prompt)
1414
- bot.cmdloop()
1415
-
1416
-
1417
- class ChatGenerate:
1418
- """Generate command"""
1419
-
1420
- @staticmethod
1421
- @click.command(context_settings=this.context_settings)
1422
- @click.option(
1423
- "-m",
1424
- "--model",
1425
- help="Model name for text-generation",
1426
- )
1427
- @click.option(
1428
- "-t",
1429
- "--temperature",
1430
- help="Charge of the generated text's randomness",
1431
- type=click.FloatRange(0, 1),
1432
- default=0.2,
1433
- )
1434
- @click.option(
1435
- "-mt",
1436
- "--max-tokens",
1437
- help="Maximum number of tokens to be generated upon completion",
1438
- type=click.INT,
1439
- default=600,
1440
- )
1441
- @click.option(
1442
- "-tp",
1443
- "--top-p",
1444
- help="Sampling threshold during inference time",
1445
- type=click.FLOAT,
1446
- default=0.999,
1447
- )
1448
- @click.option(
1449
- "-tk",
1450
- "--top-k",
1451
- help="Chance of topic being repeated",
1452
- type=click.FLOAT,
1453
- default=0,
1454
- )
1455
- @click.option(
1456
- "-k",
1457
- "--key",
1458
- help="LLM API access key or auth value or path to LLM with provider.",
1459
- )
1460
- @click.option(
1461
- "-ct",
1462
- "--code-theme",
1463
- help="Theme for displaying codes in response",
1464
- type=click.Choice(this.rich_code_themes),
1465
- default="monokai",
1466
- )
1467
- @click.option(
1468
- "-bi",
1469
- "--busy-bar-index",
1470
- help="Index of busy bar icon : [0: None, 1:/, 2:■█■■■, 3:⣻]",
1471
- type=click.IntRange(0, 3),
1472
- default=3,
1473
- )
1474
- @click.option(
1475
- "-fc",
1476
- "--font-color",
1477
- help="Stdout font color",
1478
- )
1479
- @click.option(
1480
- "-to", "--timeout", help="Http requesting timeout", type=click.INT, default=30
1481
- )
1482
- @click.argument("prompt", required=False)
1483
- @click.option(
1484
- "--prettify/--raw",
1485
- help="Flag for prettifying markdowned response",
1486
- default=True,
1487
- )
1488
- @click.option(
1489
- "-w",
1490
- "--whole",
1491
- is_flag=True,
1492
- default=False,
1493
- help="Disable streaming response",
1494
- )
1495
- @click.option(
1496
- "-c",
1497
- "--code",
1498
- is_flag=True,
1499
- default=False,
1500
- help="Optimize prompt for code generation",
1501
- )
1502
- @click.option(
1503
- "-s",
1504
- "--shell",
1505
- is_flag=True,
1506
- default=False,
1507
- help="Optimize prompt for shell command generation",
1508
- )
1509
- @click.option(
1510
- "-dc",
1511
- "--disable-conversation",
1512
- is_flag=True,
1513
- default=True, # is_conversation = True
1514
- help="Disable chatting conversationally (Stable)",
1515
- )
1516
- @click.option(
1517
- "-fp",
1518
- "--filepath",
1519
- type=click.Path(),
1520
- default=os.path.join(default_path, "chat-history.txt"),
1521
- help="Path to chat history - new will be created incase doesn't exist",
1522
- )
1523
- @click.option(
1524
- "--update-file/--retain-file",
1525
- help="Controls updating chat history in file",
1526
- default=True,
1527
- )
1528
- @click.option(
1529
- "-i",
1530
- "--intro",
1531
- help="Conversation introductory prompt",
1532
- )
1533
- @click.option(
1534
- "-ho",
1535
- "--history-offset",
1536
- help="Limit conversation history to this number of last texts",
1537
- type=click.IntRange(100, 16000),
1538
- default=10250,
1539
- )
1540
- @click.option(
1541
- "-ap",
1542
- "--awesome-prompt",
1543
- default="0",
1544
- callback=lambda ctx, param, value: (
1545
- int(value) if str(value).isdigit() else value
1546
- ),
1547
- help="Awesome prompt key or index. Alt. to intro",
1548
- )
1549
- @click.option(
1550
- "-pp",
1551
- "--proxy-path",
1552
- type=click.Path(exists=True),
1553
- help="Path to .json file containing proxies",
1554
- )
1555
- @click.option(
1556
- "-p",
1557
- "--provider",
1558
- type=click.Choice(webscout.available_providers),
1559
- default=this.default_provider,
1560
- help="Name of LLM provider.",
1561
- metavar=(
1562
- f"[{'|'.join(webscout.webai)}] etc, "
1563
- "run 'webscout gpt4free list providers -w' to "
1564
- "view more providers and 'webscout gpt4free test -y' "
1565
- "for advanced g4f providers test"
1566
- ),
1567
- )
1568
- @click.option(
1569
- "-vo",
1570
- "--vertical-overflow",
1571
- help="Vertical overflow behaviour on content display",
1572
- type=click.Choice(["visible", "crop", "ellipsis"]),
1573
- default="ellipsis",
1574
- )
1575
- @click.option(
1576
- "-q",
1577
- "--quiet",
1578
- is_flag=True,
1579
- help="Flag for controlling response-framing and response verbosity",
1580
- default=False,
1581
- )
1582
- @click.option(
1583
- "-n",
1584
- "--new",
1585
- help="Override the filepath contents",
1586
- is_flag=True,
1587
- )
1588
- @click.option(
1589
- "-wc",
1590
- "--with-copied",
1591
- is_flag=True,
1592
- help="Postfix prompt with last copied text",
1593
- )
1594
- @click.option(
1595
- "-iw",
1596
- "--ignore-working",
1597
- is_flag=True,
1598
- help="Ignore working status of the provider",
1599
- )
1600
- @click.option(
1601
- "-rd",
1602
- "--rawdog",
1603
- is_flag=True,
1604
- help="Generate and auto-execute Python scripts - (experimental)",
1605
- )
1606
- @click.option(
1607
- "-ix",
1608
- "--internal-exec",
1609
- is_flag=True,
1610
- help="RawDog : Execute scripts with exec function instead of out-of-script interpreter",
1611
- )
1612
- @click.option(
1613
- "-cs",
1614
- "--confirm-script",
1615
- is_flag=True,
1616
- help="RawDog : Give consent to generated scripts prior to execution",
1617
- )
1618
- @click.option(
1619
- "-int",
1620
- "--interpreter",
1621
- default="python",
1622
- help="RawDog : Python's interpreter name",
1623
- )
1624
- @click.help_option("-h", "--help")
1625
- def generate(
1626
- model,
1627
- temperature,
1628
- max_tokens,
1629
- top_p,
1630
- top_k,
1631
- key,
1632
- code_theme,
1633
- busy_bar_index,
1634
- font_color,
1635
- timeout,
1636
- prompt,
1637
- prettify,
1638
- whole,
1639
- code,
1640
- shell,
1641
- disable_conversation,
1642
- filepath,
1643
- update_file,
1644
- intro,
1645
- history_offset,
1646
- awesome_prompt,
1647
- proxy_path,
1648
- provider,
1649
- vertical_overflow,
1650
- quiet,
1651
- new,
1652
- with_copied,
1653
- ignore_working,
1654
- rawdog,
1655
- internal_exec,
1656
- confirm_script,
1657
- interpreter,
1658
- ):
1659
- """Generate a quick response with AI"""
1660
- this.clear_history_file(filepath, new)
1661
- bot = Main(
1662
- max_tokens,
1663
- temperature,
1664
- top_k,
1665
- top_p,
1666
- model,
1667
- key,
1668
- timeout,
1669
- disable_conversation,
1670
- filepath,
1671
- update_file,
1672
- intro,
1673
- history_offset,
1674
- awesome_prompt,
1675
- proxy_path,
1676
- provider,
1677
- quiet,
1678
- ignore_working=ignore_working,
1679
- rawdog=rawdog,
1680
- internal_exec=internal_exec,
1681
- confirm_script=confirm_script,
1682
- interpreter=interpreter,
1683
- )
1684
- prompt = prompt if prompt else ""
1685
- copied_placeholder = "{{copied}}"
1686
- stream_placeholder = "{{stream}}"
1687
-
1688
- if with_copied or copied_placeholder in prompt:
1689
- last_copied_text = clipman.get()
1690
- assert last_copied_text, "No copied text found, issue prompt"
1691
-
1692
- if copied_placeholder in prompt:
1693
- prompt = prompt.replace(copied_placeholder, last_copied_text)
1694
-
1695
- else:
1696
- sep = "\n" if prompt else ""
1697
- prompt = prompt + sep + last_copied_text
1698
-
1699
- if not prompt and sys.stdin.isatty(): # No prompt issued and no piped input
1700
- help_info = (
1701
- "Usage: webscout generate [OPTIONS] PROMPT\n"
1702
- "Try 'webscout generate --help' for help.\n"
1703
- "Error: Missing argument 'PROMPT'."
1704
- )
1705
- click.secho(
1706
- help_info
1707
- ) # Let's try to mimic the click's missing argument help info
1708
- sys.exit(1)
1709
-
1710
- if not sys.stdin.isatty(): # Piped input detected - True
1711
- # Let's try to read piped input
1712
- stream_text = click.get_text_stream("stdin").read()
1713
- if stream_placeholder in prompt:
1714
- prompt = prompt.replace(stream_placeholder, stream_text)
1715
- else:
1716
- prompt = prompt + "\n" + stream_text if prompt else stream_text
1717
-
1718
- assert stream_placeholder not in prompt, (
1719
- "No piped input detected ~ " + stream_placeholder
1720
- )
1721
- assert copied_placeholder not in prompt, (
1722
- "No copied text found ~ " + copied_placeholder
1723
- )
1724
-
1725
- prompt = Optimizers.code(prompt) if code else prompt
1726
- prompt = Optimizers.shell_command(prompt) if shell else prompt
1727
- busy_bar.spin_index = (
1728
- 0 if any([quiet, sys.stdout.isatty() == False]) else busy_bar_index
1729
- )
1730
- bot.code_theme = code_theme
1731
- bot.color = font_color
1732
- bot.prettify = prettify
1733
- bot.vertical_overflow = vertical_overflow
1734
- bot.disable_stream = whole
1735
- bot.default(prompt, True, normal_stdout=(sys.stdout.isatty() == False))
1736
-
1737
-
1738
- class Awesome:
1739
- """Awesome commands"""
1740
-
1741
- @staticmethod
1742
- @click.command(context_settings=this.context_settings)
1743
- @click.option(
1744
- "-r",
1745
- "--remote",
1746
- help="Remote source to update from",
1747
- default=AwesomePrompts.awesome_prompt_url,
1748
- )
1749
- @click.option(
1750
- "-o",
1751
- "--output",
1752
- help="Path to save the prompts",
1753
- default=AwesomePrompts.awesome_prompt_path,
1754
- )
1755
- @click.option(
1756
- "-n", "--new", is_flag=True, help="Override the existing contents in path"
1757
- )
1758
- @click.help_option("-h", "--help")
1759
- @this.handle_exception
1760
- def update(remote, output, new):
1761
- """Update awesome-prompts from remote source."""
1762
- AwesomePrompts.awesome_prompt_url = remote
1763
- AwesomePrompts.awesome_prompt_path = output
1764
- AwesomePrompts().update_prompts_from_online(new)
1765
- click.secho(
1766
- f"Prompts saved to - '{AwesomePrompts.awesome_prompt_path}'", fg="cyan"
1767
- )
1768
-
1769
- @staticmethod
1770
- @click.command(context_settings=this.context_settings)
1771
- @click.argument(
1772
- "key",
1773
- required=True,
1774
- type=click.STRING,
1775
- )
1776
- @click.option(
1777
- "-d", "--default", help="Return this value if not found", default=None
1778
- )
1779
- @click.option(
1780
- "-c",
1781
- "--case-sensitive",
1782
- default=True,
1783
- flag_value=False,
1784
- help="Perform case-sensitive search",
1785
- )
1786
- @click.option(
1787
- "-f",
1788
- "--file",
1789
- type=click.Path(exists=True),
1790
- help="Path to existing prompts",
1791
- default=AwesomePrompts.awesome_prompt_path,
1792
- )
1793
- @click.help_option("-h", "--help")
1794
- @this.handle_exception
1795
- def search(
1796
- key,
1797
- default,
1798
- case_sensitive,
1799
- file,
1800
- ):
1801
- """Search for a particular awesome-prompt by key or index"""
1802
- AwesomePrompts.awesome_prompt_path = file
1803
- resp = AwesomePrompts().get_act(
1804
- key,
1805
- default=default,
1806
- case_insensitive=case_sensitive,
1807
- )
1808
- if resp:
1809
- click.secho(resp)
1810
- return resp != default
1811
-
1812
- @staticmethod
1813
- @click.command(context_settings=this.context_settings)
1814
- @click.option("-n", "--name", required=True, help="Prompt name")
1815
- @click.option("-p", "--prompt", required=True, help="Prompt value")
1816
- @click.option(
1817
- "-f",
1818
- "--file",
1819
- type=click.Path(exists=True),
1820
- help="Path to existing prompts",
1821
- default=AwesomePrompts.awesome_prompt_path,
1822
- )
1823
- @click.help_option("-h", "--help")
1824
- @this.handle_exception
1825
- def add(name, prompt, file):
1826
- """Add new prompt to awesome-prompt list"""
1827
- AwesomePrompts.awesome_prompt_path = file
1828
- return AwesomePrompts().add_prompt(name, prompt)
1829
-
1830
- @staticmethod
1831
- @click.command(context_settings=this.context_settings)
1832
- @click.argument("name")
1833
- @click.option(
1834
- "--case-sensitive",
1835
- is_flag=True,
1836
- flag_value=False,
1837
- default=True,
1838
- help="Perform name case-sensitive search",
1839
- )
1840
- @click.option(
1841
- "-f",
1842
- "--file",
1843
- type=click.Path(exists=True),
1844
- help="Path to existing prompts",
1845
- default=AwesomePrompts.awesome_prompt_path,
1846
- )
1847
- @click.help_option("-h", "--help")
1848
- @this.handle_exception
1849
- def delete(name, case_sensitive, file):
1850
- """Delete a specific awesome-prompt"""
1851
- AwesomePrompts.awesome_prompt_path = file
1852
- return AwesomePrompts().delete_prompt(name, case_sensitive)
1853
-
1854
- @staticmethod
1855
- @click.command(context_settings=this.context_settings)
1856
- @click.option(
1857
- "-j",
1858
- "--json",
1859
- is_flag=True,
1860
- help="Display prompts in json format",
1861
- )
1862
- @click.option(
1863
- "-i",
1864
- "--indent",
1865
- type=click.IntRange(1, 20),
1866
- help="Json format indentation level",
1867
- default=4,
1868
- )
1869
- @click.option(
1870
- "-x",
1871
- "--index",
1872
- is_flag=True,
1873
- help="Display prompts with their corresponding indexes",
1874
- )
1875
- @click.option("-c", "--color", help="Prompts stdout font color")
1876
- @click.option("-o", "--output", type=click.Path(), help="Path to save the prompts")
1877
- @click.help_option("-h", "--help")
1878
- def whole(json, indent, index, color, output):
1879
- """Stdout all awesome prompts"""
1880
- ap = AwesomePrompts()
1881
- awesome_prompts = ap.all_acts if index else ap.get_acts()
1882
-
1883
- if json:
1884
- # click.secho(formatted_awesome_prompts, fg=color)
1885
- rich.print_json(data=awesome_prompts, indent=indent)
1886
-
1887
- else:
1888
- awesome_table = Table(show_lines=True, title="All Awesome-Prompts")
1889
- awesome_table.add_column("index", justify="center", style="yellow")
1890
- awesome_table.add_column("Act Name/Index", justify="left", style="cyan")
1891
- awesome_table.add_column(
1892
- "Prompt",
1893
- style=color,
1894
- )
1895
- for index, key_value in enumerate(awesome_prompts.items()):
1896
- awesome_table.add_row(str(index), str(key_value[0]), key_value[1])
1897
- rich.print(awesome_table)
1898
-
1899
- if output:
1900
- from json import dump
1901
-
1902
- with open(output, "w") as fh:
1903
- dump(awesome_prompts, fh, indent=4)
1904
-
1905
-
1906
- class Gpt4free:
1907
- """Commands for gpt4free"""
1908
-
1909
- @staticmethod
1910
- @click.command(context_settings=this.context_settings)
1911
- @busy_bar.run(index=1, immediate=True)
1912
- @click.help_option("-h", "--help")
1913
- def version():
1914
- """Check current installed version of gpt4free"""
1915
- version_string = this.run_system_command("pip show g4f")[1].stdout.split("\n")[
1916
- 1
1917
- ]
1918
- click.secho(version_string, fg="cyan")
1919
-
1920
- @staticmethod
1921
- @click.command(context_settings=this.context_settings)
1922
- @click.help_option("-h", "--help")
1923
- @click.option(
1924
- "-e",
1925
- "--extra",
1926
- help="Extra required dependencies category",
1927
- multiple=True,
1928
- type=click.Choice(
1929
- ["all", "image", "webdriver", "openai", "api", "gui", "none"]
1930
- ),
1931
- default=["all"],
1932
- )
1933
- @click.option("-l", "--log", is_flag=True, help="Stdout installation logs")
1934
- @click.option(
1935
- "-s",
1936
- "--sudo",
1937
- is_flag=True,
1938
- flag_value="sudo ",
1939
- help="Install with sudo privileges",
1940
- )
1941
- @busy_bar.run(index=1, immediate=True)
1942
- def update(extra, log, sudo):
1943
- """Update GPT4FREE package (Models, Providers etc)"""
1944
- if "none" in extra:
1945
- command = f"{sudo or ''}pip install --upgrade g4f"
1946
- else:
1947
- command = f"{sudo or ''}pip install --upgrade g4f[{','.join(extra)}]"
1948
- is_successful, response = this.run_system_command(command)
1949
- if log and is_successful:
1950
- click.echo(response.stdout)
1951
- version_string = this.run_system_command("pip show g4f")[1].stdout.split("\n")[
1952
- 1
1953
- ]
1954
- click.secho(f"GPT4FREE updated successfully - {version_string}", fg="cyan")
1955
-
1956
- @staticmethod
1957
- @click.command("list", context_settings=this.context_settings)
1958
- @click.argument("target")
1959
- @click.option("-w", "--working", is_flag=True, help="Restrict to working providers")
1960
- @click.option("-u", "--url", is_flag=True, help="Restrict to providers with url")
1961
- @click.option(
1962
- "-s", "--stream", is_flag=True, help="Restrict to providers supporting stream"
1963
- )
1964
- @click.option(
1965
- "-c",
1966
- "--context",
1967
- is_flag=True,
1968
- help="Restrict to providers supporing context natively",
1969
- )
1970
- @click.option(
1971
- "-35",
1972
- "--gpt35",
1973
- is_flag=True,
1974
- help="Restrict to providers supporting gpt3.5_turbo model",
1975
- )
1976
- @click.option(
1977
- "-4", "--gpt4", is_flag=True, help="Restrict to providers supporting gpt4 model"
1978
- )
1979
- @click.option(
1980
- "-se",
1981
- "--selenium",
1982
- is_flag=True,
1983
- help="Restrict to selenium dependent providers",
1984
- )
1985
- @click.option("-j", "--json", is_flag=True, help="Format output in json")
1986
- @click.help_option("-h", "--help")
1987
- def show(target, working, url, stream, context, gpt35, gpt4, selenium, json):
1988
- """List available models and providers"""
1989
- available_targets = ["models", "providers"]
1990
- assert (
1991
- target in available_targets
1992
- ), f"Target must be one of [{', '.join(available_targets)}]"
1993
- if target == "providers":
1994
- hunted_providers = list(
1995
- set(
1996
- map(
1997
- lambda provider: (
1998
- provider["name"] if all(list(provider.values())) else None
1999
- ),
2000
- this.g4f_providers_in_dict(
2001
- url=url,
2002
- working=working,
2003
- stream=stream,
2004
- context=context,
2005
- gpt35=gpt35,
2006
- gpt4=gpt4,
2007
- selenium=selenium,
2008
- ),
2009
- )
2010
- )
2011
- )
2012
- while None in hunted_providers:
2013
- hunted_providers.remove(None)
2014
-
2015
- hunted_providers.sort()
2016
- if json:
2017
- rich.print_json(data=dict(providers=hunted_providers), indent=4)
2018
-
2019
- else:
2020
- table = Table(show_lines=True)
2021
- table.add_column("No.", style="yellow", justify="center")
2022
- table.add_column("Provider", style="cyan")
2023
- for no, provider in enumerate(hunted_providers):
2024
- table.add_row(str(no), provider)
2025
- rich.print(table)
2026
- else:
2027
- models = dict(
2028
- Bard=[
2029
- "palm",
2030
- ],
2031
- HuggingFace=[
2032
- "h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
2033
- "h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
2034
- "h2ogpt-gm-oasst1-en-2048-open-llama-13b",
2035
- "gpt-neox-20b",
2036
- "oasst-sft-1-pythia-12b",
2037
- "oasst-sft-4-pythia-12b-epoch-3.5",
2038
- "santacoder",
2039
- "bloom",
2040
- "flan-t5-xxl",
2041
- ],
2042
- Anthropic=[
2043
- "claude-instant-v1",
2044
- "claude-v1",
2045
- "claude-v2",
2046
- ],
2047
- Cohere=[
2048
- "command-light-nightly",
2049
- "command-nightly",
2050
- ],
2051
- OpenAI=[
2052
- "code-davinci-002",
2053
- "text-ada-001",
2054
- "text-babbage-001",
2055
- "text-curie-001",
2056
- "text-davinci-002",
2057
- "text-davinci-003",
2058
- "gpt-3.5-turbo-16k",
2059
- "gpt-3.5-turbo-16k-0613",
2060
- "gpt-4-0613",
2061
- ],
2062
- Replicate=[
2063
- "llama13b-v2-chat",
2064
- "llama7b-v2-chat",
2065
- ],
2066
- )
2067
- for provider in webscout.g4f.Provider.__providers__:
2068
- if hasattr(provider, "models"):
2069
- models[provider.__name__] = provider.models
2070
- if json:
2071
- for key, value in models.items():
2072
- while None in value:
2073
- value.remove(None)
2074
- value.sort()
2075
- models[key] = value
2076
-
2077
- rich.print_json(data=models, indent=4)
2078
- else:
2079
- table = Table(show_lines=True)
2080
- table.add_column("No.", justify="center", style="white")
2081
- table.add_column("Base Provider", style="cyan")
2082
- table.add_column("Model(s)", style="yellow")
2083
- for count, provider_models in enumerate(models.items()):
2084
- models = provider_models[1]
2085
- models.sort()
2086
- table.add_row(str(count), provider_models[0], "\n".join(models))
2087
- rich.print(table)
2088
-
2089
- @staticmethod
2090
- @click.command(context_settings=this.context_settings)
2091
- @click.argument("port", type=click.INT, required=False)
2092
- @click.option(
2093
- "-a", "--address", help="Host on this particular address", default="127.0.0.1"
2094
- )
2095
- @click.option("-d", "--debug", is_flag=True, help="Start server in debug mode")
2096
- @click.option(
2097
- "-o", "--open", is_flag=True, help="Proceed to the interface immediately"
2098
- )
2099
- @click.help_option("-h", "--help")
2100
- def gui(port, address, debug, open):
2101
- """Launch gpt4free web interface"""
2102
- from g4f.gui import run_gui
2103
-
2104
- port = port or 8000
2105
- t1 = thr(
2106
- target=run_gui,
2107
- args=(
2108
- address,
2109
- port,
2110
- debug,
2111
- ),
2112
- )
2113
- # run_gui(host=address, port=port, debug=debug)
2114
- t1.start()
2115
- if open:
2116
- click.launch(f"http://{address}:{port}")
2117
- t1.join()
2118
-
2119
- @staticmethod
2120
- @click.command(context_settings=this.context_settings)
2121
- @click.option(
2122
- "-t",
2123
- "--timeout",
2124
- type=click.INT,
2125
- help="Provider's response generation timeout",
2126
- default=20,
2127
- )
2128
- @click.option(
2129
- "-r",
2130
- "--thread",
2131
- type=click.INT,
2132
- help="Test n amount of providers at once",
2133
- default=5,
2134
- )
2135
- @click.option("-q", "--quiet", is_flag=True, help="Suppress progress bar")
2136
- @click.option(
2137
- "-j", "--json", is_flag=True, help="Stdout test results in json format"
2138
- )
2139
- @click.option("-d", "--dry-test", is_flag=True, help="Return previous test results")
2140
- @click.option(
2141
- "-b", "--best", is_flag=True, help="Stdout the fastest provider <name only>"
2142
- )
2143
- @click.option(
2144
- "-se",
2145
- "--selenium",
2146
- help="Test even selenium dependent providers",
2147
- is_flag=True,
2148
- )
2149
- @click.option(
2150
- "-dl",
2151
- "--disable-logging",
2152
- is_flag=True,
2153
- help="Disable logging",
2154
- )
2155
- @click.option("-y", "--yes", is_flag=True, help="Okay to all confirmations")
2156
- @click.help_option("-h", "--help")
2157
- def test(
2158
- timeout, thread, quiet, json, dry_test, best, selenium, disable_logging, yes
2159
- ):
2160
- """Test and save working providers"""
2161
- from webscout.g4f import TestProviders
2162
-
2163
- test = TestProviders(
2164
- test_at_once=thread,
2165
- quiet=quiet,
2166
- timeout=timeout,
2167
- selenium=selenium,
2168
- do_log=disable_logging == False,
2169
- )
2170
- if best:
2171
- click.secho(test.best)
2172
- return
2173
- elif dry_test:
2174
- results = test.get_results(
2175
- run=False,
2176
- )
2177
- else:
2178
- if (
2179
- yes
2180
- or os.path.isfile(webscout.AIutel.results_path)
2181
-                 and click.confirm("Are you sure you want to run a new test")
2182
- ):
2183
- results = test.get_results(run=True)
2184
- else:
2185
- results = test.get_results(
2186
- run=False,
2187
- )
2188
- if json:
2189
- rich.print_json(data=dict(results=results))
2190
- else:
2191
- table = Table(
2192
- title="G4f Providers Test Results",
2193
- show_lines=True,
2194
- )
2195
- table.add_column("No.", style="white", justify="center")
2196
- table.add_column("Provider", style="yellow", justify="left")
2197
- table.add_column("Response Time(s)", style="cyan")
2198
-
2199
- for no, provider in enumerate(results, start=1):
2200
- table.add_row(
2201
- str(no), provider["name"], str(round(provider["time"], 2))
2202
- )
2203
- rich.print(table)
2204
-
2205
-
2206
-
2207
- @staticmethod
2208
- @click.command(context_settings=this.context_settings)
2209
- @click.argument("prompt")
2210
- @click.option(
2211
- "-d",
2212
- "--directory",
2213
- type=click.Path(exists=True),
2214
- help="Folder for saving the images",
2215
- default=os.getcwd(),
2216
- )
2217
- @click.option(
2218
- "-a",
2219
- "--amount",
2220
- type=click.IntRange(1, 100),
2221
- help="Total images to be generated",
2222
- default=1,
2223
- )
2224
- @click.option("-n", "--name", help="Name for the generated images")
2225
- @click.option(
2226
- "-t",
2227
- "--timeout",
2228
- type=click.IntRange(5, 300),
2229
- help="Http request timeout in seconds",
2230
- )
2231
- @click.option("-p", "--proxy", help="Http request proxy")
2232
- @click.option(
2233
- "-nd",
2234
- "--no-additives",
2235
- is_flag=True,
2236
- help="Disable prompt altering for effective image generation",
2237
- )
2238
- @click.option("-q", "--quiet", is_flag=True, help="Suppress progress bar")
2239
- @click.help_option("-h", "--help")
2240
- def generate_image(
2241
- prompt, directory, amount, name, timeout, proxy, no_additives, quiet
2242
- ):
2243
- """Generate images with pollinations.ai"""
2244
- with Progress() as progress:
2245
- task = progress.add_task(
2246
- f"[cyan]Generating ...[{amount}]",
2247
- total=amount,
2248
- visible=quiet == False,
2249
- )
2250
-
2251
-
2252
-
2253
- class Utils:
2254
- """Utilities command"""
2255
-
2256
- @staticmethod
2257
- @click.command(context_settings=this.context_settings)
2258
- @click.argument("source", required=False)
2259
- @click.option(
2260
- "-d", "--dev", is_flag=True, help="Update from version control (development)"
2261
- )
2262
- @click.option(
2263
- "-s",
2264
- "--sudo",
2265
- is_flag=True,
2266
- flag_value="sudo ",
2267
- help="Install with sudo privileges",
2268
- )
2269
- @click.help_option("-h", "--help")
2270
- @busy_bar.run(index=1, immediate=True)
2271
- def update(source, dev, sudo):
2272
- """Install latest version of webscout"""
2273
- if dev:
2274
- source = "git+" + webscout.__repo__ + ".git"
2275
- source = "webscout" if source is None else source
2276
- assert (
2277
- "tgpt" in source or source == "."
2278
- ), f"Cannot update webscout from the source '{source}'"
2279
- click.secho(
2280
- f"[*] Updating from '{'pip' if source=='webscout' else source}'",
2281
- fg="yellow",
2282
- )
2283
- this.run_system_command(f"{sudo or ''}pip install --upgrade {source}")
2284
- response = this.run_system_command("pip show webscout")[1]
2285
- click.secho(response.stdout)
2286
- click.secho("Congratulations! webscout updated successfully.", fg="cyan")
2287
-
2288
- @staticmethod
2289
- @click.command(context_settings=this.context_settings)
2290
- @click.option("-w", "--whole", is_flag=True, help="Stdout whole json info")
2291
- @click.option(
2292
- "-v", "--version", is_flag=True, help="Stdout latest version name only"
2293
- )
2294
- @click.option("-b", "--body", is_flag=True, help="Stdout changelog info only")
2295
- @click.option(
2296
- "-e", "--executable", is_flag=True, help="Stdout url to binary for your system"
2297
- )
2298
- @click.help_option("-h", "--help")
2299
- def latest(whole, version, body, executable):
2300
- """Check webscout latest version info"""
2301
- from webscout.utils import Updates
2302
-
2303
- update = Updates()
2304
- if whole:
2305
- rich.print_json(data=update.latest(whole=True))
2306
-
2307
- elif version:
2308
- rich.print(update.latest_version)
2309
- elif body:
2310
- rich.print(Markdown(update.latest()["body"]))
2311
- elif executable:
2312
- rich.print(update.executable())
2313
- else:
2314
- rich.print_json(data=update.latest())
2315
-
2316
-
2317
- def make_commands():
2318
- """Make webscout chained commands"""
2319
-
2320
- # generate
2321
- EntryGroup.webai_.add_command(ChatGenerate.generate)
2322
-
2323
- # webai
2324
- EntryGroup.webai_.add_command(Chatwebai.webai)
2325
-
2326
- # utils
2327
- EntryGroup.utils.add_command(Utils.update)
2328
- EntryGroup.utils.add_command(Utils.latest)
2329
-
2330
- # gpt4free
2331
- EntryGroup.gpt4free.add_command(Gpt4free.version)
2332
- EntryGroup.gpt4free.add_command(Gpt4free.update)
2333
- EntryGroup.gpt4free.add_command(Gpt4free.show)
2334
- EntryGroup.gpt4free.add_command(Gpt4free.gui)
2335
- EntryGroup.gpt4free.add_command(Gpt4free.test)
2336
-
2337
- # Awesome
2338
- EntryGroup.awesome.add_command(Awesome.add)
2339
- EntryGroup.awesome.add_command(Awesome.delete)
2340
- EntryGroup.awesome.add_command(Awesome.search)
2341
- EntryGroup.awesome.add_command(Awesome.update)
2342
- EntryGroup.awesome.add_command(Awesome.whole)
2343
-
2344
-
2345
- # @this.handle_exception
2346
- def main(*args):
2347
- """Fireup console programmically"""
2348
- sys.argv += list(args)
2349
- args = sys.argv
2350
- if len(args) == 1:
2351
- sys.argv.insert(1, "webai") # Just a hack to make default command
2352
- try:
2353
- make_commands()
2354
- return EntryGroup.webai_()
2355
- except Exception as e:
2356
- logging.error(this.getExc(e))
2357
- sys.exit(1)
2358
-
2359
-
2360
- if __name__ == "__main__":
1
+ import webscout
2
+ import click
3
+ import cmd
4
+ import logging
5
+ import os
6
+ import sys
7
+ import clipman
8
+ import re
9
+ import rich
10
+ import getpass
11
+ import json
12
+ import re
13
+ import sys
14
+ import datetime
15
+ import time
16
+ import subprocess
17
+ from threading import Thread as thr
18
+ from functools import wraps
19
+ from rich.panel import Panel
20
+ from rich.style import Style
21
+ from rich.markdown import Markdown
22
+ from rich.console import Console
23
+ from rich.live import Live
24
+ from rich.table import Table
25
+ from rich.prompt import Prompt
26
+ from rich.progress import Progress
27
+ from typing import Iterator
28
+ from webscout.AIutel import Optimizers
29
+ from webscout.AIutel import default_path
30
+ from webscout.AIutel import AwesomePrompts
31
+ from webscout.AIutel import RawDog
32
+ from webscout.AIutel import Audio
33
+ from webscout import available_providers
34
+ from colorama import Fore
35
+ from colorama import init as init_colorama
36
+ from dotenv import load_dotenv
37
+ import g4f
38
+
39
+ import webscout.AIutel
40
+
41
+ init_colorama(autoreset=True)
42
+
43
+ load_dotenv() # loads .env variables
44
+
45
+ logging.basicConfig(
46
+ format="%(asctime)s - %(levelname)s : %(message)s ",
47
+ datefmt="%H:%M:%S",
48
+ level=logging.INFO,
49
+ )
50
+
51
+ try:
52
+ clipman.init()
53
+ except Exception as e:
54
+ logging.debug(f"Dropping clipman in favor of pyperclip - {(e)}")
55
+ import pyperclip
56
+
57
+ clipman.set = pyperclip.copy
58
+ clipman.get = pyperclip.paste
59
+
60
+
61
+ class this:
62
+ """Console's common variables"""
63
+
64
+ rich_code_themes = ["monokai", "paraiso-dark", "igor", "vs", "fruity", "xcode"]
65
+
66
+ default_provider = "phind"
67
+
68
+ getExc = lambda e: e.args[1] if len(e.args) > 1 else str(e)
69
+
70
+ context_settings = dict(auto_envvar_prefix="Webscout")
71
+
72
+ """Console utils"""
73
+
74
+ @staticmethod
75
+ def run_system_command(
76
+ command: str, exit_on_error: bool = True, stdout_error: bool = True
77
+ ):
78
+ """Run commands against system
79
+ Args:
80
+ command (str): shell command
81
+ exit_on_error (bool, optional): Exit on error. Defaults to True.
82
+ stdout_error (bool, optional): Print out the error. Defaults to True.
83
+
84
+ Returns:
85
+         tuple : (is_successful, object[Exception|Subprocess.run])
86
+ """
87
+ try:
88
+ # Run the command and capture the output
89
+ result = subprocess.run(
90
+ command,
91
+ shell=True,
92
+ check=True,
93
+ text=True,
94
+ stdout=subprocess.PIPE,
95
+ stderr=subprocess.PIPE,
96
+ )
97
+ return (True, result)
98
+ except subprocess.CalledProcessError as e:
99
+ # Handle error if the command returns a non-zero exit code
100
+ if stdout_error:
101
+                 click.secho(f"Error occurred while running '{command}'", fg="yellow")
102
+ click.secho(e.stderr, fg="red")
103
+ sys.exit(e.returncode) if exit_on_error else None
104
+ return (False, e)
105
+
106
+ def g4f_providers_in_dict(
107
+ url=True,
108
+ working=True,
109
+ stream=False,
110
+ context=False,
111
+ gpt35=False,
112
+ gpt4=False,
113
+ selenium=False,
114
+ ):
115
+ from webscout.g4f import GPT4FREE
116
+ import g4f.Provider.selenium as selenium_based
117
+
118
+ selenium_based_providers: list = dir(selenium_based)
119
+ hunted_providers = []
120
+ required_attrs = (
121
+ "url",
122
+ "working",
123
+ "supports_gpt_35_turbo",
124
+ "supports_gpt_4",
125
+ "supports_stream",
126
+ "supports_message_history",
127
+ )
128
+
129
+ def sanitize_provider(provider: object):
130
+ for attr in required_attrs:
131
+ if not hasattr(provider, attr):
132
+ setattr(provider, attr, False)
133
+
134
+ return provider
135
+
136
+ for provider_name, provider_class in g4f.Provider.__map__.items():
137
+ provider = sanitize_provider(provider_class)
138
+ provider_meta = dict(name=provider_name)
139
+ if url:
140
+ provider_meta["url"] = provider.url
141
+ if working:
142
+ provider_meta["working"] = provider.working
143
+ if stream:
144
+ provider_meta["stream"] = provider.supports_stream
145
+ if context:
146
+ provider_meta["context"] = provider.supports_message_history
147
+ if gpt35:
148
+ provider_meta["gpt35_turbo"] = provider.supports_gpt_35_turbo
149
+ if gpt4:
150
+ provider_meta["gpt4"] = provider.supports_gpt_4
151
+ if selenium:
152
+ try:
153
+ selenium_based_providers.index(provider_meta["name"])
154
+ value = True
155
+ except ValueError:
156
+ value = False
157
+ provider_meta["non_selenium"] = value
158
+
159
+ hunted_providers.append(provider_meta)
160
+
161
+ return hunted_providers
162
+
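The helper above builds one metadata dict per g4f provider, adding only the keys that were requested. A minimal filtering sketch, assuming g4f is installed and this module is importable as webscout.webai (the import path is an assumption):

from webscout.webai import this  # import path assumed

# Only the requested keys (name, working, stream) are present in each dict
providers = this.g4f_providers_in_dict(url=False, working=True, stream=True)
print(sorted(p["name"] for p in providers if p["working"] and p["stream"]))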
163
+ @staticmethod
164
+ def stream_output(
165
+ iterable: Iterator,
166
+ title: str = "",
167
+ is_markdown: bool = True,
168
+ style: object = Style(),
169
+ transient: bool = False,
170
+ title_generator: object = None,
171
+ title_generator_params: dict = {},
172
+ code_theme: str = "monokai",
173
+ vertical_overflow: str = "ellipsis",
174
+ ) -> None:
175
+ """Stdout streaming response
176
+
177
+ Args:
178
+ iterable (Iterator): Iterator containing contents to be stdout
179
+ title (str, optional): Content title. Defaults to ''.
180
+ is_markdown (bool, optional): Flag for markdown content. Defaults to True.
181
+ style (object, optional): `rich.style` instance. Defaults to Style().
182
+ transient (bool, optional): Flag for transient. Defaults to False.
183
+ title_generator (object, optional): Function for generating title. Defaults to None.
184
+ title_generator_params (dict, optional): Kwargs for `title_generator` function. Defaults to {}.
185
+ code_theme (str, optional): Theme for styling codes. Defaults to `monokai`
186
+             vertical_overflow (str, optional): Vertical overflow behaviour on content display. Defaults to ellipsis.
187
+ """
188
+ render_this = ""
189
+ with Live(
190
+ render_this,
191
+ transient=transient,
192
+ refresh_per_second=8,
193
+ vertical_overflow=vertical_overflow,
194
+ ) as live:
195
+ for entry in iterable:
196
+ render_this += entry
197
+ live.update(
198
+ Panel(
199
+ (
200
+ Markdown(entry, code_theme=code_theme)
201
+ if is_markdown
202
+ else entry
203
+ ),
204
+ title=title,
205
+ style=style,
206
+ )
207
+ )
208
+ if title_generator:
209
+ title = title_generator(**title_generator_params)
210
+ live.update(
211
+ Panel(
212
+ Markdown(entry, code_theme=code_theme) if is_markdown else entry,
213
+ title=title,
214
+ style=style,
215
+ )
216
+ )
217
+
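A minimal sketch of driving stream_output directly. Each item is expected to be the cumulative response so far, which is the shape the providers used later in this file yield (the fake iterator below is illustrative only):

from webscout.webai import this  # import path assumed

def fake_stream():
    # Each chunk is the full response so far, not a delta
    yield "Hello"
    yield "Hello, world"
    yield "Hello, world!"

this.stream_output(fake_stream(), title="Demo", is_markdown=False)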
218
+ @staticmethod
219
+ def clear_history_file(file_path, is_true):
220
+ """When --new flag is True"""
221
+ if is_true and os.path.isfile(file_path):
222
+ try:
223
+ os.remove(file_path)
224
+ except Exception as e:
225
+ logging.error(
226
+ f"Failed to clear previous chat history - {this.getExc(e)}"
227
+ )
228
+
229
+ @staticmethod
230
+ def handle_exception(func):
231
+ """Safely handles cli-based exceptions and exit status-codes"""
232
+
233
+ @wraps(func)
234
+ def decorator(*args, **kwargs):
235
+ try:
236
+ exit_status = func(*args, **kwargs)
237
+ except Exception as e:
238
+ exit_status = False
239
+ logging.error(this.getExc(e))
240
+ finally:
241
+ sys.exit(0 if exit_status not in (False, "") else 1)
242
+
243
+ return decorator
244
+
245
+
246
+ class busy_bar:
247
+ querying = None
248
+ __spinner = (
249
+ (),
250
+ ("-", "\\", "|", "/"),
251
+ (
252
+ "█■■■■",
253
+ "■█■■■",
254
+ "■■█■■",
255
+ "■■■█■",
256
+ "■■■■█",
257
+ ),
258
+ ("⣾ ", "⣽ ", "⣻ ", "⢿ ", "⡿ ", "⣟ ", "⣯ ", "⣷ "),
259
+ )
260
+ spin_index = 0
261
+ sleep_time = 0.1
262
+
263
+ @classmethod
264
+ def __action(
265
+ cls,
266
+ ):
267
+ while cls.querying:
268
+ for spin in cls.__spinner[cls.spin_index]:
269
+ print(" " + spin, end="\r", flush=True)
270
+ if not cls.querying:
271
+ break
272
+ time.sleep(cls.sleep_time)
273
+
274
+ @classmethod
275
+ def start_spinning(
276
+ cls,
277
+ ):
278
+ try:
279
+ cls.querying = True
280
+ t1 = thr(
281
+ target=cls.__action,
282
+ args=(),
283
+ )
284
+ t1.start()
285
+ except Exception as e:
286
+ cls.querying = False
287
+ logging.debug(this.getExc(e))
288
+ t1.join()
289
+
290
+ @classmethod
291
+ def stop_spinning(cls):
292
+ """Stop displaying busy-bar"""
293
+ if cls.querying:
294
+ cls.querying = False
295
+ time.sleep(cls.sleep_time)
296
+
297
+ @classmethod
298
+ def run(cls, help: str = "Exception", index: int = None, immediate: bool = False):
299
+         """Handle function exceptions safely while showing busy bar
300
+
301
+ Args:
302
+             help (str, optional): Message to be shown in case of an exception. Defaults to 'Exception'.
303
+             index (int, optional): Busy bar's spin index. Defaults to None.
304
+ immediate (bool, optional): Start the spinning immediately. Defaults to False.
305
+ """
306
+ if isinstance(index, int):
307
+ cls.spin_index = index
308
+
309
+ def decorator(func):
310
+ @wraps(func) # Preserves function metadata
311
+ def main(*args, **kwargs):
312
+ try:
313
+ if immediate:
314
+ cls.start_spinning()
315
+ return func(*args, **kwargs)
316
+ except KeyboardInterrupt:
317
+ cls.stop_spinning()
318
+ return
319
+ except EOFError:
320
+ cls.querying = False
321
+ sys.exit(logging.info("Stopping program"))
322
+ except Exception as e:
323
+ logging.error(f"{help} - {this.getExc(e)}")
324
+ finally:
325
+ cls.stop_spinning()
326
+
327
+ return main
328
+
329
+ return decorator
330
+
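busy_bar.run is used throughout the command handlers below; a minimal sketch of decorating an arbitrary slow function with it (the function itself is made up for illustration):

import time

@busy_bar.run(help="While fetching data", index=3, immediate=True)
def slow_task():
    time.sleep(2)  # spinner animates while this sleeps
    return "done"

slow_task()  # the decorator stops the spinner in its finally block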
331
+
332
+ class Main(cmd.Cmd):
333
+ intro = (
334
+ "Welcome to webai Chat in terminal. "
335
+ "Type 'help' or 'h' for usage info.\n"
336
+ )
337
+
338
+ def __init__(
339
+ self,
340
+ max_tokens,
341
+ temperature,
342
+ top_k,
343
+ top_p,
344
+ model,
345
+ auth,
346
+ timeout,
347
+ disable_conversation,
348
+ filepath,
349
+ update_file,
350
+ intro,
351
+ history_offset,
352
+ awesome_prompt,
353
+ proxy_path,
354
+ provider,
355
+ quiet=False,
356
+ chat_completion=False,
357
+ ignore_working=False,
358
+ rawdog=False,
359
+ internal_exec=False,
360
+ confirm_script=False,
361
+ interpreter="python",
362
+ *args,
363
+ **kwargs,
364
+ ):
365
+ super().__init__(*args, **kwargs)
366
+ if proxy_path:
367
+ with open(proxy_path) as fh:
368
+ proxies = json.load(fh)
369
+ else:
370
+ proxies = {}
371
+
372
+ try:
373
+ getOr = lambda option, default: option if option else default
374
+
375
+ if rawdog:
376
+
377
+ self.RawDog = RawDog(
378
+ quiet=quiet,
379
+ internal_exec=internal_exec,
380
+ confirm_script=confirm_script,
381
+ interpreter=interpreter,
382
+ prettify=True,
383
+ )
384
+ intro = self.RawDog.intro_prompt
385
+ getpass.getuser = lambda: "RawDog"
386
+
387
+ if provider == "g4fauto":
388
+ from webscout.g4f import TestProviders
389
+
390
+ test = TestProviders(quiet=quiet, timeout=timeout)
391
+ g4fauto = test.best if ignore_working else test.auto
392
+ if isinstance(g4fauto, str):
393
+ provider = "g4fauto+" + g4fauto
394
+ from webscout.g4f import GPT4FREE
395
+
396
+ self.bot = GPT4FREE(
397
+ provider=g4fauto,
398
+ auth=auth,
399
+ max_tokens=max_tokens,
400
+ model=model,
401
+ chat_completion=chat_completion,
402
+ ignore_working=ignore_working,
403
+ timeout=timeout,
404
+ intro=intro,
405
+ filepath=filepath,
406
+ update_file=update_file,
407
+ proxies=proxies,
408
+ history_offset=history_offset,
409
+ act=awesome_prompt,
410
+ )
411
+ else:
412
+ raise Exception(
413
+ "No working g4f provider found. "
414
+ "Consider running 'webscout gpt4free test -y' first"
415
+ )
416
+
417
+ elif provider == "leo":
418
+ from webscout.AI import LEO
419
+
420
+ self.bot = LEO(
421
+ is_conversation=disable_conversation,
422
+ max_tokens=max_tokens,
423
+ temperature=temperature,
424
+ top_k=top_k,
425
+ top_p=top_p,
426
+ model=getOr(model, "llama-2-13b-chat"),
427
+ brave_key=getOr(auth, "qztbjzBqJueQZLFkwTTJrieu8Vw3789u"),
428
+ timeout=timeout,
429
+ intro=intro,
430
+ filepath=filepath,
431
+ update_file=update_file,
432
+ proxies=proxies,
433
+ history_offset=history_offset,
434
+ act=awesome_prompt,
435
+ )
436
+
437
+ elif provider == "openai":
438
+ assert auth, (
439
+ "OpenAI's API-key is required. " "Use the flag `--key` or `-k`"
440
+ )
441
+ from webscout.AI import OPENAI
442
+
443
+ self.bot = OPENAI(
444
+ api_key=auth,
445
+ is_conversation=disable_conversation,
446
+ max_tokens=max_tokens,
447
+ temperature=temperature,
448
+ presence_penalty=top_p,
449
+ frequency_penalty=top_k,
450
+ top_p=top_p,
451
+ model=getOr(model, model),
452
+ timeout=timeout,
453
+ intro=intro,
454
+ filepath=filepath,
455
+ update_file=update_file,
456
+ proxies=proxies,
457
+ history_offset=history_offset,
458
+ act=awesome_prompt,
459
+ )
460
+
461
+ elif provider == "opengpt":
462
+ from webscout.AI import OPENGPT
463
+
464
+ self.bot = OPENGPT(
465
+ is_conversation=disable_conversation,
466
+ max_tokens=max_tokens,
467
+ timeout=timeout,
468
+ intro=intro,
469
+ filepath=filepath,
470
+ update_file=update_file,
471
+ proxies=proxies,
472
+ history_offset=history_offset,
473
+ act=awesome_prompt,
474
+ )
475
+ elif provider == "groq":
476
+ assert auth, (
477
+ "GROQ's API-key is required. " "Use the flag `--key` or `-k`"
478
+ )
479
+ from webscout.AI import GROQ
480
+
481
+
482
+ self.bot = GROQ(
483
+ api_key=auth,
484
+ is_conversation=disable_conversation,
485
+ max_tokens=max_tokens,
486
+ temperature=temperature,
487
+ presence_penalty=top_p,
488
+ frequency_penalty=top_k,
489
+ top_p=top_p,
490
+ model=getOr(model, "mixtral-8x7b-32768"),
491
+ timeout=timeout,
492
+ intro=intro,
493
+ filepath=filepath,
494
+ update_file=update_file,
495
+ proxies=proxies,
496
+ history_offset=history_offset,
497
+ act=awesome_prompt,
498
+ )
499
+ elif provider == "sean":
500
+ from webscout.AI import Sean
501
+
502
+ self.bot = Sean(
503
+ is_conversation=disable_conversation,
504
+ max_tokens=max_tokens,
505
+ timeout=timeout,
506
+ intro=intro,
507
+ filepath=filepath,
508
+ update_file=update_file,
509
+ proxies=proxies,
510
+ history_offset=history_offset,
511
+ act=awesome_prompt,
512
+ )
513
+ elif provider == "cohere":
514
+ assert auth, (
515
+ "Cohere's API-key is required. Use the flag `--key` or `-k`"
516
+ )
517
+ from webscout.AI import Cohere
518
+ self.bot = Cohere(
519
+ api_key=auth,
520
+ is_conversation=disable_conversation,
521
+ max_tokens=max_tokens,
522
+ temperature=temperature,
523
+ top_k=top_k,
524
+ top_p=top_p,
525
+ model=getOr(model, "command-r-plus"),
526
+ timeout=timeout,
527
+ intro=intro,
528
+ filepath=filepath,
529
+ update_file=update_file,
530
+ proxies=proxies,
531
+ history_offset=history_offset,
532
+ act=awesome_prompt,
533
+ )
534
+ elif provider == "reka":
535
+ from webscout.AI import REKA
536
+
537
+ self.bot = REKA(
538
+ api_key=auth,
539
+ is_conversation=disable_conversation,
540
+ max_tokens=max_tokens,
541
+ timeout=timeout,
542
+ intro=intro,
543
+ filepath=filepath,
544
+ update_file=update_file,
545
+ proxies=proxies,
546
+ history_offset=history_offset,
547
+ act=awesome_prompt,
548
+ model=getOr(model, "reka-core"),
549
+ # quiet=quiet,
550
+ )
551
+
552
+ elif provider == "koboldai":
553
+ from webscout.AI import KOBOLDAI
554
+
555
+ self.bot = KOBOLDAI(
556
+ is_conversation=disable_conversation,
557
+ max_tokens=max_tokens,
558
+ temperature=temperature,
559
+ top_p=top_p,
560
+ timeout=timeout,
561
+ intro=intro,
562
+ filepath=filepath,
563
+ update_file=update_file,
564
+ proxies=proxies,
565
+ history_offset=history_offset,
566
+ act=awesome_prompt,
567
+ )
568
+
569
+ elif provider == "gemini":
570
+ from webscout.AI import GEMINI
571
+
572
+ assert auth, (
573
+ "Path to gemini.google.com.cookies.json file is required. "
574
+ "Use the flag `--key` or `-k`"
575
+ )
576
+ self.bot = GEMINI(
577
+ cookie_file=auth,
578
+ proxy=proxies,
579
+ timeout=timeout,
580
+ )
581
+
582
+ elif provider == "phind":
583
+ from webscout.AI import PhindSearch
584
+
585
+ self.bot = PhindSearch(
586
+ is_conversation=disable_conversation,
587
+ max_tokens=max_tokens,
588
+ timeout=timeout,
589
+ intro=intro,
590
+ filepath=filepath,
591
+ update_file=update_file,
592
+ proxies=proxies,
593
+ history_offset=history_offset,
594
+ act=awesome_prompt,
595
+ model=getOr(model, "Phind Model"),
596
+ quiet=quiet,
597
+ )
598
+
599
+ elif provider == "blackboxai":
600
+
601
+ from webscout.AI import BLACKBOXAI
602
+
603
+ self.bot = BLACKBOXAI(
604
+ is_conversation=disable_conversation,
605
+ max_tokens=max_tokens,
606
+ timeout=timeout,
607
+ intro=intro,
608
+ filepath=filepath,
609
+ update_file=update_file,
610
+ proxies=proxies,
611
+ history_offset=history_offset,
612
+ act=awesome_prompt,
613
+ )
614
+
615
+
616
+ elif provider in webscout.gpt4free_providers:
617
+ from webscout.g4f import GPT4FREE
618
+
619
+ self.bot = GPT4FREE(
620
+ provider=provider,
621
+ is_conversation=disable_conversation,
622
+ auth=auth,
623
+ max_tokens=max_tokens,
624
+ model=model,
625
+ chat_completion=chat_completion,
626
+ ignore_working=ignore_working,
627
+ timeout=timeout,
628
+ intro=intro,
629
+ filepath=filepath,
630
+ update_file=update_file,
631
+ proxies=proxies,
632
+ history_offset=history_offset,
633
+ act=awesome_prompt,
634
+ )
635
+
636
+
637
+ elif provider == "perplexity":
638
+ from webscout.AI import PERPLEXITY
639
+
640
+ self.bot = PERPLEXITY(
641
+ is_conversation=disable_conversation,
642
+ max_tokens=max_tokens,
643
+ timeout=timeout,
644
+ intro=intro,
645
+ filepath=filepath,
646
+ update_file=update_file,
647
+ proxies=proxies,
648
+ history_offset=history_offset,
649
+ act=awesome_prompt,
650
+ quiet=quiet,
651
+ )
652
+
653
+ else:
654
+ raise NotImplementedError(
655
+ f"The provider `{provider}` is not yet implemented."
656
+ )
657
+
658
+ except Exception as e:
659
+ logging.error(this.getExc(e))
660
+ click.secho("Quitting", fg="red")
661
+ sys.exit(1)
662
+ self.prettify = True
663
+ self.color = "cyan"
664
+ self.code_theme = "monokai"
665
+ self.quiet = quiet
666
+ self.vertical_overflow = "ellipsis"
667
+ self.disable_stream = False
668
+ self.provider = provider
669
+ self.disable_coloring = False
670
+ self.internal_exec = internal_exec
671
+ self.confirm_script = confirm_script
672
+ self.interpreter = interpreter
673
+ self.rawdog = rawdog
674
+ self.read_aloud = False
675
+ self.read_aloud_voice = "Brian"
676
+ self.path_to_last_response_audio = None
677
+ self.__init_time = time.time()
678
+ self.__start_time = time.time()
679
+ self.__end_time = time.time()
680
+
681
+ @property
682
+ def prompt(self):
683
+ current_time = datetime.datetime.now().strftime("%H:%M:%S")
684
+
685
+ def find_range(start, end, hms: bool = False):
686
+ in_seconds = round(end - start, 1)
687
+ return (
688
+ str(datetime.timedelta(seconds=in_seconds)).split(".")[0].zfill(8)
689
+ if hms
690
+ else in_seconds
691
+ )
692
+ if not self.disable_coloring:
693
+ cmd_prompt = (
694
+ f"╭─[`{Fore.GREEN}{getpass.getuser().capitalize()}@webai]`"
695
+ f"(`{Fore.YELLOW}{self.provider})`"
696
+ f"~[`{Fore.LIGHTWHITE_EX}⏰{Fore.MAGENTA}{current_time}-`"
697
+ f"{Fore.LIGHTWHITE_EX}💻{Fore.BLUE}{find_range(self.__init_time, time.time(), True)}-`"
698
+ f"{Fore.LIGHTWHITE_EX}⚡️{Fore.RED}{find_range(self.__start_time, self.__end_time)}s]`"
699
+ f"\n╰─>"
700
+ )
701
+ whitelist = ["[", "]", "~", "-", "(", ")"]
702
+ for character in whitelist:
703
+ cmd_prompt = cmd_prompt.replace(character + "`", Fore.RESET + character)
704
+ return cmd_prompt
705
+
706
+ else:
707
+ return (
708
+ f"╭─[{getpass.getuser().capitalize()}@webscout]({self.provider})"
709
+ f"~[⏰{current_time}"
710
+ f"-💻{find_range(self.__init_time, time.time(), True)}"
711
+ f"-⚡️{find_range(self.__start_time, self.__end_time)}s]"
715
+ "\n╰─>"
716
+ )
717
+
718
+ def output_bond(
719
+ self,
720
+ title: str,
721
+ text: str,
722
+ color: str = "cyan",
723
+ frame: bool = True,
724
+ is_json: bool = False,
725
+ ):
726
+ """Print prettified output
727
+
728
+ Args:
729
+ title (str): Title
730
+ text (str): Info to be printed
731
+ color (str, optional): Output color. Defaults to "cyan".
732
+ frame (bool, optional): Add frame. Defaults to True.
733
+ """
734
+ if is_json:
735
+ text = f"""
736
+ ```json
737
+ {json.dumps(text,indent=4)}
738
+ ```
739
+ """
740
+ rich.print(
741
+ Panel(
742
+ Markdown(text, code_theme=self.code_theme),
743
+ title=title.title(),
744
+ style=Style(
745
+ color=color,
746
+ frame=frame,
747
+ ),
748
+ ),
749
+ )
750
+ if is_json and click.confirm("Do you wish to save this"):
751
+ default_path = title + ".json"
752
+ save_to = click.prompt(
753
+ "Enter path to save to", default=default_path, type=click.STRING
754
+ )
755
+ with open(save_to, "a") as fh:
756
+ json.dump(text, fh, indent=4)
757
+             click.secho(f"Successfully saved to `{save_to}`", fg="green")
758
+
759
+ def do_h(self, line):
760
+ """Show help info in tabular form"""
761
+ table = Table(
762
+ title="Help info",
763
+ show_lines=True,
764
+ )
765
+ table.add_column("No.", style="white", justify="center")
766
+ table.add_column("Command", style="yellow", justify="left")
767
+ table.add_column("Function", style="cyan")
768
+ command_methods = [
769
+ getattr(self, method)
770
+ for method in dir(self)
771
+ if callable(getattr(self, method)) and method.startswith("do_")
772
+ ]
773
+ command_methods.append(self.default)
774
+ command_methods.reverse()
775
+ for no, method in enumerate(command_methods):
776
+ table.add_row(
777
+ str(no + 1),
778
+ method.__name__[3:] if not method == self.default else method.__name__,
779
+ method.__doc__,
780
+ )
781
+ Console().print(table)
782
+
783
+ @busy_bar.run("Settings saved")
784
+ def do_settings(self, line):
785
+ """Configure settings"""
786
+ self.prettify = click.confirm(
787
+ "\nPrettify markdown response", default=self.prettify
788
+ )
789
+ busy_bar.spin_index = click.prompt(
790
+ "Spin bar index [0: None, 1:/, 2:■█■■■, 3:⣻]",
791
+ default=busy_bar.spin_index,
792
+ type=click.IntRange(0, 3),
793
+ )
794
+ self.color = click.prompt(
795
+ "Response stdout font color", default=self.color or "white"
796
+ )
797
+ self.code_theme = Prompt.ask(
798
+ "Enter code_theme", choices=this.rich_code_themes, default=self.code_theme
799
+ )
800
+ self.vertical_overflow = Prompt.ask(
801
+ "\nVertical overflow behaviour",
802
+ choices=["ellipsis", "visible", "crop"],
803
+ default=self.vertical_overflow,
804
+ )
805
+ self.bot.max_tokens_to_sample = click.prompt(
806
+ "\nMaximum tokens to sample",
807
+ type=click.INT,
808
+ default=self.bot.max_tokens_to_sample,
809
+ )
810
+ self.bot.temperature = click.prompt(
811
+ "Temperature", type=click.FLOAT, default=self.bot.temperature
812
+ )
813
+ self.bot.top_k = click.prompt(
814
+ "Chance of topic being repeated, top_k",
815
+ type=click.FLOAT,
816
+ default=self.bot.top_k,
817
+ )
818
+ self.bot.top_p = click.prompt(
819
+ "Sampling threshold during inference time, top_p",
820
+ type=click.FLOAT,
821
+ default=self.bot.top_p,
822
+ )
823
+ self.bot.model = click.prompt(
824
+ "Model name", type=click.STRING, default=self.bot.model
825
+ )
826
+
827
+ @busy_bar.run(help="System error")
828
+ def do_copy_this(self, line):
829
+ """Copy last response
830
+ Usage:
831
+ copy_this:
832
+ text-copied = {whole last-response}
833
+ copy_this code:
834
+ text-copied = {All codes in last response}
835
+ """
836
+ if self.bot.last_response:
837
+ global last_response
838
+ last_response = self.bot.get_message(self.bot.last_response)
839
+             if "code" not in line:
840
+ clipman.set(last_response)
841
+ click.secho("Last response copied successfully!", fg="cyan")
842
+ return
843
+
844
+ # Copies just code
845
+ sanitized_codes = []
846
+ code_blocks = re.findall(r"```.*?```", last_response, re.DOTALL)
847
+ for code_block in code_blocks:
848
+ new_code_block = re.sub(
849
+ "^```.*$", "", code_block.strip(), flags=re.MULTILINE
850
+ )
851
+ if bool(new_code_block.strip()):
852
+ sanitized_codes.append(new_code_block)
853
+ if sanitized_codes:
854
+ if len(sanitized_codes) > 1:
855
+ if not click.confirm("Do you wish to copy all codes"):
856
+ for index, code in enumerate(sanitized_codes):
857
+ rich.print(
858
+ Panel(
859
+ Markdown(
860
+ code_blocks[index], code_theme=self.code_theme
861
+ ),
862
+ title=f"Index : {index}",
863
+ title_align="left",
864
+ )
865
+ )
866
+
867
+ clipman.set(
868
+ sanitized_codes[
869
+ click.prompt(
870
+ "Enter code index",
871
+ type=click.IntRange(0, len(sanitized_codes) - 1),
872
+ )
873
+ ]
874
+ )
875
+ click.secho("Code copied successfully", fg="cyan")
876
+ else:
877
+ clipman.set("\n\n".join(sanitized_codes))
878
+ click.secho(
879
+ f"All {len(sanitized_codes)} codes copied successfully!",
880
+ fg="cyan",
881
+ )
882
+ else:
883
+ clipman.set(sanitized_codes[0])
884
+ click.secho("Code copied successfully!", fg="cyan")
885
+ else:
886
+ click.secho("No code found in the last response!", fg="red")
887
+ else:
888
+ click.secho("Chat with AI first.", fg="yellow")
889
+
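The code-copying logic above boils down to two regexes; a standalone sketch of just that extraction step (the sample response string is made up):

import re

last_response = "Sure:\n```python\nprint('hi')\n```\nAnything else?"
code_blocks = re.findall(r"```.*?```", last_response, re.DOTALL)
codes = [
    re.sub("^```.*$", "", block.strip(), flags=re.MULTILINE).strip()
    for block in code_blocks
]
print(codes)  # ["print('hi')"]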
890
+ @busy_bar.run()
891
+ def do_with_copied(self, line):
892
+ """Attach last copied text to the prompt
893
+ Usage:
894
+             with_copied:
895
+ prompt = {text-copied}
896
+             with_copied Debug this code:
897
+ prompt = Debug this code {newline} {text-copied}
898
+ """
899
+ issued_prompt = (
900
+ f"{line}\n{clipman.get()}" if bool(line.strip()) else clipman.get()
901
+ )
902
+ click.secho(issued_prompt, fg="yellow")
903
+ if click.confirm("Do you wish to proceed"):
904
+ self.default(issued_prompt)
905
+
906
+ @busy_bar.run()
907
+ def do_code(self, line):
908
+ """Enhance prompt for code generation
909
+ usage :
910
+ code <Code description>
911
+ """
912
+ self.default(Optimizers.code(line))
913
+
914
+ @busy_bar.run()
915
+ def do_shell(self, line):
916
+ """Enhance prompt for system command (shell) generation
917
+ Usage:
918
+ shell <Action to be accomplished>
919
+ """
920
+ self.default(Optimizers.shell_command(line))
921
+ if click.confirm("Do you wish to run the command(s) generated in your system"):
922
+ self.do_sys(self.bot.get_message(self.bot.last_response))
923
+
924
+ @busy_bar.run("While changing directory")
925
+ def do_cd(self, line):
926
+ """Change directory
927
+ Usage :
928
+ cd <path-to-directory>
929
+ """
930
+ assert line, "File path is required"
931
+ os.chdir(line)
932
+
933
+ def do_clear(self, line):
934
+ """Clear console"""
935
+ sys.stdout.write("\u001b[2J\u001b[H")
936
+ sys.stdout.flush()
937
+
938
+ @busy_bar.run("While handling history")
939
+ def do_history(self, line):
940
+ """Show current conversation history"""
941
+ history = self.bot.conversation.chat_history
942
+ formatted_history = re.sub(
943
+ "\nLLM :",
944
+ "\n\n**LLM** :",
945
+ re.sub("\nUser :", "\n\n**User** :", history),
946
+ )
947
+ self.output_bond("Chat History", formatted_history, self.color)
948
+ if click.confirm("Do you wish to save this chat"):
949
+ save_to = click.prompt(
950
+ "Enter path/file-name", default="llama-conversation.txt"
951
+ )
952
+ with open(save_to, "a") as fh:
953
+ fh.write(history)
954
+ click.secho(f"Conversation saved successfully to '{save_to}'", fg="cyan")
955
+
956
+ @busy_bar.run("while resetting conversation")
957
+ def do_reset(self, line):
958
+ """Start new conversation thread"""
959
+ self.bot.conversation.chat_history = click.prompt(
960
+ "Introductory prompt", default=self.bot.conversation.intro
961
+ )
962
+ if hasattr(self.bot, "reset"):
963
+ self.bot.reset()
964
+ click.secho("Conversation reset successfully. New one created.", fg="cyan")
965
+
966
+ @busy_bar.run("while loading conversation")
967
+ def do_load(self, line):
968
+ """Load conversation history from file"""
969
+         history_file = click.prompt("Enter path to history file", default=line)
970
+ if not os.path.isfile(history_file):
971
+ click.secho(f"Path `{history_file}` does not exist!", fg="red")
972
+ return
973
+ with open(history_file) as fh:
974
+ self.bot.conversation.chat_history = fh.read()
975
+ click.secho("Conversation loaded successfully.", fg="cyan")
976
+
977
+ def do_last_response(self, line):
978
+ """Show whole last response in json format"""
979
+ self.output_bond(
980
+ "Last Response",
981
+ self.bot.last_response,
982
+ is_json=True,
983
+ )
984
+ @busy_bar.run(help="While rereading aloud", index=3, immediate=True)
985
+ def do_reread(self, line):
986
+ """Reread aloud last ai response"""
987
+ if not self.path_to_last_response_audio:
988
+ raise Exception("Path to last response audio is null")
989
+ Audio.play(self.path_to_last_response_audio)
990
+
991
+ @busy_bar.run()
992
+ def do_exec(self, line):
993
+ """Exec python code in last response with RawDog"""
994
+ last_response = self.bot.get_message(self.bot.last_response)
995
+ assert last_response, "Last response is null"
996
+ assert "```python" in last_response, "Last response has no python code"
997
+ if self.rawdog:
998
+ self.RawDog.main(last_response)
999
+ else:
1000
+ rawdog = RawDog(
1001
+ quiet=self.quiet,
1002
+ internal_exec=self.internal_exec,
1003
+ confirm_script=self.confirm_script,
1004
+ interpreter=self.interpreter,
1005
+ prettify=self.prettify,
1006
+ )
1007
+ rawdog.main(last_response)
1008
+
1009
+ @busy_bar.run()
1010
+ def do_rawdog(self, line):
1011
+ """Repeat executing last rawdog's python code"""
1012
+ assert self.rawdog, "Session not in rawdog mode. Restart with --rawdog"
1013
+ self.default(self.bot.get_message(self.bot.last_response))
1014
+
1015
+ @busy_bar.run()
1016
+ def default(self, line, exit_on_error: bool = False, normal_stdout: bool = False):
1017
+ """Chat with LLM"""
1018
+ if not bool(line):
1019
+ return
1020
+ if line.startswith("./"):
1021
+ os.system(line[2:])
1022
+
1023
+ elif self.rawdog:
1024
+ self.__start_time = time.time()
1025
+ busy_bar.start_spinning()
1026
+ ai_response = self.bot.chat(line, stream=False)
1027
+ busy_bar.stop_spinning()
1028
+ is_feedback = self.RawDog.main(ai_response)
1029
+ if is_feedback:
1030
+ return self.default(is_feedback)
1031
+ self.__end_time = time.time()
1032
+
1033
+ else:
1034
+ self.__start_time = time.time()
1035
+ try:
1036
+
1037
+ def generate_response():
1038
+ # Ensure response is yielded
1039
+ def for_stream():
1040
+ return self.bot.chat(line, stream=True)
1041
+
1042
+ def for_non_stream():
1043
+ yield self.bot.chat(line, stream=False)
1044
+
1045
+ return for_non_stream() if self.disable_stream else for_stream()
1046
+
1047
+ busy_bar.start_spinning()
1048
+ generated_response = generate_response()
1049
+
1050
+ if normal_stdout or not self.prettify and not self.disable_stream:
1051
+ cached_response: str = ""
1052
+ if not normal_stdout:
1053
+ busy_bar.stop_spinning()
1054
+ for response in generated_response:
1055
+ offset = len(cached_response)
1056
+ print(response[offset:], end="")
1057
+ cached_response = response
1058
+ if not normal_stdout:
1059
+ print("")
1060
+ return
1061
+
1062
+ if self.quiet:
1063
+ busy_bar.stop_spinning()
1064
+ console_ = Console()
1065
+ with Live(
1066
+ console=console_,
1067
+ refresh_per_second=16,
1068
+ vertical_overflow=self.vertical_overflow,
1069
+ ) as live:
1070
+ for response in generated_response:
1071
+ live.update(
1072
+ Markdown(response, code_theme=self.code_theme)
1073
+ if self.prettify
1074
+ else response
1075
+ )
1076
+ else:
1077
+ busy_bar.stop_spinning()
1078
+ this.stream_output(
1079
+ generated_response,
1080
+ title="AI Response",
1081
+ is_markdown=self.prettify,
1082
+ style=Style(
1083
+ color=self.color,
1084
+ ),
1085
+ code_theme=self.code_theme,
1086
+ vertical_overflow=self.vertical_overflow,
1087
+ )
1088
+ except (KeyboardInterrupt, EOFError):
1089
+ busy_bar.stop_spinning()
1090
+ print("")
1091
+ return False # Exit cmd
1092
+
1093
+ except Exception as e:
1094
+ # logging.exception(e)
1095
+ busy_bar.stop_spinning()
1096
+ logging.error(this.getExc(e))
1097
+ if exit_on_error:
1098
+ sys.exit(1)
1099
+
1100
+ else:
1101
+ self.post_default()
1102
+
1103
+ finally:
1104
+ self.__end_time = time.time()
1105
+ @busy_bar.run(help="While reading aloud", immediate=True, index=3)
1106
+ def post_default(self):
1107
+         """Actions to be taken after a successful, complete response generation triggered by the `default` method"""
1108
+ last_text: str = self.bot.get_message(self.bot.last_response)
1109
+ if self.read_aloud and last_text is not None:
1110
+ # Talk back to user
1111
+ self.path_to_last_response_audio = Audio.text_to_audio(
1112
+ last_text, voice=self.read_aloud_voice, auto=True
1113
+ )
1114
+ Audio.play(self.path_to_last_response_audio)
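A minimal sketch of the new read-aloud path in isolation, using only the Audio calls that appear in this file; their exact signatures are assumed from that usage:

from webscout.AIutel import Audio

# text_to_audio/play arguments assumed from their use in post_default above
audio_path = Audio.text_to_audio("Hello from webai", voice="Brian", auto=True)
Audio.play(audio_path)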
1115
+ def do_sys(self, line):
1116
+ """Execute system commands
1117
+ shortcut [./<command>]
1118
+ Usage:
1119
+ sys <System command>
1120
+ or
1121
+ ./<System command>
1122
+ """
1123
+ os.system(line)
1124
+
1125
+ def do_exit(self, line):
1126
+ """Quit this program"""
1127
+         if click.confirm("Are you sure you want to exit"):
1128
+ click.secho("Okay Goodbye!", fg="yellow")
1129
+ return True
1130
+
1131
+
1132
+ class EntryGroup:
1133
+ """Entry commands"""
1134
+
1135
+ # @staticmethod
1136
+ @click.group()
1137
+ @click.version_option(
1138
+ webscout.__version__, "-v", "--version", package_name="Webscout"
1139
+ )
1140
+ @click.help_option("-h", "--help")
1141
+ def webai_():
1142
+ pass
1143
+
1144
+ @staticmethod
1145
+ @webai_.group()
1146
+ @click.help_option("-h", "--help")
1147
+ def utils():
1148
+ """Utility endpoint for webscout"""
1149
+ pass
1150
+
1151
+ @staticmethod
1152
+ @webai_.group()
1153
+ @click.help_option("-h", "--help")
1154
+ def gpt4free():
1155
+ """Discover gpt4free models, providers etc"""
1156
+ pass
1157
+
1158
+ @staticmethod
1159
+ @webai_.group()
1160
+ @click.help_option("-h", "--help")
1161
+ def awesome():
1162
+ """Perform CRUD operations on awesome-prompts"""
1163
+ pass
1164
+
1165
+
1166
+ import webscout
1167
+ class Chatwebai:
1168
+ """webai command"""
1169
+
1170
+ @staticmethod
1171
+ @click.command(context_settings=this.context_settings)
1172
+ @click.option(
1173
+ "-m",
1174
+ "--model",
1175
+ help="Model name for text-generation", # default="llama-2-13b-chat"
1176
+ )
1177
+ @click.option(
1178
+ "-t",
1179
+ "--temperature",
1180
+         help="Degree of randomness in the generated text",
1181
+ type=click.FloatRange(0, 1),
1182
+ default=0.2,
1183
+ )
1184
+ @click.option(
1185
+ "-mt",
1186
+ "--max-tokens",
1187
+ help="Maximum number of tokens to be generated upon completion",
1188
+ type=click.INT,
1189
+ default=600,
1190
+ )
1191
+ @click.option(
1192
+ "-tp",
1193
+ "--top-p",
1194
+ help="Sampling threshold during inference time",
1195
+ type=click.FLOAT,
1196
+ default=0.999,
1197
+ )
1198
+ @click.option(
1199
+ "-tk",
1200
+ "--top-k",
1201
+ help="Chance of topic being repeated",
1202
+ type=click.FLOAT,
1203
+ default=0,
1204
+ )
1205
+ @click.option(
1206
+ "-k",
1207
+ "--key",
1208
+ help="LLM API access key or auth value or path to LLM with provider.",
1209
+ )
1210
+ @click.option(
1211
+ "-ct",
1212
+ "--code-theme",
1213
+ help="Theme for displaying codes in response",
1214
+ type=click.Choice(this.rich_code_themes),
1215
+ default="monokai",
1216
+ )
1217
+ @click.option(
1218
+ "-bi",
1219
+ "--busy-bar-index",
1220
+ help="Index of busy bar icon : [0: None, 1:/, 2:■█■■■, 3:⣻]",
1221
+ type=click.IntRange(0, 3),
1222
+ default=3,
1223
+ )
1224
+ @click.option("-fc", "--font-color", help="Stdout font color")
1225
+ @click.option(
1226
+ "-to", "--timeout", help="Http requesting timeout", type=click.INT, default=30
1227
+ )
1228
+ @click.argument("prompt", required=False)
1229
+ @click.option(
1230
+ "--prettify/--raw",
1231
+         help="Flag for prettifying the markdown response",
1232
+ default=True,
1233
+ )
1234
+ @click.option(
1235
+ "-dc",
1236
+ "--disable-conversation",
1237
+ is_flag=True,
1238
+ default=True, # is_conversation = True
1239
+ help="Disable chatting conversationally (Stable)",
1240
+ )
1241
+ @click.option(
1242
+ "-fp",
1243
+ "--filepath",
1244
+ type=click.Path(),
1245
+ default=os.path.join(default_path, "chat-history.txt"),
1246
+         help="Path to chat history - a new one will be created in case it doesn't exist",
1247
+ )
1248
+ @click.option(
1249
+ "--update-file/--retain-file",
1250
+ help="Controls updating chat history in file",
1251
+ default=True,
1252
+ )
1253
+ @click.option(
1254
+ "-i",
1255
+ "--intro",
1256
+ help="Conversation introductory prompt",
1257
+ )
1258
+ @click.option(
1259
+ "-ho",
1260
+ "--history-offset",
1261
+ help="Limit conversation history to this number of last texts",
1262
+ type=click.IntRange(100, 16000),
1263
+ default=10250,
1264
+ )
1265
+ @click.option(
1266
+ "-ap",
1267
+ "--awesome-prompt",
1268
+ default="0",
1269
+ callback=lambda ctx, param, value: (
1270
+ int(value) if str(value).isdigit() else value
1271
+ ),
1272
+ help="Awesome prompt key or index. Alt. to intro",
1273
+ )
1274
+ @click.option(
1275
+ "-pp",
1276
+ "--proxy-path",
1277
+ type=click.Path(exists=True),
1278
+ help="Path to .json file containing proxies",
1279
+ )
1280
+ @click.option(
1281
+ "-p",
1282
+ "--provider",
1283
+ type=click.Choice(available_providers),
1284
+ default=this.default_provider,
1285
+ help="Name of LLM provider.",
1286
+ metavar=(
1287
+ f"[{'|'.join(webscout.webai)}] etc, "
1288
+ "run 'webscout gpt4free list providers -w' to "
1289
+ "view more providers and 'webscout gpt4free test -y' "
1290
+ "for advanced g4f providers test"
1291
+ ),
1292
+ )
1293
+ @click.option(
1294
+ "-vo",
1295
+ "--vertical-overflow",
1296
+ help="Vertical overflow behaviour on content display",
1297
+ type=click.Choice(["visible", "crop", "ellipsis"]),
1298
+ default="ellipsis",
1299
+ )
1300
+ @click.option(
1301
+ "-w",
1302
+ "--whole",
1303
+ is_flag=True,
1304
+ default=False,
1305
+ help="Disable streaming response",
1306
+ )
1307
+ @click.option(
1308
+ "-q",
1309
+ "--quiet",
1310
+ is_flag=True,
1311
+ help="Flag for controlling response-framing and response verbosity",
1312
+ default=False,
1313
+ )
1314
+ @click.option(
1315
+ "-n",
1316
+ "--new",
1317
+ help="Overwrite the filepath contents",
1318
+ is_flag=True,
1319
+ )
1320
+ @click.option(
1321
+ "-wc",
1322
+ "--with-copied",
1323
+ is_flag=True,
1324
+ help="Postfix prompt with last copied text",
1325
+ )
1326
+ @click.option(
1327
+ "-nc", "--no-coloring", is_flag=True, help="Disable intro prompt font-coloring"
1328
+ )
1329
+ @click.option(
1330
+ "-cc",
1331
+ "--chat-completion",
1332
+ is_flag=True,
1333
+ help="Provide native context for gpt4free providers",
1334
+ )
1335
+ @click.option(
1336
+ "-iw",
1337
+ "--ignore-working",
1338
+ is_flag=True,
1339
+ help="Ignore working status of the provider",
1340
+ )
1341
+ @click.option(
1342
+ "-rd",
1343
+ "--rawdog",
1344
+ is_flag=True,
1345
+ help="Generate and auto-execute Python scripts - (experimental)",
1346
+ )
1347
+ @click.option(
1348
+ "-ix",
1349
+ "--internal-exec",
1350
+ is_flag=True,
1351
+ help="RawDog : Execute scripts with exec function instead of out-of-script interpreter",
1352
+ )
1353
+ @click.option(
1354
+ "-cs",
1355
+ "--confirm-script",
1356
+ is_flag=True,
1357
+ help="RawDog : Give consent to generated scripts prior to execution",
1358
+ )
1359
+ @click.option(
1360
+ "-int",
1361
+ "--interpreter",
1362
+ default="python",
1363
+ help="RawDog : Python's interpreter name",
1364
+ )
1365
+ @click.option(
1366
+ "-ttm",
1367
+ "--talk-to-me",
1368
+ is_flag=True,
1369
+         help="Read responses aloud once generation completes",
1370
+ )
1371
+ @click.option(
1372
+ "-ttmv",
1373
+ "--talk-to-me-voice",
1374
+ help="The voice to use for speech synthesis",
1375
+ type=click.Choice(Audio.all_voices),
1376
+ metavar="|".join(Audio.all_voices[:8]),
1377
+ default="Brian",
1378
+ )
1379
+ @click.help_option("-h", "--help")
1380
+ def webai(
1381
+ model,
1382
+ temperature,
1383
+ max_tokens,
1384
+ top_p,
1385
+ top_k,
1386
+ key,
1387
+ code_theme,
1388
+ busy_bar_index,
1389
+ font_color,
1390
+ timeout,
1391
+ prompt,
1392
+ prettify,
1393
+ disable_conversation,
1394
+ filepath,
1395
+ update_file,
1396
+ intro,
1397
+ history_offset,
1398
+ awesome_prompt,
1399
+ proxy_path,
1400
+ provider,
1401
+ vertical_overflow,
1402
+ whole,
1403
+ quiet,
1404
+ new,
1405
+ with_copied,
1406
+ no_coloring,
1407
+ chat_completion,
1408
+ ignore_working,
1409
+ rawdog,
1410
+ internal_exec,
1411
+ confirm_script,
1412
+ interpreter,
1413
+ talk_to_me,
1414
+ talk_to_me_voice,
1415
+ ):
1416
+         """Chat with AI interactively (Default)"""
1417
+ this.clear_history_file(filepath, new)
1418
+ bot = Main(
1419
+ max_tokens,
1420
+ temperature,
1421
+ top_k,
1422
+ top_p,
1423
+ model,
1424
+ key,
1425
+ timeout,
1426
+ disable_conversation,
1427
+ filepath,
1428
+ update_file,
1429
+ intro,
1430
+ history_offset,
1431
+ awesome_prompt,
1432
+ proxy_path,
1433
+ provider,
1434
+ quiet,
1435
+ chat_completion,
1436
+ ignore_working,
1437
+ rawdog=rawdog,
1438
+ internal_exec=internal_exec,
1439
+ confirm_script=confirm_script,
1440
+ interpreter=interpreter,
1441
+ )
1442
+ busy_bar.spin_index = busy_bar_index
1443
+ bot.code_theme = code_theme
1444
+ bot.color = font_color
1445
+ bot.disable_coloring = no_coloring
1446
+ bot.prettify = prettify
1447
+ bot.vertical_overflow = vertical_overflow
1448
+ bot.disable_stream = whole
1449
+ bot.read_aloud = talk_to_me
1450
+ bot.read_aloud_voice = talk_to_me_voice
1451
+ if prompt:
1452
+ if with_copied:
1453
+ prompt = prompt + "\n" + clipman.get()
1454
+ bot.default(prompt)
1455
+ bot.cmdloop()
1456
+
1457
+
1458
+ class ChatGenerate:
1459
+ """Generate command"""
1460
+
1461
+ @staticmethod
1462
+ @click.command(context_settings=this.context_settings)
1463
+ @click.option(
1464
+ "-m",
1465
+ "--model",
1466
+ help="Model name for text-generation",
1467
+ )
1468
+ @click.option(
1469
+ "-t",
1470
+ "--temperature",
1471
+         help="Degree of randomness in the generated text",
1472
+ type=click.FloatRange(0, 1),
1473
+ default=0.2,
1474
+ )
1475
+ @click.option(
1476
+ "-mt",
1477
+ "--max-tokens",
1478
+ help="Maximum number of tokens to be generated upon completion",
1479
+ type=click.INT,
1480
+ default=600,
1481
+ )
1482
+ @click.option(
1483
+ "-tp",
1484
+ "--top-p",
1485
+ help="Sampling threshold during inference time",
1486
+ type=click.FLOAT,
1487
+ default=0.999,
1488
+ )
1489
+ @click.option(
1490
+ "-tk",
1491
+ "--top-k",
1492
+ help="Chance of topic being repeated",
1493
+ type=click.FLOAT,
1494
+ default=0,
1495
+ )
1496
+ @click.option(
1497
+ "-k",
1498
+ "--key",
1499
+ help="LLM API access key or auth value or path to LLM with provider.",
1500
+ )
1501
+ @click.option(
1502
+ "-ct",
1503
+ "--code-theme",
1504
+ help="Theme for displaying codes in response",
1505
+ type=click.Choice(this.rich_code_themes),
1506
+ default="monokai",
1507
+ )
1508
+ @click.option(
1509
+ "-bi",
1510
+ "--busy-bar-index",
1511
+ help="Index of busy bar icon : [0: None, 1:/, 2:■█■■■, 3:⣻]",
1512
+ type=click.IntRange(0, 3),
1513
+ default=3,
1514
+ )
1515
+ @click.option(
1516
+ "-fc",
1517
+ "--font-color",
1518
+ help="Stdout font color",
1519
+ )
1520
+ @click.option(
1521
+ "-to", "--timeout", help="Http requesting timeout", type=click.INT, default=30
1522
+ )
1523
+ @click.argument("prompt", required=False)
1524
+ @click.option(
1525
+ "--prettify/--raw",
1526
+         help="Flag for prettifying the markdown response",
1527
+ default=True,
1528
+ )
1529
+ @click.option(
1530
+ "-w",
1531
+ "--whole",
1532
+ is_flag=True,
1533
+ default=False,
1534
+ help="Disable streaming response",
1535
+ )
1536
+ @click.option(
1537
+ "-c",
1538
+ "--code",
1539
+ is_flag=True,
1540
+ default=False,
1541
+ help="Optimize prompt for code generation",
1542
+ )
1543
+ @click.option(
1544
+ "-s",
1545
+ "--shell",
1546
+ is_flag=True,
1547
+ default=False,
1548
+ help="Optimize prompt for shell command generation",
1549
+ )
1550
+ @click.option(
1551
+ "-dc",
1552
+ "--disable-conversation",
1553
+ is_flag=True,
1554
+ default=True, # is_conversation = True
1555
+ help="Disable chatting conversationally (Stable)",
1556
+ )
1557
+ @click.option(
1558
+ "-fp",
1559
+ "--filepath",
1560
+ type=click.Path(),
1561
+ default=os.path.join(default_path, "chat-history.txt"),
1562
+         help="Path to chat history - a new one will be created in case it doesn't exist",
1563
+ )
1564
+ @click.option(
1565
+ "--update-file/--retain-file",
1566
+ help="Controls updating chat history in file",
1567
+ default=True,
1568
+ )
1569
+ @click.option(
1570
+ "-i",
1571
+ "--intro",
1572
+ help="Conversation introductory prompt",
1573
+ )
1574
+ @click.option(
1575
+ "-ho",
1576
+ "--history-offset",
1577
+ help="Limit conversation history to this number of last texts",
1578
+ type=click.IntRange(100, 16000),
1579
+ default=10250,
1580
+ )
1581
+ @click.option(
1582
+ "-ap",
1583
+ "--awesome-prompt",
1584
+ default="0",
1585
+ callback=lambda ctx, param, value: (
1586
+ int(value) if str(value).isdigit() else value
1587
+ ),
1588
+ help="Awesome prompt key or index. Alt. to intro",
1589
+ )
1590
+ @click.option(
1591
+ "-pp",
1592
+ "--proxy-path",
1593
+ type=click.Path(exists=True),
1594
+ help="Path to .json file containing proxies",
1595
+ )
1596
+ @click.option(
1597
+ "-p",
1598
+ "--provider",
1599
+ type=click.Choice(webscout.available_providers),
1600
+ default=this.default_provider,
1601
+ help="Name of LLM provider.",
1602
+ metavar=(
1603
+ f"[{'|'.join(webscout.webai)}] etc, "
1604
+ "run 'webscout gpt4free list providers -w' to "
1605
+ "view more providers and 'webscout gpt4free test -y' "
1606
+ "for advanced g4f providers test"
1607
+ ),
1608
+ )
1609
+ @click.option(
1610
+ "-vo",
1611
+ "--vertical-overflow",
1612
+ help="Vertical overflow behaviour on content display",
1613
+ type=click.Choice(["visible", "crop", "ellipsis"]),
1614
+ default="ellipsis",
1615
+ )
1616
+ @click.option(
1617
+ "-q",
1618
+ "--quiet",
1619
+ is_flag=True,
1620
+ help="Flag for controlling response-framing and response verbosity",
1621
+ default=False,
1622
+ )
1623
+ @click.option(
1624
+ "-n",
1625
+ "--new",
1626
+ help="Override the filepath contents",
1627
+ is_flag=True,
1628
+ )
1629
+ @click.option(
1630
+ "-wc",
1631
+ "--with-copied",
1632
+ is_flag=True,
1633
+ help="Postfix prompt with last copied text",
1634
+ )
1635
+ @click.option(
1636
+ "-iw",
1637
+ "--ignore-working",
1638
+ is_flag=True,
1639
+ help="Ignore working status of the provider",
1640
+ )
1641
+ @click.option(
1642
+ "-rd",
1643
+ "--rawdog",
1644
+ is_flag=True,
1645
+ help="Generate and auto-execute Python scripts - (experimental)",
1646
+ )
1647
+ @click.option(
1648
+ "-ix",
1649
+ "--internal-exec",
1650
+ is_flag=True,
1651
+ help="RawDog : Execute scripts with exec function instead of out-of-script interpreter",
1652
+ )
1653
+ @click.option(
1654
+ "-cs",
1655
+ "--confirm-script",
1656
+ is_flag=True,
1657
+ help="RawDog : Give consent to generated scripts prior to execution",
1658
+ )
1659
+ @click.option(
1660
+ "-int",
1661
+ "--interpreter",
1662
+ default="python",
1663
+ help="RawDog : Python's interpreter name",
1664
+ )
1665
+ @click.option(
1666
+ "-ttm",
1667
+ "--talk-to-me",
1668
+ is_flag=True,
1669
+ help="Audiolize responses upon complete generation",
1670
+ )
1671
+ @click.option(
1672
+ "-ttmv",
1673
+ "--talk-to-me-voice",
1674
+ help="The voice to use for speech synthesis",
1675
+ type=click.Choice(Audio.all_voices),
1676
+ metavar="|".join(Audio.all_voices[:8]),
1677
+ default="Brian",
1678
+ )
1679
+ @click.help_option("-h", "--help")
1680
+ def generate(
1681
+ model,
1682
+ temperature,
1683
+ max_tokens,
1684
+ top_p,
1685
+ top_k,
1686
+ key,
1687
+ code_theme,
1688
+ busy_bar_index,
1689
+ font_color,
1690
+ timeout,
1691
+ prompt,
1692
+ prettify,
1693
+ whole,
1694
+ code,
1695
+ shell,
1696
+ disable_conversation,
1697
+ filepath,
1698
+ update_file,
1699
+ intro,
1700
+ history_offset,
1701
+ awesome_prompt,
1702
+ proxy_path,
1703
+ provider,
1704
+ vertical_overflow,
1705
+ quiet,
1706
+ new,
1707
+ with_copied,
1708
+ ignore_working,
1709
+ rawdog,
1710
+ internal_exec,
1711
+ confirm_script,
1712
+ interpreter,
1713
+ talk_to_me,
1714
+ talk_to_me_voice,
1715
+ ):
1716
+ """Generate a quick response with AI"""
1717
+ this.clear_history_file(filepath, new)
1718
+ bot = Main(
1719
+ max_tokens,
1720
+ temperature,
1721
+ top_k,
1722
+ top_p,
1723
+ model,
1724
+ key,
1725
+ timeout,
1726
+ disable_conversation,
1727
+ filepath,
1728
+ update_file,
1729
+ intro,
1730
+ history_offset,
1731
+ awesome_prompt,
1732
+ proxy_path,
1733
+ provider,
1734
+ quiet,
1735
+ ignore_working=ignore_working,
1736
+ rawdog=rawdog,
1737
+ internal_exec=internal_exec,
1738
+ confirm_script=confirm_script,
1739
+ interpreter=interpreter,
1740
+ )
1741
+ prompt = prompt if prompt else ""
1742
+ copied_placeholder = "{{copied}}"
1743
+ stream_placeholder = "{{stream}}"
1744
+
1745
+ if with_copied or copied_placeholder in prompt:
1746
+ last_copied_text = clipman.get()
1747
+ assert last_copied_text, "No copied text found, issue prompt"
1748
+
1749
+ if copied_placeholder in prompt:
1750
+ prompt = prompt.replace(copied_placeholder, last_copied_text)
1751
+
1752
+ else:
1753
+ sep = "\n" if prompt else ""
1754
+ prompt = prompt + sep + last_copied_text
1755
+
1756
+ if not prompt and sys.stdin.isatty(): # No prompt issued and no piped input
1757
+ help_info = (
1758
+ "Usage: webscout generate [OPTIONS] PROMPT\n"
1759
+ "Try 'webscout generate --help' for help.\n"
1760
+ "Error: Missing argument 'PROMPT'."
1761
+ )
1762
+ click.secho(
1763
+ help_info
1764
+ ) # Let's try to mimic the click's missing argument help info
1765
+ sys.exit(1)
1766
+
1767
+ if not sys.stdin.isatty(): # Piped input detected - True
1768
+ # Let's try to read piped input
1769
+ stream_text = click.get_text_stream("stdin").read()
1770
+ if stream_placeholder in prompt:
1771
+ prompt = prompt.replace(stream_placeholder, stream_text)
1772
+ else:
1773
+ prompt = prompt + "\n" + stream_text if prompt else stream_text
1774
+
1775
+ assert stream_placeholder not in prompt, (
1776
+ "No piped input detected ~ " + stream_placeholder
1777
+ )
1778
+ assert copied_placeholder not in prompt, (
1779
+ "No copied text found ~ " + copied_placeholder
1780
+ )
1781
+
1782
+ prompt = Optimizers.code(prompt) if code else prompt
1783
+ prompt = Optimizers.shell_command(prompt) if shell else prompt
1784
+ busy_bar.spin_index = (
1785
+ 0 if quiet or not sys.stdout.isatty() else busy_bar_index
1786
+ )
1787
+ bot.code_theme = code_theme
1788
+ bot.color = font_color
1789
+ bot.prettify = prettify
1790
+ bot.vertical_overflow = vertical_overflow
1791
+ bot.disable_stream = whole
1792
+ bot.read_aloud = talk_to_me
1793
+ bot.read_aloud_voice = talk_to_me_voice
1794
+ bot.default(prompt, True, normal_stdout=(sys.stdout.isatty() == False))
1795
+
1796
+
1797
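
The `{{copied}}` / `{{stream}}` placeholder handling in `generate` above is easier to follow in isolation. Below is a minimal, standalone sketch of that prompt-resolution logic; it assumes pyperclip for clipboard access (the fallback this module already uses), and `resolve_prompt` is an illustrative name rather than part of webscout.

# Minimal sketch of the prompt-resolution logic used by `generate` above.
import sys
import pyperclip

COPIED = "{{copied}}"
STREAM = "{{stream}}"

def resolve_prompt(prompt: str, with_copied: bool = False) -> str:
    # Substitute or append the last copied text.
    if with_copied or COPIED in prompt:
        copied = pyperclip.paste()
        assert copied, "No copied text found"
        prompt = prompt.replace(COPIED, copied) if COPIED in prompt else f"{prompt}\n{copied}".strip()
    # Substitute or append piped stdin, e.g. `cat notes.txt | webscout generate "{{stream}}"`.
    if not sys.stdin.isatty():
        piped = sys.stdin.read()
        prompt = prompt.replace(STREAM, piped) if STREAM in prompt else f"{prompt}\n{piped}".strip()
    assert STREAM not in prompt and COPIED not in prompt, "Unresolved placeholder"
    return prompt
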
+ class Awesome:
1798
+ """Awesome commands"""
1799
+
1800
+ @staticmethod
1801
+ @click.command(context_settings=this.context_settings)
1802
+ @click.option(
1803
+ "-r",
1804
+ "--remote",
1805
+ help="Remote source to update from",
1806
+ default=AwesomePrompts.awesome_prompt_url,
1807
+ )
1808
+ @click.option(
1809
+ "-o",
1810
+ "--output",
1811
+ help="Path to save the prompts",
1812
+ default=AwesomePrompts.awesome_prompt_path,
1813
+ )
1814
+ @click.option(
1815
+ "-n", "--new", is_flag=True, help="Override the existing contents in path"
1816
+ )
1817
+ @click.help_option("-h", "--help")
1818
+ @this.handle_exception
1819
+ def update(remote, output, new):
1820
+ """Update awesome-prompts from remote source."""
1821
+ AwesomePrompts.awesome_prompt_url = remote
1822
+ AwesomePrompts.awesome_prompt_path = output
1823
+ AwesomePrompts().update_prompts_from_online(new)
1824
+ click.secho(
1825
+ f"Prompts saved to - '{AwesomePrompts.awesome_prompt_path}'", fg="cyan"
1826
+ )
1827
+
1828
+ @staticmethod
1829
+ @click.command(context_settings=this.context_settings)
1830
+ @click.argument(
1831
+ "key",
1832
+ required=True,
1833
+ type=click.STRING,
1834
+ )
1835
+ @click.option(
1836
+ "-d", "--default", help="Return this value if not found", default=None
1837
+ )
1838
+ @click.option(
1839
+ "-c",
1840
+ "--case-sensitive",
1841
+ default=True,
1842
+ flag_value=False,
1843
+ help="Perform case-sensitive search",
1844
+ )
1845
+ @click.option(
1846
+ "-f",
1847
+ "--file",
1848
+ type=click.Path(exists=True),
1849
+ help="Path to existing prompts",
1850
+ default=AwesomePrompts.awesome_prompt_path,
1851
+ )
1852
+ @click.help_option("-h", "--help")
1853
+ @this.handle_exception
1854
+ def search(
1855
+ key,
1856
+ default,
1857
+ case_sensitive,
1858
+ file,
1859
+ ):
1860
+ """Search for a particular awesome-prompt by key or index"""
1861
+ AwesomePrompts.awesome_prompt_path = file
1862
+ resp = AwesomePrompts().get_act(
1863
+ key,
1864
+ default=default,
1865
+ case_insensitive=case_sensitive,
1866
+ )
1867
+ if resp:
1868
+ click.secho(resp)
1869
+ return resp != default
1870
+
1871
+ @staticmethod
1872
+ @click.command(context_settings=this.context_settings)
1873
+ @click.option("-n", "--name", required=True, help="Prompt name")
1874
+ @click.option("-p", "--prompt", required=True, help="Prompt value")
1875
+ @click.option(
1876
+ "-f",
1877
+ "--file",
1878
+ type=click.Path(exists=True),
1879
+ help="Path to existing prompts",
1880
+ default=AwesomePrompts.awesome_prompt_path,
1881
+ )
1882
+ @click.help_option("-h", "--help")
1883
+ @this.handle_exception
1884
+ def add(name, prompt, file):
1885
+ """Add new prompt to awesome-prompt list"""
1886
+ AwesomePrompts.awesome_prompt_path = file
1887
+ return AwesomePrompts().add_prompt(name, prompt)
1888
+
1889
+ @staticmethod
1890
+ @click.command(context_settings=this.context_settings)
1891
+ @click.argument("name")
1892
+ @click.option(
1893
+ "--case-sensitive",
1894
+ is_flag=True,
1895
+ flag_value=False,
1896
+ default=True,
1897
+ help="Perform name case-sensitive search",
1898
+ )
1899
+ @click.option(
1900
+ "-f",
1901
+ "--file",
1902
+ type=click.Path(exists=True),
1903
+ help="Path to existing prompts",
1904
+ default=AwesomePrompts.awesome_prompt_path,
1905
+ )
1906
+ @click.help_option("-h", "--help")
1907
+ @this.handle_exception
1908
+ def delete(name, case_sensitive, file):
1909
+ """Delete a specific awesome-prompt"""
1910
+ AwesomePrompts.awesome_prompt_path = file
1911
+ return AwesomePrompts().delete_prompt(name, case_sensitive)
1912
+
1913
+ @staticmethod
1914
+ @click.command(context_settings=this.context_settings)
1915
+ @click.option(
1916
+ "-j",
1917
+ "--json",
1918
+ is_flag=True,
1919
+ help="Display prompts in json format",
1920
+ )
1921
+ @click.option(
1922
+ "-i",
1923
+ "--indent",
1924
+ type=click.IntRange(1, 20),
1925
+ help="Json format indentation level",
1926
+ default=4,
1927
+ )
1928
+ @click.option(
1929
+ "-x",
1930
+ "--index",
1931
+ is_flag=True,
1932
+ help="Display prompts with their corresponding indexes",
1933
+ )
1934
+ @click.option("-c", "--color", help="Prompts stdout font color")
1935
+ @click.option("-o", "--output", type=click.Path(), help="Path to save the prompts")
1936
+ @click.help_option("-h", "--help")
1937
+ def whole(json, indent, index, color, output):
1938
+ """Stdout all awesome prompts"""
1939
+ ap = AwesomePrompts()
1940
+ awesome_prompts = ap.all_acts if index else ap.get_acts()
1941
+
1942
+ if json:
1943
+ # click.secho(formatted_awesome_prompts, fg=color)
1944
+ rich.print_json(data=awesome_prompts, indent=indent)
1945
+
1946
+ else:
1947
+ awesome_table = Table(show_lines=True, title="All Awesome-Prompts")
1948
+ awesome_table.add_column("index", justify="center", style="yellow")
1949
+ awesome_table.add_column("Act Name/Index", justify="left", style="cyan")
1950
+ awesome_table.add_column(
1951
+ "Prompt",
1952
+ style=color,
1953
+ )
1954
+ for index, key_value in enumerate(awesome_prompts.items()):
1955
+ awesome_table.add_row(str(index), str(key_value[0]), key_value[1])
1956
+ rich.print(awesome_table)
1957
+
1958
+ if output:
1959
+ from json import dump
1960
+
1961
+ with open(output, "w") as fh:
1962
+ dump(awesome_prompts, fh, indent=4)
1963
+
1964
+
1965
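
The same `AwesomePrompts` helpers that back these subcommands can also be driven directly from Python. A minimal sketch, assuming the methods behave exactly as they are called above; the act names are example values only.

# Minimal sketch of driving AwesomePrompts directly, mirroring the CLI above.
from webscout.AIutel import AwesomePrompts

ap = AwesomePrompts()
acts = ap.get_acts()                                        # dict of {act name: prompt}
act = ap.get_act("UNIX Terminal", default=None, case_insensitive=True)
ap.add_prompt("my-act", "You are a helpful assistant ...")  # add a custom prompt
ap.delete_prompt("my-act", True)                            # same (name, case_sensitive) order as `delete`
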
+ class Gpt4free:
1966
+ """Commands for gpt4free"""
1967
+
1968
+ @staticmethod
1969
+ @click.command(context_settings=this.context_settings)
1970
+ @busy_bar.run(index=1, immediate=True)
1971
+ @click.help_option("-h", "--help")
1972
+ def version():
1973
+ """Check current installed version of gpt4free"""
1974
+ version_string = this.run_system_command("pip show g4f")[1].stdout.split("\n")[
1975
+ 1
1976
+ ]
1977
+ click.secho(version_string, fg="cyan")
1978
+
1979
+ @staticmethod
1980
+ @click.command(context_settings=this.context_settings)
1981
+ @click.help_option("-h", "--help")
1982
+ @click.option(
1983
+ "-e",
1984
+ "--extra",
1985
+ help="Extra required dependencies category",
1986
+ multiple=True,
1987
+ type=click.Choice(
1988
+ ["all", "image", "webdriver", "openai", "api", "gui", "none"]
1989
+ ),
1990
+ default=["all"],
1991
+ )
1992
+ @click.option("-l", "--log", is_flag=True, help="Stdout installation logs")
1993
+ @click.option(
1994
+ "-s",
1995
+ "--sudo",
1996
+ is_flag=True,
1997
+ flag_value="sudo ",
1998
+ help="Install with sudo privileges",
1999
+ )
2000
+ @busy_bar.run(index=1, immediate=True)
2001
+ def update(extra, log, sudo):
2002
+ """Update GPT4FREE package (Models, Providers etc)"""
2003
+ if "none" in extra:
2004
+ command = f"{sudo or ''}pip install --upgrade g4f"
2005
+ else:
2006
+ command = f"{sudo or ''}pip install --upgrade g4f[{','.join(extra)}]"
2007
+ is_successful, response = this.run_system_command(command)
2008
+ if log and is_successful:
2009
+ click.echo(response.stdout)
2010
+ version_string = this.run_system_command("pip show g4f")[1].stdout.split("\n")[
2011
+ 1
2012
+ ]
2013
+ click.secho(f"GPT4FREE updated successfully - {version_string}", fg="cyan")
2014
+
2015
+ @staticmethod
2016
+ @click.command("list", context_settings=this.context_settings)
2017
+ @click.argument("target")
2018
+ @click.option("-w", "--working", is_flag=True, help="Restrict to working providers")
2019
+ @click.option("-u", "--url", is_flag=True, help="Restrict to providers with url")
2020
+ @click.option(
2021
+ "-s", "--stream", is_flag=True, help="Restrict to providers supporting stream"
2022
+ )
2023
+ @click.option(
2024
+ "-c",
2025
+ "--context",
2026
+ is_flag=True,
2027
+ help="Restrict to providers supporing context natively",
2028
+ )
2029
+ @click.option(
2030
+ "-35",
2031
+ "--gpt35",
2032
+ is_flag=True,
2033
+ help="Restrict to providers supporting gpt3.5_turbo model",
2034
+ )
2035
+ @click.option(
2036
+ "-4", "--gpt4", is_flag=True, help="Restrict to providers supporting gpt4 model"
2037
+ )
2038
+ @click.option(
2039
+ "-se",
2040
+ "--selenium",
2041
+ is_flag=True,
2042
+ help="Restrict to selenium dependent providers",
2043
+ )
2044
+ @click.option("-j", "--json", is_flag=True, help="Format output in json")
2045
+ @click.help_option("-h", "--help")
2046
+ def show(target, working, url, stream, context, gpt35, gpt4, selenium, json):
2047
+ """List available models and providers"""
2048
+ available_targets = ["models", "providers"]
2049
+ assert (
2050
+ target in available_targets
2051
+ ), f"Target must be one of [{', '.join(available_targets)}]"
2052
+ if target == "providers":
2053
+ hunted_providers = list(
2054
+ set(
2055
+ map(
2056
+ lambda provider: (
2057
+ provider["name"] if all(list(provider.values())) else None
2058
+ ),
2059
+ this.g4f_providers_in_dict(
2060
+ url=url,
2061
+ working=working,
2062
+ stream=stream,
2063
+ context=context,
2064
+ gpt35=gpt35,
2065
+ gpt4=gpt4,
2066
+ selenium=selenium,
2067
+ ),
2068
+ )
2069
+ )
2070
+ )
2071
+ while None in hunted_providers:
2072
+ hunted_providers.remove(None)
2073
+
2074
+ hunted_providers.sort()
2075
+ if json:
2076
+ rich.print_json(data=dict(providers=hunted_providers), indent=4)
2077
+
2078
+ else:
2079
+ table = Table(show_lines=True)
2080
+ table.add_column("No.", style="yellow", justify="center")
2081
+ table.add_column("Provider", style="cyan")
2082
+ for no, provider in enumerate(hunted_providers):
2083
+ table.add_row(str(no), provider)
2084
+ rich.print(table)
2085
+ else:
2086
+ models = dict(
2087
+ Bard=[
2088
+ "palm",
2089
+ ],
2090
+ HuggingFace=[
2091
+ "h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
2092
+ "h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
2093
+ "h2ogpt-gm-oasst1-en-2048-open-llama-13b",
2094
+ "gpt-neox-20b",
2095
+ "oasst-sft-1-pythia-12b",
2096
+ "oasst-sft-4-pythia-12b-epoch-3.5",
2097
+ "santacoder",
2098
+ "bloom",
2099
+ "flan-t5-xxl",
2100
+ ],
2101
+ Anthropic=[
2102
+ "claude-instant-v1",
2103
+ "claude-v1",
2104
+ "claude-v2",
2105
+ ],
2106
+ Cohere=[
2107
+ "command-light-nightly",
2108
+ "command-nightly",
2109
+ ],
2110
+ OpenAI=[
2111
+ "code-davinci-002",
2112
+ "text-ada-001",
2113
+ "text-babbage-001",
2114
+ "text-curie-001",
2115
+ "text-davinci-002",
2116
+ "text-davinci-003",
2117
+ "gpt-3.5-turbo-16k",
2118
+ "gpt-3.5-turbo-16k-0613",
2119
+ "gpt-4-0613",
2120
+ ],
2121
+ Replicate=[
2122
+ "llama13b-v2-chat",
2123
+ "llama7b-v2-chat",
2124
+ ],
2125
+ )
2126
+ for provider in webscout.g4f.Provider.__providers__:
2127
+ if hasattr(provider, "models"):
2128
+ models[provider.__name__] = provider.models
2129
+ if json:
2130
+ for key, value in models.items():
2131
+ while None in value:
2132
+ value.remove(None)
2133
+ value.sort()
2134
+ models[key] = value
2135
+
2136
+ rich.print_json(data=models, indent=4)
2137
+ else:
2138
+ table = Table(show_lines=True)
2139
+ table.add_column("No.", justify="center", style="white")
2140
+ table.add_column("Base Provider", style="cyan")
2141
+ table.add_column("Model(s)", style="yellow")
2142
+ for count, provider_models in enumerate(models.items()):
2143
+ models = provider_models[1]
2144
+ models.sort()
2145
+ table.add_row(str(count), provider_models[0], "\n".join(models))
2146
+ rich.print(table)
2147
+
2148
+ @staticmethod
2149
+ @click.command(context_settings=this.context_settings)
2150
+ @click.argument("port", type=click.INT, required=False)
2151
+ @click.option(
2152
+ "-a", "--address", help="Host on this particular address", default="127.0.0.1"
2153
+ )
2154
+ @click.option("-d", "--debug", is_flag=True, help="Start server in debug mode")
2155
+ @click.option(
2156
+ "-o", "--open", is_flag=True, help="Proceed to the interface immediately"
2157
+ )
2158
+ @click.help_option("-h", "--help")
2159
+ def gui(port, address, debug, open):
2160
+ """Launch gpt4free web interface"""
2161
+ from g4f.gui import run_gui
2162
+
2163
+ port = port or 8000
2164
+ t1 = thr(
2165
+ target=run_gui,
2166
+ args=(
2167
+ address,
2168
+ port,
2169
+ debug,
2170
+ ),
2171
+ )
2172
+ # run_gui(host=address, port=port, debug=debug)
2173
+ t1.start()
2174
+ if open:
2175
+ click.launch(f"http://{address}:{port}")
2176
+ t1.join()
2177
+
2178
+ @staticmethod
2179
+ @click.command(context_settings=this.context_settings)
2180
+ @click.option(
2181
+ "-t",
2182
+ "--timeout",
2183
+ type=click.INT,
2184
+ help="Provider's response generation timeout",
2185
+ default=20,
2186
+ )
2187
+ @click.option(
2188
+ "-r",
2189
+ "--thread",
2190
+ type=click.INT,
2191
+ help="Test n amount of providers at once",
2192
+ default=5,
2193
+ )
2194
+ @click.option("-q", "--quiet", is_flag=True, help="Suppress progress bar")
2195
+ @click.option(
2196
+ "-j", "--json", is_flag=True, help="Stdout test results in json format"
2197
+ )
2198
+ @click.option("-d", "--dry-test", is_flag=True, help="Return previous test results")
2199
+ @click.option(
2200
+ "-b", "--best", is_flag=True, help="Stdout the fastest provider <name only>"
2201
+ )
2202
+ @click.option(
2203
+ "-se",
2204
+ "--selenium",
2205
+ help="Test even selenium dependent providers",
2206
+ is_flag=True,
2207
+ )
2208
+ @click.option(
2209
+ "-dl",
2210
+ "--disable-logging",
2211
+ is_flag=True,
2212
+ help="Disable logging",
2213
+ )
2214
+ @click.option("-y", "--yes", is_flag=True, help="Okay to all confirmations")
2215
+ @click.help_option("-h", "--help")
2216
+ def test(
2217
+ timeout, thread, quiet, json, dry_test, best, selenium, disable_logging, yes
2218
+ ):
2219
+ """Test and save working providers"""
2220
+ from webscout.g4f import TestProviders
2221
+
2222
+ test = TestProviders(
2223
+ test_at_once=thread,
2224
+ quiet=quiet,
2225
+ timeout=timeout,
2226
+ selenium=selenium,
2227
+ do_log=not disable_logging,
2228
+ )
2229
+ if best:
2230
+ click.secho(test.best)
2231
+ return
2232
+ elif dry_test:
2233
+ results = test.get_results(
2234
+ run=False,
2235
+ )
2236
+ else:
2237
+ if (
2238
+ yes
2239
+ or os.path.isfile(webscout.AIutel.results_path)
2240
+ and click.confirm("Are you sure you want to run a new test")
2241
+ ):
2242
+ results = test.get_results(run=True)
2243
+ else:
2244
+ results = test.get_results(
2245
+ run=False,
2246
+ )
2247
+ if json:
2248
+ rich.print_json(data=dict(results=results))
2249
+ else:
2250
+ table = Table(
2251
+ title="G4f Providers Test Results",
2252
+ show_lines=True,
2253
+ )
2254
+ table.add_column("No.", style="white", justify="center")
2255
+ table.add_column("Provider", style="yellow", justify="left")
2256
+ table.add_column("Response Time(s)", style="cyan")
2257
+
2258
+ for no, provider in enumerate(results, start=1):
2259
+ table.add_row(
2260
+ str(no), provider["name"], str(round(provider["time"], 2))
2261
+ )
2262
+ rich.print(table)
2263
+
2264
+
2265
+
2266
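
The `test` subcommand above is a thin wrapper around `webscout.g4f.TestProviders`. A minimal sketch of the same calls, assuming only the constructor arguments and methods exercised in the command body:

# Minimal sketch of what `webscout gpt4free test` does under the hood.
from webscout.g4f import TestProviders

tester = TestProviders(test_at_once=5, quiet=False, timeout=20, selenium=False, do_log=True)
results = tester.get_results(run=True)        # run a fresh test; run=False loads cached results
print(tester.best)                            # fastest working provider (name only)
for entry in results:
    print(entry["name"], round(entry["time"], 2))
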
+ @staticmethod
2267
+ @click.command(context_settings=this.context_settings)
2268
+ @click.argument("prompt")
2269
+ @click.option(
2270
+ "-d",
2271
+ "--directory",
2272
+ type=click.Path(exists=True),
2273
+ help="Folder for saving the images",
2274
+ default=os.getcwd(),
2275
+ )
2276
+ @click.option(
2277
+ "-a",
2278
+ "--amount",
2279
+ type=click.IntRange(1, 100),
2280
+ help="Total images to be generated",
2281
+ default=1,
2282
+ )
2283
+ @click.option("-n", "--name", help="Name for the generated images")
2284
+ @click.option(
2285
+ "-t",
2286
+ "--timeout",
2287
+ type=click.IntRange(5, 300),
2288
+ help="Http request timeout in seconds",
2289
+ )
2290
+ @click.option("-p", "--proxy", help="Http request proxy")
2291
+ @click.option(
2292
+ "-nd",
2293
+ "--no-additives",
2294
+ is_flag=True,
2295
+ help="Disable prompt altering for effective image generation",
2296
+ )
2297
+ @click.option("-q", "--quiet", is_flag=True, help="Suppress progress bar")
2298
+ @click.help_option("-h", "--help")
2299
+ def generate_image(
2300
+ prompt, directory, amount, name, timeout, proxy, no_additives, quiet
2301
+ ):
2302
+ """Generate images with pollinations.ai"""
2303
+ with Progress() as progress:
2304
+ task = progress.add_task(
2305
+ f"[cyan]Generating ...[{amount}]",
2306
+ total=amount,
2307
+ visible=not quiet,
2308
+ )
2309
+
2310
+
2311
+
2312
+ class Utils:
2313
+ """Utilities command"""
2314
+
2315
+ @staticmethod
2316
+ @click.command(context_settings=this.context_settings)
2317
+ @click.argument("source", required=False)
2318
+ @click.option(
2319
+ "-d", "--dev", is_flag=True, help="Update from version control (development)"
2320
+ )
2321
+ @click.option(
2322
+ "-s",
2323
+ "--sudo",
2324
+ is_flag=True,
2325
+ flag_value="sudo ",
2326
+ help="Install with sudo privileges",
2327
+ )
2328
+ @click.help_option("-h", "--help")
2329
+ @busy_bar.run(index=1, immediate=True)
2330
+ def update(source, dev, sudo):
2331
+ """Install latest version of webscout"""
2332
+ if dev:
2333
+ source = "git+" + webscout.__repo__ + ".git"
2334
+ source = "webscout" if source is None else source
2335
+ assert (
2336
+ "tgpt" in source or source == "."
2337
+ ), f"Cannot update webscout from the source '{source}'"
2338
+ click.secho(
2339
+ f"[*] Updating from '{'pip' if source=='webscout' else source}'",
2340
+ fg="yellow",
2341
+ )
2342
+ this.run_system_command(f"{sudo or ''}pip install --upgrade {source}")
2343
+ response = this.run_system_command("pip show webscout")[1]
2344
+ click.secho(response.stdout)
2345
+ click.secho("Congratulations! webscout updated successfully.", fg="cyan")
2346
+
2347
+ @staticmethod
2348
+ @click.command(context_settings=this.context_settings)
2349
+ @click.option("-w", "--whole", is_flag=True, help="Stdout whole json info")
2350
+ @click.option(
2351
+ "-v", "--version", is_flag=True, help="Stdout latest version name only"
2352
+ )
2353
+ @click.option("-b", "--body", is_flag=True, help="Stdout changelog info only")
2354
+ @click.option(
2355
+ "-e", "--executable", is_flag=True, help="Stdout url to binary for your system"
2356
+ )
2357
+ @click.help_option("-h", "--help")
2358
+ def latest(whole, version, body, executable):
2359
+ """Check webscout latest version info"""
2360
+ from webscout.utils import Updates
2361
+
2362
+ update = Updates()
2363
+ if whole:
2364
+ rich.print_json(data=update.latest(whole=True))
2365
+
2366
+ elif version:
2367
+ rich.print(update.latest_version)
2368
+ elif body:
2369
+ rich.print(Markdown(update.latest()["body"]))
2370
+ elif executable:
2371
+ rich.print(update.executable())
2372
+ else:
2373
+ rich.print_json(data=update.latest())
2374
+
2375
+
2376
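
Likewise, `latest` delegates to `webscout.utils.Updates`. A minimal sketch using only the attributes called above:

# Minimal sketch of the Updates helper backing `webscout utils latest`.
from webscout.utils import Updates

update = Updates()
info = update.latest(whole=True)     # full release metadata as a dict
print(update.latest_version)         # latest version name only
print(update.latest()["body"])       # changelog (markdown)
print(update.executable())           # URL to a binary for this platform
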
+ def make_commands():
2377
+ """Make webscout chained commands"""
2378
+
2379
+ # generate
2380
+ EntryGroup.webai_.add_command(ChatGenerate.generate)
2381
+
2382
+ # webai
2383
+ EntryGroup.webai_.add_command(Chatwebai.webai)
2384
+
2385
+ # utils
2386
+ EntryGroup.utils.add_command(Utils.update)
2387
+ EntryGroup.utils.add_command(Utils.latest)
2388
+
2389
+ # gpt4free
2390
+ EntryGroup.gpt4free.add_command(Gpt4free.version)
2391
+ EntryGroup.gpt4free.add_command(Gpt4free.update)
2392
+ EntryGroup.gpt4free.add_command(Gpt4free.show)
2393
+ EntryGroup.gpt4free.add_command(Gpt4free.gui)
2394
+ EntryGroup.gpt4free.add_command(Gpt4free.test)
2395
+
2396
+ # Awesome
2397
+ EntryGroup.awesome.add_command(Awesome.add)
2398
+ EntryGroup.awesome.add_command(Awesome.delete)
2399
+ EntryGroup.awesome.add_command(Awesome.search)
2400
+ EntryGroup.awesome.add_command(Awesome.update)
2401
+ EntryGroup.awesome.add_command(Awesome.whole)
2402
+
2403
+
2404
+ # @this.handle_exception
2405
+ def main(*args):
2406
+ """Fireup console programmically"""
2407
+ sys.argv += list(args)
2408
+ args = sys.argv
2409
+ if len(args) == 1:
2410
+ sys.argv.insert(1, "webai") # hack: make 'webai' the default command
2411
+ try:
2412
+ make_commands()
2413
+ return EntryGroup.webai_()
2414
+ except Exception as e:
2415
+ logging.error(this.getExc(e))
2416
+ sys.exit(1)
2417
+
2418
+
2419
+ if __name__ == "__main__":
2361
2420
  main()
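
Since `main(*args)` simply appends its arguments to `sys.argv` before dispatching (falling back to the `webai` subcommand when none are given), the console can also be started from Python. A small illustrative call; the prompt text is only an example.

# Launch the console programmatically instead of via the CLI entry point.
from webscout.webai import main

main("generate", "Summarize the Unix philosophy in one line")
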