webscout 1.1.7__py3-none-any.whl → 1.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

webscout/AIutel.py CHANGED
@@ -1,655 +1,655 @@
1
- import os
2
- import json
3
- import platform
4
- import subprocess
5
- import logging
6
- import appdirs
7
- import datetime
8
- import re
9
- import sys
10
- import click
11
- from rich.markdown import Markdown
12
- from rich.console import Console
13
-
14
# Per-user cache directory for pytgpt (resolved by appdirs per platform,
# e.g. ~/.cache/pytgpt on Linux).
appdir = appdirs.AppDirs("pytgpt", "Smartwa")

default_path = appdir.user_cache_dir

# Ensure the cache directory exists before anything tries to write to it.
if not os.path.exists(default_path):
    os.makedirs(default_path)
20
-
21
-
22
def run_system_command(
    command: str,
    exit_on_error: bool = True,
    stdout_error: bool = True,
    help: str = None,
):
    """Run a shell command against the system.

    Args:
        command (str): Shell command to execute.
        exit_on_error (bool, optional): Exit the process on failure. Defaults to True.
        stdout_error (bool, optional): Print out the error. Defaults to True.
        help (str, optional): Help info in case of exception. Defaults to None.

    Returns:
        tuple: (is_successful, subprocess.CompletedProcess | subprocess.CalledProcessError)
    """
    try:
        # Run the command and capture stdout/stderr as text.
        result = subprocess.run(
            command,
            shell=True,
            check=True,
            text=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return (True, result)
    except subprocess.CalledProcessError as e:
        # Non-zero exit code: optionally report, then either terminate the
        # process or hand the exception back to the caller.
        if stdout_error:
            click.secho(f"Error Occurred: while running '{command}'", fg="yellow")
            click.secho(e.stderr, fg="red")
            if help is not None:
                click.secho(help, fg="cyan")
        # Idiom fix: a plain `if` statement instead of the original
        # conditional-expression-used-as-statement
        # (`sys.exit(e.returncode) if exit_on_error else None`).
        if exit_on_error:
            sys.exit(e.returncode)
        return (False, e)
57
-
58
-
59
class Optimizers:
    """Prompt optimizers: rewrite a user prompt to coerce a specific output style."""

    @staticmethod
    def code(prompt):
        """Wrap *prompt* with instructions so the LLM answers with bare code only."""
        instructions = (
            "Your Role: Provide only code as output without any description.\n"
            "IMPORTANT: Provide only plain text without Markdown formatting.\n"
            "IMPORTANT: Do not include markdown formatting."
            "If there is a lack of details, provide most logical solution. You are not allowed to ask for more details."
            "Ignore any potential risk of errors or confusion.\n\n"
        )
        return instructions + f"Request: {prompt}\n" + "Code:"

    @staticmethod
    def shell_command(prompt):
        """Wrap *prompt* with instructions so the LLM answers with one shell command."""
        system = platform.system()

        # Derive a human-readable OS label for the prompt.
        if system == "Windows":
            operating_system = "Windows"
        elif system == "Darwin":
            operating_system = "MacOS"
        elif system == "Linux":
            try:
                distro = (
                    subprocess.check_output(["lsb_release", "-si"]).decode().strip()
                )
                operating_system = f"Linux/{distro}"
            except Exception:
                # lsb_release unavailable: fall back to a generic label.
                operating_system = "Linux"
        else:
            operating_system = system

        # Derive the shell the generated command should target.
        if system == "Windows":
            shell_name = "powershell.exe" if os.getenv("PSModulePath") else "cmd.exe"
        else:
            shell_name = os.getenv("SHELL") or "/bin/sh"

        return (
            "Your role: Provide only plain text without Markdown formatting. "
            "Do not show any warnings or information regarding your capabilities. "
            "Do not provide any description. If you need to store any data, "
            f"assume it will be stored in the chat. Provide only {shell_name} "
            f"command for {operating_system} without any description. If there is "
            "a lack of details, provide most logical solution. Ensure the output "
            "is a valid shell command. If multiple steps required try to combine "
            f"them together. Prompt: {prompt}\n\nCommand:"
        )
113
-
114
-
115
class Conversation:
    """Handles prompt generation based on history"""

    # System prompt prepended to every conversation.
    intro = (
        "You're a Large Language Model for chatting with people. "
        "Assume role of the LLM and give your response."
        # "Refrain from regenerating the conversation between user and LLM."
    )

    def __init__(
        self,
        status: bool = True,
        max_tokens: int = 600,
        filepath: str = None,
        update_file: bool = True,
    ):
        """Initializes Conversation

        Args:
            status (bool, optional): Flag to control history. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
        """
        self.status = status
        self.max_tokens_to_sample = max_tokens
        self.chat_history = self.intro
        # %-style template used to append each user/LLM exchange.
        self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
        self.file = filepath
        self.update_file = update_file
        # Character budget for the kept history, plus slack dropped on overflow.
        self.history_offset = 10250
        self.prompt_allowance = 10
        if filepath:
            self.load_conversation(filepath, False)

    def load_conversation(self, filepath: str, exists: bool = True) -> None:
        """Load conversation into chat's history from .txt file

        Args:
            filepath (str): Path to .txt file
            exists (bool, optional): Flag for file availability. Defaults to True.
        """
        assert isinstance(
            filepath, str
        ), f"Filepath needs to be of str datatype not {type(filepath)}"
        assert (
            (not exists) or os.path.isfile(filepath)
        ), f"File '{filepath}' does not exist"
        if os.path.isfile(filepath):
            # Existing history file: adopt its contents wholesale
            # (the intro is presumed to be part of the file already).
            logging.debug(f"Loading conversation from '{filepath}'")
            with open(filepath) as fh:
                self.chat_history = fh.read()
        else:
            # Fresh file: create it seeded with the intro prompt.
            logging.debug(f"Creating new chat-history file - '{filepath}'")
            with open(filepath, "w") as fh:
                fh.write(self.intro)

    def __trim_chat_history(self, chat_history: str) -> str:
        """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
        total = self.max_tokens_to_sample + len(self.intro) + len(chat_history)
        if total <= self.history_offset:
            return chat_history
        # Over budget: drop the oldest part of the history, keeping the intro.
        truncate_at = (total - self.history_offset) + self.prompt_allowance
        self.chat_history = self.intro + "\n... " + chat_history[truncate_at:]
        return self.chat_history

    def gen_complete_prompt(self, prompt: str) -> str:
        """Generates a kinda like incomplete conversation

        Args:
            prompt (str): user prompt to append

        Returns:
            str: Updated incomplete chat_history
        """
        if not self.status:
            return prompt
        incomplete = self.chat_history + self.history_format % dict(user=prompt, llm="")
        return self.__trim_chat_history(incomplete)

    def update_chat_history(
        self, prompt: str, response: str, force: bool = False
    ) -> None:
        """Updates chat history

        Args:
            prompt (str): user prompt
            response (str): LLM response
            force (bool, optional): Force update
        """
        if not (self.status or force):
            return
        new_history = self.history_format % dict(user=prompt, llm=response)
        if self.file and self.update_file:
            with open(self.file, "a") as fh:
                fh.write(new_history)
        self.chat_history += new_history
223
-
224
-
225
class AwesomePrompts:
    """Downloads, caches and serves 'awesome' chat prompts (acts)."""

    awesome_prompt_url = (
        "https://github.com/Simatwa/gpt-cli/blob/main/assets/all-acts.json?raw=true"
    )
    # Local JSON cache under the app's cache directory.
    awesome_prompt_path = os.path.join(default_path, "all-acts.json")

    # Guards against re-downloading within the same process.
    __is_prompt_updated = False

    def __init__(self):
        self.acts = self.all_acts

    def __search_key(self, key: str, raise_not_found: bool = False) -> str:
        """Perform case-insensitive awesome-prompt key search.

        Args:
            key (str): key
            raise_not_found (bool, optional): Control KeyError exception. Defaults to False.

        Returns:
            str|None: Exact key name
        """
        for key_, value in self.all_acts.items():
            if str(key).lower() in str(key_).lower():
                return key_
        if raise_not_found:
            raise KeyError(f"Zero awesome prompt found with key - `{key}`")

    def get_acts(self):
        """Retrieves all awesome-prompts from the local cache file."""
        with open(self.awesome_prompt_path) as fh:
            prompt_dict = json.load(fh)
        return prompt_dict

    def update_prompts_from_online(self, override: bool = False):
        """Download awesome-prompts and update existing ones if available.

        Args:
            override (bool, optional): Overwrite existing contents in path
        """
        resp = {}
        if not self.__is_prompt_updated:
            import requests

            logging.info("Downloading & updating awesome prompts")
            response = requests.get(self.awesome_prompt_url)
            # Bug fix: the original accessed `response.raise_for_status` without
            # calling it, so HTTP errors were silently ignored.
            response.raise_for_status()
            resp.update(response.json())
            if os.path.isfile(self.awesome_prompt_path) and not override:
                # Keep locally-added prompts on top of the downloaded set.
                resp.update(self.get_acts())
            self.__is_prompt_updated = True
            with open(self.awesome_prompt_path, "w") as fh:
                json.dump(resp, fh, indent=4)
        else:
            logging.debug("Ignoring remote prompt update")

    @property
    def all_acts(self) -> dict:
        """All awesome_prompts & their indexes mapped to values.

        Returns:
            dict: Awesome-prompts keyed by act name AND by integer index.
        """

        resp = {}
        if not os.path.isfile(self.awesome_prompt_path):
            # First run: populate the local cache before reading it.
            self.update_prompts_from_online()
        resp.update(self.get_acts())

        for count, key_value in enumerate(self.get_acts().items()):
            # Also map the numeric index to the prompt value.
            resp.update({count: key_value[1]})

        return resp

    def get_act(
        self,
        key: str,
        default: str = None,
        case_insensitive: bool = True,
        raise_not_found: bool = False,
    ) -> str:
        """Retrieves specific act of awesome_prompt.

        Args:
            key (str|int): Act name or index
            default (str): Value to be returned in case act not found.
            case_insensitive (bool): Perform search key insensitive. Defaults to True.
            raise_not_found (bool, optional): Control KeyError exception. Defaults to False.

        Raises:
            KeyError: In case key not found

        Returns:
            str: Awesome prompt value
        """
        if str(key).isdigit():
            key = int(key)
        act = self.all_acts.get(key, default)
        if not act and case_insensitive:
            act = self.all_acts.get(self.__search_key(key, raise_not_found))
        return act

    def add_prompt(self, name: str, prompt: str) -> bool:
        """Add new prompt or update an existing one.

        Args:
            name (str): act name
            prompt (str): prompt value

        Returns:
            bool: True on success
        """
        current_prompts = self.get_acts()
        with open(self.awesome_prompt_path, "w") as fh:
            current_prompts[name] = prompt
            json.dump(current_prompts, fh, indent=4)
        logging.info(f"New prompt added successfully - `{name}`")
        # Bug fix: the signature/docstring promise a bool, but the original
        # implicitly returned None.
        return True

    def delete_prompt(
        self, name: str, case_insensitive: bool = True, raise_not_found: bool = False
    ) -> bool:
        """Delete an existing prompt.

        Args:
            name (str): act name
            case_insensitive(bool, optional): Ignore the key cases. Defaults to True.
            raise_not_found (bool, optional): Control KeyError exception. Default is False.
        Returns:
            bool: is_successful report
        """
        name = self.__search_key(name, raise_not_found) if case_insensitive else name
        current_prompts = self.get_acts()
        is_name_available = (
            current_prompts[name] if raise_not_found else current_prompts.get(name)
        )
        if is_name_available:
            with open(self.awesome_prompt_path, "w") as fh:
                current_prompts.pop(name)
                json.dump(current_prompts, fh, indent=4)
            logging.info(f"Prompt deleted successfully - `{name}`")
            # Bug fix: report success truthfully (original returned None here,
            # which is falsy and contradicted the documented bool contract).
            return True
        return False
363
-
364
-
365
class Updates:
    """Pytgpt latest release info"""

    url = "https://api.github.com/repos/Simatwa/python-tgpt/releases/latest"

    @property
    def latest_version(self):
        """Latest release tag name (e.g. 'v0.4.0')."""
        return self.latest(version=True)

    def executable(self, system: str = platform.system()) -> str:
        """Url pointing to executable for particular system.

        Args:
            system (str, optional): system name. Defaults to platform.system().

        Returns:
            str: url
        """
        for entry in self.latest()["assets"]:
            if entry.get("target") == system:
                return entry.get("url")

    def latest(self, whole: bool = False, version: bool = False) -> dict:
        """Check pytgpt latest version info.

        Args:
            whole (bool, optional): Return whole json response. Defaults to False.
            version (bool, optional): return version only. Defaults to False.

        Returns:
            bool|dict: version str or whole dict info
        """
        import requests

        data = requests.get(self.url).json()
        if whole:
            return data

        elif version:
            return data.get("tag_name")

        else:
            # Renamed from `sorted`, which shadowed the builtin of that name.
            release_info = dict(
                tag_name=data.get("tag_name"),
                tarball_url=data.get("tarball_url"),
                zipball_url=data.get("zipball_url"),
                html_url=data.get("html_url"),
                body=data.get("body"),
            )
            whole_assets = []
            # Robustness: a release without assets yields None; iterate safely.
            for entry in data.get("assets") or []:
                url = entry.get("browser_download_url")
                assets = dict(url=url, size=entry.get("size"))
                # Classify the asset's target platform from its filename.
                if ".deb" in url:
                    assets["target"] = "Debian"
                elif ".exe" in url:
                    assets["target"] = "Windows"
                elif "macos" in url:
                    assets["target"] = "Mac"
                elif "linux" in url:
                    assets["target"] = "Linux"

                whole_assets.append(assets)
            release_info["assets"] = whole_assets

            return release_info
431
-
432
-
433
class RawDog:
    """Generate and auto-execute Python scripts in the cli"""

    # Few-shot examples embedded verbatim into the system prompt below.
    examples = """\
EXAMPLES:

1. User: Kill the process running on port 3000

LLM:
```python
import os
os.system("kill $(lsof -t -i:3000)")
print("Process killed")
```

2. User: Summarize my essay

LLM:
```python
import glob
files = glob.glob("*essay*.*")
with open(files[0], "r") as f:
    print(f.read())
```
CONTINUE

User:
LAST SCRIPT OUTPUT:
John Smith
Essay 2021-09-01
...

LLM:
```python
print("The essay is about...")
```
"""

    # Idea borrowed from https://github.com/AbanteAI/rawdog

    def __init__(
        self,
        quiet: bool = False,
        internal_exec: bool = False,
        confirm_script: bool = False,
        interpreter: str = "python",
        prettify: bool = True,
    ):
        """Constructor

        Args:
            quiet (bool, optional): Flag for control logging. Defaults to False.
            internal_exec (bool, optional): Execute scripts with exec function. Defaults to False.
            confirm_script (bool, optional): Give consent to scripts prior to execution. Defaults to False.
            interpreter (str, optional): Python's interpreter name. Defaults to Python.
            prettify (bool, optional): Prettify the code on stdout. Defaults to True.
        """
        if not quiet:
            print(
                "To get the most out of Rawdog. Ensure the following are installed:\n"
                " 1. Python 3.x\n"
                " 2. Dependency:\n"
                "  - Matplotlib\n"
                "Be alerted on the risk posed! (Experimental)\n"
                "Use '--quiet' to suppress this message and code/logs stdout.\n"
            )
        self.internal_exec = internal_exec
        self.confirm_script = confirm_script
        self.quiet = quiet
        self.interpreter = interpreter
        self.prettify = prettify
        # Internal execution reports this interpreter's version; otherwise the
        # configured external interpreter is probed up front with `--version`
        # (exiting with help text if it is not available).
        self.python_version = (
            f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            if self.internal_exec
            else run_system_command(
                f"{self.interpreter} --version",
                exit_on_error=True,
                stdout_error=True,
                help="If you're using pytgpt-cli, use the flag '--internal-exec'",
            )[1].stdout.split(" ")[1]
        )

    @property
    def intro_prompt(self):
        """System prompt describing the Rawdog interaction contract to the LLM."""
        return f"""
You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.

A typical interaction goes like this:
1. The user gives you a natural language PROMPT.
2. You:
    i. Determine what needs to be done
    ii. Write a short Python SCRIPT to do it
    iii. Communicate back to the user by printing to the console in that SCRIPT
3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
 it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
4. In case of exception, regenerate error free script.

If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
This can be useful for summarizing documents or technical readouts, reading instructions before
deciding what to do, or other tasks that require multi-step reasoning.
A typical 'CONTINUE' interaction looks like this:
1. The user gives you a natural language PROMPT.
2. You:
    i. Determine what needs to be done
    ii. Determine that you need to see the output of some subprocess call to complete the task
    iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
3. The compiler
    i. Checks and runs your SCRIPT
    ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
    iii. Finds the word "CONTINUE" and sends control back to you
4. You again:
    i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
    ii. Write a short Python SCRIPT to do it
    iii. Communicate back to the user by printing to the console in that SCRIPT
5. The compiler...

Please follow these conventions carefully:
- Decline any tasks that seem dangerous, irreversible, or that you don't understand.
- Always review the full conversation prior to answering and maintain continuity.
- If asked for information, just print the information clearly and concisely.
- If asked to do something, print a concise summary of what you've done as confirmation.
- If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
- If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
- Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
- Actively clean up any temporary processes or files you use.
- When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
- You can plot anything with matplotlib.
- ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.

{self.examples}

Current system : {platform.system()}
Python version : {self.python_version}
Current directory : {os.getcwd()}
Current Datetime : {datetime.datetime.now()}
"""

    def stdout(self, message: str) -> None:
        """Stdout data

        Args:
            message (str): Text to be printed
        """
        # Rich-render as Markdown when prettify is on; plain colored text otherwise.
        if self.prettify:
            Console().print(Markdown(message))
        else:
            click.secho(message, fg="yellow")

    def log(self, message: str, category: str = "info"):
        """RawDog logger

        Args:
            message (str): Log message
            category (str, optional): Log level. Defaults to 'info'.
        """
        if self.quiet:
            return

        message = "[PYTGPT] - " + message
        if category == "error":
            logging.error(message)
        else:
            logging.info(message)

    def main(self, response: str) -> None:
        """Exec code in response accordingly

        Args:
            response (str): AI response

        Returns:
            None|str: None if script executed successfully else stdout data
        """
        # Exactly one fenced ```python block is treated as an executable
        # script; anything else is shown to the user verbatim.
        code_blocks = re.findall(r"```python.*?```", response, re.DOTALL)
        if len(code_blocks) != 1:
            self.stdout(response)

        else:
            raw_code = code_blocks[0]

            if self.confirm_script:
                # Show the script and ask for consent before running it.
                self.stdout(raw_code)
                if not click.confirm("- Do you wish to execute this"):
                    return

            elif not self.quiet:
                self.stdout(raw_code)

            # Strip the ``` fences, leaving bare Python source.
            raw_code_plus = re.sub(r"(```)(python)?", "", raw_code)

            if "CONTINUE" in response or not self.internal_exec:
                # External path: persist the script to the cache dir and run it
                # with the configured interpreter.
                self.log("Executing script externally")
                path_to_script = os.path.join(default_path, "execute_this.py")
                with open(path_to_script, "w") as fh:
                    fh.write(raw_code_plus)
                if "CONTINUE" in response:

                    # CONTINUE round-trip: capture output and feed it back to
                    # the LLM as context for the next step.
                    success, proc = run_system_command(
                        f"{self.interpreter} {path_to_script}",
                        exit_on_error=False,
                        stdout_error=False,
                    )

                    if success:
                        self.log("Returning success feedback")
                        return f"LAST SCRIPT OUTPUT:\n{proc.stdout}"
                    else:
                        self.log("Returning error feedback", "error")
                        return f"PREVIOUS SCRIPT EXCEPTION:\n{proc.stderr}"
                else:
                    os.system(f"{self.interpreter} {path_to_script}")

            else:
                try:
                    # NOTE(review): exec() runs LLM-generated code in-process —
                    # a significant security exposure on untrusted responses.
                    self.log("Executing script internally")
                    exec(raw_code_plus)
                except Exception as e:
                    self.log(
                        "Exception occurred while executing script. Responding with error: "
                        f"{e.args[1] if len(e.args)>1 else str(e)}",
                        "error",
                    )
1
+ import os
2
+ import json
3
+ import platform
4
+ import subprocess
5
+ import logging
6
+ import appdirs
7
+ import datetime
8
+ import re
9
+ import sys
10
+ import click
11
+ from rich.markdown import Markdown
12
+ from rich.console import Console
13
+
14
# Per-user cache directory for pytgpt (resolved by appdirs per platform,
# e.g. ~/.cache/pytgpt on Linux).
appdir = appdirs.AppDirs("pytgpt", "Smartwa")

default_path = appdir.user_cache_dir

# Ensure the cache directory exists before anything tries to write to it.
if not os.path.exists(default_path):
    os.makedirs(default_path)
20
+
21
+
22
def run_system_command(
    command: str,
    exit_on_error: bool = True,
    stdout_error: bool = True,
    help: str = None,
):
    """Execute a shell command, reporting success and the run/exception object.

    Args:
        command (str): shell command
        exit_on_error (bool, optional): Exit on error. Defaults to True.
        stdout_error (bool, optional): Print out the error. Defaults to True
        help (str, optional): Help info incase of exception. Defaults to None.
    Returns:
        tuple : (is_successfull, object[Exception|Subprocess.run])
    """
    try:
        completed = subprocess.run(
            command,
            shell=True,
            check=True,
            text=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except subprocess.CalledProcessError as error:
        # Command exited non-zero: optionally report it to the terminal.
        if stdout_error:
            click.secho(f"Error Occurred: while running '{command}'", fg="yellow")
            click.secho(error.stderr, fg="red")
            if help is not None:
                click.secho(help, fg="cyan")
        if exit_on_error:
            sys.exit(error.returncode)
        return (False, error)
    else:
        return (True, completed)
57
+
58
+
59
class Optimizers:
    """Prompt optimizers: rewrite a user prompt to coerce a specific output style."""

    @staticmethod
    def code(prompt):
        """Return *prompt* wrapped with instructions to answer with bare code only."""
        return (
            "Your Role: Provide only code as output without any description.\n"
            "IMPORTANT: Provide only plain text without Markdown formatting.\n"
            "IMPORTANT: Do not include markdown formatting."
            "If there is a lack of details, provide most logical solution. You are not allowed to ask for more details."
            "Ignore any potential risk of errors or confusion.\n\n"
            f"Request: {prompt}\n"
            f"Code:"
        )

    @staticmethod
    def shell_command(prompt):
        """Return *prompt* wrapped with instructions to answer with one shell command."""
        # Get os
        operating_system = ""
        if platform.system() == "Windows":
            operating_system = "Windows"
        elif platform.system() == "Darwin":
            operating_system = "MacOS"
        elif platform.system() == "Linux":
            try:
                # lsb_release gives the distro name, e.g. "Ubuntu".
                result = (
                    subprocess.check_output(["lsb_release", "-si"]).decode().strip()
                )
                distro = result if result else ""
                operating_system = f"Linux/{distro}"
            except Exception:
                # lsb_release unavailable: fall back to a generic label.
                operating_system = "Linux"
        else:
            operating_system = platform.system()

        # Get Shell
        shell_name = "/bin/sh"
        if platform.system() == "Windows":
            shell_name = "cmd.exe"
            # PSModulePath set implies a PowerShell session.
            if os.getenv("PSModulePath"):
                shell_name = "powershell.exe"
        else:
            shell_env = os.getenv("SHELL")
            if shell_env:
                shell_name = shell_env

        return (
            "Your role: Provide only plain text without Markdown formatting. "
            "Do not show any warnings or information regarding your capabilities. "
            "Do not provide any description. If you need to store any data, "
            f"assume it will be stored in the chat. Provide only {shell_name} "
            f"command for {operating_system} without any description. If there is "
            "a lack of details, provide most logical solution. Ensure the output "
            "is a valid shell command. If multiple steps required try to combine "
            f"them together. Prompt: {prompt}\n\nCommand:"
        )
113
+
114
+
115
class Conversation:
    """Handles prompt generation based on history"""

    # System prompt prepended to every conversation.
    intro = (
        "You're a Large Language Model for chatting with people. "
        "Assume role of the LLM and give your response."
        # "Refrain from regenerating the conversation between user and LLM."
    )

    def __init__(
        self,
        status: bool = True,
        max_tokens: int = 600,
        filepath: str = None,
        update_file: bool = True,
    ):
        """Initializes Conversation

        Args:
            status (bool, optional): Flag to control history. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
        """
        self.status = status
        self.max_tokens_to_sample = max_tokens
        self.chat_history = self.intro
        # %-style template used to append each user/LLM exchange.
        self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
        self.file = filepath
        self.update_file = update_file
        # Character budget for kept history, plus slack dropped on overflow
        # (see __trim_chat_history).
        self.history_offset = 10250
        self.prompt_allowance = 10
        self.load_conversation(filepath, False) if filepath else None

    def load_conversation(self, filepath: str, exists: bool = True) -> None:
        """Load conversation into chat's history from .txt file

        Args:
            filepath (str): Path to .txt file
            exists (bool, optional): Flag for file availability. Defaults to True.
        """
        assert isinstance(
            filepath, str
        ), f"Filepath needs to be of str datatype not {type(filepath)}"
        assert (
            os.path.isfile(filepath) if exists else True
        ), f"File '{filepath}' does not exist"
        if not os.path.isfile(filepath):
            logging.debug(f"Creating new chat-history file - '{filepath}'")
            with open(filepath, "w") as fh:  # Try creating new file
                # lets add intro here
                fh.write(self.intro)
        else:
            logging.debug(f"Loading conversation from '{filepath}'")
            with open(filepath) as fh:
                file_contents = fh.read()
                # Presume intro prompt is part of the file content
                self.chat_history = file_contents

    def __trim_chat_history(self, chat_history: str) -> str:
        """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
        len_of_intro = len(self.intro)
        len_of_chat_history = len(chat_history)
        # The response token budget counts against the history window too.
        total = (
            self.max_tokens_to_sample + len_of_intro + len_of_chat_history
        )  # + self.max_tokens_to_sample
        if total > self.history_offset:
            truncate_at = (total - self.history_offset) + self.prompt_allowance
            # Remove head of total (n) of chat_history
            new_chat_history = chat_history[truncate_at:]
            self.chat_history = self.intro + "\n... " + new_chat_history
            # print(len(self.chat_history))
            return self.chat_history
        # print(len(chat_history))
        return chat_history

    def gen_complete_prompt(self, prompt: str) -> str:
        """Generates a kinda like incomplete conversation

        Args:
            prompt (str): user prompt to append

        Returns:
            str: Updated incomplete chat_history
        """
        if self.status:
            resp = self.chat_history + self.history_format % dict(user=prompt, llm="")
            return self.__trim_chat_history(resp)

        return prompt

    def update_chat_history(
        self, prompt: str, response: str, force: bool = False
    ) -> None:
        """Updates chat history

        Args:
            prompt (str): user prompt
            response (str): LLM response
            force (bool, optional): Force update
        """
        if not self.status and not force:
            return
        new_history = self.history_format % dict(user=prompt, llm=response)
        if self.file and self.update_file:
            with open(self.file, "a") as fh:
                fh.write(new_history)
        self.chat_history += new_history
223
+
224
+
225
class AwesomePrompts:
    """Manage "awesome" prompts (acts): download, cache, look up, add, delete.

    Prompts are downloaded once from a public JSON document and cached
    locally at ``awesome_prompt_path``.  Acts can then be retrieved by
    name (case-insensitively) or by numeric index.
    """

    awesome_prompt_url = (
        "https://github.com/Simatwa/gpt-cli/blob/main/assets/all-acts.json?raw=true"
    )
    # Local JSON cache mapping act-name -> prompt text.
    awesome_prompt_path = os.path.join(default_path, "all-acts.json")

    # Guard so the remote download happens at most once per process.
    __is_prompt_updated = False

    def __init__(self):
        # Snapshot of all acts (name- and index-keyed) taken at construction.
        self.acts = self.all_acts

    def __search_key(self, key: str, raise_not_found: bool = False) -> str:
        """Perform case-insensitive awesome-prompt key search.

        Args:
            key (str): Act name (or a fragment of it) to search for.
            raise_not_found (bool, optional): Raise KeyError when no match. Defaults to False.

        Returns:
            str|None: Exact key name, or None when not found and not raising.
        """
        for key_ in self.all_acts:
            # Substring match so partial names still hit the full act name.
            if str(key).lower() in str(key_).lower():
                return key_
        if raise_not_found:
            raise KeyError(f"Zero awesome prompt found with key - `{key}`")

    def get_acts(self) -> dict:
        """Retrieve all awesome-prompts from the on-disk cache.

        Returns:
            dict: Act-name -> prompt mapping loaded from ``awesome_prompt_path``.
        """
        with open(self.awesome_prompt_path) as fh:
            prompt_dict = json.load(fh)
        return prompt_dict

    def update_prompts_from_online(self, override: bool = False):
        """Download awesome-prompts and update existing ones if available.

        Args:
            override (bool, optional): Overwrite existing contents in path. Defaults to False.
        """
        resp = {}
        if not self.__is_prompt_updated:
            import requests

            logging.info("Downloading & updating awesome prompts")
            # timeout avoids hanging indefinitely on a stalled connection.
            response = requests.get(self.awesome_prompt_url, timeout=20)
            # Bug fix: `raise_for_status` was previously referenced without
            # parentheses (a no-op), silently ignoring HTTP errors.
            response.raise_for_status()
            resp.update(response.json())
            if os.path.isfile(self.awesome_prompt_path) and not override:
                # Keep locally-added prompts; they take precedence over remote ones.
                resp.update(self.get_acts())
            self.__is_prompt_updated = True
            with open(self.awesome_prompt_path, "w") as fh:
                json.dump(resp, fh, indent=4)
        else:
            logging.debug("Ignoring remote prompt update")

    @property
    def all_acts(self) -> dict:
        """All awesome-prompts and their indexes mapped to values.

        Returns:
            dict: Act-name -> prompt entries, plus positional-index -> prompt entries.
        """
        resp = {}
        if not os.path.isfile(self.awesome_prompt_path):
            # First run: populate the on-disk cache before reading it.
            self.update_prompts_from_online()
        resp.update(self.get_acts())

        for count, key_value in enumerate(self.get_acts().items()):
            # Also map the positional index to the value so acts can be
            # addressed by number as well as by name.
            resp.update({count: key_value[1]})

        return resp

    def get_act(
        self,
        key: str,
        default: str = None,
        case_insensitive: bool = True,
        raise_not_found: bool = False,
    ) -> str:
        """Retrieve a specific act of awesome_prompt.

        Args:
            key (str|int): Act name or index.
            default (str): Value to be returned in case the act is not found.
            case_insensitive (bool): Perform case-insensitive key search. Defaults to True.
            raise_not_found (bool, optional): Raise KeyError when not found. Defaults to False.

        Raises:
            KeyError: When key is not found and `raise_not_found` is True.

        Returns:
            str: Awesome prompt value.
        """
        if str(key).isdigit():
            # Numeric strings address prompts by positional index.
            key = int(key)
        act = self.all_acts.get(key, default)
        if not act and case_insensitive:
            act = self.all_acts.get(self.__search_key(key, raise_not_found))
        return act

    def add_prompt(self, name: str, prompt: str) -> bool:
        """Add a new prompt or update an existing one.

        Args:
            name (str): Act name.
            prompt (str): Prompt value.

        Returns:
            bool: True on success (matches the declared return type).
        """
        current_prompts = self.get_acts()
        current_prompts[name] = prompt
        with open(self.awesome_prompt_path, "w") as fh:
            json.dump(current_prompts, fh, indent=4)
        logging.info(f"New prompt added successfully - `{name}`")
        return True

    def delete_prompt(
        self, name: str, case_insensitive: bool = True, raise_not_found: bool = False
    ) -> bool:
        """Delete an existing prompt.

        Args:
            name (str): Act name.
            case_insensitive (bool, optional): Ignore the key case. Defaults to True.
            raise_not_found (bool, optional): Raise KeyError when not found. Defaults to False.

        Returns:
            bool: is_successful report.
        """
        name = self.__search_key(name, raise_not_found) if case_insensitive else name
        current_prompts = self.get_acts()
        is_name_available = (
            current_prompts[name] if raise_not_found else current_prompts.get(name)
        )
        if is_name_available:
            current_prompts.pop(name)
            with open(self.awesome_prompt_path, "w") as fh:
                json.dump(current_prompts, fh, indent=4)
            logging.info(f"Prompt deleted successfully - `{name}`")
            # Bug fix: previously fell through returning None despite the
            # `-> bool` contract; callers can now rely on a True success flag.
            return True
        else:
            return False
363
+
364
+
365
class Updates:
    """Pytgpt latest release info, queried from the GitHub releases API."""

    url = "https://api.github.com/repos/Simatwa/python-tgpt/releases/latest"

    @property
    def latest_version(self):
        """Latest release tag name (e.g. ``v0.4.5``)."""
        return self.latest(version=True)

    def executable(self, system: str = platform.system()) -> str:
        """Url pointing to executable for a particular system.

        Args:
            system (str, optional): System name. Defaults to platform.system().
                NOTE(review): the default is evaluated once at import time.

        Returns:
            str|None: Download url, or None when no asset targets `system`.
        """
        for entry in self.latest()["assets"]:
            if entry.get("target") == system:
                return entry.get("url")

    def latest(self, whole: bool = False, version: bool = False) -> dict:
        """Check pytgpt latest version info.

        Args:
            whole (bool, optional): Return whole json response. Defaults to False.
            version (bool, optional): Return version only. Defaults to False.

        Returns:
            str|dict: Version str or release-info dict.
        """
        import requests

        # timeout avoids hanging indefinitely on a stalled connection.
        data = requests.get(self.url, timeout=20).json()
        if whole:
            return data

        elif version:
            return data.get("tag_name")

        else:
            # Renamed from `sorted` — the original shadowed the builtin.
            release_info = dict(
                tag_name=data.get("tag_name"),
                tarball_url=data.get("tarball_url"),
                zipball_url=data.get("zipball_url"),
                html_url=data.get("html_url"),
                body=data.get("body"),
            )
            whole_assets = []
            for entry in data.get("assets"):
                url = entry.get("browser_download_url")
                assets = dict(url=url, size=entry.get("size"))
                # Map filename hints to a human-friendly target platform.
                if ".deb" in url:
                    assets["target"] = "Debian"
                elif ".exe" in url:
                    assets["target"] = "Windows"
                elif "macos" in url:
                    assets["target"] = "Mac"
                elif "linux" in url:
                    assets["target"] = "Linux"

                whole_assets.append(assets)
            release_info["assets"] = whole_assets

            return release_info
431
+
432
+
433
class RawDog:
    """Generate and auto-execute Python scripts in the cli"""

    # Few-shot examples embedded verbatim in the system prompt (intro_prompt).
    examples = """\
EXAMPLES:

1. User: Kill the process running on port 3000

LLM:
```python
import os
os.system("kill $(lsof -t -i:3000)")
print("Process killed")
```

2. User: Summarize my essay

LLM:
```python
import glob
files = glob.glob("*essay*.*")
with open(files[0], "r") as f:
    print(f.read())
```
CONTINUE

User:
LAST SCRIPT OUTPUT:
John Smith
Essay 2021-09-01
...

LLM:
```python
print("The essay is about...")
```
"""

    # Idea borrowed from https://github.com/AbanteAI/rawdog

    def __init__(
        self,
        quiet: bool = False,
        internal_exec: bool = False,
        confirm_script: bool = False,
        interpreter: str = "python",
        prettify: bool = True,
    ):
        """Constructor

        Args:
            quiet (bool, optional): Flag for control logging. Defaults to False.
            internal_exec (bool, optional): Execute scripts with exec function. Defaults to False.
            confirm_script (bool, optional): Give consent to scripts prior to execution. Defaults to False.
            interpreter (str, optional): Python's interpreter name. Defaults to Python.
            prettify (bool, optional): Prettify the code on stdout. Defaults to True.
        """
        if not quiet:
            # One-time banner warning the user about the risk of auto-execution.
            print(
                "To get the most out of Rawdog. Ensure the following are installed:\n"
                " 1. Python 3.x\n"
                " 2. Dependency:\n"
                " - Matplotlib\n"
                "Be alerted on the risk posed! (Experimental)\n"
                "Use '--quiet' to suppress this message and code/logs stdout.\n"
            )
        self.internal_exec = internal_exec
        self.confirm_script = confirm_script
        self.quiet = quiet
        self.interpreter = interpreter
        self.prettify = prettify
        # Version string: taken from the running interpreter when executing
        # internally, otherwise parsed from `<interpreter> --version` output
        # ("Python X.Y.Z" -> "X.Y.Z").
        self.python_version = (
            f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            if self.internal_exec
            else run_system_command(
                f"{self.interpreter} --version",
                exit_on_error=True,
                stdout_error=True,
                help="If you're using pytgpt-cli, use the flag '--internal-exec'",
            )[1].stdout.split(" ")[1]
        )

    @property
    def intro_prompt(self):
        # System prompt sent to the LLM; interpolates the examples above and
        # live environment details (OS, Python version, cwd, datetime).
        return f"""
You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.

A typical interaction goes like this:
1. The user gives you a natural language PROMPT.
2. You:
    i. Determine what needs to be done
    ii. Write a short Python SCRIPT to do it
    iii. Communicate back to the user by printing to the console in that SCRIPT
3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
 it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
4. In case of exception, regenerate error free script.

If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
This can be useful for summarizing documents or technical readouts, reading instructions before
deciding what to do, or other tasks that require multi-step reasoning.
A typical 'CONTINUE' interaction looks like this:
1. The user gives you a natural language PROMPT.
2. You:
    i. Determine what needs to be done
    ii. Determine that you need to see the output of some subprocess call to complete the task
    iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
3. The compiler
    i. Checks and runs your SCRIPT
    ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
    iii. Finds the word "CONTINUE" and sends control back to you
4. You again:
    i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
    ii. Write a short Python SCRIPT to do it
    iii. Communicate back to the user by printing to the console in that SCRIPT
5. The compiler...

Please follow these conventions carefully:
- Decline any tasks that seem dangerous, irreversible, or that you don't understand.
- Always review the full conversation prior to answering and maintain continuity.
- If asked for information, just print the information clearly and concisely.
- If asked to do something, print a concise summary of what you've done as confirmation.
- If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
- If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
- Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
- Actively clean up any temporary processes or files you use.
- When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
- You can plot anything with matplotlib.
- ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.

{self.examples}

Current system : {platform.system()}
Python version : {self.python_version}
Current directory : {os.getcwd()}
Current Datetime : {datetime.datetime.now()}
"""

    def stdout(self, message: str) -> None:
        """Stdout data

        Args:
            message (str): Text to be printed
        """
        if self.prettify:
            # Render as markdown so fenced code blocks are syntax-highlighted.
            Console().print(Markdown(message))
        else:
            click.secho(message, fg="yellow")

    def log(self, message: str, category: str = "info"):
        """RawDog logger

        Args:
            message (str): Log message
            category (str, optional): Log level. Defaults to 'info'.
        """
        if self.quiet:
            # Quiet mode suppresses all RawDog logging.
            return

        message = "[PYTGPT] - " + message
        if category == "error":
            logging.error(message)
        else:
            logging.info(message)

    def main(self, response: str) -> None:
        """Exec code in response accordingly

        Args:
            response (str): AI response

        Returns:
            None|str: None if script executed successfully else stdout data
        """
        # Expect exactly one fenced ```python block; anything else is treated
        # as plain chat and printed as-is.
        code_blocks = re.findall(r"```python.*?```", response, re.DOTALL)
        if len(code_blocks) != 1:
            self.stdout(response)

        else:
            raw_code = code_blocks[0]

            if self.confirm_script:
                # Show the script and ask for consent before running it.
                self.stdout(raw_code)
                if not click.confirm("- Do you wish to execute this"):
                    return

            elif not self.quiet:
                self.stdout(raw_code)

            # Strip the markdown fences, leaving the bare script text.
            raw_code_plus = re.sub(r"(```)(python)?", "", raw_code)

            # External execution is forced when the LLM requested a CONTINUE
            # round-trip (its output must be captured) or when internal exec
            # is disabled.
            if "CONTINUE" in response or not self.internal_exec:
                self.log("Executing script externally")
                path_to_script = os.path.join(default_path, "execute_this.py")
                with open(path_to_script, "w") as fh:
                    fh.write(raw_code_plus)
                if "CONTINUE" in response:

                    success, proc = run_system_command(
                        f"{self.interpreter} {path_to_script}",
                        exit_on_error=False,
                        stdout_error=False,
                    )

                    if success:
                        # Feed captured stdout back to the LLM for the next turn.
                        self.log("Returning success feedback")
                        return f"LAST SCRIPT OUTPUT:\n{proc.stdout}"
                    else:
                        # Feed the failure back so the LLM can regenerate the script.
                        self.log("Returning error feedback", "error")
                        return f"PREVIOUS SCRIPT EXCEPTION:\n{proc.stderr}"
                else:
                    # No feedback needed; run fire-and-forget in a subshell.
                    os.system(f"{self.interpreter} {path_to_script}")

            else:
                try:
                    self.log("Executing script internally")
                    # NOTE(review): exec() runs LLM-generated code in-process —
                    # risk is acknowledged by the banner printed in __init__.
                    exec(raw_code_plus)
                except Exception as e:
                    self.log(
                        "Exception occurred while executing script. Responding with error: "
                        f"{e.args[1] if len(e.args)>1 else str(e)}",
                        "error",
                    )
                    return f"PREVIOUS SCRIPT EXCEPTION:\n{str(e)}"