rubber-ducky 1.0.1.tar.gz → 1.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: rubber-ducky
- Version: 1.0.1
+ Version: 1.1.0
  Summary: AI Companion for Pair Programming
  Home-page: https://github.com/ParthSareen/ducky
  Author: Parth Sareen
@@ -9,6 +9,7 @@ License: MIT
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: langchain
+ Requires-Dist: termcolor

  # rubber ducky

@@ -17,7 +18,7 @@ Requires-Dist: langchain
  - Install ollama
  - `ollama run codellama` (first time and then you can just have application in background)
  - There are probably other dependencies which I forgot to put in setup.py sorry in advance.
- - run with `ducky --file <path> --prompt (optional) <prompt to override>`
+ - Run with `ducky -f <file path>`

  ## Why did I make this

@@ -34,12 +35,25 @@ You will need Ollama installed on your machine. The model I use for this project
  For the first installation you can run `ollama run codellama` and it should pull the necessary binaries for you. Ollama is also great because it'll spin up a server which can run in the background and can even do automatic model switching as long as you have it installed.

  ## Usage
- Make sure you have the package installed. Easiest through [pypi](https://pypi.org/project/rubber-ducky/).

- `pip install rubber-ducky` also works.
+ Install through [pypi](https://pypi.org/project/rubber-ducky/):

- To run:
+ `pip install rubber-ducky`.

- `ducky --file <path> --prompt (optional) <prompt to override>`
+ ### Simple run
+ `ducky`

- I have yet to implement some methods so if you do something I don't say that's on you.
+ ### To use additional options:
+
+ `ducky --file <path> --prompt <prompt> --directory <directory> --chain --model <model>`
+
+ Where:
+ - `--prompt` or `-p`: Custom prompt to be used
+ - `--file` or `-f`: The file to be processed
+ - `--directory` or `-d`: The directory to be processed
+ - `--chain` or `-c`: Chain the output of the previous command to the next command
+ - `--model` or `-m`: The model to be used (default is "codellama")
+
+
+ ## Example output
+ ![Screenshot of ducky](image.png)
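The flags documented in the hunk above compose into a single command line. As a minimal sketch, here is one hypothetical invocation driven from Python; the file name and prompt text are made up for illustration, and only flags listed above are used:

```python
# Hypothetical driver for the `ducky` CLI, equivalent to typing
#   ducky --file app.py --prompt "focus on error handling" --model codellama
# in a shell. "app.py" and the prompt are illustrative values only.
import subprocess

subprocess.run(
    ["ducky",
     "--file", "app.py",
     "--prompt", "focus on error handling",
     "--model", "codellama"],
    check=True,  # raise CalledProcessError if ducky exits nonzero
)
```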
@@ -5,7 +5,7 @@
  - Install ollama
  - `ollama run codellama` (first time and then you can just have application in background)
  - There are probably other dependencies which I forgot to put in setup.py sorry in advance.
- - run with `ducky --file <path> --prompt (optional) <prompt to override>`
+ - Run with `ducky -f <file path>`

  ## Why did I make this

@@ -22,12 +22,25 @@ You will need Ollama installed on your machine. The model I use for this project
  For the first installation you can run `ollama run codellama` and it should pull the necessary binaries for you. Ollama is also great because it'll spin up a server which can run in the background and can even do automatic model switching as long as you have it installed.

  ## Usage
- Make sure you have the package installed. Easiest through [pypi](https://pypi.org/project/rubber-ducky/).

- `pip install rubber-ducky` also works.
+ Install through [pypi](https://pypi.org/project/rubber-ducky/):

- To run:
+ `pip install rubber-ducky`.

- `ducky --file <path> --prompt (optional) <prompt to override>`
+ ### Simple run
+ `ducky`

- I have yet to implement some methods so if you do something I don't say that's on you.
+ ### To use additional options:
+
+ `ducky --file <path> --prompt <prompt> --directory <directory> --chain --model <model>`
+
+ Where:
+ - `--prompt` or `-p`: Custom prompt to be used
+ - `--file` or `-f`: The file to be processed
+ - `--directory` or `-d`: The directory to be processed
+ - `--chain` or `-c`: Chain the output of the previous command to the next command
+ - `--model` or `-m`: The model to be used (default is "codellama")
+
+
+ ## Example output
+ ![Screenshot of ducky](image.png)
@@ -0,0 +1,96 @@
+ import argparse
+ from typing import Optional
+ from langchain.llms.ollama import Ollama
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+ from termcolor import colored
+
+ class RubberDuck:
+     """
+     This class is a wrapper around the Ollama model.
+     """
+     def __init__(self, model: str = "codellama") -> None:
+         """
+         This function initializes the RubberDuck class.
+
+         Args:
+             model (str, optional): The model to be used. Defaults to "codellama".
+         """
+         self.system_prompt = """You are a pair programming tool to help developers debug, think through design, and write code.
+         Help the user think through their approach and provide feedback on the code."""
+         self.llm = Ollama(model=model, callbacks=[StreamingStdOutCallbackHandler()], system=self.system_prompt)
+
+
+     def call_llama(self, code: str = "", prompt: Optional[str] = None, chain: bool = False) -> None:
+         """
+         This function calls the Ollama model to provide feedback on the given code.
+
+         Args:
+             code (str): The code to be reviewed.
+             prompt (Optional[str]): Custom prompt to be used. Defaults to None.
+         """
+         if prompt is None:
+             prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
+         else:
+             prompt = prompt + code
+
+
+         self.llm(prompt)
+         if chain:
+             while True:
+                 prompt = input(colored("\n What's on your mind? \n ", 'green'))
+                 self.llm(prompt)
+
+
+ def read_files_from_dir(directory: str) -> str:
+     """
+     This function reads all the files from a directory and returns the concatenated string.
+
+     Args:
+         directory (str): The directory to be processed.
+
+     Returns:
+         str: The concatenated string of all the files.
+     """
+     import os
+     files = os.listdir(directory)
+     code = ""
+     for file in files:
+         code += open(directory + "/" + file).read()
+     return code
+
+
+ def ducky() -> None:
+     """
+     This function parses the command line arguments and calls the Ollama model.
+     """
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--prompt", "-p", help="Custom prompt to be used", default=None)
+     parser.add_argument("--file", "-f", help="The file to be processed", default=None)
+     parser.add_argument("--directory", "-d", help="The directory to be processed", default=None)
+     parser.add_argument("--chain", "-c", help="Chain the output of the previous command to the next command", action="store_true", default=False)
+     parser.add_argument("--model", "-m", help="The model to be used", default="codellama")
+     args, _ = parser.parse_known_args()
+
+     # My testing has shown that codellama:7b-python is good for returning Python code from the program.
+     # My intention with this tool was to give more general feedback and have a back and forth with the user.
+     rubber_ducky = RubberDuck(model=args.model)
+     if args.file is None and args.directory is None:
+         if args.chain:
+             while True:
+                 prompt = input(colored("\n What's on your mind? \n ", 'green'))
+                 rubber_ducky.call_llama(prompt=prompt, chain=args.chain)
+         else:
+             prompt = input(colored("\n What's on your mind? \n ", 'green'))
+             rubber_ducky.call_llama(prompt=prompt, chain=args.chain)
+
+     if args.file is not None:
+         code = open(args.file).read()
+         rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
+
+     elif args.directory is not None:
+         code = read_files_from_dir(args.directory)
+         rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
+
+
+ if __name__ == "__main__":
+     ducky()
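The module above also works as a plain library, not just a console script. Below is a minimal sketch of programmatic use, assuming the file ships as `rubber_ducky/rubber_ducky.py` and is importable as `rubber_ducky.rubber_ducky`; the import path and the toy code string are assumptions, not confirmed by the diff:

```python
# Hypothetical library-style use of the 1.1.0 API shown above.
# The import path is a guess based on the package name; adjust it if the
# distribution lays the module out differently.
from rubber_ducky.rubber_ducky import RubberDuck, read_files_from_dir

duck = RubberDuck(model="codellama")  # same default the CLI uses

# One-shot review of a code snippet; the answer streams to stdout.
duck.call_llama(code="def add(a, b): return a - b", prompt="Spot the bug: ")

# Concatenate a directory and open a follow-up interactive session:
# duck.call_llama(code=read_files_from_dir("src"), chain=True)
```

Note that `read_files_from_dir` reads only the top level of the directory and will raise if it encounters a subdirectory, so the commented call assumes a flat source folder.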
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: rubber-ducky
- Version: 1.0.1
+ Version: 1.1.0
  Summary: AI Companion for Pair Programming
  Home-page: https://github.com/ParthSareen/ducky
  Author: Parth Sareen
@@ -9,6 +9,7 @@ License: MIT
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: langchain
+ Requires-Dist: termcolor

  # rubber ducky

@@ -17,7 +18,7 @@ Requires-Dist: langchain
  - Install ollama
  - `ollama run codellama` (first time and then you can just have application in background)
  - There are probably other dependencies which I forgot to put in setup.py sorry in advance.
- - run with `ducky --file <path> --prompt (optional) <prompt to override>`
+ - Run with `ducky -f <file path>`

  ## Why did I make this

@@ -34,12 +35,25 @@ You will need Ollama installed on your machine. The model I use for this project
  For the first installation you can run `ollama run codellama` and it should pull the necessary binaries for you. Ollama is also great because it'll spin up a server which can run in the background and can even do automatic model switching as long as you have it installed.

  ## Usage
- Make sure you have the package installed. Easiest through [pypi](https://pypi.org/project/rubber-ducky/).

- `pip install rubber-ducky` also works.
+ Install through [pypi](https://pypi.org/project/rubber-ducky/):

- To run:
+ `pip install rubber-ducky`.

- `ducky --file <path> --prompt (optional) <prompt to override>`
+ ### Simple run
+ `ducky`

- I have yet to implement some methods so if you do something I don't say that's on you.
+ ### To use additional options:
+
+ `ducky --file <path> --prompt <prompt> --directory <directory> --chain --model <model>`
+
+ Where:
+ - `--prompt` or `-p`: Custom prompt to be used
+ - `--file` or `-f`: The file to be processed
+ - `--directory` or `-d`: The directory to be processed
+ - `--chain` or `-c`: Chain the output of the previous command to the next command
+ - `--model` or `-m`: The model to be used (default is "codellama")
+
+
+ ## Example output
+ ![Screenshot of ducky](image.png)
@@ -5,7 +5,7 @@ with open('README.md', 'r', encoding='utf-8') as f:

  setup(
      name='rubber-ducky',
-     version='1.0.1',
+     version='1.1.0',
      description='AI Companion for Pair Programming',
      long_description=long_description,
      long_description_content_type='text/markdown',
@@ -16,6 +16,7 @@ setup(
      packages=find_packages(),
      install_requires=[
          'langchain',
+         'termcolor'
      ],
      entry_points={
          'console_scripts': [
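The hunk stops at the opening of the `console_scripts` list, so the actual entry is outside the diff. For orientation only, an entry of the shape below would wire the `ducky` command to the `ducky()` function in the new module; the `module:function` target is a guess, not read from setup.py:

```python
# Hypothetical shape of the entry_points argument whose opening the hunk
# above truncates. The target string is guessed, not taken from the source.
entry_points = {
    "console_scripts": [
        # <command> = <module path>:<callable>; pip generates a `ducky`
        # executable that calls rubber_ducky.rubber_ducky.ducky()
        "ducky=rubber_ducky.rubber_ducky:ducky",
    ],
}
```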
@@ -1,44 +0,0 @@
- import argparse
- from typing import Optional
- from langchain.llms.ollama import Ollama
- from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-
-
- def call_llama(code: str, prompt: Optional[str] = None) -> None:
-     """
-     This function calls the Ollama model to provide feedback on the given code.
-
-     Args:
-         code (str): The code to be reviewed.
-         prompt (Optional[str], optional): Custom prompt to be used. Defaults to None.
-     """
-     if prompt is None:
-         prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
-     else:
-         prompt = prompt + code
-     system_prompt = """You are a pair progamming tool to help developers debug, think through design, and write code. Help the user rubber duck by providing feedback on the code."""
-
-     # TODO: find out how to enable the python trained model
-     llm = Ollama(model="codellama", callbacks=[StreamingStdOutCallbackHandler()], system=system_prompt)
-
-     # TODO: add chaining if it makes sense
-     llm(prompt)
-
- def ducky() -> None:
-     """
-     This function parses the command line arguments and calls the Ollama model.
-     """
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--prompt", help="Custom prompt to be used", default=None)
-     parser.add_argument("--file", help="The file to be processed", default=None)
-     parser.add_argument("--directory", help="The directory to be processed", default=None)
-     args, _ = parser.parse_known_args()
-
-     if args.file is not None:
-         code = open(args.file).read()
-         call_llama(code=code, prompt=args.prompt)
-     else:
-         raise Exception("No file provided")
-
- if __name__ == "__main__":
-     ducky()