rubber-ducky 1.0.1-py3-none-any.whl → 1.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ducky/ducky.py +79 -28
- rubber_ducky-1.1.1.dist-info/METADATA +60 -0
- rubber_ducky-1.1.1.dist-info/RECORD +8 -0
- {rubber_ducky-1.0.1.dist-info → rubber_ducky-1.1.1.dist-info}/WHEEL +1 -1
- rubber_ducky-1.1.1.dist-info/entry_points.txt +2 -0
- rubber_ducky-1.0.1.dist-info/METADATA +0 -45
- rubber_ducky-1.0.1.dist-info/RECORD +0 -8
- rubber_ducky-1.0.1.dist-info/entry_points.txt +0 -2
- {rubber_ducky-1.0.1.dist-info → rubber_ducky-1.1.1.dist-info}/LICENSE +0 -0
- {rubber_ducky-1.0.1.dist-info → rubber_ducky-1.1.1.dist-info}/top_level.txt +0 -0
ducky/ducky.py
CHANGED
@@ -1,44 +1,95 @@
 import argparse
+import asyncio
 from typing import Optional
-from
-from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from ollama import AsyncClient
 
 
-
-""
-
+class RubberDuck:
+    def __init__(self, model: str = "codellama") -> None:
+        self.system_prompt = """You are a pair progamming tool to help developers debug, think through design, and write code.
+        Help the user think through their approach and provide feedback on the code. Think step by step and ask clarifying questions if needed."""
+        self.client = AsyncClient()
+        self.model = model
+
+    async def call_llama(self, code: str = "", prompt: Optional[str] = None, chain: bool = False) -> None:
+        if prompt is None:
+            user_prompt = input("\nEnter your prompt (or press Enter for default review): ")
+            if not user_prompt:
+                prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
+            else:
+                prompt = user_prompt + code
+        else:
+            prompt = prompt + code
+
+        responses = []
+        while True:
+            # Include previous responses in the prompt for context
+            context_prompt = "\n".join(responses) + "\n" + prompt
+            response = await self.client.generate(model="codellama", prompt=context_prompt)
+            print(response['response'])
+            responses.append(response['response'])
+            if not chain:
+                break
+            prompt = input("\nAny questions? \n")
 
-    Args:
-        code (str): The code to be reviewed.
-        prompt (Optional[str], optional): Custom prompt to be used. Defaults to None.
-    """
-    if prompt is None:
-        prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
-    else:
-        prompt = prompt + code
-    system_prompt = """You are a pair progamming tool to help developers debug, think through design, and write code. Help the user rubber duck by providing feedback on the code."""
 
-
-
+def read_files_from_dir(directory: str) -> str:
+    import os
 
-
-
+    files = os.listdir(directory)
+    code = ""
+    for file in files:
+        code += open(directory + "/" + file).read()
+    return code
 
-
-
-    This function parses the command line arguments and calls the Ollama model.
-    """
+
+async def ducky() -> None:
     parser = argparse.ArgumentParser()
-    parser.add_argument("
-    parser.add_argument("--
-    parser.add_argument("--
+    parser.add_argument("question", nargs="?", help="Direct question to ask", default=None)
+    parser.add_argument("--prompt", "-p", help="Custom prompt to be used", default=None)
+    parser.add_argument("--file", "-f", help="The file to be processed", default=None)
+    parser.add_argument("--directory", "-d", help="The directory to be processed", default=None)
+    parser.add_argument(
+        "--chain",
+        "-c",
+        help="Chain the output of the previous command to the next command",
+        action="store_true",
+        default=False,
+    )
+    parser.add_argument(
+        "--model", "-m", help="The model to be used", default="codellama"
+    )
     args, _ = parser.parse_known_args()
 
+    # My testing has shown that the codellama:7b-python is good for returning python code from the program.
+    # My intention with this tool was to give more general feedback and have back a back and forth with the user.
+    rubber_ducky = RubberDuck(model=args.model)
+
+    # Handle direct question from CLI
+    if args.question is not None:
+        await rubber_ducky.call_llama(prompt=args.question, chain=args.chain)
+        return
+
+    if args.file is None and args.directory is None:
+        # Handle interactive mode (no file/directory specified)
+        await rubber_ducky.call_llama(prompt=args.prompt, chain=args.chain)
+        if args.chain:
+            while True:
+                await rubber_ducky.call_llama(prompt=args.prompt, chain=args.chain)
+        return
+
+    # Handle file input
    if args.file is not None:
         code = open(args.file).read()
-
+    # Handle directory input
     else:
-
+        code = read_files_from_dir(args.directory)
+
+    await rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
+
+
+def main():
+    asyncio.run(ducky())
 
 if __name__ == "__main__":
-
+    main()
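The substance of this release is the swap from langchain's streaming stdout callback to ollama's `AsyncClient`: `call_llama` now accumulates every reply in `responses` and, when `--chain` is set, prepends them to the next prompt so the conversation keeps its context. Note that `generate` is called with the literal string `"codellama"`, so the `--model` flag sets `self.model` but never reaches the actual request. Below is a minimal sketch of the same request pattern, assuming a local Ollama server with `codellama` pulled; `review_code` is an illustrative helper, not part of the package:

```python
import asyncio
from ollama import AsyncClient


async def review_code(code: str) -> None:
    # Illustrative helper mirroring call_llama's default path:
    # a fixed review prompt with the code appended, one non-chained request.
    client = AsyncClient()  # expects a local Ollama server (default http://localhost:11434)
    prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
    response = await client.generate(model="codellama", prompt=prompt)
    print(response["response"])


if __name__ == "__main__":
    asyncio.run(review_code("def add(a, b):\n    return a + b\n"))
```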
rubber_ducky-1.1.1.dist-info/METADATA
ADDED
@@ -0,0 +1,60 @@
+Metadata-Version: 2.1
+Name: rubber-ducky
+Version: 1.1.1
+Summary: AI Companion for Pair Programming
+Home-page: https://github.com/ParthSareen/ducky
+Author: Parth Sareen
+Author-email: psareen@uwaterloo.ca
+License: MIT
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: ollama
+
+# rubber ducky
+
+## tl;dr
+- `pip install rubber-ducky`
+- Install ollama
+- `ollama pull codellama` (first time and then you can just have application in background)
+- There are probably other dependencies which I forgot to put in setup.py sorry in advance.
+- Run with `ducky <path>` or `ducky <question>`
+
+## Dependencies
+
+You will need Ollama installed on your machine. The model I use for this project is `codellama`.
+
+For the first installation you can run `ollama pull codellama` and it should pull the necessary binaries for you.
+
+Ollama is also great because it'll spin up a server which can run in the background and can even do automatic model switching as long as you have it installed.
+
+## Usage
+
+Install through [pypi](https://pypi.org/project/rubber-ducky/):
+
+`pip install rubber-ducky` .
+
+### Simple run
+`ducky`
+
+or
+
+`ducky <question>`
+
+or
+
+`ducky -f <path>`
+
+
+### All options
+`ducky --file <path> --prompt <prompt> --directory <directory> --chain --model <model>`
+
+Where:
+- `--prompt` or `-p`: Custom prompt to be used
+- `--file` or `-f`: The file to be processed
+- `--directory` or `-d`: The directory to be processed
+- `--chain` or `-c`: Chain the output of the previous command to the next command
+- `--model` or `-m`: The model to be used (default is "codellama")
+
+
+## Example output
+
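The new README documents both `ducky <question>` and flag-based invocations. These coexist because the positional `question` argument is declared with `nargs="?"`, which makes it optional. A small sketch of that parsing behavior, using only the argument declarations shown in the diff (the `prog` name and sample inputs are illustrative):

```python
import argparse

# Reconstruction of the 1.1.1 argument grammar: an optional positional
# (nargs="?") lets `ducky <question>` and `ducky -f <path>` coexist.
parser = argparse.ArgumentParser(prog="ducky")
parser.add_argument("question", nargs="?", default=None, help="Direct question to ask")
parser.add_argument("--file", "-f", default=None, help="The file to be processed")

args, _ = parser.parse_known_args(["why is my loop slow?"])
print(args.question, args.file)  # -> why is my loop slow? None

args, _ = parser.parse_known_args(["-f", "ducky/ducky.py"])
print(args.question, args.file)  # -> None ducky/ducky.py
```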
rubber_ducky-1.1.1.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+ducky/__init__.py,sha256=_7imP8Jc2SIapn4fzGkspmKvqxPTFDcDJWHZ_o_MnlE,24
+ducky/ducky.py,sha256=NwOzoLMd7fzimq-4OkfEL6buACY2Y13cze2uLEerVBU,3540
+rubber_ducky-1.1.1.dist-info/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
+rubber_ducky-1.1.1.dist-info/METADATA,sha256=uPUKC85hZxy2j2jVojKPzWH_rVYt1GAAB7Z1deWUFbk,1638
+rubber_ducky-1.1.1.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+rubber_ducky-1.1.1.dist-info/entry_points.txt,sha256=LPndtj8UqEWtwYApv5LJJniH4FUrsriOqV2LA1X_UPQ,43
+rubber_ducky-1.1.1.dist-info/top_level.txt,sha256=4Q75MONDNPpQ3o17bTu7RFuKwFhTIRzlXP3_LDWQQ30,6
+rubber_ducky-1.1.1.dist-info/RECORD,,
rubber_ducky-1.0.1.dist-info/METADATA
REMOVED
@@ -1,45 +0,0 @@
-Metadata-Version: 2.1
-Name: rubber-ducky
-Version: 1.0.1
-Summary: AI Companion for Pair Programming
-Home-page: https://github.com/ParthSareen/ducky
-Author: Parth Sareen
-Author-email: psareen@uwaterloo.ca
-License: MIT
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: langchain
-
-# rubber ducky
-
-## tl;dr
-- `pip install rubber-ducky`
-- Install ollama
-- `ollama run codellama` (first time and then you can just have application in background)
-- There are probably other dependencies which I forgot to put in setup.py sorry in advance.
-- run with `ducky --file <path> --prompt (optional) <prompt to override>`
-
-## Why did I make this
-
-I wrote ducky because I annoy engineers too much and I needed to talk someone through my code quickly and validate my approach. Maybe this is why I'm not a senior engineer.
-
-Since I can't dump all my code to GPT and make it tell me I know how to code, I decided to build something for quick iteration. All. Local. I also didn't want to get fired by leaking all our data. Not again.
-
-## Dependencies
-Bless the folks at Ollama cause they have been carrying my recent projects.
-
-This project is currently only supported on Mac and Linux cause Ollama is a dependency.
-You will need Ollama installed on your machine. The model I use for this project is `codellama`.
-
-For the first installation you can run `ollama run codellama` and it should pull the necessary binaries for you. Ollama is also great because it'll spin up a server which can run in the background and can even do automatic model switching as long as you have it installed.
-
-## Usage
-Make sure you have the package installed. Easiest through [pypi](https://pypi.org/project/rubber-ducky/).
-
-`pip install rubber-ducky` also works.
-
-To run:
-
-`ducky --file <path> --prompt (optional) <prompt to override>`
-
-I have yet to implement some methods so if you do something I don't say that's on you.
rubber_ducky-1.0.1.dist-info/RECORD
REMOVED
@@ -1,8 +0,0 @@
-ducky/__init__.py,sha256=_7imP8Jc2SIapn4fzGkspmKvqxPTFDcDJWHZ_o_MnlE,24
-ducky/ducky.py,sha256=Yc3eHtn9zf0tFdxhAkHPCmpdNoGlJ_5Fvwjp_-j6xyE,1698
-rubber_ducky-1.0.1.dist-info/LICENSE,sha256=gQ1rCmw18NqTk5GxG96F6vgyN70e1c4kcKUtWDwdNaE,1069
-rubber_ducky-1.0.1.dist-info/METADATA,sha256=FzklJu6cdH6MVPWFCpYJXlG2f4dVHTy8KSD6LppIWnY,1912
-rubber_ducky-1.0.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-rubber_ducky-1.0.1.dist-info/entry_points.txt,sha256=Dpnmsfu54R9NqaSw8HZVsI52ICS3grSuSV_YwZa_x2Y,38
-rubber_ducky-1.0.1.dist-info/top_level.txt,sha256=4Q75MONDNPpQ3o17bTu7RFuKwFhTIRzlXP3_LDWQQ30,6
-rubber_ducky-1.0.1.dist-info/RECORD,,
{rubber_ducky-1.0.1.dist-info → rubber_ducky-1.1.1.dist-info}/LICENSE
File without changes

{rubber_ducky-1.0.1.dist-info → rubber_ducky-1.1.1.dist-info}/top_level.txt
File without changes