rubber-ducky 1.1.3.tar.gz → 1.1.5.tar.gz
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/PKG-INFO +10 -2
- rubber_ducky-1.1.5/ducky/ducky.py +93 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/rubber_ducky.egg-info/PKG-INFO +10 -2
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/setup.py +1 -1
- rubber_ducky-1.1.3/ducky/ducky.py +0 -101
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/LICENSE +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/README.md +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/ducky/__init__.py +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/rubber_ducky.egg-info/SOURCES.txt +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/rubber_ducky.egg-info/dependency_links.txt +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/rubber_ducky.egg-info/entry_points.txt +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/rubber_ducky.egg-info/requires.txt +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/rubber_ducky.egg-info/top_level.txt +0 -0
- {rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/setup.cfg +0 -0
{rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/PKG-INFO

@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: rubber-ducky
-Version: 1.1.3
+Version: 1.1.5
 Summary: AI Companion for Pair Programming
 Home-page: https://github.com/ParthSareen/ducky
 Author: Parth Sareen
@@ -9,6 +9,14 @@ License: MIT
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: ollama
+Dynamic: author
+Dynamic: author-email
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license
+Dynamic: requires-dist
+Dynamic: summary
 
 # rubber ducky
 <p align="center">
rubber_ducky-1.1.5/ducky/ducky.py (new file)

@@ -0,0 +1,93 @@
+import argparse
+import asyncio
+from ollama import AsyncClient
+
+
+class RubberDuck:
+    def __init__(self, model: str, quick: bool = False) -> None:
+        self.system_prompt = """You are a pair progamming tool called Ducky or RubberDucky to help developers debug, think through design, and write code.
+        Help the user think through their approach and provide feedback on the code. Think step by step and ask clarifying questions if needed.
+        If asked """
+        self.client = AsyncClient()
+        self.model = model
+        self.quick = quick
+
+    async def call_llm(self, prompt: str | None = None) -> None:
+        chain = False if prompt else True
+
+        if prompt is None:
+            prompt = input("\nEnter your prompt (or press Enter for default review): ")
+
+        if self.quick:
+            prompt += ". Return a command and be extremely concise"
+
+        responses = [self.system_prompt]
+        while True:
+            context_prompt = "\n".join(responses) + "\n" + prompt
+            stream = await self.client.generate(model=self.model, prompt=context_prompt, stream=True)
+            response_text = ""
+            async for chunk in stream:
+                if 'response' in chunk:
+                    print(chunk['response'], end='', flush=True)
+                    response_text += chunk['response']
+            print()
+            responses.append(response_text)
+            if not chain:
+                break
+            prompt = input("\n>> ")
+
+
+def read_files_from_dir(directory: str) -> str:
+    import os
+
+    files = os.listdir(directory)
+    code = ""
+    for file in files:
+        code += open(directory + "/" + file).read()
+    return code
+
+
+async def ducky() -> None:
+    parser = argparse.ArgumentParser()
+    parser.add_argument("question", nargs="*", help="Direct question to ask", default=None)
+    parser.add_argument("--prompt", "-p", help="Custom prompt to be used", default=None)
+    parser.add_argument("--file", "-f", help="The file to be processed", default=None)
+    parser.add_argument("--directory", "-d", help="The directory to be processed", default=None)
+    parser.add_argument("--quick", "-q", help="Quick mode", default=False)
+    parser.add_argument(
+        "--chain",
+        "-c",
+        help="Chain the output of the previous command to the next command",
+        action="store_true",
+        default=False,
+    )
+    parser.add_argument(
+        "--model", "-m", help="The model to be used", default="qwen2.5-coder"
+    )
+    args, _ = parser.parse_known_args()
+
+    rubber_ducky = RubberDuck(model=args.model, quick=args.quick)
+
+    # Handle direct question from CLI
+    if args.question:
+        question = " ".join(args.question)
+        await rubber_ducky.call_llm(prompt=question)
+        return
+
+    # Handle interactive mode (no file/directory specified)
+    if args.file is None and args.directory is None:
+        await rubber_ducky.call_llm(prompt=args.prompt)
+        return
+
+    # Get code from file or directory
+    code = (open(args.file).read() if args.file
+            else read_files_from_dir(args.directory))
+
+    await rubber_ducky.call_llm(code=code, prompt=args.prompt)
+
+
+def main():
+    asyncio.run(ducky())
+
+if __name__ == "__main__":
+    main()
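For orientation, 1.1.5 renames `call_llama` to `call_llm`, moves conciseness behind a `quick` flag on the constructor, and derives chaining from whether a prompt was passed. Below is a minimal sketch of driving the new class directly, assuming a locally running Ollama server with the model already pulled; the `demo` wrapper is illustrative and not part of the package.

```python
import asyncio

from ducky.ducky import RubberDuck  # module shipped in rubber_ducky-1.1.5

async def demo() -> None:
    # quick=True appends "Return a command and be extremely concise" to the prompt
    duck = RubberDuck(model="qwen2.5-coder", quick=True)
    # Passing an explicit prompt sets chain = False, so this streams one
    # response to stdout and returns instead of looping for follow-ups
    await duck.call_llm(prompt="How do I list files modified in the last day?")

asyncio.run(demo())
```

Note that, as diffed, the file/directory path in `ducky()` still passes `code=` to `call_llm`, whose new signature accepts only `prompt`.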
{rubber_ducky-1.1.3 → rubber_ducky-1.1.5}/rubber_ducky.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: rubber-ducky
-Version: 1.1.3
+Version: 1.1.5
 Summary: AI Companion for Pair Programming
 Home-page: https://github.com/ParthSareen/ducky
 Author: Parth Sareen
@@ -9,6 +9,14 @@ License: MIT
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: ollama
+Dynamic: author
+Dynamic: author-email
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license
+Dynamic: requires-dist
+Dynamic: summary
 
 # rubber ducky
 <p align="center">
rubber_ducky-1.1.3/ducky/ducky.py (removed)

@@ -1,101 +0,0 @@
-import argparse
-import asyncio
-from typing import Optional
-from ollama import AsyncClient
-
-
-class RubberDuck:
-    def __init__(self, model: str = "codellama") -> None:
-        self.system_prompt = """You are a pair progamming tool to help developers debug, think through design, and write code.
-        Help the user think through their approach and provide feedback on the code. Think step by step and ask clarifying questions if needed."""
-        self.client = AsyncClient()
-        self.model = model
-
-    async def call_llama(self, code: str = "", prompt: Optional[str] = None, chain: bool = False) -> None:
-        if prompt is None:
-            user_prompt = input("\nEnter your prompt (or press Enter for default review): ")
-            if not user_prompt:
-                prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
-            else:
-                prompt = user_prompt + code
-        else:
-            prompt = prompt + code
-
-        responses = []
-        while True:
-            # Include previous responses in the prompt for context
-            context_prompt = "\n".join(responses) + "\n" + prompt
-            stream = await self.client.generate(model=self.model, prompt=context_prompt, stream=True)
-            response_text = ""
-            async for chunk in stream:
-                if 'response' in chunk:
-                    print(chunk['response'], end='', flush=True)
-                    response_text += chunk['response']
-            print()  # New line after response completes
-            responses.append(response_text)
-            if not chain:
-                break
-            prompt = input("\nAny questions? \n")
-
-
-def read_files_from_dir(directory: str) -> str:
-    import os
-
-    files = os.listdir(directory)
-    code = ""
-    for file in files:
-        code += open(directory + "/" + file).read()
-    return code
-
-
-async def ducky() -> None:
-    parser = argparse.ArgumentParser()
-    parser.add_argument("question", nargs="*", help="Direct question to ask", default=None)
-    parser.add_argument("--prompt", "-p", help="Custom prompt to be used", default=None)
-    parser.add_argument("--file", "-f", help="The file to be processed", default=None)
-    parser.add_argument("--directory", "-d", help="The directory to be processed", default=None)
-    parser.add_argument(
-        "--chain",
-        "-c",
-        help="Chain the output of the previous command to the next command",
-        action="store_true",
-        default=False,
-    )
-    parser.add_argument(
-        "--model", "-m", help="The model to be used", default="codellama"
-    )
-    args, _ = parser.parse_known_args()
-
-    # My testing has shown that the codellama:7b-python is good for returning python code from the program.
-    # My intention with this tool was to give more general feedback and have back a back and forth with the user.
-    rubber_ducky = RubberDuck(model=args.model)
-
-    # Handle direct question from CLI
-    if args.question is not None:
-        question = " ".join(args.question) + " be as concise as possible"
-        await rubber_ducky.call_llama(prompt=question, chain=args.chain)
-        return
-
-    if args.file is None and args.directory is None:
-        # Handle interactive mode (no file/directory specified)
-        await rubber_ducky.call_llama(prompt=args.prompt, chain=args.chain)
-        if args.chain:
-            while True:
-                await rubber_ducky.call_llama(prompt=args.prompt, chain=args.chain)
-        return
-
-    # Handle file input
-    if args.file is not None:
-        code = open(args.file).read()
-    # Handle directory input
-    else:
-        code = read_files_from_dir(args.directory)
-
-    await rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
-
-
-def main():
-    asyncio.run(ducky())
-
-if __name__ == "__main__":
-    main()
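For comparison, the removed 1.1.3 interface took `code` and `chain` per call rather than at construction time. A sketch under the same Ollama assumptions as above; `demo_old` and the reviewed filename are illustrative:

```python
import asyncio

from ducky.ducky import RubberDuck  # 1.1.3 layout of the same module

async def demo_old() -> None:
    duck = RubberDuck(model="codellama")  # model defaulted on the class in 1.1.3
    # code was concatenated onto the prompt; chain=False made a single call
    await duck.call_llama(code=open("example.py").read(),
                          prompt="review the code, find any issues if any: ",
                          chain=False)

asyncio.run(demo_old())
```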