devseeker 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devseeker-0.0.1/LICENSE +22 -0
- devseeker-0.0.1/PKG-INFO +120 -0
- devseeker-0.0.1/README.md +78 -0
- devseeker-0.0.1/devseeker/__init__.py +4 -0
- devseeker-0.0.1/devseeker/applications/__init__.py +0 -0
- devseeker-0.0.1/devseeker/applications/cli/__init__.py +0 -0
- devseeker-0.0.1/devseeker/applications/cli/cli_agent.py +232 -0
- devseeker-0.0.1/devseeker/applications/cli/collect.py +177 -0
- devseeker-0.0.1/devseeker/applications/cli/file_selector.py +540 -0
- devseeker-0.0.1/devseeker/applications/cli/learning.py +299 -0
- devseeker-0.0.1/devseeker/applications/cli/main.py +561 -0
- devseeker-0.0.1/devseeker/benchmark/__init__.py +0 -0
- devseeker-0.0.1/devseeker/benchmark/__main__.py +154 -0
- devseeker-0.0.1/devseeker/benchmark/bench_config.py +69 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/apps/load.py +118 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/apps/problem.py +25 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/apps/problems.py +3 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/gptme/load.py +104 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/load.py +48 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/mbpp/load.py +114 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/mbpp/problem.py +25 -0
- devseeker-0.0.1/devseeker/benchmark/benchmarks/mbpp/problems.py +3 -0
- devseeker-0.0.1/devseeker/benchmark/default_bench_config.toml +16 -0
- devseeker-0.0.1/devseeker/benchmark/run.py +150 -0
- devseeker-0.0.1/devseeker/benchmark/types.py +94 -0
- devseeker-0.0.1/devseeker/core/__init__.py +0 -0
- devseeker-0.0.1/devseeker/core/ai.py +437 -0
- devseeker-0.0.1/devseeker/core/base_agent.py +31 -0
- devseeker-0.0.1/devseeker/core/base_execution_env.py +42 -0
- devseeker-0.0.1/devseeker/core/base_memory.py +15 -0
- devseeker-0.0.1/devseeker/core/chat_to_files.py +245 -0
- devseeker-0.0.1/devseeker/core/default/__init__.py +0 -0
- devseeker-0.0.1/devseeker/core/default/constants.py +12 -0
- devseeker-0.0.1/devseeker/core/default/disk_execution_env.py +111 -0
- devseeker-0.0.1/devseeker/core/default/disk_memory.py +326 -0
- devseeker-0.0.1/devseeker/core/default/file_store.py +62 -0
- devseeker-0.0.1/devseeker/core/default/paths.py +86 -0
- devseeker-0.0.1/devseeker/core/default/simple_agent.py +100 -0
- devseeker-0.0.1/devseeker/core/default/steps.py +397 -0
- devseeker-0.0.1/devseeker/core/diff.py +419 -0
- devseeker-0.0.1/devseeker/core/files_dict.py +115 -0
- devseeker-0.0.1/devseeker/core/git.py +85 -0
- devseeker-0.0.1/devseeker/core/linting.py +64 -0
- devseeker-0.0.1/devseeker/core/preprompts_holder.py +29 -0
- devseeker-0.0.1/devseeker/core/project_config.py +167 -0
- devseeker-0.0.1/devseeker/core/prompt.py +44 -0
- devseeker-0.0.1/devseeker/core/token_usage.py +297 -0
- devseeker-0.0.1/devseeker/core/version_manager.py +30 -0
- devseeker-0.0.1/devseeker/preprompts/clarify +3 -0
- devseeker-0.0.1/devseeker/preprompts/entrypoint +6 -0
- devseeker-0.0.1/devseeker/preprompts/file_format +20 -0
- devseeker-0.0.1/devseeker/preprompts/file_format_diff +41 -0
- devseeker-0.0.1/devseeker/preprompts/file_format_fix +22 -0
- devseeker-0.0.1/devseeker/preprompts/generate +15 -0
- devseeker-0.0.1/devseeker/preprompts/improve +15 -0
- devseeker-0.0.1/devseeker/preprompts/philosophy +12 -0
- devseeker-0.0.1/devseeker/preprompts/roadmap +2 -0
- devseeker-0.0.1/devseeker/tools/__init__.py +0 -0
- devseeker-0.0.1/devseeker/tools/custom_steps.py +233 -0
- devseeker-0.0.1/devseeker/tools/supported_languages.py +58 -0
- devseeker-0.0.1/pyproject.toml +119 -0
devseeker-0.0.1/LICENSE
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2023 Ibrahim Rayamah
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
22
|
+
|
devseeker-0.0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: devseeker
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Coolest coding agent
|
|
5
|
+
License: MIT
|
|
6
|
+
Author: Ibrahim Rayamah
|
|
7
|
+
Author-email: ibz04.dev@gmail.com
|
|
8
|
+
Requires-Python: >=3.10,<3.13
|
|
9
|
+
Classifier: Development Status :: 4 - Beta
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Programming Language :: Python :: 3
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
15
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
16
|
+
Provides-Extra: doc
|
|
17
|
+
Provides-Extra: test
|
|
18
|
+
Requires-Dist: black (==23.3.0)
|
|
19
|
+
Requires-Dist: dataclasses-json (==0.5.7)
|
|
20
|
+
Requires-Dist: datasets (>=2.17.1,<3.0.0)
|
|
21
|
+
Requires-Dist: langchain (>=0.1.2)
|
|
22
|
+
Requires-Dist: langchain-anthropic (>=0.1.1,<0.2.0)
|
|
23
|
+
Requires-Dist: langchain-community (>=0.2.0,<0.3.0)
|
|
24
|
+
Requires-Dist: langchain_openai
|
|
25
|
+
Requires-Dist: openai (>=1.0,<2.0)
|
|
26
|
+
Requires-Dist: pillow (>=10.2.0,<11.0.0)
|
|
27
|
+
Requires-Dist: pyperclip (>=1.8.2,<2.0.0)
|
|
28
|
+
Requires-Dist: python-dotenv (>=0.21.0)
|
|
29
|
+
Requires-Dist: regex (>=2023.12.25,<2024.0.0)
|
|
30
|
+
Requires-Dist: rudder-sdk-python (>=2.0.2)
|
|
31
|
+
Requires-Dist: tabulate (==0.9.0)
|
|
32
|
+
Requires-Dist: termcolor (==2.3.0)
|
|
33
|
+
Requires-Dist: tiktoken (>=0.0.4)
|
|
34
|
+
Requires-Dist: toml (>=0.10.2)
|
|
35
|
+
Requires-Dist: tomlkit (>=0.12.4,<0.13.0)
|
|
36
|
+
Requires-Dist: typer (>=0.3.2)
|
|
37
|
+
Project-URL: Documentation, https://github.com/iBz-04/Devseeker/blob/main/README.md
|
|
38
|
+
Project-URL: Homepage, https://github.com/iBz-04/Devseeker
|
|
39
|
+
Project-URL: Repository, https://github.com/iBz-04/Devseeker
|
|
40
|
+
Description-Content-Type: text/markdown
|
|
41
|
+
|
|
42
|
+
# devseeker
|
|
43
|
+
|
|
44
|
+
[](https://pypi.org/project/devseeker)
|
|
45
|
+
[](LICENSE)
|
|
46
|
+
[](https://github.com/devseeker-org/devseeker/actions/workflows/ci.yaml)
|
|
47
|
+
|
|
48
|
+
**devseeker is a CLI coding agent for generating and improving code through prompts**
|
|
49
|
+
|
|
50
|
+
## Installation
|
|
51
|
+
|
|
52
|
+
### Stable release
|
|
53
|
+
|
|
54
|
+
```sh
|
|
55
|
+
pip install devseeker
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Development installation
|
|
59
|
+
|
|
60
|
+
```sh
|
|
61
|
+
git clone https://github.com/iBz-04/devseeker.git
|
|
62
|
+
cd devseeker
|
|
63
|
+
poetry install
|
|
64
|
+
poetry shell
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
## Configuration
|
|
68
|
+
|
|
69
|
+
devseeker requires an OpenAI API key. Set it as an environment variable or in a `.env` file:
|
|
70
|
+
|
|
71
|
+
```sh
|
|
72
|
+
export OPENAI_API_KEY=your_api_key
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
or create a `.env` file:
|
|
76
|
+
|
|
77
|
+
```
|
|
78
|
+
OPENAI_API_KEY=your_api_key
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
## Usage
|
|
82
|
+
|
|
83
|
+
### Creating a new project
|
|
84
|
+
|
|
85
|
+
1. Create an empty directory for your project.
|
|
86
|
+
2. Inside the directory, create a file named `prompt` containing your instructions.
|
|
87
|
+
3. Run:
|
|
88
|
+
|
|
89
|
+
```sh
|
|
90
|
+
devseeker projects/my-new-project
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
### Improving existing code
|
|
94
|
+
|
|
95
|
+
```sh
|
|
96
|
+
devseeker projects/my-existing-project -i
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### Benchmarking AI agents
|
|
100
|
+
|
|
101
|
+
```sh
|
|
102
|
+
bench run --help
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
## Commands
|
|
106
|
+
|
|
107
|
+
- `devseeker` (alias `ds`, `dste`) runs the main CLI application.
|
|
108
|
+
- `bench` runs benchmarks on AI agents.
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
- Windows users can refer to [WINDOWS_README.md](WINDOWS_README.md).
|
|
112
|
+
|
|
113
|
+
## Contributing
|
|
114
|
+
|
|
115
|
+
Contributions are welcome!
|
|
116
|
+
|
|
117
|
+
## License
|
|
118
|
+
|
|
119
|
+
This project is licensed under the MIT License. See [LICENSE](LICENSE) for details.
|
|
120
|
+
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
# devseeker
|
|
2
|
+
|
|
3
|
+
[](https://pypi.org/project/devseeker)
|
|
4
|
+
[](LICENSE)
|
|
5
|
+
[](https://github.com/devseeker-org/devseeker/actions/workflows/ci.yaml)
|
|
6
|
+
|
|
7
|
+
**devseeker is a CLI coding agent for generating and improving code through prompts**
|
|
8
|
+
|
|
9
|
+
## Installation
|
|
10
|
+
|
|
11
|
+
### Stable release
|
|
12
|
+
|
|
13
|
+
```sh
|
|
14
|
+
pip install devseeker
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
### Development installation
|
|
18
|
+
|
|
19
|
+
```sh
|
|
20
|
+
git clone https://github.com/iBz-04/devseeker.git
|
|
21
|
+
cd devseeker
|
|
22
|
+
poetry install
|
|
23
|
+
poetry shell
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
## Configuration
|
|
27
|
+
|
|
28
|
+
devseeker requires an OpenAI API key. Set it as an environment variable or in a `.env` file:
|
|
29
|
+
|
|
30
|
+
```sh
|
|
31
|
+
export OPENAI_API_KEY=your_api_key
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
or create a `.env` file:
|
|
35
|
+
|
|
36
|
+
```
|
|
37
|
+
OPENAI_API_KEY=your_api_key
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Usage
|
|
41
|
+
|
|
42
|
+
### Creating a new project
|
|
43
|
+
|
|
44
|
+
1. Create an empty directory for your project.
|
|
45
|
+
2. Inside the directory, create a file named `prompt` containing your instructions.
|
|
46
|
+
3. Run:
|
|
47
|
+
|
|
48
|
+
```sh
|
|
49
|
+
devseeker projects/my-new-project
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### Improving existing code
|
|
53
|
+
|
|
54
|
+
```sh
|
|
55
|
+
devseeker projects/my-existing-project -i
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Benchmarking AI agents
|
|
59
|
+
|
|
60
|
+
```sh
|
|
61
|
+
bench run --help
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
## Commands
|
|
65
|
+
|
|
66
|
+
- `devseeker` (alias `ds`, `dste`) runs the main CLI application.
|
|
67
|
+
- `bench` runs benchmarks on AI agents.
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
- Windows users can refer to [WINDOWS_README.md](WINDOWS_README.md).
|
|
71
|
+
|
|
72
|
+
## Contributing
|
|
73
|
+
|
|
74
|
+
Contributions are welcome!
|
|
75
|
+
|
|
76
|
+
## License
|
|
77
|
+
|
|
78
|
+
This project is licensed under the MIT License. See [LICENSE](LICENSE) for details.
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module provides the CliAgent class which manages the lifecycle of code generation and improvement
|
|
3
|
+
using an AI model. It includes functionalities to initialize code generation, improve existing code,
|
|
4
|
+
and process the code through various steps defined in the step bundle.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Callable, Optional, TypeVar
|
|
8
|
+
|
|
9
|
+
# from devseeker.core.default.git_version_manager import GitVersionManager
|
|
10
|
+
from devseeker.core.ai import AI
|
|
11
|
+
from devseeker.core.base_agent import BaseAgent
|
|
12
|
+
from devseeker.core.base_execution_env import BaseExecutionEnv
|
|
13
|
+
from devseeker.core.base_memory import BaseMemory
|
|
14
|
+
from devseeker.core.default.disk_execution_env import DiskExecutionEnv
|
|
15
|
+
from devseeker.core.default.disk_memory import DiskMemory
|
|
16
|
+
from devseeker.core.default.paths import PREPROMPTS_PATH
|
|
17
|
+
from devseeker.core.default.steps import (
|
|
18
|
+
execute_entrypoint,
|
|
19
|
+
gen_code,
|
|
20
|
+
gen_entrypoint,
|
|
21
|
+
improve_fn,
|
|
22
|
+
)
|
|
23
|
+
from devseeker.core.files_dict import FilesDict
|
|
24
|
+
from devseeker.core.preprompts_holder import PrepromptsHolder
|
|
25
|
+
from devseeker.core.prompt import Prompt
|
|
26
|
+
|
|
27
|
+
CodeGenType = TypeVar("CodeGenType", bound=Callable[[AI, str, BaseMemory], FilesDict])
|
|
28
|
+
CodeProcessor = TypeVar(
|
|
29
|
+
"CodeProcessor", bound=Callable[[AI, BaseExecutionEnv, FilesDict], FilesDict]
|
|
30
|
+
)
|
|
31
|
+
ImproveType = TypeVar(
|
|
32
|
+
"ImproveType", bound=Callable[[AI, str, FilesDict, BaseMemory], FilesDict]
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class CliAgent(BaseAgent):
    """
    Manages the lifecycle of code generation and improvement using an AI model.

    Orchestrates the generation of new code and the improvement of existing
    code based on given prompts, and utilizes a memory system and execution
    environment for processing.

    Parameters
    ----------
    memory : BaseMemory
        An instance adhering to the BaseMemory interface, used for storing and
        retrieving information during the code generation process.
    execution_env : BaseExecutionEnv
        An instance adhering to the BaseExecutionEnv interface, used for
        executing code and managing the execution environment.
    ai : AI, optional
        Manages calls to the language model. If not provided, a default
        instance is created.
    code_gen_fn : CodeGenType, optional
        A callable that takes an AI instance, a prompt, and a memory instance
        to generate code. Defaults to the `gen_code` step.
    improve_fn : ImproveType, optional
        A callable that takes an AI instance, a prompt, a FilesDict instance,
        and a memory instance to improve code. Defaults to the module-level
        `improve_fn` step.
    process_code_fn : CodeProcessor, optional
        A callable that takes an AI instance, an execution environment, and a
        FilesDict instance to process code. Defaults to `execute_entrypoint`.
    preprompts_holder : PrepromptsHolder, optional
        Manages preprompt templates. If not provided, a default instance is
        created using PREPROMPTS_PATH.

    Attributes
    ----------
    memory : BaseMemory
        The memory instance where the agent stores and retrieves information.
    execution_env : BaseExecutionEnv
        The execution environment instance where the agent executes code.
    ai : AI
        The AI instance used for interacting with the language model.
    code_gen_fn : CodeGenType
        The function used for generating code.
    improve_fn : ImproveType
        The function used for improving code.
    process_code_fn : CodeProcessor
        The function used for processing code.
    preprompts_holder : PrepromptsHolder
        The holder for preprompt templates.
    """

    def __init__(
        self,
        memory: BaseMemory,
        execution_env: BaseExecutionEnv,
        ai: Optional[AI] = None,
        code_gen_fn: CodeGenType = gen_code,
        improve_fn: ImproveType = improve_fn,
        process_code_fn: CodeProcessor = execute_entrypoint,
        preprompts_holder: Optional[PrepromptsHolder] = None,
    ):
        self.memory = memory
        self.execution_env = execution_env
        self.ai = ai or AI()
        self.code_gen_fn = code_gen_fn
        self.process_code_fn = process_code_fn
        self.improve_fn = improve_fn
        self.preprompts_holder = preprompts_holder or PrepromptsHolder(PREPROMPTS_PATH)

    @classmethod
    def with_default_config(
        cls,
        memory: DiskMemory,
        execution_env: DiskExecutionEnv,
        ai: Optional[AI] = None,
        code_gen_fn: CodeGenType = gen_code,
        improve_fn: ImproveType = improve_fn,
        process_code_fn: CodeProcessor = execute_entrypoint,
        preprompts_holder: Optional[PrepromptsHolder] = None,
        diff_timeout=3,
    ):
        """
        Create a new CliAgent with default configurations for memory,
        execution environment, AI, and the functional steps.

        Parameters
        ----------
        memory : DiskMemory
            An instance of DiskMemory for storing and retrieving information.
        execution_env : DiskExecutionEnv
            An instance of DiskExecutionEnv for executing code.
        ai : AI, optional
            An AI instance for interacting with the language model. Defaults
            to None, which creates a new AI instance.
        code_gen_fn : CodeGenType, optional
            A function for generating code. Defaults to `gen_code`.
        improve_fn : ImproveType, optional
            A function for improving code. Defaults to `improve_fn`.
        process_code_fn : CodeProcessor, optional
            A function for processing code. Defaults to `execute_entrypoint`.
        preprompts_holder : PrepromptsHolder, optional
            Manages preprompt templates. Defaults to None, which creates a new
            PrepromptsHolder using PREPROMPTS_PATH.
        diff_timeout : int, optional
            NOTE(review): accepted for API compatibility but currently not
            stored or forwarded — the constructor takes no such parameter.
            Callers should pass `diff_timeout` to `improve()` instead.

        Returns
        -------
        CliAgent
            An instance configured with the provided or default parameters.
        """
        return cls(
            memory=memory,
            execution_env=execution_env,
            ai=ai,
            code_gen_fn=code_gen_fn,
            process_code_fn=process_code_fn,
            improve_fn=improve_fn,
            preprompts_holder=preprompts_holder or PrepromptsHolder(PREPROMPTS_PATH),
        )

    def init(self, prompt: Prompt) -> FilesDict:
        """
        Generate a new piece of code using the AI and step bundle based on the
        provided prompt.

        Parameters
        ----------
        prompt : Prompt
            The prompt that guides the code generation process.

        Returns
        -------
        FilesDict
            A `FilesDict` containing the generated code plus the generated
            entrypoint, after being run through `process_code_fn`.
        """
        files_dict = self.code_gen_fn(
            self.ai, prompt, self.memory, self.preprompts_holder
        )
        entrypoint = gen_entrypoint(
            self.ai, prompt, files_dict, self.memory, self.preprompts_holder
        )
        # Merge generated files with the entrypoint; entrypoint wins on key
        # collisions because it is unpacked last.
        files_dict = FilesDict({**files_dict, **entrypoint})
        files_dict = self.process_code_fn(
            self.ai,
            self.execution_env,
            files_dict,
            preprompts_holder=self.preprompts_holder,
            prompt=prompt,
            memory=self.memory,
        )
        return files_dict

    def improve(
        self,
        files_dict: FilesDict,
        prompt: Prompt,
        execution_command: Optional[str] = None,
        diff_timeout=3,
    ) -> FilesDict:
        """
        Improve an existing piece of code using the AI and step bundle based
        on the provided prompt.

        Parameters
        ----------
        files_dict : FilesDict
            A `FilesDict` containing the code to be improved.
        prompt : Prompt
            The prompt that guides the code improvement process.
        execution_command : str, optional
            NOTE(review): accepted but currently unused by this method.
        diff_timeout : int, optional
            Timeout forwarded to the improve step's diff handling.

        Returns
        -------
        FilesDict
            A `FilesDict` containing the improved code.
        """
        files_dict = self.improve_fn(
            self.ai,
            prompt,
            files_dict,
            self.memory,
            self.preprompts_holder,
            diff_timeout=diff_timeout,
        )
        return files_dict
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Module `collect` - Data Handling and RudderStack Integration
|
|
3
|
+
|
|
4
|
+
This module provides functionalities to handle and send learning data to RudderStack
|
|
5
|
+
for the purpose of analysis and to improve the devseeker system. The data is sent
|
|
6
|
+
only when the user gives consent to share.
|
|
7
|
+
|
|
8
|
+
Functions:
|
|
9
|
+
send_learning(learning): Sends learning data to RudderStack.
|
|
10
|
+
collect_learnings(prompt, model, temperature, config, memory, review): Processes and sends learning data.
|
|
11
|
+
collect_and_send_human_review(prompt, model, temperature, config, memory): Collects human feedback and sends it.
|
|
12
|
+
|
|
13
|
+
Dependencies:
|
|
14
|
+
hashlib: For generating SHA-256 hash.
|
|
15
|
+
typing: For type annotations.
|
|
16
|
+
devseeker.core: Core functionalities of devseeker.
|
|
17
|
+
devseeker.cli.learning: Handles the extraction of learning data.
|
|
18
|
+
|
|
19
|
+
Notes:
|
|
20
|
+
Data sent to RudderStack is not shared with third parties and is used solely to
|
|
21
|
+
improve devseeker and allow it to handle a broader range of use cases.
|
|
22
|
+
Consent logic is in devseeker/learning.py.
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from typing import Any, Tuple

from devseeker.applications.cli.learning import (
    Learning,
    Review,
    extract_learning,
    human_review_input,
)
from devseeker.core.default.disk_memory import DiskMemory
from devseeker.core.prompt import Prompt
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def send_learning(learning: Learning):
    """
    Ship a single Learning record to RudderStack for analysis.

    Parameters
    ----------
    learning : Learning
        The Learning instance whose data is sent.

    Notes
    -----
    Only called when the user has consented to share data. Data is not shared
    with third parties; it is used solely to improve the tool and let it
    handle more use cases. Consent logic lives in devseeker/learning.py.
    """
    # Imported lazily so the telemetry SDK is only loaded when consent exists.
    import rudderstack.analytics as rudder_analytics

    # Telemetry endpoint configuration.
    rudder_analytics.write_key = "2Re4kqwL61GDp7S8ewe6K5dbogG"
    rudder_analytics.dataPlaneUrl = "https://gptengineerezm.dataplane.rudderstack.com"

    payload = learning.to_dict()  # type: ignore
    rudder_analytics.track(
        user_id=learning.session,
        event="learning",
        properties=payload,
    )
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def collect_learnings(
    prompt: Prompt,
    model: str,
    temperature: float,
    config: Any,
    memory: DiskMemory,
    review: Review,
):
    """
    Collect the learning data and send it to RudderStack for analysis.

    Parameters
    ----------
    prompt : Prompt
        The initial prompt or question that was provided to the model.
    model : str
        The name of the model used for generating the response.
    temperature : float
        The temperature setting used in the model's response generation.
    config : Any
        Configuration parameters used for the learning session.
    memory : DiskMemory
        An instance of DiskMemory for storing and retrieving data.
    review : Review
        An instance of Review containing human feedback on the model's response.

    Notes
    -----
    Attempts to send the learning data to RudderStack. If the data size
    exceeds the maximum allowed event size, the logs are trimmed and the send
    is retried once.
    """
    learnings = extract_learning(prompt, model, temperature, config, memory, review)
    try:
        send_learning(learnings)
    except RuntimeError:
        # RudderStack caps events at 32 KB; trim the logs and retry once.
        max_size = 32 << 10  # 32KB in bytes
        current_size = len(learnings.to_json().encode("utf-8"))  # size in bytes
        overflow = current_size - max_size

        # Reserve room for the "[REMOVED ...]" marker plus a safety margin.
        # Clamp at zero: if the RuntimeError was unrelated to size
        # (overflow <= 0), a negative remove_length would otherwise turn the
        # slice below into a bogus prefix-keep instead of a trim.
        remove_length = max(
            overflow + len(f"[REMOVED {overflow} CHARACTERS]") + 100, 0
        )

        if remove_length:
            learnings.logs = (
                learnings.logs[:-remove_length]
                + f"\n\n[REMOVED {remove_length} CHARACTERS]"
            )

        print(
            "WARNING: learning too big, removing some parts. "
            "Please report if this results in a crash."
        )
        try:
            send_learning(learnings)
        except RuntimeError:
            print(
                "Sending learnings crashed despite truncation. Progressing without saving learnings."
            )
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
# def steps_file_hash():
|
|
128
|
+
# """
|
|
129
|
+
# Compute the SHA-256 hash of the steps file.
|
|
130
|
+
#
|
|
131
|
+
# Returns
|
|
132
|
+
# -------
|
|
133
|
+
# str
|
|
134
|
+
# The SHA-256 hash of the steps file.
|
|
135
|
+
# """
|
|
136
|
+
# with open(steps.__file__, "r") as f:
|
|
137
|
+
# content = f.read()
|
|
138
|
+
# return hashlib.sha256(content.encode("utf-8")).hexdigest()
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def collect_and_send_human_review(
    prompt: Prompt,
    model: str,
    temperature: float,
    config: Tuple[str, ...],
    memory: DiskMemory,
):
    """
    Prompt the user for a review of the generated code and forward it.

    Parameters
    ----------
    prompt : Prompt
        The initial prompt or question that was provided to the model.
    model : str
        The name of the model used for generating the response.
    temperature : float
        The temperature setting used in the model's response generation.
    config : Tuple[str, ...]
        Configuration parameters used for the learning session.
    memory : DiskMemory
        An instance of DiskMemory for storing and retrieving data.

    Returns
    -------
    None

    Notes
    -----
    Obtains a human review via `human_review_input`; when a review is
    provided, it is passed on to `collect_learnings` for serialization and
    submission.
    """
    review = human_review_input()
    if not review:
        return
    collect_learnings(prompt, model, temperature, config, memory, review)
|