ai_refactor 0.3.0 → 0.4.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +55 -2
- data/Gemfile +2 -0
- data/Gemfile.lock +5 -1
- data/README.md +68 -24
- data/Rakefile +1 -1
- data/ai_refactor.gemspec +1 -0
- data/exe/ai_refactor +78 -44
- data/lib/ai_refactor/cli.rb +86 -0
- data/lib/ai_refactor/context.rb +33 -0
- data/lib/ai_refactor/file_processor.rb +34 -17
- data/lib/ai_refactor/prompt.rb +84 -0
- data/lib/ai_refactor/prompts/diff.md +17 -0
- data/lib/ai_refactor/prompts/input.md +1 -0
- data/lib/ai_refactor/refactors/base_refactor.rb +176 -0
- data/lib/ai_refactor/refactors/generic.rb +6 -80
- data/lib/ai_refactor/refactors/minitest/write_test_for_class.md +11 -0
- data/lib/ai_refactor/refactors/minitest/write_test_for_class.rb +51 -0
- data/lib/ai_refactor/refactors/project/write_changelog_from_history.md +35 -0
- data/lib/ai_refactor/refactors/project/write_changelog_from_history.rb +50 -0
- data/lib/ai_refactor/refactors/{prompts/rspec_to_minitest_rails.md → rails/minitest/rspec_to_minitest.md} +40 -1
- data/lib/ai_refactor/refactors/rails/minitest/rspec_to_minitest.rb +77 -0
- data/lib/ai_refactor/refactors/rspec/minitest_to_rspec.rb +13 -0
- data/lib/ai_refactor/refactors.rb +13 -5
- data/lib/ai_refactor/{refactors/tests → test_runners}/minitest_runner.rb +1 -1
- data/lib/ai_refactor/{refactors/tests → test_runners}/rspec_runner.rb +1 -1
- data/lib/ai_refactor/{refactors/tests → test_runners}/test_run_diff_report.rb +1 -1
- data/lib/ai_refactor/{refactors/tests → test_runners}/test_run_result.rb +1 -1
- data/lib/ai_refactor/version.rb +1 -1
- data/lib/ai_refactor.rb +13 -8
- metadata +34 -11
- data/lib/ai_refactor/base_refactor.rb +0 -70
- data/lib/ai_refactor/refactors/minitest_to_rspec.rb +0 -11
- data/lib/ai_refactor/refactors/rspec_to_minitest_rails.rb +0 -103
- data/lib/ai_refactor/refactors/{prompts → rspec}/minitest_to_rspec.md +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 725fb1da5fc311d3740687d97fa3b6c3c4292a3b531f467ea1d8ff9e734608bc
+  data.tar.gz: 4c16d310c643ae1158442816a9f863961ae151bb4ae486c965e96340342e2d44
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d0f380ef54a29be017b23c2caaa4491ee6304dc410e52ab6e11038b9f30c30a09a04597f3342332f464007e0e932f5b81112f712d46c584b2728d51bee61c036
+  data.tar.gz: 9ffa29857815cb73daef7d6902bf7301dc80e36361b0451d3d3df1b7921b1c0978ef16ea7963c544b893db1bd0d6db34120378aa6a7ae4ac5add0831e4b61dbf
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,58 @@
-
+# AI Refactor Changelog
+
+## [0.4.0] - 2023-08-15
+
+### Added
+
+- Support for providing files as context for prompt.
+- Output path configuration made available to all refactors.
+- New refactor to write minitest tests for classes
+- New refactor to write changelog entries.
+- 'review' prompt CLI switch without invoking AI.
+- CLI switch to control output overwrite behaviour.
+- Extra text context for prompts via command line with -x
+- Support for .ai_refactor config file which can provide default options/switches for prompts.
+
+### Changed
+
+- Moved to using zeitwerk for loading and dotenv.
+- Simple registry for refactors and change in naming convention.
+- Updated diff prompt option and fixes for new structure.
+- Reorganised refactors.
+- Tweaked rspec to minitest prompt.
+- Fixed check for custom prompt path.
+- Updated docs.
+
+### Fixed
+
+- Fixed reference to built in prompt paths.
+
+## [0.3.1] - 2023-05-25
+
+### Added
+
+- Added support for outputting to file from generic refactor.
+
+## [0.2.0] - 2023-05-24
+
+### Added
+
+- Introduced a generic refactor type which uses the user supplied prompt and outputs to stdout.
+- Added support for outputting to file from generic refactor.
+- Added a prompt for rspec_to_minitest.
+
+### Fixed
+
+- Fixed example.
 
 ## [0.1.0] - 2023-05-19
 
-
+### Added
+
+- First version of CLI.
+
+### Changed
+
+- Gem dependencies are not open-ended.
+- Renamed to clean up intention.
+- Updated docs.
data/Gemfile
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,16 +1,18 @@
 PATH
   remote: .
   specs:
-    ai_refactor (0.3.0)
+    ai_refactor (0.4.0)
       colorize (< 2.0)
       open3 (< 2.0)
       ruby-openai (>= 3.4.0, < 5.0)
+      zeitwerk (~> 2.6)
 
 GEM
   remote: https://rubygems.org/
   specs:
     ast (2.4.2)
     colorize (0.8.1)
+    dotenv (2.8.1)
     faraday (2.7.4)
       faraday-net_http (>= 2.0, < 3.1)
       ruby2_keywords (>= 0.0.4)
@@ -62,12 +64,14 @@ GEM
       lint_roller (~> 1.0)
       rubocop-performance (~> 1.16.0)
     unicode-display_width (2.4.2)
+    zeitwerk (2.6.8)
 
 PLATFORMS
   arm64-darwin-22
 
 DEPENDENCIES
   ai_refactor!
+  dotenv
   minitest (~> 5.0)
   rake (~> 13.0)
   standard (~> 1.3)
data/README.md
CHANGED
@@ -1,20 +1,24 @@
-# AI Refactor
+# AI Refactor for Ruby
 
-AI Refactor is an experimental tool to
+AI Refactor is an experimental tool to use AI to help apply refactoring to code.
 
-
-the AI can help identify which code to change and apply the relevant refactor.
+__The goal for AI Refactor is to help apply repetitive refactoring tasks, not to replace human mind that decides what refactoring is needed.__
 
-
+AI Refactor currently uses [OpenAI's ChatGPT](https://platform.openai.com/).
+
+The tool lets the human user prompt the AI with explicit refactoring tasks, and can be run on one or more files at a time.
+The tool then uses a LLM to apply the relevant refactor, and if appropriate, checks results by running tests and comparing output.
+
+The focus of the tool is work with the Ruby programming language ecosystem, but it can be used with any language.
 
 ## Available refactors
 
 Currently available:
 
-- `
-- `
+- `rails/minitest/rspec_to_minitest`: convert RSpec specs to minitest tests in Rails apps
+- `generic`: provide your own prompt for the AI and run against the input files
 
-### `
+### `rails/minitest/rspec_to_minitest`
 
 Converts RSpec tests to minitest tests for Rails test suites (ie generated minitest tests are actually `ActiveSupport::TestCase`s).
 
@@ -23,20 +27,17 @@ The tool first runs the original RSpec spec file and then runs the generated min
 The comparison is simply the count of successful and failed tests but this is probably enough to determine if the conversion worked.
 
 ```shellq
-stephen$ OPENAI_API_KEY=my-key ai_refactor
-AI Refactor 1 files(s)/dir(s) '["spec/models/my_thing_spec.rb"]' with
+stephen$ OPENAI_API_KEY=my-key ai_refactor rails/minitest/rspec_to_minitest spec/models/my_thing_spec.rb
+AI Refactor 1 files(s)/dir(s) '["spec/models/my_thing_spec.rb"]' with rails/minitest/rspec_to_minitest refactor
 ====================
 Processing spec/models/my_thing_spec.rb...
-
-
-
-
-
-
-
-[Converted spec/models/my_thing_spec.rb to test/models/my_thing_test.rb...]
-[Run generated test file test/models/my_thing_test.rb (bundle exec rails test test/models/my_thing_test.rb)...]
-[Done converting spec/models/my_thing_spec.rb to test/models/my_thing_test.rb...]
+
+Original test run results:
+>> Examples: 41, Failures: 0, Pendings: 0
+
+Translated test file results:
+>> Runs: 41, Failures: 0, Skips: 0
+
 No differences found! Conversion worked!
 Refactor succeeded on spec/models/my_thing_spec.rb
 
@@ -47,7 +48,17 @@ Done processing all files!
 
 Applies the refactor specified by prompting the AI with the user supplied prompt. You must supply a prompt file with the `-p` option.
 
-The output is written to `stdout
+The output is written to `stdout`, or to a file with the `--output` option.
+
+### `minitest/write_test_for_class`
+
+Writes a minitest test for a given class. The output will, by default, be put into a directory named `test` in the current directory,
+in a path that matches the input file path, with a `_test.rb` suffix.
+
+For example, if the input file is `app/stuff/my_thing.rb` the output will be written to `test/app/stuff/my_thing_test.rb`.
+
+This refactor can benefit from being passed related files as context, for example, if the class under test inherits from another class,
+then context can be used to provide the parent class.
 
 ## Installation
 
@@ -66,19 +77,52 @@ See `ai_refactor --help` for more information.
 ```
 Usage: ai_refactor REFACTOR_TYPE INPUT_FILE_OR_DIR [options]
 
-Where REFACTOR_TYPE is one of: ["generic"
+Where REFACTOR_TYPE is one of: ["generic" ... (run ai_refactor --help for full list of refactor types)]
 
+    -o, --output [FILE]              Write output to given file instead of stdout. If no path provided will overwrite input file (will prompt to overwrite existing files). Some refactor tasks will write out to a new file by default. This option will override the tasks default behaviour.
+    -O, --output-template TEMPLATE   Write outputs to files instead of stdout. The template is used to create the output name, where the it can have substitutions, '[FILE]', '[NAME]', '[DIR]', '[REFACTOR]' & '[EXT]'. Eg `[DIR]/[NAME]_[REFACTOR][EXT]` (will prompt to overwrite existing files)
+    -c, --context CONTEXT_FILES      Specify one or more files to use as context for the AI. The contents of these files will be prepended to the prompt sent to the AI.
+    -r, --review-prompt              Show the prompt that will be sent to ChatGPT but do not actually call ChatGPT or make changes to files.
     -p, --prompt PROMPT_FILE         Specify path to a text file that contains the ChatGPT 'system' prompt.
-    -
-    -
+    -f, --diffs                      Request AI generate diffs of changes rather than writing out the whole file.
+    -C, --continue [MAX_MESSAGES]    If ChatGPT stops generating due to the maximum token count being reached, continue to generate more messages, until a stop condition or MAX_MESSAGES. MAX_MESSAGES defaults to 3
+    -m, --model MODEL_NAME           Specify a ChatGPT model to use (default gpt-4).
        --temperature TEMP           Specify the temperature parameter for ChatGPT (default 0.7).
        --max-tokens MAX_TOKENS      Specify the max number of tokens of output ChatGPT can generate. Max will depend on the size of the prompt (default 1500)
     -t, --timeout SECONDS            Specify the max wait time for ChatGPT response.
+        --overwrite ANSWER           Always overwrite existing output files, 'y' for yes, 'n' for no, or 'a' for ask. Default to ask.
     -v, --verbose                    Show extra output and progress info
     -d, --debug                      Show debugging output to help diagnose issues
     -h, --help                       Prints this help
 ```
 
+## Outputs
+
+Some refactor tasks will write out to a new file by default, others to stdout.
+
+The `--output` lets you specify a file to write to instead of the Refactors default behaviour.
+
+If `--output` is used without a value it overwrites the input with a prompt to overwrite existing files.
+
+You can also output to a file using a template, `--output-template` to determine the output file name given a template string:
+
+The template is used to create the output name, where the it can have substitutions, '[FILE]', '[NAME]', '[DIR]', '[REFACTOR]' & '[EXT]'.
+
+Eg `--output-template "[DIR]/[NAME]_[REFACTOR][EXT]"`
+
+eg for the input `my_dir/my_class.rb`
+- `[FILE]`: `my_class.rb`
+- `[NAME]`: `my_class`
+- `[DIR]`: `my_dir`
+- `[REFACTOR]`: `generic`
+- `[EXT]`: `.rb`
+
+
+## Note on performance and ChatGPT version
+
+_The quality of results depend very much on the version of ChatGPT being used._
+
+I have tested with both 3.5 and 4 and see **significantly** better performance with version 4.
 
 ## Development
 
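The `[DIR]`/`[NAME]`/`[FILE]`/`[REFACTOR]`/`[EXT]` substitutions documented in the README changes above can be pictured with a small standalone Ruby sketch. This is illustrative only; `expand_output_template` is a hypothetical helper, not part of the gem's API.

```ruby
# Illustrative sketch of the --output-template substitutions documented above.
# `expand_output_template` is a hypothetical helper, not the gem's implementation.
def expand_output_template(template, input_path, refactor_name)
  dir = File.dirname(input_path)         # => "my_dir"
  file = File.basename(input_path)       # => "my_class.rb"
  ext = File.extname(input_path)         # => ".rb"
  name = File.basename(input_path, ext)  # => "my_class"

  template.gsub("[DIR]", dir)
    .gsub("[NAME]", name)
    .gsub("[FILE]", file)
    .gsub("[REFACTOR]", refactor_name)
    .gsub("[EXT]", ext)
end

expand_output_template("[DIR]/[NAME]_[REFACTOR][EXT]", "my_dir/my_class.rb", "generic")
# => "my_dir/my_class_generic.rb"
```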
data/Rakefile
CHANGED
data/ai_refactor.gemspec
CHANGED
data/exe/ai_refactor
CHANGED
@@ -3,26 +3,53 @@
 require "optparse"
 require "colorize"
 require "openai"
+require "shellwords"
 require_relative "../lib/ai_refactor"
 
+require "dotenv/load"
+
 options = {}
 
 supported_refactors = AIRefactor::Refactors.all
-
+descriptions = AIRefactor::Refactors.descriptions
 
 # General options for all refactor types
 option_parser = OptionParser.new do |parser|
-  parser.banner = "Usage: ai_refactor REFACTOR_TYPE INPUT_FILE_OR_DIR [options]\n\nWhere REFACTOR_TYPE is one of: #{
+  parser.banner = "Usage: ai_refactor REFACTOR_TYPE INPUT_FILE_OR_DIR [options]\n\nWhere REFACTOR_TYPE is one of: \n- #{descriptions.to_a.map { |refactor| refactor.join(": ") }.join("\n- ")}\n\n"
+
+  parser.on("-o", "--output [FILE]", String, "Write output to given file instead of stdout. If no path provided will overwrite input file (will prompt to overwrite existing files). Some refactor tasks will write out to a new file by default. This option will override the tasks default behaviour.") do |f|
+    options[:output_file_path] = f
+  end
+
+  parser.on("-O", "--output-template TEMPLATE", String, "Write outputs to files instead of stdout. The template is used to create the output name, where the it can have substitutions, '[FILE]', '[NAME]', '[DIR]', '[REFACTOR]' & '[EXT]'. Eg `[DIR]/[NAME]_[REFACTOR][EXT]` (will prompt to overwrite existing files)") do |t|
+    options[:output_template_path] = t
+  end
+
+  parser.on("-c", "--context CONTEXT_FILES", Array, "Specify one or more files to use as context for the AI. The contents of these files will be prepended to the prompt sent to the AI.") do |c|
+    options[:context_file_paths] = c
+  end
+
+  parser.on("-x", "--extra CONTEXT_TEXT", String, "Specify some text to be prepended to the prompt sent to the AI as extra information of note.") do |c|
+    options[:context_text] = c
+  end
+
+  parser.on("-r", "--review-prompt", "Show the prompt that will be sent to ChatGPT but do not actually call ChatGPT or make changes to files.") do
+    options[:review_prompt] = true
+  end
 
   parser.on("-p", "--prompt PROMPT_FILE", String, "Specify path to a text file that contains the ChatGPT 'system' prompt.") do |f|
     options[:prompt_file_path] = f
   end
 
-  parser.on("-
+  parser.on("-f", "--diffs", "Request AI generate diffs of changes rather than writing out the whole file.") do
+    options[:diff] = true
+  end
+
+  parser.on("-C", "--continue [MAX_MESSAGES]", Integer, "If ChatGPT stops generating due to the maximum token count being reached, continue to generate more messages, until a stop condition or MAX_MESSAGES. MAX_MESSAGES defaults to 3") do |c|
     options[:ai_max_attempts] = c || 3
   end
 
-  parser.on("-m", "--model MODEL_NAME", String, "Specify a ChatGPT model to use (default gpt-
+  parser.on("-m", "--model MODEL_NAME", String, "Specify a ChatGPT model to use (default gpt-4).") do |m|
     options[:ai_model] = m
   end
 
@@ -38,6 +65,14 @@ option_parser = OptionParser.new do |parser|
     options[:ai_timeout] = m
   end
 
+  parser.on("--overwrite ANSWER", "Always overwrite existing output files, 'y' for yes, 'n' for no, or 'a' for ask. Default to ask.") do |a|
+    options[:overwrite] = a
+  end
+
+  parser.on("-N", "--no", "Never overwrite existing output files, same as --overwrite=n.") do |a|
+    options[:overwrite] = "n"
+  end
+
   parser.on("-v", "--verbose", "Show extra output and progress info") do
     options[:verbose] = true
   end
@@ -53,6 +88,22 @@ option_parser = OptionParser.new do |parser|
 
   parser.separator ""
 
+  # Example in Refactor class:
+  #
+  # class << self
+  #   def command_line_options
+  #     [
+  #       {
+  #         key: :my_option_key,
+  #         short: "-s",
+  #         long: "--long-form-cli-param [FILE]",
+  #         type: String,
+  #         help: "help text"
+  #       },
+  #       ...
+  #     ]
+  #   end
+  # end
   supported_refactors.each do |name, refactorer|
     parser.separator "For refactor type '#{name}':" if refactorer.command_line_options.size.positive?
     refactorer.command_line_options.each do |option|
@@ -65,55 +116,38 @@ option_parser = OptionParser.new do |parser|
   end
 end
 
-
-
-
+# Load config from ~/.ai_refactor or .ai_refactor
+home_config_file_path = File.expand_path("~/.ai_refactor")
+local_config_file_path = File.join(Dir.pwd, ".ai_refactor")
 
-
-input_file_path = ARGV
+arguments = ARGV.dup
 
-if
-
-
+config_file_path = if File.exist?(local_config_file_path)
+  local_config_file_path
+elsif File.exist?(home_config_file_path)
+  home_config_file_path
 end
-
-
-
-
-  config.request_timeout = options[:ai_timeout] || 240
+if config_file_path
+  config_string = File.read(config_file_path)
+  config_lines = config_string.split(/\n+/).reject { |s| s =~ /\A\s*#/ }
+  arguments += config_lines.flat_map(&:shellsplit)
 end
 
-
+option_parser.parse!(arguments)
 
-
-  File.exist?(path) ? path : Dir.glob(path)
-end.flatten
+logger = AIRefactor::Logger.new(verbose: options[:verbose], debug: options[:debug])
 
-
-logger.info "
+if config_file_path
+  logger.info "Loaded config from '#{config_file_path}'..."
+end
 
-
-  logger.info "Processing #{file}..."
+job = ::AIRefactor::Cli.new(refactoring_type: arguments.shift, inputs: arguments, options: options, logger: logger)
 
-
-
-
-  if failed
-    logger.warn "Refactor failed on #{file}\nFailed due to: #{refactor.failed_message}\n"
-  else
-    logger.success "Refactor succeeded on #{file}\n"
-    if refactor_returned.is_a?(String)
-      logger.info "Refactor #{file} output:\n\n#{refactor_returned}\n\n"
-    end
-  end
-  failed ? [file, refactor.failed_message] : true
+unless job.valid?
+  puts option_parser.help
+  exit 1
 end
 
-
-
-else
-  files = return_values.select { |v| v != true }
-  logger.warn "Some files failed to process:\n#{files.map { |f| "#{f[0]} :\n > #{f[1]}" }.join("\n")}"
+unless job.run
+  exit 1
 end
-
-logger.info "Done processing all files!"
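The comment block added above documents the shape of the `command_line_options` hook that per-refactor CLI switches are built from. A minimal sketch of a refactor declaring such an option could look like the following; the class name and the option itself are invented for illustration, and a real refactor would additionally inherit from the gem's base refactor class, which is not shown in this hunk.

```ruby
# Hypothetical refactor illustrating the command_line_options hook documented
# in the comment block above. The class and option names are invented; a real
# refactor would inherit from the gem's base refactor class.
class MyCustomRefactor
  class << self
    def command_line_options
      [
        {
          key: :my_option_key,                   # key the parsed value is stored under in the options hash
          short: "-s",                           # short switch
          long: "--long-form-cli-param [FILE]",  # long switch, in OptionParser form
          type: String,                          # value type passed to OptionParser
          help: "help text"                      # description shown in --help
        }
      ]
    end
  end
end
```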
data/lib/ai_refactor/cli.rb
ADDED
@@ -0,0 +1,86 @@
+# frozen_string_literal: true
+
+module AIRefactor
+  class Cli
+    def initialize(refactoring_type:, inputs:, options:, logger:)
+      @refactoring_type = refactoring_type
+      @inputs = inputs
+      @options = options
+      @logger = logger
+    end
+
+    attr_reader :refactoring_type, :inputs, :options, :logger
+
+    def valid?
+      return false unless refactorer
+      inputs_valid = refactorer.takes_input_files? ? !(inputs.nil? || inputs.empty?) : true
+      AIRefactor::Refactors.supported?(refactoring_type) && inputs_valid
+    end
+
+    def run
+      return false unless valid?
+
+      OpenAI.configure do |config|
+        config.access_token = ENV.fetch("OPENAI_API_KEY")
+        config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID", nil)
+        config.request_timeout = options[:ai_timeout] || 240
+      end
+
+      if refactorer.takes_input_files?
+        expanded_inputs = inputs.map do |path|
+          File.exist?(path) ? path : Dir.glob(path)
+        end.flatten
+
+        logger.info "AI Refactor #{expanded_inputs.size} files(s)/dir(s) '#{expanded_inputs}' with #{refactorer.refactor_name} refactor\n"
+        logger.info "====================\n"
+
+        return_values = expanded_inputs.map do |file|
+          logger.info "Processing #{file}..."
+
+          refactor = refactorer.new(file, options, logger)
+          refactor_returned = refactor.run
+          failed = refactor_returned == false
+          if failed
+            logger.warn "Refactor failed on #{file}\nFailed due to: #{refactor.failed_message}\n"
+          else
+            logger.success "Refactor succeeded on #{file}\n"
+            if refactor_returned.is_a?(String)
+              logger.info "Refactor #{file} output:\n\n#{refactor_returned}\n\n"
+            end
+          end
+          failed ? [file, refactor.failed_message] : true
+        end
+
+        if return_values.all?(true)
+          logger.success "All files processed successfully!"
+        else
+          files = return_values.select { |v| v != true }
+          logger.warn "Some files failed to process:\n#{files.map { |f| "#{f[0]} :\n > #{f[1]}" }.join("\n")}"
+        end
+
+        logger.info "Done processing all files!"
+      else
+        name = refactorer.refactor_name
+        logger.info "AI Refactor - #{name} refactor\n"
+        logger.info "====================\n"
+        refactor = refactorer.new(nil, options, logger)
+        refactor_returned = refactor.run
+        failed = refactor_returned == false
+        if failed
+          logger.warn "Refactor failed with #{name}\nFailed due to: #{refactor.failed_message}\n"
+        else
+          logger.success "Refactor succeeded with #{name}\n"
+          if refactor_returned.is_a?(String)
+            logger.info "Refactor output:\n\n#{refactor_returned}\n\n"
+          end
+        end
+      end
+    end
+
+    private
+
+    def refactorer
+      @refactorer ||= AIRefactor::Refactors.get(refactoring_type)
+    end
+  end
+end
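Based on the constructor and the call site in `exe/ai_refactor` above, driving the new `AIRefactor::Cli` directly might look roughly like this. The refactor name, input path and option values are illustrative, and `OPENAI_API_KEY` must be set since `run` configures the OpenAI client from the environment.

```ruby
# Rough usage sketch of AIRefactor::Cli, mirroring exe/ai_refactor above.
# The refactor name, input file and options are placeholders.
require "ai_refactor"

logger = AIRefactor::Logger.new(verbose: true, debug: false)

job = AIRefactor::Cli.new(
  refactoring_type: "rails/minitest/rspec_to_minitest",
  inputs: ["spec/models/my_thing_spec.rb"],
  options: {ai_model: "gpt-4", ai_timeout: 240},
  logger: logger
)

# run calls OpenAI.configure, so OPENAI_API_KEY must be present in the environment.
exit 1 unless job.valid? # false when the refactor name or inputs are unusable
exit 1 unless job.run    # as in exe/ai_refactor, a falsy return is treated as failure
```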
data/lib/ai_refactor/context.rb
ADDED
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module AIRefactor
+  class Context
+    def initialize(files:, text:, logger:)
+      @files = files
+      @text = text
+      @logger = logger
+    end
+
+    def prepare_context
+      context = read_contexts&.compact
+      file_context = (context && context.size.positive?) ? "Here is some related files:\n\n#{context.join("\n")}" : ""
+      if @text.nil? || @text.empty?
+        file_context
+      else
+        "Also note: #{@text}\n\n#{file_context}"
+      end
+    end
+
+    private
+
+    def read_contexts
+      @files&.map do |file|
+        unless File.exist?(file)
+          @logger.warn "Context file #{file} does not exist"
+          next
+        end
+        "#---\n# File '#{file}':\n\n```#{File.read(file)}```\n"
+      end
+    end
+  end
+end
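A quick sketch of how the `Context` class above assembles the prompt preamble; the context file path and the extra text are placeholders.

```ruby
# Minimal sketch of AIRefactor::Context, based on the class shown above.
# The context file and extra text are placeholders.
require "ai_refactor"

logger = AIRefactor::Logger.new(verbose: false, debug: false)

context = AIRefactor::Context.new(
  files: ["app/models/parent_class.rb"],
  text: "MyThing inherits from ParentClass",
  logger: logger
)

# With text present, the result starts with "Also note: ..." and, when the
# context files exist, continues with "Here is some related files:" followed
# by each file's contents wrapped in a fenced block. Missing files are
# skipped with a logger warning.
preamble = context.prepare_context
```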
data/lib/ai_refactor/file_processor.rb
CHANGED
@@ -1,18 +1,19 @@
 # frozen_string_literal: true
 
+require "fileutils"
 require "openai"
 require "json"
 
 module AIRefactor
   class FileProcessor
-    attr_reader :
+    attr_reader :input_file_path, :output_path, :logger, :options
 
-    def initialize(
-      @
-      @prompt_file_path = prompt_file_path
+    def initialize(prompt:, ai_client:, logger:, output_path: nil, options: {})
+      @prompt = prompt
       @ai_client = ai_client
       @logger = logger
       @output_path = output_path
+      @options = options
    end
 
     def output_exists?
@@ -20,20 +21,26 @@ module AIRefactor
       File.exist?(output_path)
     end
 
-    def process!
-      logger.debug("Processing #{
-
-
-
-
-
-
-
+    def process!
+      logger.debug("Processing #{@prompt.input_file_path} with prompt in #{@prompt.prompt_file_path}")
+      logger.debug("Options: #{options.inspect}")
+      messages = @prompt.chat_messages
+      if options[:review_prompt]
+        logger.info "Review prompt:\n"
+        messages.each do |message|
+          logger.info "\n-- Start of prompt for Role #{message[:role]} --\n"
+          logger.info message[:content]
+          logger.info "\n-- End of prompt for Role #{message[:role]} --\n"
+        end
+        return [nil, "Skipped as review prompt was requested", nil]
+      end
+
+      content, finished_reason, usage = generate_next_message(messages, options, ai_max_attempts)
 
       content = if content && content.length > 0
         processed = block_given? ? yield(content) : content
         if output_path
-
+          write_output(output_path, processed)
           logger.verbose "Wrote output to #{output_path}..."
         end
         processed
@@ -44,14 +51,18 @@ module AIRefactor
 
     private
 
-    def
+    def ai_max_attempts
+      options[:ai_max_attempts] || 1
+    end
+
+    def generate_next_message(messages, options, attempts_left)
       logger.verbose "Generate AI output. Generation attempts left: #{attempts_left}"
       logger.debug "Options: #{options.inspect}"
       logger.debug "Messages: #{messages.inspect}"
 
       response = @ai_client.chat(
         parameters: {
-          model: options[:ai_model] || "gpt-
+          model: options[:ai_model] || "gpt-4",
           messages: messages,
           temperature: options[:ai_temperature] || 0.7,
           max_tokens: options[:ai_max_tokens] || 1500
@@ -69,7 +80,7 @@ module AIRefactor
         generate_next_message(messages + [
           {role: "assistant", content: content},
           {role: "user", content: "Continue"}
-        ],
+        ], options, attempts_left - 1)
       else
         previous_messages = messages.filter { |m| m[:role] == "assistant" }.map { |m| m[:content] }.join
         content = if previous_messages.length > 0
@@ -80,5 +91,11 @@ module AIRefactor
         [content, finished_reason, response["usage"]]
       end
     end
+
+    def write_output(output_path, processed)
+      dir = File.dirname(output_path)
+      FileUtils.mkdir_p(dir) unless File.directory?(dir)
+      File.write(output_path, processed)
+    end
   end
 end