ai_refactor 0.3.1 → 0.5.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +65 -2
- data/Gemfile +9 -0
- data/Gemfile.lock +169 -1
- data/README.md +169 -43
- data/Rakefile +1 -1
- data/ai_refactor.gemspec +1 -0
- data/examples/.gitignore +1 -0
- data/examples/ex1_convert_a_rspec_test_to_minitest.yml +7 -0
- data/examples/ex1_input_spec.rb +32 -0
- data/examples/rails_helper.rb +21 -0
- data/examples/test_helper.rb +14 -0
- data/exe/ai_refactor +139 -52
- data/lib/ai_refactor/cli.rb +138 -0
- data/lib/ai_refactor/command_file_parser.rb +27 -0
- data/lib/ai_refactor/context.rb +33 -0
- data/lib/ai_refactor/file_processor.rb +34 -17
- data/lib/ai_refactor/prompt.rb +84 -0
- data/lib/ai_refactor/prompts/diff.md +17 -0
- data/lib/ai_refactor/prompts/input.md +1 -0
- data/lib/ai_refactor/refactors/base_refactor.rb +183 -0
- data/lib/ai_refactor/refactors/custom.rb +43 -0
- data/lib/ai_refactor/refactors/minitest/write_test_for_class.md +15 -0
- data/lib/ai_refactor/refactors/minitest/write_test_for_class.rb +51 -0
- data/lib/ai_refactor/refactors/project/write_changelog_from_history.md +35 -0
- data/lib/ai_refactor/refactors/project/write_changelog_from_history.rb +50 -0
- data/lib/ai_refactor/refactors/{prompts/rspec_to_minitest_rails.md → rails/minitest/rspec_to_minitest.md} +40 -1
- data/lib/ai_refactor/refactors/rails/minitest/rspec_to_minitest.rb +77 -0
- data/lib/ai_refactor/refactors/rspec/minitest_to_rspec.rb +13 -0
- data/lib/ai_refactor/refactors/ruby/refactor_ruby.md +10 -0
- data/lib/ai_refactor/refactors/ruby/refactor_ruby.rb +29 -0
- data/lib/ai_refactor/refactors/ruby/write_ruby.md +7 -0
- data/lib/ai_refactor/refactors/ruby/write_ruby.rb +33 -0
- data/lib/ai_refactor/refactors.rb +13 -5
- data/lib/ai_refactor/run_configuration.rb +115 -0
- data/lib/ai_refactor/{refactors/tests → test_runners}/minitest_runner.rb +2 -2
- data/lib/ai_refactor/{refactors/tests → test_runners}/rspec_runner.rb +1 -1
- data/lib/ai_refactor/{refactors/tests → test_runners}/test_run_diff_report.rb +1 -1
- data/lib/ai_refactor/{refactors/tests → test_runners}/test_run_result.rb +1 -1
- data/lib/ai_refactor/version.rb +1 -1
- data/lib/ai_refactor.rb +13 -8
- metadata +47 -13
- data/lib/ai_refactor/base_refactor.rb +0 -66
- data/lib/ai_refactor/refactors/generic.rb +0 -113
- data/lib/ai_refactor/refactors/minitest_to_rspec.rb +0 -11
- data/lib/ai_refactor/refactors/rspec_to_minitest_rails.rb +0 -103
- /data/lib/ai_refactor/refactors/{prompts → rspec}/minitest_to_rspec.md +0 -0
@@ -0,0 +1,14 @@
+require "rails/all"
+require "active_support/testing/autorun"
+
+class MyModel
+  include ActiveModel::Model
+  include ActiveModel::Attributes
+  include ActiveModel::Validations
+  include ActiveModel::Validations::Callbacks
+
+  validates :name, presence: true
+
+  attribute :name, :string
+  attribute :age, :integer
+end
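The hunk above defines a small ActiveModel class (MyModel) used by the bundled examples. For illustration only, a Minitest file exercising MyModel might look like the sketch below (the test names and assertions are assumptions, not output shipped with the gem, and it assumes the example's test_helper.rb is on the load path):

```ruby
# Illustrative sketch only -- not part of the gem's examples.
require_relative "test_helper"

class MyModelTest < ActiveSupport::TestCase
  test "is valid with a name" do
    assert MyModel.new(name: "Alice").valid?
  end

  test "is invalid without a name" do
    refute MyModel.new(name: nil).valid?
  end

  test "casts age to an integer" do
    assert_equal 42, MyModel.new(name: "Alice", age: "42").age
  end
end
```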
data/exe/ai_refactor
CHANGED
@@ -3,47 +3,87 @@
 require "optparse"
 require "colorize"
 require "openai"
+require "shellwords"
 require_relative "../lib/ai_refactor"
 
-
+require "dotenv/load"
 
 supported_refactors = AIRefactor::Refactors.all
-
+refactors_descriptions = AIRefactor::Refactors.descriptions
+
+arguments = ARGV.dup
+
+options_from_config_file = AIRefactor::Cli.load_options_from_config_file
+arguments += options_from_config_file if options_from_config_file
+
+run_config = AIRefactor::RunConfiguration.new
 
 # General options for all refactor types
 option_parser = OptionParser.new do |parser|
-  parser.banner = "Usage: ai_refactor
+  parser.banner = "Usage: ai_refactor REFACTOR_TYPE_OR_COMMAND_FILE INPUT_FILE_OR_DIR [options]\n\nWhere REFACTOR_TYPE_OR_COMMAND_FILE is either the path to a command YML file, or one of the refactor types to run: \n- #{refactors_descriptions.to_a.map { |refactor| refactor.join(": ") }.join("\n- ")}\n\n"
+
+  parser.on("-o", "--output [FILE]", String, "Write output to given file instead of stdout. If no path provided will overwrite input file (will prompt to overwrite existing files). Some refactor tasks will write out to a new file by default. This option will override the tasks default behaviour.") do |f|
+    run_config.output_file_path = f
+  end
+
+  parser.on("-O", "--output-template TEMPLATE", String, "Write outputs to files instead of stdout. The template is used to create the output name, where the it can have substitutions, '[FILE]', '[NAME]', '[DIR]', '[REFACTOR]' & '[EXT]'. Eg `[DIR]/[NAME]_[REFACTOR][EXT]` (will prompt to overwrite existing files)") do |t|
+    run_config.output_template_path = t
+  end
+
+  parser.on("-c", "--context CONTEXT_FILES", Array, "Specify one or more files to use as context for the AI. The contents of these files will be prepended to the prompt sent to the AI.") do |c|
+    run_config.context_file_paths = c
+  end
+
+  parser.on("-x", "--extra CONTEXT_TEXT", String, "Specify some text to be prepended to the prompt sent to the AI as extra information of note.") do |c|
+    run_config.context_text = c
+  end
+
+  parser.on("-r", "--review-prompt", "Show the prompt that will be sent to ChatGPT but do not actually call ChatGPT or make changes to files.") do
+    run_config.review_prompt = true
+  end
 
   parser.on("-p", "--prompt PROMPT_FILE", String, "Specify path to a text file that contains the ChatGPT 'system' prompt.") do |f|
-
+    run_config.prompt_file_path = f
+  end
+
+  parser.on("-f", "--diffs", "Request AI generate diffs of changes rather than writing out the whole file.") do
+    run_config.diff = true
   end
 
-  parser.on("-
-
+  parser.on("-C", "--continue [MAX_MESSAGES]", Integer, "If ChatGPT stops generating due to the maximum token count being reached, continue to generate more messages, until a stop condition or MAX_MESSAGES. MAX_MESSAGES defaults to 3") do |c|
+    run_config.ai_max_attempts = c
   end
 
-  parser.on("-m", "--model MODEL_NAME", String, "Specify a ChatGPT model to use (default gpt-
-
+  parser.on("-m", "--model MODEL_NAME", String, "Specify a ChatGPT model to use (default gpt-4).") do |m|
+    run_config.ai_model = m
   end
 
   parser.on("--temperature TEMP", Float, "Specify the temperature parameter for ChatGPT (default 0.7).") do |p|
-
+    run_config.ai_temperature = p
   end
 
   parser.on("--max-tokens MAX_TOKENS", Integer, "Specify the max number of tokens of output ChatGPT can generate. Max will depend on the size of the prompt (default 1500)") do |m|
-
+    run_config.ai_max_tokens = m
   end
 
   parser.on("-t", "--timeout SECONDS", Integer, "Specify the max wait time for ChatGPT response.") do |m|
-
+    run_config.ai_timeout = m
+  end
+
+  parser.on("--overwrite ANSWER", "Always overwrite existing output files, 'y' for yes, 'n' for no, or 'a' for ask. Default to ask.") do |a|
+    run_config.overwrite = a
+  end
+
+  parser.on("-N", "--no", "Never overwrite existing output files, same as --overwrite=n.") do |a|
+    run_config.overwrite = "n"
  end
 
   parser.on("-v", "--verbose", "Show extra output and progress info") do
-
+    run_config.verbose = true
   end
 
   parser.on("-d", "--debug", "Show debugging output to help diagnose issues") do
-
+    run_config.debug = true
   end
 
   parser.on("-h", "--help", "Prints this help") do
@@ -53,67 +93,114 @@ option_parser = OptionParser.new do |parser|
 
   parser.separator ""
 
+  # Example in Refactor class:
+  #
+  # class << self
+  #   def command_line_options
+  #     [
+  #       {
+  #         key: :my_option_key,
+  #         short: "-s",
+  #         long: "--long-form-cli-param [FILE]",
+  #         type: String,
+  #         help: "help text"
+  #       },
+  #       ...
+  #     ]
+  #   end
+  # end
   supported_refactors.each do |name, refactorer|
     parser.separator "For refactor type '#{name}':" if refactorer.command_line_options.size.positive?
     refactorer.command_line_options.each do |option|
       args = [option[:long], option[:type], option[:help]]
       args.unshift(option[:short]) if option[:short]
+      AIRefactor::RunConfiguration.add_new_option(option[:key])
       parser.on(*args) do |o|
-
+        run_config.send("#{option[:key]}=", o.nil? ? true : o)
       end
     end
   end
 end
 
-option_parser
+def exit_with_option_error(message, option_parser = nil, logger = nil)
+  logger ? logger.error(message, bold: true) : puts(message)
+  puts option_parser if option_parser
+  exit false
+end
 
-
+def exit_with_error(message, logger = nil)
+  logger ? logger.error(message, bold: true) : puts(message)
+  exit false
+end
 
-
-
+# If no command was provided, prompt for one in interactive mode
+if arguments.empty? || arguments.all? { |arg| arg.start_with?("-") && !(arg == "-h" || arg == "--help") }
+  interactive_log = AIRefactor::Logger.new
+  # For each option that is required but not provided, prompt for it
+  # Put the option in arguments to parse with option_parser
+  interactive_log.info "Interactive mode started. You can use tab to autocomplete:"
+  predefined_commands = AIRefactor::Refactors.names
 
-
-
-
-
+  interactive_log.info "Available refactors: #{predefined_commands.join(", ")}\n"
+  command = AIRefactor::Cli.request_input_with_autocomplete("Enter refactor name: ", predefined_commands)
+  exit_with_option_error("No refactor name provided.", option_parser) if command.nil? || command.empty?
+  initial = [command]
 
-
-
-
-  config.request_timeout = options[:ai_timeout] || 240
-end
+  input_path = AIRefactor::Cli.request_file_inputs("Enter input file path: ", multiple: false)
+  exit_with_option_error("No input file path provided.", option_parser) if input_path.nil? || input_path.empty?
+  initial << input_path
 
-
+  arguments.prepend(*initial)
 
-
-  File.exist?(path) ? path : Dir.glob(path)
-end.flatten
+  # Ask if template should be used - then prompt for it
 
-
-
+  output = AIRefactor::Cli.request_file_inputs("Enter output file path (blank for refactor default): ", multiple: false)
+  arguments.concat(["-o", " #{output}"]) unless output.nil? || output.empty?
 
-
-
+  context_text = AIRefactor::Cli.request_text_input("Enter extra text to add to prompt (blank for none): ")
+  arguments.concat(["-x", context_text]) unless context_text.nil? || context_text.empty?
 
-
-
-
-
-
-
-
-
-
-
-
-
+  context_files = AIRefactor::Cli.request_file_inputs("Enter extra context file path(s) (blank for none): ")
+  arguments.concat(["-c", context_files]) unless context_files.nil? || context_files.empty?
+
+  prompt_file = AIRefactor::Cli.request_file_inputs("Enter Prompt file path (blank for refactor default): ", multiple: false)
+  arguments.concat(["-p", prompt_file]) unless prompt_file.nil? || prompt_file.empty?
+
+  review = AIRefactor::Cli.request_switch("Dry-run (review prompt only)? (y/N) (blank for 'N'): ")
+  arguments << "-r" if review
+end
+
+File.write(".ai_refactor_history", arguments.join(" ") + "\n", mode: "a")
+
+begin
+  option_parser.parse!(arguments)
+rescue OptionParser::InvalidOption, OptionParser::MissingArgument
+  exit_with_option_error($!, option_parser)
 end
 
-
-
+logger = AIRefactor::Logger.new(verbose: run_config.verbose, debug: run_config.debug)
+logger.info "Loaded config from '#{options_from_config_file}'..." if options_from_config_file
+
+command_or_file = arguments.shift
+if AIRefactor::CommandFileParser.command_file?(command_or_file)
+  logger.info "Loading refactor command file '#{command_or_file}'..."
+  begin
+    run_config.set!(AIRefactor::CommandFileParser.new(command_or_file).parse)
+  rescue => e
+    exit_with_option_error(e.message, option_parser, logger)
+  end
 else
-
-  logger.warn "Some files failed to process:\n#{files.map { |f| "#{f[0]} :\n > #{f[1]}" }.join("\n")}"
+  logger.info "Requested to run refactor '#{command_or_file}'..."
 end
 
-
+run_config.input_file_paths = arguments
+
+job = AIRefactor::Cli.new(run_config, logger: logger)
+
+unless job.valid?
+  exit_with_error("Refactor job failed or was not correctly configured. Did you specify the required inputs or options?.", logger)
+end
+
+unless job.run
+  exit false
+end
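The new `-O/--output-template` option above describes substitution tokens ('[FILE]', '[NAME]', '[DIR]', '[REFACTOR]', '[EXT]'). The substitution code itself is not part of this diff; as a rough illustration of what a template such as `[DIR]/[NAME]_[REFACTOR][EXT]` might expand to, here is a hypothetical sketch (the helper name and the slash-to-underscore handling are assumptions, not the gem's implementation):

```ruby
# Hypothetical sketch of output-template expansion, for illustration only.
def expand_output_template(template, input_path, refactor_name)
  dir  = File.dirname(input_path)
  ext  = File.extname(input_path)
  name = File.basename(input_path, ext)
  template
    .gsub("[FILE]", File.basename(input_path))
    .gsub("[NAME]", name)
    .gsub("[DIR]", dir)
    .gsub("[REFACTOR]", refactor_name.to_s.tr("/", "_"))
    .gsub("[EXT]", ext)
end

expand_output_template("[DIR]/[NAME]_[REFACTOR][EXT]",
  "spec/models/my_model_spec.rb", "rails/minitest/rspec_to_minitest")
# => "spec/models/my_model_spec_rails_minitest_rspec_to_minitest.rb"
```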
data/lib/ai_refactor/cli.rb
@@ -0,0 +1,138 @@
+# frozen_string_literal: true
+
+require "readline"
+
+module AIRefactor
+  class Cli
+    class << self
+      def load_options_from_config_file
+        # Load config from ~/.ai_refactor or .ai_refactor
+        home_config_file_path = File.expand_path("~/.ai_refactor")
+        local_config_file_path = File.join(Dir.pwd, ".ai_refactor")
+
+        config_file_path = if File.exist?(local_config_file_path)
+          local_config_file_path
+        elsif File.exist?(home_config_file_path)
+          home_config_file_path
+        end
+        return unless config_file_path
+
+        config_string = File.read(config_file_path)
+        config_lines = config_string.split(/\n+/).reject { |s| s =~ /\A\s*#/ }.map(&:strip)
+        config_lines.flat_map(&:shellsplit)
+      end
+
+      def request_text_input(prompt)
+        puts prompt
+        gets.chomp
+      end
+
+      def request_input_with_autocomplete(prompt, completion_list)
+        Readline.completion_append_character = nil
+        Readline.completion_proc = proc do |str|
+          completion_list.grep(/^#{Regexp.escape(str)}/)
+        end
+        Readline.readline(prompt, true)
+      end
+
+      def request_file_inputs(prompt, multiple: true)
+        Readline.completion_append_character = multiple ? " " : nil
+        Readline.completion_proc = Readline::FILENAME_COMPLETION_PROC
+
+        paths = Readline.readline(prompt, true)
+        multiple ? paths.gsub(/[^\\] /, ",") : paths
+      end
+
+      def request_switch(prompt)
+        (Readline.readline(prompt, true) =~ /^y/i) ? true : false
+      end
+    end
+
+    def initialize(configuration, logger:)
+      @configuration = configuration
+      @logger = logger
+    end
+
+    attr_reader :configuration, :logger
+
+    def refactoring_type
+      configuration.refactor
+    end
+
+    def inputs
+      configuration.input_file_paths
+    end
+
+    def valid?
+      return false unless refactorer
+      inputs_valid = refactorer.takes_input_files? ? !(inputs.nil? || inputs.empty?) : true
+      AIRefactor::Refactors.supported?(refactoring_type) && inputs_valid
+    end
+
+    def run
+      return false unless valid?
+
+      OpenAI.configure do |config|
+        config.access_token = ENV.fetch("OPENAI_API_KEY")
+        config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID", nil)
+        config.request_timeout = configuration.ai_timeout || 240
+      end
+
+      if refactorer.takes_input_files?
+        expanded_inputs = inputs.map do |path|
+          File.exist?(path) ? path : Dir.glob(path)
+        end.flatten
+
+        logger.info "AI Refactor #{expanded_inputs.size} files(s)/dir(s) '#{expanded_inputs}' with #{refactorer.refactor_name} refactor\n"
+        logger.info "====================\n"
+
+        return_values = expanded_inputs.map do |file|
+          logger.info "Processing #{file}..."
+
+          refactor = refactorer.new(file, configuration, logger)
+          refactor_returned = refactor.run
+          failed = refactor_returned == false
+          if failed
+            logger.warn "Refactor failed on #{file}\nFailed due to: #{refactor.failed_message}\n"
+          else
+            logger.success "Refactor succeeded on #{file}\n"
+            if refactor_returned.is_a?(String)
+              logger.info "Refactor #{file} output:\n\n#{refactor_returned}\n\n"
+            end
+          end
+          failed ? [file, refactor.failed_message] : true
+        end
+
+        if return_values.all?(true)
+          logger.success "All files processed successfully!"
+        else
+          files = return_values.select { |v| v != true }
+          logger.warn "Some files failed to process:\n#{files.map { |f| "#{f[0]} :\n > #{f[1]}" }.join("\n")}"
+        end
+
+        logger.info "Done processing all files!"
+      else
+        name = refactorer.refactor_name
+        logger.info "AI Refactor - #{name} refactor\n"
+        logger.info "====================\n"
+        refactor = refactorer.new(nil, configuration, logger)
+        refactor_returned = refactor.run
+        failed = refactor_returned == false
+        if failed
+          logger.warn "Refactor failed with #{name}\nFailed due to: #{refactor.failed_message}\n"
+        else
+          logger.success "Refactor succeeded with #{name}\n"
+          if refactor_returned.is_a?(String)
+            logger.info "Refactor output:\n\n#{refactor_returned}\n\n"
+          end
+        end
+      end
+    end
+
+    private
+
+    def refactorer
+      @refactorer ||= AIRefactor::Refactors.get(refactoring_type)
+    end
+  end
+end
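`Cli.load_options_from_config_file` turns a `.ai_refactor` file into extra CLI arguments by dropping comment lines and shell-splitting the rest. A small sketch of that behaviour on a hypothetical config file:

```ruby
# Mirrors the parsing shown above; the config file contents are made up.
require "shellwords"

config_string = <<~CONFIG
  # defaults applied to every run
  --model gpt-4
  --temperature 0.5 --verbose
CONFIG

config_lines = config_string.split(/\n+/).reject { |s| s =~ /\A\s*#/ }.map(&:strip)
p config_lines.flat_map(&:shellsplit)
# => ["--model", "gpt-4", "--temperature", "0.5", "--verbose"]
```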
data/lib/ai_refactor/command_file_parser.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+require "yaml"
+
+module AIRefactor
+  class CommandFileParser
+    def self.command_file?(name)
+      name.match?(/\.ya?ml$/)
+    end
+
+    def initialize(path)
+      @path = path
+    end
+
+    def parse
+      raise StandardError, "Invalid command file: file does not exist" unless File.exist?(@path)
+
+      options = YAML.safe_load_file(@path, permitted_classes: [Symbol], symbolize_names: true, aliases: true)
+
+      unless options && options[:refactor]
+        raise StandardError, "Invalid command file format, a 'refactor' key is required"
+      end
+
+      options
+    end
+  end
+end
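A command file is a YAML document whose only required key (as far as the parser is concerned) is `refactor`. A hedged usage sketch, assuming the gem is installed; the refactor name and the extra keys are assumptions based on the options seen elsewhere in this release, not a documented schema:

```ruby
# Sketch: parsing a hypothetical command file with CommandFileParser.
require "ai_refactor"
require "tmpdir"

yaml = <<~YML
  refactor: rails/minitest/rspec_to_minitest
  input_file_paths:
    - spec/models/my_model_spec.rb
  output_file_path: test/models/my_model_test.rb
YML

Dir.mktmpdir do |dir|
  path = File.join(dir, "convert.yml")
  File.write(path, yaml)
  options = AIRefactor::CommandFileParser.new(path).parse
  p options[:refactor] # keys are symbolized by the parser
end
```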
data/lib/ai_refactor/context.rb
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module AIRefactor
+  class Context
+    def initialize(files:, text:, logger:)
+      @files = files
+      @text = text
+      @logger = logger
+    end
+
+    def prepare_context
+      context = read_contexts&.compact
+      file_context = (context && context.size.positive?) ? "Here is some related files:\n\n#{context.join("\n")}" : ""
+      if @text.nil? || @text.empty?
+        file_context
+      else
+        "Also note: #{@text}\n\n#{file_context}"
+      end
+    end
+
+    private
+
+    def read_contexts
+      @files&.map do |file|
+        unless File.exist?(file)
+          @logger.warn "Context file #{file} does not exist"
+          next
+        end
+        "#---\n# File '#{file}':\n\n```#{File.read(file)}```\n"
+      end
+    end
+  end
+end
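Context simply concatenates the extra note text and any context file contents into a preamble for the prompt. A quick usage sketch (the file path and note text are invented; any object responding to `warn` can serve as the logger):

```ruby
# Sketch of Context#prepare_context with hypothetical inputs.
require "ai_refactor"
require "logger"

context = AIRefactor::Context.new(
  files: ["app/models/my_model.rb"],
  text: "The project uses ActiveModel, not ActiveRecord.",
  logger: Logger.new($stdout)
)

puts context.prepare_context
# => "Also note: ..." followed by a "Here is some related files:" section
#    containing the file's contents (or a warning if the path does not exist).
```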
data/lib/ai_refactor/file_processor.rb
CHANGED
@@ -1,18 +1,19 @@
 # frozen_string_literal: true
 
+require "fileutils"
 require "openai"
 require "json"
 
 module AIRefactor
   class FileProcessor
-    attr_reader :
+    attr_reader :input_file_path, :output_path, :logger, :options
 
-    def initialize(
-      @
-      @prompt_file_path = prompt_file_path
+    def initialize(prompt:, ai_client:, logger:, output_path: nil, options: {})
+      @prompt = prompt
       @ai_client = ai_client
       @logger = logger
       @output_path = output_path
+      @options = options
     end
 
     def output_exists?
@@ -20,20 +21,26 @@ module AIRefactor
       File.exist?(output_path)
     end
 
-    def process!
-      logger.debug("Processing #{
-
-
-
-
-
-
-
+    def process!
+      logger.debug("Processing #{@prompt.input_file_path} with prompt in #{options.prompt_file_path}")
+      logger.debug("Options: #{options.inspect}")
+      messages = @prompt.chat_messages
+      if options[:review_prompt]
+        logger.info "Review prompt:\n"
+        messages.each do |message|
+          logger.info "\n-- Start of prompt for Role #{message[:role]} --\n"
+          logger.info message[:content]
+          logger.info "\n-- End of prompt for Role #{message[:role]} --\n"
+        end
+        return [nil, "Skipped as review prompt was requested", nil]
+      end
+
+      content, finished_reason, usage = generate_next_message(messages, options, ai_max_attempts)
 
       content = if content && content.length > 0
         processed = block_given? ? yield(content) : content
         if output_path
-
+          write_output(output_path, processed)
           logger.verbose "Wrote output to #{output_path}..."
         end
         processed
@@ -44,14 +51,18 @@ module AIRefactor
 
     private
 
-    def
+    def ai_max_attempts
+      options[:ai_max_attempts] || 1
+    end
+
+    def generate_next_message(messages, options, attempts_left)
       logger.verbose "Generate AI output. Generation attempts left: #{attempts_left}"
       logger.debug "Options: #{options.inspect}"
       logger.debug "Messages: #{messages.inspect}"
 
       response = @ai_client.chat(
         parameters: {
-          model: options[:ai_model] || "gpt-
+          model: options[:ai_model] || "gpt-4",
          messages: messages,
          temperature: options[:ai_temperature] || 0.7,
          max_tokens: options[:ai_max_tokens] || 1500
@@ -69,7 +80,7 @@ module AIRefactor
         generate_next_message(messages + [
           {role: "assistant", content: content},
           {role: "user", content: "Continue"}
-        ],
+        ], options, attempts_left - 1)
       else
         previous_messages = messages.filter { |m| m[:role] == "assistant" }.map { |m| m[:content] }.join
         content = if previous_messages.length > 0
@@ -80,5 +91,11 @@ module AIRefactor
         [content, finished_reason, response["usage"]]
       end
     end
+
+    def write_output(output_path, processed)
+      dir = File.dirname(output_path)
+      FileUtils.mkdir_p(dir) unless File.directory?(dir)
+      File.write(output_path, processed)
+    end
   end
 end
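The reworked `generate_next_message` above recurses when the model stops early. Conceptually, the `--continue` behaviour looks like the sketch below (a simplified rendering for illustration, not the gem's code verbatim; it assumes a ruby-openai `OpenAI::Client` and the usual `choices[0]` response shape):

```ruby
# Simplified sketch of the continue-on-"length" loop used by FileProcessor.
def generate_with_continuation(client, messages, attempts_left)
  response = client.chat(parameters: {model: "gpt-4", messages: messages})
  content = response.dig("choices", 0, "message", "content")
  finish_reason = response.dig("choices", 0, "finish_reason")

  if finish_reason == "length" && attempts_left > 1
    # Ran out of tokens: feed the partial answer back and ask the model to continue.
    generate_with_continuation(client, messages + [
      {role: "assistant", content: content},
      {role: "user", content: "Continue"}
    ], attempts_left - 1)
  else
    # Stitch any earlier partial assistant replies onto the final chunk.
    previous = messages.select { |m| m[:role] == "assistant" }.map { |m| m[:content] }.join
    [previous + content.to_s, finish_reason]
  end
end
```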
data/lib/ai_refactor/prompt.rb
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+
+module AIRefactor
+  class Prompt
+    INPUT_FILE_PATH_MARKER = "__{{input_file_path}}__"
+    OUTPUT_FILE_PATH_MARKER = "__{{output_file_path}}__"
+    HEADER_MARKER = "__{{prompt_header}}__"
+    FOOTER_MARKER = "__{{prompt_footer}}__"
+    CONTEXT_MARKER = "__{{context}}__"
+    CONTENT_MARKER = "__{{content}}__"
+
+    attr_reader :input_file_path
+
+    def initialize(options:, logger:, context: nil, input_content: nil, input_path: nil, output_file_path: nil, prompt: nil, prompt_header: nil, prompt_footer: nil)
+      @input_content = input_content
+      @input_file_path = input_path
+      @output_file_path = output_file_path
+      @logger = logger
+      @header = prompt_header
+      @footer = prompt_footer
+      @diff = options[:diff]
+      @context = context
+      @prompt = prompt || raise(StandardError, "Prompt not provided")
+    end
+
+    def chat_messages
+      [
+        {role: "system", content: system_prompt},
+        {role: "user", content: user_prompt}
+      ]
+    end
+
+    private
+
+    def system_prompt
+      prompt = expand_prompt(system_prompt_template, HEADER_MARKER, @header || "")
+      prompt = expand_prompt(prompt, CONTEXT_MARKER, @context&.prepare_context || "")
+      prompt = expand_prompt(prompt, INPUT_FILE_PATH_MARKER, @input_file_path || "")
+      prompt = expand_prompt(prompt, OUTPUT_FILE_PATH_MARKER, @output_file_path || "")
+      expand_prompt(prompt, FOOTER_MARKER, system_prompt_footer)
+    end
+
+    def system_prompt_template
+      @prompt
+    end
+
+    def system_prompt_footer
+      if @diff && @footer
+        "#{@footer}\n\n#{diff_prompt}"
+      elsif @diff
+        diff_prompt
+      elsif @footer
+        @footer
+      else
+        ""
+      end
+    end
+
+    def diff_prompt
+      File.read(prompt_path("diff.md"))
+    end
+
+    def prompt_path(file)
+      File.join(File.dirname(File.expand_path(__FILE__)), "prompts", file)
+    end
+
+    def user_prompt
+      expand_prompt(input_prompt, CONTENT_MARKER, input_to_process)
+    end
+
+    def input_to_process
+      return File.read(@input_file_path) if @input_file_path
+      @input_content
+    end
+
+    def input_prompt
+      File.read(prompt_path("input.md"))
+    end
+
+    def expand_prompt(prompt, marker, content)
+      prompt.gsub(marker, content)
+    end
+  end
+end
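Prompt works by substituting the `__{{...}}__` markers into a refactor's prompt template. A standalone sketch of the substitution step (the template text and values here are invented for illustration; real refactors ship their templates as .md files under lib/ai_refactor/refactors):

```ruby
# Sketch: expanding the Prompt markers into a hypothetical template.
template = <<~PROMPT
  __{{prompt_header}}__

  Refactor the file __{{input_file_path}}__ and write it to __{{output_file_path}}__.

  __{{context}}__
  __{{prompt_footer}}__
PROMPT

substitutions = {
  "__{{prompt_header}}__"    => "You are an expert Ruby developer.",
  "__{{input_file_path}}__"  => "spec/models/my_model_spec.rb",
  "__{{output_file_path}}__" => "test/models/my_model_test.rb",
  "__{{context}}__"          => "",
  "__{{prompt_footer}}__"    => ""
}

puts substitutions.reduce(template) { |text, (marker, value)| text.gsub(marker, value) }
```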
data/lib/ai_refactor/prompts/diff.md
@@ -0,0 +1,17 @@
+You MUST generate a diff in a format that can be understood and applied using git.
+
+Generate diff hunks that capture the modifications you see. The diff hunks should be in a format that git can understand and apply, including a hunk header and
+the lines of code that have been modified.
+Finally, output the generated diff as your answer. Do not provide further instruction.
+
+Example diff:
+
+```
+@@ -27,7 +27,7 @@ module AIRefactor
+    File.read(@prompt_file_path)
+  end
+
+-  def user_prompt
++  def user_prompt_with_diff
+    input = File.read(@file_path)
+```

data/lib/ai_refactor/prompts/input.md
@@ -0,0 +1 @@
+```__{{content}}__```