ai_refactor 0.3.0 → 0.4.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (35) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +55 -2
  3. data/Gemfile +2 -0
  4. data/Gemfile.lock +5 -1
  5. data/README.md +68 -24
  6. data/Rakefile +1 -1
  7. data/ai_refactor.gemspec +1 -0
  8. data/exe/ai_refactor +78 -44
  9. data/lib/ai_refactor/cli.rb +86 -0
  10. data/lib/ai_refactor/context.rb +33 -0
  11. data/lib/ai_refactor/file_processor.rb +34 -17
  12. data/lib/ai_refactor/prompt.rb +84 -0
  13. data/lib/ai_refactor/prompts/diff.md +17 -0
  14. data/lib/ai_refactor/prompts/input.md +1 -0
  15. data/lib/ai_refactor/refactors/base_refactor.rb +176 -0
  16. data/lib/ai_refactor/refactors/generic.rb +6 -80
  17. data/lib/ai_refactor/refactors/minitest/write_test_for_class.md +11 -0
  18. data/lib/ai_refactor/refactors/minitest/write_test_for_class.rb +51 -0
  19. data/lib/ai_refactor/refactors/project/write_changelog_from_history.md +35 -0
  20. data/lib/ai_refactor/refactors/project/write_changelog_from_history.rb +50 -0
  21. data/lib/ai_refactor/refactors/{prompts/rspec_to_minitest_rails.md → rails/minitest/rspec_to_minitest.md} +40 -1
  22. data/lib/ai_refactor/refactors/rails/minitest/rspec_to_minitest.rb +77 -0
  23. data/lib/ai_refactor/refactors/rspec/minitest_to_rspec.rb +13 -0
  24. data/lib/ai_refactor/refactors.rb +13 -5
  25. data/lib/ai_refactor/{refactors/tests → test_runners}/minitest_runner.rb +1 -1
  26. data/lib/ai_refactor/{refactors/tests → test_runners}/rspec_runner.rb +1 -1
  27. data/lib/ai_refactor/{refactors/tests → test_runners}/test_run_diff_report.rb +1 -1
  28. data/lib/ai_refactor/{refactors/tests → test_runners}/test_run_result.rb +1 -1
  29. data/lib/ai_refactor/version.rb +1 -1
  30. data/lib/ai_refactor.rb +13 -8
  31. metadata +34 -11
  32. data/lib/ai_refactor/base_refactor.rb +0 -70
  33. data/lib/ai_refactor/refactors/minitest_to_rspec.rb +0 -11
  34. data/lib/ai_refactor/refactors/rspec_to_minitest_rails.rb +0 -103
  35. /data/lib/ai_refactor/refactors/{prompts → rspec}/minitest_to_rspec.md +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: fa9421b9791dda3c02b930e63d0fa7bd3dc587d121a0b3748a0f9d8c649f98a3
4
- data.tar.gz: a5cb385968939847ebb1ceb1d663b1926e410fce0571fa2ab7463440347c0f50
3
+ metadata.gz: 725fb1da5fc311d3740687d97fa3b6c3c4292a3b531f467ea1d8ff9e734608bc
4
+ data.tar.gz: 4c16d310c643ae1158442816a9f863961ae151bb4ae486c965e96340342e2d44
5
5
  SHA512:
6
- metadata.gz: 6ff997f0533fe95da7f0361789d88b4a73ecd8fbd82a1f54179fbce3b6a3b466c40330ae615a00f850e549212fe2463ba60ca4467cc0d9f65bbd6679f326dff1
7
- data.tar.gz: 5898b17ebaa96c24a89cae1ff741a53d3afbb7a6c4844f52a271ddfecc1ebc1923026324601cb98e56b5aed0d673bba5d583d388a7a936e4cc86b272e1858bd8
6
+ metadata.gz: d0f380ef54a29be017b23c2caaa4491ee6304dc410e52ab6e11038b9f30c30a09a04597f3342332f464007e0e932f5b81112f712d46c584b2728d51bee61c036
7
+ data.tar.gz: 9ffa29857815cb73daef7d6902bf7301dc80e36361b0451d3d3df1b7921b1c0978ef16ea7963c544b893db1bd0d6db34120378aa6a7ae4ac5add0831e4b61dbf
data/CHANGELOG.md CHANGED
@@ -1,5 +1,58 @@
1
- ## [Unreleased]
1
+ # AI Refactor Changelog
2
+
3
+ ## [0.4.0] - 2023-08-15
4
+
5
+ ### Added
6
+
7
+ - Support for providing files as context for prompt.
8
+ - Output path configuration made available to all refactors.
9
+ - New refactor to write minitest tests for classes
10
+ - New refactor to write changelog entries.
11
+ - 'review' prompt CLI switch without invoking AI.
12
+ - CLI switch to control output overwrite behaviour.
13
+ - Extra text context for prompts via command line with -x
14
+ - Support for .ai_refactor config file which can provide default options/switches for prompts.
15
+
16
+ ### Changed
17
+
18
+ - Moved to using zeitwerk for loading and dotenv.
19
+ - Simple registry for refactors and change in naming convention.
20
+ - Updated diff prompt option and fixes for new structure.
21
+ - Reorganised refactors.
22
+ - Tweaked rspec to minitest prompt.
23
+ - Fixed check for custom prompt path.
24
+ - Updated docs.
25
+
26
+ ### Fixed
27
+
28
+ - Fixed reference to built in prompt paths.
29
+
30
+ ## [0.3.1] - 2023-05-25
31
+
32
+ ### Added
33
+
34
+ - Added support for outputting to file from generic refactor.
35
+
36
+ ## [0.2.0] - 2023-05-24
37
+
38
+ ### Added
39
+
40
+ - Introduced a generic refactor type which uses the user supplied prompt and outputs to stdout.
41
+ - Added support for outputting to file from generic refactor.
42
+ - Added a prompt for rspec_to_minitest.
43
+
44
+ ### Fixed
45
+
46
+ - Fixed example.
2
47
 
3
48
  ## [0.1.0] - 2023-05-19
4
49
 
5
- - Initial release
50
+ ### Added
51
+
52
+ - First version of CLI.
53
+
54
+ ### Changed
55
+
56
+ - Gem dependencies are not open-ended.
57
+ - Renamed to clean up intention.
58
+ - Updated docs.
data/Gemfile CHANGED
@@ -10,3 +10,5 @@ gem "rake", "~> 13.0"
10
10
  gem "minitest", "~> 5.0"
11
11
 
12
12
  gem "standard", "~> 1.3"
13
+
14
+ gem "dotenv"
data/Gemfile.lock CHANGED
@@ -1,16 +1,18 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- ai_refactor (0.3.0)
4
+ ai_refactor (0.4.0)
5
5
  colorize (< 2.0)
6
6
  open3 (< 2.0)
7
7
  ruby-openai (>= 3.4.0, < 5.0)
8
+ zeitwerk (~> 2.6)
8
9
 
9
10
  GEM
10
11
  remote: https://rubygems.org/
11
12
  specs:
12
13
  ast (2.4.2)
13
14
  colorize (0.8.1)
15
+ dotenv (2.8.1)
14
16
  faraday (2.7.4)
15
17
  faraday-net_http (>= 2.0, < 3.1)
16
18
  ruby2_keywords (>= 0.0.4)
@@ -62,12 +64,14 @@ GEM
62
64
  lint_roller (~> 1.0)
63
65
  rubocop-performance (~> 1.16.0)
64
66
  unicode-display_width (2.4.2)
67
+ zeitwerk (2.6.8)
65
68
 
66
69
  PLATFORMS
67
70
  arm64-darwin-22
68
71
 
69
72
  DEPENDENCIES
70
73
  ai_refactor!
74
+ dotenv
71
75
  minitest (~> 5.0)
72
76
  rake (~> 13.0)
73
77
  standard (~> 1.3)
data/README.md CHANGED
@@ -1,20 +1,24 @@
1
- # AI Refactor
1
+ # AI Refactor for Ruby
2
2
 
3
- AI Refactor is an experimental tool to see how AI (specifically [OpenAI's ChatGPT](https://platform.openai.com/)) can be used to help apply refactoring to code.
3
+ AI Refactor is an experimental tool to use AI to help apply refactoring to code.
4
4
 
5
- The goal is **not** that the AI decides what refactoring to do, but rather, given refactoring tasks specified by the human user,
6
- the AI can help identify which code to change and apply the relevant refactor.
5
+ __The goal for AI Refactor is to help apply repetitive refactoring tasks, not to replace the human mind that decides what refactoring is needed.__
7
6
 
8
- This is based on the assumption that the LLM AIs are pretty good at identifying patterns.
7
+ AI Refactor currently uses [OpenAI's ChatGPT](https://platform.openai.com/).
8
+
9
+ The tool lets the human user prompt the AI with explicit refactoring tasks, and can be run on one or more files at a time.
10
+ The tool then uses an LLM to apply the relevant refactor, and if appropriate, checks results by running tests and comparing output.
11
+
12
+ The focus of the tool is to work with the Ruby programming language ecosystem, but it can be used with any language.
9
13
 
10
14
  ## Available refactors
11
15
 
12
16
  Currently available:
13
17
 
14
- - `generic`
15
- - `rspec_to_minitest_rails`
18
+ - `rails/minitest/rspec_to_minitest`: convert RSpec specs to minitest tests in Rails apps
19
+ - `generic`: provide your own prompt for the AI and run against the input files
16
20
 
17
- ### `rspec_to_minitest_rails`
21
+ ### `rails/minitest/rspec_to_minitest`
18
22
 
19
23
  Converts RSpec tests to minitest tests for Rails test suites (ie generated minitest tests are actually `ActiveSupport::TestCase`s).
20
24
 
@@ -23,20 +27,17 @@ The tool first runs the original RSpec spec file and then runs the generated min
23
27
  The comparison is simply the count of successful and failed tests but this is probably enough to determine if the conversion worked.
24
28
 
25
29
  ```shellq
26
- stephen$ OPENAI_API_KEY=my-key ai_refactor rspec_to_minitest_rails spec/models/my_thing_spec.rb -v
27
- AI Refactor 1 files(s)/dir(s) '["spec/models/my_thing_spec.rb"]' with rspec_to_minitest_rails refactor
30
+ stephen$ OPENAI_API_KEY=my-key ai_refactor rails/minitest/rspec_to_minitest spec/models/my_thing_spec.rb
31
+ AI Refactor 1 files(s)/dir(s) '["spec/models/my_thing_spec.rb"]' with rails/minitest/rspec_to_minitest refactor
28
32
  ====================
29
33
  Processing spec/models/my_thing_spec.rb...
30
- [Run spec spec/models/my_thing_spec.rb... (bundle exec rspec spec/models/my_thing_spec.rb)]
31
- Do you wish to overwrite test/models/my_thing_test.rb? (y/n)
32
- y
33
- [Converting spec/models/my_thing_spec.rb...]
34
- [Generate AI output. Generation attempts left: 3]
35
- [OpenAI finished, with reason 'stop'...]
36
- [Used tokens: 1869]
37
- [Converted spec/models/my_thing_spec.rb to test/models/my_thing_test.rb...]
38
- [Run generated test file test/models/my_thing_test.rb (bundle exec rails test test/models/my_thing_test.rb)...]
39
- [Done converting spec/models/my_thing_spec.rb to test/models/my_thing_test.rb...]
34
+
35
+ Original test run results:
36
+ >> Examples: 41, Failures: 0, Pendings: 0
37
+
38
+ Translated test file results:
39
+ >> Runs: 41, Failures: 0, Skips: 0
40
+
40
41
  No differences found! Conversion worked!
41
42
  Refactor succeeded on spec/models/my_thing_spec.rb
42
43
 
@@ -47,7 +48,17 @@ Done processing all files!
47
48
 
48
49
  Applies the refactor specified by prompting the AI with the user supplied prompt. You must supply a prompt file with the `-p` option.
49
50
 
50
- The output is written to `stdout`.
51
+ The output is written to `stdout`, or to a file with the `--output` option.
52
+
53
+ ### `minitest/write_test_for_class`
54
+
55
+ Writes a minitest test for a given class. The output will, by default, be put into a directory named `test` in the current directory,
56
+ in a path that matches the input file path, with a `_test.rb` suffix.
57
+
58
+ For example, if the input file is `app/stuff/my_thing.rb` the output will be written to `test/app/stuff/my_thing_test.rb`.
59
+
60
+ This refactor can benefit from being passed related files as context, for example, if the class under test inherits from another class,
61
+ then context can be used to provide the parent class.
51
62
 
52
63
  ## Installation
53
64
 
@@ -66,19 +77,52 @@ See `ai_refactor --help` for more information.
66
77
  ```
67
78
  Usage: ai_refactor REFACTOR_TYPE INPUT_FILE_OR_DIR [options]
68
79
 
69
- Where REFACTOR_TYPE is one of: ["generic", "rspec_to_minitest_rails", "minitest_to_rspec"]
80
+ Where REFACTOR_TYPE is one of: ["generic" ... (run ai_refactor --help for full list of refactor types)]
70
81
 
82
+ -o, --output [FILE] Write output to given file instead of stdout. If no path provided will overwrite input file (will prompt to overwrite existing files). Some refactor tasks will write out to a new file by default. This option will override the tasks default behaviour.
83
+ -O, --output-template TEMPLATE Write outputs to files instead of stdout. The template is used to create the output name, where the it can have substitutions, '[FILE]', '[NAME]', '[DIR]', '[REFACTOR]' & '[EXT]'. Eg `[DIR]/[NAME]_[REFACTOR][EXT]` (will prompt to overwrite existing files)
84
+ -c, --context CONTEXT_FILES Specify one or more files to use as context for the AI. The contents of these files will be prepended to the prompt sent to the AI.
85
+ -r, --review-prompt Show the prompt that will be sent to ChatGPT but do not actually call ChatGPT or make changes to files.
71
86
  -p, --prompt PROMPT_FILE Specify path to a text file that contains the ChatGPT 'system' prompt.
72
- -c, --continue [MAX_MESSAGES] If ChatGPT stops generating due to the maximum token count being reached, continue to generate more messages, until a stop condition or MAX_MESSAGES. MAX_MESSAGES defaults to 3
73
- -m, --model MODEL_NAME Specify a ChatGPT model to use (default gpt-3.5-turbo).
87
+ -f, --diffs Request AI generate diffs of changes rather than writing out the whole file.
88
+ -C, --continue [MAX_MESSAGES] If ChatGPT stops generating due to the maximum token count being reached, continue to generate more messages, until a stop condition or MAX_MESSAGES. MAX_MESSAGES defaults to 3
89
+ -m, --model MODEL_NAME Specify a ChatGPT model to use (default gpt-4).
74
90
  --temperature TEMP Specify the temperature parameter for ChatGPT (default 0.7).
75
91
  --max-tokens MAX_TOKENS Specify the max number of tokens of output ChatGPT can generate. Max will depend on the size of the prompt (default 1500)
76
92
  -t, --timeout SECONDS Specify the max wait time for ChatGPT response.
93
+ --overwrite ANSWER Always overwrite existing output files, 'y' for yes, 'n' for no, or 'a' for ask. Default to ask.
77
94
  -v, --verbose Show extra output and progress info
78
95
  -d, --debug Show debugging output to help diagnose issues
79
96
  -h, --help Prints this help
80
97
  ```
81
98
 
99
+ ## Outputs
100
+
101
+ Some refactor tasks will write out to a new file by default, others to stdout.
102
+
103
+ The `--output` option lets you specify a file to write to instead of the refactor's default behaviour.
104
+
105
+ If `--output` is used without a value it overwrites the input with a prompt to overwrite existing files.
106
+
107
+ You can also output to a file using a template, `--output-template` to determine the output file name given a template string:
108
+
109
+ The template is used to create the output name, where it can have substitutions, '[FILE]', '[NAME]', '[DIR]', '[REFACTOR]' & '[EXT]'.
110
+
111
+ Eg `--output-template "[DIR]/[NAME]_[REFACTOR][EXT]"`
112
+
113
+ eg for the input `my_dir/my_class.rb`
114
+ - `[FILE]`: `my_class.rb`
115
+ - `[NAME]`: `my_class`
116
+ - `[DIR]`: `my_dir`
117
+ - `[REFACTOR]`: `generic`
118
+ - `[EXT]`: `.rb`
119
+
120
+
121
+ ## Note on performance and ChatGPT version
122
+
123
+ _The quality of results depend very much on the version of ChatGPT being used._
124
+
125
+ I have tested with both 3.5 and 4 and see **significantly** better performance with version 4.
82
126
 
83
127
  ## Development
84
128
 
data/Rakefile CHANGED
@@ -6,7 +6,7 @@ require "rake/testtask"
6
6
  Rake::TestTask.new(:test) do |t|
7
7
  t.libs << "test"
8
8
  t.libs << "lib"
9
- t.test_files = FileList["test/**/test_*.rb"]
9
+ t.test_files = FileList["test/**/*_test.rb"]
10
10
  end
11
11
 
12
12
  require "standard/rake"
data/ai_refactor.gemspec CHANGED
@@ -32,4 +32,5 @@ Gem::Specification.new do |spec|
32
32
  spec.add_dependency "colorize", "< 2.0"
33
33
  spec.add_dependency "open3", "< 2.0"
34
34
  spec.add_dependency "ruby-openai", ">= 3.4.0", "< 5.0"
35
+ spec.add_dependency "zeitwerk", "~> 2.6"
35
36
  end
data/exe/ai_refactor CHANGED
@@ -3,26 +3,53 @@
3
3
  require "optparse"
4
4
  require "colorize"
5
5
  require "openai"
6
+ require "shellwords"
6
7
  require_relative "../lib/ai_refactor"
7
8
 
9
+ require "dotenv/load"
10
+
8
11
  options = {}
9
12
 
10
13
  supported_refactors = AIRefactor::Refactors.all
11
- supported_names = AIRefactor::Refactors.names
14
+ descriptions = AIRefactor::Refactors.descriptions
12
15
 
13
16
  # General options for all refactor types
14
17
  option_parser = OptionParser.new do |parser|
15
- parser.banner = "Usage: ai_refactor REFACTOR_TYPE INPUT_FILE_OR_DIR [options]\n\nWhere REFACTOR_TYPE is one of: #{supported_names}\n\n"
18
+ parser.banner = "Usage: ai_refactor REFACTOR_TYPE INPUT_FILE_OR_DIR [options]\n\nWhere REFACTOR_TYPE is one of: \n- #{descriptions.to_a.map { |refactor| refactor.join(": ") }.join("\n- ")}\n\n"
19
+
20
+ parser.on("-o", "--output [FILE]", String, "Write output to given file instead of stdout. If no path provided will overwrite input file (will prompt to overwrite existing files). Some refactor tasks will write out to a new file by default. This option will override the tasks default behaviour.") do |f|
21
+ options[:output_file_path] = f
22
+ end
23
+
24
+ parser.on("-O", "--output-template TEMPLATE", String, "Write outputs to files instead of stdout. The template is used to create the output name, where the it can have substitutions, '[FILE]', '[NAME]', '[DIR]', '[REFACTOR]' & '[EXT]'. Eg `[DIR]/[NAME]_[REFACTOR][EXT]` (will prompt to overwrite existing files)") do |t|
25
+ options[:output_template_path] = t
26
+ end
27
+
28
+ parser.on("-c", "--context CONTEXT_FILES", Array, "Specify one or more files to use as context for the AI. The contents of these files will be prepended to the prompt sent to the AI.") do |c|
29
+ options[:context_file_paths] = c
30
+ end
31
+
32
+ parser.on("-x", "--extra CONTEXT_TEXT", String, "Specify some text to be prepended to the prompt sent to the AI as extra information of note.") do |c|
33
+ options[:context_text] = c
34
+ end
35
+
36
+ parser.on("-r", "--review-prompt", "Show the prompt that will be sent to ChatGPT but do not actually call ChatGPT or make changes to files.") do
37
+ options[:review_prompt] = true
38
+ end
16
39
 
17
40
  parser.on("-p", "--prompt PROMPT_FILE", String, "Specify path to a text file that contains the ChatGPT 'system' prompt.") do |f|
18
41
  options[:prompt_file_path] = f
19
42
  end
20
43
 
21
- parser.on("-c", "--continue [MAX_MESSAGES]", Integer, "If ChatGPT stops generating due to the maximum token count being reached, continue to generate more messages, until a stop condition or MAX_MESSAGES. MAX_MESSAGES defaults to 3") do |c|
44
+ parser.on("-f", "--diffs", "Request AI generate diffs of changes rather than writing out the whole file.") do
45
+ options[:diff] = true
46
+ end
47
+
48
+ parser.on("-C", "--continue [MAX_MESSAGES]", Integer, "If ChatGPT stops generating due to the maximum token count being reached, continue to generate more messages, until a stop condition or MAX_MESSAGES. MAX_MESSAGES defaults to 3") do |c|
22
49
  options[:ai_max_attempts] = c || 3
23
50
  end
24
51
 
25
- parser.on("-m", "--model MODEL_NAME", String, "Specify a ChatGPT model to use (default gpt-3.5-turbo).") do |m|
52
+ parser.on("-m", "--model MODEL_NAME", String, "Specify a ChatGPT model to use (default gpt-4).") do |m|
26
53
  options[:ai_model] = m
27
54
  end
28
55
 
@@ -38,6 +65,14 @@ option_parser = OptionParser.new do |parser|
38
65
  options[:ai_timeout] = m
39
66
  end
40
67
 
68
+ parser.on("--overwrite ANSWER", "Always overwrite existing output files, 'y' for yes, 'n' for no, or 'a' for ask. Default to ask.") do |a|
69
+ options[:overwrite] = a
70
+ end
71
+
72
+ parser.on("-N", "--no", "Never overwrite existing output files, same as --overwrite=n.") do |a|
73
+ options[:overwrite] = "n"
74
+ end
75
+
41
76
  parser.on("-v", "--verbose", "Show extra output and progress info") do
42
77
  options[:verbose] = true
43
78
  end
@@ -53,6 +88,22 @@ option_parser = OptionParser.new do |parser|
53
88
 
54
89
  parser.separator ""
55
90
 
91
+ # Example in Refactor class:
92
+ #
93
+ # class << self
94
+ # def command_line_options
95
+ # [
96
+ # {
97
+ # key: :my_option_key,
98
+ # short: "-s",
99
+ # long: "--long-form-cli-param [FILE]",
100
+ # type: String,
101
+ # help: "help text"
102
+ # },
103
+ # ...
104
+ # ]
105
+ # end
106
+ # end
56
107
  supported_refactors.each do |name, refactorer|
57
108
  parser.separator "For refactor type '#{name}':" if refactorer.command_line_options.size.positive?
58
109
  refactorer.command_line_options.each do |option|
@@ -65,55 +116,38 @@ option_parser = OptionParser.new do |parser|
65
116
  end
66
117
  end
67
118
 
68
- option_parser.parse!
69
-
70
- logger = AIRefactor::Logger.new(verbose: options[:verbose], debug: options[:debug])
119
+ # Load config from ~/.ai_refactor or .ai_refactor
120
+ home_config_file_path = File.expand_path("~/.ai_refactor")
121
+ local_config_file_path = File.join(Dir.pwd, ".ai_refactor")
71
122
 
72
- refactoring_type = ARGV.shift
73
- input_file_path = ARGV
123
+ arguments = ARGV.dup
74
124
 
75
- if !AIRefactor::Refactors.supported?(refactoring_type) || input_file_path.nil? || input_file_path.empty?
76
- puts option_parser.help
77
- exit 1
125
+ config_file_path = if File.exist?(local_config_file_path)
126
+ local_config_file_path
127
+ elsif File.exist?(home_config_file_path)
128
+ home_config_file_path
78
129
  end
79
-
80
- OpenAI.configure do |config|
81
- config.access_token = ENV.fetch("OPENAI_API_KEY")
82
- config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID", nil)
83
- config.request_timeout = options[:ai_timeout] || 240
130
+ if config_file_path
131
+ config_string = File.read(config_file_path)
132
+ config_lines = config_string.split(/\n+/).reject { |s| s =~ /\A\s*#/ }
133
+ arguments += config_lines.flat_map(&:shellsplit)
84
134
  end
85
135
 
86
- refactorer = AIRefactor::Refactors.get(refactoring_type)
136
+ option_parser.parse!(arguments)
87
137
 
88
- inputs = input_file_path.map do |path|
89
- File.exist?(path) ? path : Dir.glob(path)
90
- end.flatten
138
+ logger = AIRefactor::Logger.new(verbose: options[:verbose], debug: options[:debug])
91
139
 
92
- logger.info "AI Refactor #{inputs.size} files(s)/dir(s) '#{input_file_path}' with #{refactorer.refactor_name} refactor\n"
93
- logger.info "====================\n"
140
+ if config_file_path
141
+ logger.info "Loaded config from '#{config_file_path}'..."
142
+ end
94
143
 
95
- return_values = inputs.map do |file|
96
- logger.info "Processing #{file}..."
144
+ job = ::AIRefactor::Cli.new(refactoring_type: arguments.shift, inputs: arguments, options: options, logger: logger)
97
145
 
98
- refactor = refactorer.new(file, options, logger)
99
- refactor_returned = refactor.run
100
- failed = refactor_returned == false
101
- if failed
102
- logger.warn "Refactor failed on #{file}\nFailed due to: #{refactor.failed_message}\n"
103
- else
104
- logger.success "Refactor succeeded on #{file}\n"
105
- if refactor_returned.is_a?(String)
106
- logger.info "Refactor #{file} output:\n\n#{refactor_returned}\n\n"
107
- end
108
- end
109
- failed ? [file, refactor.failed_message] : true
146
+ unless job.valid?
147
+ puts option_parser.help
148
+ exit 1
110
149
  end
111
150
 
112
- if return_values.all?(true)
113
- logger.success "All files processed successfully!"
114
- else
115
- files = return_values.select { |v| v != true }
116
- logger.warn "Some files failed to process:\n#{files.map { |f| "#{f[0]} :\n > #{f[1]}" }.join("\n")}"
151
+ unless job.run
152
+ exit 1
117
153
  end
118
-
119
- logger.info "Done processing all files!"
@@ -0,0 +1,86 @@
1
+ # frozen_string_literal: true
2
+
3
+ module AIRefactor
4
+ class Cli
5
+ def initialize(refactoring_type:, inputs:, options:, logger:)
6
+ @refactoring_type = refactoring_type
7
+ @inputs = inputs
8
+ @options = options
9
+ @logger = logger
10
+ end
11
+
12
+ attr_reader :refactoring_type, :inputs, :options, :logger
13
+
14
+ def valid?
15
+ return false unless refactorer
16
+ inputs_valid = refactorer.takes_input_files? ? !(inputs.nil? || inputs.empty?) : true
17
+ AIRefactor::Refactors.supported?(refactoring_type) && inputs_valid
18
+ end
19
+
20
+ def run
21
+ return false unless valid?
22
+
23
+ OpenAI.configure do |config|
24
+ config.access_token = ENV.fetch("OPENAI_API_KEY")
25
+ config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID", nil)
26
+ config.request_timeout = options[:ai_timeout] || 240
27
+ end
28
+
29
+ if refactorer.takes_input_files?
30
+ expanded_inputs = inputs.map do |path|
31
+ File.exist?(path) ? path : Dir.glob(path)
32
+ end.flatten
33
+
34
+ logger.info "AI Refactor #{expanded_inputs.size} files(s)/dir(s) '#{expanded_inputs}' with #{refactorer.refactor_name} refactor\n"
35
+ logger.info "====================\n"
36
+
37
+ return_values = expanded_inputs.map do |file|
38
+ logger.info "Processing #{file}..."
39
+
40
+ refactor = refactorer.new(file, options, logger)
41
+ refactor_returned = refactor.run
42
+ failed = refactor_returned == false
43
+ if failed
44
+ logger.warn "Refactor failed on #{file}\nFailed due to: #{refactor.failed_message}\n"
45
+ else
46
+ logger.success "Refactor succeeded on #{file}\n"
47
+ if refactor_returned.is_a?(String)
48
+ logger.info "Refactor #{file} output:\n\n#{refactor_returned}\n\n"
49
+ end
50
+ end
51
+ failed ? [file, refactor.failed_message] : true
52
+ end
53
+
54
+ if return_values.all?(true)
55
+ logger.success "All files processed successfully!"
56
+ else
57
+ files = return_values.select { |v| v != true }
58
+ logger.warn "Some files failed to process:\n#{files.map { |f| "#{f[0]} :\n > #{f[1]}" }.join("\n")}"
59
+ end
60
+
61
+ logger.info "Done processing all files!"
62
+ else
63
+ name = refactorer.refactor_name
64
+ logger.info "AI Refactor - #{name} refactor\n"
65
+ logger.info "====================\n"
66
+ refactor = refactorer.new(nil, options, logger)
67
+ refactor_returned = refactor.run
68
+ failed = refactor_returned == false
69
+ if failed
70
+ logger.warn "Refactor failed with #{name}\nFailed due to: #{refactor.failed_message}\n"
71
+ else
72
+ logger.success "Refactor succeeded with #{name}\n"
73
+ if refactor_returned.is_a?(String)
74
+ logger.info "Refactor output:\n\n#{refactor_returned}\n\n"
75
+ end
76
+ end
77
+ end
78
+ end
79
+
80
+ private
81
+
82
+ def refactorer
83
+ @refactorer ||= AIRefactor::Refactors.get(refactoring_type)
84
+ end
85
+ end
86
+ end
@@ -0,0 +1,33 @@
1
+ # frozen_string_literal: true
2
+
3
+ module AIRefactor
4
+ class Context
5
+ def initialize(files:, text:, logger:)
6
+ @files = files
7
+ @text = text
8
+ @logger = logger
9
+ end
10
+
11
+ def prepare_context
12
+ context = read_contexts&.compact
13
+ file_context = (context && context.size.positive?) ? "Here is some related files:\n\n#{context.join("\n")}" : ""
14
+ if @text.nil? || @text.empty?
15
+ file_context
16
+ else
17
+ "Also note: #{@text}\n\n#{file_context}"
18
+ end
19
+ end
20
+
21
+ private
22
+
23
+ def read_contexts
24
+ @files&.map do |file|
25
+ unless File.exist?(file)
26
+ @logger.warn "Context file #{file} does not exist"
27
+ next
28
+ end
29
+ "#---\n# File '#{file}':\n\n```#{File.read(file)}```\n"
30
+ end
31
+ end
32
+ end
33
+ end
@@ -1,18 +1,19 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require "fileutils"
3
4
  require "openai"
4
5
  require "json"
5
6
 
6
7
  module AIRefactor
7
8
  class FileProcessor
8
- attr_reader :file_path, :output_path, :logger
9
+ attr_reader :input_file_path, :output_path, :logger, :options
9
10
 
10
- def initialize(input_path:, prompt_file_path:, ai_client:, logger:, output_path: nil)
11
- @file_path = input_path
12
- @prompt_file_path = prompt_file_path
11
+ def initialize(prompt:, ai_client:, logger:, output_path: nil, options: {})
12
+ @prompt = prompt
13
13
  @ai_client = ai_client
14
14
  @logger = logger
15
15
  @output_path = output_path
16
+ @options = options
16
17
  end
17
18
 
18
19
  def output_exists?
@@ -20,20 +21,26 @@ module AIRefactor
20
21
  File.exist?(output_path)
21
22
  end
22
23
 
23
- def process!(options)
24
- logger.debug("Processing #{file_path} with prompt in #{@prompt_file_path}")
25
- prompt = File.read(@prompt_file_path)
26
- input = File.read(@file_path)
27
- messages = [
28
- {role: "system", content: prompt},
29
- {role: "user", content: "Convert: ```#{input}```"}
30
- ]
31
- content, finished_reason, usage = generate_next_message(messages, prompt, options, options[:ai_max_attempts] || 3)
24
+ def process!
25
+ logger.debug("Processing #{@prompt.input_file_path} with prompt in #{@prompt.prompt_file_path}")
26
+ logger.debug("Options: #{options.inspect}")
27
+ messages = @prompt.chat_messages
28
+ if options[:review_prompt]
29
+ logger.info "Review prompt:\n"
30
+ messages.each do |message|
31
+ logger.info "\n-- Start of prompt for Role #{message[:role]} --\n"
32
+ logger.info message[:content]
33
+ logger.info "\n-- End of prompt for Role #{message[:role]} --\n"
34
+ end
35
+ return [nil, "Skipped as review prompt was requested", nil]
36
+ end
37
+
38
+ content, finished_reason, usage = generate_next_message(messages, options, ai_max_attempts)
32
39
 
33
40
  content = if content && content.length > 0
34
41
  processed = block_given? ? yield(content) : content
35
42
  if output_path
36
- File.write(output_path, processed)
43
+ write_output(output_path, processed)
37
44
  logger.verbose "Wrote output to #{output_path}..."
38
45
  end
39
46
  processed
@@ -44,14 +51,18 @@ module AIRefactor
44
51
 
45
52
  private
46
53
 
47
- def generate_next_message(messages, prompt, options, attempts_left)
54
+ def ai_max_attempts
55
+ options[:ai_max_attempts] || 1
56
+ end
57
+
58
+ def generate_next_message(messages, options, attempts_left)
48
59
  logger.verbose "Generate AI output. Generation attempts left: #{attempts_left}"
49
60
  logger.debug "Options: #{options.inspect}"
50
61
  logger.debug "Messages: #{messages.inspect}"
51
62
 
52
63
  response = @ai_client.chat(
53
64
  parameters: {
54
- model: options[:ai_model] || "gpt-3.5-turbo",
65
+ model: options[:ai_model] || "gpt-4",
55
66
  messages: messages,
56
67
  temperature: options[:ai_temperature] || 0.7,
57
68
  max_tokens: options[:ai_max_tokens] || 1500
@@ -69,7 +80,7 @@ module AIRefactor
69
80
  generate_next_message(messages + [
70
81
  {role: "assistant", content: content},
71
82
  {role: "user", content: "Continue"}
72
- ], prompt, options, attempts_left - 1)
83
+ ], options, attempts_left - 1)
73
84
  else
74
85
  previous_messages = messages.filter { |m| m[:role] == "assistant" }.map { |m| m[:content] }.join
75
86
  content = if previous_messages.length > 0
@@ -80,5 +91,11 @@ module AIRefactor
80
91
  [content, finished_reason, response["usage"]]
81
92
  end
82
93
  end
94
+
95
+ def write_output(output_path, processed)
96
+ dir = File.dirname(output_path)
97
+ FileUtils.mkdir_p(dir) unless File.directory?(dir)
98
+ File.write(output_path, processed)
99
+ end
83
100
  end
84
101
  end