n2b 0.5.1 → 0.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -32,7 +32,7 @@ module N2M
       "messages" => [
         {
           "role" => "user",
-          "content" => content
+          "content" => content
         }
       ]
     })
@@ -44,29 +44,16 @@ module N2M
     if response.code != '200'
       raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.message} - #{response.body}")
     end
-    answer = JSON.parse(response.body)['content'].first['text']
-    begin
-      # The llm_response.json file is likely for debugging and can be kept or removed.
-      # For this refactoring, I'll keep it as it doesn't affect the error handling logic.
-      File.open('llm_response.json', 'w') do |f|
-        f.write(answer)
-      end
-      # remove everything before the first { and after the last }
-
+    answer = JSON.parse(response.body)['content'].first['text']
+    begin
       answer = answer.sub(/.*?\{(.*)\}.*/m, '{\1}') unless answer.start_with?('{')
-      # gsub all \n with \\n that are inside "
-      #
       answer.gsub!(/"([^"]*)"/) { |match| match.gsub(/\n/, "\\n") }
-      # The llm_response.json file is likely for debugging and can be kept or removed.
-      File.open('llm_response.json', 'w') do |f|
-        f.write(answer)
-      end
       answer = JSON.parse(answer)
     rescue JSON::ParserError
       # This specific JSON parsing error is about the LLM's *response content*, not an API error.
       # It should probably be handled differently, but the subtask is about LlmApiError.
       # For now, keeping existing behavior for this part.
-      puts "Error parsing JSON from LLM response: #{answer}" # Clarified error message
+      puts "Error parsing JSON from LLM response: #{answer}"
       answer = { 'explanation' => answer} # Default fallback
     end
     answer
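For reference, the salvage logic this hunk consolidates can be exercised on its own. A minimal sketch, assuming a made-up Claude reply that wraps the JSON in prose and carries a raw newline inside a string value (the sample string is an assumption, not real model output):

    require 'json'

    # Hypothetical reply: prose around the JSON plus a literal newline
    # inside a quoted value, which plain JSON.parse would reject.
    answer = "Here you go:\n{\"explanation\": \"line one\nline two\"}\nDone."

    # Strip everything before the first '{' and after the last '}'.
    answer = answer.sub(/.*?\{(.*)\}.*/m, '{\1}') unless answer.start_with?('{')

    # Escape raw newlines that sit inside double-quoted strings.
    answer.gsub!(/"([^"]*)"/) { |match| match.gsub(/\n/, "\\n") }

    p JSON.parse(answer) # => {"explanation"=>"line one\nline two"}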
@@ -83,10 +70,10 @@ module N2M
 
     request.body = JSON.dump({
       "model" => get_model_name,
-      "max_tokens" => @config['max_tokens'] || 1024, # Allow overriding max_tokens from config
+      "max_tokens" => @config['max_tokens'] || 1024,
       "messages" => [
         {
-          "role" => "user", # The entire prompt is passed as a single user message
+          "role" => "user",
           "content" => prompt_content
         }
       ]
@@ -34,7 +34,10 @@ module N2M
         "parts" => [{
           "text" => content
         }]
-      }]
+      }],
+      "generationConfig" => {
+        "responseMimeType" => "application/json"
+      }
     })
 
     response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
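The generationConfig block added here is Gemini's switch for requesting JSON output instead of free-form text. A standalone sketch of the body this code now builds, with a placeholder prompt in place of the gem's real content:

    require 'json'

    # Placeholder prompt; the gem passes its own content here.
    body = JSON.dump({
      "contents" => [{
        "parts" => [{ "text" => "Describe this change as JSON." }]
      }],
      "generationConfig" => {
        "responseMimeType" => "application/json"
      }
    })
    puts body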
@@ -82,14 +85,12 @@ module N2M
     request.body = JSON.dump({
       "contents" => [{
         "parts" => [{
-          "text" => prompt_content # The entire prompt is passed as text
+          "text" => prompt_content
         }]
       }],
-      # Gemini specific: Ensure JSON output if possible via generationConfig
-      # However, the primary method is instructing it within the prompt itself.
-      # "generationConfig": {
-      #   "responseMimeType": "application/json", # This might be too restrictive or not always work as expected
-      # }
+      "generationConfig" => {
+        "responseMimeType" => "application/json"
+      }
     })
 
     response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
@@ -22,20 +22,23 @@ module N2M
     model_name
   end
 
-  def make_request(content)
+  def make_request(content, expect_json: true)
     request = Net::HTTP::Post.new(API_URI)
     request.content_type = 'application/json'
     request['Authorization'] = "Bearer #{@config['access_key']}"
 
-    request.body = JSON.dump({
+    body_hash = {
       "model" => get_model_name,
-      response_format: { type: 'json_object' },
       "messages" => [
         {
           "role" => "user",
           "content" => content
-      }]
-    })
+        }
+      ]
+    }
+    body_hash["response_format"] = { "type" => "json_object" } if expect_json
+
+    request.body = JSON.dump(body_hash)
 
     response = Net::HTTP.start(API_URI.hostname, API_URI.port, use_ssl: true) do |http|
       http.request(request)
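A sketch of the conditional body construction this hunk introduces, pulled out into a free-standing helper; the model name and prompt are placeholders, not values taken from the gem:

    require 'json'

    # response_format is attached only when the caller expects JSON,
    # so free-form requests skip OpenAI's JSON mode entirely.
    def build_body(content, expect_json: true)
      body_hash = {
        "model" => "gpt-4o", # placeholder model name
        "messages" => [{ "role" => "user", "content" => content }]
      }
      body_hash["response_format"] = { "type" => "json_object" } if expect_json
      JSON.dump(body_hash)
    end

    puts build_body("Explain this diff.", expect_json: false)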
@@ -46,34 +49,38 @@ module N2M
       raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.message} - #{response.body}")
     end
     answer = JSON.parse(response.body)['choices'].first['message']['content']
-    begin
-      # remove everything before the first { and after the last }
-      answer = answer.sub(/.*\{(.*)\}.*/m, '{\1}') unless answer.start_with?('{')
-      answer = JSON.parse(answer)
-    rescue JSON::ParserError
-      answer = { 'commands' => answer.split("\n"), explanation: answer }
+    if expect_json
+      begin
+        # remove everything before the first { and after the last }
+        answer = answer.sub(/.*\{(.*)\}.*/m, '{\1}') unless answer.start_with?('{')
+        answer = JSON.parse(answer)
+      rescue JSON::ParserError
+        answer = { 'commands' => answer.split("\n"), explanation: answer }
+      end
     end
     answer
   end
 
-  def analyze_code_diff(prompt_content)
+  def analyze_code_diff(prompt_content, expect_json: true)
     # This method assumes prompt_content is the full, ready-to-send prompt
     # including all instructions for the LLM (system message, diff, user additions, JSON format).
     request = Net::HTTP::Post.new(API_URI)
     request.content_type = 'application/json'
     request['Authorization'] = "Bearer #{@config['access_key']}"
 
-    request.body = JSON.dump({
+    body_hash = {
       "model" => get_model_name,
-      "response_format" => { "type" => "json_object" }, # Crucial for OpenAI to return JSON
       "messages" => [
         {
-          "role" => "user", # The entire prompt is passed as a single user message
+          "role" => "user",
           "content" => prompt_content
         }
       ],
-      "max_tokens" => @config['max_tokens'] || 1500 # Allow overriding, ensure it's enough for JSON
-    })
+      "max_tokens" => @config['max_tokens'] || 1500
+    }
+    body_hash["response_format"] = { "type" => "json_object" } if expect_json
+
+    request.body = JSON.dump(body_hash)
 
     response = Net::HTTP.start(API_URI.hostname, API_URI.port, use_ssl: true) do |http|
       http.request(request)
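The expect_json guard wraps the pre-existing fallback rather than changing it. A runnable sketch of that fallback path, using an invented non-JSON reply:

    require 'json'

    # With no braces in the reply, the sub is a no-op and JSON.parse
    # raises, so the raw text is wrapped into the fallback Hash.
    answer = "ls -la\ncd /tmp"
    begin
      answer = answer.sub(/.*\{(.*)\}.*/m, '{\1}') unless answer.start_with?('{')
      answer = JSON.parse(answer)
    rescue JSON::ParserError
      answer = { 'commands' => answer.split("\n"), explanation: answer }
    end
    p answer # the 'commands' key holds each line of the reply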
@@ -83,9 +90,8 @@ module N2M
       raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.message} - #{response.body}")
     end
 
-    # Return the raw JSON string. CLI's call_llm_for_diff_analysis will handle parsing.
-    # OpenAI with json_object mode should return the JSON directly in 'choices'.first.message.content
-    JSON.parse(response.body)['choices'].first['message']['content']
+    answer = JSON.parse(response.body)['choices'].first['message']['content']
+    answer
   end
  end
 end
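Per the comment removed in this hunk, analyze_code_diff still returns the model's content as a raw string and leaves parsing to the caller (call_llm_for_diff_analysis in the CLI). A caller-side sketch with an assumed sample payload:

    require 'json'

    # Assumed payload shape; the real keys depend on the prompt.
    raw = '{"summary": "refactor looks safe", "errors": []}'
    result = JSON.parse(raw)
    puts result['summary']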