n2b 0.7.1 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,8 @@ require 'net/http'
  require 'uri'
  require 'json'
  require 'base64'
+ require 'time'
+ require_relative 'template_engine'

  module N2B
  class JiraClient
@@ -70,19 +72,41 @@ module N2B
  # Generate comment using template system
  template_comment = generate_templated_comment(comment)

+ if debug_mode?
+ puts "🔍 DEBUG: Generated template comment (#{template_comment.length} chars):"
+ puts "--- TEMPLATE COMMENT START ---"
+ puts template_comment
+ puts "--- TEMPLATE COMMENT END ---"
+ end
+
  # Prepare the comment body in Jira's Atlassian Document Format (ADF)
  comment_body = {
  "body" => format_comment_as_adf(template_comment)
  }

+ if debug_mode?
+ puts "🔍 DEBUG: Formatted ADF comment body:"
+ puts "--- ADF BODY START ---"
+ puts JSON.pretty_generate(comment_body)
+ puts "--- ADF BODY END ---"
+ end
+
  # Make the API call to add a comment
  path = "/rest/api/3/issue/#{ticket_key}/comment"
+ puts "🔍 DEBUG: Making API request to: #{path}" if debug_mode?
+
  _response = make_api_request('POST', path, comment_body)

  puts "✅ Successfully added comment to Jira ticket #{ticket_key}"
  true
  rescue JiraApiError => e
  puts "❌ Failed to update Jira ticket #{ticket_key}: #{e.message}"
+ if debug_mode?
+ puts "🔍 DEBUG: Full error details:"
+ puts " - Ticket key: #{ticket_key}"
+ puts " - Template comment length: #{template_comment&.length || 'nil'}"
+ puts " - Comment body keys: #{comment_body&.keys || 'nil'}"
+ end
  false
  end

@@ -367,7 +391,7 @@ module N2B
  else
  formatted_date = created
  end
- rescue
+ rescue => e
  formatted_date = created
  end

@@ -400,6 +424,11 @@ module N2B
  end

  def generate_templated_comment(comment_data)
+ # Handle structured hash data from format_analysis_for_jira
+ if comment_data.is_a?(Hash) && comment_data.key?(:implementation_summary)
+ return generate_structured_comment(comment_data)
+ end
+
  # Prepare template data from the analysis results
  template_data = prepare_template_data(comment_data)

@@ -412,6 +441,110 @@ module N2B
  engine.render
  end

+ def generate_structured_comment(data)
+ # Generate a properly formatted comment from structured analysis data
+ git_info = extract_git_info
+ timestamp = Time.now.strftime("%Y-%m-%d %H:%M UTC")
+
+ comment_parts = []
+
+ # Header
+ comment_parts << "*N2B Code Analysis Report*"
+ comment_parts << ""
+
+ # Implementation Summary (always expanded)
+ comment_parts << "*Implementation Summary:*"
+ comment_parts << (data[:implementation_summary] || "Unknown")
+ comment_parts << ""
+
+ # Custom message if provided (also expanded)
+ if data[:custom_analysis_focus] && !data[:custom_analysis_focus].empty?
+ comment_parts << "*Custom Analysis Focus:*"
+ comment_parts << data[:custom_analysis_focus]
+ comment_parts << ""
+ end
+
+ comment_parts << "---"
+ comment_parts << ""
+
+ # Automated Analysis Findings
+ comment_parts << "*Automated Analysis Findings:*"
+ comment_parts << ""
+
+ # Critical Issues (collapsed by default)
+ critical_issues = classify_issues_by_severity(data[:issues] || [], 'CRITICAL')
+ if critical_issues.any?
+ comment_parts << "{expand:🚨 Critical Issues (Must Fix Before Merge)}"
+ critical_issues.each { |issue| comment_parts << "☐ #{issue}" }
+ comment_parts << "{expand}"
+ else
+ comment_parts << "✅ No critical issues found"
+ end
+ comment_parts << ""
+
+ # Important Issues (collapsed by default)
+ important_issues = classify_issues_by_severity(data[:issues] || [], 'IMPORTANT')
+ if important_issues.any?
+ comment_parts << "{expand:⚠️ Important Issues (Should Address)}"
+ important_issues.each { |issue| comment_parts << "☐ #{issue}" }
+ comment_parts << "{expand}"
+ else
+ comment_parts << "✅ No important issues found"
+ end
+ comment_parts << ""
+
+ # Suggested Improvements (collapsed by default)
+ if data[:improvements] && data[:improvements].any?
+ comment_parts << "{expand:💡 Suggested Improvements (Nice to Have)}"
+ data[:improvements].each { |improvement| comment_parts << "☐ #{improvement}" }
+ comment_parts << "{expand}"
+ else
+ comment_parts << "✅ No specific improvements suggested"
+ end
+ comment_parts << ""
+
+ # Test Coverage Assessment
+ comment_parts << "{expand:🧪 Test Coverage Assessment}"
+ if data[:test_coverage] && !data[:test_coverage].empty?
+ comment_parts << "*Overall Assessment:* #{data[:test_coverage]}"
+ else
+ comment_parts << "*Overall Assessment:* Not assessed"
+ end
+ comment_parts << "{expand}"
+ comment_parts << ""
+
+ # Missing Test Coverage
+ comment_parts << "*Missing Test Coverage:*"
+ comment_parts << "☐ No specific missing tests identified"
+ comment_parts << ""
+
+ # Requirements Evaluation
+ comment_parts << "*📋 Requirements Evaluation:*"
+ if data[:requirements_evaluation] && !data[:requirements_evaluation].empty?
+ comment_parts << "#{data[:requirements_evaluation]}"
+ else
+ comment_parts << "🔍 *UNCLEAR:* Requirements not provided or assessed"
+ end
+ comment_parts << ""
+
+ comment_parts << "---"
+ comment_parts << ""
+
+ # Footer with metadata (simplified)
+ comment_parts << "Analysis completed on #{timestamp} | Branch: #{git_info[:branch]}"
+
+ comment_parts.join("\n")
+ end
+
+ def classify_issues_by_severity(issues, target_severity)
+ return [] unless issues.is_a?(Array)
+
+ issues.select do |issue|
+ severity = classify_error_severity(issue)
+ severity == target_severity
+ end
+ end
+
  def prepare_template_data(comment_data)
  # Handle both string and hash inputs
  if comment_data.is_a?(String)
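Note on the new structured path above: `generate_templated_comment` now short-circuits to `generate_structured_comment` whenever it receives a hash containing `:implementation_summary`. A minimal sketch of the hash shape that method reads (key names come from the diff; every value below is invented for illustration):

```ruby
# Illustrative only: values are placeholders; keys are the ones generate_structured_comment reads.
analysis = {
  implementation_summary: "Adds debug logging and structured Jira comments",
  issues: ["comment_body can be nil when the template fails"],   # bucketed via classify_error_severity
  improvements: ["Extract ADF building into a separate class"],
  test_coverage: "Happy path covered; error paths untested",
  requirements_evaluation: "Matches the ticket description",
  custom_analysis_focus: "Pay extra attention to error handling"  # optional section
}
# Passing this hash to generate_templated_comment dispatches to generate_structured_comment,
# which returns a Jira-markup string with {expand:...} sections and ☐ checkbox lines.
```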
@@ -641,15 +774,17 @@ module N2B
  end

  def get_config(reconfigure: false, advanced_flow: false)
- # This should match the config loading from the main CLI
- # For now, return empty hash - will be enhanced when config system is unified
- {}
+ # Return the config that was passed during initialization
+ # This is used for template resolution and other configuration needs
+ @config
  end

  def convert_markdown_to_adf(markdown_text)
  content = []
  lines = markdown_text.split("\n")
  current_paragraph = []
+ current_expand = nil
+ expand_content = []

  lines.each do |line|
  case line
@@ -680,75 +815,118 @@ module N2B
  current_paragraph = []
  end

- # Create expand section
+ # Start collecting expand content
  expand_title = $1.strip
- content << {
+ current_expand = {
  "type" => "expand",
  "attrs" => { "title" => expand_title },
  "content" => []
  }
+ expand_content = []
  when /^\{expand\}$/ # Jira expand end
- # End of expand section - handled by the expand start
+ # End of expand section - add collected content
+ if current_expand
+ current_expand["content"] = expand_content
+ content << current_expand if expand_content.any? # Only add if has content
+ current_expand = nil
+ expand_content = []
+ end
  when /^☐\s+(.+)$/ # Unchecked checkbox
  # Flush current paragraph
  if current_paragraph.any?
- content << create_paragraph(current_paragraph.join(" "))
+ paragraph = create_paragraph(current_paragraph.join(" "))
+ if current_expand
+ expand_content << paragraph
+ else
+ content << paragraph
+ end
  current_paragraph = []
  end

- content << {
- "type" => "taskList",
- "content" => [
- {
- "type" => "taskItem",
- "attrs" => { "state" => "TODO" },
- "content" => [
- create_paragraph($1.strip)
- ]
- }
- ]
- }
+ # Convert checkbox to simple paragraph (no bullet points)
+ checkbox_paragraph = create_paragraph("" + $1.strip)
+
+ if current_expand
+ expand_content << checkbox_paragraph
+ else
+ content << checkbox_paragraph
+ end
  when /^☑\s+(.+)$/ # Checked checkbox
  # Flush current paragraph
  if current_paragraph.any?
- content << create_paragraph(current_paragraph.join(" "))
+ paragraph = create_paragraph(current_paragraph.join(" "))
+ if current_expand
+ expand_content << paragraph
+ else
+ content << paragraph
+ end
  current_paragraph = []
  end

- content << {
- "type" => "taskList",
- "content" => [
- {
- "type" => "taskItem",
- "attrs" => { "state" => "DONE" },
- "content" => [
- create_paragraph($1.strip)
- ]
- }
- ]
- }
+ # Convert checkbox to simple paragraph (no bullet points)
+ checkbox_paragraph = create_paragraph("" + $1.strip)
+
+ if current_expand
+ expand_content << checkbox_paragraph
+ else
+ content << checkbox_paragraph
+ end
  when /^---$/ # Horizontal rule
  # Flush current paragraph
  if current_paragraph.any?
- content << create_paragraph(current_paragraph.join(" "))
+ paragraph = create_paragraph(current_paragraph.join(" "))
+ if current_expand
+ expand_content << paragraph
+ else
+ content << paragraph
+ end
  current_paragraph = []
  end

- content << { "type" => "rule" }
+ rule = { "type" => "rule" }
+ if current_expand
+ expand_content << rule
+ else
+ content << rule
+ end
  when "" # Empty line
  # Flush current paragraph
  if current_paragraph.any?
- content << create_paragraph(current_paragraph.join(" "))
+ paragraph = create_paragraph(current_paragraph.join(" "))
+ if current_expand
+ expand_content << paragraph
+ else
+ content << paragraph
+ end
  current_paragraph = []
  end
  else # Regular text
- current_paragraph << line
+ # Skip empty or whitespace-only content
+ unless line.strip.empty? || line.strip == "{}"
+ current_paragraph << line
+ end
  end
  end

  # Flush any remaining paragraph
  if current_paragraph.any?
- content << create_paragraph(current_paragraph.join(" "))
+ paragraph = create_paragraph(current_paragraph.join(" "))
+ if current_expand
+ expand_content << paragraph
+ else
+ content << paragraph
+ end
+ end
+
+ # Close any remaining expand section
+ if current_expand && expand_content.any?
+ current_expand["content"] = expand_content
+ content << current_expand
+ end
+
+ # Ensure we have at least one content element
+ if content.empty?
+ content << create_paragraph("Analysis completed.")
  end

  {
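The rewrite above changes how `{expand:...}` / `{expand}` markers and the ☐/☑ checkbox lines are converted: checkboxes become plain paragraphs instead of ADF task lists, and nodes produced between the expand markers are collected into the expand node rather than appended at the top level. A rough sketch under those assumptions, using input shaped like the critical-issues block that `generate_structured_comment` emits (the abbreviated output only indicates the node shapes from the code above):

```ruby
text = "{expand:🚨 Critical Issues (Must Fix Before Merge)}\n☐ Possible nil dereference\n{expand}"

# convert_markdown_to_adf(text) now nests the checkbox paragraph inside the expand node,
# roughly (paragraph node abbreviated):
# { "type" => "expand",
#   "attrs" => { "title" => "🚨 Critical Issues (Must Fix Before Merge)" },
#   "content" => [ create_paragraph("Possible nil dereference") ] }
```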
@@ -772,6 +950,10 @@ module N2B

  private

+ def debug_mode?
+ ENV['N2B_DEBUG'] == 'true'
+ end
+
  def format_comment_as_adf(comment_data)
  # If comment_data is a string (from template), convert to simple ADF
  if comment_data.is_a?(String)
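All of the new 🔍 DEBUG output in this client is gated on the `debug_mode?` helper added above, which only inspects an environment variable, so verbose Jira logging can be toggled per run without code changes. A minimal sketch (the exact CLI invocation is not part of this diff):

```ruby
# Anything other than the exact string 'true' leaves debug output off.
ENV['N2B_DEBUG'] = 'true'
# ... use N2B::JiraClient as usual; request and response details are now printed.
```

From a shell, the equivalent is prefixing the command with `N2B_DEBUG=true`.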
@@ -1096,8 +1278,26 @@ module N2B
  request['Content-Type'] = 'application/json'
  request['Accept'] = 'application/json'

+ if debug_mode?
+ puts "🔍 DEBUG: Making #{method} request to: #{full_url}"
+ puts "🔍 DEBUG: Request headers: Content-Type=#{request['Content-Type']}, Accept=#{request['Accept']}"
+ if body
+ puts "🔍 DEBUG: Request body size: #{body.to_json.length} bytes"
+ puts "🔍 DEBUG: Request body preview: #{body.to_json[0..500]}#{'...' if body.to_json.length > 500}"
+ end
+ end
+
  response = http.request(request)

+ if debug_mode?
+ puts "🔍 DEBUG: Response code: #{response.code} #{response.message}"
+ if response.body && !response.body.empty?
+ # Force UTF-8 encoding to handle character encoding issues
+ response_body = response.body.force_encoding('UTF-8')
+ puts "🔍 DEBUG: Response body: #{response_body}"
+ end
+ end
+
  unless response.is_a?(Net::HTTPSuccess)
  error_message = "Jira API Error: #{response.code} #{response.message}"
  error_message += " - #{response.body}" if response.body && !response.body.empty?
@@ -1,6 +1,6 @@
  require_relative '../model_config'

- module N2M
+ module N2B
  module Llm
  class Claude
  API_URI = URI.parse('https://api.anthropic.com/v1/messages')
@@ -1,15 +1,16 @@
  require 'net/http'
  require 'json'
  require 'uri'
+ # Removed googleauth require
  require_relative '../model_config'

- module N2M
+ module N2B
  module Llm
  class Gemini
  API_URI = URI.parse('https://generativelanguage.googleapis.com/v1beta/models')

  def initialize(config)
- @config = config
+ @config = config # Used for access_key and model
  end

  def get_model_name
@@ -29,6 +30,8 @@ module N2M
  request = Net::HTTP::Post.new(uri)
  request.content_type = 'application/json'

+ # Removed Authorization header and token fetching logic
+
  request.body = JSON.dump({
  "contents" => [{
  "parts" => [{
@@ -82,6 +85,8 @@ module N2M
  request = Net::HTTP::Post.new(uri)
  request.content_type = 'application/json'

+ # Removed Authorization header and token fetching logic
+
  request.body = JSON.dump({
  "contents" => [{
  "parts" => [{
@@ -3,7 +3,7 @@ require 'json'
  require 'uri'
  require_relative '../model_config'

- module N2M
+ module N2B
  module Llm
  class OpenAi
  API_URI = URI.parse('https://api.openai.com/v1/chat/completions')
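The three existing LLM clients above (Claude, Gemini, OpenAi) move from the `N2M` namespace to `N2B`, and Gemini drops its `googleauth`-based authorization. Callers that referenced the old constants need the new path; a one-line sketch (the `config` hash is whatever these clients already accept, per the comment above `access_key` and `model` in Gemini's case):

```ruby
client = N2B::Llm::Gemini.new(config)  # was N2M::Llm::Gemini.new(config) in 0.7.1
```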
@@ -0,0 +1,225 @@
+ require 'net/http'
+ require 'json'
+ require 'uri'
+ require 'googleauth' # For service account authentication
+ require_relative '../model_config'
+ require_relative '../errors'
+
+ module N2B
+ module Llm
+ class VertexAi
+ # Vertex AI API endpoint format
+ DEFAULT_LOCATION = 'us-central1'
+ COMMON_LOCATIONS = [
+ 'us-central1', # Iowa, USA
+ 'us-east1', # South Carolina, USA
+ 'us-west1', # Oregon, USA
+ 'europe-west1', # Belgium
+ 'europe-west4', # Netherlands
+ 'asia-northeast1', # Tokyo, Japan
+ 'asia-southeast1' # Singapore
+ ].freeze
+
+ # HTTP timeout in seconds
+ REQUEST_TIMEOUT = 60
+
+ def initialize(config)
+ @config = config # Contains 'vertex_credential_file' and 'model'
+ @project_id = nil
+ @location = DEFAULT_LOCATION
+ load_project_info
+ end
+
+ private
+
+ def load_project_info
+ # Extract project_id from the credential file
+ credential_data = JSON.parse(File.read(@config['vertex_credential_file']))
+ @project_id = credential_data['project_id']
+
+ # Allow location override from config, with intelligent defaults
+ @location = determine_location
+ rescue JSON::ParserError => e
+ raise N2B::LlmApiError.new("Invalid JSON in credential file: #{e.message}")
+ rescue Errno::ENOENT => e
+ raise N2B::LlmApiError.new("Credential file not found: #{e.message}")
+ rescue => e
+ raise N2B::LlmApiError.new("Failed to load project info from credential file: #{e.message}")
+ end
+
+ def determine_location
+ # 1. Use explicit config if provided
+ return @config['vertex_location'] if @config['vertex_location']
+
+ # 2. Try to detect from project_id patterns (some projects have region hints)
+ # 3. Default to us-central1 but provide helpful error message if it fails
+ DEFAULT_LOCATION
+ end
+
+ def build_api_uri(model)
+ "https://#{@location}-aiplatform.googleapis.com/v1/projects/#{@project_id}/locations/#{@location}/publishers/google/models/#{model}:generateContent"
+ end
+
+ public
+
+ def get_model_name
+ # Resolve model name using the centralized configuration for 'vertexai'
+ model_name = N2B::ModelConfig.resolve_model('vertexai', @config['model'])
+ if model_name.nil? || model_name.empty?
+ # Fallback to default if no model specified for vertexai
+ model_name = N2B::ModelConfig.resolve_model('vertexai', N2B::ModelConfig.default_model('vertexai'))
+ end
+ # If still no model, a generic default could be used, or an error raised.
+ # For now, assume ModelConfig handles returning a usable default or nil.
+ # If ModelConfig.resolve_model can return nil and that's an issue, add handling here.
+ # For example, if model_name is still nil, raise an error or use a hardcoded default.
+ # Let's assume ModelConfig provides a valid model or a sensible default from models.yml.
+ model_name
+ end
+
+ def make_request(content)
+ model = get_model_name
+ raise N2B::LlmApiError.new("No model configured for Vertex AI.") if model.nil? || model.empty?
+
+ uri = URI.parse(build_api_uri(model))
+
+ request = Net::HTTP::Post.new(uri)
+ request.content_type = 'application/json'
+
+ begin
+ scope = 'https://www.googleapis.com/auth/cloud-platform'
+ authorizer = Google::Auth::ServiceAccountCredentials.make_creds(
+ json_key_io: File.open(@config['vertex_credential_file']),
+ scope: scope
+ )
+ access_token = authorizer.fetch_access_token!['access_token']
+ request['Authorization'] = "Bearer #{access_token}"
+ rescue StandardError => e
+ raise N2B::LlmApiError.new("Vertex AI - Failed to obtain Google Cloud access token: #{e.message}")
+ end
+
+ request.body = JSON.dump({
+ "contents" => [{
+ "role" => "user",
+ "parts" => [{
+ "text" => content
+ }]
+ }],
+ "generationConfig" => {
+ "responseMimeType" => "application/json" # Requesting JSON output from the LLM
+ }
+ })
+
+ begin
+ response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
+ http.read_timeout = REQUEST_TIMEOUT
+ http.open_timeout = 30
+ http.request(request)
+ end
+ rescue Net::TimeoutError, Net::ReadTimeout, Net::OpenTimeout => e
+ error_msg = "Vertex AI request timed out (region: #{@location}): #{e.message}"
+ error_msg += "\n\nThis might be a region issue. Try reconfiguring with 'n2b -c' and select a different region."
+ error_msg += "\nFor EU users, try: europe-west1 (Belgium) or europe-west4 (Netherlands)"
+ error_msg += "\nCommon regions: #{COMMON_LOCATIONS.join(', ')}"
+ raise N2B::LlmApiError.new(error_msg)
+ rescue => e
+ raise N2B::LlmApiError.new("Vertex AI network error: #{e.message}")
+ end
+
+ if response.code != '200'
+ error_msg = "Vertex AI LLM API Error: #{response.code} #{response.message} - #{response.body}"
+ if response.code == '404'
+ error_msg += "\n\nThis might be a region or model availability issue. Current region: #{@location}"
+ error_msg += "\nNote: Google models via Vertex AI are not available in all regions."
+ error_msg += "\nTry reconfiguring with 'n2b -c' and:"
+ error_msg += "\n 1. Select a different region (Common regions: #{COMMON_LOCATIONS.join(', ')})"
+ error_msg += "\n 2. Choose a different model (some models are only available in specific regions)"
+ end
+ raise N2B::LlmApiError.new(error_msg)
+ end
+
+ parsed_response = JSON.parse(response.body)
+ # Vertex AI response structure is the same as Gemini API
+ answer = parsed_response['candidates'].first['content']['parts'].first['text']
+
+ begin
+ if answer.strip.start_with?('{') && answer.strip.end_with?('}')
+ answer = JSON.parse(answer) # LLM returned JSON as a string
+ else
+ # If not JSON, wrap it as per existing Gemini class (for CLI compatibility)
+ answer = { 'explanation' => answer, 'code' => nil }
+ end
+ rescue JSON::ParserError
+ answer = { 'explanation' => answer, 'code' => nil }
+ end
+ answer
+ end
+
+ def analyze_code_diff(prompt_content)
+ model = get_model_name
+ raise N2B::LlmApiError.new("No model configured for Vertex AI.") if model.nil? || model.empty?
+
+ uri = URI.parse(build_api_uri(model))
+
+ request = Net::HTTP::Post.new(uri)
+ request.content_type = 'application/json'
+
+ begin
+ scope = 'https://www.googleapis.com/auth/cloud-platform'
+ authorizer = Google::Auth::ServiceAccountCredentials.make_creds(
+ json_key_io: File.open(@config['vertex_credential_file']),
+ scope: scope
+ )
+ access_token = authorizer.fetch_access_token!['access_token']
+ request['Authorization'] = "Bearer #{access_token}"
+ rescue StandardError => e
+ raise N2B::LlmApiError.new("Vertex AI - Failed to obtain Google Cloud access token for diff analysis: #{e.message}")
+ end
+
+ request.body = JSON.dump({
+ "contents" => [{
+ "role" => "user",
+ "parts" => [{
+ "text" => prompt_content
+ }]
+ }],
+ "generationConfig" => {
+ "responseMimeType" => "application/json" # Expecting JSON response from LLM
+ }
+ })
+
+ begin
+ response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
+ http.read_timeout = REQUEST_TIMEOUT
+ http.open_timeout = 30
+ http.request(request)
+ end
+ rescue Net::TimeoutError, Net::ReadTimeout, Net::OpenTimeout => e
+ error_msg = "Vertex AI diff analysis timed out (region: #{@location}): #{e.message}"
+ error_msg += "\n\nThis might be a region issue. Try reconfiguring with 'n2b -c' and select a different region."
+ error_msg += "\nFor EU users, try: europe-west1 (Belgium) or europe-west4 (Netherlands)"
+ error_msg += "\nCommon regions: #{COMMON_LOCATIONS.join(', ')}"
+ raise N2B::LlmApiError.new(error_msg)
+ rescue => e
+ raise N2B::LlmApiError.new("Vertex AI network error during diff analysis: #{e.message}")
+ end
+
+ if response.code != '200'
+ error_msg = "Vertex AI LLM API Error for diff analysis: #{response.code} #{response.message} - #{response.body}"
+ if response.code == '404'
+ error_msg += "\n\nThis might be a region or model availability issue. Current region: #{@location}"
+ error_msg += "\nNote: Google models via Vertex AI are not available in all regions."
+ error_msg += "\nTry reconfiguring with 'n2b -c' and:"
+ error_msg += "\n 1. Select a different region (Common regions: #{COMMON_LOCATIONS.join(', ')})"
+ error_msg += "\n 2. Choose a different model (some models are only available in specific regions)"
+ end
+ raise N2B::LlmApiError.new(error_msg)
+ end
+
+ parsed_response = JSON.parse(response.body)
+ # Return the raw JSON string from the 'text' field, CLI will parse it.
+ parsed_response['candidates'].first['content']['parts'].first['text']
+ end
+ end
+ end
+ end
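The new `VertexAi` client above authenticates with a Google Cloud service-account JSON file via `googleauth` and derives the project ID and region from it. A minimal sketch of constructing it (key names come from the code above; the path, model, and region values are placeholders):

```ruby
config = {
  'vertex_credential_file' => '/path/to/service-account.json',  # placeholder path
  'model'                  => 'some-model-key',                 # resolved through N2B::ModelConfig
  'vertex_location'        => 'europe-west1'                    # optional; defaults to us-central1
}
client = N2B::Llm::VertexAi.new(config)
# client.make_request("Summarize this change")  # performs the HTTP call; needs valid Google Cloud credentials
```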