n2b 0.3.1 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/n2b/llm/claude.rb CHANGED
@@ -1,13 +1,24 @@
+require_relative '../model_config'
+
 module N2M
   module Llm
     class Claude
       API_URI = URI.parse('https://api.anthropic.com/v1/messages')
-      MODELS = { 'haiku' => 'claude-3-haiku-20240307', 'sonnet' => 'claude-3-sonnet-20240229', 'sonnet35' => 'claude-3-5-sonnet-20240620', "sonnet37" => "claude-3-7-sonnet-20250219" }

       def initialize(config)
         @config = config
       end

+      def get_model_name
+        # Resolve model name using the centralized configuration
+        model_name = N2B::ModelConfig.resolve_model('claude', @config['model'])
+        if model_name.nil? || model_name.empty?
+          # Fallback to default if no model specified
+          model_name = N2B::ModelConfig.resolve_model('claude', N2B::ModelConfig.default_model('claude'))
+        end
+        model_name
+      end
+
       def make_request( content)
         uri = URI.parse('https://api.anthropic.com/v1/messages')
         request = Net::HTTP::Post.new(uri)
@@ -16,7 +27,7 @@ module N2M
         request['anthropic-version'] = '2023-06-01'

         request.body = JSON.dump({
-          "model" => MODELS[@config['model']],
+          "model" => get_model_name,
           "max_tokens" => 1024,
           "messages" => [
             {
@@ -71,7 +82,7 @@ module N2M
         request['anthropic-version'] = '2023-06-01'

         request.body = JSON.dump({
-          "model" => MODELS[@config['model']],
+          "model" => get_model_name,
           "max_tokens" => @config['max_tokens'] || 1024, # Allow overriding max_tokens from config
           "messages" => [
             {
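
Both request paths above now resolve the model through the shared `get_model_name` helper instead of the deleted `MODELS` hash. A minimal sketch of the resulting behavior, assuming the suggested-model table shipped in `config/models.yml` matches the fallback table later in this diff (the config hashes are illustrative):

    # A suggested key is mapped to its full API model name.
    N2M::Llm::Claude.new({ 'model' => 'sonnet' }).get_model_name
    # => "claude-3-sonnet-20240229"

    # An unrecognized name is passed through unchanged as a custom model.
    N2M::Llm::Claude.new({ 'model' => 'my-custom-claude' }).get_model_name
    # => "my-custom-claude"

    # With no model configured, the provider default from the config is used.
    N2M::Llm::Claude.new({}).get_model_name
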
data/lib/n2b/llm/gemini.rb CHANGED
@@ -1,21 +1,29 @@
 require 'net/http'
 require 'json'
 require 'uri'
+require_relative '../model_config'

 module N2M
   module Llm
     class Gemini
       API_URI = URI.parse('https://generativelanguage.googleapis.com/v1beta/models')
-      MODELS = {
-        'gemini-flash' => 'gemini-2.0-flash'
-      }

       def initialize(config)
         @config = config
       end

+      def get_model_name
+        # Resolve model name using the centralized configuration
+        model_name = N2B::ModelConfig.resolve_model('gemini', @config['model'])
+        if model_name.nil? || model_name.empty?
+          # Fallback to default if no model specified
+          model_name = N2B::ModelConfig.resolve_model('gemini', N2B::ModelConfig.default_model('gemini'))
+        end
+        model_name
+      end
+
       def make_request(content)
-        model = MODELS[@config['model']] || 'gemini-flash'
+        model = get_model_name
         uri = URI.parse("#{API_URI}/#{model}:generateContent?key=#{@config['access_key']}")

         request = Net::HTTP::Post.new(uri)
@@ -65,7 +73,7 @@ module N2M
       def analyze_code_diff(prompt_content)
         # This method assumes prompt_content is the full, ready-to-send prompt
         # including all instructions for the LLM (system message, diff, user additions, JSON format).
-        model = MODELS[@config['model']] || 'gemini-flash' # Or a specific model for analysis if different
+        model = get_model_name
         uri = URI.parse("#{API_URI}/#{model}:generateContent?key=#{@config['access_key']}")

         request = Net::HTTP::Post.new(uri)
data/lib/n2b/llm/ollama.rb ADDED
@@ -0,0 +1,129 @@
+require 'net/http'
+require 'json'
+require 'uri'
+require_relative '../model_config'
+
+module N2M
+  module Llm
+    class Ollama
+      # Default API URI for Ollama. This might need to be configurable later.
+      DEFAULT_OLLAMA_API_URI = 'http://localhost:11434/api/chat'
+
+      def initialize(config)
+        @config = config
+        # Allow overriding the Ollama API URI from config if needed
+        @api_uri = URI.parse(@config['ollama_api_url'] || DEFAULT_OLLAMA_API_URI)
+      end
+
+      def get_model_name
+        # Resolve model name using the centralized configuration
+        model_name = N2B::ModelConfig.resolve_model('ollama', @config['model'])
+        if model_name.nil? || model_name.empty?
+          # Fallback to default if no model specified
+          model_name = N2B::ModelConfig.resolve_model('ollama', N2B::ModelConfig.default_model('ollama'))
+        end
+        model_name
+      end
+
+      def make_request(prompt_content)
+        request = Net::HTTP::Post.new(@api_uri)
+        request.content_type = 'application/json'
+
+        # Ollama expects the model name directly in the request body.
+        # It also expects the full message history.
+        request.body = JSON.dump({
+          "model" => get_model_name,
+          "messages" => [
+            {
+              "role" => "user",
+              "content" => prompt_content
+            }
+          ],
+          "stream" => false # Ensure we get the full response, not a stream
+          # "format" => "json" # For some Ollama versions/models to enforce JSON output
+        })
+
+        begin
+          response = Net::HTTP.start(@api_uri.hostname, @api_uri.port, use_ssl: @api_uri.scheme == 'https') do |http|
+            # Set timeouts: open_timeout for connection, read_timeout for waiting for response
+            http.open_timeout = 5   # seconds
+            http.read_timeout = 120 # seconds
+            http.request(request)
+          end
+        rescue Net::OpenTimeout, Net::ReadTimeout => e
+          raise N2B::LlmApiError.new("Ollama API Error: Timeout connecting or reading from Ollama at #{@api_uri}: #{e.message}")
+        rescue Errno::ECONNREFUSED => e
+          raise N2B::LlmApiError.new("Ollama API Error: Connection refused at #{@api_uri}. Is Ollama running? #{e.message}")
+        end
+
+        if response.code != '200'
+          raise N2B::LlmApiError.new("Ollama API Error: #{response.code} #{response.message} - #{response.body}")
+        end
+
+        # Ollama's chat response structure is slightly different. The message is in `message.content`.
+        raw_response_body = JSON.parse(response.body)
+        answer_content = raw_response_body['message']['content']
+
+        begin
+          # Attempt to parse the answer_content as JSON.
+          # This is for n2b's expectation of JSON with 'commands' and 'explanation'.
+          parsed_answer = JSON.parse(answer_content)
+          if parsed_answer.is_a?(Hash) && parsed_answer.key?('commands')
+            parsed_answer
+          else
+            # If the content itself is valid JSON but not the expected structure, wrap it.
+            { 'commands' => [answer_content], 'explanation' => 'Response from LLM (JSON content).' }
+          end
+        rescue JSON::ParserError
+          # If answer_content is not JSON, wrap it in the n2b expected structure
+          { 'commands' => [answer_content], 'explanation' => answer_content }
+        end
+      end
+
+      def analyze_code_diff(prompt_content)
+        request = Net::HTTP::Post.new(@api_uri)
+        request.content_type = 'application/json'
+
+        # The prompt_content for diff analysis should instruct the LLM to return JSON.
+        # For Ollama, you can also try adding "format": "json" to the request if the model supports it.
+        request_body = {
+          "model" => get_model_name, # this class defines no MODELS constant; use the shared resolver
+          "messages" => [
+            {
+              "role" => "user",
+              "content" => prompt_content # This prompt must ask for JSON output
+            }
+          ],
+          "stream" => false
+        }
+        # Some Ollama models/versions might respect a "format": "json" parameter
+        # request_body["format"] = "json" # Uncomment if you want to try this
+
+        request.body = JSON.dump(request_body)
+
+        begin
+          response = Net::HTTP.start(@api_uri.hostname, @api_uri.port, use_ssl: @api_uri.scheme == 'https') do |http|
+            http.open_timeout = 5
+            http.read_timeout = 180 # Potentially longer for analysis
+            http.request(request)
+          end
+        rescue Net::OpenTimeout, Net::ReadTimeout => e
+          raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Timeout for #{@api_uri}: #{e.message}")
+        rescue Errno::ECONNREFUSED => e
+          raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): Connection refused at #{@api_uri}. Is Ollama running? #{e.message}")
+        end
+
+        if response.code != '200'
+          raise N2B::LlmApiError.new("Ollama API Error (analyze_code_diff): #{response.code} #{response.message} - #{response.body}")
+        end
+
+        # Return the raw JSON string from the LLM's response content.
+        # The calling method (call_llm_for_diff_analysis in cli.rb) will parse this.
+        raw_response_body = JSON.parse(response.body)
+        raw_response_body['message']['content']
+      end
+    end
+  end
+end
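
The Ollama client is driven entirely by the plain config hash; `ollama_api_url` is the only Ollama-specific key. A hedged usage sketch (the model name and prompt are placeholders):

    config = {
      'model'          => 'llama3',                         # resolved via N2B::ModelConfig
      'ollama_api_url' => 'http://localhost:11434/api/chat' # optional; DEFAULT_OLLAMA_API_URI if omitted
    }
    client = N2M::Llm::Ollama.new(config)
    result = client.make_request('show disk usage of the current directory')
    result['commands']    # array of command strings (or the raw answer, wrapped, if the LLM returned plain text)
    result['explanation'] # accompanying explanation text
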
data/lib/n2b/llm/open_ai.rb CHANGED
@@ -1,24 +1,34 @@
 require 'net/http'
 require 'json'
 require 'uri'
+require_relative '../model_config'

 module N2M
   module Llm
     class OpenAi
       API_URI = URI.parse('https://api.openai.com/v1/chat/completions')
-      MODELS = { 'gpt-4o' => 'gpt-4o','gpt-4o-mini'=>'gpt-4o-mini', 'gpt-35' => 'gpt-3.5-turbo-1106' }

       def initialize(config)
         @config = config
       end

+      def get_model_name
+        # Resolve model name using the centralized configuration
+        model_name = N2B::ModelConfig.resolve_model('openai', @config['model'])
+        if model_name.nil? || model_name.empty?
+          # Fallback to default if no model specified
+          model_name = N2B::ModelConfig.resolve_model('openai', N2B::ModelConfig.default_model('openai'))
+        end
+        model_name
+      end
+
       def make_request(content)
         request = Net::HTTP::Post.new(API_URI)
         request.content_type = 'application/json'
         request['Authorization'] = "Bearer #{@config['access_key']}"

         request.body = JSON.dump({
-          "model" => MODELS[@config['model']],
+          "model" => get_model_name,
           response_format: { type: 'json_object' },
           "messages" => [
             {
@@ -54,7 +64,7 @@ module N2M
         request['Authorization'] = "Bearer #{@config['access_key']}"

         request.body = JSON.dump({
-          "model" => MODELS[@config['model']],
+          "model" => get_model_name,
           "response_format" => { "type" => "json_object" }, # Crucial for OpenAI to return JSON
           "messages" => [
             {
data/lib/n2b/llm/open_router.rb ADDED
@@ -0,0 +1,116 @@
+require 'net/http'
+require 'json'
+require 'uri'
+require_relative '../model_config'
+
+module N2M
+  module Llm
+    class OpenRouter
+      API_URI = URI.parse('https://openrouter.ai/api/v1/chat/completions')
+
+      def initialize(config)
+        @config = config
+        @api_key = @config['access_key']
+        @site_url = @config['openrouter_site_url'] || ''   # Optional: Read from config
+        @site_name = @config['openrouter_site_name'] || '' # Optional: Read from config
+      end
+
+      def get_model_name
+        # Resolve model name using the centralized configuration
+        model_name = N2B::ModelConfig.resolve_model('openrouter', @config['model'])
+        if model_name.nil? || model_name.empty?
+          # Fallback to default if no model specified
+          model_name = N2B::ModelConfig.resolve_model('openrouter', N2B::ModelConfig.default_model('openrouter'))
+        end
+        model_name
+      end
+
+      def make_request(prompt_content)
+        request = Net::HTTP::Post.new(API_URI)
+        request.content_type = 'application/json'
+        request['Authorization'] = "Bearer #{@api_key}"
+
+        # Add OpenRouter specific headers
+        request['HTTP-Referer'] = @site_url unless @site_url.empty?
+        request['X-Title'] = @site_name unless @site_name.empty?
+
+        request.body = JSON.dump({
+          "model" => get_model_name,
+          "messages" => [
+            {
+              "role" => "user",
+              "content" => prompt_content
+            }
+          ]
+          # TODO: Consider adding max_tokens, temperature, etc. from @config if needed
+        })
+
+        response = Net::HTTP.start(API_URI.hostname, API_URI.port, use_ssl: true) do |http|
+          http.request(request)
+        end
+
+        if response.code != '200'
+          raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.message} - #{response.body}")
+        end
+
+        # Assuming OpenRouter returns a similar structure to OpenAI for chat completions
+        answer_content = JSON.parse(response.body)['choices'].first['message']['content']
+
+        begin
+          # Attempt to parse the answer as JSON, as expected by the calling CLI's process_natural_language_command.
+          # Ensure it has the 'commands' and 'explanation' structure if it's for n2b's command generation.
+          # This might need adjustment based on how `make_request` is used.
+          # If it's just for generic requests, this parsing might be too specific.
+          # For now, mirroring the OpenAI class's attempt to parse JSON from the content.
+          parsed_answer = JSON.parse(answer_content)
+          if parsed_answer.is_a?(Hash) && parsed_answer.key?('commands')
+            parsed_answer
+          else
+            # If the content itself isn't the JSON structure n2b expects,
+            # but is valid JSON, return it. Otherwise, wrap it.
+            # This part needs to be robust based on actual OpenRouter responses.
+            { 'commands' => [answer_content], 'explanation' => 'Response from LLM.' } # Fallback
+          end
+        rescue JSON::ParserError
+          # If the content isn't JSON, wrap it in the expected structure for n2b
+          { 'commands' => [answer_content], 'explanation' => answer_content }
+        end
+      end
+
+      def analyze_code_diff(prompt_content)
+        request = Net::HTTP::Post.new(API_URI) # Chat completions endpoint
+        request.content_type = 'application/json'
+        request['Authorization'] = "Bearer #{@api_key}"
+
+        # Add OpenRouter specific headers
+        request['HTTP-Referer'] = @site_url unless @site_url.empty?
+        request['X-Title'] = @site_name unless @site_name.empty?
+
+        # The prompt_content for diff analysis should already instruct the LLM to return JSON.
+        request.body = JSON.dump({
+          "model" => get_model_name,
+          # "response_format" => { "type" => "json_object" }, # Some models on OpenRouter might support this
+          "messages" => [
+            {
+              "role" => "user",
+              "content" => prompt_content # This prompt should ask for JSON output
+            }
+          ],
+          "max_tokens" => @config['max_tokens'] || 2048 # Ensure enough tokens for JSON
+        })
+
+        response = Net::HTTP.start(API_URI.hostname, API_URI.port, use_ssl: true) do |http|
+          http.request(request)
+        end
+
+        if response.code != '200'
+          raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.message} - #{response.body}")
+        end
+
+        # Return the raw JSON string from the LLM's response content.
+        # The calling method (call_llm_for_diff_analysis in cli.rb) will parse this.
+        JSON.parse(response.body)['choices'].first['message']['content']
+      end
+    end
+  end
+end
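
OpenRouter exposes an OpenAI-compatible chat-completions API, so this class mirrors `OpenAi` and only adds the two optional attribution headers. A sketch of the config keys it reads (all values below are placeholders; `diff_prompt` stands for a fully prepared analysis prompt):

    config = {
      'access_key'           => ENV['OPENROUTER_API_KEY'],
      'model'                => 'gpt-4o',              # resolved to 'openai/gpt-4o' by ModelConfig
      'openrouter_site_url'  => 'https://example.com', # sent as HTTP-Referer when non-empty
      'openrouter_site_name' => 'My App'               # sent as X-Title when non-empty
    }
    client = N2M::Llm::OpenRouter.new(config)
    raw_json = client.analyze_code_diff(diff_prompt) # raw JSON string; cli.rb parses it
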
data/lib/n2b/model_config.rb ADDED
@@ -0,0 +1,139 @@
+require 'yaml'
+
+module N2B
+  class ModelConfig
+    CONFIG_PATH = File.expand_path('config/models.yml', __dir__)
+
+    def self.load_models
+      @models ||= YAML.load_file(CONFIG_PATH)
+    rescue => e
+      puts "Warning: Could not load models configuration: #{e.message}"
+      puts "Using fallback model configuration."
+      fallback_models
+    end
+
+    def self.fallback_models
+      {
+        'claude' => { 'suggested' => { 'sonnet' => 'claude-3-sonnet-20240229' }, 'default' => 'sonnet' },
+        'openai' => { 'suggested' => { 'gpt-4o-mini' => 'gpt-4o-mini' }, 'default' => 'gpt-4o-mini' },
+        'gemini' => { 'suggested' => { 'gemini-flash' => 'gemini-2.0-flash' }, 'default' => 'gemini-flash' },
+        'openrouter' => { 'suggested' => { 'gpt-4o' => 'openai/gpt-4o' }, 'default' => 'gpt-4o' },
+        'ollama' => { 'suggested' => { 'llama3' => 'llama3' }, 'default' => 'llama3' }
+      }
+    end
+
+    def self.suggested_models(provider)
+      load_models.dig(provider, 'suggested') || {}
+    end
+
+    def self.default_model(provider)
+      load_models.dig(provider, 'default')
+    end
+
+    def self.resolve_model(provider, user_input)
+      return nil if user_input.nil? || user_input.empty?
+
+      suggested = suggested_models(provider)
+
+      # If user input matches a suggested model key, return the API name
+      if suggested.key?(user_input)
+        suggested[user_input]
+      else
+        # Otherwise, treat as custom model (return as-is)
+        user_input
+      end
+    end
+
+    def self.display_model_options(provider)
+      suggested = suggested_models(provider)
+      default = default_model(provider)
+
+      options = []
+      suggested.each_with_index do |(key, api_name), index|
+        default_marker = key == default ? " [default]" : ""
+        options << "#{index + 1}. #{key} (#{api_name})#{default_marker}"
+      end
+      options << "#{suggested.size + 1}. custom (enter your own model name)"
+
+      options
+    end
+
+    def self.get_model_choice(provider, current_model = nil)
+      options = display_model_options(provider)
+      suggested = suggested_models(provider)
+      default = default_model(provider)
+
+      puts "\nChoose a model for #{provider}:"
+      options.each { |option| puts "  #{option}" }
+
+      current_display = current_model || default
+      print "\nEnter choice (1-#{options.size}) or model name [#{current_display}]: "
+
+      input = $stdin.gets.chomp
+
+      # If empty input, use current or default
+      if input.empty?
+        return current_model || resolve_model(provider, default)
+      end
+
+      # If numeric input, handle menu selection
+      if input.match?(/^\d+$/)
+        choice_num = input.to_i
+        if choice_num >= 1 && choice_num <= suggested.size
+          # Selected a suggested model
+          selected_key = suggested.keys[choice_num - 1]
+          return resolve_model(provider, selected_key)
+        elsif choice_num == suggested.size + 1
+          # Selected custom option
+          print "Enter custom model name: "
+          custom_model = $stdin.gets.chomp
+          if custom_model.empty?
+            puts "Custom model name cannot be empty. Using default."
+            return resolve_model(provider, default)
+          end
+          puts "✓ Using custom model: #{custom_model}"
+          return custom_model
+        else
+          puts "Invalid choice. Using default."
+          return resolve_model(provider, default)
+        end
+      else
+        # Direct model name input - validate it first
+        if is_valid_model_name?(input)
+          resolved = resolve_model(provider, input)
+          if suggested.key?(input)
+            puts "✓ Using suggested model: #{input} (#{resolved})"
+          else
+            puts "✓ Using custom model: #{resolved}"
+          end
+          return resolved
+        else
+          puts "Invalid model name '#{input}'. Model names should be meaningful (at least 3 characters, no single letters)."
+          puts "Using default model instead."
+          return resolve_model(provider, default)
+        end
+      end
+    end
+
+    # NOTE: a bare `private` has no effect on `def self.` methods, so the
+    # explicit private_class_method call below is what actually hides this helper.
+    def self.is_valid_model_name?(input)
+      return false if input.nil? || input.empty?
+
+      # Reject single characters (like 'y', 'n', etc.)
+      return false if input.length == 1
+
+      # Reject very short inputs that are likely not model names
+      return false if input.length < 3
+
+      # Reject common boolean/confirmation inputs
+      return false if %w[y n yes no true false].include?(input.downcase)
+
+      # Reject inputs that are just numbers
+      return false if input.match?(/^\d+$/)
+
+      # Accept anything else as potentially valid
+      true
+    end
+    private_class_method :is_valid_model_name?
+  end
+end
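
`ModelConfig` is the single lookup point all five provider classes delegate to. The resolution rules implemented above, sketched with values from the fallback table (the shipped `config/models.yml` is not part of this diff, so actual suggestions may differ):

    N2B::ModelConfig.resolve_model('claude', 'sonnet')       # => "claude-3-sonnet-20240229" (suggested key)
    N2B::ModelConfig.resolve_model('claude', 'my-fine-tune') # => "my-fine-tune" (custom name, returned as-is)
    N2B::ModelConfig.resolve_model('claude', nil)            # => nil (callers then fall back to default_model)
    N2B::ModelConfig.default_model('openrouter')             # => "gpt-4o"
    N2B::ModelConfig.suggested_models('ollama')              # => { "llama3" => "llama3" }
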
data/lib/n2b/version.rb CHANGED
@@ -1,4 +1,4 @@
 # lib/n2b/version.rb
 module N2B
-  VERSION = "0.3.1"
+  VERSION = "0.5.0"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: n2b
 version: !ruby/object:Gem::Version
-  version: 0.3.1
+  version: 0.5.0
 platform: ruby
 authors:
 - Stefan Nothegger
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-06-04 00:00:00.000000000 Z
+date: 2025-06-05 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: json
@@ -64,22 +64,28 @@ extra_rdoc_files: []
 files:
 - README.md
 - bin/n2b
+- bin/n2b-test-jira
 - lib/n2b.rb
 - lib/n2b/base.rb
 - lib/n2b/cli.rb
+- lib/n2b/config/models.yml
 - lib/n2b/errors.rb
 - lib/n2b/irb.rb
+- lib/n2b/jira_client.rb
 - lib/n2b/llm/claude.rb
 - lib/n2b/llm/gemini.rb
+- lib/n2b/llm/ollama.rb
 - lib/n2b/llm/open_ai.rb
+- lib/n2b/llm/open_router.rb
+- lib/n2b/model_config.rb
 - lib/n2b/version.rb
 homepage: https://github.com/stefan-kp/n2b
 licenses:
 - MIT
 metadata:
-  homepage_uri: https://github.com/stefan-kp/n2b
   source_code_uri: https://github.com/stefan-kp/n2b
   changelog_uri: https://github.com/stefan-kp/n2b/blob/main/CHANGELOG.md
+  documentation_uri: https://github.com/stefan-kp/n2b/blob/main/README.md
 post_install_message:
 rdoc_options: []
 require_paths:
@@ -95,7 +101,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.22
+rubygems_version: 3.5.3
 signing_key:
 specification_version: 4
 summary: Convert natural language to bash commands or ruby code and help with debugging.