active_genie 0.30.1 → 0.30.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 5673a34f1cd8c2b2a9a884a73ded1bf12e28f4d29ab5f351ecb3374bc9ca0617
4
- data.tar.gz: 7ca0bc8d8bdc8b6d3618bd5f22bd13f80a9ac4399fa0282e6b11a461beb1cc1d
3
+ metadata.gz: 0305b370de6af340e3ae6d3afa10b67f7be5f68ba20916c2fb3b8c934c40fc3e
4
+ data.tar.gz: ab3ac5ed4b7bd14c53ceeff02240953b7427260e94630803dffd8669d5a5e051
5
5
  SHA512:
6
- metadata.gz: f174076b72df558ef82d3834043ae3299ab1aa41d235d6b547cbee45d21804295b87729a92dda51910d37a076f954fb8b4e2460443c1814f73221b5663255d96
7
- data.tar.gz: 503840469d6a67206d67d3f5b27f73803cb736cd4c826e7ea4b4575f1f702b7875fdeb979a875c29e88d5b06d9d18b49c94ae4f957f13563289428bdc1d83cd0
6
+ metadata.gz: 84c7f399b16c21599e6302aeb8c4243cd833dc7ab554bbc610d400d982e7e9cb48cdc5b626cd74897dfa235a97f4a48115110275e8761bfe2be2a8964b040890
7
+ data.tar.gz: 705f57b32a5f59b2b8d88650e3b5eb7be657e577b571e74709a53c5b60aee5bf38740621d901fa72342a7d89e113982173d6ab1120ecc76c105b0589bd8910db
data/VERSION CHANGED
@@ -1 +1 @@
1
- 0.30.1
1
+ 0.30.3
@@ -31,7 +31,7 @@ module ActiveGenie
31
31
  @player_a = player_a
32
32
  @player_b = player_b
33
33
  @criteria = criteria
34
- @config = ActiveGenie.configuration.merge(config)
34
+ @initial_config = config
35
35
  end
36
36
 
37
37
  # @return [ComparatorResponse] The evaluation result containing the winner and reasoning
@@ -43,11 +43,7 @@ module ActiveGenie
43
43
  { role: 'user', content: "criteria: #{@criteria}" }
44
44
  ]
45
45
 
46
- response = ::ActiveGenie::Providers::UnifiedProvider.function_calling(
47
- messages,
48
- FUNCTION,
49
- config: @config
50
- )
46
+ response = ::ActiveGenie::Providers::UnifiedProvider.function_calling(messages, FUNCTION, config:)
51
47
 
52
48
  response_formatted(response)
53
49
  end
@@ -72,7 +68,7 @@ module ActiveGenie
72
68
  end
73
69
 
74
70
  def log_comparator(comparator_response)
75
- @config.logger.call(
71
+ config.logger.call(
76
72
  code: :comparator,
77
73
  player_a: @player_a[0..30],
78
74
  player_b: @player_b[0..30],
@@ -80,6 +76,15 @@ module ActiveGenie
80
76
  **comparator_response.to_h
81
77
  )
82
78
  end
79
+
80
+ def config
81
+ @config ||= begin
82
+ c = ActiveGenie.configuration.merge(@initial_config)
83
+ c.llm.recommended_model = 'deepseek-chat' unless c.llm.recommended_model
84
+
85
+ c
86
+ end
87
+ end
83
88
  end
84
89
  end
85
90
  end
@@ -3,22 +3,12 @@
3
3
  module ActiveGenie
4
4
  module Config
5
5
  class LlmConfig
6
- attr_accessor :model, :temperature, :max_tokens, :max_retries, :retry_delay,
6
+ attr_accessor :model, :recommended_model, :temperature, :max_tokens, :max_retries, :retry_delay,
7
7
  :model_tier, :read_timeout, :open_timeout, :provider, :max_fibers
8
8
  attr_reader :provider_name
9
9
 
10
10
  def initialize
11
- @model = nil
12
- @provider_name = nil
13
- @provider = nil
14
- @temperature = 0
15
- @max_tokens = 4096
16
- @max_retries = nil
17
- @retry_delay = nil
18
- @model_tier = 'lower_tier'
19
- @read_timeout = nil
20
- @open_timeout = nil
21
- @max_fibers = 10
11
+ set_defaults
22
12
  end
23
13
 
24
14
  def provider_name=(provider_name)
@@ -34,6 +24,17 @@ module ActiveGenie
34
24
  end
35
25
  end
36
26
  end
27
+
28
+ private
29
+
30
+ def set_defaults
31
+ @model = @recommended_model = @provider_name = @provider = nil
32
+ @max_retries = @retry_delay = @read_timeout = @open_timeout = nil
33
+ @temperature = 0
34
+ @max_tokens = 4096
35
+ @model_tier = 'lower_tier'
36
+ @max_fibers = 10
37
+ end
37
38
  end
38
39
  end
39
40
  end
@@ -30,27 +30,6 @@ module ActiveGenie
30
30
  def anthropic_version
31
31
  @anthropic_version || '2023-06-01'
32
32
  end
33
-
34
- # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
35
- # Defaults to 'claude-3-haiku'.
36
- # @return [String] The lower tier model name.
37
- def lower_tier_model
38
- @lower_tier_model || 'claude-3-5-haiku-20241022'
39
- end
40
-
41
- # Retrieves the model name designated for the middle tier (e.g., balanced performance).
42
- # Defaults to 'claude-3-sonnet'.
43
- # @return [String] The middle tier model name.
44
- def middle_tier_model
45
- @middle_tier_model || 'claude-3-7-sonnet-20250219'
46
- end
47
-
48
- # Retrieves the model name designated for the upper tier (e.g., most capable).
49
- # Defaults to 'claude-3-opus'.
50
- # @return [String] The upper tier model name.
51
- def higher_tier_model
52
- @higher_tier_model || 'claude-3-opus-20240229'
53
- end
54
33
  end
55
34
  end
56
35
  end
@@ -23,27 +23,6 @@ module ActiveGenie
23
23
  def api_url
24
24
  @api_url || 'https://api.deepseek.com/v1'
25
25
  end
26
-
27
- # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
28
- # Defaults to 'deepseek-chat'.
29
- # @return [String] The lower tier model name.
30
- def lower_tier_model
31
- @lower_tier_model || 'deepseek-chat'
32
- end
33
-
34
- # Retrieves the model name designated for the middle tier (e.g., balanced performance).
35
- # Defaults to 'deepseek-chat'.
36
- # @return [String] The middle tier model name.
37
- def middle_tier_model
38
- @middle_tier_model || 'deepseek-chat'
39
- end
40
-
41
- # Retrieves the model name designated for the upper tier (e.g., most capable).
42
- # Defaults to 'deepseek-reasoner'.
43
- # @return [String] The upper tier model name.
44
- def higher_tier_model
45
- @higher_tier_model || 'deepseek-reasoner'
46
- end
47
26
  end
48
27
  end
49
28
  end
@@ -25,27 +25,6 @@ module ActiveGenie
25
25
  # The base URL here should be just the domain part.
26
26
  @api_url || 'https://generativelanguage.googleapis.com'
27
27
  end
28
-
29
- # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
30
- # Defaults to 'gemini-2.0-flash-lite'.
31
- # @return [String] The lower tier model name.
32
- def lower_tier_model
33
- @lower_tier_model || 'gemini-2.0-flash-lite'
34
- end
35
-
36
- # Retrieves the model name designated for the middle tier (e.g., balanced performance).
37
- # Defaults to 'gemini-2.0-flash'.
38
- # @return [String] The middle tier model name.
39
- def middle_tier_model
40
- @middle_tier_model || 'gemini-2.0-flash'
41
- end
42
-
43
- # Retrieves the model name designated for the upper tier (e.g., most capable).
44
- # Defaults to 'gemini-2.5-pro-experimental'.
45
- # @return [String] The upper tier model name.
46
- def higher_tier_model
47
- @higher_tier_model || 'gemini-2.5-pro-experimental'
48
- end
49
28
  end
50
29
  end
51
30
  end
@@ -23,27 +23,6 @@ module ActiveGenie
23
23
  def api_url
24
24
  @api_url || 'https://api.openai.com/v1'
25
25
  end
26
-
27
- # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
28
- # Defaults to 'gpt-4o-mini'.
29
- # @return [String] The lower tier model name.
30
- def lower_tier_model
31
- @lower_tier_model || 'gpt-4.1-mini'
32
- end
33
-
34
- # Retrieves the model name designated for the middle tier (e.g., balanced performance).
35
- # Defaults to 'gpt-4o'.
36
- # @return [String] The middle tier model name.
37
- def middle_tier_model
38
- @middle_tier_model || 'gpt-4.1'
39
- end
40
-
41
- # Retrieves the model name designated for the upper tier (e.g., most capable).
42
- # Defaults to 'o1-preview'.
43
- # @return [String] The upper tier model name.
44
- def higher_tier_model
45
- @higher_tier_model || 'o3-mini'
46
- end
47
26
  end
48
27
  end
49
28
  end
@@ -6,20 +6,7 @@ module ActiveGenie
6
6
  class ProviderBase
7
7
  NAME = :unknown
8
8
 
9
- attr_writer :api_key, :organization, :api_url, :client,
10
- :lower_tier_model, :middle_tier_model, :higher_tier_model
11
-
12
- # Maps a symbolic tier (:lower_tier, :middle_tier, :upper_tier) to a specific model name.
13
- # Falls back to the lower_tier_model if the tier is nil or unrecognized.
14
- # @param tier [Symbol, String, nil] The symbolic tier name.
15
- # @return [String] The corresponding model name.
16
- def tier_to_model(tier)
17
- {
18
- lower_tier: lower_tier_model,
19
- middle_tier: middle_tier_model,
20
- upper_tier: higher_tier_model
21
- }[tier&.to_sym] || lower_tier_model
22
- end
9
+ attr_writer :api_key, :organization, :api_url, :client
23
10
 
24
11
  # Returns a hash representation of the configuration.
25
12
  # @param config [Hash] Additional key-value pairs to merge into the hash.
@@ -29,9 +16,6 @@ module ActiveGenie
29
16
  name: NAME,
30
17
  api_key:,
31
18
  api_url:,
32
- lower_tier_model:,
33
- middle_tier_model:,
34
- higher_tier_model:,
35
19
  **config
36
20
  }
37
21
  end
@@ -62,27 +46,6 @@ module ActiveGenie
62
46
  def client
63
47
  raise NotImplementedError, 'Subclasses must implement this method'
64
48
  end
65
-
66
- # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
67
- # Defaults to 'gpt-4o-mini'.
68
- # @return [String] The lower tier model name.
69
- def lower_tier_model
70
- raise NotImplementedError, 'Subclasses must implement this method'
71
- end
72
-
73
- # Retrieves the model name designated for the middle tier (e.g., balanced performance).
74
- # Defaults to 'gpt-4o'.
75
- # @return [String] The middle tier model name.
76
- def middle_tier_model
77
- raise NotImplementedError, 'Subclasses must implement this method'
78
- end
79
-
80
- # Retrieves the model name designated for the upper tier (e.g., most capable).
81
- # Defaults to 'o1-preview'.
82
- # @return [String] The upper tier model name.
83
- def higher_tier_model
84
- raise NotImplementedError, 'Subclasses must implement this method'
85
- end
86
49
  end
87
50
  end
88
51
  end
@@ -24,20 +24,20 @@ module ActiveGenie
24
24
  @all.slice(*valid_provider_keys)
25
25
  end
26
26
 
27
- def add(provider_classes)
27
+ def add(provider_configs)
28
28
  @all ||= {}
29
- Array(provider_classes).each do |provider|
30
- name = provider::NAME
29
+ Array(provider_configs).each do |provider_config|
30
+ name = provider_config::NAME
31
31
  remove([name]) if @all.key?(name)
32
32
 
33
- @all[name] = provider.new
33
+ @all[name] = provider_config.new
34
34
  end
35
35
 
36
36
  self
37
37
  end
38
38
 
39
- def remove(provider_classes)
40
- Array(provider_classes).each do |provider|
39
+ def remove(provider_configs)
40
+ Array(provider_configs).each do |provider|
41
41
  @all.delete(provider::NAME)
42
42
  end
43
43
 
@@ -0,0 +1,42 @@
1
+ # frozen_string_literal: true
2
+
3
+ module ActiveGenie
4
+ class InvalidModelError < StandardError
5
+ TEXT = <<~TEXT
6
+ Invalid model: %<model>s
7
+
8
+ To configure ActiveGenie, you can either:
9
+ 1. Set up global configuration:
10
+ ```ruby
11
+ ActiveGenie.configure do |config|
12
+ config.providers.openai.api_key = 'your_api_key'
13
+ config.llm.model = 'gpt-5'
14
+ # ... other configuration options
15
+ end
16
+ ```
17
+
18
+ 2. Or pass configuration directly to the method call:
19
+ ```ruby
20
+ ActiveGenie::Extraction.call(
21
+ arg1,
22
+ arg2,
23
+ config: {
24
+ providers: {
25
+ openai: {
26
+ api_key: 'your_api_key'
27
+ }
28
+ },
29
+ llm: {
30
+ model: 'gpt-5'
31
+ }
32
+ }
33
+ )
34
+ ```
35
+
36
+ TEXT
37
+
38
+ def initialize(model)
39
+ super(format(TEXT, model:))
40
+ end
41
+ end
42
+ end
@@ -39,7 +39,7 @@ module ActiveGenie
39
39
  end
40
40
 
41
41
  def available_providers
42
- ActiveGenie.configuration.providers.all.keys.join(', ')
42
+ ActiveGenie.configuration.providers.valid.keys.join(', ')
43
43
  end
44
44
  end
45
45
  end
@@ -0,0 +1,23 @@
1
+ # frozen_string_literal: true
2
+
3
+ module ActiveGenie
4
+ class ProviderServerError < StandardError
5
+ TEXT = <<~TEXT.freeze
6
+ Provider server error: %<code>s
7
+ %<body>s
8
+
9
+ Providers errors are common and can occur for various reasons, such as:
10
+ - Invalid API key
11
+ - Exceeded usage limits
12
+ - Temporary server issues
13
+
14
+ Be ready to handle these errors gracefully in your application. We recommend implementing retry logic and exponential backoff strategies.
15
+ Usually async workers layer is the ideal place to handle such errors.
16
+ TEXT
17
+
18
+ def initialize(response)
19
+ @response = response
20
+ super(format(TEXT, **response))
21
+ end
22
+ end
23
+ end
@@ -31,7 +31,7 @@ module ActiveGenie
31
31
  def initialize(text, data_to_extract, config: {})
32
32
  @text = text
33
33
  @data_to_extract = data_to_extract
34
- @config = ActiveGenie.configuration.merge(config)
34
+ @initial_config = config
35
35
  end
36
36
 
37
37
  def call
@@ -54,7 +54,7 @@ module ActiveGenie
54
54
  private
55
55
 
56
56
  def data_to_extract_with_explanation
57
- return @data_to_extract unless @config.extractor.with_explanation
57
+ return @data_to_extract unless config.extractor.with_explanation
58
58
 
59
59
  with_explanation = {}
60
60
 
@@ -84,10 +84,10 @@ module ActiveGenie
84
84
  response = ::ActiveGenie::Providers::UnifiedProvider.function_calling(
85
85
  messages,
86
86
  function,
87
- config: @config
87
+ config: config
88
88
  )
89
89
 
90
- @config.logger.call(
90
+ config.logger.call(
91
91
  {
92
92
  code: :extractor,
93
93
  text: @text[0..30],
@@ -100,7 +100,7 @@ module ActiveGenie
100
100
  end
101
101
 
102
102
  def simplify_response(response)
103
- return response if @config.extractor.verbose
103
+ return response if config.extractor.verbose
104
104
 
105
105
  simplified_response = {}
106
106
 
@@ -115,12 +115,21 @@ module ActiveGenie
115
115
  end
116
116
 
117
117
  def min_accuracy
118
- @config.extractor.min_accuracy # default 70
118
+ config.extractor.min_accuracy # default 70
119
119
  end
120
120
 
121
121
  def prompt
122
122
  File.read(File.join(__dir__, 'explanation.prompt.md'))
123
123
  end
124
+
125
+ def config
126
+ @config ||= begin
127
+ c = ActiveGenie.configuration.merge(@initial_config)
128
+ c.llm.recommended_model = 'deepseek-chat' unless c.llm.recommended_model
129
+
130
+ c
131
+ end
132
+ end
124
133
  end
125
134
  end
126
135
  end
@@ -31,15 +31,13 @@ module ActiveGenie
31
31
  def initialize(text, data_to_extract, config: {})
32
32
  @text = text
33
33
  @data_to_extract = data_to_extract
34
- @config = ActiveGenie.configuration.merge(config)
34
+ @initial_config = config
35
35
  end
36
36
 
37
37
  def call
38
- response = Explanation.call(@text, extract_with_litote, config: @config)
38
+ response = Explanation.call(@text, extract_with_litote, config:)
39
39
 
40
- if response[:message_litote]
41
- response = Explanation.call(response[:litote_rephrased], @data_to_extract, config: @config)
42
- end
40
+ response = Explanation.call(response[:litote_rephrased], @data_to_extract, config:) if response[:message_litote]
43
41
 
44
42
  response
45
43
  end
@@ -51,6 +49,15 @@ module ActiveGenie
51
49
 
52
50
  @data_to_extract.merge(parameters)
53
51
  end
52
+
53
+ def config
54
+ @config ||= begin
55
+ c = ActiveGenie.configuration.merge(@initial_config)
56
+ c.llm.recommended_model = 'deepseek-chat' unless c.llm.recommended_model
57
+
58
+ c
59
+ end
60
+ end
54
61
  end
55
62
  end
56
63
  end
@@ -19,7 +19,7 @@ module ActiveGenie
19
19
  # @return [Array of strings] List of items
20
20
  def initialize(theme, config: {})
21
21
  @theme = theme
22
- @config = ActiveGenie.configuration.merge(config)
22
+ @initial_config = config
23
23
  end
24
24
 
25
25
  # @return [Array of strings] The list of items
@@ -33,7 +33,7 @@ module ActiveGenie
33
33
  response = ::ActiveGenie::Providers::UnifiedProvider.function_calling(
34
34
  messages,
35
35
  FUNCTION,
36
- config: @config
36
+ config:
37
37
  )
38
38
 
39
39
  log_feud(response)
@@ -46,16 +46,25 @@ module ActiveGenie
46
46
  private
47
47
 
48
48
  def number_of_items
49
- @config.lister.number_of_items
49
+ config.lister.number_of_items
50
50
  end
51
51
 
52
52
  def log_feud(response)
53
- @config.logger.call(
53
+ config.logger.call(
54
54
  code: :feud,
55
55
  theme: @theme[0..30],
56
56
  items: response['items'].map { |item| item[0..30] }
57
57
  )
58
58
  end
59
+
60
+ def config
61
+ @config ||= begin
62
+ c = ActiveGenie.configuration.merge(@initial_config)
63
+ c.llm.recommended_model = 'deepseek-chat' unless c.llm.recommended_model
64
+
65
+ c
66
+ end
67
+ end
59
68
  end
60
69
  end
61
70
  end
@@ -14,8 +14,7 @@ module ActiveGenie
14
14
  # @example Getting jury for technical content
15
15
  # Juries.call("Technical documentation about API design",
16
16
  # "Evaluate technical accuracy and clarity")
17
- # # => { jury1: "API Architect", jury2: "Technical Writer",
18
- # # jury3: "Developer Advocate", reasoning: "..." }
17
+ # # => [ "API Architect", "Technical Writer", "Developer Advocate" ]
19
18
  #
20
19
  class Juries
21
20
  def self.call(...)
@@ -30,7 +29,7 @@ module ActiveGenie
30
29
  def initialize(text, criteria, config: {})
31
30
  @text = text
32
31
  @criteria = criteria
33
- @config = ActiveGenie.configuration.merge(config)
32
+ @initial_config = config
34
33
  end
35
34
 
36
35
  def call
@@ -62,7 +61,7 @@ module ActiveGenie
62
61
  result = client.function_calling(
63
62
  messages,
64
63
  function,
65
- config: @config
64
+ config:
66
65
  )
67
66
 
68
67
  result['juries'] || []
@@ -77,6 +76,15 @@ module ActiveGenie
77
76
  def prompt
78
77
  @prompt ||= File.read(File.join(__dir__, 'juries.prompt.md'))
79
78
  end
79
+
80
+ def config
81
+ @config ||= begin
82
+ c = ActiveGenie.configuration.merge(@initial_config)
83
+ c.llm.recommended_model = 'deepseek-chat' unless c.llm.recommended_model
84
+
85
+ c
86
+ end
87
+ end
80
88
  end
81
89
  end
82
90
  end
@@ -84,7 +84,7 @@ module ActiveGenie
84
84
  end
85
85
 
86
86
  def model
87
- @config.llm.model || @config.providers.anthropic.tier_to_model(@config.llm.model_tier)
87
+ @config.llm.model
88
88
  end
89
89
 
90
90
  def headers
@@ -6,7 +6,6 @@ module ActiveGenie
6
6
  module Providers
7
7
  class BaseProvider
8
8
  class ProviderUnknownError < StandardError; end
9
- class ProviderServerError < StandardError; end
10
9
 
11
10
  DEFAULT_HEADERS = {
12
11
  'Content-Type': 'application/json',
@@ -174,7 +173,7 @@ module ActiveGenie
174
173
  begin
175
174
  response = yield
176
175
 
177
- raise ProviderServerError, "Provider server error: #{response.code} - #{response.body}" if !response.is_a?(Net::HTTPSuccess) && response.code.to_i >= 500
176
+ raise ActiveGenie::ProviderServerError, response if response&.code.to_i >= 500
178
177
 
179
178
  response
180
179
  rescue Net::OpenTimeout, Net::ReadTimeout, Errno::ECONNREFUSED, ProviderServerError => e
@@ -186,8 +185,8 @@ module ActiveGenie
186
185
  @config.logger.call(
187
186
  code: :retry_attempt,
188
187
  attempt: retries,
189
- max_retries: max_retries,
190
- delay: sleep_time,
188
+ max_retries:,
189
+ next_retry_in_seconds: sleep_time,
191
190
  error: e.message
192
191
  )
193
192
 
@@ -74,7 +74,7 @@ module ActiveGenie
74
74
  end
75
75
 
76
76
  def model
77
- @config.llm.model || provider_config.tier_to_model(@config.llm.model_tier)
77
+ @config.llm.model
78
78
  end
79
79
 
80
80
  def url
@@ -108,7 +108,7 @@ module ActiveGenie
108
108
  end
109
109
 
110
110
  def model
111
- @config.llm.model || provider_config.tier_to_model(@config.llm.model_tier)
111
+ @config.llm.model
112
112
  end
113
113
 
114
114
  def url
@@ -37,7 +37,7 @@ module ActiveGenie
37
37
  private
38
38
 
39
39
  def request(payload)
40
- response = post(url, payload, headers: headers)
40
+ response = post(url, payload, headers:)
41
41
 
42
42
  return nil if response.nil?
43
43
 
@@ -71,7 +71,7 @@ module ActiveGenie
71
71
  end
72
72
 
73
73
  def model
74
- @config.llm.model || provider_config.tier_to_model(@config.llm.model_tier)
74
+ @config.llm.model
75
75
  end
76
76
 
77
77
  def url
@@ -5,6 +5,7 @@ require_relative 'anthropic_provider'
5
5
  require_relative 'google_provider'
6
6
  require_relative 'deepseek_provider'
7
7
  require_relative '../errors/invalid_provider_error'
8
+ require_relative '../errors/invalid_model_error'
8
9
 
9
10
  module ActiveGenie
10
11
  module Providers
@@ -18,10 +19,8 @@ module ActiveGenie
18
19
  }.freeze
19
20
 
20
21
  def function_calling(messages, function, config: {})
21
- provider_name = config.llm.provider_name || config.providers.default
22
- provider = PROVIDER_NAME_TO_PROVIDER[provider_name.to_sym]
23
-
24
- raise ActiveGenie::InvalidProviderError, provider_name if provider.nil?
22
+ provider = provider(config)
23
+ define_llm_model(config)
25
24
 
26
25
  response = provider.new(config).function_calling(messages, function)
27
26
 
@@ -30,6 +29,32 @@ module ActiveGenie
30
29
 
31
30
  private
32
31
 
32
+ def provider(config)
33
+ provider_name = config.llm.provider_name || config.providers.default
34
+
35
+ unless config.providers.valid.keys.include?(provider_name.to_sym)
36
+ raise ActiveGenie::InvalidProviderError,
37
+ provider_name
38
+ end
39
+
40
+ provider = PROVIDER_NAME_TO_PROVIDER[provider_name.to_sym]
41
+
42
+ raise ActiveGenie::InvalidProviderError, provider_name if provider.nil?
43
+
44
+ provider
45
+ end
46
+
47
+ def define_llm_model(config)
48
+ if config.llm.model.nil?
49
+ raise ActiveGenie::InvalidModelError, 'nil' unless config.llm.recommended_model
50
+
51
+ config.llm.model = config.llm.recommended_model
52
+
53
+ end
54
+
55
+ config.llm.model
56
+ end
57
+
33
58
  def normalize_response(response)
34
59
  response.each do |key, value|
35
60
  response[key] = nil if ['null', 'none', 'undefined', '', 'unknown',
@@ -34,7 +34,7 @@ module ActiveGenie
34
34
  @text = text
35
35
  @criteria = criteria
36
36
  @param_juries = Array(juries).compact.uniq
37
- @config = ActiveGenie.configuration.merge(config)
37
+ @initial_config = config
38
38
  end
39
39
 
40
40
  def call
@@ -47,19 +47,19 @@ module ActiveGenie
47
47
  result = ::ActiveGenie::Providers::UnifiedProvider.function_calling(
48
48
  messages,
49
49
  build_function,
50
- config: @config
50
+ config:
51
51
  )
52
52
 
53
53
  result['final_score'] = 0 if result['final_score'].nil?
54
54
 
55
- @config.logger.call({
56
- code: :Scorer,
57
- text: @text[0..30],
58
- criteria: @criteria[0..30],
59
- juries: juries,
60
- score: result['final_score'],
61
- reasoning: result['final_reasoning']
62
- })
55
+ config.logger.call({
56
+ code: :Scorer,
57
+ text: @text[0..30],
58
+ criteria: @criteria[0..30],
59
+ juries: juries,
60
+ score: result['final_score'],
61
+ reasoning: result['final_reasoning']
62
+ })
63
63
 
64
64
  result
65
65
  end
@@ -113,9 +113,18 @@ module ActiveGenie
113
113
  @juries ||= if @param_juries.any?
114
114
  @param_juries
115
115
  else
116
- ::ActiveGenie::Lister::Juries.call(@text, @criteria, config: @config)
116
+ ::ActiveGenie::Lister::Juries.call(@text, @criteria, config:)
117
117
  end
118
118
  end
119
+
120
+ def config
121
+ @config ||= begin
122
+ c = ActiveGenie.configuration.merge(@initial_config)
123
+ c.llm.recommended_model = 'deepseek-chat' unless c.llm.recommended_model
124
+
125
+ c
126
+ end
127
+ end
119
128
  end
120
129
  end
121
130
  end
@@ -4,11 +4,9 @@ namespace :active_genie do
4
4
  desc 'Run benchmarks, optionally for a specific module (e.g., rake active_genie:benchmark[data_extractor])'
5
5
  task :benchmark, [:module_name] do |_, args|
6
6
  Rake::TestTask.new(:run_benchmarks) do |t|
7
- t.libs << 'benchmark'
8
-
9
7
  if args[:module_name]
10
8
  module_name = args[:module_name]
11
- module_path = "benchmark/test_cases/#{module_name}/"
9
+ module_path = "test/benchmark/#{module_name}/"
12
10
  t.test_files = FileList["#{module_path}**/*_test.rb"]
13
11
  puts "Running benchmarks for module: #{module_name}"
14
12
  else
@@ -5,9 +5,6 @@ ActiveGenie.configure do |config|
5
5
  # config.providers.openai.api_key = ENV['OPENAI_API_KEY']
6
6
  # config.providers.openai.organization = ENV['OPENAI_ORGANIZATION']
7
7
  # config.providers.openai.api_url = 'https://api.openai.com/v1'
8
- # config.providers.openai.lower_tier_model = 'gpt-4.1-mini'
9
- # config.providers.openai.middle_tier_model = 'gpt-4.1'
10
- # config.providers.openai.higher_tier_model = 'o3-mini'
11
8
  # config.providers.openai.client = ActiveGenie::Providers::Openai::Client.new(config)
12
9
 
13
10
  # example how add a new provider
data/lib/tasks/test.rake CHANGED
@@ -13,3 +13,6 @@ namespace :test do
13
13
 
14
14
  task default: %i[unit integration]
15
15
  end
16
+
17
+ desc 'Run all tests (unit and integration)'
18
+ task test: ['test:unit', 'test:integration']
metadata CHANGED
@@ -1,27 +1,26 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: active_genie
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.30.1
4
+ version: 0.30.3
5
5
  platform: ruby
6
6
  authors:
7
7
  - Radamés Roriz
8
- autorequire:
9
8
  bindir: bin
10
9
  cert_chain: []
11
- date: 2025-08-15 00:00:00.000000000 Z
10
+ date: 1980-01-02 00:00:00.000000000 Z
12
11
  dependencies:
13
12
  - !ruby/object:Gem::Dependency
14
13
  name: async
15
14
  requirement: !ruby/object:Gem::Requirement
16
15
  requirements:
17
- - - ">="
16
+ - - "~>"
18
17
  - !ruby/object:Gem::Version
19
18
  version: '2.0'
20
19
  type: :runtime
21
20
  prerelease: false
22
21
  version_requirements: !ruby/object:Gem::Requirement
23
22
  requirements:
24
- - - ">="
23
+ - - "~>"
25
24
  - !ruby/object:Gem::Version
26
25
  version: '2.0'
27
26
  description: |
@@ -59,7 +58,9 @@ files:
59
58
  - lib/active_genie/configs/scorer_config.rb
60
59
  - lib/active_genie/configuration.rb
61
60
  - lib/active_genie/errors/invalid_log_output_error.rb
61
+ - lib/active_genie/errors/invalid_model_error.rb
62
62
  - lib/active_genie/errors/invalid_provider_error.rb
63
+ - lib/active_genie/errors/provider_server_error.rb
63
64
  - lib/active_genie/extractor.rb
64
65
  - lib/active_genie/extractor/explanation.json
65
66
  - lib/active_genie/extractor/explanation.prompt.md
@@ -103,7 +104,6 @@ metadata:
103
104
  changelog_uri: https://github.com/Roriz/active_genie/blob/master/CHANGELOG.md
104
105
  bug_tracker_uri: https://github.com/Roriz/active_genie/issues
105
106
  rubygems_mfa_required: 'true'
106
- post_install_message:
107
107
  rdoc_options: []
108
108
  require_paths:
109
109
  - lib
@@ -118,8 +118,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
118
118
  - !ruby/object:Gem::Version
119
119
  version: '0'
120
120
  requirements: []
121
- rubygems_version: 3.4.20
122
- signing_key:
121
+ rubygems_version: 3.6.9
123
122
  specification_version: 4
124
123
  summary: 'The Lodash for GenAI: Real Value + Consistent + Model-Agnostic'
125
124
  test_files: []