active_genie 0.0.10 → 0.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +63 -57
  3. data/VERSION +1 -1
  4. data/lib/active_genie/battle/README.md +7 -7
  5. data/lib/active_genie/battle/basic.rb +75 -68
  6. data/lib/active_genie/battle.rb +4 -0
  7. data/lib/active_genie/clients/anthropic_client.rb +110 -0
  8. data/lib/active_genie/clients/google_client.rb +158 -0
  9. data/lib/active_genie/clients/helpers/retry.rb +29 -0
  10. data/lib/active_genie/clients/openai_client.rb +58 -38
  11. data/lib/active_genie/clients/unified_client.rb +5 -5
  12. data/lib/active_genie/concerns/loggable.rb +44 -0
  13. data/lib/active_genie/configuration/log_config.rb +1 -1
  14. data/lib/active_genie/configuration/providers/anthropic_config.rb +54 -0
  15. data/lib/active_genie/configuration/providers/base_config.rb +85 -0
  16. data/lib/active_genie/configuration/providers/deepseek_config.rb +54 -0
  17. data/lib/active_genie/configuration/providers/google_config.rb +56 -0
  18. data/lib/active_genie/configuration/providers/openai_config.rb +54 -0
  19. data/lib/active_genie/configuration/providers_config.rb +7 -4
  20. data/lib/active_genie/configuration/runtime_config.rb +35 -0
  21. data/lib/active_genie/configuration.rb +18 -4
  22. data/lib/active_genie/data_extractor/README.md +0 -1
  23. data/lib/active_genie/data_extractor/basic.rb +22 -19
  24. data/lib/active_genie/data_extractor/from_informal.rb +4 -15
  25. data/lib/active_genie/data_extractor.rb +4 -0
  26. data/lib/active_genie/logger.rb +60 -14
  27. data/lib/active_genie/{league → ranking}/README.md +7 -7
  28. data/lib/active_genie/ranking/elo_round.rb +134 -0
  29. data/lib/active_genie/ranking/free_for_all.rb +93 -0
  30. data/lib/active_genie/ranking/player.rb +92 -0
  31. data/lib/active_genie/{league → ranking}/players_collection.rb +19 -12
  32. data/lib/active_genie/ranking/ranking.rb +153 -0
  33. data/lib/active_genie/ranking/ranking_scoring.rb +71 -0
  34. data/lib/active_genie/ranking.rb +12 -0
  35. data/lib/active_genie/scoring/README.md +1 -1
  36. data/lib/active_genie/scoring/basic.rb +93 -49
  37. data/lib/active_genie/scoring/{recommended_reviews.rb → recommended_reviewers.rb} +18 -7
  38. data/lib/active_genie/scoring.rb +6 -3
  39. data/lib/active_genie.rb +1 -1
  40. data/lib/tasks/benchmark.rake +27 -0
  41. metadata +100 -100
  42. data/lib/active_genie/configuration/openai_config.rb +0 -56
  43. data/lib/active_genie/league/elo_ranking.rb +0 -121
  44. data/lib/active_genie/league/free_for_all.rb +0 -62
  45. data/lib/active_genie/league/league.rb +0 -120
  46. data/lib/active_genie/league/player.rb +0 -59
  47. data/lib/active_genie/league.rb +0 -12
data/lib/active_genie/clients/google_client.rb
@@ -0,0 +1,158 @@
+ require 'json'
+ require 'net/http'
+ require 'uri'
+ require_relative './helpers/retry'
+
+ module ActiveGenie
+   module Clients
+     # Client for interacting with the Google Generative Language API.
+     class GoogleClient
+       class GoogleError < StandardError; end
+       class RateLimitError < GoogleError; end
+
+       API_VERSION_PATH = '/v1beta/models'.freeze
+       DEFAULT_HEADERS = {
+         'Content-Type': 'application/json',
+       }.freeze
+
+       def initialize(config)
+         @app_config = config
+       end
+
+       # Requests structured JSON output from the Google Generative Language model based on a schema.
+       #
+       # @param messages [Array<Hash>] A list of messages representing the conversation history.
+       #   Each hash should have :role ('user' or 'model') and :content (String).
+       #   Google Generative Language uses 'user' and 'model' roles.
+       # @param function [Hash] A JSON schema definition describing the desired output format.
+       # @param model_tier [Symbol, nil] A symbolic representation of the model quality/size tier.
+       # @param config [Hash] Optional configuration overrides:
+       #   - :api_key [String] Override the default API key.
+       #   - :model [String] Override the model name directly.
+       #   - :max_retries [Integer] Max retries for the request.
+       #   - :retry_delay [Integer] Initial delay for retries.
+       # @return [Hash, nil] The parsed JSON object matching the schema, or nil if parsing fails or content is empty.
+       def function_calling(messages, function, model_tier: nil, config: {})
+         model = config[:runtime][:model] || @app_config.tier_to_model(model_tier)
+         api_key = config[:runtime][:api_key] || @app_config.api_key
+
+         contents = convert_messages_to_contents(messages, function)
+         contents << output_as_json_schema(function)
+
+         payload = {
+           contents: contents,
+           generationConfig: {
+             response_mime_type: "application/json",
+             temperature: 0.1
+           }
+         }
+
+         url = URI("#{@app_config.api_url}#{API_VERSION_PATH}/#{model}:generateContent?key=#{api_key}")
+
+         retry_with_backoff(config:) do
+           response = request(url, payload, model, config:)
+
+           json_string = response&.dig('candidates', 0, 'content', 'parts', 0, 'text')
+
+           return nil if json_string.nil? || json_string.empty?
+
+           parsed_response = JSON.parse(json_string)
+
+           ActiveGenie::Logger.trace({ code: :function_calling, payload:, parsed_response: })
+
+           normalize_function_output(parsed_response)
+         end
+       end
+
+       private
+
+       def normalize_function_output(output)
+         output = if output.is_a?(Array)
+                    output.dig(0, 'properties') || output.dig(0)
+                  else
+                    output
+                  end
+
+         output.dig('input_schema', 'properties') || output
+       end
+
+       def request(url, payload, model, config:)
+         start_time = Time.now
+
+         retry_with_backoff(config:) do
+           response = Net::HTTP.post(url, payload.to_json, DEFAULT_HEADERS)
+
+           case response
+           when Net::HTTPSuccess
+             return nil if response.body.nil? || response.body.empty?
+
+             parsed_body = JSON.parse(response.body)
+
+             usage_metadata = parsed_body['usageMetadata'] || {}
+             prompt_tokens = usage_metadata['promptTokenCount'] || 0
+             candidates_tokens = usage_metadata['candidatesTokenCount'] || 0
+             total_tokens = usage_metadata['totalTokenCount'] || (prompt_tokens + candidates_tokens)
+
+             ActiveGenie::Logger.trace({
+               code: :llm_usage,
+               input_tokens: prompt_tokens,
+               output_tokens: candidates_tokens,
+               total_tokens: total_tokens,
+               model: model,
+               duration: Time.now - start_time,
+               usage: usage_metadata # Log the whole usage block
+             })
+
+             parsed_body
+
+           when Net::HTTPTooManyRequests
+             # Rate Limit Error
+             raise RateLimitError, "Google API rate limit exceeded (HTTP 429): #{response.body}"
+
+           else
+             # Other Errors
+             raise GoogleError, "Google API error (HTTP #{response.code}): #{response.body}"
+           end
+         end
+       rescue JSON::ParserError => e
+         raise GoogleError, "Failed to parse Google API response body: #{e.message} - Body: #{response&.body}"
+       end
+
+       ROLE_TO_GOOGLE_ROLE = {
+         user: 'user',
+         assistant: 'model',
+       }.freeze
+
+       # Converts standard message format to Google's 'contents' format
+       # and injects JSON schema instructions.
+       # @param messages [Array<Hash>] Array of { role: 'user'/'assistant'/'system', content: '...' }
+       # @param function_schema [Hash] The JSON schema for the desired output.
+       # @return [Array<Hash>] Array formatted for Google's 'contents' field.
+       def convert_messages_to_contents(messages, function_schema)
+         messages.map do |message|
+           {
+             role: ROLE_TO_GOOGLE_ROLE[message[:role].to_sym] || 'user',
+             parts: [{ text: message[:content] }]
+           }
+         end
+       end
+
+       def output_as_json_schema(function_schema)
+         json_instruction = <<~PROMPT
+           Generate a JSON object that strictly adheres to the following JSON schema:
+
+           ```json
+           #{JSON.pretty_generate(function_schema)}
+           ```
+
+           IMPORTANT: Only output the raw JSON object. Do not include any other text, explanations, or markdown formatting like ```json ... ``` wrappers around the final output.
+         PROMPT
+
+         {
+           role: 'user',
+           parts: [{ text: json_instruction }]
+         }
+       end
+     end
+   end
+ end
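
For orientation, a minimal usage sketch of the new client (the configuration path, schema, and messages below are illustrative assumptions, not code from the gem). Note that function_calling dereferences config[:runtime], so callers are expected to pass at least an empty :runtime hash:

    # Hypothetical direct usage; in the gem the client is normally reached through
    # ActiveGenie.configuration.providers rather than instantiated by hand.
    config = ActiveGenie::Configuration::Providers::GoogleConfig.new
    config.api_key = ENV['GENERATIVE_LANGUAGE_GOOGLE_API_KEY']

    client = ActiveGenie::Clients::GoogleClient.new(config)
    schema = {
      name: 'extract_person',
      parameters: {
        type: 'object',
        properties: { name: { type: 'string' }, age: { type: 'integer' } }
      }
    }
    messages = [{ role: 'user', content: 'John is 42 years old.' }]

    client.function_calling(messages, schema, model_tier: :lower_tier, config: { runtime: {} })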
data/lib/active_genie/clients/helpers/retry.rb
@@ -0,0 +1,29 @@
+ MAX_RETRIES = 3
+ BASE_DELAY = 0.5
+
+ def retry_with_backoff(config: {})
+   retries = config[:runtime][:max_retries] || MAX_RETRIES
+
+   begin
+     yield
+   rescue => e
+     if retries > 0
+       ActiveGenie::Logger.warn({ code: :retry_with_backoff, message: "Retrying request after error: #{e.message}. Attempts remaining: #{retries}" })
+
+       retries -= 1
+       backoff_time = calculate_backoff(MAX_RETRIES - retries)
+       sleep(backoff_time)
+       retry
+     else
+       raise
+     end
+   end
+ end
+
+ def calculate_backoff(retry_count)
+   # Exponential backoff with jitter: 2^retry_count + random jitter
+   # Base delay is 0.5 seconds, doubles each retry, plus up to 0.5 seconds of random jitter
+   # Simplified example: 0.5, 1, 2, 4, 8, 12, 16, 20, 24, 28, 30 seconds
+   jitter = rand * BASE_DELAY
+   [BASE_DELAY * (2 ** retry_count) + jitter, 30].min # Cap at 30 seconds
+ end
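
Ignoring jitter, the delays this helper actually produces with the default MAX_RETRIES of 3 are roughly 1s, 2s, and 4s (calculate_backoff is called with 1, 2, 3 on successive retries), capped at 30 seconds. A quick sketch of that schedule:

    # Reproduces the delay schedule of calculate_backoff without the random jitter term.
    BASE_DELAY = 0.5
    (1..3).map { |attempt| [BASE_DELAY * (2**attempt), 30].min } # => [1.0, 2.0, 4.0]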
data/lib/active_genie/clients/openai_client.rb
@@ -1,77 +1,97 @@
  require 'json'
  require 'net/http'
 
+ require_relative './helpers/retry'
+
  module ActiveGenie::Clients
-   class OpenaiClient
+   class OpenaiClient
+     class OpenaiError < StandardError; end
+     class RateLimitError < OpenaiError; end
+     class InvalidResponseError < StandardError; end
+
      def initialize(config)
        @app_config = config
      end
 
-     def function_calling(messages, function, config: {})
-       model = config[:model]
-       model = @app_config.tier_to_model(config.dig(:all_providers, :model_tier)) if model.nil? && config.dig(:all_providers, :model_tier)
-       model = @app_config.lower_tier_model if model.nil?
+     # Requests structured JSON output from the Gemini model based on a schema.
+     #
+     # @param messages [Array<Hash>] A list of messages representing the conversation history.
+     #   Each hash should have :role ('user' or 'model') and :content (String).
+     #   Gemini uses 'user' and 'model' roles.
+     # @param function [Hash] A JSON schema definition describing the desired output format.
+     # @param model_tier [Symbol, nil] A symbolic representation of the model quality/size tier.
+     # @param config [Hash] Optional configuration overrides:
+     #   - :api_key [String] Override the default API key.
+     #   - :model [String] Override the model name directly.
+     #   - :max_retries [Integer] Max retries for the request.
+     #   - :retry_delay [Integer] Initial delay for retries.
+     # @return [Hash, nil] The parsed JSON object matching the schema, or nil if parsing fails or content is empty.
+     def function_calling(messages, function, model_tier: nil, config: {})
+       model = config[:runtime][:model] || @app_config.tier_to_model(model_tier)
 
        payload = {
          messages:,
-         response_format: {
-           type: 'json_schema',
-           json_schema: function
-         },
+         tools: [{ type: 'function', function: }],
+         tool_choice: { type: 'function', function: { name: function[:name] } },
+         stream: false,
          model:,
        }
 
-       api_key = config[:api_key] || @app_config.api_key
+       api_key = config[:runtime][:api_key] || @app_config.api_key
        headers = DEFAULT_HEADERS.merge(
          'Authorization': "Bearer #{api_key}"
        ).compact
 
-       response = request(payload, headers, config:)
+       retry_with_backoff(config:) do
+         response = request(payload, headers, config:)
+
+         parsed_response = JSON.parse(response.dig('choices', 0, 'message', 'tool_calls', 0, 'function', 'arguments'))
+         parsed_response = parsed_response.dig('message') || parsed_response
+
+         raise InvalidResponseError, "Invalid response: #{parsed_response}" if parsed_response.nil? || parsed_response.keys.size.zero?
+
+         ActiveGenie::Logger.trace({code: :function_calling, payload:, parsed_response: })
 
-       parsed_response = JSON.parse(response.dig('choices', 0, 'message', 'content'))
-       parsed_response.dig('properties') || parsed_response
-     rescue JSON::ParserError
-       nil
+         parsed_response
+       end
      end
 
      private
 
+     DEFAULT_HEADERS = {
+       'Content-Type': 'application/json',
+     }
+
      def request(payload, headers, config:)
        start_time = Time.now
+
        response = Net::HTTP.post(
          URI("#{@app_config.api_url}/chat/completions"),
          payload.to_json,
          headers
        )
 
+       if response.is_a?(Net::HTTPTooManyRequests)
+         raise RateLimitError, "OpenAI API rate limit exceeded: #{response.body}"
+       end
+
        raise OpenaiError, response.body unless response.is_a?(Net::HTTPSuccess)
+
        return nil if response.body.empty?
 
        parsed_body = JSON.parse(response.body)
-       log_response(start_time, parsed_body, config:)
 
-       parsed_body
-     end
+       ActiveGenie::Logger.trace({
+         code: :llm_usage,
+         input_tokens: parsed_body.dig('usage', 'prompt_tokens'),
+         output_tokens: parsed_body.dig('usage', 'completion_tokens'),
+         total_tokens: parsed_body.dig('usage', 'prompt_tokens') + parsed_body.dig('usage', 'completion_tokens'),
+         model: payload[:model],
+         duration: Time.now - start_time,
+         usage: parsed_body.dig('usage')
+       })
 
-     DEFAULT_HEADERS = {
-       'Content-Type': 'application/json',
-     }
-
-     def log_response(start_time, response, config:)
-       ActiveGenie::Logger.trace(
-         {
-           **config.dig(:log),
-           category: :llm,
-           trace: "#{config.dig(:log, :trace)}/#{self.class.name}",
-           total_tokens: response.dig('usage', 'total_tokens'),
-           model: response.dig('model'),
-           request_duration: Time.now - start_time,
-           openai: response
-         }
-       )
+       parsed_body
      end
-
-     # TODO: add some more rich error handling
-     class OpenaiError < StandardError; end
    end
-   end
+ end
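
The client now asks for structured output through tool calls rather than response_format. A hedged sketch of the request body it builds, using a hypothetical schema, and where the structured arguments are read from:

    # Field names mirror the diff above; the schema and model are illustrative.
    function = { name: 'rate_text', parameters: { type: 'object', properties: { score: { type: 'number' } } } }
    payload = {
      messages: [{ role: 'user', content: 'Rate this text.' }],
      tools: [{ type: 'function', function: function }],
      tool_choice: { type: 'function', function: { name: function[:name] } },
      stream: false,
      model: 'gpt-4o-mini'
    }
    # The parsed result comes from:
    # response.dig('choices', 0, 'message', 'tool_calls', 0, 'function', 'arguments')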
data/lib/active_genie/clients/unified_client.rb
@@ -1,13 +1,13 @@
  module ActiveGenie::Clients
    class UnifiedClient
      class << self
-       def function_calling(messages, function, config: {})
-         provider_name = config[:provider]&.downcase&.strip&.to_sym
-         provider = ActiveGenie.configuration.providers.all[provider_name] || ActiveGenie.configuration.providers.default
+       def function_calling(messages, function, model_tier: nil, config: {})
+         provider_name = config[:runtime][:provider]&.to_s&.downcase&.strip&.to_sym || ActiveGenie.configuration.providers.default
+         provider_instance = ActiveGenie.configuration.providers.valid[provider_name]
 
-         raise InvalidProviderError if provider.nil? || provider.client.nil?
+         raise InvalidProviderError if provider_instance.nil? || provider_instance.client.nil?
 
-         provider.client.function_calling(messages, function, config:)
+         provider_instance.client.function_calling(messages, function, model_tier:, config:)
        end
 
        private
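
Provider selection now reads the runtime override first and falls back to the configured default; a small sketch of the lookup, assuming a hypothetical runtime value:

    config = { runtime: { provider: 'Google' } }
    provider_name = config[:runtime][:provider]&.to_s&.downcase&.strip&.to_sym # => :google
    # With no override, ActiveGenie.configuration.providers.default is used instead,
    # and the client is then fetched from ActiveGenie.configuration.providers.valid[provider_name].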
data/lib/active_genie/concerns/loggable.rb
@@ -0,0 +1,44 @@
+ module ActiveGenie
+   module Concerns
+     module Loggable
+       def self.included(base)
+         base.extend(ClassMethods)
+       end
+
+       module ClassMethods
+         def with_logging_context(context_method, observer_proc = nil)
+           original_method = instance_method(:call)
+
+           define_method(:call) do |*args, **kwargs, &block|
+             context = send(context_method, *args, **kwargs)
+             bound_observer = observer_proc ? ->(log) { instance_exec(log, &observer_proc) } : nil
+
+             ActiveGenie::Logger.with_context(context, observer: bound_observer) do
+               original_method.bind(self).call(*args, **kwargs, &block)
+             end
+           end
+         end
+       end
+
+       def info(log)
+         ::ActiveGenie::Logger.info(log)
+       end
+
+       def error(log)
+         ::ActiveGenie::Logger.error(log)
+       end
+
+       def warn(log)
+         ::ActiveGenie::Logger.warn(log)
+       end
+
+       def debug(log)
+         ::ActiveGenie::Logger.debug(log)
+       end
+
+       def trace(log)
+         ::ActiveGenie::Logger.trace(log)
+       end
+     end
+   end
+ end
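
A minimal sketch of how a service object might adopt this concern; the class and context method below are hypothetical. Because with_logging_context rebinds the existing #call, it has to be invoked after #call is defined:

    class ScoreText
      include ActiveGenie::Concerns::Loggable

      def initialize(text)
        @text = text
      end

      def call
        info({ code: :scoring_started, text_size: @text.size })
        # ... perform the scoring ...
      end

      # Wraps #call so it runs inside ActiveGenie::Logger.with_context(log_context).
      with_logging_context :log_context

      private

      def log_context
        { scope: :score_text }
      end
    end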
data/lib/active_genie/configuration/log_config.rb
@@ -8,7 +8,7 @@ module ActiveGenie::Configuration
      end
 
      def to_h(config = {})
-       { log_level:, **config }
+       { log_level: }.merge(config)
      end
    end
  end
data/lib/active_genie/configuration/providers/anthropic_config.rb
@@ -0,0 +1,54 @@
+ require_relative '../../clients/anthropic_client'
+ require_relative './base_config'
+
+ module ActiveGenie
+   module Configuration::Providers
+     # Configuration class for the Anthropic API client.
+     # Manages API keys, URLs, model selections, and client instantiation.
+     class AnthropicConfig < BaseConfig
+       NAME = :anthropic
+
+       # Retrieves the API key.
+       # Falls back to the ANTHROPIC_API_KEY environment variable if not set.
+       # @return [String, nil] The API key.
+       def api_key
+         @api_key || ENV['ANTHROPIC_API_KEY']
+       end
+
+       # Retrieves the base API URL for Anthropic API.
+       # Defaults to 'https://api.anthropic.com'.
+       # @return [String] The API base URL.
+       def api_url
+         @api_url || 'https://api.anthropic.com'
+       end
+
+       # Lazily initializes and returns an instance of the AnthropicClient.
+       # Passes itself (the config object) to the client's constructor.
+       # @return [ActiveGenie::Clients::AnthropicClient] The client instance.
+       def client
+         @client ||= ::ActiveGenie::Clients::AnthropicClient.new(self)
+       end
+
+       # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
+       # Defaults to 'claude-3-haiku'.
+       # @return [String] The lower tier model name.
+       def lower_tier_model
+         @lower_tier_model || 'claude-3-5-haiku-20241022'
+       end
+
+       # Retrieves the model name designated for the middle tier (e.g., balanced performance).
+       # Defaults to 'claude-3-sonnet'.
+       # @return [String] The middle tier model name.
+       def middle_tier_model
+         @middle_tier_model || 'claude-3-7-sonnet-20250219'
+       end
+
+       # Retrieves the model name designated for the upper tier (e.g., most capable).
+       # Defaults to 'claude-3-opus'.
+       # @return [String] The upper tier model name.
+       def upper_tier_model
+         @upper_tier_model || 'claude-3-opus-20240229'
+       end
+     end
+   end
+ end
data/lib/active_genie/configuration/providers/base_config.rb
@@ -0,0 +1,85 @@
+ module ActiveGenie
+   module Configuration::Providers
+     class BaseConfig
+       NAME = :unknown
+
+       attr_writer :api_key, :organization, :api_url, :client,
+                   :lower_tier_model, :middle_tier_model, :upper_tier_model
+
+       # Maps a symbolic tier (:lower_tier, :middle_tier, :upper_tier) to a specific model name.
+       # Falls back to the lower_tier_model if the tier is nil or unrecognized.
+       # @param tier [Symbol, String, nil] The symbolic tier name.
+       # @return [String] The corresponding model name.
+       def tier_to_model(tier)
+         {
+           lower_tier: lower_tier_model,
+           middle_tier: middle_tier_model,
+           upper_tier: upper_tier_model
+         }[tier&.to_sym] || lower_tier_model
+       end
+
+       # Returns a hash representation of the configuration.
+       # @param config [Hash] Additional key-value pairs to merge into the hash.
+       # @return [Hash] The configuration settings as a hash.
+       def to_h(config = {})
+         {
+           name: NAME,
+           api_key:,
+           api_url:,
+           lower_tier_model:,
+           middle_tier_model:,
+           upper_tier_model:,
+           **config
+         }
+       end
+
+       # Validates the configuration.
+       # @return [Boolean] True if the configuration is valid, false otherwise.
+       def valid?
+         api_key && api_url
+       end
+
+       # Retrieves the API key.
+       # Falls back to the OPENAI_API_KEY environment variable if not set.
+       # @return [String, nil] The API key.
+       def api_key
+         raise NotImplementedError, "Subclasses must implement this method"
+       end
+
+       # Retrieves the base API URL for OpenAI API.
+       # Defaults to 'https://api.openai.com/v1'.
+       # @return [String] The API base URL.
+       def api_url
+         raise NotImplementedError, "Subclasses must implement this method"
+       end
+
+       # Lazily initializes and returns an instance of the OpenaiClient.
+       # Passes itself (the config object) to the client's constructor.
+       # @return [ActiveGenie::Clients::OpenaiClient] The client instance.
+       def client
+         raise NotImplementedError, "Subclasses must implement this method"
+       end
+
+       # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
+       # Defaults to 'gpt-4o-mini'.
+       # @return [String] The lower tier model name.
+       def lower_tier_model
+         raise NotImplementedError, "Subclasses must implement this method"
+       end
+
+       # Retrieves the model name designated for the middle tier (e.g., balanced performance).
+       # Defaults to 'gpt-4o'.
+       # @return [String] The middle tier model name.
+       def middle_tier_model
+         raise NotImplementedError, "Subclasses must implement this method"
+       end
+
+       # Retrieves the model name designated for the upper tier (e.g., most capable).
+       # Defaults to 'o1-preview'.
+       # @return [String] The upper tier model name.
+       def upper_tier_model
+         raise NotImplementedError, "Subclasses must implement this method"
+       end
+     end
+   end
+ end
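
Concrete provider configs only override the accessors; tier resolution lives here. A short sketch of how tiers resolve, using the AnthropicConfig defaults shown earlier (an unknown or nil tier falls back to lower_tier_model):

    config = ActiveGenie::Configuration::Providers::AnthropicConfig.new
    config.tier_to_model(:middle_tier) # => 'claude-3-7-sonnet-20250219'
    config.tier_to_model(:upper_tier)  # => 'claude-3-opus-20240229'
    config.tier_to_model(nil)          # => 'claude-3-5-haiku-20241022' (lower_tier_model fallback)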
data/lib/active_genie/configuration/providers/deepseek_config.rb
@@ -0,0 +1,54 @@
+ require_relative '../../clients/openai_client'
+ require_relative './base_config'
+
+ module ActiveGenie
+   module Configuration::Providers
+     # Configuration class for the DeepSeek API client.
+     # Manages API keys, organization IDs, URLs, model selections, and client instantiation.
+     class DeepseekConfig < BaseConfig
+       NAME = :deepseek
+
+       # Retrieves the API key.
+       # Falls back to the DEEPSEEK_API_KEY environment variable if not set.
+       # @return [String, nil] The API key.
+       def api_key
+         @api_key || ENV['DEEPSEEK_API_KEY']
+       end
+
+       # Retrieves the base API URL for DeepSeek API.
+       # Defaults to 'https://api.deepseek.com/v1'.
+       # @return [String] The API base URL.
+       def api_url
+         @api_url || 'https://api.deepseek.com/v1'
+       end
+
+       # Lazily initializes and returns an instance of the OpenaiClient.
+       # Passes itself (the config object) to the client's constructor.
+       # @return [ActiveGenie::Clients::OpenaiClient] The client instance.
+       def client
+         @client ||= ::ActiveGenie::Clients::OpenaiClient.new(self)
+       end
+
+       # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
+       # Defaults to 'deepseek-chat'.
+       # @return [String] The lower tier model name.
+       def lower_tier_model
+         @lower_tier_model || 'deepseek-chat'
+       end
+
+       # Retrieves the model name designated for the middle tier (e.g., balanced performance).
+       # Defaults to 'deepseek-chat'.
+       # @return [String] The middle tier model name.
+       def middle_tier_model
+         @middle_tier_model || 'deepseek-chat'
+       end
+
+       # Retrieves the model name designated for the upper tier (e.g., most capable).
+       # Defaults to 'deepseek-reasoner'.
+       # @return [String] The upper tier model name.
+       def upper_tier_model
+         @upper_tier_model || 'deepseek-reasoner'
+       end
+     end
+   end
+ end
data/lib/active_genie/configuration/providers/google_config.rb
@@ -0,0 +1,56 @@
+ require_relative '../../clients/google_client'
+ require_relative './base_config'
+
+ module ActiveGenie
+   module Configuration::Providers
+     # Configuration class for the Google Generative Language API client.
+     # Manages API keys, URLs, model selections, and client instantiation.
+     class GoogleConfig < BaseConfig
+       NAME = :google
+
+       # Retrieves the API key.
+       # Falls back to the GENERATIVE_LANGUAGE_GOOGLE_API_KEY environment variable if not set.
+       # @return [String, nil] The API key.
+       def api_key
+         @api_key || ENV['GENERATIVE_LANGUAGE_GOOGLE_API_KEY'] || ENV['GEMINI_API_KEY']
+       end
+
+       # Retrieves the base API URL for Google Generative Language API.
+       # Defaults to 'https://generativelanguage.googleapis.com'.
+       # @return [String] The API base URL.
+       def api_url
+         # Note: Google Generative Language API uses a specific path structure like /v1beta/models/{model}:generateContent
+         # The base URL here should be just the domain part.
+         @api_url || 'https://generativelanguage.googleapis.com'
+       end
+
+       # Lazily initializes and returns an instance of the GoogleClient.
+       # Passes itself (the config object) to the client's constructor.
+       # @return [ActiveGenie::Clients::GoogleClient] The client instance.
+       def client
+         @client ||= ::ActiveGenie::Clients::GoogleClient.new(self)
+       end
+
+       # Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
+       # Defaults to 'gemini-2.0-flash-lite'.
+       # @return [String] The lower tier model name.
+       def lower_tier_model
+         @lower_tier_model || 'gemini-2.0-flash-lite'
+       end
+
+       # Retrieves the model name designated for the middle tier (e.g., balanced performance).
+       # Defaults to 'gemini-2.0-flash'.
+       # @return [String] The middle tier model name.
+       def middle_tier_model
+         @middle_tier_model || 'gemini-2.0-flash'
+       end
+
+       # Retrieves the model name designated for the upper tier (e.g., most capable).
+       # Defaults to 'gemini-2.5-pro-experimental'.
+       # @return [String] The upper tier model name.
+       def upper_tier_model
+         @upper_tier_model || 'gemini-2.5-pro-experimental'
+       end
+     end
+   end
+ end