active_genie 0.0.18 → 0.0.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +59 -1
- data/VERSION +1 -1
- data/lib/active_genie/battle/basic.rb +2 -10
- data/lib/active_genie/clients/anthropic_client.rb +69 -95
- data/lib/active_genie/clients/base_client.rb +241 -0
- data/lib/active_genie/clients/google_client.rb +105 -128
- data/lib/active_genie/clients/openai_client.rb +45 -44
- data/lib/active_genie/configuration/providers/internal_company_api_config.rb +54 -0
- data/lib/active_genie/data_extractor/basic.rb +1 -1
- data/lib/active_genie/ranking/ranking.rb +1 -1
- data/lib/active_genie/scoring/basic.rb +1 -1
- data/lib/active_genie/scoring/recommended_reviewers.rb +3 -2
- data/lib/tasks/templates/active_genie.rb +1 -1
- metadata +26 -6
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d46586e4c64fdba00b703d32d2ee5eaad16e53e38f01adadb1a6090f049118d1
+  data.tar.gz: f8420784cfd4b4ce21b2217af31d3e46252d623a65f0e93194027d9fd416b8ca
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7ab8e0d11276d5805d9ea3568c4f0c2a15d23fed01d8d762b8750cb6a2bd2f6a698a929b13b6bdf8da30d4b2b81187ab61160be9250b7e709b827389f925a9e2
+  data.tar.gz: a6cddf62c9f6f19fe6fbe6982e8c6a2097b644280d0d86fea19e85da507e91b8dab3299304f182246f84c1cfa7c0e2e003c19781d827b66f15a5c34d10e36da5
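As an optional check (not part of the diff), the SHA256 digests above can be recomputed locally after `gem fetch active_genie -v 0.0.20`; this sketch assumes the fetched `.gem` file sits in the current directory:

```ruby
# Recompute the SHA256 checksums of the archives packed inside the fetched .gem
# and compare them with the values published in checksums.yaml above.
require 'digest'
require 'rubygems/package'

File.open('active_genie-0.0.20.gem', 'rb') do |gem_file|
  tar = Gem::Package::TarReader.new(gem_file)
  tar.each do |entry|
    next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

    puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
  end
end
```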
data/README.md
CHANGED
@@ -137,7 +137,7 @@ result = ActiveGenie::Battle.call(
 # }
 ```

-*Recommended model*: `
+*Recommended model*: `claude-3-5-haiku`

 Features:
 - Multi-reviewer evaluation with automatic expert selection
@@ -216,6 +216,64 @@ See the [Benchmark README](benchmark/README.md) for detailed results, methodolog

 > **Note:** Each module can append its own set of configuration, see the individual module documentation for details.

+## How to create a new provider
+
+ActiveGenie supports adding custom providers to integrate with different LLM services. To create a new provider:
+
+1. Create a configuration class for your provider in `lib/active_genie/configuration/providers/`:
+
+```ruby
+# Example: lib/active_genie/configuration/providers/internal_company_api_config.rb
+module ActiveGenie
+  module Configuration::Providers
+    class InternalCompanyApiConfig < BaseConfig
+      NAME = :internal_company_api
+
+      # API key accessor with environment variable fallback
+      def api_key
+        @api_key || ENV['INTERNAL_COMPANY_API_KEY']
+      end
+
+      # Base API URL
+      def api_url
+        @api_url || 'https://api.internal-company.com/v1'
+      end
+
+      # Client instantiation
+      def client
+        @client ||= ::ActiveGenie::Clients::InternalCompanyApiClient.new(self)
+      end
+
+      # Model tier definitions
+      def lower_tier_model
+        @lower_tier_model || 'internal-basic'
+      end
+
+      def middle_tier_model
+        @middle_tier_model || 'internal-standard'
+      end
+
+      def upper_tier_model
+        @upper_tier_model || 'internal-premium'
+      end
+    end
+  end
+end
+```
+
+2. Register your provider in your configuration:
+
+```ruby
+# In config/initializers/active_genie.rb
+ActiveGenie.configure do |config|
+  # Register your custom provider
+  config.providers.register(InternalCompanyApi::Configuration)
+
+  # Configure your provider
+  config.internal_company_api.api_key = ENV['INTERNAL_COMPANY_API_KEY']
+end
+```
+
 ## Contributing

 1. Fork the repository
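The configuration class in this new README section instantiates an `InternalCompanyApiClient`, which neither the walkthrough nor this diff defines. A rough sketch of what such a client could look like on top of the new `BaseClient` introduced in this release (see `base_client.rb` below); the endpoint path, payload shape and response key are invented for illustration:

```ruby
# Hypothetical lib/active_genie/clients/internal_company_api_client.rb (sketch only).
require_relative './base_client'

module ActiveGenie
  module Clients
    class InternalCompanyApiClient < BaseClient
      class InternalCompanyApiError < ClientError; end

      COMPLETIONS_ENDPOINT = '/completions' # assumed endpoint, not defined by the gem

      def function_calling(messages, function, model_tier: nil, config: {})
        model = config[:runtime][:model] || @app_config.tier_to_model(model_tier)

        payload = { model: model, messages: messages, tool: function } # assumed payload shape
        headers = { 'Authorization': "Bearer #{@app_config.api_key}" }.compact

        # post and retry_with_backoff come from BaseClient: URI building, JSON parsing,
        # rate-limit handling and exponential backoff are implemented there.
        retry_with_backoff(config) do
          response = post(COMPLETIONS_ENDPOINT, payload, headers: headers, config: config)
          response&.dig('output') # assumed response key
        end
      end
    end
  end
end
```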
data/VERSION
CHANGED
@@ -1 +1 @@
-0.0.
+0.0.20
data/lib/active_genie/battle/basic.rb
CHANGED
@@ -94,7 +94,7 @@ module ActiveGenie::Battle
 FUNCTION = {
 name: 'battle_evaluation',
 description: 'Evaluate a battle between player_1 and player_2 using predefined criteria and identify the winner.',
-
+parameters: {
 type: "object",
 properties: {
 player_1_sell_himself: {
@@ -122,15 +122,7 @@ module ActiveGenie::Battle
 description: 'Who is the winner based on the impartial judge reasoning?',
 enum: ['player_1', 'player_2']
 },
-}
-required: [
-'player_1_sell_himself',
-'player_2_sell_himself',
-'player_1_arguments',
-'player_2_counter',
-'impartial_judge_winner_reasoning',
-'impartial_judge_winner'
-]
+}
 }
 }
 end
data/lib/active_genie/clients/anthropic_client.rb
CHANGED
@@ -2,108 +2,82 @@ require 'json'
 require 'net/http'
 require 'uri'
 require_relative './helpers/retry'
+require_relative './base_client'

-module ActiveGenie
-
-
-class
-
-class RateLimitError < AnthropicError; end
+module ActiveGenie::Clients
+# Client for interacting with the Anthropic (Claude) API with json response
+class AnthropicClient < BaseClient
+class AnthropicError < ClientError; end
+class RateLimitError < AnthropicError; end

-
-
-end
-
-# Requests structured JSON output from the Anthropic Claude model based on a schema.
-#
-# @param messages [Array<Hash>] A list of messages representing the conversation history.
-# Each hash should have :role ('user', 'assistant', or 'system') and :content (String).
-# Claude uses 'user', 'assistant', and 'system' roles.
-# @param function [Hash] A JSON schema definition describing the desired output format.
-# @param model_tier [Symbol, nil] A symbolic representation of the model quality/size tier.
-# @param config [Hash] Optional configuration overrides:
-# - :api_key [String] Override the default API key.
-# - :model [String] Override the model name directly.
-# - :max_retries [Integer] Max retries for the request.
-# - :retry_delay [Integer] Initial delay for retries.
-# - :anthropic_version [String] Override the default Anthropic API version.
-# @return [Hash, nil] The parsed JSON object matching the schema, or nil if parsing fails or content is empty.
-def function_calling(messages, function, model_tier: nil, config: {})
-model = config[:runtime][:model] || @app_config.tier_to_model(model_tier)
-
-system_message = messages.find { |m| m[:role] == 'system' }&.dig(:content) || ''
-user_messages = messages.select { |m| m[:role] == 'user' || m[:role] == 'assistant' }
-.map { |m| { role: m[:role], content: m[:content] } }
-
-anthropic_function = function
-anthropic_function[:input_schema] = function[:schema]
-anthropic_function.delete(:schema)
-
-payload = {
-model:,
-system: system_message,
-messages: user_messages,
-tools: [anthropic_function],
-tool_choice: { name: anthropic_function[:name], type: 'tool' },
-max_tokens: config[:runtime][:max_tokens],
-temperature: config[:runtime][:temperature] || 0,
-}
-
-api_key = config[:runtime][:api_key] || @app_config.api_key
-headers = DEFAULT_HEADERS.merge(
-'x-api-key': api_key,
-'anthropic-version': config[:anthropic_version] || ANTHROPIC_VERSION
-).compact
+ANTHROPIC_VERSION = '2023-06-01'
+ANTHROPIC_ENDPOINT = '/v1/messages'

-
-
-
-
-ActiveGenie::Logger.trace({code: :function_calling, payload:, parsed_response: content})
-
-content
-end
-end
-
-private
+def initialize(config)
+super(config)
+end

-
-
+# Requests structured JSON output from the Anthropic Claude model based on a schema.
+#
+# @param messages [Array<Hash>] A list of messages representing the conversation history.
+# Each hash should have :role ('user', 'assistant', or 'system') and :content (String).
+# Claude uses 'user', 'assistant', and 'system' roles.
+# @param function [Hash] A JSON schema definition describing the desired output format.
+# @param model_tier [Symbol, nil] A symbolic representation of the model quality/size tier.
+# @param config [Hash] Optional configuration overrides:
+# - :api_key [String] Override the default API key.
+# - :model [String] Override the model name directly.
+# - :max_retries [Integer] Max retries for the request.
+# - :retry_delay [Integer] Initial delay for retries.
+# - :anthropic_version [String] Override the default Anthropic API version.
+# @return [Hash, nil] The parsed JSON object matching the schema, or nil if parsing fails or content is empty.
+def function_calling(messages, function, model_tier: nil, config: {})
+model = config[:runtime][:model] || @app_config.tier_to_model(model_tier)
+
+system_message = messages.find { |m| m[:role] == 'system' }&.dig(:content) || ''
+user_messages = messages.select { |m| m[:role] == 'user' || m[:role] == 'assistant' }
+.map { |m| { role: m[:role], content: m[:content] } }
+
+anthropic_function = function.dup
+anthropic_function[:input_schema] = function[:parameters]
+anthropic_function.delete(:parameters)
+
+payload = {
+model:,
+system: system_message,
+messages: user_messages,
+tools: [anthropic_function],
+tool_choice: { name: anthropic_function[:name], type: 'tool' },
+max_tokens: config[:runtime][:max_tokens],
+temperature: config[:runtime][:temperature] || 0,
 }
-ANTHROPIC_VERSION = '2023-06-01'

-
-
-
-
-
-URI("#{@app_config.api_url}/v1/messages"),
-payload.to_json,
-headers
-)
-
-if response.is_a?(Net::HTTPTooManyRequests)
-raise RateLimitError, "Anthropic API rate limit exceeded: #{response.body}"
-end
-
-raise AnthropicError, response.body unless response.is_a?(Net::HTTPSuccess)
-
-return nil if response.body.empty?
-
-parsed_body = JSON.parse(response.body)
+api_key = config[:runtime][:api_key] || @app_config.api_key
+headers = {
+'x-api-key': api_key,
+'anthropic-version': config[:anthropic_version] || ANTHROPIC_VERSION
+}.compact

-
-
-
-
-
-
-
-
-
-
-
-
+retry_with_backoff(config:) do
+start_time = Time.now
+
+response = post(ANTHROPIC_ENDPOINT, payload, headers: headers, config: config)
+
+content = response.dig('content', 0, 'input')
+
+ActiveGenie::Logger.trace({
+code: :llm_usage,
+input_tokens: response.dig('usage', 'input_tokens'),
+output_tokens: response.dig('usage', 'output_tokens'),
+total_tokens: response.dig('usage', 'input_tokens') + response.dig('usage', 'output_tokens'),
+model: payload[:model],
+duration: Time.now - start_time,
+usage: response.dig('usage')
+})
+
+ActiveGenie::Logger.trace({code: :function_calling, payload:, parsed_response: content})
+
+content
 end
 end
 end
data/lib/active_genie/clients/base_client.rb
ADDED
@@ -0,0 +1,241 @@
+module ActiveGenie
+module Clients
+class BaseClient
+class ClientError < StandardError; end
+class RateLimitError < ClientError; end
+class TimeoutError < ClientError; end
+class NetworkError < ClientError; end
+
+DEFAULT_HEADERS = {
+'Content-Type': 'application/json',
+'Accept': 'application/json',
+'User-Agent': 'ActiveGenie/1.0',
+}.freeze
+
+DEFAULT_TIMEOUT = 60 # seconds
+DEFAULT_OPEN_TIMEOUT = 10 # seconds
+DEFAULT_MAX_RETRIES = 3
+DEFAULT_RETRY_DELAY = 1 # seconds
+
+attr_reader :app_config
+
+def initialize(config)
+@app_config = config
+end
+
+# Make a GET request to the specified endpoint
+#
+# @param endpoint [String] The API endpoint to call
+# @param headers [Hash] Additional headers to include in the request
+# @param params [Hash] Query parameters for the request
+# @param config [Hash] Configuration options including timeout, retries, etc.
+# @return [Hash, nil] The parsed JSON response or nil if empty
+def get(endpoint, params: {}, headers: {}, config: {})
+uri = build_uri(endpoint, params)
+request = Net::HTTP::Get.new(uri)
+execute_request(uri, request, headers, config)
+end
+
+# Make a POST request to the specified endpoint
+#
+# @param endpoint [String] The API endpoint to call
+# @param payload [Hash] The request body to send
+# @param headers [Hash] Additional headers to include in the request
+# @param config [Hash] Configuration options including timeout, retries, etc.
+# @return [Hash, nil] The parsed JSON response or nil if empty
+def post(endpoint, payload, params: {}, headers: {}, config: {})
+uri = build_uri(endpoint, params)
+request = Net::HTTP::Post.new(uri)
+request.body = payload.to_json
+execute_request(uri, request, headers, config)
+end
+
+# Make a PUT request to the specified endpoint
+#
+# @param endpoint [String] The API endpoint to call
+# @param payload [Hash] The request body to send
+# @param headers [Hash] Additional headers to include in the request
+# @param config [Hash] Configuration options including timeout, retries, etc.
+# @return [Hash, nil] The parsed JSON response or nil if empty
+def put(endpoint, payload, headers: {}, config: {})
+uri = build_uri(endpoint)
+request = Net::HTTP::Put.new(uri)
+request.body = payload.to_json
+execute_request(uri, request, headers, config)
+end
+
+# Make a DELETE request to the specified endpoint
+#
+# @param endpoint [String] The API endpoint to call
+# @param headers [Hash] Additional headers to include in the request
+# @param params [Hash] Query parameters for the request
+# @param config [Hash] Configuration options including timeout, retries, etc.
+# @return [Hash, nil] The parsed JSON response or nil if empty
+def delete(endpoint, headers: {}, params: {}, config: {})
+uri = build_uri(endpoint, params)
+request = Net::HTTP::Delete.new(uri)
+execute_request(uri, request, headers, config)
+end
+
+protected
+
+# Execute a request with retry logic and proper error handling
+#
+# @param uri [URI] The URI for the request
+# @param request [Net::HTTP::Request] The request object
+# @param headers [Hash] Additional headers to include
+# @param config [Hash] Configuration options
+# @return [Hash, nil] The parsed JSON response or nil if empty
+def execute_request(uri, request, headers, config)
+start_time = Time.now
+
+# Apply headers
+apply_headers(request, headers)
+
+# Apply retry logic
+retry_with_backoff(config) do
+http = create_http_client(uri, config)
+
+begin
+response = http.request(request)
+
+# Handle common HTTP errors
+case response
+when Net::HTTPSuccess
+parsed_response = parse_response(response)
+
+# Log request details if logging is enabled
+log_request_details(
+uri: uri,
+method: request.method,
+status: response.code,
+duration: Time.now - start_time,
+response: parsed_response
+)
+
+parsed_response
+when Net::HTTPTooManyRequests
+raise RateLimitError, "Rate limit exceeded: #{response.body}"
+when Net::HTTPClientError, Net::HTTPServerError
+raise ClientError, "HTTP Error #{response.code}: #{response.body}"
+else
+raise ClientError, "Unexpected response: #{response.code} - #{response.body}"
+end
+rescue Timeout::Error, Errno::ETIMEDOUT
+raise TimeoutError, "Request to #{uri} timed out"
+rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH, SocketError => e
+raise NetworkError, "Network error: #{e.message}"
+end
+end
+end
+
+# Create and configure an HTTP client
+#
+# @param uri [URI] The URI for the request
+# @param config [Hash] Configuration options
+# @return [Net::HTTP] Configured HTTP client
+def create_http_client(uri, config)
+http = Net::HTTP.new(uri.host, uri.port)
+http.use_ssl = (uri.scheme == 'https')
+http.verify_mode = OpenSSL::SSL::VERIFY_PEER
+http.read_timeout = config.dig(:runtime, :timeout) || DEFAULT_TIMEOUT
+http.open_timeout = config.dig(:runtime, :open_timeout) || DEFAULT_OPEN_TIMEOUT
+http
+end
+
+# Apply headers to the request
+#
+# @param request [Net::HTTP::Request] The request object
+# @param headers [Hash] Additional headers to include
+def apply_headers(request, headers)
+DEFAULT_HEADERS.each do |key, value|
+request[key] = value
+end
+
+headers.each do |key, value|
+request[key.to_s] = value
+end
+end
+
+# Build a URI for the request
+#
+# @param endpoint [String] The API endpoint
+# @param params [Hash] Query parameters
+# @return [URI] The constructed URI
+def build_uri(endpoint, params = {})
+base_url = @app_config.api_url
+uri = URI("#{base_url}#{endpoint}")
+
+unless params.empty?
+uri.query = URI.encode_www_form(params)
+end
+
+uri
+end
+
+# Parse the response body
+#
+# @param response [Net::HTTPResponse] The HTTP response
+# @return [Hash, nil] Parsed JSON or nil if empty
+def parse_response(response)
+return nil if response.body.nil? || response.body.empty?
+
+begin
+JSON.parse(response.body)
+rescue JSON::ParserError => e
+raise ClientError, "Failed to parse JSON response: #{e.message}"
+end
+end
+
+# Log request details if logging is enabled
+#
+# @param details [Hash] Request and response details
+def log_request_details(details)
+return unless defined?(ActiveGenie::Logger)
+
+ActiveGenie::Logger.trace({
+code: :http_request,
+uri: details[:uri].to_s,
+method: details[:method],
+status: details[:status],
+duration: details[:duration],
+response_size: details[:response].to_s.bytesize
+})
+end
+
+# Retry a block with exponential backoff
+#
+# @param config [Hash] Configuration options
+# @yield The block to retry
+# @return [Object] The result of the block
+def retry_with_backoff(config = {})
+max_retries = config.dig(:runtime, :max_retries) || DEFAULT_MAX_RETRIES
+retry_delay = config.dig(:runtime, :retry_delay) || DEFAULT_RETRY_DELAY
+
+retries = 0
+
+begin
+yield
+rescue RateLimitError, NetworkError => e
+if retries < max_retries
+sleep_time = retry_delay * (2 ** retries)
+retries += 1
+
+ActiveGenie::Logger.trace({
+code: :retry_attempt,
+attempt: retries,
+max_retries: max_retries,
+delay: sleep_time,
+error: e.message
+}) if defined?(ActiveGenie::Logger)
+
+sleep(sleep_time)
+retry
+else
+raise
+end
+end
+end
+end
+end
+end
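With the defaults defined above (`DEFAULT_MAX_RETRIES = 3`, `DEFAULT_RETRY_DELAY = 1`), `retry_with_backoff` sleeps roughly 1 s, 2 s and 4 s between attempts (`retry_delay * 2 ** retries`) before re-raising a `RateLimitError` or `NetworkError`. Both knobs can be overridden per request through the runtime config it reads; a minimal sketch, with a hypothetical call site:

```ruby
# Per-request retry tuning read by retry_with_backoff via config.dig(:runtime, ...).
config = {
  runtime: {
    max_retries: 5,  # up to 5 retries instead of the default 3
    retry_delay: 2   # delays of 2, 4, 8, 16 and 32 seconds
  }
}

# client.post('/some/endpoint', payload, config: config) # hypothetical call site
```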
data/lib/active_genie/clients/google_client.rb
CHANGED
@@ -2,157 +2,134 @@ require 'json'
 require 'net/http'
 require 'uri'
 require_relative './helpers/retry'
+require_relative './base_client'

-module ActiveGenie
-
-
-class
-
-class RateLimitError < GoogleError; end
+module ActiveGenie::Clients
+# Client for interacting with the Google Generative Language API.
+class GoogleClient < BaseClient
+class GoogleError < ClientError; end
+class RateLimitError < GoogleError; end

-
-DEFAULT_HEADERS = {
-'Content-Type': 'application/json',
-}.freeze
+API_VERSION_PATH = '/v1beta/models'.freeze

-
-
-
+def initialize(config)
+super(config)
+end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
+# Requests structured JSON output from the Google Generative Language model based on a schema.
+#
+# @param messages [Array<Hash>] A list of messages representing the conversation history.
+# Each hash should have :role ('user' or 'model') and :content (String).
+# Google Generative Language uses 'user' and 'model' roles.
+# @param function [Hash] A JSON schema definition describing the desired output format.
+# @param model_tier [Symbol, nil] A symbolic representation of the model quality/size tier.
+# @param config [Hash] Optional configuration overrides:
+# - :api_key [String] Override the default API key.
+# - :model [String] Override the model name directly.
+# - :max_retries [Integer] Max retries for the request.
+# - :retry_delay [Integer] Initial delay for retries.
+# @return [Hash, nil] The parsed JSON object matching the schema, or nil if parsing fails or content is empty.
+def function_calling(messages, function, model_tier: nil, config: {})
+model = config[:runtime][:model] || @app_config.tier_to_model(model_tier)
+api_key = config[:runtime][:api_key] || @app_config.api_key
+
+contents = convert_messages_to_contents(messages, function)
+contents << output_as_json_schema(function)
+
+payload = {
+contents: contents,
+generationConfig: {
+response_mime_type: "application/json",
+temperature: 0.1
 }
+}

-
+endpoint = "#{API_VERSION_PATH}/#{model}:generateContent"
+params = { key: api_key }
+headers = DEFAULT_HEADERS

-
-
+retry_with_backoff(config:) do
+start_time = Time.now
+
+response = post(endpoint, payload, headers:, params:, config: config)
+
+json_string = response&.dig('candidates', 0, 'content', 'parts', 0, 'text')
+return nil if json_string.nil? || json_string.empty?

-
-
-return nil if json_string.nil? || json_string.empty?
-
+begin
 parsed_response = JSON.parse(json_string)
-
+
+# Log usage metrics
+usage_metadata = response['usageMetadata'] || {}
+prompt_tokens = usage_metadata['promptTokenCount'] || 0
+candidates_tokens = usage_metadata['candidatesTokenCount'] || 0
+total_tokens = usage_metadata['totalTokenCount'] || (prompt_tokens + candidates_tokens)
+
+ActiveGenie::Logger.trace({
+code: :llm_usage,
+input_tokens: prompt_tokens,
+output_tokens: candidates_tokens,
+total_tokens: total_tokens,
+model: model,
+duration: Time.now - start_time,
+usage: usage_metadata
+})
+
 ActiveGenie::Logger.trace({ code: :function_calling, payload:, parsed_response: })
-
+
 normalize_function_output(parsed_response)
+rescue JSON::ParserError => e
+raise GoogleError, "Failed to parse Google API response: #{e.message} - Content: #{json_string}"
 end
 end
+end

-
-
-def normalize_function_output(output)
-output = if output.is_a?(Array)
-output.dig(0, 'properties') || output.dig(0)
-else
-output
-end
+private

-
+def normalize_function_output(output)
+output = if output.is_a?(Array)
+output.dig(0, 'properties') || output.dig(0)
+else
+output
 end

-
-
-
-retry_with_backoff(config:) do
-response = Net::HTTP.post(url, payload.to_json, DEFAULT_HEADERS)
-
-case response
-when Net::HTTPSuccess
-return nil if response.body.nil? || response.body.empty?
-
-parsed_body = JSON.parse(response.body)
-
-usage_metadata = parsed_body['usageMetadata'] || {}
-prompt_tokens = usage_metadata['promptTokenCount'] || 0
-candidates_tokens = usage_metadata['candidatesTokenCount'] || 0
-total_tokens = usage_metadata['totalTokenCount'] || (prompt_tokens + candidates_tokens)
-
-ActiveGenie::Logger.trace({
-code: :llm_usage,
-input_tokens: prompt_tokens,
-output_tokens: candidates_tokens,
-total_tokens: total_tokens,
-model: model,
-duration: Time.now - start_time,
-usage: usage_metadata # Log the whole usage block
-})
-
-parsed_body
-
-when Net::HTTPTooManyRequests
-# Rate Limit Error
-raise RateLimitError, "Google API rate limit exceeded (HTTP 429): #{response.body}"
-
-else
-# Other Errors
-raise GoogleError, "Google API error (HTTP #{response.code}): #{response.body}"
-end
-end
-rescue JSON::ParserError => e
-raise GoogleError, "Failed to parse Google API response body: #{e.message} - Body: #{response&.body}"
-end
+output.dig('input_schema', 'properties') || output
+end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-end
+ROLE_TO_GOOGLE_ROLE = {
+user: 'user',
+assistant: 'model',
+}.freeze
+
+# Converts standard message format to Google's 'contents' format
+# and injects JSON schema instructions.
+# @param messages [Array<Hash>] Array of { role: 'user'/'assistant'/'system', content: '...' }
+# @param function_schema [Hash] The JSON schema for the desired output.
+# @return [Array<Hash>] Array formatted for Google's 'contents' field.
+def convert_messages_to_contents(messages, function_schema)
+messages.map do |message|
+{
+role: ROLE_TO_GOOGLE_ROLE[message[:role].to_sym] || 'user',
+parts: [{ text: message[:content] }]
+}
 end
+end

-
-
-
+def output_as_json_schema(function_schema)
+json_instruction = <<~PROMPT
+Generate a JSON object that strictly adheres to the following JSON schema:

-
-
-
+```json
+#{JSON.pretty_generate(function_schema[:parameters])}
+```

-
-
+IMPORTANT: Only output the raw JSON object. Do not include any other text, explanations, or markdown formatting like ```json ... ``` wrappers around the final output.
+PROMPT

-
-
-
-
-end
+{
+role: 'user',
+parts: [{ text: json_instruction }]
+}
 end
 end
 end
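As an illustration of the conversion performed by `convert_messages_to_contents` above (not part of the diff): roles are mapped through `ROLE_TO_GOOGLE_ROLE`, anything unmapped (including `system`) falls back to `user`, and `function_calling` then appends the extra user message built by `output_as_json_schema`. The message values below are made up:

```ruby
# Example input in the gem's standard message format.
messages = [
  { role: 'system',    content: 'Answer with JSON only.' },
  { role: 'user',      content: 'Extract the city from: "Flights to Lisbon".' },
  { role: 'assistant', content: 'Understood.' }
]

# convert_messages_to_contents(messages, function) would produce:
# [
#   { role: 'user',  parts: [{ text: 'Answer with JSON only.' }] },                      # 'system' is unmapped -> 'user'
#   { role: 'user',  parts: [{ text: 'Extract the city from: "Flights to Lisbon".' }] },
#   { role: 'model', parts: [{ text: 'Understood.' }] }                                  # 'assistant' -> 'model'
# ]
# function_calling then appends output_as_json_schema(function), a final user message
# embedding JSON.pretty_generate(function[:parameters]) as an instruction.
```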
data/lib/active_genie/clients/openai_client.rb
CHANGED
@@ -2,48 +2,58 @@ require 'json'
 require 'net/http'

 require_relative './helpers/retry'
+require_relative './base_client'

 module ActiveGenie::Clients
-class OpenaiClient
-class OpenaiError <
+class OpenaiClient < BaseClient
+class OpenaiError < ClientError; end
 class RateLimitError < OpenaiError; end
 class InvalidResponseError < StandardError; end

 def initialize(config)
-
+super(config)
 end

-
-
-
-
-
-
-
-
-
-
-
-
-# @return [Hash, nil] The parsed JSON object matching the schema, or nil if parsing fails or content is empty.
+# Requests structured JSON output from the OpenAI model based on a schema.
+#
+# @param messages [Array<Hash>] A list of messages representing the conversation history.
+# Each hash should have :role ('user', 'assistant', or 'system') and :content (String).
+# @param function [Hash] A JSON schema definition describing the desired output format.
+# @param model_tier [Symbol, nil] A symbolic representation of the model quality/size tier.
+# @param config [Hash] Optional configuration overrides:
+# - :api_key [String] Override the default API key.
+# - :model [String] Override the model name directly.
+# - :max_retries [Integer] Max retries for the request.
+# - :retry_delay [Integer] Initial delay for retries.
+# @return [Hash, nil] The parsed JSON object matching the schema, or nil if parsing fails or content is empty.
 def function_calling(messages, function, model_tier: nil, config: {})
 model = config[:runtime][:model] || @app_config.tier_to_model(model_tier)

 payload = {
 messages:,
-tools: [{
+tools: [{
+type: 'function',
+function: {
+**function,
+parameters: {
+**function[:parameters],
+additionalProperties: false
+},
+strict: true
+}.compact
+}],
 tool_choice: { type: 'function', function: { name: function[:name] } },
 stream: false,
 model:,
 }

 api_key = config[:runtime][:api_key] || @app_config.api_key
-headers =
+headers = {
 'Authorization': "Bearer #{api_key}"
-
+}.compact

 retry_with_backoff(config:) do
-response =
+response = request_openai(payload, headers, config:)

 parsed_response = JSON.parse(response.dig('choices', 0, 'message', 'tool_calls', 0, 'function', 'arguments'))
 parsed_response = parsed_response.dig('message') || parsed_response
@@ -56,42 +66,33 @@ module ActiveGenie::Clients
 end
 end

-private

-
-'Content-Type': 'application/json',
-}
+private

-
+# Make a request to the OpenAI API
+#
+# @param payload [Hash] The request payload
+# @param headers [Hash] Additional headers
+# @param config [Hash] Configuration options
+# @return [Hash] The parsed response
+def request_openai(payload, headers, config:)
 start_time = Time.now

-response =
-URI("#{@app_config.api_url}/chat/completions"),
-payload.to_json,
-headers
-)
-
-if response.is_a?(Net::HTTPTooManyRequests)
-raise RateLimitError, "OpenAI API rate limit exceeded: #{response.body}"
-end
-
-raise OpenaiError, response.body unless response.is_a?(Net::HTTPSuccess)
-
-return nil if response.body.empty?
+response = post("/chat/completions", payload, headers: headers, config: config)

-
+return nil if response.nil?

 ActiveGenie::Logger.trace({
 code: :llm_usage,
-input_tokens:
-output_tokens:
-total_tokens:
+input_tokens: response.dig('usage', 'prompt_tokens'),
+output_tokens: response.dig('usage', 'completion_tokens'),
+total_tokens: response.dig('usage', 'total_tokens'),
 model: payload[:model],
 duration: Time.now - start_time,
-usage:
+usage: response.dig('usage')
 })

-
+response
 end
 end
 end
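A common thread across these client changes is that the schema now travels under the function definition's `:parameters` key (the OpenAI payload above additionally sets `additionalProperties: false` and `strict: true`). For reference, a minimal function definition in the shape these clients expect; the example fields and output are invented:

```ruby
# Minimal function definition in the shape expected by the reworked clients.
function = {
  name: 'extract_city',
  description: 'Extract the city mentioned in the text.',
  parameters: {
    type: 'object',
    properties: {
      city: { type: 'string' }
    },
    required: ['city']
  }
}

messages = [{ role: 'user', content: 'Flights to Lisbon are cheap in May.' }]

# Assuming an already configured provider client (wiring shown in the README section above):
# client.function_calling(messages, function, config: { runtime: {} })
# # => { "city" => "Lisbon" } (illustrative output)
```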
data/lib/active_genie/configuration/providers/internal_company_api_config.rb
ADDED
@@ -0,0 +1,54 @@
+require_relative '../../clients/internal_company_api_client'
+require_relative './base_config'
+
+module ActiveGenie
+module Configuration::Providers
+# Configuration class for the Internal Company API client.
+# Manages API keys, URLs, model selections, and client instantiation.
+class InternalCompanyApiConfig < BaseConfig
+NAME = :internal_company_api
+
+# Retrieves the API key.
+# Falls back to the INTERNAL_COMPANY_API_KEY environment variable if not set.
+# @return [String, nil] The API key.
+def api_key
+@api_key || ENV['INTERNAL_COMPANY_API_KEY']
+end
+
+# Retrieves the base API URL for Internal Company API.
+# Defaults to 'https://api.internal-company.com/v1'.
+# @return [String] The API base URL.
+def api_url
+@api_url || 'https://api.internal-company.com/v1'
+end
+
+# Lazily initializes and returns an instance of the InternalCompanyApiClient.
+# Passes itself (the config object) to the client's constructor.
+# @return [ActiveGenie::Clients::InternalCompanyApiClient] The client instance.
+def client
+@client ||= ::ActiveGenie::Clients::InternalCompanyApiClient.new(self)
+end
+
+# Retrieves the model name designated for the lower tier (e.g., cost-effective, faster).
+# Defaults to 'internal-basic'.
+# @return [String] The lower tier model name.
+def lower_tier_model
+@lower_tier_model || 'internal-basic'
+end
+
+# Retrieves the model name designated for the middle tier (e.g., balanced performance).
+# Defaults to 'internal-standard'.
+# @return [String] The middle tier model name.
+def middle_tier_model
+@middle_tier_model || 'internal-standard'
+end
+
+# Retrieves the model name designated for the upper tier (e.g., most capable).
+# Defaults to 'internal-premium'.
+# @return [String] The upper tier model name.
+def upper_tier_model
+@upper_tier_model || 'internal-premium'
+end
+end
+end
+end
data/lib/active_genie/ranking/ranking.rb
CHANGED
@@ -72,7 +72,7 @@ module ActiveGenie::Ranking
 ELIMINATION_RELEGATION = 'relegation_tier'

 with_logging_context :log_context, ->(log) {
-@total_tokens += log[:total_tokens] if log[:code] == :llm_usage
+@total_tokens += log[:total_tokens] || 0 if log[:code] == :llm_usage
 }

 def initial_log
data/lib/active_genie/scoring/recommended_reviewers.rb
CHANGED
@@ -42,14 +42,15 @@ module ActiveGenie::Scoring
 function = {
 name: 'identify_reviewers',
 description: 'Discover reviewers based on the text and given criteria.',
-
+parameters: {
 type: "object",
 properties: {
 reasoning: { type: 'string' },
 reviewer1: { type: 'string' },
 reviewer2: { type: 'string' },
 reviewer3: { type: 'string' },
-}
+},
+required: ['reasoning', 'reviewer1', 'reviewer2', 'reviewer3']
 }
 }

data/lib/tasks/templates/active_genie.rb
CHANGED
@@ -10,7 +10,7 @@ ActiveGenie.configure do |config|
 # config.providers.openai.client = ActiveGenie::Providers::Openai::Client.new(config)

 # example how add a new provider
-# config.providers.register(
+# config.providers.register(InternalCompanyApi::Configuration)

 # Logs configuration
 # config.log_level = :debug # default is :info
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: active_genie
 version: !ruby/object:Gem::Version
-version: 0.0.
+version: 0.0.20
 platform: ruby
 authors:
 - Radamés Roriz
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-04-
+date: 2025-04-04 00:00:00.000000000 Z
 dependencies: []
 description: "# ActiveGenie \U0001F9DE♂️\n> The lodash for GenAI, stop reinventing
 the wheel\n\n[](https://badge.fury.io/rb/active_genie)\n[](https://github.com/roriz/active_genie/actions/workflows/benchmark.yml)\n\nActiveGenie
@@ -62,7 +62,7 @@ description: "# ActiveGenie \U0001F9DE♂️\n> The lodash for GenAI, stop re
 While Player 2 has good test coverage, \n# the tight coupling makes
 the code harder to maintain and modify.\",\n# what_could_be_changed_to_avoid_draw:
 \"Focus on specific architectural patterns and design principles\"\n# }\n```\n\n*Recommended
-model*: `
+model*: `claude-3-5-haiku`\n\nFeatures:\n- Multi-reviewer evaluation with automatic
 expert selection\n- Detailed feedback with scoring reasoning\n- Customizable reviewer
 weights\n- Flexible evaluation criteria\n\nSee the [Battle README](lib/active_genie/battle/README.md)
 for advanced usage, custom reviewers, and detailed interface documentation.\n\n###
@@ -93,9 +93,27 @@ description: "# ActiveGenie \U0001F9DE♂️\n> The lodash for GenAI, stop re
 `api_key` | Provider API key | `nil` |\n| `timeout` | Request timeout in seconds
 | `5` |\n| `max_retries` | Maximum retry attempts | `3` |\n\n> **Note:** Each module
 can append its own set of configuration, see the individual module documentation
-for details.\n\n##
-
-
+for details.\n\n## How to create a new provider\n\nActiveGenie supports adding custom
+providers to integrate with different LLM services. To create a new provider:\n\n1.
+Create a configuration class for your provider in `lib/active_genie/configuration/providers/`:\n\n```ruby\n#
+Example: lib/active_genie/configuration/providers/internal_company_api_config.rb\nmodule
+ActiveGenie\n module Configuration::Providers\n class InternalCompanyApiConfig
+< BaseConfig\n NAME = :internal_company_api\n \n # API key accessor
+with environment variable fallback\n def api_key\n @api_key || ENV['INTERNAL_COMPANY_API_KEY']\n
+\ end\n \n # Base API URL\n def api_url\n @api_url ||
+'https://api.internal-company.com/v1'\n end\n \n # Client instantiation\n
+\ def client\n @client ||= ::ActiveGenie::Clients::InternalCompanyApiClient.new(self)\n
+\ end\n \n # Model tier definitions\n def lower_tier_model\n
+\ @lower_tier_model || 'internal-basic'\n end\n \n def middle_tier_model\n
+\ @middle_tier_model || 'internal-standard'\n end\n \n def
+upper_tier_model\n @upper_tier_model || 'internal-premium'\n end\n end\n
+\ end\nend\n```\n\n2. Register your provider in your configuration:\n\n```ruby\n#
+In config/initializers/active_genie.rb\nActiveGenie.configure do |config|\n # Register
+your custom provider\n config.providers.register(InternalCompanyApi::Configuration)\n
+\ \n # Configure your provider\n config.internal_company_api.api_key = ENV['INTERNAL_COMPANY_API_KEY']\nend\n```\n\n##
+Contributing\n\n1. Fork the repository\n2. Create your feature branch (`git checkout
+-b feature/amazing-feature`)\n3. Commit your changes (`git commit -m 'Add amazing
+feature'`)\n4. Push to the branch (`git push origin feature/amazing-feature`)\n5.
 Open a Pull Request\n\n## License\n\nThis project is licensed under the Apache License
 2.0 License - see the [LICENSE](LICENSE) file for details.\n"
 email:
@@ -112,6 +130,7 @@ files:
 - lib/active_genie/battle/README.md
 - lib/active_genie/battle/basic.rb
 - lib/active_genie/clients/anthropic_client.rb
+- lib/active_genie/clients/base_client.rb
 - lib/active_genie/clients/google_client.rb
 - lib/active_genie/clients/helpers/retry.rb
 - lib/active_genie/clients/openai_client.rb
@@ -123,6 +142,7 @@ files:
 - lib/active_genie/configuration/providers/base_config.rb
 - lib/active_genie/configuration/providers/deepseek_config.rb
 - lib/active_genie/configuration/providers/google_config.rb
+- lib/active_genie/configuration/providers/internal_company_api_config.rb
 - lib/active_genie/configuration/providers/openai_config.rb
 - lib/active_genie/configuration/providers_config.rb
 - lib/active_genie/configuration/runtime_config.rb