ai_client 0.4.1 → 0.4.4

This diff shows the changes between publicly released versions of the package as they appear in its public registry. It is provided for informational purposes only.
data/lib/ai_client/ollama_extensions.rb ADDED
@@ -0,0 +1,191 @@
+ # lib/ai_client/ollama_extensions.rb
+
+ # Ollama Extensions for AiClient
+ #
+ # This file adds several public instance and class methods to the AiClient class
+ # to provide information about AI models and providers.
+ #
+ # Instance Methods:
+ # - model_details: Retrieves details for the current model.
+ # - models: Retrieves model names for the current provider.
+ #
+ # Class Methods:
+ # - providers: Retrieves all available providers.
+ # - models: Retrieves model names, optionally filtered by provider.
+ # - model_details: Retrieves details for a specific model.
+ #
+ # These methods utilize the AiClient::LLM class and the models.yml file
+ # for model information.
+
+ # TODO: consider incorporating Ollama AI extensions
+ #       require 'ollama-ai'
+
+ require 'yaml'
+ require 'uri'
+ require 'json'
+ require 'net/http'
+
+ class AiClient
+
+   # Retrieves details for the current model.
+   #
+   # @return [Hash, nil] Details of the current model or nil if not found.
+   def model_details
+     id = "#{@provider}/#{@model}"
+     LLM.find(id.downcase)
+   end
+
+   # Retrieves model names for the current provider.
+   #
+   # @return [Array<String>] List of model names for the current provider.
+   def models = LLM.models(@provider)
+
+
+   class << self
+
+     # Retrieves all available providers.
+     #
+     # @return [Array<Symbol>] List of all provider names.
+     def providers = LLM.providers
+
+
+     # Retrieves model names, optionally filtered by provider.
+     #
+     # @param substring [String, nil] Optional substring to filter models by.
+     # @return [Array<String>] List of model names.
+     def models(substring = nil) = LLM.models(substring)
+
+     # Retrieves details for a specific model.
+     #
+     # @param model_id [String] The model ID to retrieve details for,
+     #   in the pattern "provider/model".downcase
+     # @return [AiClient::LLM, nil] Details of the model or nil if not found.
+     def model_details(model_id) = LLM.find(model_id.downcase)
+
+
+     # Resets LLM data with the available ORC models.
+     #
+     # @return [void]
+     #
+     def reset_llm_data
+       # Simply delegate to LLM class if it has the method
+       if LLM.respond_to?(:reset_llm_data)
+         LLM.reset_llm_data
+       end
+     end
+
+
+     # Initializes Ollama extensions for AiClient.
+     #
+     # This sets up the access token and initializes the ORC client.
+     #
+     # @return [void]
+     #
+     def add_ollama_extensions
+       access_token = fetch_access_token
+
+       return unless access_token
+
+       configure_ollama(access_token)
+       initialize_ollama_client
+     end
+
+
+     # Retrieves the ORC client instance.
+     #
+     # @return [Ollama::Client] Instance of the Ollama client.
+     #
+     def ollama_client
+       @ollama_client ||= initialize_ollama_client
+     end
+
+
+     # Retrieves the available models from the Ollama server.
+     #
+     # @param host [String] Optional host URL for the Ollama server.
+     #   Defaults to the configured host or http://localhost:11434 if not specified.
+     # @return [Array<Hash>] List of available models with their details.
+     #
+     def ollama_available_models(host = nil)
+       host ||= ollama_host
+
+       uri = URI("#{host}/api/tags")
+       response = Net::HTTP.get_response(uri)
+
+       if response.is_a?(Net::HTTPSuccess)
+         JSON.parse(response.body)["models"] rescue []
+       else
+         []
+       end
+     end
+
+     # Gets the configured Ollama host URL
+     #
+     # @return [String] The configured Ollama host URL
+     def ollama_host
+       class_config.providers[:ollama]&.dig(:host) || 'http://localhost:11434'
+     end
+
+     # Checks if a specific model exists on the Ollama server.
+     #
+     # @param model_name [String] The name of the model to check.
+     # @param host [String] Optional host URL for the Ollama server.
+     #   Defaults to the configured host or http://localhost:11434 if not specified.
+     # @return [Boolean] True if the model exists, false otherwise.
+     #
+     def ollama_model_exists?(model_name, host = nil)
+       models = ollama_available_models(host)
+       models.any? { |m| m['name'] == model_name }
+     end
+
+
+     private
+
+
+     # Retrieves models from the ORC client.
+     #
+     # @return [Array<Hash>] List of models.
+     #
+     def ollama_models
+       [] # Simply return an empty array since we're not using the actual Ollama gem
+     end
+
+
+     # Fetches the access token from environment variables.
+     #
+     # @return [String, nil] The access token or nil if not found.
+     #
+     def fetch_access_token
+       # Check if the key exists in the configuration
+       return nil unless class_config.envar_api_key_names &&
+                         class_config.envar_api_key_names[:ollama]
+
+       # Now safely access the array
+       class_config.envar_api_key_names[:ollama]
+         .map { |key| ENV[key] }
+         .compact
+         .first
+     end
+
+     # Configures the Ollama client with the access token.
+     #
+     # @param access_token [String] The access token to configure.
+     # @return [void]
+     #
+     def configure_ollama(access_token)
+       # No-op since we're not using the actual Ollama gem
+     end
+
+     # Initializes the ORC client instance.
+     #
+     # @return [OmniAI::Ollama::Client] Instance of the Ollama client.
+     def initialize_ollama_client
+       # Return a dummy object that won't raise errors
+       Object.new
+     end
+   end
+ end
+
+ # Don't try to initialize the Ollama extensions at load time
+ # because we're not requiring the Ollama gem
+ # AiClient.add_ollama_extensions
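
Taken together, the new file gives AiClient catalog introspection (backed by `AiClient::LLM` and `models.yml`) plus live queries against a running Ollama server. Below is a minimal usage sketch, assuming a local Ollama server on its default port; the provider, model, and return values shown are illustrative, not guaranteed:

```ruby
require 'ai_client'

# Catalog lookups backed by AiClient::LLM and models.yml.
AiClient.providers                       # e.g. [:openai, :anthropic, :ollama, ...]
AiClient.model_details('openai/gpt-4o')  # an AiClient::LLM record, or nil if unknown

# Live queries against the local Ollama server (GET /api/tags).
AiClient.ollama_available_models.map { |m| m['name'] }  # e.g. ["llama3:latest", ...]
AiClient.ollama_model_exists?('llama3:latest')          # true or false
```

Note that `ollama_available_models` only partially hides failures: a non-2xx response or unparseable body yields `[]`, but a refused connection still raises from `Net::HTTP.get_response`.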
data/lib/ai_client/retry_middleware.rb CHANGED
@@ -1,46 +1,23 @@
- # ai_client/retry_middleware.rb
+ # lib/ai_client/retry_middleware.rb
 
  class AiClient
-
-   # AiClient.use(
-   #   AiClient::RetryMiddleware.new(
-   #     max_retries: 5,
-   #     base_delay: 2,
-   #     max_delay: 30
-   #   )
-   # )
-   #
    class RetryMiddleware
-
-     # Initializes a new instance of RetryMiddleware.
-     #
-     # @param max_retries [Integer] The maximum number of retries to attempt (default: 3).
-     # @param base_delay [Integer] The base delay in seconds before retrying (default: 2).
-     # @param max_delay [Integer] The maximum delay in seconds between retries (default: 16).
-     #
-     def initialize(max_retries: 3, base_delay: 2, max_delay: 16)
+     def initialize(max_retries: 3, base_delay: 1, max_delay: 16)
        @max_retries = max_retries
        @base_delay = base_delay
        @max_delay = max_delay
      end
 
-     # Calls the next middleware, retrying on specific errors.
-     #
-     # @param client [AiClient] The client instance that invokes the middleware.
-     # @param next_middleware [Proc] The next middleware in the chain to call.
-     # @param args [Array] Any additional arguments to pass to the next middleware.
-     #
-     # @raise [StandardError] Reraise the error if max retries are exceeded.
-     #
      def call(client, next_middleware, *args)
-       retries = 0
+       @retries = 0
+       @client = client
+
        begin
          next_middleware.call
        rescue OmniAI::RateLimitError, OmniAI::NetworkError => e
-         if retries < @max_retries
-           retries += 1
-           delay = [@base_delay * (2 ** (retries - 1)), @max_delay].min
-           client.logger.warn("Retrying in #{delay} seconds due to error: #{e.message}")
+         if @retries < @max_retries
+           delay = retry_delay(e)
+           log_retry(delay, e)
            sleep(delay)
            retry
          else
@@ -48,5 +25,16 @@ class AiClient
          end
        end
      end
+
+     private
+
+     def retry_delay(error)
+       @retries += 1
+       [@base_delay * (2 ** (@retries - 1)), @max_delay].min
+     end
+
+     def log_retry(delay, error)
+       @client.logger.warn("Retrying in #{delay} seconds due to error: #{error.message}")
+     end
    end
  end
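
This release refactors the backoff into `retry_delay` and `log_retry` and drops the default `base_delay` from 2 to 1 second. The delay formula is unchanged: min(base_delay * 2**(n - 1), max_delay) for the n-th retry, so the new defaults sleep 1, 2, then 4 seconds. For reference, the 0.4.1 usage comment removed above registered the middleware like this:

```ruby
# Reproduced from the removed comment; the values override the defaults.
AiClient.use(
  AiClient::RetryMiddleware.new(
    max_retries: 5,  # retry up to five times on RateLimitError / NetworkError
    base_delay: 2,   # successive delays: 2, 4, 8, 16, then 30 (capped)
    max_delay: 30
  )
)
```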
data/lib/ai_client/version.rb CHANGED
@@ -1,7 +1,7 @@
  # frozen_string_literal: true
 
  class AiClient
-   VERSION = "0.4.1"
+   VERSION = "0.4.4"
 
    def version = VERSION
    def self.version = VERSION
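
Both helpers return the same constant, at instance and class level:

```ruby
AiClient.version  # => "0.4.4"
```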
data/lib/ai_client/xai.rb ADDED
@@ -0,0 +1,35 @@
+ # lib/ai_client/xai.rb
+
+ module AiClient
+   module XAI
+     class Client
+       BASE_URI = 'https://api.x.ai/v1' # Replace with actual xAI API endpoint
+
+       def initialize(api_key: ENV['XAI_API_KEY'])
+         @api_key = api_key
+         @connection = OmniAI::HTTP::Connection.new(BASE_URI)
+       end
+
+       def chat(prompt:, model: 'grok3', **options)
+         response = @connection.post(
+           '/chat/completions', # Adjust endpoint based on xAI API docs
+           headers: { 'Authorization' => "Bearer #{@api_key}", 'Content-Type' => 'application/json' },
+           body: {
+             model: model,
+             messages: [{ role: 'user', content: prompt }],
+             temperature: options[:temperature] || 0.7,
+             max_tokens: options[:max_tokens] || 1024
+           }.to_json
+         )
+         parse_response(response)
+       end
+
+       private
+
+       def parse_response(response)
+         json = JSON.parse(response.body)
+         json['choices'][0]['message']['content'] # Adjust based on actual response structure
+       end
+     end
+   end
+ end
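
Nothing else in the release documents this client, so the following call sketch is inferred from the class as written, assuming `XAI_API_KEY` is exported and that `OmniAI::HTTP::Connection` behaves as the code expects; the in-code comments already flag the endpoint, default model name, and response shape as provisional:

```ruby
# Hypothetical usage; 'grok3' is the default model hard-coded above.
client = AiClient::XAI::Client.new  # reads ENV['XAI_API_KEY']
reply  = client.chat(
  prompt: 'Summarize the CAP theorem in one sentence.',
  temperature: 0.2,
  max_tokens: 256
)
puts reply
```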
data/lib/ai_client.rb CHANGED
@@ -41,6 +41,7 @@ require_relative 'ai_client/open_router_extensions'
  require_relative 'ai_client/llm' # SMELL: must come after the open router stuff
  require_relative 'ai_client/tool'
  require_relative 'ai_client/function'
+ require_relative 'ai_client/ollama_extensions' # Added ollama extensions
 
  # Create a generic client instance using only model name
  # client = AiClient.new('gpt-3.5-turbo')
@@ -288,14 +289,23 @@ class AiClient
        OmniAI::Mistral::Client.new(**client_options)
 
      when :ollama
-       OmniAI::OpenAI::Client.new(host: 'http://localhost:11434', api_key: nil, **client_options)
+       provider_config = @config.providers[:ollama] || {}
+       host = provider_config[:host] || 'http://localhost:11434'
+       OmniAI::OpenAI::Client.new(host: host, api_key: nil, **client_options)
 
      when :localai
-       OmniAI::OpenAI::Client.new(host: 'http://localhost:8080', api_key: nil, **client_options)
+       provider_config = @config.providers[:localai] || {}
+       host = provider_config[:host] || 'http://localhost:8080'
+       OmniAI::OpenAI::Client.new(host: host, api_key: nil, **client_options)
 
      when :open_router
        OmniAI::OpenAI::Client.new(host: 'https://openrouter.ai', api_prefix: 'api', **client_options)
 
+     when :xai
+       # SMELL: may want to make this an
+       #        extension to OmniAI
+       AiClient::XAI::Client.new
+
      else
        raise ArgumentError, "Unsupported provider: #{@provider}"
      end
@@ -322,6 +332,9 @@ class AiClient
    def determine_provider(model)
      return nil if model.nil? || model.empty?
 
+     # Ollama has many open-source models.
+     # We can use the Ollama API to list the currently installed models.
+     #   http://localhost:11434/api/tags
      config.provider_patterns.find { |provider, pattern| model.match?(pattern) }&.first ||
        raise(ArgumentError, "Unsupported model: #{model}")
    end
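
The practical effect of the `:ollama` and `:localai` changes is that the previously hard-coded hosts become overridable defaults read from `@config.providers[provider][:host]`. A configuration sketch, assuming the gem's configure block exposes `providers` as a writable hash (the same key `ollama_host` reads from `class_config` in the extensions file); the remote host name is hypothetical:

```ruby
AiClient.configure do |config|
  # Route :ollama to a remote machine instead of http://localhost:11434.
  config.providers[:ollama]  = { host: 'http://gpu-box.local:11434' }  # hypothetical host
  # Likewise :localai, whose default stays http://localhost:8080.
  config.providers[:localai] = { host: 'http://localhost:9090' }
end

client = AiClient.new('llama3.1', provider: :ollama)
```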
metadata CHANGED
@@ -1,14 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: ai_client
  version: !ruby/object:Gem::Version
-   version: 0.4.1
+   version: 0.4.4
  platform: ruby
  authors:
  - Dewayne VanHoozer
- autorequire:
  bindir: bin
  cert_chain: []
- date: 2024-10-21 00:00:00.000000000 Z
+ date: 2025-03-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: active_hash
@@ -231,12 +230,14 @@ files:
  - lib/ai_client/logger_middleware.rb
  - lib/ai_client/middleware.rb
  - lib/ai_client/models.yml
+ - lib/ai_client/ollama_extensions.rb
  - lib/ai_client/open_router_extensions.rb
  - lib/ai_client/retry_middleware.rb
  - lib/ai_client/speak.rb
  - lib/ai_client/tool.rb
  - lib/ai_client/transcribe.rb
  - lib/ai_client/version.rb
+ - lib/ai_client/xai.rb
  - sig/ai_client.rbs
  - the_ollama_model_problem.md
  homepage: https://github.com/MadBomber/ai_client
@@ -247,7 +248,6 @@ metadata:
    homepage_uri: https://github.com/MadBomber/ai_client
    source_code_uri: https://github.com/MadBomber/ai_client
    changelog_uri: https://github.com/MadBomber/ai_client/blob/main/CHANGELOG.md
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -262,8 +262,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    - !ruby/object:Gem::Version
      version: '0'
  requirements: []
- rubygems_version: 3.5.22
- signing_key:
+ rubygems_version: 3.6.5
  specification_version: 4
  summary: A generic AI Client for many providers
  test_files: []