durable-llm 0.1.4 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/.envrc +7 -0
  3. data/CHANGELOG.md +5 -0
  4. data/CLI.md +0 -2
  5. data/Gemfile +7 -9
  6. data/README.md +564 -30
  7. data/Rakefile +16 -6
  8. data/devenv.lock +171 -0
  9. data/devenv.nix +12 -0
  10. data/devenv.yaml +8 -0
  11. data/durable-llm.gemspec +52 -0
  12. data/examples/openai_quick_complete.rb +4 -2
  13. data/lib/durable/llm/cli.rb +218 -22
  14. data/lib/durable/llm/client.rb +228 -8
  15. data/lib/durable/llm/configuration.rb +163 -10
  16. data/lib/durable/llm/convenience.rb +102 -0
  17. data/lib/durable/llm/errors.rb +185 -0
  18. data/lib/durable/llm/provider_utilities.rb +201 -0
  19. data/lib/durable/llm/providers/anthropic.rb +232 -24
  20. data/lib/durable/llm/providers/azure_openai.rb +347 -0
  21. data/lib/durable/llm/providers/base.rb +220 -11
  22. data/lib/durable/llm/providers/cohere.rb +157 -11
  23. data/lib/durable/llm/providers/deepseek.rb +233 -0
  24. data/lib/durable/llm/providers/fireworks.rb +304 -0
  25. data/lib/durable/llm/providers/google.rb +327 -0
  26. data/lib/durable/llm/providers/groq.rb +133 -25
  27. data/lib/durable/llm/providers/huggingface.rb +120 -17
  28. data/lib/durable/llm/providers/mistral.rb +431 -0
  29. data/lib/durable/llm/providers/openai.rb +150 -4
  30. data/lib/durable/llm/providers/opencode.rb +253 -0
  31. data/lib/durable/llm/providers/openrouter.rb +256 -0
  32. data/lib/durable/llm/providers/perplexity.rb +273 -0
  33. data/lib/durable/llm/providers/together.rb +346 -0
  34. data/lib/durable/llm/providers/xai.rb +355 -0
  35. data/lib/durable/llm/providers.rb +113 -13
  36. data/lib/durable/llm/response_helpers.rb +185 -0
  37. data/lib/durable/llm/version.rb +5 -1
  38. data/lib/durable/llm.rb +214 -1
  39. data/lib/durable.rb +29 -4
  40. data/sig/durable/llm.rbs +303 -1
  41. metadata +106 -28
  42. data/Gemfile.lock +0 -103
@@ -1,33 +1,218 @@
# frozen_string_literal: true

# Custom exception hierarchy for the Durable LLM gem. Every failure mode the
# gem can surface — API faults, rate limiting, authentication problems,
# network trouble, configuration mistakes, and more — gets its own subclass
# of Durable::Llm::Error so callers can handle errors as precisely or as
# broadly as they need.

module Durable
  module Llm
    # Root of the gem's exception tree.
    #
    # Rescue this class to catch every error raised by Durable LLM in one place.
    #
    # @example Rescuing all Durable LLM errors
    #   begin
    #     # LLM operation
    #   rescue Durable::Llm::Error => e
    #     puts "LLM operation failed: #{e.message}"
    #   end
    class Error < StandardError; end

    # Raised for API failures that do not fit a more specific category such
    # as authentication, rate limiting, or server errors.
    class APIError < Error; end

    # Raised when the provider reports that the API rate limit was exceeded,
    # typically because too many requests arrived in a short period. Callers
    # should retry with exponential backoff.
    #
    # @example Retry with backoff
    #   retries = 0
    #   begin
    #     client.complete("Hello")
    #   rescue Durable::Llm::RateLimitError => e
    #     if retries < 3
    #       sleep(2**retries)
    #       retries += 1
    #       retry
    #     else
    #       puts "Rate limit exceeded after retries: #{e.message}"
    #     end
    #   end
    class RateLimitError < Error; end

    # Raised when authentication with the LLM provider fails — usually an
    # invalid, expired, or missing API key. Check your key configuration.
    class AuthenticationError < Error; end

    # Raised when the request carries malformed data, invalid parameters,
    # or otherwise violates the API's constraints.
    class InvalidRequestError < Error; end

    # Raised when a requested resource (such as a model) does not exist or
    # is not available to the current account.
    class ResourceNotFoundError < Error; end

    # Raised when a request takes longer than the configured timeout period.
    # Consider increasing the timeout setting or retrying.
    class TimeoutError < Error; end

    # Raised when the provider's server reports an internal error. The fault
    # is on the provider's side, not the request; retrying after a short
    # delay is reasonable.
    class ServerError < Error; end

    # Raised when the requested provider is not implemented or not
    # configured in the Durable LLM gem.
    class UnsupportedProviderError < Error; end

    # Raised when required configuration is missing, invalid, or
    # inconsistent. Check your configuration settings.
    class ConfigurationError < Error; end

    # Raised when the specified model does not exist or is not accessible —
    # a model-specific counterpart to ResourceNotFoundError.
    class ModelNotFoundError < Error; end

    # Raised when the account has exhausted its usage limits or lacks
    # sufficient credits for the requested operation.
    class InsufficientQuotaError < Error; end

    # Raised when the provider returns a response that cannot be parsed or
    # does not match the expected format.
    class InvalidResponseError < Error; end

    # Raised when the request cannot reach the LLM provider due to network
    # issues, DNS problems, or other connectivity failures.
    class NetworkError < Error; end

    # Raised during streaming operations when the connection is interrupted,
    # the stream format is invalid, or another streaming-specific issue
    # arises.
    class StreamingError < Error; end
  end
end

# Copyright (c) 2025 Durable Programming, LLC. All rights reserved.
@@ -0,0 +1,201 @@
# frozen_string_literal: true

# Utility functions for working with LLM providers: provider discovery,
# capability comparison, and model routing, so developers can choose and
# switch between providers efficiently.

module Durable
  module Llm
    # Helper methods for provider management and comparison.
    #
    # Offers support for:
    # - Discovering available providers
    # - Finding the provider that backs a given model
    # - Comparing provider capabilities
    # - Routing requests to appropriate providers with fallback
    #
    # @example Find provider for a model
    #   ProviderUtilities.provider_for_model('gpt-4')
    #   # => :openai
    module ProviderUtilities
      module_function

      # Lists every provider known to the gem.
      #
      # @return [Array<Symbol>] provider names
      # @example
      #   ProviderUtilities.available_providers
      #   # => [:openai, :anthropic, :google, ...]
      def available_providers
        Providers.available_providers
      end

      # Resolves the provider that supports a given model.
      #
      # @param model_id [String] the model identifier
      # @return [Symbol, nil] the provider name, or nil when no provider
      #   claims the model
      # @example
      #   ProviderUtilities.provider_for_model('claude-3-opus-20240229')
      #   # => :anthropic
      def provider_for_model(model_id)
        Providers.model_id_to_provider(model_id)
      end

      # Fetches the models a provider exposes. Best-effort: any failure
      # (missing key, network error, unknown provider) yields an empty list
      # rather than raising.
      #
      # @param provider_name [Symbol, String] the provider name
      # @param options [Hash] provider configuration options
      # @return [Array<String>] model IDs, or [] on error
      def models_for_provider(provider_name, **options)
        Durable::Llm.models(provider_name, **options)
      rescue StandardError
        []
      end

      # Checks whether a provider supports a capability. Unknown providers
      # or instantiation failures are reported as unsupported (false), never
      # raised.
      #
      # @param provider_name [Symbol, String] the provider name
      # @param capability [Symbol] :streaming, :embeddings, :chat, or :completion
      # @return [Boolean] true when the capability is supported
      def supports_capability?(provider_name, capability)
        provider = Providers.provider_class_for(provider_name).new

        case capability
        when :streaming then provider.respond_to?(:stream?) && provider.stream?
        when :embeddings then provider.respond_to?(:embedding)
        when :chat, :completion then provider.respond_to?(:completion)
        else false
        end
      rescue StandardError
        false
      end

      # Filters the available providers down to those supporting a capability.
      #
      # @param capability [Symbol] the capability to filter by
      # @return [Array<Symbol>] providers supporting the capability
      def providers_with_capability(capability)
        available_providers.select { |name| supports_capability?(name, capability) }
      end

      # Builds a per-model comparison across providers.
      #
      # @param model_ids [Array<String>] models to compare
      # @return [Array<Hash>] one entry per model with :model, :provider,
      #   and :streaming keys
      def compare_models(model_ids)
        model_ids.map do |id|
          owner = provider_for_model(id)
          can_stream = owner ? supports_capability?(owner, :streaming) : false
          { model: id, provider: owner, streaming: can_stream }
        end
      end

      # Builds an ordered list of clients to use as a fallback chain for
      # redundancy. Providers whose client cannot be constructed are warned
      # about and skipped.
      #
      # @param providers [Array<Symbol>] ordered list of providers to try
      # @param options [Hash] configuration options; :model_map maps each
      #   provider to the model it should use
      # @return [Array<Durable::Llm::Client>] clients in fallback order
      # @example
      #   ProviderUtilities.fallback_chain(
      #     [:openai, :anthropic],
      #     model_map: { openai: 'gpt-4', anthropic: 'claude-3-opus-20240229' }
      #   )
      def fallback_chain(providers, options = {})
        models = options[:model_map] || {}

        clients = providers.map do |provider|
          begin
            Durable::Llm.new(provider, model: models[provider])
          rescue StandardError => e
            warn "Failed to create client for #{provider}: #{e.message}"
            nil
          end
        end
        clients.compact
      end

      # Runs a completion, trying each provider in order until one succeeds.
      #
      # @param text [String] the input text
      # @param providers [Array<Symbol>] ordered providers to try
      # @param model_map [Hash] map of provider to model
      # @return [String, nil] the completion text, or nil if every provider fails
      def complete_with_fallback(text, providers:, model_map: {})
        providers.each do |provider|
          client = Durable::Llm.new(provider, model: model_map[provider])
          return client.complete(text)
        rescue StandardError => e
          warn "Provider #{provider} failed: #{e.message}"
          next
        end

        nil # every provider failed
      end

      # Summarizes one provider's capabilities. On unexpected failure the
      # hash carries an :error key instead of capability flags.
      #
      # @param provider_name [Symbol, String] the provider name
      # @return [Hash] e.g. { name: :openai, streaming: true, embeddings: true, chat: true }
      def provider_info(provider_name)
        {
          name: provider_name,
          streaming: supports_capability?(provider_name, :streaming),
          embeddings: supports_capability?(provider_name, :embeddings),
          chat: supports_capability?(provider_name, :chat)
        }
      rescue StandardError => e
        { name: provider_name, error: e.message }
      end

      # Summarizes every available provider.
      #
      # @return [Array<Hash>] one provider_info hash per provider
      def all_provider_info
        available_providers.map { |name| provider_info(name) }
      end
    end
  end
end

# Copyright (c) 2025 Durable Programming, LLC. All rights reserved.