rails_ai_promptable 0.1.0

This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
@@ -0,0 +1,124 @@
+ # frozen_string_literal: true
+
+ require 'rails/generators'
+
+ module RailsAiPromptable
+   module Generators
+     class InstallGenerator < Rails::Generators::Base
+       source_root File.expand_path('templates', __dir__)
+
+       desc 'Creates a RailsAIPromptable initializer file'
+
+       class_option :provider,
+                    type: :string,
+                    default: 'openai',
+                    desc: 'AI provider to use (openai, anthropic, gemini, cohere, azure_openai, mistral, openrouter)'
+
+       def copy_initializer_file
+         template 'rails_ai_promptable.rb.tt', 'config/initializers/rails_ai_promptable.rb'
+       end
+
+       def show_readme
+         readme 'POST_INSTALL' if behavior == :invoke
+       end
+
+       private
+
+       def provider_name
+         options['provider'].to_s
+       end
+
+       def provider_config
+         case provider_name
+         when 'openai'
+           openai_config
+         when 'anthropic', 'claude'
+           anthropic_config
+         when 'gemini', 'google'
+           gemini_config
+         when 'cohere'
+           cohere_config
+         when 'azure_openai', 'azure'
+           azure_config
+         when 'mistral'
+           mistral_config
+         when 'openrouter'
+           openrouter_config
+         else
+           openai_config
+         end
+       end
+
+       def openai_config
+         {
+           provider: ':openai',
+           api_key: "ENV['OPENAI_API_KEY']",
+           default_model: "'gpt-4o-mini'",
+           additional_config: " # config.openai_base_url = 'https://api.openai.com/v1' # Optional: custom endpoint"
+         }
+       end
+
+       def anthropic_config
+         {
+           provider: ':anthropic',
+           api_key: "ENV['ANTHROPIC_API_KEY']",
+           default_model: "'claude-3-5-sonnet-20241022'",
+           additional_config: " # config.anthropic_base_url = 'https://api.anthropic.com/v1' # Optional: custom endpoint"
+         }
+       end
+
+       def gemini_config
+         {
+           provider: ':gemini',
+           api_key: "ENV['GEMINI_API_KEY']",
+           default_model: "'gemini-pro'",
+           additional_config: " # config.gemini_base_url = 'https://generativelanguage.googleapis.com/v1beta' # Optional: custom endpoint"
+         }
+       end
+
+       def cohere_config
+         {
+           provider: ':cohere',
+           api_key: "ENV['COHERE_API_KEY']",
+           default_model: "'command'",
+           additional_config: " # config.cohere_base_url = 'https://api.cohere.ai/v1' # Optional: custom endpoint"
+         }
+       end
+
+       def azure_config
+         {
+           provider: ':azure_openai',
+           api_key: "ENV['AZURE_OPENAI_API_KEY']",
+           default_model: "'gpt-4'",
+           additional_config: <<~CONFIG.chomp
+             config.azure_base_url = ENV['AZURE_OPENAI_BASE_URL'] # Required: e.g., https://your-resource.openai.azure.com
+             config.azure_deployment_name = ENV['AZURE_OPENAI_DEPLOYMENT_NAME'] # Required
+             # config.azure_api_version = '2024-02-15-preview' # Optional: API version
+           CONFIG
+         }
+       end
+
+       def mistral_config
+         {
+           provider: ':mistral',
+           api_key: "ENV['MISTRAL_API_KEY']",
+           default_model: "'mistral-small-latest'",
+           additional_config: " # config.mistral_base_url = 'https://api.mistral.ai/v1' # Optional: custom endpoint"
+         }
+       end
+
+       def openrouter_config
+         {
+           provider: ':openrouter',
+           api_key: "ENV['OPENROUTER_API_KEY']",
+           default_model: "'openai/gpt-3.5-turbo'",
+           additional_config: <<~CONFIG.chomp
+             # config.openrouter_app_name = 'Your App Name' # Optional: for tracking
+             # config.openrouter_site_url = 'https://yourapp.com' # Optional: for attribution
+             # config.openrouter_base_url = 'https://openrouter.ai/api/v1' # Optional: custom endpoint
+           CONFIG
+         }
+       end
+     end
+   end
+ end
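
Note: based on the standard Rails generator namespace inferred from RailsAiPromptable::Generators::InstallGenerator, the installer above would typically be run as "rails generate rails_ai_promptable:install --provider=anthropic" (or with any of the listed providers). The exact invocation is not shown in this diff, so treat that command as an assumption drawn from Rails conventions.
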
@@ -0,0 +1,46 @@
+ ===============================================================================
+
+ RailsAIPromptable has been successfully installed!
+
+ ===============================================================================
+
+ Next steps:
+
+ 1. Set up your API key in your environment:
+
+    <%= provider_config[:api_key].include?("'OPENAI_API_KEY'") ? 'export OPENAI_API_KEY="your-api-key-here"' : '' %>
+    <%= provider_config[:api_key].include?('ANTHROPIC') ? 'export ANTHROPIC_API_KEY="your-api-key-here"' : '' %>
+    <%= provider_config[:api_key].include?('GEMINI') ? 'export GEMINI_API_KEY="your-api-key-here"' : '' %>
+    <%= provider_config[:api_key].include?('COHERE') ? 'export COHERE_API_KEY="your-api-key-here"' : '' %>
+    <%= provider_config[:api_key].include?('AZURE') ? 'export AZURE_OPENAI_API_KEY="your-api-key-here"' : '' %>
+    <%= provider_config[:api_key].include?('MISTRAL') ? 'export MISTRAL_API_KEY="your-api-key-here"' : '' %>
+    <%= provider_config[:api_key].include?('OPENROUTER') ? 'export OPENROUTER_API_KEY="your-api-key-here"' : '' %>
+
+ 2. Review and customize the configuration in:
+    config/initializers/rails_ai_promptable.rb
+
+ 3. Include RailsAIPromptable::Promptable in your models:
+
+    class Article < ApplicationRecord
+      include RailsAIPromptable::Promptable
+
+      prompt_template "Summarize this article: %{content}"
+
+      def generate_summary
+        ai_generate(context: { content: body })
+      end
+    end
+
+ 4. Start generating AI content!
+
+    article = Article.first
+    summary = article.generate_summary
+
+ ===============================================================================
+
+ For more information and examples:
+
+ • Documentation: https://github.com/shoaibmalik786/rails_ai_promptable
+ • Issues: https://github.com/shoaibmalik786/rails_ai_promptable/issues
+
+ ===============================================================================
@@ -0,0 +1,42 @@
+ # frozen_string_literal: true
+
+ # RailsAIPromptable Configuration
+ #
+ # This file was generated by rails_ai_promptable:install
+ #
+ # For more configuration options, see:
+ # https://github.com/shoaibmalik786/rails_ai_promptable
+
+ RailsAIPromptable.configure do |config|
+   # ==> Provider Configuration
+   # Choose your AI provider. Supported options:
+   # :openai, :anthropic, :gemini, :cohere, :azure_openai, :mistral, :openrouter
+   config.provider = <%= provider_config[:provider] %>
+
+   # ==> Authentication
+   # API key for the selected provider
+   config.api_key = <%= provider_config[:api_key] %>
+
+   # ==> Model Settings
+   # Default model to use for AI generation
+   config.default_model = <%= provider_config[:default_model] %>
+
+   # ==> General Settings
+   # HTTP timeout in seconds for API requests
+   config.timeout = 30
+
+   # Logger for debugging and monitoring
+   # config.logger = Rails.logger
+
+   # ==> Provider-Specific Configuration
+   <%= provider_config[:additional_config] %>
+
+   # ==> Multiple Providers (Optional)
+   # You can configure multiple providers and switch between them at runtime
+   # Example:
+   # config.anthropic_api_key = ENV['ANTHROPIC_API_KEY']
+   # config.gemini_api_key = ENV['GEMINI_API_KEY']
+   # config.cohere_api_key = ENV['COHERE_API_KEY']
+   # config.mistral_api_key = ENV['MISTRAL_API_KEY']
+   # config.openrouter_api_key = ENV['OPENROUTER_API_KEY']
+ end
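
For reference, a minimal sketch of how the rendered initializer could look when the generator runs with --provider=anthropic, filled in from the anthropic_config defaults shown earlier in this diff (the actual output depends on the options you pass):

    RailsAIPromptable.configure do |config|
      config.provider = :anthropic
      config.api_key = ENV['ANTHROPIC_API_KEY']
      config.default_model = 'claude-3-5-sonnet-20241022'
      config.timeout = 30
      # config.anthropic_base_url = 'https://api.anthropic.com/v1' # Optional: custom endpoint
    end
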
@@ -0,0 +1,38 @@
+ # frozen_string_literal: true
+
+ begin
+   require 'active_job'
+ rescue LoadError
+   # ActiveJob not available, skip loading BackgroundJob
+ end
+
+ if defined?(ActiveJob)
+   module RailsAIPromptable
+     class BackgroundJob < ActiveJob::Base
+       queue_as :default
+
+       def perform(klass_name, record_id, context, kwargs)
+         klass = klass_name.constantize
+         record = klass.find_by(id: record_id)
+         return unless record
+
+         result = record.ai_generate(context: context, **(kwargs || {}))
+
+         # Call the callback method if defined on the record
+         if record.respond_to?(:ai_generation_completed)
+           record.ai_generation_completed(result)
+         end
+
+         # Store result in ai_generated_content attribute if it exists
+         if record.respond_to?(:ai_generated_content=)
+           record.ai_generated_content = result
+           record.save if record.respond_to?(:save)
+         end
+
+         RailsAIPromptable.configuration.logger.info("[rails_ai_promptable] background generation completed for #{klass_name}##{record_id}")
+
+         result
+       end
+     end
+   end
+ end
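
The job only writes back or notifies when the record opts in via the respond_to? checks above. A hedged sketch of a model using both hooks (the Article model, its body column, and the ai_generated_content column are illustrative; the column would come from your own migration):

    class Article < ApplicationRecord
      include RailsAIPromptable::Promptable

      prompt_template "Summarize this article: %{content}"

      # Invoked by RailsAIPromptable::BackgroundJob once generation finishes
      def ai_generation_completed(result)
        Rails.logger.info("AI summary ready for Article##{id}")
      end

      def generate_summary_later
        ai_generate_later(context: { content: body })
      end
    end
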
@@ -0,0 +1,82 @@
+ # frozen_string_literal: true
+
+ module RailsAIPromptable
+   class Configuration
+     attr_accessor :provider, :api_key, :default_model, :timeout, :logger,
+                   # OpenAI
+                   :openai_base_url,
+                   # Anthropic/Claude
+                   :anthropic_api_key, :anthropic_base_url,
+                   # Google Gemini
+                   :gemini_api_key, :gemini_base_url,
+                   # Cohere
+                   :cohere_api_key, :cohere_base_url,
+                   # Azure OpenAI
+                   :azure_api_key, :azure_base_url, :azure_api_version, :azure_deployment_name,
+                   # Mistral AI
+                   :mistral_api_key, :mistral_base_url,
+                   # OpenRouter
+                   :openrouter_api_key, :openrouter_base_url, :openrouter_app_name, :openrouter_site_url
+
+     def initialize
+       @provider = :openai
+       @api_key = ENV['OPENAI_API_KEY']
+       @default_model = 'gpt-4o-mini'
+       @timeout = 30
+       @logger = Logger.new($stdout)
+
+       # OpenAI settings
+       @openai_base_url = 'https://api.openai.com/v1'
+
+       # Anthropic settings
+       @anthropic_api_key = ENV['ANTHROPIC_API_KEY']
+       @anthropic_base_url = 'https://api.anthropic.com/v1'
+
+       # Gemini settings
+       @gemini_api_key = ENV['GEMINI_API_KEY']
+       @gemini_base_url = 'https://generativelanguage.googleapis.com/v1beta'
+
+       # Cohere settings
+       @cohere_api_key = ENV['COHERE_API_KEY']
+       @cohere_base_url = 'https://api.cohere.ai/v1'
+
+       # Azure OpenAI settings
+       @azure_api_key = ENV['AZURE_OPENAI_API_KEY']
+       @azure_base_url = ENV['AZURE_OPENAI_BASE_URL']
+       @azure_api_version = '2024-02-15-preview'
+       @azure_deployment_name = ENV['AZURE_OPENAI_DEPLOYMENT_NAME']
+
+       # Mistral AI settings
+       @mistral_api_key = ENV['MISTRAL_API_KEY']
+       @mistral_base_url = 'https://api.mistral.ai/v1'
+
+       # OpenRouter settings
+       @openrouter_api_key = ENV['OPENROUTER_API_KEY']
+       @openrouter_base_url = 'https://openrouter.ai/api/v1'
+       @openrouter_app_name = ENV['OPENROUTER_APP_NAME']
+       @openrouter_site_url = ENV['OPENROUTER_SITE_URL']
+     end
+
+     # Helper method to get the appropriate default model for the current provider
+     def model_for_provider
+       case provider.to_sym
+       when :openai
+         'gpt-4o-mini'
+       when :anthropic
+         'claude-3-5-sonnet-20241022'
+       when :gemini
+         'gemini-pro'
+       when :cohere
+         'command'
+       when :azure_openai
+         azure_deployment_name || 'gpt-4'
+       when :mistral
+         'mistral-small-latest'
+       when :openrouter
+         'openai/gpt-3.5-turbo'
+       else
+         default_model
+       end
+     end
+   end
+ end
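
As a usage sketch (names taken from the accessors above), the configuration can be tuned globally and model_for_provider then supplies a per-provider default model; where the gem consults model_for_provider is not shown in this diff:

    RailsAIPromptable.configure do |config|
      config.provider = :mistral
      config.mistral_api_key = ENV['MISTRAL_API_KEY']
      config.timeout = 15
      config.logger = Rails.logger
    end

    RailsAIPromptable.configuration.model_for_provider # => 'mistral-small-latest'
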
@@ -0,0 +1,18 @@
+ # frozen_string_literal: true
+ require 'logger'
+
+ module RailsAIPromptable
+   class Logger
+     def initialize(io = $stdout)
+       @logger = ::Logger.new(io)
+     end
+
+     def info(msg)
+       @logger.info(msg)
+     end
+
+     def error(msg)
+       @logger.error(msg)
+     end
+   end
+ end
@@ -0,0 +1,65 @@
+ # frozen_string_literal: true
+
+ require 'active_support/concern'
+
+ module RailsAIPromptable
+   module Promptable
+     extend ActiveSupport::Concern
+
+     included do
+       class_attribute :ai_prompt_template, instance_accessor: false
+     end
+
+     class_methods do
+       def prompt_template(template = nil)
+         return ai_prompt_template if template.nil?
+         self.ai_prompt_template = template
+       end
+
+       # Load a template from the template registry
+       def ai_use_template(name)
+         template = RailsAIPromptable::TemplateRegistry.get(name)
+
+         if template.nil?
+           raise ArgumentError, "Template '#{name}' not found. Available templates: #{RailsAIPromptable::TemplateRegistry.list.join(', ')}"
+         end
+
+         self.ai_prompt_template = template
+       end
+     end
+
+     def ai_generate(context: {}, model: nil, temperature: nil, format: :text)
+       template = self.class.ai_prompt_template || ''
+       prompt = render_template(template, context)
+
+       RailsAIPromptable.configuration.logger.info("[rails_ai_promptable] prompt: #{prompt}")
+
+       response = RailsAIPromptable.client.generate(
+         prompt: prompt,
+         model: model || RailsAIPromptable.configuration.default_model,
+         temperature: temperature || 0.7,
+         format: format
+       )
+
+       # Providers return the extracted text content, so pass it straight through
+       response
+     end
+
+     def ai_generate_later(context: {}, **kwargs)
+       RailsAIPromptable.configuration.logger.info('[rails_ai_promptable] enqueuing ai_generate_later')
+       # Enqueue the gem's default ActiveJob class (RailsAIPromptable::BackgroundJob)
+       RailsAIPromptable::BackgroundJob.perform_later(self.class.name, id, context, kwargs)
+     end
+
+     private
+
+     def render_template(template, context)
+       template % context.transform_keys(&:to_sym)
+     rescue KeyError
+       # fallback: simple interpolation using gsub
+       result = template.dup
+       context.each { |k, v| result.gsub!("%{#{k}}", v.to_s) }
+       result
+     end
+   end
+ end
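
A short sketch of the concern in use, combining the prompt_template DSL with an immediate call and a background call (the Product model and its columns are illustrative):

    class Product < ApplicationRecord
      include RailsAIPromptable::Promptable

      prompt_template "Write a one-paragraph blurb for %{name}, priced at %{price}"

      def blurb_now
        ai_generate(context: { name: name, price: price }, temperature: 0.4)
      end

      def blurb_later
        ai_generate_later(context: { name: name, price: price })
      end
    end
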
@@ -0,0 +1,55 @@
+ # frozen_string_literal: true
+
+ require 'net/http'
+ require 'json'
+
+ module RailsAIPromptable
+   module Providers
+     class AnthropicProvider < BaseProvider
+       API_VERSION = '2023-06-01'
+
+       def initialize(configuration)
+         super
+         @api_key = configuration.anthropic_api_key || configuration.api_key
+         @base_url = configuration.anthropic_base_url || 'https://api.anthropic.com/v1'
+         @timeout = configuration.timeout
+       end
+
+       def generate(prompt:, model:, temperature:, format:)
+         uri = URI.parse("#{@base_url}/messages")
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = uri.scheme == 'https'
+         http.read_timeout = @timeout
+
+         request = Net::HTTP::Post.new(uri.request_uri, {
+           'Content-Type' => 'application/json',
+           'x-api-key' => @api_key,
+           'anthropic-version' => API_VERSION
+         })
+
+         body = {
+           model: model,
+           messages: [{ role: 'user', content: prompt }],
+           temperature: temperature,
+           max_tokens: 4096
+         }
+
+         request.body = body.to_json
+
+         response = http.request(request)
+         parsed = JSON.parse(response.body)
+
+         if response.code.to_i >= 400
+           error_message = parsed.dig('error', 'message') || 'Unknown error'
+           raise "Anthropic API error: #{error_message}"
+         end
+
+         # Extract content from Anthropic response
+         parsed.dig('content', 0, 'text')
+       rescue => e
+         RailsAIPromptable.configuration.logger.error("[rails_ai_promptable] anthropic error: #{e.message}")
+         nil
+       end
+     end
+   end
+ end
@@ -0,0 +1,65 @@
+ # frozen_string_literal: true
+
+ require 'net/http'
+ require 'json'
+
+ module RailsAIPromptable
+   module Providers
+     class AzureOpenAIProvider < BaseProvider
+       def initialize(configuration)
+         super
+         @api_key = configuration.azure_api_key || configuration.api_key
+         @base_url = configuration.azure_base_url
+         @api_version = configuration.azure_api_version || '2024-02-15-preview'
+         @timeout = configuration.timeout
+         @deployment_name = configuration.azure_deployment_name
+
+         validate_azure_configuration!
+       end
+
+       def generate(prompt:, model:, temperature:, format:)
+         # Azure uses deployment name instead of model in the URL
+         deployment = @deployment_name || model
+         uri = URI.parse("#{@base_url}/openai/deployments/#{deployment}/chat/completions?api-version=#{@api_version}")
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = uri.scheme == 'https'
+         http.read_timeout = @timeout
+
+         request = Net::HTTP::Post.new(uri.request_uri, {
+           'Content-Type' => 'application/json',
+           'api-key' => @api_key
+         })
+
+         body = {
+           messages: [{ role: 'user', content: prompt }],
+           temperature: temperature,
+           max_tokens: 2048
+         }
+
+         request.body = body.to_json
+
+         response = http.request(request)
+         parsed = JSON.parse(response.body)
+
+         if response.code.to_i >= 400
+           error_message = parsed.dig('error', 'message') || 'Unknown error'
+           raise "Azure OpenAI API error: #{error_message}"
+         end
+
+         # Extract content (same structure as OpenAI)
+         parsed.dig('choices', 0, 'message', 'content')
+       rescue => e
+         RailsAIPromptable.configuration.logger.error("[rails_ai_promptable] azure_openai error: #{e.message}")
+         nil
+       end
+
+       private
+
+       def validate_azure_configuration!
+         unless @base_url
+           raise ArgumentError, 'Azure OpenAI requires azure_base_url to be set (e.g., https://your-resource.openai.azure.com)'
+         end
+       end
+     end
+   end
+ end
@@ -0,0 +1,15 @@
+ # frozen_string_literal: true
+
+ module RailsAIPromptable
+   module Providers
+     class BaseProvider
+       def initialize(configuration)
+         @config = configuration
+       end
+
+       def generate(prompt:, model:, temperature:, format:)
+         raise NotImplementedError
+       end
+     end
+   end
+ end
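
BaseProvider is the contract every bundled provider implements: accept the configuration, answer generate(prompt:, model:, temperature:, format:). A hedged sketch of a custom subclass following that contract (how a custom provider would be registered with RailsAIPromptable.client is not shown in this diff):

    module RailsAIPromptable
      module Providers
        # Trivial provider, e.g. for tests: echoes the prompt back unchanged
        class EchoProvider < BaseProvider
          def generate(prompt:, model:, temperature:, format:)
            prompt
          end
        end
      end
    end
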
@@ -0,0 +1,52 @@
+ # frozen_string_literal: true
+
+ require 'net/http'
+ require 'json'
+
+ module RailsAIPromptable
+   module Providers
+     class CohereProvider < BaseProvider
+       def initialize(configuration)
+         super
+         @api_key = configuration.cohere_api_key || configuration.api_key
+         @base_url = configuration.cohere_base_url || 'https://api.cohere.ai/v1'
+         @timeout = configuration.timeout
+       end
+
+       def generate(prompt:, model:, temperature:, format:)
+         uri = URI.parse("#{@base_url}/generate")
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = uri.scheme == 'https'
+         http.read_timeout = @timeout
+
+         request = Net::HTTP::Post.new(uri.request_uri, {
+           'Content-Type' => 'application/json',
+           'Authorization' => "Bearer #{@api_key}"
+         })
+
+         body = {
+           model: model,
+           prompt: prompt,
+           temperature: temperature,
+           max_tokens: 2048
+         }
+
+         request.body = body.to_json
+
+         response = http.request(request)
+         parsed = JSON.parse(response.body)
+
+         if response.code.to_i >= 400
+           error_message = parsed['message'] || 'Unknown error'
+           raise "Cohere API error: #{error_message}"
+         end
+
+         # Extract content from Cohere response
+         parsed.dig('generations', 0, 'text')
+       rescue => e
+         RailsAIPromptable.configuration.logger.error("[rails_ai_promptable] cohere error: #{e.message}")
+         nil
+       end
+     end
+   end
+ end
@@ -0,0 +1,55 @@
+ # frozen_string_literal: true
+
+ require 'net/http'
+ require 'json'
+
+ module RailsAIPromptable
+   module Providers
+     class GeminiProvider < BaseProvider
+       def initialize(configuration)
+         super
+         @api_key = configuration.gemini_api_key || configuration.api_key
+         @base_url = configuration.gemini_base_url || 'https://generativelanguage.googleapis.com/v1beta'
+         @timeout = configuration.timeout
+       end
+
+       def generate(prompt:, model:, temperature:, format:)
+         # Gemini uses the API key as a query parameter
+         uri = URI.parse("#{@base_url}/models/#{model}:generateContent?key=#{@api_key}")
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = uri.scheme == 'https'
+         http.read_timeout = @timeout
+
+         request = Net::HTTP::Post.new(uri.request_uri, {
+           'Content-Type' => 'application/json'
+         })
+
+         body = {
+           contents: [{
+             parts: [{ text: prompt }]
+           }],
+           generationConfig: {
+             temperature: temperature,
+             maxOutputTokens: 2048
+           }
+         }
+
+         request.body = body.to_json
+
+         response = http.request(request)
+         parsed = JSON.parse(response.body)
+
+         if response.code.to_i >= 400
+           error_message = parsed.dig('error', 'message') || 'Unknown error'
+           raise "Gemini API error: #{error_message}"
+         end
+
+         # Extract content from Gemini response
+         parsed.dig('candidates', 0, 'content', 'parts', 0, 'text')
+       rescue => e
+         RailsAIPromptable.configuration.logger.error("[rails_ai_promptable] gemini error: #{e.message}")
+         nil
+       end
+     end
+   end
+ end
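
Note that every provider in this diff rescues its own errors, logs them, and returns nil, so callers should expect a nil result on failure. A minimal sketch of defensive usage (generate_summary is the README's illustrative model method):

    summary = article.generate_summary
    if summary.nil?
      # Provider errors were logged by rails_ai_promptable; fall back gracefully
      summary = 'Summary unavailable'
    end
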