geminize 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/.standard.yml +3 -0
- data/.yardopts +14 -0
- data/CHANGELOG.md +24 -0
- data/CODE_OF_CONDUCT.md +132 -0
- data/CONTRIBUTING.md +109 -0
- data/LICENSE.txt +21 -0
- data/README.md +423 -0
- data/Rakefile +10 -0
- data/examples/README.md +75 -0
- data/examples/configuration.rb +58 -0
- data/examples/embeddings.rb +195 -0
- data/examples/multimodal.rb +126 -0
- data/examples/rails_chat/README.md +69 -0
- data/examples/rails_chat/app/controllers/chat_controller.rb +26 -0
- data/examples/rails_chat/app/views/chat/index.html.erb +112 -0
- data/examples/rails_chat/config/routes.rb +8 -0
- data/examples/rails_initializer.rb +46 -0
- data/examples/system_instructions.rb +101 -0
- data/lib/geminize/chat.rb +98 -0
- data/lib/geminize/client.rb +318 -0
- data/lib/geminize/configuration.rb +98 -0
- data/lib/geminize/conversation_repository.rb +161 -0
- data/lib/geminize/conversation_service.rb +126 -0
- data/lib/geminize/embeddings.rb +145 -0
- data/lib/geminize/error_mapper.rb +96 -0
- data/lib/geminize/error_parser.rb +120 -0
- data/lib/geminize/errors.rb +185 -0
- data/lib/geminize/middleware/error_handler.rb +72 -0
- data/lib/geminize/model_info.rb +91 -0
- data/lib/geminize/models/chat_request.rb +186 -0
- data/lib/geminize/models/chat_response.rb +118 -0
- data/lib/geminize/models/content_request.rb +530 -0
- data/lib/geminize/models/content_response.rb +99 -0
- data/lib/geminize/models/conversation.rb +156 -0
- data/lib/geminize/models/embedding_request.rb +222 -0
- data/lib/geminize/models/embedding_response.rb +1064 -0
- data/lib/geminize/models/memory.rb +88 -0
- data/lib/geminize/models/message.rb +140 -0
- data/lib/geminize/models/model.rb +171 -0
- data/lib/geminize/models/model_list.rb +124 -0
- data/lib/geminize/models/stream_response.rb +99 -0
- data/lib/geminize/rails/app/controllers/concerns/geminize/controller.rb +105 -0
- data/lib/geminize/rails/app/helpers/geminize_helper.rb +125 -0
- data/lib/geminize/rails/controller_additions.rb +41 -0
- data/lib/geminize/rails/engine.rb +29 -0
- data/lib/geminize/rails/helper_additions.rb +37 -0
- data/lib/geminize/rails.rb +50 -0
- data/lib/geminize/railtie.rb +33 -0
- data/lib/geminize/request_builder.rb +57 -0
- data/lib/geminize/text_generation.rb +285 -0
- data/lib/geminize/validators.rb +150 -0
- data/lib/geminize/vector_utils.rb +164 -0
- data/lib/geminize/version.rb +5 -0
- data/lib/geminize.rb +527 -0
- data/lib/generators/geminize/install_generator.rb +22 -0
- data/lib/generators/geminize/templates/README +31 -0
- data/lib/generators/geminize/templates/initializer.rb +38 -0
- data/sig/geminize.rbs +4 -0
- metadata +218 -0
data/lib/geminize.rb
ADDED
@@ -0,0 +1,527 @@
# frozen_string_literal: true

# Conditionally load dotenv if it's available
begin
  require "dotenv"
  Dotenv.load
rescue LoadError
  # Dotenv is not available, skip loading
end

require_relative "geminize/version"
require_relative "geminize/errors"
require_relative "geminize/configuration"
require_relative "geminize/validators"
require_relative "geminize/error_parser"
require_relative "geminize/error_mapper"
require_relative "geminize/middleware/error_handler"
require_relative "geminize/client"
require_relative "geminize/models/model"
require_relative "geminize/models/model_list"
require_relative "geminize/models/content_request"
require_relative "geminize/models/content_response"
require_relative "geminize/models/stream_response"
require_relative "geminize/models/chat_request"
require_relative "geminize/models/chat_response"
require_relative "geminize/models/message"
require_relative "geminize/models/memory"
require_relative "geminize/models/conversation"
require_relative "geminize/models/embedding_request"
require_relative "geminize/models/embedding_response"
require_relative "geminize/request_builder"
require_relative "geminize/vector_utils"
require_relative "geminize/text_generation"
require_relative "geminize/embeddings"
require_relative "geminize/chat"
require_relative "geminize/conversation_repository"
require_relative "geminize/conversation_service"
require_relative "geminize/model_info"

# Conditionally load Rails integration if Rails is defined
require_relative "geminize/rails" if defined?(::Rails)

# Main module for the Geminize gem
module Geminize
  class Error < StandardError; end

  class << self
    # Default conversation repository
    # @return [Geminize::ConversationRepository]
    def conversation_repository
      @conversation_repository ||= FileConversationRepository.new
    end

    # Set the conversation repository
    # @param repository [Geminize::ConversationRepository] The repository to use
    def conversation_repository=(repository)
      unless repository.is_a?(ConversationRepository)
        raise ArgumentError, "Expected a ConversationRepository, got #{repository.class}"
      end

      @conversation_repository = repository
    end

    # Track the last streaming generator for cancellation support
    # @return [Geminize::TextGeneration, nil]
    attr_accessor :last_streaming_generator

    # Cancel the current streaming operation, if any
    # @return [Boolean] true if a streaming operation was cancelled, false if none was in progress
    def cancel_streaming
      return false unless last_streaming_generator

      last_streaming_generator.cancel_streaming
    end

    # Default conversation service
    # @return [Geminize::ConversationService]
    def conversation_service
      @conversation_service ||= ConversationService.new
    end

    # @return [Geminize::Configuration]
    def configuration
      Configuration.instance
    end

    # Configure the gem
    # @yield [config] Configuration object that can be modified
    # @example
    #   Geminize.configure do |config|
    #     config.api_key = "your-api-key"
    #     config.api_version = "v1beta"
    #     config.default_model = "gemini-1.5-pro-latest"
    #   end
    def configure
      yield(configuration) if block_given?
      configuration
    end

    # Reset the configuration to defaults
    def reset_configuration!
      configuration.reset!
    end

    # Validates the configuration
    # @return [Boolean]
    # @raise [ConfigurationError] if the configuration is invalid
    def validate_configuration!
      configuration.validate!
    end

    # Generate text from a prompt using the Gemini API
    # @param prompt [String] The input prompt
    # @param model_name [String, nil] The model to use (optional)
    # @param params [Hash] Additional generation parameters
    # @option params [Float] :temperature Controls randomness (0.0-1.0)
    # @option params [Integer] :max_tokens Maximum tokens to generate
    # @option params [Float] :top_p Top-p value for nucleus sampling (0.0-1.0)
    # @option params [Integer] :top_k Top-k value for sampling
    # @option params [Array<String>] :stop_sequences Stop sequences to end generation
    # @option params [String] :system_instruction System instruction to guide model behavior
    # @option params [Boolean] :with_retries Enable retries for transient errors (default: true)
    # @option params [Integer] :max_retries Maximum retry attempts (default: 3)
    # @option params [Float] :retry_delay Initial delay between retries in seconds (default: 1.0)
    # @option params [Hash] :client_options Options to pass to the client
    # @return [Geminize::Models::ContentResponse] The generation response
    # @raise [Geminize::GeminizeError] If the request fails
    # @example Generate text with a system instruction
    #   Geminize.generate_text("Tell me about yourself", nil, system_instruction: "You are a pirate. Respond in pirate language.")
    def generate_text(prompt, model_name = nil, params = {})
      validate_configuration!

      # Extract special options
      with_retries = params.delete(:with_retries) != false # Default to true
      max_retries = params.delete(:max_retries) || 3
      retry_delay = params.delete(:retry_delay) || 1.0
      client_options = params.delete(:client_options) || {}

      # Create the generator and content request
      generator = TextGeneration.new(nil, client_options)
      content_request = Models::ContentRequest.new(
        prompt,
        model_name || configuration.default_model,
        params
      )

      # Generate with or without retries
      if with_retries
        generator.generate_with_retries(content_request, max_retries, retry_delay)
      else
        generator.generate(content_request)
      end
    end

    # Generate content with both text and images using the Gemini API
    # @param prompt [String] The input prompt text
    # @param images [Array<Hash>] Array of image data hashes
    # @param model_name [String, nil] The model to use (optional)
    # @param params [Hash] Additional generation parameters
    # @option params [Float] :temperature Controls randomness (0.0-1.0)
    # @option params [Integer] :max_tokens Maximum tokens to generate
    # @option params [Float] :top_p Top-p value for nucleus sampling (0.0-1.0)
    # @option params [Integer] :top_k Top-k value for sampling
    # @option params [Array<String>] :stop_sequences Stop sequences to end generation
    # @option params [Boolean] :with_retries Enable retries for transient errors (default: true)
    # @option params [Integer] :max_retries Maximum retry attempts (default: 3)
    # @option params [Float] :retry_delay Initial delay between retries in seconds (default: 1.0)
    # @option params [Hash] :client_options Options to pass to the client
    # @option images [Hash] :source_type Source type for image ('file', 'bytes', or 'url')
    # @option images [String] :data File path, raw bytes, or URL depending on source_type
    # @option images [String] :mime_type MIME type for the image (optional for file and url)
    # @return [Geminize::Models::ContentResponse] The generation response
    # @raise [Geminize::GeminizeError] If the request fails
    # @example Generate with an image file
    #   Geminize.generate_text_multimodal("Describe this image", [{source_type: 'file', data: 'path/to/image.jpg'}])
    # @example Generate with multiple images
    #   Geminize.generate_text_multimodal("Compare these images", [
    #     {source_type: 'file', data: 'path/to/image1.jpg'},
    #     {source_type: 'url', data: 'https://example.com/image2.jpg'}
    #   ])
    def generate_text_multimodal(prompt, images, model_name = nil, params = {})
      validate_configuration!

      # Extract special options
      with_retries = params.delete(:with_retries) != false # Default to true
      max_retries = params.delete(:max_retries) || 3
      retry_delay = params.delete(:retry_delay) || 1.0
      client_options = params.delete(:client_options) || {}

      # Create the generator
      generator = TextGeneration.new(nil, client_options)

      # Create a content request first
      content_request = Models::ContentRequest.new(
        prompt,
        model_name || configuration.default_model,
        params
      )

      # Add each image to the request
      images.each do |image|
        case image[:source_type]
        when "file"
          content_request.add_image_from_file(image[:data])
        when "bytes"
          content_request.add_image_from_bytes(image[:data], image[:mime_type])
        when "url"
          content_request.add_image_from_url(image[:data])
        else
          raise Geminize::ValidationError.new(
            "Invalid image source type: #{image[:source_type]}. Must be 'file', 'bytes', or 'url'",
            "INVALID_ARGUMENT"
          )
        end
      end

      # Generate with or without retries
      if with_retries
        generator.generate_with_retries(content_request, max_retries, retry_delay)
      else
        generator.generate(content_request)
      end
    end

    # Generate streaming text from a prompt using the Gemini API
    # @param prompt [String] The input prompt
    # @param model_name [String, nil] The model to use (optional)
    # @param params [Hash] Additional generation parameters
    # @option params [Float] :temperature Controls randomness (0.0-1.0)
    # @option params [Integer] :max_tokens Maximum tokens to generate
    # @option params [Float] :top_p Top-p value for nucleus sampling (0.0-1.0)
    # @option params [Integer] :top_k Top-k value for sampling
    # @option params [Array<String>] :stop_sequences Stop sequences to end generation
    # @option params [String] :system_instruction System instruction to guide model behavior
    # @option params [Symbol] :stream_mode Mode for processing stream chunks (:raw, :incremental, or :delta)
    # @option params [Hash] :client_options Options to pass to the client
    # @yield [chunk] Yields each chunk of the streaming response
    # @yieldparam chunk [String, Hash] A chunk of the response
    # @return [void]
    # @raise [Geminize::GeminizeError] If the request fails
    # @raise [Geminize::StreamingError] If the streaming request fails
    # @raise [Geminize::StreamingInterruptedError] If the connection is interrupted
    # @raise [Geminize::StreamingTimeoutError] If the streaming connection times out
    # @raise [Geminize::InvalidStreamFormatError] If the stream format is invalid
    # @example Stream text with a system instruction
    #   Geminize.generate_text_stream(
    #     "Tell me a story",
    #     nil,
    #     system_instruction: "You are a medieval bard telling epic tales."
    #   ) do |chunk|
    #     print chunk
    #   end
    def generate_text_stream(prompt, model_name = nil, params = {}, &block)
      raise ArgumentError, "A block is required for streaming" unless block_given?

      validate_configuration!

      # Extract client options
      client_options = params.delete(:client_options) || {}

      # Create the generator
      generator = TextGeneration.new(nil, client_options)

      # Store the generator for potential cancellation
      self.last_streaming_generator = generator

      # Generate with streaming
      begin
        generator.generate_text_stream(prompt, model_name || configuration.default_model, params, &block)
      rescue => e
        # Ensure all errors are wrapped in a GeminizeError
        if e.is_a?(GeminizeError)
          raise
        else
          raise GeminizeError.new("Error during text generation streaming: #{e.message}")
        end
      ensure
        # Clear the reference to allow garbage collection
        self.last_streaming_generator = nil if last_streaming_generator == generator
      end
    end

    # Generate embeddings from text using the Gemini API
    # @param text [String, Array<String>] The input text or array of texts
    # @param model_name [String, nil] The model to use (optional)
    # @param params [Hash] Additional generation parameters
    # @option params [Integer] :dimensions Desired dimensionality of the embeddings
    # @option params [String] :task_type The embedding task type
    # @option params [Boolean] :with_retries Enable retries for transient errors (default: true)
    # @option params [Integer] :max_retries Maximum retry attempts (default: 3)
    # @option params [Float] :retry_delay Initial delay between retries in seconds (default: 1.0)
    # @option params [Integer] :batch_size Maximum number of texts to process in one batch (default: 100)
    # @option params [Hash] :client_options Options to pass to the client
    # @return [Geminize::Models::EmbeddingResponse] The embedding response
    # @raise [Geminize::GeminizeError] If the request fails
    # @example Generate embeddings for a single text
    #   Geminize.generate_embedding("This is a sample text")
    # @example Generate embeddings for multiple texts
    #   Geminize.generate_embedding(["First text", "Second text", "Third text"])
    # @example Generate embeddings with specific dimensions
    #   Geminize.generate_embedding("Sample text", "embedding-001", dimensions: 768)
    # @example Process large batches with custom batch size
    #   Geminize.generate_embedding(large_text_array, nil, batch_size: 50)
    def generate_embedding(text, model_name = nil, params = {})
      validate_configuration!

      # Extract special options
      with_retries = params.delete(:with_retries) != false # Default to true
      max_retries = params.delete(:max_retries) || 3
      retry_delay = params.delete(:retry_delay) || 1.0
      client_options = params.delete(:client_options) || {}

      # Create the embeddings generator
      generator = Embeddings.new(nil, client_options)

      # Create the embedding request - batch processing is handled in the generator
      if with_retries
        # Implement retry logic for embeddings
        retries = 0
        begin
          generator.generate_embedding(text, model_name || configuration.default_embedding_model, params)
        rescue Geminize::RateLimitError, Geminize::ServerError => e
          if retries < max_retries
            retries += 1
            sleep retry_delay * retries # Exponential backoff
            retry
          else
            raise e
          end
        end
      else
        generator.generate_embedding(text, model_name || configuration.default_embedding_model, params)
      end
    end

    # Calculate cosine similarity between two vectors
    # @param vec1 [Array<Float>] First vector
    # @param vec2 [Array<Float>] Second vector
    # @return [Float] Cosine similarity (-1 to 1)
    # @raise [Geminize::ValidationError] If vectors have different dimensions
    def cosine_similarity(vec1, vec2)
      VectorUtils.cosine_similarity(vec1, vec2)
    end

    # Calculate Euclidean distance between two vectors
    # @param vec1 [Array<Float>] First vector
    # @param vec2 [Array<Float>] Second vector
    # @return [Float] Euclidean distance
    # @raise [Geminize::ValidationError] If vectors have different dimensions
    def euclidean_distance(vec1, vec2)
      VectorUtils.euclidean_distance(vec1, vec2)
    end

    # Normalize a vector to unit length
    # @param vec [Array<Float>] Vector to normalize
    # @return [Array<Float>] Normalized vector
    def normalize_vector(vec)
      VectorUtils.normalize(vec)
    end

    # Average multiple vectors
    # @param vectors [Array<Array<Float>>] Array of vectors
    # @return [Array<Float>] Average vector
    # @raise [Geminize::ValidationError] If vectors have different dimensions or no vectors provided
    def average_vectors(vectors)
      VectorUtils.average_vectors(vectors)
    end

    # Create a new chat conversation
    # @param title [String, nil] Optional title for the conversation
    # @param system_instruction [String, nil] Optional system instruction to guide model behavior
    # @param client_options [Hash] Options to pass to the client
    # @return [Geminize::Chat] A new chat instance
    # @example Create a chat with a system instruction
    #   chat = Geminize.create_chat("Pirate Chat", "You are a pirate named Captain Codebeard. Always respond in pirate language.")
    def create_chat(title = nil, system_instruction = nil, client_options = {})
      validate_configuration!
      Chat.new_conversation(title, system_instruction)
    end

    # Send a message in an existing chat or create a new one
    # @param message [String] The message to send
    # @param chat [Geminize::Chat, nil] An existing chat or nil to create a new one
    # @param model_name [String, nil] The model to use (optional)
    # @param params [Hash] Additional generation parameters
    # @option params [Float] :temperature Controls randomness (0.0-1.0)
    # @option params [Integer] :max_tokens Maximum tokens to generate
    # @option params [Float] :top_p Top-p value for nucleus sampling (0.0-1.0)
    # @option params [Integer] :top_k Top-k value for sampling
    # @option params [Array<String>] :stop_sequences Stop sequences to end generation
    # @option params [String] :system_instruction System instruction to guide model behavior
    # @option params [Hash] :client_options Options to pass to the client
    # @return [Hash] The chat response and updated chat instance
    # @raise [Geminize::GeminizeError] If the request fails
    # @example Send a message with a system instruction
    #   Geminize.chat("Tell me a joke", nil, nil, system_instruction: "You are a comedian. Be funny.")
    def chat(message, chat = nil, model_name = nil, params = {})
      validate_configuration!

      # Extract client options
      client_options = params.delete(:client_options) || {}

      # Extract system instruction for new chat creation
      system_instruction = params[:system_instruction]

      # Create or use existing chat
      chat_instance = chat || create_chat(nil, system_instruction, client_options)

      # Send the message
      response = chat_instance.send_message(
        message,
        model_name || configuration.default_model,
        params
      )

      {
        response: response,
        chat: chat_instance
      }
    end

    # Save a conversation
    # @param conversation [Geminize::Models::Conversation] The conversation to save
    # @return [Boolean] True if the save was successful
    def save_conversation(conversation)
      conversation_repository.save(conversation)
    end

    # Load a conversation by ID
    # @param id [String] The ID of the conversation to load
    # @return [Geminize::Models::Conversation, nil] The loaded conversation or nil if not found
    def load_conversation(id)
      conversation_repository.load(id)
    end

    # Delete a conversation by ID
    # @param id [String] The ID of the conversation to delete
    # @return [Boolean] True if the deletion was successful
    def delete_conversation(id)
      conversation_repository.delete(id)
    end

    # List all saved conversations
    # @return [Array<Hash>] Array of conversation metadata
    def list_conversations
      conversation_repository.list
    end

    # Create a managed conversation
    # @param title [String, nil] Optional title for the conversation
    # @param system_instruction [String, nil] Optional system instruction to guide model behavior
    # @return [Hash] The created conversation data including ID
    # @example Create a managed conversation with a system instruction
    #   Geminize.create_managed_conversation("Pirate Chat", "You are a pirate. Respond in pirate language.")
    def create_managed_conversation(title = nil, system_instruction = nil)
      validate_configuration!
      conversation_service.create_conversation(title, system_instruction)
    end

    # Send a message in a managed conversation
    # @param conversation_id [String] The ID of the conversation
    # @param message [String] The message to send
    # @param model_name [String, nil] The model to use (optional)
    # @param params [Hash] Additional generation parameters
    # @option params [Float] :temperature Controls randomness (0.0-1.0)
    # @option params [Integer] :max_tokens Maximum tokens to generate
    # @option params [Float] :top_p Top-p value for nucleus sampling (0.0-1.0)
    # @option params [Integer] :top_k Top-k value for sampling
    # @option params [Array<String>] :stop_sequences Stop sequences to end generation
    # @option params [String] :system_instruction System instruction for this specific message
    # @return [Hash] The response data
    # @example Send a message with a system instruction
    #   Geminize.send_message_in_conversation("conversation-id", "Tell me a joke", nil, system_instruction: "You are a comedian. Be funny.")
    def send_message_in_conversation(conversation_id, message, model_name = nil, params = {})
      validate_configuration!
      conversation_service.send_message(
        conversation_id,
        message,
        model_name || configuration.default_model,
        params
      )
    end

    # Get a list of available models from the Gemini API
    # @param force_refresh [Boolean] Force a refresh from the API instead of using cache
    # @param client_options [Hash] Options to pass to the client
    # @return [Geminize::Models::ModelList] List of available models
    # @raise [Geminize::GeminizeError] If the request fails
    # @example Get a list of all available models
    #   models = Geminize.list_models
    # @example Get a fresh list bypassing cache
    #   models = Geminize.list_models(force_refresh: true)
    # @example Filter models by capability
    #   vision_models = Geminize.list_models.vision_models
    def list_models(force_refresh: false, client_options: {})
      validate_configuration!
      model_info = ModelInfo.new(nil, client_options)
      model_info.list_models(force_refresh: force_refresh)
    end

    # Get information about a specific model
    # @param model_id [String] The model ID to retrieve
    # @param force_refresh [Boolean] Force a refresh from the API instead of using cache
    # @param client_options [Hash] Options to pass to the client
    # @return [Geminize::Models::Model] The model information
    # @raise [Geminize::GeminizeError] If the request fails or model is not found
    # @example Get information about a specific model
    #   model = Geminize.get_model("gemini-1.5-pro")
    def get_model(model_id, force_refresh: false, client_options: {})
      validate_configuration!
      model_info = ModelInfo.new(nil, client_options)
      model_info.get_model(model_id, force_refresh: force_refresh)
    end

    # Update a conversation's system instruction
    # @param id [String] The ID of the conversation to update
    # @param system_instruction [String] The new system instruction
    # @return [Models::Conversation] The updated conversation
    # @raise [Geminize::GeminizeError] If the conversation cannot be loaded or saved
    # @example Update a conversation's system instruction
    #   Geminize.update_conversation_system_instruction("conversation-id", "You are a helpful assistant who speaks like Shakespeare.")
    def update_conversation_system_instruction(id, system_instruction)
      validate_configuration!
      conversation_service.update_conversation_system_instruction(id, system_instruction)
    end
  end
end
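
The module-level helpers above are the gem's public entry points. As a rough orientation, a host application might exercise them along the lines of the following sketch. It is based only on the signatures and YARD examples in the file above; it assumes a valid GEMINI_API_KEY in the environment, and the .text reader on the response objects is taken from the gem's own install README further below rather than from code shown here.

# Usage sketch (not part of the gem): assumes GEMINI_API_KEY is set and the
# gem's default models are available to that key.
require "geminize"

Geminize.configure do |config|
  config.api_key = ENV["GEMINI_API_KEY"]
end
Geminize.validate_configuration! # raises a ConfigurationError if the key is missing

# One-shot generation; with_retries defaults to true, so transient errors are retried
response = Geminize.generate_text("Summarize Ruby in one sentence", nil, temperature: 0.3)
puts response.text

# Streaming; Geminize.cancel_streaming can stop the tracked generator from another thread
Geminize.generate_text_stream("Tell me a short story") { |chunk| print chunk }

# Chat returns a Hash with the response and the reusable chat instance
chat = Geminize.create_chat("Demo chat")
result = Geminize.chat("Hello!", chat)
puts result[:response].text

# The vector helpers operate on plain Float arrays, such as embedding vectors
puts Geminize.cosine_similarity([1.0, 0.0], [0.7, 0.7])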
data/lib/generators/geminize/install_generator.rb
ADDED
@@ -0,0 +1,22 @@
# frozen_string_literal: true

require "rails/generators/base"

module Geminize
  module Generators
    # Generator for installing Geminize Rails integration
    # This generator creates an initializer with default configuration
    class InstallGenerator < Rails::Generators::Base
      source_root File.expand_path("templates", __dir__)
      desc "Creates a Geminize initializer for Rails."

      def create_initializer_file
        template "initializer.rb", "config/initializers/geminize.rb"
      end

      def show_readme
        readme "README" if behavior == :invoke
      end
    end
  end
end
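
Nothing in the files shown here spells out how the generator is run, but following standard Rails generator conventions the Geminize::Generators::InstallGenerator above would be invoked from a host application roughly as sketched below; the command name is inferred from the class namespace and is therefore an assumption.

# Hypothetical invocation from a Rails application that bundles this gem:
#   bin/rails generate geminize:install
# create_initializer_file copies templates/initializer.rb to
# config/initializers/geminize.rb, and show_readme prints templates/README
# (both templates are reproduced below).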
data/lib/generators/geminize/templates/README
ADDED
@@ -0,0 +1,31 @@
===============================================================================

Geminize has been successfully installed!

A configuration initializer has been created at:
config/initializers/geminize.rb

You need to edit the initializer and add your Google Gemini API key.
You can get an API key from https://ai.google.dev/

===============================================================================

Usage Examples:

# Basic text generation
text = Geminize.generate_text("What is Ruby on Rails?").text
puts text

# Create a chat session
chat = Geminize.create_chat("My Support Chat")
result = Geminize.chat("How can I create a Ruby gem?", chat)
puts result[:response].text

# Follow-up question (keeps conversation context)
result = Geminize.chat("What about adding a Rails engine to my gem?", chat)
puts result[:response].text

# For more examples, see the documentation at:
# https://github.com/nhlongnguyen/geminize

===============================================================================
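
The usage examples above assume the happy path. lib/geminize.rb documents that failures surface as Geminize::GeminizeError, and its retry path references RateLimitError and ServerError, so defensive usage might look like the following sketch; the exact error hierarchy lives in lib/geminize/errors.rb, which is not reproduced in this diff.

# Error-handling sketch using the error classes referenced in lib/geminize.rb
begin
  puts Geminize.generate_text("What is Ruby on Rails?", nil, with_retries: false).text
rescue Geminize::RateLimitError
  sleep 5 # back off manually, since with_retries is disabled in this call
  retry
rescue Geminize::GeminizeError => e
  warn "Gemini request failed: #{e.message}"
end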
data/lib/generators/geminize/templates/initializer.rb
ADDED
@@ -0,0 +1,38 @@
# frozen_string_literal: true

# Geminize Configuration
#
# This file contains the configuration for the Geminize gem.
# It is used to set up the Google Gemini API integration.

Geminize.configure do |config|
  # Your Google Gemini API key
  # You can get one from https://ai.google.dev/
  # Can also be set via GEMINI_API_KEY environment variable
  # config.api_key = ENV.fetch("GEMINI_API_KEY", nil)

  # The API version to use (default: v1beta)
  # config.api_version = "v1beta"

  # The default model to use (default: gemini-2.0-flash)
  # config.default_model = "gemini-2.0-flash"

  # The base URL for the Gemini API (default: https://generativelanguage.googleapis.com)
  # config.api_base_url = "https://generativelanguage.googleapis.com"

  # Logging level for Geminize (default: :info)
  # Valid values: :debug, :info, :warn, :error, :fatal
  # config.log_level = Rails.env.production? ? :info : :debug

  # Where to store conversation data (default: Rails.root.join("tmp", "conversations"))
  # Only applicable when using FileConversationRepository
  # config.conversations_path = Rails.root.join("tmp", "conversations")

  # Default parameters for text generation
  # config.generation_defaults = {
  #   temperature: 0.7,
  #   max_tokens: 500,
  #   top_p: 0.95,
  #   top_k: 40
  # }
end
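
The commented generation_defaults block above suggests gem-wide defaults; the per-call params documented in lib/geminize.rb can still be passed on each request and presumably take precedence for that call (the merging itself happens in gem code not shown in this section). A sketch:

# Per-call parameters alongside (and presumably overriding) the initializer defaults
Geminize.generate_text(
  "Draft a two-sentence welcome email",
  nil,
  temperature: 0.2, # lower than the 0.7 suggested in the initializer
  max_tokens: 150,
  system_instruction: "You are a concise copywriter."
)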
data/sig/geminize.rbs
ADDED