aigen-google 0.1.0

@@ -0,0 +1,251 @@
+ # frozen_string_literal: true
+
+ module Aigen
+   module Google
+     # Chat manages stateful multi-turn conversations with the Gemini API.
+     # It maintains conversation history and automatically includes context
+     # in each API request for coherent, context-aware responses.
+     #
+     # @example Starting a new chat session
+     #   client = Aigen::Google::Client.new(api_key: "your-api-key")
+     #   chat = client.start_chat
+     #   response = chat.send_message("Hello!")
+     #   puts response["candidates"][0]["content"]["parts"][0]["text"]
+     #
+     # @example Multi-turn conversation with context
+     #   chat = client.start_chat
+     #   chat.send_message("What is Ruby?")
+     #   chat.send_message("What are its main features?") # Uses context from first message
+     #   puts chat.history # View full conversation
+     #
+     # @example Starting chat with existing history
+     #   history = [
+     #     {role: "user", parts: [{text: "Hello"}]},
+     #     {role: "model", parts: [{text: "Hi there!"}]}
+     #   ]
+     #   chat = client.start_chat(history: history)
+     #   chat.send_message("How are you?") # Continues from existing context
+     class Chat
+       # Returns the conversation history as an array of message hashes.
+       # Each message has a role ("user" or "model") and a parts array.
+       #
+       # @return [Array<Hash>] a frozen shallow copy of the conversation history
+       # @note The returned array is frozen to prevent external modification;
+       #   the history is managed internally by the Chat instance. The copy is
+       #   shallow, so treat the individual message hashes as read-only.
+       def history
+         @history.dup.freeze
+       end
+
+       # Initializes a new Chat instance.
+       #
+       # @param client [Aigen::Google::Client] the client instance for API requests
+       # @param model [String, nil] the model to use (defaults to client's default_model)
+       # @param history [Array<Hash>] initial conversation history (defaults to empty array)
+       #
+       # @example Create a new chat
+       #   chat = Chat.new(client: client, model: "gemini-pro")
+       #
+       # @example Create chat with existing history
+       #   history = [{role: "user", parts: [{text: "Hello"}]}]
+       #   chat = Chat.new(client: client, history: history)
+       def initialize(client:, model: nil, history: [])
+         @client = client
+         @model = model || @client.config.default_model
+         @history = history.dup
+       end
+
+       # Sends a message in the chat context and returns the model's response.
+       # Automatically includes conversation history for context and updates
+       # history with both the user message and model response.
+       #
+       # Accepts both simple String messages and Content objects for multimodal support.
+       #
+       # @param message [String, Content] the message text or Content object to send
+       # @param options [Hash] additional options to pass to the API (e.g., generationConfig)
+       #
+       # @return [Hash] the API response containing the model's reply
+       #
+       # @raise [Aigen::Google::AuthenticationError] if API key is invalid
+       # @raise [Aigen::Google::InvalidRequestError] if the request is malformed
+       # @raise [Aigen::Google::RateLimitError] if rate limit is exceeded
+       # @raise [Aigen::Google::ServerError] if the API returns a server error
+       # @raise [Aigen::Google::TimeoutError] if the request times out
+       # @raise [ArgumentError] if message is nil or empty
+       #
+       # @example Send a simple text message
+       #   response = chat.send_message("What is the weather today?")
+       #   text = response["candidates"][0]["content"]["parts"][0]["text"]
+       #
+       # @example Send multimodal content (text + image)
+       #   content = Aigen::Google::Content.new([
+       #     {text: "What is in this image?"},
+       #     {inline_data: {mime_type: "image/jpeg", data: base64_data}}
+       #   ])
+       #   response = chat.send_message(content)
+       #
+       # @example Send message with generation config
+       #   response = chat.send_message(
+       #     "Tell me a story",
+       #     generationConfig: {temperature: 0.9, maxOutputTokens: 1024}
+       #   )
+       def send_message(message, **options)
+         # Validate message parameter
+         raise ArgumentError, "message cannot be nil" if message.nil?
+         raise ArgumentError, "message cannot be empty" if message.respond_to?(:empty?) && message.empty?
+
+         # Build user message part
+         # Handle both String (simple text) and Content objects (multimodal)
+         user_message = if message.is_a?(Content)
+           # Content object - use its to_h serialization and add role
+           message.to_h.merge(role: "user")
+         else
+           # String - wrap in standard format
+           {
+             role: "user",
+             parts: [{text: message}]
+           }
+         end
+
+         # Build payload with full history + new message
+         payload = {
+           contents: @history + [user_message]
+         }
+
+         # Merge any additional generation config options
+         payload.merge!(options) if options.any?
+
+         # Make API request
+         endpoint = "models/#{@model}:generateContent"
+         response = @client.http_client.post(endpoint, payload)
+
+         # Extract model response
+         model_response = extract_model_message(response)
+
+         # Update history with both user message and model response
+         @history << user_message
+         @history << model_response
+
+         response
+       end
+
+       # Sends a message in the chat context with streaming response delivery.
+       # Automatically includes conversation history for context and updates history
+       # with the full accumulated response after streaming completes.
+       #
+       # @param message [String] the message text to send
+       # @param options [Hash] additional options to pass to the API (e.g., generationConfig)
+       # @yieldparam chunk [Hash] parsed JSON chunk from the streaming response (if block given)
+       #
+       # @return [nil] if block is given, returns nil after streaming completes
+       # @return [Enumerator] if no block given, returns lazy Enumerator for progressive iteration
+       #
+       # @raise [Aigen::Google::AuthenticationError] if API key is invalid
+       # @raise [Aigen::Google::InvalidRequestError] if the request is malformed
+       # @raise [Aigen::Google::RateLimitError] if rate limit is exceeded
+       # @raise [Aigen::Google::ServerError] if the API returns a server error
+       # @raise [Aigen::Google::TimeoutError] if the request times out
+       # @raise [ArgumentError] if message is nil or empty
+       #
+       # @note The conversation history is NOT updated until the entire stream completes.
+       #   This ensures the history remains consistent even if streaming is interrupted.
+       #
+       # @example Stream chat response with block
+       #   chat = client.start_chat
+       #   chat.send_message_stream("Tell me a joke") do |chunk|
+       #     text = chunk["candidates"][0]["content"]["parts"][0]["text"]
+       #     print text
+       #   end
+       #   # History now contains both user message and full accumulated response
+       #
+       # @example Stream with Enumerator for lazy processing
+       #   chat = client.start_chat
+       #   stream = chat.send_message_stream("Count to 5")
+       #   stream.each { |chunk| puts chunk["candidates"][0]["content"]["parts"][0]["text"] }
+       #   puts chat.history.length # => 2 (user + model)
+       def send_message_stream(message, **options, &block)
+         # Validate message parameter
+         raise ArgumentError, "message cannot be nil" if message.nil?
+         raise ArgumentError, "message cannot be empty" if message.respond_to?(:empty?) && message.empty?
+
+         # Build user message part
+         user_message = {
+           role: "user",
+           parts: [{text: message}]
+         }
+
+         # Build payload with full history + new message
+         payload = {
+           contents: @history + [user_message]
+         }
+
+         # Merge any additional generation config options
+         payload.merge!(options) if options.any?
+
+         # Make API request
+         endpoint = "models/#{@model}:streamGenerateContent"
+
+         # Accumulate full response text while streaming
+         accumulated_text = ""
+
+         # If block given, stream with block and accumulate
+         if block_given?
+           @client.http_client.post_stream(endpoint, payload) do |chunk|
+             # Extract text from chunk
+             text = extract_chunk_text(chunk)
+             accumulated_text += text if text
+
+             # Yield chunk to user's block
+             block.call(chunk)
+           end
+
+           # After streaming completes, update history with full response
+           update_history_after_stream(user_message, accumulated_text)
+           nil
+         else
+           # Return Enumerator that accumulates and updates history when consumed
+           Enumerator.new do |yielder|
+             @client.http_client.post_stream(endpoint, payload) do |chunk|
+               text = extract_chunk_text(chunk)
+               accumulated_text += text if text
+               yielder << chunk
+             end
+
+             # Update history after enumeration completes
+             update_history_after_stream(user_message, accumulated_text)
+           end
+         end
+       end
+
+       private
+
+       def extract_model_message(response)
+         candidate = response.dig("candidates", 0)
+         return {role: "model", parts: [{text: ""}]} unless candidate
+
+         content = candidate["content"]
+         return {role: "model", parts: [{text: ""}]} unless content
+
+         {
+           role: content["role"] || "model",
+           parts: content["parts"] || [{text: ""}]
+         }
+       end
+
+       def extract_chunk_text(chunk)
+         chunk.dig("candidates", 0, "content", "parts", 0, "text")
+       end
+
+       def update_history_after_stream(user_message, accumulated_text)
+         # Add user message to history
+         @history << user_message
+
+         # Add accumulated model response to history (symbol keys,
+         # matching the non-streaming path)
+         model_response = {
+           role: "model",
+           parts: [{text: accumulated_text}]
+         }
+         @history << model_response
+       end
+     end
+   end
+ end
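
The two send paths above differ mainly in when history is committed: `send_message` appends both turns before returning, while `send_message_stream` defers the appends until the stream (or Enumerator) has been fully consumed. A minimal usage sketch — not part of the package diff; the `aigen/google` require path and the `GOOGLE_API_KEY` environment variable are assumptions:

```ruby
# Illustrative only; require path and env var are assumptions, not from the diff.
require "aigen/google"

client = Aigen::Google::Client.new(api_key: ENV.fetch("GOOGLE_API_KEY"))
chat = client.start_chat

# Non-streaming: history gains both turns before the call returns.
response = chat.send_message("What is Ruby?")
puts response.dig("candidates", 0, "content", "parts", 0, "text")

# Streaming: history is committed only after the stream completes.
chat.send_message_stream("Summarize that in one sentence.") do |chunk|
  print chunk.dig("candidates", 0, "content", "parts", 0, "text")
end

puts chat.history.length # => 4 (two user turns, two model turns)
```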
@@ -0,0 +1,243 @@
+ # frozen_string_literal: true
+
+ module Aigen
+   module Google
+     # Client is the main entry point to the Gemini API. It resolves its
+     # configuration (global settings overridden by per-instance keywords)
+     # and owns the HttpClient used for all requests.
+     class Client
+       attr_reader :config, :http_client
+
+       def initialize(api_key: nil, model: nil, timeout: nil, **options)
+         @config = build_configuration(api_key, model, timeout, options)
+         @config.validate!
+         @http_client = HttpClient.new(
+           api_key: @config.api_key,
+           timeout: @config.timeout,
+           retry_count: @config.retry_count
+         )
+       end
+
+       # Generates content from the Gemini API with support for text, multimodal
+       # content, generation parameters, and safety settings.
+       #
+       # @param prompt [String, nil] simple text prompt (for backward compatibility)
+       # @param contents [Array<Hash>, nil] array of content hashes for multimodal requests
+       # @param model [String, nil] the model to use (defaults to client's default_model)
+       # @param temperature [Float, nil] controls randomness (0.0-1.0)
+       # @param top_p [Float, nil] nucleus sampling threshold (0.0-1.0)
+       # @param top_k [Integer, nil] top-k sampling limit (> 0)
+       # @param max_output_tokens [Integer, nil] maximum response tokens (> 0)
+       # @param response_modalities [Array<String>, nil] requested output modalities (e.g., ["TEXT", "IMAGE"])
+       # @param aspect_ratio [String, nil] aspect ratio for image output (e.g., "16:9")
+       # @param image_size [String, nil] size for image output ("1K", "2K", "4K")
+       # @param safety_settings [Array<Hash>, nil] safety filtering configuration
+       # @param options [Hash] additional options to pass to the API
+       #
+       # @return [Hash] the API response
+       #
+       # @raise [Aigen::Google::InvalidRequestError] if validation fails (before API call)
+       # @raise [Aigen::Google::AuthenticationError] if API key is invalid
+       # @raise [Aigen::Google::RateLimitError] if rate limit is exceeded
+       # @raise [Aigen::Google::ServerError] if the API returns a server error
+       #
+       # @example Simple text prompt (backward compatible)
+       #   response = client.generate_content(prompt: "Hello")
+       #
+       # @example With generation config
+       #   response = client.generate_content(
+       #     prompt: "Tell me a story",
+       #     temperature: 0.7,
+       #     max_output_tokens: 1024
+       #   )
+       #
+       # @example Multimodal content (text + image)
+       #   text = Aigen::Google::Content.text("What is in this image?")
+       #   image = Aigen::Google::Content.image(data: base64_data, mime_type: "image/jpeg")
+       #   response = client.generate_content(contents: [text.to_h, image.to_h])
+       #
+       # @example Image generation (Nano Banana)
+       #   response = client.generate_content(
+       #     prompt: "A serene mountain landscape",
+       #     response_modalities: ["TEXT", "IMAGE"],
+       #     aspect_ratio: "16:9",
+       #     image_size: "2K"
+       #   )
+       def generate_content(prompt: nil, contents: nil, model: nil, temperature: nil, top_p: nil, top_k: nil, max_output_tokens: nil, response_modalities: nil, aspect_ratio: nil, image_size: nil, safety_settings: nil, **options)
+         model ||= @config.default_model
+
+         # Build generation config if parameters provided (validates before API call)
+         gen_config = nil
+         if temperature || top_p || top_k || max_output_tokens || response_modalities || aspect_ratio || image_size
+           gen_config = GenerationConfig.new(
+             temperature: temperature,
+             top_p: top_p,
+             top_k: top_k,
+             max_output_tokens: max_output_tokens,
+             response_modalities: response_modalities,
+             aspect_ratio: aspect_ratio,
+             image_size: image_size
+           )
+         end
+
+         # Build payload
+         payload = {}
+
+         # Handle contents (multimodal) or prompt (simple text)
+         if contents
+           payload[:contents] = contents
+         elsif prompt
+           # Backward compatibility: convert simple prompt to contents format
+           payload[:contents] = [
+             {
+               parts: [
+                 {text: prompt}
+               ]
+             }
+           ]
+         end
+
+         # Add generation config if present
+         payload[:generationConfig] = gen_config.to_h if gen_config && !gen_config.to_h.empty?
+
+         # Add safety settings if present
+         payload[:safetySettings] = safety_settings if safety_settings
+
+         # Merge any additional options
+         payload.merge!(options) if options.any?
+
+         endpoint = "models/#{model}:generateContent"
+         @http_client.post(endpoint, payload)
+       end
+
+       # Streams generated content from the Gemini API with progressive chunk delivery.
+       # Supports both block-based immediate processing and lazy Enumerator evaluation.
+       #
+       # @param prompt [String] the prompt text to generate content from
+       # @param model [String, nil] the model to use (defaults to client's default_model)
+       # @param options [Hash] additional options to pass to the API (e.g., generationConfig)
+       # @yieldparam chunk [Hash] parsed JSON chunk from the streaming response (if block given)
+       #
+       # @return [nil] if block is given, returns nil after streaming completes
+       # @return [Enumerator] if no block given, returns lazy Enumerator for progressive iteration
+       #
+       # @raise [Aigen::Google::AuthenticationError] if API key is invalid
+       # @raise [Aigen::Google::InvalidRequestError] if the request is malformed
+       # @raise [Aigen::Google::RateLimitError] if rate limit is exceeded
+       # @raise [Aigen::Google::ServerError] if the API returns a server error
+       # @raise [Aigen::Google::TimeoutError] if the request times out
+       #
+       # @example Stream with block (immediate processing)
+       #   client.generate_content_stream(prompt: "Tell me a story") do |chunk|
+       #     text = chunk["candidates"][0]["content"]["parts"][0]["text"]
+       #     print text
+       #   end
+       #
+       # @example Stream with Enumerator (lazy evaluation)
+       #   stream = client.generate_content_stream(prompt: "Tell me a story")
+       #   stream.each do |chunk|
+       #     text = chunk["candidates"][0]["content"]["parts"][0]["text"]
+       #     print text
+       #   end
+       #
+       # @example Stream with lazy operations
+       #   stream = client.generate_content_stream(prompt: "Count to 10")
+       #   first_three = stream.lazy.take(3).map { |c| c["candidates"][0]["content"]["parts"][0]["text"] }.to_a
+       def generate_content_stream(prompt:, model: nil, **options, &block)
+         model ||= @config.default_model
+
+         payload = {
+           contents: [
+             {
+               parts: [
+                 {text: prompt}
+               ]
+             }
+           ]
+         }
+
+         # Merge any additional generation config options
+         payload.merge!(options) if options.any?
+
+         endpoint = "models/#{model}:streamGenerateContent"
+
+         # If block given, stream with block
+         if block_given?
+           @http_client.post_stream(endpoint, payload, &block)
+         else
+           # Return Enumerator for lazy evaluation
+           Enumerator.new do |yielder|
+             @http_client.post_stream(endpoint, payload) do |chunk|
+               yielder << chunk
+             end
+           end
+         end
+       end
+
+       # Generates an image from a text prompt using Gemini image generation models.
+       # This is a convenience method that automatically sets response_modalities to
+       # ["TEXT", "IMAGE"] and returns an ImageResponse object for easy image extraction.
+       #
+       # @param prompt [String] the text description of the image to generate
+       # @param aspect_ratio [String, nil] optional aspect ratio ("1:1", "16:9", "9:16", "4:3", "3:4", "5:4", "4:5")
+       # @param size [String, nil] optional image size ("1K", "2K", "4K")
+       # @param model [String, nil] optional model name (defaults to client's model)
+       # @param options [Hash] additional options to pass to generate_content
+       #
+       # @return [Aigen::Google::ImageResponse] wrapper around the API response with convenience methods
+       #
+       # @raise [Aigen::Google::InvalidRequestError] if aspect_ratio or size is invalid
+       #
+       # @example Basic image generation
+       #   client = Aigen::Google::Client.new(model: "gemini-2.5-flash-image")
+       #   response = client.generate_image("A serene mountain landscape")
+       #   response.save("landscape.png") if response.success?
+       #
+       # @example With size and aspect ratio
+       #   response = client.generate_image(
+       #     "A futuristic cityscape",
+       #     aspect_ratio: "16:9",
+       #     size: "2K"
+       #   )
+       #   if response.success?
+       #     puts response.text
+       #     response.save("city.png")
+       #   else
+       #     puts "Failed: #{response.failure_message}"
+       #   end
+       def generate_image(prompt, aspect_ratio: nil, size: nil, model: nil, **options)
+         response = generate_content(
+           prompt: prompt,
+           model: model,
+           response_modalities: ["TEXT", "IMAGE"],
+           aspect_ratio: aspect_ratio,
+           image_size: size,
+           **options
+         )
+
+         ImageResponse.new(response)
+       end
+
+       # Starts a stateful chat session backed by this client.
+       #
+       # @param history [Array<Hash>] initial conversation history
+       # @param model [String, nil] the model to use (defaults to client's default_model)
+       # @param options [Hash] captured but currently unused
+       # @return [Aigen::Google::Chat] a new Chat instance
+       def start_chat(history: [], model: nil, **options)
+         Chat.new(
+           client: self,
+           model: model || @config.default_model,
+           history: history
+         )
+       end
+
+       private
+
+       def build_configuration(api_key, model, timeout, options)
+         # Start with global configuration if available
+         config = if Google.configuration
+           Google.configuration.dup
+         else
+           Configuration.new
+         end
+
+         # Override with instance-specific values
+         config.api_key = api_key if api_key
+         config.default_model = model if model
+         config.timeout = timeout if timeout
+         options.each { |k, v| config.send("#{k}=", v) if config.respond_to?("#{k}=") }
+
+         config
+       end
+     end
+   end
+ end
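
Note that `generate_content` builds a `GenerationConfig` whenever any tuning parameter is passed, so out-of-range values fail locally with `InvalidRequestError` before any HTTP request is made. A hedged sketch of the three public entry points, under the same require-path and API-key assumptions as the Chat example above (the image model name follows the diff's own example):

```ruby
client = Aigen::Google::Client.new(api_key: ENV.fetch("GOOGLE_API_KEY"))

# Plain text generation with tuning parameters (validated client-side first).
response = client.generate_content(prompt: "Tell me a short story",
  temperature: 0.7, max_output_tokens: 256)
puts response.dig("candidates", 0, "content", "parts", 0, "text")

# Calling the streaming variant without a block returns a lazy Enumerator.
stream = client.generate_content_stream(prompt: "Count to 10")
stream.each { |chunk| print chunk.dig("candidates", 0, "content", "parts", 0, "text") }

# Image generation via the convenience wrapper.
image = client.generate_image("A serene mountain landscape",
  aspect_ratio: "16:9", model: "gemini-2.5-flash-image")
image.save("landscape.png") if image.success?
```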
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module Aigen
+   module Google
+     # Configuration holds gem-wide settings: the API key, default model,
+     # request timeout, and retry count.
+     class Configuration
+       attr_accessor :api_key, :default_model, :timeout, :retry_count
+
+       def initialize
+         @api_key = ENV.fetch("GOOGLE_API_KEY", nil)
+         @default_model = "gemini-pro"
+         @timeout = 30
+         @retry_count = 3
+       end
+
+       def validate!
+         raise ConfigurationError, "API key is required. Set via Aigen::Google.configure or ENV['GOOGLE_API_KEY']" if api_key.nil? || api_key.empty?
+       end
+     end
+   end
+ end
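
`Client#build_configuration` duplicates the global configuration and then layers per-instance keywords on top, so client-level settings never mutate the shared object. The `ConfigurationError` message references `Aigen::Google.configure`; that method is defined elsewhere in the gem and not shown in this diff, so the block form below is an assumption based on the conventional Ruby configure pattern:

```ruby
# Sketch; Aigen::Google.configure is inferred from the error message above,
# not shown in this diff.
Aigen::Google.configure do |config|
  config.api_key = ENV.fetch("GOOGLE_API_KEY")
  config.default_model = "gemini-pro"
  config.timeout = 60
  config.retry_count = 5
end

# Per-client keywords override the duplicated global configuration.
client = Aigen::Google::Client.new(timeout: 10)
client.config.timeout       # => 10
client.config.default_model # => "gemini-pro"
```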
@@ -0,0 +1,82 @@
+ # frozen_string_literal: true
+
+ module Aigen
+   module Google
+     # Content represents text, image, or multimodal content for Gemini API requests.
+     # Provides builder methods for creating content and serialization to API format.
+     #
+     # @example Text content
+     #   content = Aigen::Google::Content.text("Hello, world!")
+     #   content.to_h # => {parts: [{text: "Hello, world!"}]}
+     #
+     # @example Image content (Base64-encoded)
+     #   require "base64"
+     #   image_data = Base64.strict_encode64(File.binread("image.jpg"))
+     #   content = Aigen::Google::Content.image(data: image_data, mime_type: "image/jpeg")
+     #   content.to_h # => {parts: [{inline_data: {mime_type: "image/jpeg", data: "..."}}]}
+     #
+     # @example Multimodal content (text + image)
+     #   text_part = {text: "What is in this image?"}
+     #   image_part = {inline_data: {mime_type: "image/jpeg", data: base64_data}}
+     #   content = Aigen::Google::Content.new([text_part, image_part])
+     class Content
+       # Creates a text content instance.
+       #
+       # @param text [String] the text content
+       # @return [Content] a Content instance with text part
+       #
+       # @example
+       #   content = Content.text("Hello!")
+       #   content.to_h # => {parts: [{text: "Hello!"}]}
+       def self.text(text)
+         new([{text: text}])
+       end
+
+       # Creates an image content instance with Base64-encoded data.
+       #
+       # @param data [String] Base64-encoded image data (use Base64.strict_encode64)
+       # @param mime_type [String] the MIME type (e.g., "image/jpeg", "image/png")
+       # @return [Content] a Content instance with inline_data part
+       #
+       # @example
+       #   require "base64"
+       #   data = Base64.strict_encode64(File.binread("photo.jpg"))
+       #   content = Content.image(data: data, mime_type: "image/jpeg")
+       def self.image(data:, mime_type:)
+         new([{
+           inline_data: {
+             mime_type: mime_type,
+             data: data
+           }
+         }])
+       end
+
+       # Initializes a Content instance with an array of parts.
+       #
+       # @param parts [Array<Hash>] array of content parts:
+       #   - text part: {text: "string"}
+       #   - image part: {inline_data: {mime_type: "...", data: "..."}}
+       #
+       # @example
+       #   parts = [
+       #     {text: "Describe this image:"},
+       #     {inline_data: {mime_type: "image/jpeg", data: base64_data}}
+       #   ]
+       #   content = Content.new(parts)
+       def initialize(parts)
+         @parts = parts
+       end
+
+       # Serializes the content to Gemini API format.
+       #
+       # @return [Hash] the content hash with parts array
+       #
+       # @example
+       #   content = Content.text("Hello")
+       #   content.to_h # => {parts: [{text: "Hello"}]}
+       def to_h
+         {parts: @parts}
+       end
+     end
+   end
+ end
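
`Content#to_h` deliberately omits the role key; when a Content object is passed to `Chat#send_message`, the chat merges `role: "user"` in itself. A short sketch combining the builders with `Client#generate_content`, under the same API-key assumption as the earlier examples:

```ruby
require "base64"

text = Aigen::Google::Content.text("What is in this image?")
image = Aigen::Google::Content.image(
  data: Base64.strict_encode64(File.binread("photo.jpg")), # binary-safe read
  mime_type: "image/jpeg"
)

client = Aigen::Google::Client.new(api_key: ENV.fetch("GOOGLE_API_KEY"))
response = client.generate_content(contents: [text.to_h, image.to_h])
puts response.dig("candidates", 0, "content", "parts", 0, "text")
```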
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module Aigen
+   module Google
+     # Base error class for all gem exceptions
+     class Error < StandardError; end
+
+     # Configuration-related errors
+     class ConfigurationError < Error; end
+
+     # Base class for API-related errors
+     class ApiError < Error
+       attr_reader :status_code
+
+       def initialize(message, status_code: nil)
+         super(message)
+         @status_code = status_code
+       end
+     end
+
+     # Authentication errors (401, 403)
+     class AuthenticationError < ApiError
+       def initialize(message = "Invalid API key. Get one at https://makersuite.google.com/app/apikey", status_code: 401)
+         super
+       end
+     end
+
+     # Rate limit exceeded (429)
+     class RateLimitError < ApiError
+       def initialize(message = "Rate limit exceeded. Please retry after some time.", status_code: 429)
+         super
+       end
+     end
+
+     # Invalid request errors (400, 404)
+     class InvalidRequestError < ApiError
+       def initialize(message = "Invalid request. Check your parameters.", status_code: 400)
+         super
+       end
+     end
+
+     # Server errors (500-599)
+     class ServerError < ApiError
+       def initialize(message = "Google API server error. Please retry.", status_code: 500)
+         super
+       end
+     end
+
+     # Timeout errors
+     class TimeoutError < Error
+       def initialize(message = "Request timed out after retries.")
+         super
+       end
+     end
+   end
+ end
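
Because every HTTP-level failure subclasses `ApiError` (which carries the status code), while `TimeoutError` and `ConfigurationError` subclass only `Error`, callers can pick their rescue granularity. A sketch, reusing the `client` from the earlier examples:

```ruby
attempts = 0
begin
  client.generate_content(prompt: "Hello")
rescue Aigen::Google::RateLimitError
  attempts += 1
  retry if attempts < 3 # simple bounded retry, for illustration only
  raise
rescue Aigen::Google::ApiError => e
  warn "API failure (HTTP #{e.status_code}): #{e.message}"
rescue Aigen::Google::Error => e
  warn "Gem error: #{e.message}" # TimeoutError, ConfigurationError, etc.
end
```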