durable-llm 0.1.4 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/.envrc +7 -0
  3. data/CHANGELOG.md +5 -0
  4. data/CLI.md +0 -2
  5. data/Gemfile +7 -9
  6. data/README.md +564 -30
  7. data/Rakefile +16 -6
  8. data/devenv.lock +171 -0
  9. data/devenv.nix +12 -0
  10. data/devenv.yaml +8 -0
  11. data/durable-llm.gemspec +52 -0
  12. data/examples/openai_quick_complete.rb +4 -2
  13. data/lib/durable/llm/cli.rb +218 -22
  14. data/lib/durable/llm/client.rb +228 -8
  15. data/lib/durable/llm/configuration.rb +163 -10
  16. data/lib/durable/llm/convenience.rb +102 -0
  17. data/lib/durable/llm/errors.rb +185 -0
  18. data/lib/durable/llm/provider_utilities.rb +201 -0
  19. data/lib/durable/llm/providers/anthropic.rb +232 -24
  20. data/lib/durable/llm/providers/azure_openai.rb +347 -0
  21. data/lib/durable/llm/providers/base.rb +220 -11
  22. data/lib/durable/llm/providers/cohere.rb +157 -11
  23. data/lib/durable/llm/providers/deepseek.rb +233 -0
  24. data/lib/durable/llm/providers/fireworks.rb +304 -0
  25. data/lib/durable/llm/providers/google.rb +327 -0
  26. data/lib/durable/llm/providers/groq.rb +133 -25
  27. data/lib/durable/llm/providers/huggingface.rb +120 -17
  28. data/lib/durable/llm/providers/mistral.rb +431 -0
  29. data/lib/durable/llm/providers/openai.rb +150 -4
  30. data/lib/durable/llm/providers/opencode.rb +253 -0
  31. data/lib/durable/llm/providers/openrouter.rb +256 -0
  32. data/lib/durable/llm/providers/perplexity.rb +273 -0
  33. data/lib/durable/llm/providers/together.rb +346 -0
  34. data/lib/durable/llm/providers/xai.rb +355 -0
  35. data/lib/durable/llm/providers.rb +113 -13
  36. data/lib/durable/llm/response_helpers.rb +185 -0
  37. data/lib/durable/llm/version.rb +5 -1
  38. data/lib/durable/llm.rb +214 -1
  39. data/lib/durable.rb +29 -4
  40. data/sig/durable/llm.rbs +303 -1
  41. metadata +106 -28
  42. data/Gemfile.lock +0 -103
@@ -0,0 +1,304 @@
1
+ # frozen_string_literal: true
2
+
3
+ # This file implements the Fireworks AI provider for accessing Fireworks AI's language models through their API, providing completion, embedding, and streaming capabilities with authentication handling, error management, and response normalization. It establishes HTTP connections to Fireworks AI's API endpoint, processes chat completions and embeddings, handles various API error responses, and includes comprehensive response classes to format Fireworks AI's API responses into a consistent interface.
4
+
5
+ require 'faraday'
6
+ require 'json'
7
+ require 'event_stream_parser'
8
+ require 'durable/llm/errors'
9
+ require 'durable/llm/providers/base'
10
+
11
+ module Durable
12
+ module Llm
13
+ module Providers
14
+ # Fireworks AI provider for accessing Fireworks AI's language models.
15
+ #
16
+ # Provides completion, embedding, and streaming capabilities with proper
17
+ # error handling and response normalization.
18
+ class Fireworks < Durable::Llm::Providers::Base
19
+ BASE_URL = 'https://api.fireworks.ai/inference/v1'
20
+
21
+ def default_api_key
22
+ Durable::Llm.configuration.fireworks&.api_key || ENV['FIREWORKS_API_KEY']
23
+ end
24
+
25
+ attr_accessor :api_key
26
+
27
+ # Initializes a new Fireworks provider instance.
28
+ #
29
+ # @param api_key [String, nil] The API key for Fireworks AI. If not provided, uses the default from configuration or environment.
30
+ # @return [Fireworks] A new instance of the Fireworks provider.
31
+ def initialize(api_key: nil)
32
+ super()
33
+ @api_key = api_key || default_api_key
34
+ @conn = Faraday.new(url: BASE_URL) do |faraday|
35
+ faraday.request :json
36
+ faraday.response :json
37
+ faraday.adapter Faraday.default_adapter
38
+ end
39
+ end
40
+
41
+ # Performs a chat completion request to Fireworks AI.
42
+ #
43
+ # @param options [Hash] The completion options including model, messages, temperature, etc.
44
+ # @return [FireworksResponse] The response object containing the completion results.
45
+ # @raise [Durable::Llm::AuthenticationError] If authentication fails.
46
+ # @raise [Durable::Llm::RateLimitError] If rate limit is exceeded.
47
+ # @raise [Durable::Llm::InvalidRequestError] If the request is invalid.
48
+ # @raise [Durable::Llm::ServerError] If there's a server error.
49
+ def completion(options)
50
+ response = @conn.post('chat/completions') do |req|
51
+ req.headers['Authorization'] = "Bearer #{@api_key}"
52
+ req.body = options
53
+ end
54
+
55
+ handle_response(response)
56
+ end
57
+
58
+ # Generates embeddings for the given input using Fireworks AI.
59
+ #
60
+ # @param model [String] The model to use for generating embeddings.
61
+ # @param input [String, Array<String>] The text input(s) to embed.
62
+ # @param options [Hash] Additional options for the embedding request.
63
+ # @return [FireworksEmbeddingResponse] The response object containing the embeddings.
64
+ # @raise [Durable::Llm::AuthenticationError] If authentication fails.
65
+ # @raise [Durable::Llm::RateLimitError] If rate limit is exceeded.
66
+ # @raise [Durable::Llm::InvalidRequestError] If the request is invalid.
67
+ # @raise [Durable::Llm::ServerError] If there's a server error.
68
+ def embedding(model:, input:, **options)
69
+ response = @conn.post('embeddings') do |req|
70
+ req.headers['Authorization'] = "Bearer #{@api_key}"
71
+ req.body = { model: model, input: input, **options }
72
+ end
73
+
74
+ handle_response(response, FireworksEmbeddingResponse)
75
+ end
76
+
77
+ # Retrieves the list of available models from Fireworks AI.
78
+ #
79
+ # @return [Array<String>] An array of model IDs available for use.
80
+ # @raise [Durable::Llm::AuthenticationError] If authentication fails.
81
+ # @raise [Durable::Llm::RateLimitError] If rate limit is exceeded.
82
+ # @raise [Durable::Llm::InvalidRequestError] If the request is invalid.
83
+ # @raise [Durable::Llm::ServerError] If there's a server error.
84
+ def models
85
+ response = @conn.get('models') do |req|
86
+ req.headers['Authorization'] = "Bearer #{@api_key}"
87
+ end
88
+
89
+ handle_response(response).data.map { |model| model['id'] }
90
+ end
91
+
92
+ def self.stream?
93
+ true
94
+ end
95
+
96
+ # Performs a streaming chat completion request to Fireworks AI.
97
+ #
98
+ # @param options [Hash] The completion options including model, messages, temperature, etc.
99
+ # @yield [FireworksStreamResponse] Yields each chunk of the streaming response.
100
+ # @return [nil] Returns nil after streaming is complete.
101
+ # @raise [Durable::Llm::AuthenticationError] If authentication fails.
102
+ # @raise [Durable::Llm::RateLimitError] If rate limit is exceeded.
103
+ # @raise [Durable::Llm::InvalidRequestError] If the request is invalid.
104
+ # @raise [Durable::Llm::ServerError] If there's a server error.
105
+ def stream(options)
106
+ options[:stream] = true
107
+
108
+ @conn.post('chat/completions') do |req|
109
+ req.headers['Authorization'] = "Bearer #{@api_key}"
110
+ req.headers['Accept'] = 'text/event-stream'
111
+
112
+ options['temperature'] = options['temperature'].to_f if options['temperature']
113
+
114
+ req.body = options
115
+
116
+ user_proc = proc do |chunk, _size, _total|
117
+ yield FireworksStreamResponse.new(chunk)
118
+ end
119
+
120
+ req.options.on_data = to_json_stream(user_proc: user_proc)
121
+ end
122
+
123
+ # For streaming, errors are handled in to_json_stream, no need for handle_response
124
+ nil
125
+ end
126
+
127
+ private
128
+
129
+ # CODE-FROM: ruby-openai @ https://github.com/alexrudall/ruby-openai/blob/main/lib/openai/http.rb
130
+ # MIT License: https://github.com/alexrudall/ruby-openai/blob/main/LICENSE.md
131
+ def to_json_stream(user_proc:)
132
+ parser = EventStreamParser::Parser.new
133
+
134
+ proc do |chunk, _bytes, env|
135
+ if env && env.status != 200
136
+ raise_error = Faraday::Response::RaiseError.new
137
+ raise_error.on_complete(env.merge(body: try_parse_json(chunk)))
138
+ end
139
+
140
+ parser.feed(chunk) do |_type, data|
141
+ user_proc.call(JSON.parse(data)) unless data == '[DONE]'
142
+ end
143
+ end
144
+ end
145
+
146
+ def try_parse_json(maybe_json)
147
+ JSON.parse(maybe_json)
148
+ rescue JSON::ParserError
149
+ maybe_json
150
+ end
151
+
152
+ # END-CODE-FROM
153
+
154
+ def handle_response(response, response_class = FireworksResponse)
155
+ case response.status
156
+ when 200..299
157
+ response_class.new(response.body)
158
+ when 401
159
+ raise Durable::Llm::AuthenticationError, parse_error_message(response)
160
+ when 429
161
+ raise Durable::Llm::RateLimitError, parse_error_message(response)
162
+ when 400..499
163
+ raise Durable::Llm::InvalidRequestError, parse_error_message(response)
164
+ when 500..599
165
+ raise Durable::Llm::ServerError, parse_error_message(response)
166
+ else
167
+ raise Durable::Llm::APIError, "Unexpected response code: #{response.status}"
168
+ end
169
+ end
170
+
171
+ def parse_error_message(response)
172
+ body = begin
173
+ JSON.parse(response.body)
174
+ rescue StandardError
175
+ nil
176
+ end
177
+ message = body&.dig('error', 'message') || response.body
178
+ "#{response.status} Error: #{message}"
179
+ end
180
+
181
+ # Response object for Fireworks chat API responses.
182
+ #
183
+ # Wraps the raw response and provides a consistent interface for accessing
184
+ # message content and metadata.
185
+ class FireworksResponse
186
+ attr_reader :raw_response
187
+
188
+ def initialize(response)
189
+ @raw_response = response
190
+ end
191
+
192
+ def choices
193
+ @raw_response['choices'].map { |choice| FireworksChoice.new(choice) }
194
+ end
195
+
196
+ def data
197
+ @raw_response['data']
198
+ end
199
+
200
+ def to_s
201
+ choices.map(&:to_s).join(' ')
202
+ end
203
+ end
204
+
205
+ # Represents a single choice in a Fireworks response.
206
+ #
207
+ # Contains the message and finish reason for the choice.
208
+ class FireworksChoice
209
+ attr_reader :message, :finish_reason
210
+
211
+ def initialize(choice)
212
+ @message = FireworksMessage.new(choice['message'])
213
+ @finish_reason = choice['finish_reason']
214
+ end
215
+
216
+ def to_s
217
+ @message.to_s
218
+ end
219
+ end
220
+
221
+ # Represents a message in a Fireworks conversation.
222
+ #
223
+ # Messages have a role (user, assistant) and text content.
224
+ class FireworksMessage
225
+ attr_reader :role, :content
226
+
227
+ def initialize(message)
228
+ @role = message['role']
229
+ @content = message['content']
230
+ end
231
+
232
+ def to_s
233
+ @content
234
+ end
235
+ end
236
+
237
+ # Response object for streaming Fireworks chat chunks.
238
+ #
239
+ # Wraps individual chunks from the Server-Sent Events stream.
240
+ class FireworksStreamResponse
241
+ attr_reader :choices
242
+
243
+ def initialize(parsed)
244
+ @choices = FireworksStreamChoice.new(parsed['choices'])
245
+ end
246
+
247
+ def to_s
248
+ @choices.to_s
249
+ end
250
+ end
251
+
252
+ # Response object for Fireworks embedding API responses.
253
+ #
254
+ # Wraps embedding data and provides array access to the vector representation.
255
+ class FireworksEmbeddingResponse
256
+ attr_reader :embedding
257
+
258
+ def initialize(data)
259
+ @embedding = data.dig('data', 0, 'embedding')
260
+ end
261
+
262
+ def to_a
263
+ @embedding
264
+ end
265
+ end
266
+
267
+ # Represents a single choice in a streaming Fireworks response chunk.
268
+ #
269
+ # Contains the delta (incremental content) and finish reason for the choice.
270
+ class FireworksStreamChoice
271
+ attr_reader :delta, :finish_reason
272
+
273
+ def initialize(choice)
274
+ @choice = [choice].flatten.first
275
+ @delta = FireworksStreamDelta.new(@choice['delta'])
276
+ @finish_reason = @choice['finish_reason']
277
+ end
278
+
279
+ def to_s
280
+ @delta.to_s
281
+ end
282
+ end
283
+
284
+ # Represents the incremental content delta in a streaming response.
285
+ #
286
+ # Contains the role and text content of the delta.
287
+ class FireworksStreamDelta
288
+ attr_reader :role, :content
289
+
290
+ def initialize(delta)
291
+ @role = delta['role']
292
+ @content = delta['content']
293
+ end
294
+
295
+ def to_s
296
+ @content || ''
297
+ end
298
+ end
299
+ end
300
+ end
301
+ end
302
+ end
303
+
304
+ # Copyright (c) 2025 Durable Programming, LLC. All rights reserved.
@@ -0,0 +1,327 @@
1
+ # frozen_string_literal: true
2
+
3
+ # This file implements the Google provider for accessing Google's Gemini language models through their API, providing completion capabilities with authentication handling, error management, and response normalization. It establishes HTTP connections to Google's Generative Language API endpoint, processes generateContent requests with text content, handles various API error responses, and includes comprehensive response classes to format Google's API responses into a consistent interface.
4
+
5
+ require 'faraday'
6
+ require 'json'
7
+ require 'durable/llm/errors'
8
+ require 'durable/llm/providers/base'
9
+ require 'event_stream_parser'
10
+
11
module Durable
  module Llm
    module Providers
      # Google Generative AI provider for accessing Gemini language models.
      #
      # Provides completion, embedding, and streaming capabilities with proper
      # error handling and response normalization for Google's Generative Language API.
      class Google < Durable::Llm::Providers::Base
        BASE_URL = 'https://generativelanguage.googleapis.com'

        # Resolves the API key from configuration, falling back to the
        # GOOGLE_API_KEY environment variable. The rescue covers configurations
        # that do not define a `google` accessor at all.
        #
        # @return [String, nil] The API key, or nil if none is configured.
        def default_api_key
          begin
            Durable::Llm.configuration.google&.api_key
          rescue NoMethodError
            nil
          end || ENV['GOOGLE_API_KEY']
        end

        attr_accessor :api_key

        # Initializes a new Google provider instance.
        #
        # @param api_key [String, nil] The API key for Google. If not provided,
        #   uses the default from configuration or environment.
        # @return [Google] A new instance of the Google provider.
        def initialize(api_key: nil)
          # CONSISTENCY FIX: call super() like the other providers built on Base
          # (previously omitted here).
          super()
          @api_key = api_key || default_api_key
          @conn = Faraday.new(url: BASE_URL) do |faraday|
            faraday.request :json
            faraday.response :json
            faraday.adapter Faraday.default_adapter
          end
        end

        # Performs a generateContent request to Google's Generative Language API.
        #
        # @param options [Hash] OpenAI-style options (:model, :messages, :temperature, ...),
        #   translated to Google's request format by #transform_options.
        # @return [GoogleResponse] The response object containing the completion results.
        # @raise [Durable::Llm::AuthenticationError] If authentication fails.
        # @raise [Durable::Llm::RateLimitError] If rate limit is exceeded.
        # @raise [Durable::Llm::InvalidRequestError] If the request is invalid.
        # @raise [Durable::Llm::ServerError] If there's a server error.
        def completion(options)
          model = options[:model]
          url = "/v1beta/models/#{model}:generateContent?key=#{@api_key}"

          # Transform options to Google's format
          request_body = transform_options(options)

          response = @conn.post(url) do |req|
            req.body = request_body
          end

          handle_response(response)
        end

        # Generates an embedding for the given input via the embedContent endpoint.
        #
        # @param model [String] The embedding model to use.
        # @param input [String] The text to embed.
        # @return [GoogleEmbeddingResponse] The response object containing the embedding.
        def embedding(model:, input:, **_options)
          url = "/v1beta/models/#{model}:embedContent?key=#{@api_key}"

          request_body = {
            content: {
              parts: [{ text: input }]
            }
          }

          response = @conn.post(url) do |req|
            req.body = request_body
          end

          handle_response(response, GoogleEmbeddingResponse)
        end

        # Returns the list of supported model IDs.
        #
        # @return [Array<String>] hardcoded model IDs (no public models API is queried).
        def models
          # Google doesn't provide a public models API, so return hardcoded list
          [
            'gemini-1.5-flash',
            'gemini-1.5-flash-001',
            'gemini-1.5-flash-002',
            'gemini-1.5-flash-8b',
            'gemini-1.5-flash-8b-001',
            'gemini-1.5-flash-8b-latest',
            'gemini-1.5-flash-latest',
            'gemini-1.5-pro',
            'gemini-1.5-pro-001',
            'gemini-1.5-pro-002',
            'gemini-1.5-pro-latest',
            'gemini-2.0-flash',
            'gemini-2.0-flash-001',
            'gemini-2.0-flash-exp',
            'gemini-2.0-flash-lite',
            'gemini-2.0-flash-lite-001',
            'gemini-2.0-flash-live-001',
            'gemini-2.0-flash-preview-image-generation',
            'gemini-2.5-flash',
            'gemini-2.5-flash-exp-native-audio-thinking-dialog',
            'gemini-2.5-flash-lite',
            'gemini-2.5-flash-lite-06-17',
            'gemini-2.5-flash-preview-05-20',
            'gemini-2.5-flash-preview-native-audio-dialog',
            'gemini-2.5-flash-preview-tts',
            'gemini-2.5-pro',
            'gemini-2.5-pro-preview-tts',
            'gemini-live-2.5-flash-preview',
            'text-embedding-004',
            'text-multilingual-embedding-002'
          ]
        end

        # @return [Boolean] true — this provider supports streaming.
        def self.stream?
          true
        end

        # Performs a streaming generateContent request (SSE via alt=sse).
        #
        # @param options [Hash] OpenAI-style options, translated by #transform_options.
        # @yield [GoogleStreamResponse] Yields each chunk of the streaming response.
        def stream(options)
          model = options[:model]
          url = "/v1beta/models/#{model}:streamGenerateContent?key=#{@api_key}&alt=sse"

          request_body = transform_options(options)

          response = @conn.post(url) do |req|
            req.headers['Accept'] = 'text/event-stream'
            req.body = request_body

            user_proc = proc do |chunk, _size, _total|
              yield GoogleStreamResponse.new(chunk)
            end

            req.options.on_data = to_json_stream(user_proc: user_proc)
          end

          # NOTE(review): the body has already been consumed by on_data here, so
          # this wraps whatever remains; the Fireworks provider returns nil
          # instead — consider aligning. Kept for backward compatibility.
          handle_response(response)
        end

        private

        # Translates OpenAI-style chat options into Google's generateContent
        # request body: system messages become systemInstruction, the assistant
        # role maps to 'model', and sampling options map into generationConfig.
        def transform_options(options)
          messages = options[:messages] || []
          system_messages = messages.select { |m| m[:role] == 'system' }
          conversation_messages = messages.reject { |m| m[:role] == 'system' }

          body = {
            contents: conversation_messages.map do |msg|
              {
                role: msg[:role] == 'assistant' ? 'model' : 'user',
                parts: [{ text: msg[:content] }]
              }
            end
          }

          if system_messages.any?
            body[:systemInstruction] = {
              parts: [{ text: system_messages.map { |m| m[:content] }.join("\n") }]
            }
          end

          generation_config = {}
          generation_config[:temperature] = options[:temperature] if options[:temperature]
          generation_config[:maxOutputTokens] = options[:max_tokens] if options[:max_tokens]
          generation_config[:topP] = options[:top_p] if options[:top_p]
          generation_config[:topK] = options[:top_k] if options[:top_k]

          body[:generationConfig] = generation_config unless generation_config.empty?

          body
        end

        # CODE-FROM: ruby-openai @ https://github.com/alexrudall/ruby-openai/blob/main/lib/openai/http.rb
        # MIT License: https://github.com/alexrudall/ruby-openai/blob/main/LICENSE.md
        #
        # Builds a Faraday on_data callback that parses Server-Sent Events and
        # invokes user_proc with each JSON-decoded data payload (skipping the
        # terminal '[DONE]' sentinel).
        def to_json_stream(user_proc:)
          parser = EventStreamParser::Parser.new

          proc do |chunk, _bytes, env|
            if env && env.status != 200
              raise_error = Faraday::Response::RaiseError.new
              raise_error.on_complete(env.merge(body: try_parse_json(chunk)))
            end

            parser.feed(chunk) do |_type, data|
              user_proc.call(JSON.parse(data)) unless data == '[DONE]'
            end
          end
        end

        # Attempts to parse a string as JSON, returning the original string on failure.
        def try_parse_json(maybe_json)
          JSON.parse(maybe_json)
        rescue JSON::ParserError
          maybe_json
        end

        # Maps HTTP status codes to response objects or provider error classes.
        #
        # @param response [Faraday::Response] The raw HTTP response.
        # @param response_class [Class] Wrapper class for 2xx bodies.
        def handle_response(response, response_class = GoogleResponse)
          case response.status
          when 200..299
            response_class.new(response.body)
          when 401
            raise Durable::Llm::AuthenticationError, parse_error_message(response)
          when 429
            raise Durable::Llm::RateLimitError, parse_error_message(response)
          when 400..499
            raise Durable::Llm::InvalidRequestError, parse_error_message(response)
          when 500..599
            raise Durable::Llm::ServerError, parse_error_message(response)
          else
            raise Durable::Llm::APIError, "Unexpected response code: #{response.status}"
          end
        end

        # Extracts a human-readable error message from an error response body,
        # falling back to the raw body when it is not valid JSON.
        def parse_error_message(response)
          body = begin
            JSON.parse(response.body)
          rescue StandardError
            nil
          end
          message = body&.dig('error', 'message') || response.body
          "#{response.status} Error: #{message}"
        end

        # Response object for Google Generative AI API responses.
        #
        # Wraps the raw response and provides a consistent interface for accessing
        # candidate content and metadata.
        class GoogleResponse
          attr_reader :raw_response

          def initialize(response)
            @raw_response = response
          end

          # @return [Array<GoogleChoice>] a single-element array wrapping the
          #   first candidate (additional candidates are not surfaced).
          def choices
            [GoogleChoice.new(@raw_response['candidates']&.first)]
          end

          def to_s
            choices.map(&:to_s).join(' ')
          end
        end

        # Represents a single candidate choice in a Google response.
        #
        # Contains the message content from the candidate.
        class GoogleChoice
          attr_reader :message

          def initialize(candidate)
            @message = GoogleMessage.new(candidate&.dig('content', 'parts')&.first)
          end

          def to_s
            @message.to_s
          end
        end

        # Represents a message in a Google conversation.
        #
        # Messages contain text content extracted from parts.
        class GoogleMessage
          attr_reader :content

          def initialize(part)
            @content = part&.dig('text') || ''
          end

          def to_s
            @content
          end
        end

        # Response object for streaming Google Generative AI chunks.
        #
        # Wraps individual chunks from the streaming response.
        class GoogleStreamResponse
          attr_reader :choices

          def initialize(parsed)
            @choices = [GoogleStreamChoice.new(parsed)]
          end

          def to_s
            @choices.map(&:to_s).join
          end
        end

        # Represents a single choice in a streaming Google response chunk.
        #
        # Contains the delta (incremental content) for the choice.
        class GoogleStreamChoice
          attr_reader :delta

          def initialize(parsed)
            @delta = GoogleStreamDelta.new(parsed.dig('candidates', 0, 'content', 'parts', 0))
          end

          def to_s
            @delta.to_s
          end
        end

        # Represents the incremental content delta in a streaming response.
        #
        # Contains the text content of the delta.
        class GoogleStreamDelta
          attr_reader :content

          def initialize(part)
            @content = part&.dig('text') || ''
          end

          def to_s
            @content
          end
        end

        # Response object for Google embedding API responses.
        #
        # Wraps embedding data and provides array access to the vector representation.
        class GoogleEmbeddingResponse
          attr_reader :embedding

          def initialize(data)
            @embedding = data.dig('embedding', 'values')
          end

          def to_a
            @embedding
          end
        end
      end
    end
  end
end
+ end
326
+
327
+ # Copyright (c) 2025 Durable Programming, LLC. All rights reserved.