intelligence 0.6.0 → 0.8.0

Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +576 -0
  3. data/intelligence.gemspec +2 -1
  4. data/lib/intelligence/adapter/base.rb +13 -6
  5. data/lib/intelligence/adapter/class_methods.rb +15 -0
  6. data/lib/intelligence/adapter/module_methods.rb +41 -0
  7. data/lib/intelligence/adapter.rb +2 -2
  8. data/lib/intelligence/adapters/anthropic/adapter.rb +21 -19
  9. data/lib/intelligence/adapters/anthropic/chat_request_methods.rb +189 -0
  10. data/lib/intelligence/adapters/anthropic/{chat_methods.rb → chat_response_methods.rb} +13 -137
  11. data/lib/intelligence/adapters/cerebras.rb +19 -19
  12. data/lib/intelligence/adapters/generic/adapter.rb +4 -2
  13. data/lib/intelligence/adapters/generic/chat_request_methods.rb +221 -0
  14. data/lib/intelligence/adapters/generic/chat_response_methods.rb +234 -0
  15. data/lib/intelligence/adapters/generic.rb +1 -1
  16. data/lib/intelligence/adapters/google/adapter.rb +33 -22
  17. data/lib/intelligence/adapters/google/chat_request_methods.rb +234 -0
  18. data/lib/intelligence/adapters/google/chat_response_methods.rb +236 -0
  19. data/lib/intelligence/adapters/groq.rb +29 -49
  20. data/lib/intelligence/adapters/hyperbolic.rb +13 -39
  21. data/lib/intelligence/adapters/mistral.rb +21 -42
  22. data/lib/intelligence/adapters/open_ai/adapter.rb +39 -32
  23. data/lib/intelligence/adapters/open_ai/chat_request_methods.rb +186 -0
  24. data/lib/intelligence/adapters/open_ai/chat_response_methods.rb +239 -0
  25. data/lib/intelligence/adapters/open_ai.rb +1 -1
  26. data/lib/intelligence/adapters/open_router.rb +18 -18
  27. data/lib/intelligence/adapters/samba_nova.rb +16 -18
  28. data/lib/intelligence/adapters/together_ai.rb +25 -23
  29. data/lib/intelligence/conversation.rb +11 -10
  30. data/lib/intelligence/message.rb +45 -29
  31. data/lib/intelligence/message_content/base.rb +2 -9
  32. data/lib/intelligence/message_content/binary.rb +3 -3
  33. data/lib/intelligence/message_content/file.rb +3 -3
  34. data/lib/intelligence/message_content/text.rb +10 -2
  35. data/lib/intelligence/message_content/tool_call.rb +61 -5
  36. data/lib/intelligence/message_content/tool_result.rb +11 -6
  37. data/lib/intelligence/tool.rb +139 -0
  38. data/lib/intelligence/version.rb +1 -1
  39. data/lib/intelligence.rb +3 -1
  40. metadata +31 -13
  41. data/lib/intelligence/adapter/class_methods/construction.rb +0 -17
  42. data/lib/intelligence/adapter/module_methods/construction.rb +0 -43
  43. data/lib/intelligence/adapters/generic/chat_methods.rb +0 -355
  44. data/lib/intelligence/adapters/google/chat_methods.rb +0 -393
  45. data/lib/intelligence/adapters/legacy/adapter.rb +0 -11
  46. data/lib/intelligence/adapters/legacy/chat_methods.rb +0 -54
  47. data/lib/intelligence/adapters/open_ai/chat_methods.rb +0 -345
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 754f9f4fd414c8bee3fd90ea14aeae011e21f3312e483096f5a50b41698dfef5
- data.tar.gz: 7ba7e4cb7cd172e152f8ab6c886255a13d0bbf26b6c6e0f674b71765ed03cf67
+ metadata.gz: 35084b4f3df27ee21c0a21a759b74bcca9c05c6b47e0311026d6e6587f8661a3
+ data.tar.gz: 76afc7ad3e1f2e3637c82e8613b492680be20962ca5daf263d3c457968512c04
  SHA512:
- metadata.gz: db69984a0028e297a30346406f351388caca294094fabe00d30c8598f038db8731acfbf1ded26c6fa8f653bf388826ec6fa07327c5868bec40ba6686a7f38182
- data.tar.gz: f1a21376458d278c46828c564951a945bc7c896951946911e06fe73c04204dad2147ce91ba83434161d55e0c0c1a7132d1d9f7006ea9f2ef3fbe59bcf24bc87d
+ metadata.gz: 643f9acfde921655b5861901f5ea11646d00e9673a852e8546ec112a275d9f4e695934c314c577ca5cf44684ee3ce86b57cb5da9100e67b5c016c3bd90672f14
+ data.tar.gz: 6a9bb70335d3cd9f5b5ef48f1029e8b7997d880a1ebed37168c6d5cae88ec4abb48c2c8ffb95ef63c5b4b82a0e5c00904b0320321c124d8cf24fabb1928e5453
data/README.md ADDED
@@ -0,0 +1,576 @@
1
+ # Intelligence
2
+
3
+ Intelligence is a lightweight yet powerful Ruby gem that provides a uniform interface for
4
+ interacting with large language and vision model APIs across multiple vendors. It allows
5
+ you to seamlessly integrate with services from OpenAI, Anthropic, Google, Mistral, Cerebras,
6
+ Groq, Hyperbolic, Samba Nova, Together AI, and others, while maintaining a consistent API
7
+ across all providers.
8
+
9
+ The gem operates with minimal dependencies and doesn't require vendor SDK installation,
10
+ making it easy to switch between providers or work with multiple providers simultaneously.
11
+
12
+ ```ruby
13
+ require 'intelligence'
14
+
15
+ adapter = Intelligence::Adapter.build :open_ai do
16
+ key ENV[ 'OPENAI_API_KEY' ]
17
+ chat_options do
18
+ model 'gpt-4o'
19
+ max_tokens 256
20
+ end
21
+ end
22
+
23
+ request = Intelligence::ChatRequest.new( adapter: adapter )
24
+ conversation = Intelligence::Conversation.build do
25
+ system_message do
26
+ content text: "You are a highly efficient AI assistant. Provide clear, concise responses."
27
+ end
28
+ message role: :user do
29
+ content text: ARGV[ 0 ] || 'Hello!'
30
+ end
31
+ end
32
+
33
+ response = request.chat( conversation )
34
+
35
+ if response.success?
36
+ puts response.result.text
37
+ else
38
+ puts "Error: " + response.result.error_description
39
+ end
40
+ ```
41
+
42
+ ## Installation
43
+
44
+ Add this line to your application's Gemfile:
45
+
46
+ ```ruby
47
+ gem 'intelligence'
48
+ ```
49
+
50
+ Then execute:
51
+
52
+ ```bash
53
+ $ bundle install
54
+ ```
55
+
56
+ Or install it directly:
57
+
58
+ ```bash
59
+ $ gem install intelligence
60
+ ```
61
+
62
+ ## Usage
63
+
64
+ ### Fundamentals
65
+
66
+ The core components of Intelligence are adapters, requests and responses. An adapter encapsulates
67
+ the differences between API vendors, allowing you to use requests and responses
68
+ uniformly.
69
+
70
+ You retrieve an adapter for a specific vendor, configure it with a key, model and associated
71
+ parameters and then make a request by calling either the `chat` or `stream` methods.
72
+
73
+ ```ruby
74
+ require 'intelligence'
75
+
76
+ # configure the adapter with your API key and model settings
77
+ adapter = Intelligence::Adapter[ :google ].new(
78
+ key: ENV[ 'GOOGLE_API_KEY' ],
79
+ chat_options: {
80
+ model: 'gemini-1.5-flash-002',
81
+ max_tokens: 256
82
+ }
83
+ )
84
+
85
+ # create a request instance, passing the adapter
86
+ request = Intelligence::ChatRequest.new( adapter: adapter )
87
+
88
+ # make the request and handle the response
89
+ response = request.chat( "What is the capital of France?" )
90
+
91
+ if response.success?
92
+ puts response.result.text
93
+ else
94
+ puts "Error: #{response.result.error_description}"
95
+ end
96
+ ```
97
+
98
+ The `response` object is a `Faraday` response with an added method: `result`. If a response is
+ successful, `result` returns a `ChatResult`; if it is not, it returns a `ChatErrorResult`. You
+ can use the `Faraday` method `success?` to determine whether the response is successful.
102
+
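+ Because the response is a `Faraday` response, the usual `Faraday` methods are available
+ alongside `result`. A small sketch:
+
+ ```ruby
+ response = request.chat( "What is the capital of France?" )
+
+ puts response.status                      # plain Faraday method: the HTTP status code, e.g. 200
+ if response.success?
+   puts response.result.text               # ChatResult
+ else
+   puts response.result.error_description  # ChatErrorResult
+ end
+ ```
+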
103
+ ### Results
104
+
105
+ When you make a request using Intelligence, the response includes a `result` that provides
106
+ structured access to the model's output.
107
+
108
+ - A `ChatResult` contains one or more `choices` (alternate responses from the model). The
109
+ `choices` method returns an array of `ChatResultChoice` instances. `ChatResult` also
110
+ includes a `metrics` method which provides information about token usage for the request.
111
+ - A `ChatResultChoice` contains a `message` from the assistant and an `end_reason` which
112
+ indicates how the response ended:
113
+ - `:ended` means the model completed its response normally
114
+ - `:token_limit_exceeded` means the response hit the token limit ( `max_tokens` )
115
+ - `:end_sequence_encountered` means the response hit a stop sequence
116
+ - `:filtered` means the content was filtered by the vendor's safety settings or protocols
117
+ - `:tool_called` means the model is requesting to use a tool
118
+ - The `Message` in each choice contains one or more content items, typically text but
119
+ potentially tool calls or other content types.
120
+
121
+ While the convenience method `text` used in the previous example is useful for simple cases,
122
+ you will typically want to work with the full response structure.
123
+
124
+ ```ruby
125
+ adapter = Intelligence::Adapter[ :google ].new(
126
+ key: ENV[ 'GOOGLE_API_KEY' ],
127
+ chat_options: {
128
+ model: 'gemini-1.5-flash-002',
129
+ max_tokens: 256
130
+ }
131
+ )
132
+
133
+ request = Intelligence::ChatRequest.new( adapter: adapter )
134
+ response = request.chat( "What are three interesting facts about ruby gemstones?" )
135
+
136
+ if response.success?
137
+ result = response.result # this is a ChatResult
138
+
139
+ # iterate through the model's choices
140
+ result.choices.each do | choice |
141
+ # check why the response ended
142
+ puts "Response ended because: #{choice.end_reason}"
143
+
144
+ # work with the message
145
+ message = choice.message
146
+ puts "Message role: #{message.role}"
147
+
148
+ # examine each piece of content
149
+ message.each_content do | content |
150
+ puts content.text if content.is_a?( Intelligence::MessageContent::Text )
151
+ end
152
+ end
153
+
154
+ # check token usage if metrics are available
155
+ if result.metrics
156
+ puts "Input tokens: #{result.metrics.input_tokens}"
157
+ puts "Output tokens: #{result.metrics.output_tokens}"
158
+ puts "Total tokens: #{result.metrics.total_tokens}"
159
+ end
160
+ else
161
+ # or alternatively handle the error result
162
+ puts "Error: #{response.result.error_description}"
163
+ end
164
+ ```
165
+
166
+ The `ChatResult`, `ChatResultChoice` and `Message` classes all provide the `text` convenience
+ method, which returns their text.
168
+
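+ For example, given a successful `response` from the request above, the following calls return
+ the same generated text for a single-choice result ( a quick sketch ):
+
+ ```ruby
+ result  = response.result        # ChatResult
+ choice  = result.choices.first   # ChatResultChoice
+ message = choice.message         # Message
+
+ # each of these returns the same combined text
+ puts result.text
+ puts choice.text
+ puts message.text
+ ```
+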
169
+ ### Conversations, Messages, and Content
170
+
171
+ Intelligence organizes interactions with models using three main components:
172
+
173
+ - **Conversations** are collections of messages that represent a complete interaction with a
174
+ model. A conversation can include an optional system message that sets the context, a series
175
+ of back-and-forth messages between the user and assistant, and any tools the model may call.
176
+
177
+ - **Messages** are individual communications within a conversation. Each message has a role
178
+ (`:system`, `:user`, or `:assistant`) that identifies its sender and can contain multiple
179
+ pieces of content.
180
+
181
+ - **Content** represents the actual data within a message. This can be text
182
+ ( `MessageContent::Text` ), binary data like images ( `MessageContent::Binary` ), references
183
+ to files ( `MessageContent::File` ), or tool calls and tool results ( `MessageContent::ToolCall`
+ and `MessageContent::ToolResult`, respectively ).
185
+
186
+ In the previous examples we used a simple string as an argument to `chat`. As a convenience,
187
+ the `chat` method builds a conversation for you from a String but, typically, you will construct
+ a conversation instance ( `Conversation` ) and pass that to the `chat` or `stream` methods.
189
+
190
+ The following example expands on the minimal example, building a conversation, messages and content:
191
+
192
+ ```ruby
193
+ # create an adapter as before
194
+ adapter = Intelligence::Adapter[ :google ].new(
195
+ key: ENV[ 'GOOGLE_API_KEY' ],
196
+ chat_options: { model: 'gemini-1.5-flash-002', max_tokens: 256 }
197
+ )
198
+
199
+ # create a conversation
200
+ conversation = Intelligence::Conversation.new
201
+
202
+ # add a system message (optional but recommended)
203
+ system_message = Intelligence::Message.new( :system )
204
+ system_message << Intelligence::MessageContent::Text.new(
205
+ text: "You are a helpful coding assistant."
206
+ )
207
+ conversation.system_message = system_message
208
+
209
+ # add a user message
210
+ user_message = Intelligence::Message.new( :user )
211
+ user_message << Intelligence::MessageContent::Text.new(
212
+ text: "How do I read a file in Ruby?"
213
+ )
214
+ conversation.messages << user_message
215
+
216
+ # make the request
217
+ request = Intelligence::ChatRequest.new( adapter: adapter )
218
+ response = request.chat( conversation )
219
+
220
+ if response.success?
221
+ puts response.result.text
222
+ else
223
+ puts "Error: #{response.result.error_description}"
224
+ end
225
+ ```
226
+
227
+ The hierarchical nature of these components makes it easy to organize and access your interaction
228
+ data. A conversation acts as a container for messages, and each message acts as a container for
229
+ content items. This structure allows for rich interactions that can include multiple types of
230
+ content in a single message.
231
+
232
+ You can examine the contents of a conversation by iterating through its messages and their content:
233
+
234
+ ```ruby
235
+ # iterate through messages
236
+ conversation.messages.each do |message|
237
+ puts "Role: #{message.role}"
238
+
239
+ # each message can have multiple content items
240
+ message.each_content do |content|
241
+ case content
242
+ when Intelligence::MessageContent::Text
243
+ puts "Text: #{content.text}"
244
+ when Intelligence::MessageContent::Binary
245
+ puts "Binary content of type: #{content.content_type}"
246
+ when Intelligence::MessageContent::File
247
+ puts "File reference: #{content.uri}"
248
+ end
249
+ end
250
+ end
251
+
252
+ # remember that, alternatively, you can use convenience methods for quick text access
+ puts message.text # combines all text content in a message with newlines
254
+ ```
255
+ ### Continuing Conversations / Maintaining Context
256
+
257
+ To continue a conversation with the model, we can add the model's response and our follow-up
258
+ message to the conversation:
259
+
260
+ ```ruby
261
+ # get the previous response
262
+ if response.success?
263
+ # add the assistant's response to our conversation
264
+ assistant_message = response.result.message
265
+ conversation.messages << assistant_message
266
+
267
+ # add another user message for follow-up
268
+ follow_up = Intelligence::Message.new( :user )
269
+ follow_up << Intelligence::MessageContent::Text.new(
270
+ text: "How do I write to that file?"
271
+ )
272
+ conversation.messages << follow_up
273
+
274
+ # make another request with the updated conversation
275
+ response = request.chat( conversation )
276
+
277
+ if response.success?
278
+ puts response.result.text
279
+ end
280
+ end
281
+ ```
282
+
283
+ This pattern allows you to maintain context across multiple interactions with the model. Each
284
+ request includes the full conversation history, helping the model provide more contextually
285
+ relevant responses.
286
+
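+ Putting this together, a minimal sketch of a console loop that keeps appending to the same
+ conversation ( and so preserves context between turns ) might look like this:
+
+ ```ruby
+ request = Intelligence::ChatRequest.new( adapter: adapter )
+
+ loop do
+   print "> "
+   input = gets&.strip
+   break if input.nil? || input.empty?
+
+   user_message = Intelligence::Message.new( :user )
+   user_message << Intelligence::MessageContent::Text.new( text: input )
+   conversation.messages << user_message
+
+   response = request.chat( conversation )
+   break unless response.success?
+
+   # keep the assistant's reply so the next turn includes the full history
+   conversation.messages << response.result.message
+   puts response.result.text
+ end
+ ```
+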
287
+ ### Builders
288
+
289
+ For more readable configuration, Intelligence provides builder syntax for both adapters and
290
+ conversations.
291
+
292
+ ```ruby
293
+ adapter = Intelligence::Adapter.build! :google do
294
+ key ENV['GOOGLE_API_KEY']
295
+ chat_options do
296
+ model 'gemini-1.5-flash-002'
297
+ max_tokens 256
298
+ temperature 0.7
299
+ end
300
+ end
301
+ ```
302
+
303
+ Similarly, you can use builders to construct conversations with multiple messages.
304
+
305
+ ```ruby
306
+ conversation = Intelligence::Conversation.build do
307
+ system_message do
308
+ content text: "You are a knowledgeable historian specializing in ancient civilizations."
309
+ end
310
+
311
+ message do
312
+ role :user
313
+ content text: "What were the key factors in the fall of the Roman Empire?"
314
+ end
315
+ end
316
+
317
+ request = Intelligence::ChatRequest.new( adapter: adapter )
318
+ response = request.chat( conversation )
319
+ ```
320
+
321
+ ## Binary and File Content
322
+
323
+ Intelligence supports vision models through binary and file content types.
324
+
325
+ ```ruby
326
+ require 'intelligence'
327
+ require 'mime-types'
328
+
329
+ adapter = Intelligence::Adapter.build! :open_ai do
330
+ key ENV[ 'OPENAI_API_KEY' ]
331
+ chat_options do
332
+ model 'gpt-4-vision-preview'
333
+ max_tokens 256
334
+ end
335
+ end
336
+
337
+ # Using binary content for local images
338
+ conversation = Intelligence::Conversation.build do
339
+ message do
340
+ role :user
341
+ content text: "What's in this image?"
342
+ content do
343
+ type :binary
344
+ content_type 'image/jpeg'
345
+ bytes File.binread( 'path/to/image.jpg' )
346
+ end
347
+ end
348
+ end
349
+
350
+ request = Intelligence::ChatRequest.new( adapter: adapter )
351
+ response = request.chat( conversation )
352
+ ```
353
+
354
+ For remote images, you can use file content instead of binary content:
355
+
356
+ ```ruby
357
+ conversation = Intelligence::Conversation.build do
358
+ message do
359
+ role :user
360
+ content text: "Analyze this image"
361
+ content do
362
+ type :file
363
+ content_type 'image/jpeg'
364
+ uri 'https://example.com/image.jpg'
365
+ end
366
+ end
367
+ end
368
+ ```
369
+
370
+ ## Tools
371
+
372
+ Intelligence supports tool/function calling capabilities, allowing models to
373
+ use defined tools during their response.
374
+
375
+ ```ruby
376
+ adapter = Intelligence::Adapter.build! :anthropic do
377
+ key ENV['ANTHROPIC_API_KEY']
378
+ chat_options do
379
+ model 'claude-3-5-sonnet-20240620'
380
+ max_tokens 1024
381
+ end
382
+ end
383
+
384
+ # Define a tool for getting weather information
385
+ weather_tool = Intelligence::Tool.build! do
386
+ name :get_weather
387
+ description "Get the current weather for a specified location"
388
+ argument name: :location, required: true, type: 'object' do
389
+ description "The location for which to retrieve weather information"
390
+ property name: :city, type: 'string', required: true do
391
+ description "The city or town name"
392
+ end
393
+ property name: :state, type: 'string' do
394
+ description "The state or province (optional)"
395
+ end
396
+ property name: :country, type: 'string' do
397
+ description "The country (optional)"
398
+ end
399
+ end
400
+ end
401
+
402
+ # Create a conversation with the tool
403
+ conversation = Intelligence::Conversation.build do
404
+ system_message do
405
+ content text: "You can help users check weather conditions."
406
+ end
407
+
408
+ # Add the tool to the conversation
409
+ tools << weather_tool
410
+
411
+ message do
412
+ role :user
413
+ content text: "What's the weather like in Paris, France?"
414
+ end
415
+ end
416
+
417
+ request = Intelligence::ChatRequest.new( adapter: adapter )
418
+ response = request.chat( conversation )
419
+
420
+ # Handle tool calls in the response
421
+ if response.success?
422
+ response.result.choices.each do |choice|
423
+ choice.message.each_content do |content|
424
+ if content.is_a?(Intelligence::MessageContent::ToolCall)
425
+ # Process the tool call
426
+ if content.tool_name == :get_weather
427
+ # Make actual weather API call here
428
+ weather_data = fetch_weather(content.tool_parameters[:location])
429
+
430
+ # Send tool result back to continue the conversation
431
+ conversation.messages << Intelligence::Message.build! do
432
+ role :user
433
+ content do
434
+ type :tool_result
435
+ tool_call_id content.tool_call_id
436
+ tool_result weather_data.to_json
437
+ end
438
+ end
439
+ end
440
+ end
441
+ end
442
+ end
443
+ end
444
+ ```
445
+
446
+ Tools are defined using the `Intelligence::Tool.build!` method, where you specify the tool's
447
+ name, description, and its argument schema. Arguments can have nested properties with their
448
+ own descriptions and requirements. Once defined, tools are added to conversations and can be
449
+ used by the model during its response.
450
+
451
+ Note that not all providers support tools, and the specific tool capabilities may vary between
452
+ providers. Today, OpenAI, Anthropic, Google, Mistral, and Together AI support tools. In general,
+ all of these providers support tools in an identical manner, but as of this writing Google does
+ not support 'complex' tools which take object parameters.
455
+
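+ Where a provider does not accept object-valued arguments, you can often restate a tool with
+ flat scalar arguments instead. A sketch reusing the same builder DSL; whether `argument` accepts
+ scalar types directly in this way is an assumption here, and the argument names are illustrative:
+
+ ```ruby
+ simple_weather_tool = Intelligence::Tool.build! do
+   name :get_weather
+   description "Get the current weather for a specified city"
+   argument name: :city, type: 'string', required: true do
+     description "The city or town name"
+   end
+   argument name: :country, type: 'string' do
+     description "The country (optional)"
+   end
+ end
+ ```
+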
456
+ ## Streaming Responses
457
+
458
+ The `chat` method, while straightforward, can be time consuming ( especially when using modern
+ 'reasoning' models like OpenAI o1 ). The alternative is to use the `stream` method, which
+ receives results as they are generated by the model.
461
+
462
+ ```ruby
463
+ adapter = Intelligence::Adapter.build! :anthropic do
464
+ key ENV['ANTHROPIC_API_KEY']
465
+ chat_options do
466
+ model 'claude-3-5-sonnet-20240620'
467
+ max_tokens 1024
468
+ stream true
469
+ end
470
+ end
471
+
472
+ request = Intelligence::ChatRequest.new(adapter: adapter)
473
+
474
+ response = request.stream( "Tell me a story about a robot." ) do | request |
475
+ request.receive_result do | result |
476
+ # result is a ChatResult object with partial content
477
+ print result.text
478
+ print "\n" if result.choices.first.end_reason
479
+ end
480
+ end
481
+ ```
482
+
483
+ Notice that with this approach you will receive multiple results ( `ChatResult` instances ),
+ each with a fragment of the generation. The result always includes a `message` and will
+ include `contents` as soon as any content is received. The `contents` are always positionally
+ consistent, meaning that if a model is, for example, generating text followed by several
+ tool calls, you may receive a single text content initially, then the text content and a tool
+ call, and then the subsequent tool calls, even after the text has been completely generated.
489
+
490
+ Remember that every `result` contains only a fragment of content and it is possible that
491
+ any given fragment is completely blank ( that is, it is possible for the content to be
492
+ present in the result but all of its fields are nil ).
493
+
494
+ While you will likely want to immediately output any generated text, as a practical matter
+ tool calls are not useful until fully generated. To assemble tool calls ( or the text ) from
+ the streamed fragments you may use the content item's `merge` method.
497
+
498
+ ```ruby
499
+ request = Intelligence::ChatRequest.new( adapter: adapter )
500
+
501
+ contents = []
502
+ response = request.stream( "Tell me a story about a robot." ) do | request |
503
+ request.receive_result do | result |
504
+ choice = result.choices.first
505
+ contents_fragments = choice.message.contents
506
+ contents.fill( nil, contents.length..(contents_fragments.length - 1) )
507
+
508
+ contents_fragments.each_with_index do | contents_fragment, index |
509
+ if contents_fragment.is_a?( Intelligence::MessageContent::Text )
510
+ # here we need the `|| ''` because the text of the fragment may be nil
511
+ print contents_fragment.text || ''
512
+ else
513
+ contents[ index ] = contents[ index ].nil? ?
514
+ contents_fragment :
515
+ contents[ index ].merge( contents_fragment )
516
+ end
517
+ end
518
+
519
+ end
520
+ end
521
+ ```
522
+
523
+ In the above example we construct an array to receive the content. As the content fragments
+ are streamed, we immediately output generated text, while other types of content ( today
+ this can only be instances of `Intelligence::MessageContent::ToolCall` ) are individually
+ combined in the `contents` array. You can then simply iterate through the array and retrieve
+ and take action on any of the tool calls.
528
+
529
+ Note also that the `result` will only include a non-nil `end_reason` in the last ( or one
+ of the last ) `result` instances to be received.
531
+
532
+ Finally note that the streamed `result` is always a `ChatResult`, never a `ChatErrorResult`.
533
+ If an error occurs, the request itself will fail and you will receive this as part of
534
+ `response.result`.
535
+
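+ A minimal sketch of that error path, assuming the streamed `response` behaves like the `chat`
+ response above:
+
+ ```ruby
+ response = request.stream( "Tell me a story about a robot." ) do | request |
+   request.receive_result { | result | print result.text }
+ end
+
+ # after streaming completes, inspect the response as usual
+ unless response.success?
+   puts "Error: #{response.result.error_description}"
+ end
+ ```
+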
536
+ ## Provider Switching
537
+
538
+ One of Intelligence's most powerful features is the ability to easily switch between providers:
539
+
540
+ ```ruby
541
+ def create_adapter(provider, api_key, model_name)
542
+ Intelligence::Adapter.build! provider do
543
+ key api_key
544
+ chat_options do
545
+ model model_name
546
+ max_tokens 256
547
+ end
548
+ end
549
+ end
550
+
551
+ # Create adapters for different providers
552
+ anthropic = create_adapter(:anthropic, ENV['ANTHROPIC_API_KEY'], 'claude-3-5-sonnet-20240620')
553
+ google = create_adapter(:google, ENV['GOOGLE_API_KEY'], 'gemini-1.5-pro-002')
554
+ openai = create_adapter(:open_ai, ENV['OPENAI_API_KEY'], 'gpt-4o')
555
+
556
+ # Use the same conversation with different providers
557
+ conversation = Intelligence::Conversation.build do
558
+ system_message do
559
+ content text: "You are a helpful assistant."
560
+ end
561
+ message do
562
+ role :user
563
+ content text: "Explain quantum entanglement in simple terms."
564
+ end
565
+ end
566
+
567
+ [anthropic, google, openai].each do |adapter|
568
+ request = Intelligence::ChatRequest.new(adapter: adapter)
569
+ response = request.chat(conversation)
570
+ puts "#{adapter.class.name} response: #{response.result.text}"
571
+ end
572
+ ```
573
+
574
+ ## License
575
+
576
+ This gem is available as open source under the terms of the MIT License.
data/intelligence.gemspec CHANGED
@@ -37,8 +37,9 @@ Gem::Specification.new do | spec |
  spec.require_paths = [ "lib" ]
 
  spec.add_runtime_dependency 'faraday', '~> 2.7'
- spec.add_runtime_dependency 'adaptiveconfiguration', '~> 1.0.0.beta08'
+ spec.add_runtime_dependency 'dynamicschema', '~> 1.0.0.beta03'
  spec.add_runtime_dependency 'mime-types', '~> 3.6'
+ spec.add_runtime_dependency 'json-repair', '~> 0.2'
 
  spec.add_development_dependency 'rspec', '~> 3.4'
  spec.add_development_dependency 'debug', '~> 1.9'
data/lib/intelligence/adapter/base.rb CHANGED
@@ -1,19 +1,26 @@
- require_relative 'class_methods/construction'
+ require_relative 'class_methods'
 
  module Intelligence
  module Adapter
  class Base
- extend AdaptiveConfiguration::Configurable
- extend ClassMethods::Construction
+ include DynamicSchema::Definable
+ extend ClassMethods
 
- def initialize( options = nil, configuration: nil )
- @options = options ? self.class.configure( options ) : {}
- @options = configuration.merge( @options ) if configuration
+ def initialize( options = {}, configuration: nil )
+ @options = build_options( options )
+ @options = configuration.merge( @options ) if configuration&.any?
  end
 
  protected
  attr_reader :options
 
+ private
+
+ def build_options( options )
+ return {} unless options&.any?
+ self.class.builder.build( options )
+ end
+
  end
  end
  end
data/lib/intelligence/adapter/class_methods.rb ADDED
@@ -0,0 +1,15 @@
+ module Intelligence
+ module Adapter
+ module ClassMethods
+
+ def build( options = nil, &block )
+ new( configuration: builder.build( options, &block ) )
+ end
+
+ def build!( options = nil, &block )
+ new( configuration: builder.build( options, &block ) )
+ end
+
+ end
+ end
+ end
data/lib/intelligence/adapter/module_methods.rb ADDED
@@ -0,0 +1,41 @@
+ module Intelligence
+ module Adapter
+ module ModuleMethods
+
+ def []( adapter_type )
+
+ raise ArgumentError.new( "An adapter type is required but nil was given." ) \
+ if adapter_type.nil?
+
+ class_name = adapter_type.to_s.split( '_' ).map( &:capitalize ).join
+ class_name += "::Adapter"
+
+ adapter_class = Intelligence.const_get( class_name ) rescue nil
+ if adapter_class.nil?
+ adapter_file = File.expand_path( "../../adapters/#{adapter_type}", __FILE__ )
+ unless require adapter_file
+ raise ArgumentError.new(
+ "The Intelligence adapter file #{adapter_file} is missing or does not define #{class_name}."
+ )
+ end
+ adapter_class = Intelligence.const_get( class_name ) rescue nil
+ end
+
+ raise ArgumentError.new( "An unknown Intelligence adapter #{adapter_type} was requested." ) \
+ if adapter_class.nil?
+
+ adapter_class
+
+ end
+
+ def build( adapter_type, attributes = nil, &block )
+ self.[]( adapter_type ).build( attributes, &block )
+ end
+
+ def build!( adapter_type, attributes = nil, &block )
+ self.[]( adapter_type ).build!( attributes, &block )
+ end
+
+ end
+ end
+ end
data/lib/intelligence/adapter.rb CHANGED
@@ -1,8 +1,8 @@
- require_relative 'adapter/module_methods/construction'
+ require_relative 'adapter/module_methods'
  require_relative 'adapter/base'
 
  module Intelligence
  module Adapter
- extend ModuleMethods::Construction
+ extend ModuleMethods
  end
  end