intelligence 0.5.0 → 0.7.1
- checksums.yaml +4 -4
- data/README.md +555 -0
- data/intelligence.gemspec +1 -1
- data/lib/intelligence/adapter/base.rb +23 -3
- data/lib/intelligence/adapter/class_methods.rb +15 -0
- data/lib/intelligence/adapter/{construction_methods.rb → module_methods.rb} +8 -4
- data/lib/intelligence/adapter.rb +2 -2
- data/lib/intelligence/adapters/anthropic/adapter.rb +21 -30
- data/lib/intelligence/adapters/anthropic/chat_request_methods.rb +189 -0
- data/lib/intelligence/adapters/anthropic/{chat_methods.rb → chat_response_methods.rb} +8 -124
- data/lib/intelligence/adapters/cerebras.rb +17 -17
- data/lib/intelligence/adapters/generic/adapter.rb +1 -12
- data/lib/intelligence/adapters/generic/chat_methods.rb +42 -11
- data/lib/intelligence/adapters/generic.rb +1 -1
- data/lib/intelligence/adapters/google/adapter.rb +33 -35
- data/lib/intelligence/adapters/google/chat_request_methods.rb +233 -0
- data/lib/intelligence/adapters/google/{chat_methods.rb → chat_response_methods.rb} +52 -162
- data/lib/intelligence/adapters/groq.rb +46 -28
- data/lib/intelligence/adapters/hyperbolic.rb +13 -13
- data/lib/intelligence/adapters/legacy/adapter.rb +0 -2
- data/lib/intelligence/adapters/legacy/chat_methods.rb +22 -6
- data/lib/intelligence/adapters/mistral.rb +57 -0
- data/lib/intelligence/adapters/open_ai/adapter.rb +38 -45
- data/lib/intelligence/adapters/open_ai/chat_request_methods.rb +186 -0
- data/lib/intelligence/adapters/open_ai/{chat_methods.rb → chat_response_methods.rb} +60 -131
- data/lib/intelligence/adapters/open_ai.rb +1 -1
- data/lib/intelligence/adapters/open_router.rb +62 -0
- data/lib/intelligence/adapters/samba_nova.rb +13 -13
- data/lib/intelligence/adapters/together_ai.rb +21 -19
- data/lib/intelligence/chat_request.rb +57 -7
- data/lib/intelligence/chat_result.rb +4 -0
- data/lib/intelligence/chat_result_choice.rb +4 -2
- data/lib/intelligence/conversation.rb +38 -9
- data/lib/intelligence/message.rb +103 -20
- data/lib/intelligence/message_content/base.rb +3 -0
- data/lib/intelligence/message_content/binary.rb +6 -0
- data/lib/intelligence/message_content/file.rb +35 -0
- data/lib/intelligence/message_content/text.rb +5 -0
- data/lib/intelligence/message_content/tool_call.rb +12 -1
- data/lib/intelligence/message_content/tool_result.rb +15 -3
- data/lib/intelligence/message_content.rb +12 -3
- data/lib/intelligence/tool.rb +139 -0
- data/lib/intelligence/version.rb +1 -1
- data/lib/intelligence.rb +6 -4
- metadata +18 -9
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 910d4c8472c375a7a3759c474d62e71c01395136d895ccff9dd2f33012fd5a39
+  data.tar.gz: 86b51ac28f93a39556664c3707f341d40fe5aa4ec1cdc27a217586fd222c0c77
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6d47d1f1d333cb1f0ffe8bdb06bf27ad9be4c675349c01460f97021cc733df2410ed7bc597324b3413d44d0ed081a7a0ae129f0173313f4df5400d9012ed37b9
+  data.tar.gz: 90f91d3efdf84c091252d4e16dbbcab41a7f27b39350974a62d80eee37d465f1dfc000b1efb836471d00d47d9a774050cf0f28c3ada96d6ba9947d8f54a6c209
data/README.md
ADDED
@@ -0,0 +1,555 @@
# Intelligence

Intelligence is a lightweight yet powerful Ruby gem that provides a uniform interface for
interacting with large language and vision model APIs across multiple providers. It allows
you to seamlessly integrate with services from OpenAI, Anthropic, Google, Cerebras, Groq,
Hyperbolic, Samba Nova, Together AI, and others, while maintaining a consistent API across
all providers.

The gem operates with minimal dependencies and doesn't require vendor SDK installation,
making it easy to switch between providers or work with multiple providers simultaneously.

```ruby
require 'intelligence'

adapter = Intelligence::Adapter.build :open_ai do
  key ENV[ 'OPENAI_API_KEY' ]
  chat_options do
    model 'gpt-4o'
    max_tokens 256
  end
end

request = Intelligence::ChatRequest.new( adapter: adapter )
conversation = Intelligence::Conversation.build do
  system_message do
    content text: "You are a highly efficient AI assistant. Provide clear, concise responses."
  end
  message role: :user do
    content text: ARGV[ 0 ] || 'Hello!'
  end
end

response = request.chat( conversation )

if response.success?
  puts response.result.text
else
  puts "Error: " + response.result.error_description
end
```

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'intelligence'
```

Then execute:

```bash
$ bundle install
```

Or install it directly:

```bash
$ gem install intelligence
```

## Usage

### Minimal Chat Request

The core components of Intelligence are adapters, requests and responses. An adapter
encapsulates the differences between providers, allowing you to use requests and responses
uniformly.

You retrieve an adapter for a specific vendor, configure it with a key, model and associated
parameters, and then make a request by calling either the `chat` or `stream` method.

```ruby
require 'intelligence'

# configure the adapter with your API key and model settings
adapter = Intelligence::Adapter[ :google ].new(
  key: ENV[ 'GOOGLE_API_KEY' ],
  chat_options: {
    model: 'gemini-1.5-flash-002',
    max_tokens: 256
  }
)

# create a request instance, passing the adapter
request = Intelligence::ChatRequest.new( adapter: adapter )

# make the request and handle the response
response = request.chat( "What is the capital of France?" )

if response.success?
  puts response.result.text
else
  puts "Error: #{response.result.error_description}"
end
```

The `response` object is a Faraday response with one added method: `result`. If the request
succeeded, `result` returns a `ChatResult`; if it did not, it returns a `ChatErrorResult`.
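
Because `response` is an ordinary Faraday response, the usual HTTP details remain available
alongside `result`. A small illustration, assuming the request made above:

```ruby
# the chat / stream response is a regular Faraday::Response
puts response.status                      # HTTP status code, e.g. 200
puts response.headers[ 'content-type' ]   # raw response headers
puts response.result.text if response.success?
```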

### Understanding Results

When you make a request using Intelligence, the response includes a `result` that provides
structured access to the model's output.

- A `ChatResult` contains one or more `choices` (alternate responses from the model). The
  `choices` method returns an array of `ChatResultChoice` instances. It also includes an
  optional `metrics` method which provides information about token usage for the request.
- A `ChatResultChoice` contains a `message` from the assistant and an `end_reason` which
  indicates how the response ended:
  - `:ended` means the model completed its response normally
  - `:token_limit_exceeded` means the response hit the token limit ( `max_tokens` )
  - `:end_sequence_encountered` means the response hit a stop sequence
  - `:filtered` means the content was filtered by safety settings
  - `:tool_called` means the model is requesting to use a tool
- The `Message` in each choice contains one or more content items, typically text but
  potentially tool calls or other content types.

While the convenience method `text` used in the previous example is useful for simple cases,
you will typically want to work with the full response structure.

```ruby
adapter = Intelligence::Adapter[ :google ].new(
  key: ENV[ 'GOOGLE_API_KEY' ],
  chat_options: {
    model: 'gemini-1.5-flash-002',
    max_tokens: 256
  }
)

request = Intelligence::ChatRequest.new( adapter: adapter )
response = request.chat( "What are three interesting facts about ruby gemstones?" )

if response.success?
  result = response.result # this is a ChatResult

  # iterate through the model's choices
  result.choices.each do | choice |
    # check why the response ended
    puts "Response ended because: #{choice.end_reason}"

    # work with the message
    message = choice.message
    puts "Message role: #{message.role}"

    # examine each piece of content
    message.each_content do | content |
      puts content.text if content.is_a?( Intelligence::MessageContent::Text )
    end
  end

  # check token usage if metrics are available
  if result.metrics
    puts "Input tokens: #{result.metrics.input_tokens}"
    puts "Output tokens: #{result.metrics.output_tokens}"
    puts "Total tokens: #{result.metrics.total_tokens}"
  end
else
  # otherwise handle the error result
  puts "Error: #{response.result.error_description}"
end
```

The `ChatResult`, `ChatResultChoice` and `Message` classes all provide the `text` convenience
method, which returns the text of the response.

A response might end for various reasons, indicated by the `end_reason` in each choice
(a short example of branching on it follows the list):
- `:ended` means the model completed its response normally
- `:token_limit_exceeded` means the response hit the token limit
- `:end_sequence_encountered` means the response hit a stop sequence
- `:filtered` means the content was filtered by safety settings
- `:tool_called` means the model is requesting to use a tool
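
A minimal sketch of branching on `end_reason`, assuming a response obtained as in the
examples above:

```ruby
choice = response.result.choices.first

case choice.end_reason
when :ended
  puts choice.message.text
when :token_limit_exceeded
  puts "Response was truncated; consider raising max_tokens."
when :tool_called
  puts "The model wants to call a tool."
else
  puts "Response ended with: #{choice.end_reason}"
end
```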

### Understanding Conversations, Messages, and Content

Intelligence organizes interactions with models using three main components:

- **Conversations** are collections of messages that represent a complete interaction with a
  model. A conversation can include an optional system message that sets the context, and a
  series of back-and-forth messages between the user and assistant.

- **Messages** are individual communications within a conversation. Each message has a role
  (`:system`, `:user`, or `:assistant`) that identifies its sender and can contain multiple
  pieces of content.

- **Content** represents the actual data within a message. This can be text
  (`MessageContent::Text`), binary data like images (`MessageContent::Binary`), or references
  to files (`MessageContent::File`).

In the previous examples we used a simple string as an argument to `chat`. As a convenience,
the `chat` method builds a conversation for you but, typically, you will construct a
`Conversation` instance and pass that to the `chat` or `stream` methods.

The following example expands the minimal example, building a conversation, messages and content:

```ruby
# create an adapter as before
adapter = Intelligence::Adapter[ :google ].new(
  key: ENV[ 'GOOGLE_API_KEY' ],
  chat_options: { model: 'gemini-1.5-flash-002', max_tokens: 256 }
)

# create a conversation
conversation = Intelligence::Conversation.new

# add a system message (optional but recommended)
system_message = Intelligence::Message.new( :system )
system_message << Intelligence::MessageContent::Text.new(
  text: "You are a helpful coding assistant."
)
conversation.system_message = system_message

# add a user message
user_message = Intelligence::Message.new( :user )
user_message << Intelligence::MessageContent::Text.new(
  text: "How do I read a file in Ruby?"
)
conversation.messages << user_message

# make the request
request = Intelligence::ChatRequest.new( adapter: adapter )
response = request.chat( conversation )

if response.success?
  puts response.result.text
else
  puts "Error: #{response.result.error_description}"
end
```

The hierarchical nature of these components makes it easy to organize and access your interaction
data. A conversation acts as a container for messages, and each message acts as a container for
content items. This structure allows for rich interactions that can include multiple types of
content in a single message.

You can examine the contents of a conversation by iterating through its messages and their content:

```ruby
# iterate through messages
conversation.messages.each do |message|
  puts "Role: #{message.role}"

  # each message can have multiple content items
  message.each_content do |content|
    case content
    when Intelligence::MessageContent::Text
      puts "Text: #{content.text}"
    when Intelligence::MessageContent::Binary
      puts "Binary content of type: #{content.content_type}"
    when Intelligence::MessageContent::File
      puts "File reference: #{content.uri}"
    end
  end
end

# remember that, alternatively, you can use convenience methods for quick text access
puts conversation.messages.last.text # combines all text content in a message with newlines
```

### Continuing Conversations / Maintaining Context

To continue a conversation with the model, we can add the model's response and our follow-up
message to the conversation:

```ruby
# get the previous response
if response.success?
  # add the assistant's response to our conversation
  assistant_message = response.result.message
  conversation.messages << assistant_message

  # add another user message for follow-up
  follow_up = Intelligence::Message.new( :user )
  follow_up << Intelligence::MessageContent::Text.new(
    text: "How do I write to that file?"
  )
  conversation.messages << follow_up

  # make another request with the updated conversation
  response = request.chat( conversation )

  if response.success?
    puts response.result.text
  end
end
```

This pattern allows you to maintain context across multiple interactions with the model. Each
request includes the full conversation history, helping the model provide more contextually
relevant responses.
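
Applied repeatedly, the same pattern gives you a simple interactive loop. The following is a
minimal sketch, assuming the adapter and conversation set up earlier and reading input from
standard input:

```ruby
request = Intelligence::ChatRequest.new( adapter: adapter )

loop do
  print "> "
  input = gets&.strip
  break if input.nil? || input.empty?

  # add the user's turn to the conversation
  user_message = Intelligence::Message.new( :user )
  user_message << Intelligence::MessageContent::Text.new( text: input )
  conversation.messages << user_message

  response = request.chat( conversation )
  break unless response.success?

  # keep the assistant's reply so the next turn has the full history
  conversation.messages << response.result.message
  puts response.result.text
end
```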

### Using Builders

For more readable configuration, Intelligence provides builder syntax for both adapters and
conversations.

```ruby
adapter = Intelligence::Adapter.build! :google do
  key ENV['GOOGLE_API_KEY']
  chat_options do
    model 'gemini-1.5-flash-002'
    max_tokens 256
    temperature 0.7
  end
end
```

Similarly, you can use builders to construct conversations with multiple messages.

```ruby
conversation = Intelligence::Conversation.build do
  system_message do
    content text: "You are a knowledgeable historian specializing in ancient civilizations."
  end

  message do
    role :user
    content text: "What were the key factors in the fall of the Roman Empire?"
  end
end

request = Intelligence::ChatRequest.new( adapter: adapter )
response = request.chat( conversation )
```

## Binary and File Content

Intelligence supports vision models through binary and file content types.

```ruby
require 'intelligence'
require 'mime-types'

adapter = Intelligence::Adapter.build! :open_ai do
  key ENV[ 'OPENAI_API_KEY' ]
  chat_options do
    model 'gpt-4-vision-preview'
    max_tokens 256
  end
end

# Using binary content for local images
conversation = Intelligence::Conversation.build do
  message do
    role :user
    content text: "What's in this image?"
    content do
      type :binary
      content_type 'image/jpeg'
      bytes File.binread( 'path/to/image.jpg' )
    end
  end
end

request = Intelligence::ChatRequest.new( adapter: adapter )
response = request.chat( conversation )
```
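
The example above requires `mime-types` but hard-codes the content type. If you would rather
derive it from the file name, something along these lines works (a sketch using the mime-types
gem; the fallback type is an assumption):

```ruby
require 'mime-types'

path = 'path/to/image.jpg'
detected_type = MIME::Types.type_for( path ).first&.content_type || 'application/octet-stream'

conversation = Intelligence::Conversation.build do
  message do
    role :user
    content text: "What's in this image?"
    content do
      type :binary
      content_type detected_type
      bytes File.binread( path )
    end
  end
end
```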

For remote images, you can use file content instead of binary content:

```ruby
conversation = Intelligence::Conversation.build do
  message do
    role :user
    content text: "Analyze this image"
    content do
      type :file
      content_type 'image/jpeg'
      uri 'https://example.com/image.jpg'
    end
  end
end
```

## Tools

Intelligence supports tool/function calling capabilities, allowing models to
use defined tools during their response.

```ruby
adapter = Intelligence::Adapter.build! :anthropic do
  key ENV['ANTHROPIC_API_KEY']
  chat_options do
    model 'claude-3-5-sonnet-20240620'
    max_tokens 1024
  end
end

# Define a tool for getting weather information
weather_tool = Intelligence::Tool.build! do
  name :get_weather
  description "Get the current weather for a specified location"
  argument name: :location, required: true, type: 'object' do
    description "The location for which to retrieve weather information"
    property name: :city, type: 'string', required: true do
      description "The city or town name"
    end
    property name: :state, type: 'string' do
      description "The state or province (optional)"
    end
    property name: :country, type: 'string' do
      description "The country (optional)"
    end
  end
end

# Create a conversation with the tool
conversation = Intelligence::Conversation.build do
  system_message do
    content text: "You can help users check weather conditions."
  end

  # Add the tool to the conversation
  tools << weather_tool

  message do
    role :user
    content text: "What's the weather like in Paris, France?"
  end
end

request = Intelligence::ChatRequest.new( adapter: adapter )
response = request.chat( conversation )

# Handle tool calls in the response
if response.success?
  response.result.choices.each do |choice|
    choice.message.each_content do |content|
      if content.is_a?(Intelligence::MessageContent::ToolCall)
        # Process the tool call
        if content.tool_name == :get_weather
          # Make actual weather API call here
          weather_data = fetch_weather(content.tool_parameters[:location])

          # Send tool result back to continue the conversation
          conversation.messages << Intelligence::Message.build! do
            role :user
            content do
              type :tool_result
              tool_call_id content.tool_call_id
              tool_result weather_data.to_json
            end
          end
        end
      end
    end
  end
end
```
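
The `fetch_weather` helper in the example above is not part of Intelligence; it stands in for
whatever weather lookup you provide. A hypothetical stub, just to make the example runnable:

```ruby
# hypothetical helper; replace with a call to a real weather API
def fetch_weather( location )
  {
    city:        location[ :city ],
    temperature: 18,
    conditions:  'partly cloudy'
  }
end
```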

Tools are defined using the `Intelligence::Tool.build!` method, where you specify the tool's
name, description, and its argument schema. Arguments can have nested properties with their
own descriptions and requirements. Once defined, tools are added to conversations and can be
used by the model during its response.

Note that not all providers support tools, and the specific tool capabilities may vary between
providers. Check your provider's documentation for details on tool support and requirements.

## Streaming Responses

Once you're familiar with basic requests, you might want to use streaming for real-time
responses. Streaming delivers the model's response in chunks as it's generated:

```ruby
adapter = Intelligence::Adapter.build! :anthropic do
  key ENV['ANTHROPIC_API_KEY']
  chat_options do
    model 'claude-3-5-sonnet-20240620'
    max_tokens 1024
    stream true
  end
end

request = Intelligence::ChatRequest.new(adapter: adapter)

response = request.stream("Tell me a story about a robot.") do |request|
  request.receive_result do |result|
    # result is a ChatResult object with partial content
    print result.text
    print "\n" if result.choices.first.end_reason
  end
end
```
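
If you also want the complete text once streaming finishes, you can accumulate the chunks
yourself. A minimal sketch, assuming each partial result carries only the newly generated
text, as in the example above:

```ruby
buffer = +""

request.stream( "Tell me a story about a robot." ) do |request|
  request.receive_result do |result|
    chunk = result.text
    if chunk
      buffer << chunk   # collect the partial text
      print chunk       # and still show it live
    end
  end
end

puts
puts "--- full response: #{buffer.length} characters ---"
```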

Streaming also works with complex conversations and binary content:

```ruby
conversation = Intelligence::Conversation.build do
  system_message do
    content text: "You are an image analysis expert."
  end

  message do
    role :user
    content text: "Describe this image in detail"
    content do
      type :binary
      content_type 'image/jpeg'
      bytes File.binread('path/to/image.jpg')
    end
  end
end

response = request.stream(conversation) do |request|
  request.receive_result do |result|
    result.choices.each do |choice|
      choice.message.each_content do |content|
        print content.text if content.is_a?(Intelligence::MessageContent::Text)
      end
    end
  end
end
```

## Provider Switching

One of Intelligence's most powerful features is the ability to easily switch between providers:

```ruby
def create_adapter(provider, api_key, model_name)
  Intelligence::Adapter.build! provider do
    key api_key
    chat_options do
      model model_name
      max_tokens 256
    end
  end
end

# Create adapters for different providers
anthropic = create_adapter(:anthropic, ENV['ANTHROPIC_API_KEY'], 'claude-3-5-sonnet-20240620')
google = create_adapter(:google, ENV['GOOGLE_API_KEY'], 'gemini-1.5-pro-002')
openai = create_adapter(:open_ai, ENV['OPENAI_API_KEY'], 'gpt-4o')

# Use the same conversation with different providers
conversation = Intelligence::Conversation.build do
  system_message do
    content text: "You are a helpful assistant."
  end
  message do
    role :user
    content text: "Explain quantum entanglement in simple terms."
  end
end

[anthropic, google, openai].each do |adapter|
  request = Intelligence::ChatRequest.new(adapter: adapter)
  response = request.chat(conversation)
  puts "#{adapter.class.name} response: #{response.result.text}"
end
```

## License

This gem is available as open source under the terms of the MIT License.

data/intelligence.gemspec
CHANGED
@@ -37,7 +37,7 @@ Gem::Specification.new do | spec |
   spec.require_paths = [ "lib" ]

   spec.add_runtime_dependency 'faraday', '~> 2.7'
-  spec.add_runtime_dependency '
+  spec.add_runtime_dependency 'dynamicschema', '~> 1.0.0.beta03'
   spec.add_runtime_dependency 'mime-types', '~> 3.6'

   spec.add_development_dependency 'rspec', '~> 3.4'
data/lib/intelligence/adapter/base.rb
CHANGED
@@ -1,7 +1,27 @@
+require_relative 'class_methods'
+
 module Intelligence
   module Adapter
     class Base
-
-
+      include DynamicSchema::Definable
+      extend ClassMethods
+
+      def initialize( options = {}, configuration: nil )
+        @options = build_options( options )
+        @options = configuration.merge( @options ) if configuration&.any?
+      end
+
+      protected
+      attr_reader :options
+
+      private
+
+      def build_options( options )
+        return {} unless options&.any?
+        self.class.builder.build( options )
+      end
+
+    end
   end
-end
+end
+
data/lib/intelligence/adapter/class_methods.rb
ADDED
@@ -0,0 +1,15 @@
+module Intelligence
+  module Adapter
+    module ClassMethods
+
+      def build( options = nil, &block )
+        new( configuration: builder.build( options, &block ) )
+      end
+
+      def build!( options = nil, &block )
+        new( configuration: builder.build( options, &block ) )
+      end
+
+    end
+  end
+end
data/lib/intelligence/adapter/{construction_methods.rb → module_methods.rb}
CHANGED
@@ -1,6 +1,6 @@
 module Intelligence
   module Adapter
-    module 
+    module ModuleMethods
 
       def []( adapter_type )
 
@@ -21,15 +21,19 @@ module Intelligence
           adapter_class = Intelligence.const_get( class_name ) rescue nil
         end
 
-        raise ArgumentError.new( "An unknown Intelligence adapter #{adapter_type} was
+        raise ArgumentError.new( "An unknown Intelligence adapter #{adapter_type} was requested." ) \
          if adapter_class.nil?
 
        adapter_class
 
      end
 
-      def build( adapter_type, attributes, &block )
-        self.[]( adapter_type ).
+      def build( adapter_type, attributes = nil, &block )
+        self.[]( adapter_type ).build( attributes, &block )
+      end
+
+      def build!( adapter_type, attributes = nil, &block )
+        self.[]( adapter_type ).build!( attributes, &block )
      end
 
    end
data/lib/intelligence/adapter.rb
CHANGED