boxcars 0.6.8 → 0.7.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: b025b4faf290736766df116e4146cdcea5285bfbd54b6b1c8eaae1e3fc220507
- data.tar.gz: cdc51dd2f92609b3b2d52879de4716fe8a34e20a15c9bc5d9f27c005bd5f760f
+ metadata.gz: f737b8f862a6054265b74b7705a9779d301ceba197737c5b85aed4fc96733867
+ data.tar.gz: ffb896560aab128f2bcf685082755ce6400174d38b7d8b5294feea51808d0e17
  SHA512:
- metadata.gz: 4267533089c1c3be564ac59d321a8adb670d4197d0548767be1291835049c1edf86f6994b93186d4b0c5ab49445f17d1bdba35bc3b983eafea150c0b05732775
- data.tar.gz: 3d37938ea2c431fe3f1b839471421fc5af46e0c60ede688265874d8b1ac93a6e58e440509a66aea4c91375986ed0295a477cca3adca75f68ffc028f33976583a
+ metadata.gz: 2f9afcd76b2efda7fc9f35f2ae02882256a03f935d8a94a10149daa10ad8180c5d91db34e11296310074a1ec9d5d4863a10e74ddc41b19cd9d250d2da64bdff2
+ data.tar.gz: 17860c296d8cb4a167e4e4bc58835b3bb9da867bb65e38b89cc08d884f88bf09707bc64b4a36195848d7ee995592c4f32ede29752744a7dee4135dd90338285f
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
  # Changelog

+ ## [v0.6.9](https://github.com/BoxcarsAI/boxcars/tree/v0.6.9) (2024-12-19)
+
+ [Full Changelog](https://github.com/BoxcarsAI/boxcars/compare/v0.6.8...v0.6.9)
+
+ ## [v0.6.8](https://github.com/BoxcarsAI/boxcars/tree/v0.6.8) (2024-12-07)
+
+ [Full Changelog](https://github.com/BoxcarsAI/boxcars/compare/v0.6.7...v0.6.8)
+
  ## [v0.6.7](https://github.com/BoxcarsAI/boxcars/tree/v0.6.7) (2024-12-04)

  [Full Changelog](https://github.com/BoxcarsAI/boxcars/compare/v0.6.6...v0.6.7)
data/Gemfile.lock CHANGED
@@ -1,25 +1,26 @@
  PATH
  remote: .
  specs:
- boxcars (0.6.8)
- anthropic (~> 0.1)
+ boxcars (0.7.1)
+ anthropic (~> 0.3)
  google_search_results (~> 2.2)
- gpt4all (~> 0.0.4)
- hnswlib (~> 0.8)
- nokogiri (~> 1.16)
+ gpt4all (~> 0.0.5)
+ hnswlib (~> 0.9)
+ intelligence (>= 0.8)
+ nokogiri (~> 1.18)
  pgvector (~> 0.2)
- ruby-openai (>= 7.1, < 8.0)
+ ruby-openai (>= 7.3)

  GEM
  remote: https://rubygems.org/
  specs:
- activemodel (7.2.2)
- activesupport (= 7.2.2)
- activerecord (7.2.2)
- activemodel (= 7.2.2)
- activesupport (= 7.2.2)
+ activemodel (7.2.2.1)
+ activesupport (= 7.2.2.1)
+ activerecord (7.2.2.1)
+ activemodel (= 7.2.2.1)
+ activesupport (= 7.2.2.1)
  timeout (>= 0.4.0)
- activesupport (7.2.2)
+ activesupport (7.2.2.1)
  base64
  benchmark (>= 0.3)
  bigdecimal
@@ -50,41 +51,44 @@ GEM
  protocol-http1 (~> 0.19.0)
  protocol-http2 (~> 0.16.0)
  traces (>= 0.10.0)
- async-http-faraday (0.14.0)
+ async-http-faraday (0.19.0)
  async-http (~> 0.42)
  faraday
  async-io (1.43.2)
  async
- async-pool (0.7.0)
+ async-pool (0.10.2)
  async (>= 1.25)
+ traces
  base64 (0.2.0)
  benchmark (0.4.0)
- bigdecimal (3.1.8)
- concurrent-ruby (1.3.4)
- connection_pool (2.4.1)
- console (1.27.0)
+ bigdecimal (3.1.9)
+ concurrent-ruby (1.3.5)
+ connection_pool (2.5.0)
+ console (1.29.2)
  fiber-annotation
  fiber-local (~> 1.1)
  json
  crack (1.0.0)
  bigdecimal
  rexml
- debug (1.9.2)
+ date (3.4.1)
+ debug (1.10.0)
  irb (~> 1.10)
  reline (>= 0.3.8)
  diff-lcs (1.5.1)
  domain_name (0.6.20240107)
- dotenv (3.1.4)
+ dotenv (3.1.7)
  drb (2.2.1)
+ dynamicschema (1.0.0.beta04)
  event_stream_parser (1.0.0)
- faraday (2.12.1)
+ faraday (2.12.2)
  faraday-net_http (>= 2.0, < 3.5)
  json
  logger
  faraday-http-cache (2.5.1)
  faraday (>= 0.8)
- faraday-multipart (1.0.4)
- multipart-post (~> 2)
+ faraday-multipart (1.1.0)
+ multipart-post (~> 2.0)
  faraday-net_http (3.4.0)
  net-http (>= 0.5.0)
  faraday-retry (2.2.1)
@@ -92,7 +96,7 @@ GEM
  fiber-annotation (0.2.0)
  fiber-local (1.1.0)
  fiber-storage
- fiber-storage (0.1.2)
+ fiber-storage (1.0.0)
  github_changelog_generator (1.16.4)
  activesupport
  async (>= 1.25.0)
@@ -107,82 +111,102 @@ GEM
  faraday (~> 2.7)
  os (~> 1.1)
  tty-progressbar (~> 0.18.2)
- hashdiff (1.1.1)
+ hashdiff (1.1.2)
  hnswlib (0.9.0)
  http-accept (1.7.0)
- http-cookie (1.0.6)
+ http-cookie (1.0.8)
  domain_name (~> 0.5)
  i18n (1.14.6)
  concurrent-ruby (~> 1.0)
- io-console (0.7.2)
- irb (1.14.0)
+ intelligence (0.8.0)
+ dynamicschema (~> 1.0.0.beta03)
+ faraday (~> 2.7)
+ json-repair (~> 0.2)
+ mime-types (~> 3.6)
+ io-console (0.8.0)
+ irb (1.14.3)
  rdoc (>= 4.0.0)
  reline (>= 0.4.2)
- json (2.9.0)
+ json (2.9.1)
+ json-repair (0.2.0)
  language_server-protocol (3.17.0.3)
- logger (1.6.2)
- mime-types (3.5.2)
+ logger (1.6.5)
+ mime-types (3.6.0)
+ logger
  mime-types-data (~> 3.2015)
- mime-types-data (3.2024.0702)
+ mime-types-data (3.2025.0107)
  minitest (5.25.4)
  multi_json (1.15.0)
  multipart-post (2.4.1)
  net-http (0.6.0)
  uri
  netrc (0.11.0)
- nio4r (2.7.3)
- nokogiri (1.16.8-arm64-darwin)
+ nio4r (2.7.4)
+ nokogiri (1.18.1-aarch64-linux-gnu)
+ racc (~> 1.4)
+ nokogiri (1.18.1-aarch64-linux-musl)
+ racc (~> 1.4)
+ nokogiri (1.18.1-arm-linux-gnu)
+ racc (~> 1.4)
+ nokogiri (1.18.1-arm-linux-musl)
+ racc (~> 1.4)
+ nokogiri (1.18.1-arm64-darwin)
+ racc (~> 1.4)
+ nokogiri (1.18.1-x86_64-darwin)
+ racc (~> 1.4)
+ nokogiri (1.18.1-x86_64-linux-gnu)
  racc (~> 1.4)
- nokogiri (1.16.8-x86_64-linux)
+ nokogiri (1.18.1-x86_64-linux-musl)
  racc (~> 1.4)
  octokit (4.25.1)
  faraday (>= 1, < 3)
  sawyer (~> 0.9)
  os (1.1.4)
  parallel (1.26.3)
- parser (3.3.6.0)
+ parser (3.3.7.0)
  ast (~> 2.4.1)
  racc
  pg (1.5.9)
  pgvector (0.2.2)
- protocol-hpack (1.4.3)
+ protocol-hpack (1.5.1)
  protocol-http (0.26.8)
  protocol-http1 (0.19.1)
  protocol-http (~> 0.22)
  protocol-http2 (0.16.0)
  protocol-hpack (~> 1.4)
  protocol-http (~> 0.18)
- psych (5.1.2)
+ psych (5.2.3)
+ date
  stringio
  public_suffix (6.0.1)
  racc (1.8.1)
  rainbow (3.1.1)
  rake (13.2.1)
- rdoc (6.7.0)
+ rdoc (6.11.0)
  psych (>= 4.0.0)
- regexp_parser (2.9.3)
- reline (0.5.9)
+ regexp_parser (2.10.0)
+ reline (0.6.0)
  io-console (~> 0.5)
  rest-client (2.1.0)
  http-accept (>= 1.7.0, < 2.0)
  http-cookie (>= 1.0.2, < 2.0)
  mime-types (>= 1.16, < 4.0)
  netrc (~> 0.8)
- rexml (3.3.9)
+ rexml (3.4.0)
  rspec (3.13.0)
  rspec-core (~> 3.13.0)
  rspec-expectations (~> 3.13.0)
  rspec-mocks (~> 3.13.0)
- rspec-core (3.13.0)
+ rspec-core (3.13.2)
  rspec-support (~> 3.13.0)
- rspec-expectations (3.13.1)
+ rspec-expectations (3.13.3)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.13.0)
- rspec-mocks (3.13.1)
+ rspec-mocks (3.13.2)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.13.0)
- rspec-support (3.13.1)
- rubocop (1.69.1)
+ rspec-support (3.13.2)
+ rubocop (1.70.0)
  json (~> 2.3)
  language_server-protocol (>= 3.17.0)
  parallel (~> 1.10)
@@ -192,11 +216,11 @@ GEM
  rubocop-ast (>= 1.36.2, < 2.0)
  ruby-progressbar (~> 1.7)
  unicode-display_width (>= 2.4.0, < 4.0)
- rubocop-ast (1.36.2)
+ rubocop-ast (1.37.0)
  parser (>= 3.3.1.0)
  rubocop-rake (0.6.0)
  rubocop (~> 1.0)
- rubocop-rspec (3.2.0)
+ rubocop-rspec (3.3.0)
  rubocop (~> 1.61)
  ruby-openai (7.3.1)
  event_stream_parser (>= 0.3.0, < 2.0.0)
@@ -206,16 +230,22 @@ GEM
  sawyer (0.9.2)
  addressable (>= 2.3.5)
  faraday (>= 0.17.3, < 3)
- securerandom (0.4.0)
- sqlite3 (2.0.4-arm64-darwin)
- sqlite3 (2.0.4-x86_64-linux-gnu)
- stringio (3.1.1)
+ securerandom (0.4.1)
+ sqlite3 (2.5.0-aarch64-linux-gnu)
+ sqlite3 (2.5.0-aarch64-linux-musl)
+ sqlite3 (2.5.0-arm-linux-gnu)
+ sqlite3 (2.5.0-arm-linux-musl)
+ sqlite3 (2.5.0-arm64-darwin)
+ sqlite3 (2.5.0-x86_64-darwin)
+ sqlite3 (2.5.0-x86_64-linux-gnu)
+ sqlite3 (2.5.0-x86_64-linux-musl)
+ stringio (3.1.2)
  strings-ansi (0.2.0)
- timeout (0.4.2)
+ timeout (0.4.3)
  timers (4.3.5)
- traces (0.11.1)
+ traces (0.14.1)
  tty-cursor (0.7.1)
- tty-progressbar (0.18.2)
+ tty-progressbar (0.18.3)
  strings-ansi (~> 0.2)
  tty-cursor (~> 0.7)
  tty-screen (~> 0.8)
@@ -233,10 +263,14 @@ GEM
  hashdiff (>= 0.4.0, < 2.0.0)

  PLATFORMS
- arm64-darwin-22
- arm64-darwin-23
- arm64-darwin-24
- x86_64-linux
+ aarch64-linux-gnu
+ aarch64-linux-musl
+ arm-linux-gnu
+ arm-linux-musl
+ arm64-darwin
+ x86_64-darwin
+ x86_64-linux-gnu
+ x86_64-linux-musl

  DEPENDENCIES
  activerecord (~> 7.1)
@@ -261,4 +295,4 @@ DEPENDENCIES
  webmock (~> 3.24.0)

  BUNDLED WITH
- 2.4.16
+ 2.5.23
data/boxcars.gemspec CHANGED
@@ -31,13 +31,14 @@ Gem::Specification.new do |spec|
  spec.require_paths = ["lib"]

  # runtime dependencies
- spec.add_dependency "anthropic", "~> 0.1"
+ spec.add_dependency "anthropic", "~> 0.3"
  spec.add_dependency "google_search_results", "~> 2.2"
- spec.add_dependency "gpt4all", "~> 0.0.4"
- spec.add_dependency "hnswlib", "~> 0.8"
- spec.add_dependency "nokogiri", "~> 1.16"
+ spec.add_dependency "gpt4all", "~> 0.0.5"
+ spec.add_dependency "hnswlib", "~> 0.9"
+ spec.add_dependency "intelligence", ">= 0.8"
+ spec.add_dependency "nokogiri", "~> 1.18"
  spec.add_dependency "pgvector", "~> 0.2"
- spec.add_dependency "ruby-openai", ">= 7.1", "< 8.0"
+ spec.add_dependency "ruby-openai", ">= 7.3"

  # For more information and examples about making a new gem, checkout our
  # guide at: https://bundler.io/guides/creating_gem.html
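
For context, a minimal sketch of what the runtime-dependency changes above look like from a consuming application's Gemfile (the boxcars pin is illustrative; Bundler resolves the tightened constraints automatically):

# Gemfile of an app upgrading to this release (illustrative)
source "https://rubygems.org"
gem "boxcars", "~> 0.7.1"
# `bundle update boxcars` then pulls anthropic ~> 0.3, intelligence >= 0.8,
# nokogiri ~> 1.18 and ruby-openai >= 7.3 per the gemspec constraints above.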
@@ -95,6 +95,20 @@ module Boxcars
  raise KeyError, "Prompt format error: #{first_line}"
  end

+ def as_intelligence_conversation(inputs = nil)
+ conversation = Intelligence::Conversation.new
+ no_history.each do |ln|
+ message = Intelligence::Message.new(ln[0])
+ message << Intelligence::MessageContent::Text.new(text: cformat(ln.last, inputs))
+ conversation.messages << message
+ end
+ conversation
+ rescue ::KeyError => e
+ first_line = e.message.to_s.split("\n").first
+ Boxcars.error "Missing prompt input key: #{first_line}"
+ raise KeyError, "Prompt format error: #{first_line}"
+ end
+
  # compute the prompt parameters with input substitutions
  # @param inputs [Hash] The inputs to use for the prompt.
  # @return [Hash] The formatted prompt { prompt: "..."}
@@ -51,5 +51,12 @@ module Boxcars
  def default_prefixes
  conversation.default_prefixes
  end
+
+ # Convert the prompt to an Intelligence::Conversation
+ # @param inputs [Hash] The inputs to use for the prompt
+ # @return [Intelligence::Conversation] The converted conversation
+ def as_intelligence_conversation(inputs: nil)
+ conversation.to_intelligence_conversation(inputs: inputs)
+ end
  end
  end
@@ -78,7 +78,7 @@ module Boxcars
  raise Error, "Anthropic: #{response['error']}" if response['error']

  answer = response['completion']
- Boxcars.debug(response, :yellow)
+ Boxcars.debug("Answer: #{answer}", :cyan)
  answer
  end

@@ -0,0 +1,109 @@
+ # frozen_string_literal: true
+
+ require "intelligence"
+ module Boxcars
+ # A engine that uses Cerebras's API
+ class Cerebras < Engine
+ attr_reader :prompts, :cerebras_params, :model_kwargs, :batch_size
+
+ # The default parameters to use when asking the engine
+ DEFAULT_PARAMS = {
+ model: "llama-3.3-70b",
+ temperature: 0.1
+ }.freeze
+
+ # the default name of the engine
+ DEFAULT_NAME = "Cerebras engine"
+ # the default description of the engine
+ DEFAULT_DESCRIPTION = "useful for when you need to use Cerebras to process complex content. " \
+ "Supports text, images, and other content types"
+
+ def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 20, **kwargs)
+ @cerebras_params = DEFAULT_PARAMS.merge(kwargs)
+ @prompts = prompts
+ @batch_size = batch_size
+ super(description: description, name: name)
+ end
+
+ # Get the Cerebras API client
+ def self.adapter(params:, api_key: nil)
+ api_key = Boxcars.configuration.cerebras_api_key(**params) if api_key.nil?
+ raise ArgumentError, "Cerebras API key not configured" unless api_key
+
+ Intelligence::Adapter[:cerebras].new(
+ { key: api_key, chat_options: params }
+ )
+ end
+
+ # Process different content types
+ def process_content(content)
+ case content
+ when String
+ { type: "text", text: content }
+ when Hash
+ validate_content(content)
+ when Array
+ content.map { |c| process_content(c) }
+ else
+ raise ArgumentError, "Unsupported content type: #{content.class}"
+ end
+ end
+
+ # Validate content structure
+ def validate_content(content)
+ raise ArgumentError, "Content must have type and text fields" unless content[:type] && content[:text]
+
+ content
+ end
+
+ # Get an answer from the engine
+ def client(prompt:, inputs: {}, api_key: nil, **kwargs)
+ params = cerebras_params.merge(kwargs)
+ adapter = Cerebras.adapter(api_key: api_key, params: params)
+ raise Error, "OpenAI: No response from API" unless adapter
+
+ convo = prompt.as_intelligence_conversation(inputs: inputs)
+
+ # Add content processing
+ Boxcars.debug("Sending to Cerebras:\n#{convo}", :cyan) if Boxcars.configuration.log_prompts
+
+ # Make API call
+ request = Intelligence::ChatRequest.new(adapter: adapter)
+ response = request.chat(convo)
+ check_response(response)
+ rescue StandardError => e
+ Boxcars.error("Cerebras Error: #{e.message}", :red)
+ raise
+ end
+
+ # Run the engine with a question
+ def run(question, **kwargs)
+ prompt = Prompt.new(template: question)
+ response = client(prompt: prompt, **kwargs)
+ extract_answer(response)
+ end
+
+ private
+
+ def extract_answer(response)
+ # Handle different response formats
+ if response["choices"]
+ response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
+ else
+ response["output"] || response.to_s
+ end
+ end
+
+ def check_response(response)
+ return response.result.text if response.success?
+
+ raise KeyError, "CEREBRAS_API_KEY not valid" if response&.reason_phrase == "Unauthorized"
+
+ raise ValueError, "Cerebras error: #{response&.reason_phrase&.present? ? response.reason_phrase : response}"
+ end
+
+ def conversation_model?(_model)
+ true
+ end
+ end
+ end
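
A minimal usage sketch for the new Cerebras engine, assuming a key is available to Boxcars.configuration.cerebras_api_key (the CEREBRAS_API_KEY environment variable name follows the usual key_lookup convention and is an assumption here):

require "boxcars"

# model defaults to "llama-3.3-70b"; extra kwargs are merged into the chat params
engine = Boxcars::Cerebras.new(temperature: 0.2)
puts engine.run("In one sentence, what does the boxcars gem do?")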
@@ -78,7 +78,7 @@ module Boxcars
  raise Error, "Cohere: #{response[:error]}" if response[:error]

  answer = response[:text]
- Boxcars.debug(response, :yellow)
+ Boxcars.debug("Answer: #{answer}", :cyan)
  answer
  end

@@ -1,73 +1,66 @@
  # frozen_string_literal: true

- # Boxcars - a framework for running a series of tools to get an answer to a question.
+ # Boxcars is a framework for running a series of tools to get an answer to a question.
  module Boxcars
- # A engine that uses Gemini's API.
+ # A engine that uses GeminiAI's API.
  class GeminiAi < Engine
- attr_reader :prompts, :llm_params, :model_kwargs, :batch_size
+ attr_reader :prompts, :llm_parmas, :model_kwargs, :batch_size

  # The default parameters to use when asking the engine.
  DEFAULT_PARAMS = {
- model: "gemini-1.5-flash-latest"
+ model: "gemini-1.5-flash-latest",
+ temperature: 0.1
  }.freeze

  # the default name of the engine
- DEFAULT_NAME = "Google Gemini AI engine"
+ DEFAULT_NAME = "GeminiAI engine"
  # the default description of the engine
- DEFAULT_DESCRIPTION = "useful for when you need to use Google Gemini AI to answer questions. " \
+ DEFAULT_DESCRIPTION = "useful for when you need to use AI to answer questions. " \
  "You should ask targeted questions"

- # A engine is the driver for a single tool to run.
- # @param name [String] The name of the engine. Defaults to "OpenAI engine".
+ # A engine is a container for a single tool to run.
+ # @param name [String] The name of the engine. Defaults to "GeminiAI engine".
  # @param description [String] A description of the engine. Defaults to:
  # useful for when you need to use AI to answer questions. You should ask targeted questions".
  # @param prompts [Array<String>] The prompts to use when asking the engine. Defaults to [].
- def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], **kwargs)
- @llm_params = DEFAULT_PARAMS.merge(kwargs)
+ # @param batch_size [Integer] The number of prompts to send to the engine at once. Defaults to 20.
+ def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 20, **kwargs)
+ @llm_parmas = DEFAULT_PARAMS.merge(kwargs)
  @prompts = prompts
- @batch_size = 20
+ @batch_size = batch_size
  super(description: description, name: name)
  end

- def conversation_model?(_model)
- true
+ # Get the OpenAI API client
+ # @param gemini_api_key [String] The access token to use when asking the engine.
+ # Defaults to Boxcars.configuration.gemini_api_key
+ # @return [OpenAI::Client] The OpenAI API gem client.
+ def self.open_ai_client(gemini_api_key: nil)
+ access_token = Boxcars.configuration.gemini_api_key(gemini_api_key: gemini_api_key)
+ ::OpenAI::Client.new(access_token: access_token, uri_base: "https://generativelanguage.googleapis.com/v1beta/openai/")
  end

- def chat(params, gemini_api_key)
- raise Boxcars::ConfigurationError('Google AI API key not set') if gemini_api_key.blank?
-
- model_string = params.delete(:model_string)
- raise Boxcars::ConfigurationError('Google AI API key not set') if model_string.blank?
-
- # Define the API endpoint and parameters
- api_endpoint = "https://generativelanguage.googleapis.com/v1beta/models/#{model_string}:generateContent?key=#{gemini_api_key}"
-
- connection = Faraday.new(api_endpoint) do |faraday|
- faraday.request :url_encoded
- faraday.headers['Content-Type'] = 'application/json'
- end
-
- # Make the API call
- response = connection.post { |req| req.body = params.to_json }
-
- JSON.parse(response.body, symbolize_names: true)
+ def conversation_model?(_model)
+ true
  end

  # Get an answer from the engine.
  # @param prompt [String] The prompt to use when asking the engine.
- # @param gemini_api_key [String] Optional api key to use when asking the engine.
+ # @param gemini_api_key [String] The access token to use when asking the engine.
  # Defaults to Boxcars.configuration.gemini_api_key.
  # @param kwargs [Hash] Additional parameters to pass to the engine if wanted.
- def client(prompt:, inputs: {}, **kwargs)
- api_key = Boxcars.configuration.gemini_api_key(**kwargs)
- option_params = llm_params.merge(kwargs)
- model_string = option_params.delete(:model) || DEFAULT_PARAMS[:model]
- convo = prompt.as_messages(inputs: inputs)
- # Convert conversation to Google Gemini format
- params = to_google_gemini_format(convo[:messages], option_params)
- params[:model_string] = model_string
- Boxcars.debug("Prompt after formatting:#{params[:message]}", :cyan) if Boxcars.configuration.log_prompts
- chat(params, api_key)
+ def client(prompt:, inputs: {}, gemini_api_key: nil, **kwargs)
+ clnt = GeminiAi.open_ai_client(gemini_api_key: gemini_api_key)
+ params = llm_parmas.merge(kwargs)
+ prompt = prompt.first if prompt.is_a?(Array)
+ params = prompt.as_messages(inputs).merge(params)
+ if Boxcars.configuration.log_prompts
+ Boxcars.debug(params[:messages].last(2).map { |p| ">>>>>> Role: #{p[:role]} <<<<<<\n#{p[:content]}" }.join("\n"), :cyan)
+ end
+ clnt.chat(parameters: params)
+ rescue => e
+ Boxcars.error(e, :red)
+ raise
  end

  # get an answer from the engine for a question.
@@ -76,13 +69,10 @@ module Boxcars
  def run(question, **kwargs)
  prompt = Prompt.new(template: question)
  response = client(prompt: prompt, **kwargs)
-
  raise Error, "GeminiAI: No response from API" unless response
- raise Error, "GeminiAI: #{response[:error]}" if response[:error]

- answer = response[:candidates].first[:content][:parts].first[:text]
- Boxcars.debug(response, :yellow)
- answer
+ check_response(response)
+ response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
  end

  # Get the default parameters for the engine.
@@ -95,13 +85,13 @@ module Boxcars
  # @param must_haves [Array<String>] The keys that must be in the response. Defaults to %w[choices].
  # @raise [KeyError] if there is an issue with the access token.
  # @raise [ValueError] if the response is not valid.
- def check_response(response, must_haves: %w[completion])
- if response['error']
+ def check_response(response, must_haves: %w[choices])
+ if response['error'].is_a?(Hash)
  code = response.dig('error', 'code')
  msg = response.dig('error', 'message') || 'unknown error'
- raise KeyError, "ANTHOPIC_API_KEY not valid" if code == 'invalid_api_key'
+ raise KeyError, "GEMINI_API_TOKEN not valid" if code == 'invalid_api_key'

- raise ValueError, "Gemini error: #{msg}"
+ raise ValueError, "GeminiAI error: #{msg}"
  end

  must_haves.each do |key|
@@ -111,13 +101,7 @@ module Boxcars

  # the engine type
  def engine_type
- "claude"
- end
-
- # lookup the context size for a model by name
- # @param modelname [String] The name of the model to lookup.
- def modelname_to_contextsize(_modelname)
- 100000
+ "gemini_ai"
  end

  # Calculate the maximum number of tokens possible to generate for a prompt.
@@ -127,34 +111,8 @@ module Boxcars
  num_tokens = get_num_tokens(prompt_text)

  # get max context size for model by name
- max_size = modelname_to_contextsize(model_name)
+ max_size = 8096
  max_size - num_tokens
  end
-
- def to_google_gemini_format(convo, option_params)
- instructions = convo.shift.last if convo.first && convo.first[:role] == :system
- system_instructions = instructions || "You are a helpful assistant."
-
- # Convert conversation history to the format expected by Google
- contents = convo.map { |message| { text: message[:content] } }
-
- generation_config = {}
- if option_params.length.positive?
- generation_config.merge!(option_params)
- generation_config[:stopSequences] = [generation_config.delete(:stop)] if generation_config[:stop].present?
- end
-
- rv = {
- system_instruction: { parts: { text: system_instructions } }, # System instructions or context
- contents: { parts: contents } # The chat messages
- }
-
- rv[:generationConfig] = generation_config if generation_config.length.positive?
- rv
- end
-
- def default_prefixes
- { system: "SYSTEM: ", user: "USER: ", assistant: "CHATBOT: ", history: :history }
- end
  end
  end
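
The rewritten GeminiAi engine above now talks to Gemini through its OpenAI-compatible endpoint via ruby-openai. A usage sketch, with the key passed explicitly (it can also come from Boxcars.configuration.gemini_api_key; the ENV name shown is an assumption):

engine = Boxcars::GeminiAi.new(model: "gemini-1.5-flash-latest")
puts engine.run("What is the capital of France?", gemini_api_key: ENV["GEMINI_API_KEY"])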
@@ -73,9 +73,7 @@ module Boxcars
  raise Error, "Groq: No response from API" unless response

  check_response(response)
- answer = response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
- puts answer
- answer
+ response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
  end

  # Get the default parameters for the engine.
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ module Boxcars
+ class Intelligence
+ # Client for interacting with the Intelligence API
+ class Client
+ BASE_URL = "https://api.intelligence.com/v1"
+ DEFAULT_TIMEOUT = 120
+
+ def initialize(api_key:)
+ @api_key = api_key
+ @connection = Faraday.new(
+ url: BASE_URL,
+ headers: {
+ "Content-Type" => "application/json",
+ "Authorization" => "Bearer #{@api_key}"
+ },
+ request: {
+ timeout: DEFAULT_TIMEOUT
+ }
+ )
+ end
+
+ # Generate a response from the Intelligence API
+ def generate(parameters:)
+ response = @connection.post("/generate") do |req|
+ req.body = parameters.to_json
+ end
+
+ handle_response(response)
+ end
+
+ # Stream a response from the Intelligence API
+ def stream(parameters:, &block)
+ @connection.post("/generate") do |req|
+ req.options.on_data = block
+ req.headers["Accept"] = "text/event-stream"
+ req.body = parameters.to_json
+ end
+ end
+
+ private
+
+ def handle_response(response)
+ case response.status
+ when 200
+ JSON.parse(response.body)
+ when 401
+ raise KeyError, "Invalid API key"
+ when 429
+ raise ValueError, "Rate limit exceeded"
+ when 400..499
+ raise ArgumentError, "Bad request: #{response.body}"
+ when 500..599
+ raise Error, "Intelligence API server error"
+ else
+ raise Error, "Unexpected response: #{response.status}"
+ end
+ end
+ end
+ end
+ end
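
A direct-use sketch of this low-level client. Only Client.new(api_key:) and generate(parameters:) come from the code above; the parameters hash shape and the ENV name are illustrative assumptions (the engine below normally builds the payload from a prompt):

client = Boxcars::Intelligence::Client.new(api_key: ENV["INTELLIGENCE_API_KEY"])
result = client.generate(parameters: { model: "intelligence-1.0", messages: [{ role: "user", content: "Hello" }] })
p result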
@@ -0,0 +1,141 @@
+ # frozen_string_literal: true
+
+ module Boxcars
+ # A engine that uses Intelligence's API
+ class Intelligence < Engine
+ attr_reader :prompts, :intelligence_params, :model_kwargs, :batch_size
+
+ # The default parameters to use when asking the engine
+ DEFAULT_PARAMS = {
+ model: "intelligence-1.0",
+ temperature: 0.1
+ }.freeze
+
+ # the default name of the engine
+ DEFAULT_NAME = "Intelligence engine"
+ # the default description of the engine
+ DEFAULT_DESCRIPTION = "useful for when you need to use Intelligence to process complex content. " \
+ "Supports text, images, and other content types"
+
+ def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 20, **kwargs)
+ begin
+ require 'intelligence'
+ rescue LoadError => _e
+ raise LoadError,
+ "The intelligence gem is required. Please add 'gem \"intelligence\"' to your Gemfile and run bundle install"
+ end
+
+ @intelligence_params = DEFAULT_PARAMS.merge(kwargs)
+ @prompts = prompts
+ @batch_size = batch_size
+ super(description: description, name: name)
+ end
+
+ # Get the Intelligence API client
+ def self.intelligence_client(api_key: nil)
+ api_key ||= Boxcars.configuration.intelligence_api_key
+ raise ArgumentError, "Intelligence API key not configured" unless api_key
+
+ Client.new(api_key: api_key)
+ end
+
+ # Stream responses from the Intelligence API
+ def stream(prompt:, inputs: {}, api_key: nil, &block)
+ client = Intelligence.intelligence_client(api_key: api_key)
+ params = intelligence_params.merge(stream: true)
+
+ processed_prompt = if conversation_model?(params[:model])
+ prompt.as_messages(inputs)
+ else
+ { prompt: prompt.as_prompt(inputs: inputs) }
+ end
+
+ processed_prompt[:content] = process_content(processed_prompt[:content]) if processed_prompt[:content]
+
+ client.stream(parameters: params.merge(processed_prompt), &block)
+ end
+
+ # Process different content types
+ def process_content(content)
+ case content
+ when String
+ { type: "text", text: content }
+ when Hash
+ validate_content(content)
+ when Array
+ content.map { |c| process_content(c) }
+ else
+ raise ArgumentError, "Unsupported content type: #{content.class}"
+ end
+ end
+
+ # Validate content structure
+ def validate_content(content)
+ raise ArgumentError, "Content must have type and text fields" unless content[:type] && content[:text]
+
+ content
+ end
+
+ # Get an answer from the engine
+ def client(prompt:, inputs: {}, api_key: nil, **kwargs)
+ client = Intelligence.intelligence_client(api_key: api_key)
+ params = intelligence_params.merge(kwargs)
+
+ processed_prompt = if conversation_model?(params[:model])
+ prompt.as_messages(inputs)
+ else
+ { prompt: prompt.as_prompt(inputs: inputs) }
+ end
+
+ # Add content processing
+ processed_prompt[:content] = process_content(processed_prompt[:content]) if processed_prompt[:content]
+
+ Boxcars.debug("Sending to Intelligence:\n#{processed_prompt}", :cyan) if Boxcars.configuration.log_prompts
+
+ # Make API call
+ response = client.generate(parameters: params.merge(processed_prompt))
+ check_response(response)
+ response
+ rescue StandardError => e
+ Boxcars.error("Intelligence Error: #{e.message}", :red)
+ raise
+ end
+
+ # Run the engine with a question
+ def run(question, **kwargs)
+ prompt = Prompt.new(template: question)
+ response = client(prompt: prompt, **kwargs)
+ extract_answer(response)
+ end
+
+ private
+
+ def extract_answer(response)
+ # Handle different response formats
+ if response["choices"]
+ response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
+ else
+ response["output"] || response.to_s
+ end
+ end
+
+ def check_response(response)
+ if response["error"]
+ code = response.dig("error", "code")
+ msg = response.dig("error", "message") || "unknown error"
+ raise KeyError, "INTELLIGENCE_API_KEY not valid" if code == "invalid_api_key"
+
+ raise ValueError, "Intelligence error: #{msg}"
+ end
+
+ # Validate response structure
+ return if response["choices"] || response["output"]
+
+ raise Error, "Invalid response format from Intelligence API"
+ end
+
+ def conversation_model?(_model)
+ true
+ end
+ end
+ end
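
And a minimal sketch of the higher-level engine that wraps that client, assuming the intelligence gem is installed and that Boxcars.configuration responds to intelligence_api_key as the engine expects (that accessor is not part of the configuration diff shown in this release):

engine = Boxcars::Intelligence.new(temperature: 0.0)
puts engine.run("List three Ruby web frameworks.")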
@@ -92,7 +92,7 @@ module Boxcars
  raise Error, "OpenAI: #{response['error']}" if response["error"]

  answer = response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
- puts answer
+ Boxcars.debug("Answer: #{answer}", :cyan)
  answer
  end

@@ -78,7 +78,7 @@ module Boxcars
  raise Error, "PerplexityAI: #{response['error']}" if response["error"]

  answer = response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
- puts answer
+ Boxcars.debug("Answer: #{answer}", :cyan)
  answer
  end

@@ -80,3 +80,4 @@ require "boxcars/engine/openai"
  require "boxcars/engine/perplexityai"
  require "boxcars/engine/gpt4all_eng"
  require "boxcars/engine/gemini_ai"
+ require "boxcars/engine/cerebras"
@@ -44,6 +44,18 @@ module Boxcars
  def default_prefixes
  end

+ # Convert the prompt to an Intelligence::Conversation
+ # @param inputs [Hash] The inputs to use for the prompt
+ # @return [Intelligence::Conversation] The converted conversation
+ def as_intelligence_conversation(inputs: nil)
+ conversation = Intelligence::Conversation.new
+ user_msg = Intelligence::Message.new(:user)
+ user_msg << Intelligence::MessageContent::Text.new(text: format(inputs))
+ conversation.messages << user_msg
+
+ conversation
+ end
+
  private

  # format the prompt with the input variables
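
A sketch of the new conversion helper above, assuming the intelligence gem is loaded and that the template uses the Ruby format-style %<key>s placeholders Boxcars prompts already rely on; the template text and inputs are illustrative:

prompt = Boxcars::Prompt.new(template: "Summarize: %<text>s")
convo  = prompt.as_intelligence_conversation(inputs: { text: "Boxcars chains engines and boxcars into trains." })
puts convo.messages.size   # => 1, a single user message carrying the formatted text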
@@ -2,5 +2,5 @@

  module Boxcars
  # The current version of the gem.
- VERSION = "0.6.8"
+ VERSION = "0.7.1"
  end
data/lib/boxcars.rb CHANGED
@@ -27,7 +27,7 @@ module Boxcars

  # Configuration contains gem settings
  class Configuration
- attr_writer :openai_access_token, :serpapi_api_key, :groq_api_key
+ attr_writer :openai_access_token, :serpapi_api_key, :groq_api_key, :cerebras_api_key
  attr_accessor :organization_id, :logger, :log_prompts, :log_generated, :default_train, :default_engine

  def initialize
@@ -62,6 +62,11 @@ module Boxcars
  key_lookup(:groq_api_key, kwargs)
  end

+ # @return [String] The Cerebras API key either from arg or env.
+ def cerebras_api_key(**kwargs)
+ key_lookup(:cerebras_api_key, kwargs)
+ end
+
  # @return [String] The Google AI API key either from arg or env.
  def gemini_api_key(**kwargs)
  key_lookup(:gemini_api_key, kwargs)
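
The new cerebras_api_key writer can be set directly on the configuration object; key_lookup is expected to fall back to an environment variable like the other keys, though the exact ENV name is an assumption here:

Boxcars.configuration.cerebras_api_key = ENV["CEREBRAS_API_KEY"]
# or pass it per call: Boxcars::Cerebras.new.run("hi", api_key: "...")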
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: boxcars
  version: !ruby/object:Gem::Version
- version: 0.6.8
+ version: 0.7.1
  platform: ruby
  authors:
  - Francis Sullivan
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2024-12-07 00:00:00.000000000 Z
+ date: 2025-01-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: anthropic
@@ -17,14 +17,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '0.1'
+ version: '0.3'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '0.1'
+ version: '0.3'
  - !ruby/object:Gem::Dependency
  name: google_search_results
  requirement: !ruby/object:Gem::Requirement
@@ -45,26 +45,40 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 0.0.4
+ version: 0.0.5
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 0.0.4
+ version: 0.0.5
  - !ruby/object:Gem::Dependency
  name: hnswlib
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '0.8'
+ version: '0.9'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
+ - !ruby/object:Gem::Version
+ version: '0.9'
+ - !ruby/object:Gem::Dependency
+ name: intelligence
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0.8'
+ type: :runtime
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0.8'
  - !ruby/object:Gem::Dependency
@@ -73,14 +87,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '1.16'
+ version: '1.18'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '1.16'
+ version: '1.18'
  - !ruby/object:Gem::Dependency
  name: pgvector
  requirement: !ruby/object:Gem::Requirement
@@ -101,20 +115,14 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '7.1'
- - - "<"
- - !ruby/object:Gem::Version
- version: '8.0'
+ version: '7.3'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '7.1'
- - - "<"
- - !ruby/object:Gem::Version
- version: '8.0'
+ version: '7.3'
  description: You simply set an OpenAI key, give a number of Boxcars to a Train, and
  magic ensues when you run it.
  email:
@@ -157,11 +165,14 @@ files:
  - lib/boxcars/conversation_prompt.rb
  - lib/boxcars/engine.rb
  - lib/boxcars/engine/anthropic.rb
+ - lib/boxcars/engine/cerebras.rb
  - lib/boxcars/engine/cohere.rb
  - lib/boxcars/engine/engine_result.rb
  - lib/boxcars/engine/gemini_ai.rb
  - lib/boxcars/engine/gpt4all_eng.rb
  - lib/boxcars/engine/groq.rb
+ - lib/boxcars/engine/intelligence.rb
+ - lib/boxcars/engine/intelligence/client.rb
  - lib/boxcars/engine/ollama.rb
  - lib/boxcars/engine/openai.rb
  - lib/boxcars/engine/perplexityai.rb