ruby_llm 0.1.0.pre38 → 0.1.0.pre40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: efcf5aa8692558d9214d82dbb6c053727b1b6c5e810fcd7cd224e9a909ae38ff
-   data.tar.gz: 4910f4d7dade64d20696f5af971e968ce31abf0d98eae14fa7c874efe6ac03ce
+   metadata.gz: db95ae0d7103334e7760659d58200e1cc882b970a2d3f67f56180a9879bad976
+   data.tar.gz: f2a95fb00c476cbea2bf060ff5502c4fb61ed2d652578133cf7eee6ff4c7297f
  SHA512:
-   metadata.gz: 31679bff69003430d7f662dfa822a640ea6863087bf9e2260b0555f7341c41d3368a2e07d7c515c78255f5b1b400a05b08c631a80ee4ed7871fd0ceedb330ca2
-   data.tar.gz: 306fbf3cc3f35688b03b73995abe962152ddcde8d438093f4b4879d8c0b271187e4c7c26b10b8393f80556b955e1c631a8b30fdd693605712353cd7d6aa5e9cb
+   metadata.gz: 3a3624f1c5d8184d5f36aca807528b4bc627a66aebe3829ff9e90bb62348a12863b764352d4b466931c0ef3469c1d952832a1b7726f93231b72324b13228ffb1
+   data.tar.gz: 4bef0ae0373be0c2cd8d4702ddfc2f7a76883fe7c60e5ae5ef501c6f808744328919fba3f16dedf075d6f1cb1585120f46103415579a046306fd1a4d1c81f20b
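These digests can be checked by hand: a `.gem` file is a plain tar archive whose `metadata.gz` and `data.tar.gz` members are exactly what `checksums.yaml` covers. A minimal verification sketch in Ruby (the gem filename is inferred from the version above; the unpacked member names are the RubyGems standard):

```ruby
require 'digest'

# Unpack the gem archive first, e.g.
#   tar -xf ruby_llm-0.1.0.pre40.gem
# which yields metadata.gz, data.tar.gz and checksums.yaml.gz.

# These should match the SHA256 entries above.
puts Digest::SHA256.file('metadata.gz').hexdigest
puts Digest::SHA256.file('data.tar.gz').hexdigest

# And these the SHA512 entries.
puts Digest::SHA512.file('metadata.gz').hexdigest
puts Digest::SHA512.file('data.tar.gz').hexdigest
```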
@@ -3,6 +3,12 @@ name: CI
  on:
    push:
      branches: [ "main" ]
+     paths:
+       - 'lib/**'
+       - 'spec/**'
+       - 'Gemfile'
+       - 'Rakefile'
+       - 'ruby_llm.gemspec'
    pull_request:
      branches: [ "main" ]
    workflow_call:
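Assembled, the trigger block reads as below. When `push` carries both `branches` and `paths` filters, GitHub Actions runs the workflow only when both match, so commits to `main` that touch nothing but documentation no longer start a CI run (indentation reconstructed; only the lines in the hunk above are from the actual workflow):

```yaml
on:
  push:
    branches: [ "main" ]
    paths:
      - 'lib/**'
      - 'spec/**'
      - 'Gemfile'
      - 'Rakefile'
      - 'ruby_llm.gemspec'
  pull_request:
    branches: [ "main" ]
  workflow_call:
```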
data/README.md CHANGED
@@ -20,6 +20,8 @@ A delightful Ruby way to work with AI. Chat in text, analyze and generate images
  <a href="https://codecov.io/gh/crmne/ruby_llm"><img src="https://codecov.io/gh/crmne/ruby_llm/branch/main/graph/badge.svg" alt="codecov" /></a>
  </p>

+ 🤺 Battle tested at [💬 Chat with Work](https://chatwithwork.com)
+
  ## Features

  - 💬 **Beautiful Chat Interface** - Converse with AI models as easily as `RubyLLM.chat.ask "teach me Ruby"`
data/lib/ruby_llm/provider.rb CHANGED
@@ -143,11 +143,20 @@ module RubyLLM
        maybe_json
      end

-     def parse_error(response)
+     def parse_error(response) # rubocop:disable Metrics/MethodLength
        return if response.body.empty?

        body = try_parse_json(response.body)
-       body.is_a?(Hash) ? body.dig('error', 'message') : body
+       case body
+       when Hash
+         body.dig('error', 'message')
+       when Array
+         body.map do |part|
+           part.dig('error', 'message')
+         end.join('. ')
+       else
+         body
+       end
      end

      class << self
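The practical effect: some provider APIs can return a JSON array of error objects rather than a single hash, and the old ternary passed such arrays through unformatted. A standalone sketch of the new dispatch, with hypothetical payloads for illustration:

```ruby
# Hypothetical error payloads illustrating the three branches.
hash_body  = { 'error' => { 'message' => 'Rate limit exceeded' } }
array_body = [
  { 'error' => { 'message' => 'Bad chunk' } },
  { 'error' => { 'message' => 'Stream closed' } }
]

# Mirrors the case expression added above.
extract = lambda do |body|
  case body
  when Hash
    body.dig('error', 'message')
  when Array
    body.map { |part| part.dig('error', 'message') }.join('. ')
  else
    body
  end
end

extract.call(hash_body)  # => "Rate limit exceeded"
extract.call(array_body) # => "Bad chunk. Stream closed"
extract.call('oops')     # => "oops"
```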
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module RubyLLM
-   VERSION = '0.1.0.pre38'
+   VERSION = '0.1.0.pre40'
  end
data/ruby_llm.gemspec CHANGED
@@ -28,7 +28,7 @@ Gem::Specification.new do |spec|
    # Specify which files should be added to the gem when it is released.
    # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
    spec.files = Dir.chdir(File.expand_path(__dir__)) do
-     `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
+     `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features|docs)/}) }
    end
    spec.bindir = 'exe'
    spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
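Since the Jekyll documentation now lives in the repository's `docs/` directory (deleted from the gem below), adding `docs` to the reject pattern keeps it out of the packaged files. The regex is anchored to the start of the path, so only top-level `docs/` entries are excluded. A quick sketch of the filter with sample paths:

```ruby
# Sample paths; the real list comes from `git ls-files -z`.
paths = ['lib/ruby_llm.rb', 'docs/index.md', 'spec/chat_spec.rb', 'README.md']

paths.reject { |f| f.match(%r{^(test|spec|features|docs)/}) }
# => ["lib/ruby_llm.rb", "README.md"]
```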
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby_llm
  version: !ruby/object:Gem::Version
-   version: 0.1.0.pre38
+   version: 0.1.0.pre40
  platform: ruby
  authors:
  - Carmine Paolino
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2025-02-25 00:00:00.000000000 Z
+ date: 2025-02-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: event_stream_parser
@@ -111,21 +111,6 @@ files:
  - Rakefile
  - bin/console
  - bin/setup
- - docs/.gitignore
- - docs/Gemfile
- - docs/_config.yml
- - docs/_data/navigation.yml
- - docs/guides/chat.md
- - docs/guides/embeddings.md
- - docs/guides/error-handling.md
- - docs/guides/getting-started.md
- - docs/guides/image-generation.md
- - docs/guides/index.md
- - docs/guides/rails.md
- - docs/guides/streaming.md
- - docs/guides/tools.md
- - docs/index.md
- - docs/installation.md
  - lib/ruby_llm.rb
  - lib/ruby_llm/active_record/acts_as.rb
  - lib/ruby_llm/chat.rb
data/docs/.gitignore DELETED
@@ -1,7 +0,0 @@
- _site/
- .sass-cache/
- .jekyll-cache/
- .jekyll-metadata
- # Ignore folders generated by Bundler
- .bundle/
- vendor/
data/docs/Gemfile DELETED
@@ -1,11 +0,0 @@
- source 'https://rubygems.org'
-
- gem 'jekyll', '~> 4.3'
- gem 'just-the-docs', '~> 0.7.0'
- gem 'webrick', '~> 1.8'
-
- # GitHub Pages plugins
- group :jekyll_plugins do
-   gem 'jekyll-remote-theme'
-   gem 'jekyll-seo-tag'
- end
data/docs/_config.yml DELETED
@@ -1,43 +0,0 @@
- title: RubyLLM
- description: A delightful Ruby way to work with AI
- url: https://rubyllm.com
- baseurl: /
- remote_theme: just-the-docs/just-the-docs
-
- # Enable search
- search_enabled: true
- search:
-   heading_level: 2
-   previews: 3
-   preview_words_before: 5
-   preview_words_after: 10
-   tokenizer_separator: /[\s/]+/
-   rel_url: true
-   button: false
-
- # Navigation structure
- nav_external_links:
-   - title: RubyLLM on GitHub
-     url: https://github.com/crmne/ruby_llm
-     hide_icon: false
-
- # Footer content
- footer_content: "Copyright &copy; 2025 <a href='https://paolino.me'>Carmine Paolino</a>. Distributed under an <a href=\"https://github.com/crmne/ruby_llm/tree/main/LICENSE\">MIT license.</a>"
-
- # Enable copy button on code blocks
- enable_copy_code_button: true
-
- # Make Anchor links show on hover
- heading_anchors: true
-
- # Color scheme
- color_scheme: light
-
- # Google Analytics
- ga_tracking:
- ga_tracking_anonymize_ip: true
-
- # Custom plugins (GitHub Pages allows these)
- plugins:
-   - jekyll-remote-theme
-   - jekyll-seo-tag
data/docs/_data/navigation.yml DELETED
@@ -1,25 +0,0 @@
- - title: Home
-   url: /
- - title: Installation
-   url: /installation
- - title: Guides
-   url: /guides/
-   subfolderitems:
-     - title: Getting Started
-       url: /guides/getting-started
-     - title: Chat
-       url: /guides/chat
-     - title: Tools
-       url: /guides/tools
-     - title: Streaming
-       url: /guides/streaming
-     - title: Rails Integration
-       url: /guides/rails
-     - title: Image Generation
-       url: /guides/image-generation
-     - title: Embeddings
-       url: /guides/embeddings
-     - title: Error Handling
-       url: /guides/error-handling
- - title: GitHub
-   url: https://github.com/crmne/ruby_llm
data/docs/guides/chat.md DELETED
@@ -1,206 +0,0 @@
- ---
- layout: default
- title: Chat
- parent: Guides
- nav_order: 2
- permalink: /guides/chat
- ---
-
- # Chatting with AI Models
-
- RubyLLM's chat interface provides a natural way to interact with various AI models. This guide covers everything from basic chatting to advanced features like multimodal inputs and streaming responses.
-
- ## Basic Chat
-
- Creating a chat and asking questions is straightforward:
-
- ```ruby
- # Create a chat with the default model
- chat = RubyLLM.chat
-
- # Ask a question
- response = chat.ask "What's the best way to learn Ruby?"
-
- # The response is a Message object
- puts response.content
- puts "Role: #{response.role}"
- puts "Model: #{response.model_id}"
- puts "Tokens: #{response.input_tokens} input, #{response.output_tokens} output"
- ```
-
- ## Choosing Models
-
- You can specify which model to use when creating a chat:
-
- ```ruby
- # Create a chat with a specific model
- chat = RubyLLM.chat(model: 'gpt-4o-mini')
-
- # Use Claude instead
- claude_chat = RubyLLM.chat(model: 'claude-3-5-sonnet-20241022')
-
- # Or change the model for an existing chat
- chat.with_model('gemini-2.0-flash')
- ```
-
- ## Multi-turn Conversations
-
- Chats maintain conversation history automatically:
-
- ```ruby
- chat = RubyLLM.chat
-
- # Start a conversation
- chat.ask "What's your favorite programming language?"
-
- # Follow up
- chat.ask "Why do you like that language?"
-
- # Continue the conversation
- chat.ask "What are its weaknesses?"
-
- # Access the conversation history
- chat.messages.each do |message|
-   puts "#{message.role}: #{message.content[0..50]}..."
- end
- ```
-
- ## Working with Images
-
- Vision-capable models can understand images:
-
- ```ruby
- chat = RubyLLM.chat
-
- # Ask about an image (local file)
- chat.ask "What's in this image?", with: { image: "path/to/image.jpg" }
-
- # Or use an image URL
- chat.ask "Describe this picture", with: { image: "https://example.com/image.jpg" }
-
- # Include multiple images
- chat.ask "Compare these two charts", with: {
-   image: ["chart1.png", "chart2.png"]
- }
-
- # Combine text and image
- chat.ask "Is this the Ruby logo?", with: { image: "logo.png" }
- ```
-
- ## Working with Audio
-
- Models with audio capabilities can process spoken content:
-
- ```ruby
- chat = RubyLLM.chat(model: 'gpt-4o-audio-preview')
-
- # Analyze audio content
- chat.ask "What's being said in this recording?", with: {
-   audio: "meeting.wav"
- }
-
- # Ask follow-up questions about the audio
- chat.ask "Summarize the key points mentioned"
- ```
-
- ## Streaming Responses
-
- For a more interactive experience, you can stream responses as they're generated:
-
- ```ruby
- chat = RubyLLM.chat
-
- # Stream the response with a block
- chat.ask "Tell me a story about a Ruby programmer" do |chunk|
-   # Each chunk is a partial response
-   print chunk.content
-   $stdout.flush # Ensure output is displayed immediately
- end
-
- # Useful for long responses or real-time displays
- chat.ask "Write a detailed essay about programming paradigms" do |chunk|
-   add_to_ui(chunk.content) # Your method to update UI
- end
- ```
-
- ## Temperature Control
-
- Control the creativity and randomness of AI responses:
-
- ```ruby
- # Higher temperature (more creative)
- creative_chat = RubyLLM.chat.with_temperature(0.9)
- creative_chat.ask "Write a poem about Ruby programming"
-
- # Lower temperature (more deterministic)
- precise_chat = RubyLLM.chat.with_temperature(0.1)
- precise_chat.ask "Explain how Ruby's garbage collector works"
- ```
-
- ## Access Token Usage
-
- RubyLLM automatically tracks token usage for billing and quota management:
-
- ```ruby
- chat = RubyLLM.chat
- response = chat.ask "Explain quantum computing"
-
- # Check token usage
- puts "Input tokens: #{response.input_tokens}"
- puts "Output tokens: #{response.output_tokens}"
- puts "Total tokens: #{response.input_tokens + response.output_tokens}"
-
- # Estimate cost (varies by model)
- model = RubyLLM.models.find(response.model_id)
- input_cost = response.input_tokens * model.input_price_per_million / 1_000_000
- output_cost = response.output_tokens * model.output_price_per_million / 1_000_000
- puts "Estimated cost: $#{(input_cost + output_cost).round(6)}"
- ```
-
- ## Registering Event Handlers
-
- You can register callbacks for chat events:
-
- ```ruby
- chat = RubyLLM.chat
-
- # Called when a new assistant message starts
- chat.on_new_message do
-   puts "Assistant is typing..."
- end
-
- # Called when a message is complete
- chat.on_end_message do |message|
-   puts "Response complete!"
-   puts "Used #{message.input_tokens + message.output_tokens} tokens"
- end
-
- # These callbacks work with both streaming and non-streaming responses
- chat.ask "Tell me about Ruby's history"
- ```
-
- ## Multiple Parallel Chats
-
- You can maintain multiple separate chat instances:
-
- ```ruby
- # Create multiple chat instances
- ruby_chat = RubyLLM.chat
- python_chat = RubyLLM.chat
-
- # Each has its own conversation history
- ruby_chat.ask "What's great about Ruby?"
- python_chat.ask "What's great about Python?"
-
- # Continue separate conversations
- ruby_chat.ask "How does Ruby handle metaprogramming?"
- python_chat.ask "How does Python handle decorators?"
- ```
-
- ## Next Steps
-
- Now that you understand chat basics, you might want to explore:
-
- - [Using Tools]({% link guides/tools.md %}) to let AI use your Ruby code
- - [Streaming Responses]({% link guides/streaming.md %}) for real-time interactions
- - [Rails Integration]({% link guides/rails.md %}) to persist conversations in your apps