ruby_llm 0.1.0.pre15 → 0.1.0.pre17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: ca489465046f71efd3a80717b46a68cdcbc754527f4b5ccade15442d2a95854e
-  data.tar.gz: b1091dd99c8b96266b767b1be69ade14a88cffc0c3f2cd07c1bf20d9efebe6e2
+  metadata.gz: 899bee99690875afeb0d8bb83b654cf05ccaa4db7006971eb122e7188e91f6b7
+  data.tar.gz: a050cc712573b90bc2d2d1012f40c1866be940e001b6d2aaff6089834bf89584
 SHA512:
-  metadata.gz: 8bd968d14ec94fb254ae21a3d253cbe8b952145d3626540bf92cc5ded13d660e8f45b651319d3591f6a6e6b137c02cb94540c2a00680c25c2d7d3ecf4d799bbf
-  data.tar.gz: d1a5e1ef82b95f5ebe59b27440d703e88d4332f47cb706da8c4f09487608d6de572e78a38756af938a0e0ed0dbf8dc0f4a45aa82ac1d32f444681c3172baf40a
+  metadata.gz: f18a0c1094c407d7690151825f1c0cc557e2621fda75569c052ab9d396a46b7201e8aa3ba643a42da3d1b52e2a8a95216d315f66632cd6cc4de1fee0a372cfb1
+  data.tar.gz: 312856c0e943e93933e33e492d1e599732b6714a527cf83ac3d5b271d50ecd004064ae7aec5b5671e5dca2c83dba3e458794f0a87c0c12f19c27357a89b9ad61
data/README.md CHANGED
@@ -76,6 +76,33 @@ last_message = chat.messages.last
 puts "Conversation used #{last_message.input_tokens} input tokens and #{last_message.output_tokens} output tokens"
 ```
 
+## Text Embeddings
+
+Need vector embeddings for your text? RubyLLM makes it simple:
+
+```ruby
+# Get embeddings with the default model
+vector = RubyLLM.embed("Hello, world!")
+
+# Use a specific model
+vector = RubyLLM.embed(
+  "Ruby is awesome!",
+  model: "text-embedding-3-large"
+)
+
+# Process multiple texts at once
+vectors = RubyLLM.embed([
+  "First document",
+  "Second document",
+  "Third document"
+])
+
+# Configure the default model
+RubyLLM.configure do |config|
+  config.default_embedding_model = 'text-embedding-3-large'
+end
+```
+
 
 ## Using Tools
 
 Give your AI assistants access to your Ruby code by creating tool classes that do one thing well:
data/lib/ruby_llm/active_record/acts_as.rb CHANGED
@@ -9,7 +9,7 @@ module RubyLLM
   extend ActiveSupport::Concern
 
   class_methods do
-    def acts_as_chat(message_class: 'Message')
+    def acts_as_chat(message_class: 'Message') # rubocop:disable Metrics/MethodLength
       include ChatMethods
 
       has_many :messages,
@@ -17,13 +17,23 @@ module RubyLLM
                class_name: message_class.to_s,
                dependent: :destroy
 
-      delegate :complete, to: :chat
+      delegate :complete,
+               :with_tool,
+               :with_tools,
+               :with_model,
+               :with_temperature,
+               :on_new_message,
+               :on_end_message,
+               :add_message,
+               to: :to_llm
     end
 
     def acts_as_message(chat_class: 'Chat')
       include MessageMethods
 
       belongs_to :chat, class_name: chat_class.to_s
+
+      delegate :tool_call?, :tool_result?, :tool_results, to: :to_llm
     end
   end
 end
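
The widened delegation list lets chat configuration be chained straight off the ActiveRecord object. A minimal sketch of what this enables, assuming a hypothetical `Chat` model (with a `model_id` column) that calls `acts_as_chat`, and a hypothetical `Calculator` tool class:

```ruby
# `Chat` and `Calculator` are illustrative names, not part of this gem
chat = Chat.create!(model_id: 'gpt-4o-mini')

chat.with_tool(Calculator)   # delegated to the underlying RubyLLM chat
    .with_temperature(0.3)   # likewise delegated
    .ask("What is 2 + 2?")   # persists messages via the callbacks below
```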
@@ -33,7 +43,7 @@ module RubyLLM
   module ChatMethods
     extend ActiveSupport::Concern
 
-    def chat
+    def to_llm
       chat = RubyLLM.chat(model: model_id)
 
       # Load existing messages into chat
@@ -44,8 +54,6 @@ module RubyLLM
       # Set up message persistence
       chat.on_new_message { persist_new_message }
           .on_end_message { |msg| persist_message_completion(msg) }
-
-      chat
     end
 
     def ask(message, &block)
@@ -59,7 +67,10 @@ module RubyLLM
     private
 
     def persist_new_message
-      messages.create!
+      messages.create!(
+        role: :assistant,
+        content: String.new
+      )
     end
 
     def persist_message_completion(message)
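
Together with the `to_llm` change above, the persistence flow becomes: `on_new_message` inserts an empty assistant message as soon as a response starts, and `on_end_message` presumably fills it in once the reply completes. A sketch of the effect, again with the hypothetical `Chat` model:

```ruby
chat = Chat.create!(model_id: 'gpt-4o-mini')

chat.ask("Write a haiku about Ruby") do |chunk|
  # An empty assistant message row already exists at this point,
  # so a UI can render the record and update it as chunks stream in.
  print chunk.content
end

chat.messages.last # => the completed assistant message record
```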
data/lib/ruby_llm/configuration.rb CHANGED
@@ -10,11 +10,16 @@ module RubyLLM
   #   config.anthropic_api_key = ENV['ANTHROPIC_API_KEY']
   # end
   class Configuration
-    attr_accessor :openai_api_key, :anthropic_api_key, :default_model, :request_timeout
+    attr_accessor :openai_api_key,
+                  :anthropic_api_key,
+                  :default_model,
+                  :default_embedding_model,
+                  :request_timeout
 
     def initialize
       @request_timeout = 30
       @default_model = 'gpt-4o-mini'
+      @default_embedding_model = 'text-embedding-3-small'
     end
   end
 end
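
`default_embedding_model` joins the existing settings and is what `Embedding.embed` (below) falls back to when no model is given. Configuration stays the usual block, e.g.:

```ruby
RubyLLM.configure do |config|
  config.openai_api_key = ENV['OPENAI_API_KEY']
  # Override the new 'text-embedding-3-small' default if desired
  config.default_embedding_model = 'text-embedding-3-large'
end
```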
data/lib/ruby_llm/embedding.rb ADDED
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  # Core embedding interface. Provides a clean way to generate embeddings
+  # from text using various provider models.
+  module Embedding
+    module_function
+
+    def embed(text, model: nil)
+      model_id = model || RubyLLM.config.default_embedding_model
+      Models.find(model_id)
+
+      provider = Provider.for(model_id)
+      provider.embed(text, model: model_id)
+    end
+  end
+end
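
Note that `Models.find(model_id)` discards its return value, so the call appears to exist purely to fail fast on unrecognized model IDs before `Provider.for` routes the request. Usage, matching the README diff above:

```ruby
RubyLLM.embed("vector me")                                   # configured default model
RubyLLM.embed("vector me", model: "text-embedding-3-large")  # explicit model

# An unknown ID should be rejected by Models.find before any HTTP request
RubyLLM.embed("vector me", model: "no-such-model")           # expected to raise
```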
data/lib/ruby_llm/provider.rb CHANGED
@@ -30,17 +30,23 @@ module RubyLLM
       parse_list_models_response response
     end
 
+    def embed(text, model:)
+      payload = build_embedding_payload text, model: model
+      response = post embedding_url, payload
+      parse_embedding_response response
+    end
+
     private
 
     def sync_response(payload)
-      response = post payload
+      response = post completion_url, payload
       parse_completion_response response
     end
 
     def stream_response(payload, &block)
       accumulator = StreamAccumulator.new
 
-      post payload do |req|
+      post completion_url, payload do |req|
         req.options.on_data = handle_stream do |chunk|
           accumulator.add chunk
           block.call chunk
@@ -50,8 +56,8 @@ module RubyLLM
       accumulator.to_message
     end
 
-    def post(payload)
-      connection.post completion_url, payload do |req|
+    def post(url, payload)
+      connection.post url, payload do |req|
         req.headers.merge! headers
         yield req if block_given?
       end
data/lib/ruby_llm/providers/openai.rb CHANGED
@@ -28,6 +28,10 @@ module RubyLLM
       '/v1/models'
     end
 
+    def embedding_url
+      '/v1/embeddings'
+    end
+
     def build_payload(messages, tools:, temperature:, model:, stream: false) # rubocop:disable Metrics/MethodLength
       {
         model: model,
@@ -53,6 +57,18 @@ module RubyLLM
       end
     end
 
+    def build_embedding_payload(text, model:)
+      {
+        model: model,
+        input: text
+      }
+    end
+
+    def parse_embedding_response(response)
+      embeddings = response.body['data'].map { |d| d['embedding'] }
+      embeddings.size == 1 ? embeddings.first : embeddings
+    end
+
     def format_tool_calls(tool_calls) # rubocop:disable Metrics/MethodLength
       return nil unless tool_calls&.any?
 
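
`parse_embedding_response` matches the shape of OpenAI's `/v1/embeddings` response, where each element of `data` carries one vector; a single input is unwrapped to a bare array. Roughly (vector values truncated for illustration):

```ruby
# Abbreviated response body as parsed from JSON
body = {
  'object' => 'list',
  'data'   => [
    { 'object' => 'embedding', 'index' => 0, 'embedding' => [0.0023, -0.0091] }
  ],
  'model'  => 'text-embedding-3-small'
}

embeddings = body['data'].map { |d| d['embedding'] }
embeddings.size == 1 ? embeddings.first : embeddings # => [0.0023, -0.0091]
```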
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '0.1.0.pre15'
+  VERSION = '0.1.0.pre17'
 end
data/lib/ruby_llm.rb CHANGED
@@ -27,6 +27,10 @@ module RubyLLM
     Chat.new(model: model)
   end
 
+  def embed(...)
+    Embedding.embed(...)
+  end
+
   def models
     Models
   end
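
The `(...)` is Ruby's argument forwarding (available since 2.7), so both the positional text and the `model:` keyword pass through to `Embedding.embed` untouched:

```ruby
RubyLLM.embed("Hello")                                    # uses the configured default
RubyLLM.embed("Hello", model: "text-embedding-3-large")   # keyword forwarded as-is
```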
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 0.1.0.pre15
+  version: 0.1.0.pre17
 platform: ruby
 authors:
 - Carmine Paolino
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2025-02-04 00:00:00.000000000 Z
+date: 2025-02-06 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -348,6 +348,7 @@ files:
 - lib/ruby_llm/chat.rb
 - lib/ruby_llm/chunk.rb
 - lib/ruby_llm/configuration.rb
+- lib/ruby_llm/embedding.rb
 - lib/ruby_llm/message.rb
 - lib/ruby_llm/model_capabilities/anthropic.rb
 - lib/ruby_llm/model_capabilities/openai.rb