llm.rb 0.9.1 → 0.10.1

The diff below shows the changes between the two published versions of the llm.rb gem, as they appear on RubyGems.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 20812e4c8cfc7ebee81190054e7483b00b24e0f0f567f630bfb4dc0ac962193f
-  data.tar.gz: 4ff26fa74520b29da3b6aa10331d5cd618e13d4df53cb9f0c5b7a7691f5fb42e
+  metadata.gz: 39e538d8185cf5c8c5a36da0e1bf5b0b9e0055945a02570cd00fefc805b288d0
+  data.tar.gz: 7fc0d3a4422fe10bb3058c7b1b5b9bc80693ccc0dbf6b62bda46d42fb7c2830c
 SHA512:
-  metadata.gz: 4ed6e3f0426fc0967cb59cc14ae0d0afe552a93e2a29b8d173bf000341b77800ea31ef9088d45a696d353ac0e7f58cc1b3d7f86c3c15d757ae73efb33523b56d
-  data.tar.gz: 136e14863ef92264e270f3b6616a9660298cdc477a2a15db1b89b9360be1ce2be3dde0ee66d97be68d57c5bf68f4f0fc9764657187fbf2ae226f1a5ed6579189
+  metadata.gz: a2b3de69ce317d856ec593074e22883ec2b96ddcdc2637cb2b4c555885c1c771b2ffd447b255cdc16a2c7f1c2b72362ab1b6e29ec0fdf775e977292b03fd3e34
+  data.tar.gz: c2ba0f853b7eaac4ca8fab15f497a3fa375e054b7da928b2f0798e393909baff20d2381afd48e793fd878261a0d838474e4be3be5b5232e480cf162af57dbe2e
data/README.md CHANGED
@@ -30,19 +30,20 @@ images, files, and JSON Schema generation.
 
 ## Demos
 
-<details>
-  <summary><b>1. Tools: "system" function</b></summary>
-  <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/toolcalls.gif">
-</details>
+> The
+> [llmrb/llm-shell](https://github.com/llmrb/llm-shell)
+> project is built with llm.rb and its demos have been
+> included to provide a better idea of what llm.rb
+> is capable of.
 
 <details>
-  <summary><b>2. Files: import at runtime</b></summary>
-  <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-runtime.gif">
+  <summary><b>1. An introduction to tool calls</b></summary>
+  <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/toolcalls_v2.gif">
 </details>
 
 <details>
-  <summary><b>3. Files: import at boot time</b></summary>
-  <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-boottime.gif">
+  <summary><b>2. Add files as conversation context</b></summary>
+  <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-runtime_v2.gif">
 </details>
 
 ## Examples
@@ -114,13 +115,13 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
 > There Is More Than One Way To Do It (TIMTOWTDI) when you are
 > using llm.rb &ndash; and this is especially true when it
 > comes to streaming. See the streaming documentation in
-> [docs/](docs/STREAMING.md#flexibility) for more details.
+> [docs/](docs/STREAMING.md#scopes) for more details.
 
 The following example streams the messages in a conversation
 as they are generated in real-time. This feature can be useful
-in case you want to see the contents of a message as it is
-generated, or in case you want to avoid potential read timeouts
-during the generation of a response.
+when you want to stream a conversation in real time, or when you
+want to avoid potential read timeouts during the generation of a
+response.
 
 The `stream` option can be set to an IO object, or the value `true`
 to enable streaming &ndash; and at the end of the request, `bot.chat`
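
For reference, a minimal sketch of the streaming setup this hunk describes. The `stream:` option and `bot.chat` come from the README text above; `LLM.openai`, `LLM::Bot`, and `flush` are assumptions drawn from the gem's other examples:

```ruby
require "llm"

# A sketch, not the README's exact example: LLM.openai, LLM::Bot,
# and #flush are assumed from elsewhere in the gem's documentation.
llm = LLM.openai(key: ENV["OPENAI_API_KEY"])
bot = LLM::Bot.new(llm, stream: $stdout)  # an IO object, or `true`
bot.chat("Explain streaming in llm.rb").flush
```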
@@ -496,22 +497,10 @@ else there's the API reference. It covers classes and methods that the README gl
 over or doesn't cover at all. The API reference is available at
 [0x1eef.github.io/x/llm.rb](https://0x1eef.github.io/x/llm.rb).
 
-
 ### Guides
 
 The [docs/](docs/) directory contains some additional documentation that
-didn't quite make it into the README. It covers the design guidelines that
-the library follows, some strategies for memory management, and other
-provider-specific features.
-
-## See also
-
-**[llmrb/llm-shell](https://github.com/llmrb/llm-shell)**
-
-An extensible, developer-oriented command line utility that is powered by
-llm.rb and serves as a demonstration of the library's capabilities. The
-[demo](https://github.com/llmrb/llm-shell#demos) section has a number of GIF
-previews might be especially interesting.
+didn't quite make it into the README.
 
 ## Install
 
data/lib/llm/buffer.rb CHANGED
@@ -2,7 +2,6 @@
 
 module LLM
   ##
-  # @private
   # {LLM::Buffer LLM::Buffer} provides an Enumerable object that
   # yields each message in a conversation on-demand, and only sends
   # a request to the LLM when a response is needed.
@@ -58,6 +57,15 @@ module LLM
     end
     alias_method :push, :<<
 
+    ##
+    # @param [Integer, #to_i] index
+    #  The message index
+    # @return [LLM::Message, nil]
+    #  Returns a message, or nil
+    def [](index)
+      @completed[index.to_i] || to_a[index.to_i]
+    end
+
     ##
     # @return [String]
     def inspect
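
The new `LLM::Buffer#[]` favors messages that have already been completed, and only falls back to `to_a` &ndash; which may trigger a pending request &ndash; when the index isn't available yet. A hypothetical usage sketch, assuming a bot whose `messages` method returns an `LLM::Buffer`:

```ruby
# Hypothetical: assumes bot.messages returns an LLM::Buffer.
msgs = bot.messages
msg  = msgs[0]  # served from @completed when possible, else evaluates the buffer
puts "[#{msg.role}] #{msg.content}" if msg
```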
data/lib/llm/function.rb CHANGED
@@ -33,6 +33,11 @@ class LLM::Function
   class Return < Struct.new(:id, :value)
   end
 
+  ##
+  # Returns the function ID
+  # @return [String, nil]
+  attr_accessor :id
+
   ##
   # Returns the function name
   # @return [String]
@@ -43,11 +48,6 @@ class LLM::Function
   # @return [Array, nil]
   attr_accessor :arguments
 
-  ##
-  # Returns the function ID
-  # @return [String, nil]
-  attr_accessor :id
-
   ##
   # @param [String] name The function name
   # @yieldparam [LLM::Function] self The function object
@@ -61,10 +61,14 @@ class LLM::Function
 
   ##
   # Set the function description
-  # @param [String] str The function description
+  # @param [String] desc The function description
   # @return [void]
-  def description(str)
-    @description = str
+  def description(desc = nil)
+    if desc
+      @description = desc
+    else
+      @description
+    end
   end
 
   ##
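
`description` is now a combined reader and writer: called with an argument it assigns the description, called without one it returns it. A short sketch; the block-form constructor follows the `@yieldparam [LLM::Function] self` doc above, and the rest is assumption:

```ruby
fn = LLM::Function.new("system") do |f|
  f.description "Runs a shell command"  # writer: argument given
end
fn.description  #=> "Runs a shell command" (reader: no argument)
fn.id           #=> nil until a provider assigns a tool-call ID
```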
@@ -76,10 +76,10 @@ module LLM
 
     ##
     # Returns the default model for chat completions
-    # @see https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table claude-3-5-sonnet-20240620
+    # @see https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table claude-sonnet-4-20250514
     # @return [String]
     def default_model
-      "claude-3-5-sonnet-20240620"
+      "claude-sonnet-4-20250514"
     end
 
     private
@@ -43,7 +43,7 @@ class LLM::Gemini
     def create(prompt:, model: "gemini-2.0-flash-exp-image-generation", **params)
       req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
       body = JSON.dump({
-        contents: [{parts: {text: prompt}}],
+        contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
         generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
       }.merge!(params))
       req.body = body
@@ -93,6 +93,15 @@ class LLM::Gemini
       @provider.instance_variable_get(:@key)
     end
 
+    def create_prompt
+      <<~PROMPT
+        Your task is to generate one or more image(s) from
+        text I will provide to you. Your response *MUST* include
+        at least one image, and your response *MUST NOT* include
+        any text or other content.
+      PROMPT
+    end
+
     [:response_parser, :headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
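
Together with the `create` change above, every image-generation request now carries two text parts: the fixed steering prompt, then the caller's prompt. The request body looks roughly like this, where "a siamese cat" is a made-up user prompt:

```ruby
# Illustrative shape of the body built by create after this change.
body = {
  contents: [{
    parts: [
      {text: "Your task is to generate one or more image(s) from..."},  # create_prompt
      {text: "a siamese cat"}                                           # caller's prompt
    ]
  }],
  generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
}
```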
@@ -126,10 +126,10 @@ module LLM
 
     ##
     # Returns the default model for chat completions
-    # @see https://ai.google.dev/gemini-api/docs/models#gemini-1.5-flash gemini-1.5-flash
+    # @see https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash gemini-2.5-flash
     # @return [String]
     def default_model
-      "gemini-1.5-flash"
+      "gemini-2.5-flash"
     end
 
     private
@@ -121,10 +121,10 @@ module LLM
 
     ##
     # Returns the default model for chat completions
-    # @see https://platform.openai.com/docs/models/gpt-4o-mini gpt-4o-mini
+    # @see https://platform.openai.com/docs/models/gpt-4.1 gpt-4.1
     # @return [String]
     def default_model
-      "gpt-4o-mini"
+      "gpt-4.1"
     end
 
     private
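
The default-model bumps for the Anthropic, Gemini, and OpenAI providers only take effect when a request doesn't name a model; callers can still pin the old ones. A hedged sketch, assuming `complete` accepts a `model:` option as suggested by the gem's README examples:

```ruby
# Assumption: LLM.openai and a model: option on #complete,
# per the gem's README examples.
llm = LLM.openai(key: ENV["OPENAI_API_KEY"])
llm.complete "Hello"                        # uses the new default, gpt-4.1
llm.complete "Hello", model: "gpt-4o-mini"  # pin the previous default explicitly
```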
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module LLM
-  VERSION = "0.9.1"
+  VERSION = "0.10.1"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.9.1
+  version: 0.10.1
 platform: ruby
 authors:
 - Antar Azri
@@ -282,7 +282,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.6.8
+rubygems_version: 3.7.1
 specification_version: 4
 summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
   OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The toolkit includes