llms 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +7 -0
  2. data/LICENSE +21 -0
  3. data/README.md +160 -0
  4. data/bin/llms-chat +6 -0
  5. data/bin/llms-test-model-access +4 -0
  6. data/bin/llms-test-model-image-support +4 -0
  7. data/bin/llms-test-model-prompt-caching +4 -0
  8. data/bin/llms-test-model-tool-use +5 -0
  9. data/lib/llms/adapters/anthropic_message_adapter.rb +73 -0
  10. data/lib/llms/adapters/anthropic_tool_call_adapter.rb +20 -0
  11. data/lib/llms/adapters/base_message_adapter.rb +60 -0
  12. data/lib/llms/adapters/google_gemini_message_adapter.rb +72 -0
  13. data/lib/llms/adapters/google_gemini_tool_call_adapter.rb +20 -0
  14. data/lib/llms/adapters/open_ai_compatible_message_adapter.rb +88 -0
  15. data/lib/llms/adapters/open_ai_compatible_tool_call_adapter.rb +67 -0
  16. data/lib/llms/adapters.rb +12 -0
  17. data/lib/llms/apis/google_gemini_api.rb +45 -0
  18. data/lib/llms/apis/open_ai_compatible_api.rb +54 -0
  19. data/lib/llms/cli/base.rb +186 -0
  20. data/lib/llms/cli/chat.rb +92 -0
  21. data/lib/llms/cli/test_access.rb +79 -0
  22. data/lib/llms/cli/test_image_support.rb +92 -0
  23. data/lib/llms/cli/test_prompt_caching.rb +275 -0
  24. data/lib/llms/cli/test_tool_use.rb +108 -0
  25. data/lib/llms/cli.rb +12 -0
  26. data/lib/llms/conversation.rb +100 -0
  27. data/lib/llms/conversation_message.rb +60 -0
  28. data/lib/llms/conversation_tool_call.rb +14 -0
  29. data/lib/llms/conversation_tool_result.rb +15 -0
  30. data/lib/llms/exceptions.rb +33 -0
  31. data/lib/llms/executors/anthropic_executor.rb +247 -0
  32. data/lib/llms/executors/base_executor.rb +144 -0
  33. data/lib/llms/executors/google_gemini_executor.rb +212 -0
  34. data/lib/llms/executors/hugging_face_executor.rb +17 -0
  35. data/lib/llms/executors/open_ai_compatible_executor.rb +209 -0
  36. data/lib/llms/executors.rb +52 -0
  37. data/lib/llms/models/model.rb +86 -0
  38. data/lib/llms/models/provider.rb +48 -0
  39. data/lib/llms/models.rb +187 -0
  40. data/lib/llms/parsers/anthropic_chat_response_stream_parser.rb +184 -0
  41. data/lib/llms/parsers/google_gemini_chat_response_stream_parser.rb +128 -0
  42. data/lib/llms/parsers/open_ai_compatible_chat_response_stream_parser.rb +170 -0
  43. data/lib/llms/parsers/partial_json_parser.rb +77 -0
  44. data/lib/llms/parsers/sse_chat_response_stream_parser.rb +72 -0
  45. data/lib/llms/public_models.json +607 -0
  46. data/lib/llms/stream/event_emitter.rb +48 -0
  47. data/lib/llms/stream/events.rb +104 -0
  48. data/lib/llms/usage/cost_calculator.rb +75 -0
  49. data/lib/llms/usage/usage_data.rb +46 -0
  50. data/lib/llms.rb +16 -0
  51. metadata +243 -0
data/lib/llms/stream/events.rb ADDED
@@ -0,0 +1,104 @@
+ module LLMs
+   module Stream
+     class Events
+       class Base
+         attr_reader :timestamp
+         def initialize
+           @timestamp = Time.now
+         end
+       end
+
+       class MessageStarted < Base
+         attr_reader :message_id, :text
+         def initialize(message_id, text = '')
+           super()
+           @message_id = message_id
+           @text = text
+         end
+       end
+
+       class UsageUpdated < Base
+         attr_reader :message_id, :usage
+         def initialize(message_id, usage)
+           super()
+           @message_id = message_id
+           @usage = usage
+         end
+       end
+
+       class ThinkingDelta < Base
+         attr_reader :message_id, :thinking
+         def initialize(message_id, thinking)
+           super()
+           @message_id = message_id
+           @thinking = thinking
+         end
+       end
+
+       class TextDelta < Base
+         attr_reader :message_id, :text
+         def initialize(message_id, text)
+           super()
+           @message_id = message_id
+           @text = text
+         end
+       end
+
+       class ToolCallStarted < Base
+         attr_reader :message_id, :tool_call_id, :index, :name, :arguments
+         def initialize(message_id, tool_call_id, index, name, arguments)
+           super()
+           @message_id = message_id
+           @tool_call_id = tool_call_id
+           @index = index
+           @name = name
+           @arguments = arguments
+         end
+       end
+
+       class ToolCallArgumentsJsonDelta < Base
+         attr_reader :message_id, :tool_call_id, :index, :json_delta
+         def initialize(message_id, tool_call_id, index, json_delta)
+           super()
+           @message_id = message_id
+           @tool_call_id = tool_call_id
+           @index = index
+           @json_delta = json_delta
+         end
+       end
+
+       ## Holds the current parse-attempted state of the arguments object, not a delta
+       class ToolCallArgumentsUpdated < Base
+         attr_reader :message_id, :tool_call_id, :index, :arguments
+         def initialize(message_id, tool_call_id, index, arguments)
+           super()
+           @message_id = message_id
+           @tool_call_id = tool_call_id
+           @index = index
+           @arguments = arguments
+         end
+       end
+
+       class ToolCallCompleted < Base
+         attr_reader :message_id, :tool_call_id, :index, :name, :arguments
+         def initialize(message_id, tool_call_id, index, name, arguments)
+           super()
+           @message_id = message_id
+           @tool_call_id = tool_call_id
+           @index = index
+           @name = name
+           @arguments = arguments
+         end
+       end
+
+       class MessageCompleted < Base
+         attr_reader :message_id, :response
+         def initialize(message_id, response)
+           super()
+           @message_id = message_id
+           @response = response
+         end
+       end
+     end
+   end
+ end
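These event classes are plain value objects emitted while a response streams in; a consumer would typically branch on the event class. A minimal sketch of such a handler (the method name handle_stream_event is hypothetical; only the event classes and their readers come from the file above):

  # Hypothetical: dispatch on the event classes defined in events.rb above.
  def handle_stream_event(event)
    case event
    when LLMs::Stream::Events::ThinkingDelta
      print event.thinking
    when LLMs::Stream::Events::TextDelta
      print event.text
    when LLMs::Stream::Events::ToolCallCompleted
      puts "\nTool call #{event.name}: #{event.arguments.inspect}"
    when LLMs::Stream::Events::MessageCompleted
      puts "\nDone (message #{event.message_id}, at #{event.timestamp})"
    end
  end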
data/lib/llms/usage/cost_calculator.rb ADDED
@@ -0,0 +1,75 @@
+ module LLMs
+   module Usage
+     class CostCalculator
+       def initialize(pricing)
+         @pricing = pricing || {}
+       end
+
+       def calculate(usage_data, model = nil)
+         pricing = model&.pricing || @pricing
+         components = []
+         total_cost = 0.0
+
+         if usage_data.input_tokens > 0 && pricing[:input]
+           cost = (usage_data.input_tokens / 1_000_000.0) * pricing[:input]
+           components << {
+             type: :input,
+             tokens: usage_data.input_tokens,
+             rate: pricing[:input],
+             cost: cost
+           }
+           total_cost += cost
+         end
+
+         if usage_data.output_tokens > 0 && pricing[:output]
+           cost = (usage_data.output_tokens / 1_000_000.0) * pricing[:output]
+           components << {
+             type: :output,
+             tokens: usage_data.output_tokens,
+             rate: pricing[:output],
+             cost: cost
+           }
+           total_cost += cost
+         end
+
+         if usage_data.cache_read_tokens > 0 && pricing[:cache_read]
+           cost = (usage_data.cache_read_tokens / 1_000_000.0) * pricing[:cache_read]
+           components << {
+             type: :cache_read,
+             tokens: usage_data.cache_read_tokens,
+             rate: pricing[:cache_read],
+             cost: cost
+           }
+           total_cost += cost
+         end
+
+         if usage_data.cache_write_tokens > 0 && pricing[:cache_write]
+           cost = (usage_data.cache_write_tokens / 1_000_000.0) * pricing[:cache_write]
+           components << {
+             type: :cache_write,
+             tokens: usage_data.cache_write_tokens,
+             rate: pricing[:cache_write],
+             cost: cost
+           }
+           total_cost += cost
+         end
+
+         {
+           total_cost: total_cost,
+           components: components,
+           currency: 'USD'
+         }
+       end
+
+       def calculate_simple(input_tokens: 0, output_tokens: 0, cache_read_tokens: 0, cache_write_tokens: 0)
+         usage_data = UsageData.new(
+           input_tokens: input_tokens,
+           output_tokens: output_tokens,
+           cache_read_tokens: cache_read_tokens,
+           cache_write_tokens: cache_write_tokens
+         )
+         calculate(usage_data)
+       end
+     end
+   end
+ end
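CostCalculator takes a pricing hash keyed by :input, :output, :cache_read and :cache_write, with rates in USD per million tokens, and returns a total plus a per-component breakdown. A minimal usage sketch (the rates are illustrative placeholders, not real provider pricing):

  pricing = { input: 3.0, output: 15.0, cache_read: 0.3, cache_write: 3.75 } # USD per 1M tokens, illustrative
  calculator = LLMs::Usage::CostCalculator.new(pricing)

  result = calculator.calculate_simple(input_tokens: 120_000, output_tokens: 4_000)
  result[:total_cost] # => 0.42  (0.12 * 3.0 + 0.004 * 15.0)
  result[:components] # => array of { type:, tokens:, rate:, cost: } hashes
  result[:currency]   # => 'USD'

Token types with a zero count, or with no corresponding rate in the pricing hash, are simply skipped.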
data/lib/llms/usage/usage_data.rb ADDED
@@ -0,0 +1,46 @@
+ module LLMs
+   module Usage
+     class UsageData
+       attr_reader :input_tokens, :output_tokens, :cache_read_tokens,
+                   :cache_write_tokens, :execution_time, :model_name
+
+       def initialize(input_tokens: 0, output_tokens: 0,
+                      cache_read_tokens: 0, cache_write_tokens: 0,
+                      execution_time: 0, model_name: nil)
+         @input_tokens = input_tokens.to_i
+         @output_tokens = output_tokens.to_i
+         @cache_read_tokens = cache_read_tokens.to_i
+         @cache_write_tokens = cache_write_tokens.to_i
+         @execution_time = execution_time.to_f
+         @model_name = model_name
+       end
+
+       def total_tokens
+         @input_tokens + @output_tokens + @cache_read_tokens + @cache_write_tokens
+       end
+
+       def to_h
+         {
+           input_tokens: @input_tokens,
+           output_tokens: @output_tokens,
+           cache_read_tokens: @cache_read_tokens,
+           cache_write_tokens: @cache_write_tokens,
+           total_tokens: total_tokens,
+           execution_time: @execution_time,
+           model_name: @model_name
+         }
+       end
+
+       def to_s
+         if total_tokens == 0
+           "Usage: 0 tokens () in #{@execution_time.round(2)}s"
+         else
+           "Usage: #{total_tokens} tokens (#{@input_tokens} input, #{@output_tokens} output" +
+             "#{@cache_read_tokens > 0 ? ", #{@cache_read_tokens} cache_read" : ""}" +
+             "#{@cache_write_tokens > 0 ? ", #{@cache_write_tokens} cache_write" : ""}" +
+             ") in #{@execution_time.round(2)}s"
+         end
+       end
+     end
+   end
+ end
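UsageData is a simple value object; a quick sketch of constructing one and reading it back (the token counts and model name are illustrative):

  usage = LLMs::Usage::UsageData.new(input_tokens: 1_200, output_tokens: 350,
                                     execution_time: 1.8, model_name: 'some-model')
  usage.total_tokens      # => 1550
  usage.to_s              # => "Usage: 1550 tokens (1200 input, 350 output) in 1.8s"
  usage.to_h[:model_name] # => "some-model"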
data/lib/llms.rb ADDED
@@ -0,0 +1,16 @@
+ require_relative 'llms/exceptions'
+ require_relative 'llms/conversation'
+ require_relative 'llms/conversation_message'
+ require_relative 'llms/conversation_tool_call'
+ require_relative 'llms/conversation_tool_result'
+ require_relative 'llms/executors'
+ require_relative 'llms/adapters'
+ require_relative 'llms/models'
+ require_relative 'llms/usage/usage_data'
+ require_relative 'llms/usage/cost_calculator'
+ require_relative 'llms/models/provider'
+ require_relative 'llms/models/model'
+
+ module LLMs
+   VERSION = '0.1.0'
+ end
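Requiring the top-level file pulls in the whole library, for example:

  require 'llms'
  LLMs::VERSION # => "0.1.0"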
metadata ADDED
@@ -0,0 +1,243 @@
+ --- !ruby/object:Gem::Specification
+ name: llms
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ platform: ruby
+ authors:
+ - Ben Lund
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2025-07-25 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: ruby-anthropic
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 0.4.2
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 0.4.2
+ - !ruby/object:Gem::Dependency
+   name: faraday
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+ - !ruby/object:Gem::Dependency
+   name: json
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+ - !ruby/object:Gem::Dependency
+   name: readline
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.0'
+ - !ruby/object:Gem::Dependency
+   name: base64
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0.1'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0.1'
+ - !ruby/object:Gem::Dependency
+   name: bundler
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+ - !ruby/object:Gem::Dependency
+   name: rake
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '13.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '13.0'
+ - !ruby/object:Gem::Dependency
+   name: rspec
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.0'
+ - !ruby/object:Gem::Dependency
+   name: yard
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.9'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.9'
+ - !ruby/object:Gem::Dependency
+   name: rubocop
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.0'
+ description: Ruby library for interacting with various LLM providers including Anthropic,
+   Google Gemini, xAI, and other OpenAI-compatible API providers (including local models).
+   Supports streaming, event-handling, conversation management, tool-use, image input,
+   and cost-tracking.
+ email:
+ - ben@benlund.com
+ executables:
+ - llms-chat
+ - llms-test-model-access
+ - llms-test-model-image-support
+ - llms-test-model-prompt-caching
+ - llms-test-model-tool-use
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - LICENSE
+ - README.md
+ - bin/llms-chat
+ - bin/llms-test-model-access
+ - bin/llms-test-model-image-support
+ - bin/llms-test-model-prompt-caching
+ - bin/llms-test-model-tool-use
+ - lib/llms.rb
+ - lib/llms/adapters.rb
+ - lib/llms/adapters/anthropic_message_adapter.rb
+ - lib/llms/adapters/anthropic_tool_call_adapter.rb
+ - lib/llms/adapters/base_message_adapter.rb
+ - lib/llms/adapters/google_gemini_message_adapter.rb
+ - lib/llms/adapters/google_gemini_tool_call_adapter.rb
+ - lib/llms/adapters/open_ai_compatible_message_adapter.rb
+ - lib/llms/adapters/open_ai_compatible_tool_call_adapter.rb
+ - lib/llms/apis/google_gemini_api.rb
+ - lib/llms/apis/open_ai_compatible_api.rb
+ - lib/llms/cli.rb
+ - lib/llms/cli/base.rb
+ - lib/llms/cli/chat.rb
+ - lib/llms/cli/test_access.rb
+ - lib/llms/cli/test_image_support.rb
+ - lib/llms/cli/test_prompt_caching.rb
+ - lib/llms/cli/test_tool_use.rb
+ - lib/llms/conversation.rb
+ - lib/llms/conversation_message.rb
+ - lib/llms/conversation_tool_call.rb
+ - lib/llms/conversation_tool_result.rb
+ - lib/llms/exceptions.rb
+ - lib/llms/executors.rb
+ - lib/llms/executors/anthropic_executor.rb
+ - lib/llms/executors/base_executor.rb
+ - lib/llms/executors/google_gemini_executor.rb
+ - lib/llms/executors/hugging_face_executor.rb
+ - lib/llms/executors/open_ai_compatible_executor.rb
+ - lib/llms/models.rb
+ - lib/llms/models/model.rb
+ - lib/llms/models/provider.rb
+ - lib/llms/parsers/anthropic_chat_response_stream_parser.rb
+ - lib/llms/parsers/google_gemini_chat_response_stream_parser.rb
+ - lib/llms/parsers/open_ai_compatible_chat_response_stream_parser.rb
+ - lib/llms/parsers/partial_json_parser.rb
+ - lib/llms/parsers/sse_chat_response_stream_parser.rb
+ - lib/llms/public_models.json
+ - lib/llms/stream/event_emitter.rb
+ - lib/llms/stream/events.rb
+ - lib/llms/usage/cost_calculator.rb
+ - lib/llms/usage/usage_data.rb
+ homepage: https://github.com/benlund/llms
+ licenses:
+ - MIT
+ metadata:
+   homepage_uri: https://github.com/benlund/llms
+   source_code_uri: https://github.com/benlund/llms
+   changelog_uri: https://github.com/benlund/llms/blob/main/CHANGELOG.md
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: 2.5.0
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubygems_version: 3.5.11
+ signing_key:
+ specification_version: 4
+ summary: Ruby library for using LLM APIs across multiple providers
+ test_files: []
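Per the gemspec above, the gem can be added to a project with the usual Bundler entry (the pessimistic version constraint is just one reasonable choice):

  # Gemfile
  gem 'llms', '~> 0.1.0'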