llm.rb 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +264 -110
- data/lib/llm/buffer.rb +83 -0
- data/lib/llm/chat.rb +131 -0
- data/lib/llm/file.rb +26 -40
- data/lib/llm/http_client.rb +10 -5
- data/lib/llm/message.rb +14 -8
- data/lib/llm/mime.rb +54 -0
- data/lib/llm/multipart.rb +98 -0
- data/lib/llm/provider.rb +116 -12
- data/lib/llm/providers/anthropic/error_handler.rb +2 -0
- data/lib/llm/providers/anthropic/format.rb +9 -1
- data/lib/llm/providers/anthropic/response_parser.rb +3 -1
- data/lib/llm/providers/anthropic.rb +14 -5
- data/lib/llm/providers/gemini/audio.rb +77 -0
- data/lib/llm/providers/gemini/error_handler.rb +2 -0
- data/lib/llm/providers/gemini/files.rb +160 -0
- data/lib/llm/providers/gemini/format.rb +19 -7
- data/lib/llm/providers/gemini/images.rb +99 -0
- data/lib/llm/providers/gemini/response_parser.rb +27 -1
- data/lib/llm/providers/gemini.rb +62 -6
- data/lib/llm/providers/ollama/error_handler.rb +2 -0
- data/lib/llm/providers/ollama/format.rb +18 -4
- data/lib/llm/providers/ollama/response_parser.rb +3 -1
- data/lib/llm/providers/ollama.rb +30 -7
- data/lib/llm/providers/openai/audio.rb +97 -0
- data/lib/llm/providers/openai/error_handler.rb +2 -0
- data/lib/llm/providers/openai/files.rb +148 -0
- data/lib/llm/providers/openai/format.rb +26 -7
- data/lib/llm/providers/openai/images.rb +109 -0
- data/lib/llm/providers/openai/response_parser.rb +58 -5
- data/lib/llm/providers/openai/responses.rb +78 -0
- data/lib/llm/providers/openai.rb +52 -6
- data/lib/llm/providers/voyageai.rb +2 -2
- data/lib/llm/response/audio.rb +13 -0
- data/lib/llm/response/audio_transcription.rb +14 -0
- data/lib/llm/response/audio_translation.rb +14 -0
- data/lib/llm/response/download_file.rb +15 -0
- data/lib/llm/response/file.rb +42 -0
- data/lib/llm/response/filelist.rb +18 -0
- data/lib/llm/response/image.rb +29 -0
- data/lib/llm/response/output.rb +56 -0
- data/lib/llm/response.rb +18 -6
- data/lib/llm/utils.rb +19 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +5 -2
- data/llm.gemspec +1 -6
- data/spec/anthropic/completion_spec.rb +1 -1
- data/spec/gemini/completion_spec.rb +22 -1
- data/spec/gemini/conversation_spec.rb +31 -0
- data/spec/gemini/files_spec.rb +124 -0
- data/spec/gemini/images_spec.rb +47 -0
- data/spec/llm/conversation_spec.rb +133 -1
- data/spec/ollama/completion_spec.rb +1 -1
- data/spec/ollama/conversation_spec.rb +31 -0
- data/spec/openai/audio_spec.rb +55 -0
- data/spec/openai/completion_spec.rb +22 -1
- data/spec/openai/files_spec.rb +150 -0
- data/spec/openai/images_spec.rb +95 -0
- data/spec/openai/responses_spec.rb +51 -0
- data/spec/setup.rb +8 -0
- metadata +31 -51
- data/LICENSE.txt +0 -21
- data/lib/llm/conversation.rb +0 -50
- data/lib/llm/lazy_conversation.rb +0 -51
- data/lib/llm/message_queue.rb +0 -47
- data/spec/llm/lazy_conversation_spec.rb +0 -92
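The headline change is the removal of LLM::Conversation, LLM::LazyConversation, and LLM::MessageQueue in favor of the new lib/llm/chat.rb and lib/llm/buffer.rb. The 0.3.0 LLM::Chat interface itself is not shown in this diff, but the deleted 0.2.0 classes (reproduced in full below) documented the conversational flow as follows; key is a placeholder for a real API key:

# 0.2.0 usage, assembled from the @example docs of the deleted classes.
llm = LLM.openai(key)
bot = llm.chat("Be a helpful weather assistant", :system)
bot.chat("What's the weather like in Rio?")
bot.chat("What's the weather like in Algiers?")
bot.messages.each do |message|
  # With the lazy conversation, a single request is made at this point
  p [message.role, message.content]
end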
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.3.0
 platform: ruby
 authors:
 - Antar Azri
@@ -9,50 +9,8 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-04-
+date: 2025-04-25 00:00:00.000000000 Z
 dependencies:
-- !ruby/object:Gem::Dependency
-  name: net-http
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.6.0
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.6.0
-- !ruby/object:Gem::Dependency
-  name: json
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-- !ruby/object:Gem::Dependency
-  name: yaml
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: webmock
   requirement: !ruby/object:Gem::Requirement
@@ -189,41 +147,57 @@ executables: []
 extensions: []
 extra_rdoc_files: []
 files:
-- LICENSE.txt
 - README.md
 - lib/llm.rb
-- lib/llm/conversation.rb
+- lib/llm/buffer.rb
+- lib/llm/chat.rb
 - lib/llm/core_ext/ostruct.rb
 - lib/llm/error.rb
 - lib/llm/file.rb
 - lib/llm/http_client.rb
-- lib/llm/lazy_conversation.rb
 - lib/llm/message.rb
-- lib/llm/message_queue.rb
+- lib/llm/mime.rb
 - lib/llm/model.rb
+- lib/llm/multipart.rb
 - lib/llm/provider.rb
 - lib/llm/providers/anthropic.rb
 - lib/llm/providers/anthropic/error_handler.rb
 - lib/llm/providers/anthropic/format.rb
 - lib/llm/providers/anthropic/response_parser.rb
 - lib/llm/providers/gemini.rb
+- lib/llm/providers/gemini/audio.rb
 - lib/llm/providers/gemini/error_handler.rb
+- lib/llm/providers/gemini/files.rb
 - lib/llm/providers/gemini/format.rb
+- lib/llm/providers/gemini/images.rb
 - lib/llm/providers/gemini/response_parser.rb
 - lib/llm/providers/ollama.rb
 - lib/llm/providers/ollama/error_handler.rb
 - lib/llm/providers/ollama/format.rb
 - lib/llm/providers/ollama/response_parser.rb
 - lib/llm/providers/openai.rb
+- lib/llm/providers/openai/audio.rb
 - lib/llm/providers/openai/error_handler.rb
+- lib/llm/providers/openai/files.rb
 - lib/llm/providers/openai/format.rb
+- lib/llm/providers/openai/images.rb
 - lib/llm/providers/openai/response_parser.rb
+- lib/llm/providers/openai/responses.rb
 - lib/llm/providers/voyageai.rb
 - lib/llm/providers/voyageai/error_handler.rb
 - lib/llm/providers/voyageai/response_parser.rb
 - lib/llm/response.rb
+- lib/llm/response/audio.rb
+- lib/llm/response/audio_transcription.rb
+- lib/llm/response/audio_translation.rb
 - lib/llm/response/completion.rb
+- lib/llm/response/download_file.rb
 - lib/llm/response/embedding.rb
+- lib/llm/response/file.rb
+- lib/llm/response/filelist.rb
+- lib/llm/response/image.rb
+- lib/llm/response/output.rb
+- lib/llm/utils.rb
 - lib/llm/version.rb
 - llm.gemspec
 - share/llm/models/anthropic.yml
@@ -233,22 +207,28 @@ files:
 - spec/anthropic/completion_spec.rb
 - spec/anthropic/embedding_spec.rb
 - spec/gemini/completion_spec.rb
+- spec/gemini/conversation_spec.rb
 - spec/gemini/embedding_spec.rb
+- spec/gemini/files_spec.rb
+- spec/gemini/images_spec.rb
 - spec/llm/conversation_spec.rb
-- spec/llm/lazy_conversation_spec.rb
 - spec/ollama/completion_spec.rb
+- spec/ollama/conversation_spec.rb
 - spec/ollama/embedding_spec.rb
+- spec/openai/audio_spec.rb
 - spec/openai/completion_spec.rb
 - spec/openai/embedding_spec.rb
+- spec/openai/files_spec.rb
+- spec/openai/images_spec.rb
+- spec/openai/responses_spec.rb
 - spec/readme_spec.rb
 - spec/setup.rb
 homepage: https://github.com/llmrb/llm
 licenses:
-- MIT
+- 0BSD
 metadata:
   homepage_uri: https://github.com/llmrb/llm
   source_code_uri: https://github.com/llmrb/llm
-  changelog_uri: https://github.com/llmrb/llm/blob/main/CHANGELOG.md
 post_install_message:
 rdoc_options: []
 require_paths:
data/LICENSE.txt
DELETED
@@ -1,21 +0,0 @@
|
|
1
|
-
The MIT License (MIT)
|
2
|
-
|
3
|
-
Copyright (c) 2024 Antar Azri
|
4
|
-
|
5
|
-
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
-
of this software and associated documentation files (the "Software"), to deal
|
7
|
-
in the Software without restriction, including without limitation the rights
|
8
|
-
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
-
copies of the Software, and to permit persons to whom the Software is
|
10
|
-
furnished to do so, subject to the following conditions:
|
11
|
-
|
12
|
-
The above copyright notice and this permission notice shall be included in
|
13
|
-
all copies or substantial portions of the Software.
|
14
|
-
|
15
|
-
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
-
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
-
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
-
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
-
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
-
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21
|
-
THE SOFTWARE.
|
data/lib/llm/conversation.rb
DELETED
@@ -1,50 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # {LLM::Conversation LLM::Conversation} provides a conversation
-  # object that maintains a thread of messages that act as the
-  # context of the conversation.
-  #
-  # @example
-  #   llm = LLM.openai(key)
-  #   bot = llm.chat("What is the capital of France?")
-  #   bot.chat("What should we eat in Paris?")
-  #   bot.chat("What is the weather like in Paris?")
-  #   p bot.messages.map { [_1.role, _1.content] }
-  class Conversation
-    ##
-    # @return [Array<LLM::Message>]
-    attr_reader :messages
-
-    ##
-    # @param [LLM::Provider] provider
-    #  A provider
-    def initialize(provider, params = {})
-      @provider = provider
-      @params = params
-      @messages = []
-    end
-
-    ##
-    # @param prompt (see LLM::Provider#prompt)
-    # @return [LLM::Conversation]
-    def chat(prompt, role = :user, **params)
-      tap do
-        completion = @provider.complete(prompt, role, **@params.merge(params.merge(messages:)))
-        @messages.concat [Message.new(role, prompt), completion.choices[0]]
-      end
-    end
-
-    ##
-    # @param [#to_s] role
-    #  The role of the last message.
-    #  Defaults to the LLM's assistant role (eg "assistant" or "model")
-    # @return [LLM::Message]
-    #  The last message for the given role
-    def last_message(role: @provider.assistant_role)
-      messages.reverse_each.find { _1.role == role.to_s }
-    end
-    alias_method :recent_message, :last_message
-  end
-end
data/lib/llm/lazy_conversation.rb
DELETED
@@ -1,51 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  require_relative "message_queue"
-
-  ##
-  # {LLM::LazyConversation LLM::LazyConversation} provides a
-  # conversation object that allows input prompts to be queued
-  # and only sent to the LLM when a response is needed.
-  #
-  # @example
-  #   llm = LLM.openai(key)
-  #   bot = llm.chat("Be a helpful weather assistant", :system)
-  #   bot.chat("What's the weather like in Rio?")
-  #   bot.chat("What's the weather like in Algiers?")
-  #   bot.messages.each do |message|
-  #     # A single request is made at this point
-  #   end
-  class LazyConversation
-    ##
-    # @return [LLM::MessageQueue]
-    attr_reader :messages
-
-    ##
-    # @param [LLM::Provider] provider
-    #  A provider
-    def initialize(provider, params = {})
-      @provider = provider
-      @params = params
-      @messages = LLM::MessageQueue.new(provider)
-    end
-
-    ##
-    # @param prompt (see LLM::Provider#prompt)
-    # @return [LLM::Conversation]
-    def chat(prompt, role = :user, **params)
-      tap { @messages << [prompt, role, @params.merge(params)] }
-    end
-
-    ##
-    # @param [#to_s] role
-    #  The role of the last message.
-    #  Defaults to the LLM's assistant role (eg "assistant" or "model")
-    # @return [LLM::Message]
-    #  The last message for the given role
-    def last_message(role: @provider.assistant_role)
-      messages.reverse_each.find { _1.role == role.to_s }
-    end
-    alias_method :recent_message, :last_message
-  end
-end
data/lib/llm/message_queue.rb
DELETED
@@ -1,47 +0,0 @@
|
|
1
|
-
# frozen_string_literal: true
|
2
|
-
|
3
|
-
module LLM
|
4
|
-
##
|
5
|
-
# {LLM::MessageQueue LLM::MessageQueue} provides an Enumerable
|
6
|
-
# object that yields each message in a conversation on-demand,
|
7
|
-
# and only sends a request to the LLM when a response is needed.
|
8
|
-
class MessageQueue
|
9
|
-
include Enumerable
|
10
|
-
|
11
|
-
##
|
12
|
-
# @param [LLM::Provider] provider
|
13
|
-
# @return [LLM::MessageQueue]
|
14
|
-
def initialize(provider)
|
15
|
-
@provider = provider
|
16
|
-
@messages = []
|
17
|
-
end
|
18
|
-
|
19
|
-
##
|
20
|
-
# @yield [LLM::Message]
|
21
|
-
# Yields each message in the conversation thread
|
22
|
-
# @raise (see LLM::Provider#complete)
|
23
|
-
# @return [void]
|
24
|
-
def each
|
25
|
-
@messages = complete! unless @messages.grep(LLM::Message).size == @messages.size
|
26
|
-
@messages.each { yield(_1) }
|
27
|
-
end
|
28
|
-
|
29
|
-
##
|
30
|
-
# @param message [Object]
|
31
|
-
# A message to add to the conversation thread
|
32
|
-
# @return [void]
|
33
|
-
def <<(message)
|
34
|
-
@messages << message
|
35
|
-
end
|
36
|
-
alias_method :push, :<<
|
37
|
-
|
38
|
-
private
|
39
|
-
|
40
|
-
def complete!
|
41
|
-
prompt, role, params = @messages[-1]
|
42
|
-
rest = @messages[0..-2].map { (Array === _1) ? LLM::Message.new(_1[1], _1[0]) : _1 }
|
43
|
-
comp = @provider.complete(prompt, role, **params.merge(messages: rest)).choices.last
|
44
|
-
[*rest, LLM::Message.new(role, prompt), comp]
|
45
|
-
end
|
46
|
-
end
|
47
|
-
end
|
@@ -1,92 +0,0 @@
|
|
1
|
-
# frozen_string_literal: true
|
2
|
-
|
3
|
-
require "setup"
|
4
|
-
|
5
|
-
RSpec.describe LLM::LazyConversation do
|
6
|
-
let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
|
7
|
-
let(:prompt) { "Keep your answers short and concise, and provide three answers to the three questions" }
|
8
|
-
|
9
|
-
context "with gemini",
|
10
|
-
vcr: {cassette_name: "gemini/lazy_conversation/successful_response"} do
|
11
|
-
let(:provider) { LLM.gemini(token) }
|
12
|
-
let(:conversation) { described_class.new(provider) }
|
13
|
-
|
14
|
-
context "when given a thread of messages" do
|
15
|
-
subject(:message) { conversation.messages.to_a[-1] }
|
16
|
-
|
17
|
-
before do
|
18
|
-
conversation.chat prompt
|
19
|
-
conversation.chat "What is 3+2 ?"
|
20
|
-
conversation.chat "What is 5+5 ?"
|
21
|
-
conversation.chat "What is 5+7 ?"
|
22
|
-
end
|
23
|
-
|
24
|
-
it "maintains a conversation" do
|
25
|
-
is_expected.to have_attributes(
|
26
|
-
role: "model",
|
27
|
-
content: "5\n10\n12\n"
|
28
|
-
)
|
29
|
-
end
|
30
|
-
end
|
31
|
-
end
|
32
|
-
|
33
|
-
context "with openai" do
|
34
|
-
let(:provider) { LLM.openai(token) }
|
35
|
-
let(:conversation) { described_class.new(provider) }
|
36
|
-
|
37
|
-
context "when given a thread of messages",
|
38
|
-
vcr: {cassette_name: "openai/lazy_conversation/successful_response"} do
|
39
|
-
subject(:message) { conversation.recent_message }
|
40
|
-
|
41
|
-
before do
|
42
|
-
conversation.chat prompt, :system
|
43
|
-
conversation.chat "What is 3+2 ?"
|
44
|
-
conversation.chat "What is 5+5 ?"
|
45
|
-
conversation.chat "What is 5+7 ?"
|
46
|
-
end
|
47
|
-
|
48
|
-
it "maintains a conversation" do
|
49
|
-
is_expected.to have_attributes(
|
50
|
-
role: "assistant",
|
51
|
-
content: "1. 5 \n2. 10 \n3. 12 "
|
52
|
-
)
|
53
|
-
end
|
54
|
-
end
|
55
|
-
|
56
|
-
context "when given a specific model",
|
57
|
-
vcr: {cassette_name: "openai/lazy_conversation/successful_response_o3_mini"} do
|
58
|
-
let(:conversation) { described_class.new(provider, model: provider.models["o3-mini"]) }
|
59
|
-
|
60
|
-
it "maintains the model throughout a conversation" do
|
61
|
-
conversation.chat(prompt, :system)
|
62
|
-
expect(conversation.recent_message.extra[:completion].model).to eq("o3-mini-2025-01-31")
|
63
|
-
conversation.chat("What is 5+5?")
|
64
|
-
expect(conversation.recent_message.extra[:completion].model).to eq("o3-mini-2025-01-31")
|
65
|
-
end
|
66
|
-
end
|
67
|
-
end
|
68
|
-
|
69
|
-
context "with ollama",
|
70
|
-
vcr: {cassette_name: "ollama/lazy_conversation/successful_response"} do
|
71
|
-
let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
|
72
|
-
let(:conversation) { described_class.new(provider) }
|
73
|
-
|
74
|
-
context "when given a thread of messages" do
|
75
|
-
subject(:message) { conversation.recent_message }
|
76
|
-
|
77
|
-
before do
|
78
|
-
conversation.chat prompt, :system
|
79
|
-
conversation.chat "What is 3+2 ?"
|
80
|
-
conversation.chat "What is 5+5 ?"
|
81
|
-
conversation.chat "What is 5+7 ?"
|
82
|
-
end
|
83
|
-
|
84
|
-
it "maintains a conversation" do
|
85
|
-
is_expected.to have_attributes(
|
86
|
-
role: "assistant",
|
87
|
-
content: "Here are the calculations:\n\n1. 3 + 2 = 5\n2. 5 + 5 = 10\n3. 5 + 7 = 12"
|
88
|
-
)
|
89
|
-
end
|
90
|
-
end
|
91
|
-
end
|
92
|
-
end
|