ollama_chat 0.0.43 → 0.0.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: cc5f503ab89f249029f367c0bbd92e26494f7ebaaa1514b90fe20160c099b51f
4
- data.tar.gz: f13c4a61429743b6265fb8b2fa836f58966e4cccf9092d311daa28cade51f8e9
3
+ metadata.gz: d0c9a525754d546b6d7cd0fa88e20c4767bd1dae51313d1f2c2409c06762f4b0
4
+ data.tar.gz: fc9ddde83495908f965c9225246b6668b01c40fe7391fb83c54beb3ce46321d2
5
5
  SHA512:
6
- metadata.gz: 4560b3a2cc81610c3fca8c5f24167957a32b1c0521e241aa77fb87b701decbd3640eb04e9f63a487a668f78de077d733e1a0fb1b60ba8a08b0e0f06b5c82bd96
7
- data.tar.gz: 12c982a3250583e5bb689941fe680b8632a9f3526d1e6548e261e5e7ffe738649c8f9d98bb07224faa5fbf5745bb0eb9d4988aae647e50f5075b121e1ca534f4
6
+ metadata.gz: 97dc5c42b59b3f0d7abf50a0647ff3a3c79681e88e4e9b61c37f706fadc450731ed0240795362e6a061f07ada56502934f5992e796c602b01c27f3b20e6c575f
7
+ data.tar.gz: 34c1f4dded9b54155b407913290354707cb63104b3ea126ca438a245d03458ee868962068f85398954cf6be0ff17f826bdc627985168817d55642cb231732459
data/CHANGES.md CHANGED
@@ -1,5 +1,34 @@
1
1
  # Changes
2
2
 
3
+ ## 2025-12-10 v0.0.44
4
+
5
+ - Fixed `stream` option in `spec/ollama_chat/follow_chat_spec.rb` from `on?
6
+ true` to `on?: true`
7
+ - Extracted `prepare_last_message` method to handle content and thinking text
8
+ formatting with markdown and annotation support
9
+ - Introduced `display_output` method that uses `use_pager` from `MessageList`
10
+ to handle large outputs gracefully
11
+ - Modified `FollowChat#process_response` to conditionally call
12
+ `display_formatted_terminal_output` based on `@chat.stream.on?`
13
+ - Added `use_pager` method to `MessageList` that wraps output blocks with pager
14
+ context using `Kramdown::ANSI::Pager`
15
+ - Updated conditional logic in `follow_chat.rb` to properly distinguish between
16
+ streaming and non-streaming display modes
17
+ - Updated `kramdown-ansi` dependency version from `~> 0.2` to `~> 0.3` in
18
+ `Rakefile` and `ollama_chat.gemspec`
19
+ - Added `truncate_for_terminal` method to `OllamaChat::FollowChat` class that
20
+ limits text to a specified number of lines
21
+ - Modified `display_formatted_terminal_output` to use `truncate_for_terminal`
22
+ when processing content and thinking text
23
+ - Updated spec file to expose the `FollowChat` instance for testing
24
+ - Added comprehensive tests for the new `truncate_for_terminal` method covering
25
+ various line count scenarios
26
+ - The method handles edge cases like negative and zero line counts by returning
27
+ the last line
28
+ - Uses `Tins::Terminal.lines` as default maximum lines parameter
29
+ - The implementation ensures terminal output stays within display limits while
30
+ preserving content integrity
31
+
3
32
  ## 2025-12-09 v0.0.43
4
33
 
5
34
  - Added retry logic in `interact_with_user` method to handle
data/Rakefile CHANGED
@@ -45,7 +45,7 @@ GemHadar do
45
45
  dependency 'redis', '~> 5.0'
46
46
  dependency 'mime-types', '~> 3.0'
47
47
  dependency 'reverse_markdown', '~> 3.0'
48
- dependency 'kramdown-ansi', '~> 0.2'
48
+ dependency 'kramdown-ansi', '~> 0.3'
49
49
  dependency 'complex_config', '~> 0.22', '>= 0.22.2'
50
50
  dependency 'tins', '~> 1.47'
51
51
  dependency 'search_ui', '~> 0.0'
@@ -68,7 +68,13 @@ class OllamaChat::FollowChat
68
68
  if response&.message&.role == 'assistant'
69
69
  ensure_assistant_response_exists
70
70
  update_last_message(response)
71
- display_formatted_terminal_output
71
+ if @chat.stream.on?
72
+ display_formatted_terminal_output
73
+ else
74
+ if display_output
75
+ display_formatted_terminal_output
76
+ end
77
+ end
72
78
  @say.call(response)
73
79
  end
74
80
 
@@ -79,6 +85,25 @@ class OllamaChat::FollowChat
79
85
 
80
86
  private
81
87
 
88
+ # The truncate_for_terminal method processes text to fit within a specified
89
+ # number of lines.
90
+ #
91
+ # This method takes a text string and trims it to ensure it doesn't exceed
92
+ # the maximum number of lines allowed for terminal display. If the text
93
+ # exceeds the limit, only
94
+ # the last N lines are retained where N equals the maximum lines parameter.
95
+ #
96
+ # @param text [ String ] the text content to be processed
97
+ # @param max_lines [ Integer ] the maximum number of lines allowed (defaults to terminal lines)
98
+ #
99
+ # @return [ String ] the text truncated to fit within the specified line limit
100
+ def truncate_for_terminal(text, max_lines: Tins::Terminal.lines)
101
+ max_lines = max_lines.clamp(1..)
102
+ lines = text.lines
103
+ return text if lines.size <= max_lines
104
+ lines[-max_lines..-1].join('')
105
+ end
106
+
82
107
  # The ensure_assistant_response_exists method ensures that the last message
83
108
  # in the conversation is from the assistant role.
84
109
  #
@@ -111,6 +136,46 @@ class OllamaChat::FollowChat
111
136
  end
112
137
  end
113
138
 
139
+ # The prepare_last_message method processes and formats content and thinking
140
+ # annotations for display.
141
+ #
142
+ # This method prepares the final content and thinking text by applying
143
+ # appropriate formatting based on the chat's markdown and think loud
144
+ # settings. It handles parsing of content through Kramdown::ANSI when
145
+ # markdown is enabled, and applies annotation
146
+ # formatting to both content and thinking text according to the chat's
147
+ # configuration.
148
+ #
149
+ # @return [Array<String, String>] an array containing the processed content
150
+ # and thinking text
151
+ # @return [Array<String, nil>] an array containing the processed content and
152
+ # nil if thinking is disabled
153
+ def prepare_last_message
154
+ content, thinking = @messages.last.content, @messages.last.thinking
155
+ if @chat.markdown.on?
156
+ content = talk_annotate { truncate_for_terminal @chat.kramdown_ansi_parse(content) }
157
+ if @chat.think_loud?
158
+ thinking = think_annotate { truncate_for_terminal @chat.kramdown_ansi_parse(thinking) }
159
+ end
160
+ else
161
+ content = talk_annotate { content }
162
+ @chat.think? and thinking = think_annotate { thinking }
163
+ end
164
+ return content, thinking
165
+ end
166
+
167
+ # The last_message_with_user method constructs a formatted message array by
168
+ # combining user information, newline characters, thinking annotations, and
169
+ # content for display in the terminal output.
170
+ #
171
+ # @return [ Array ] an array containing the user identifier, newline
172
+ # character, thinking annotation (if present), and content formatted for
173
+ # terminal display
174
+ def last_message_with_user
175
+ content, thinking = prepare_last_message
176
+ [ @user, ?\n, thinking, content ]
177
+ end
178
+
114
179
  # The display_formatted_terminal_output method formats and outputs the
115
180
  # terminal content by processing the last message's content and thinking,
116
181
  # then prints it to the output. It handles markdown parsing and annotation
@@ -119,19 +184,21 @@ class OllamaChat::FollowChat
119
184
  # thinking modes are enabled to determine how to process and display the
120
185
  # content.
121
186
  def display_formatted_terminal_output
122
- content, thinking = @messages.last.content, @messages.last.thinking
123
- if @chat.markdown.on?
124
- content = talk_annotate { @chat.kramdown_ansi_parse(content) }
125
- if @chat.think_loud?
126
- thinking = think_annotate { @chat.kramdown_ansi_parse(thinking) }
127
- end
128
- else
129
- content = talk_annotate { content }
130
- @chat.think? and thinking = think_annotate { thinking }
187
+ @output.print(*([ clear_screen, move_home, *last_message_with_user ].compact))
188
+ end
189
+
190
+ # The display_output method shows the last message in the conversation.
191
+ #
192
+ # This method delegates to the messages object's use_pager method, which
193
+ # buffers the last message and pipes it through a pager when it is too large.
194
+ # It is typically used to provide feedback to the user about the last
195
+ # response from the assistant.
196
+ # @return [ nil, String ] the pager command or nil if no paging was
197
+ # performed.
198
+ def display_output
199
+ @messages.use_pager do |output|
200
+ output.print(*last_message_with_user)
131
201
  end
132
- @output.print(*([
133
- clear_screen, move_home, @user, ?\n, thinking, content
134
- ].compact))
135
202
  end
136
203
 
137
204
  # The eval_stats method processes response statistics and formats them into a
@@ -290,6 +290,22 @@ class OllamaChat::MessageList
290
290
  end.to_s
291
291
  end
292
292
 
293
+ # The use_pager method wraps the given block with a pager context.
294
+ # If the output would exceed the terminal's line capacity, it pipes the content
295
+ # through an appropriate pager command (like 'less' or 'more').
296
+ #
297
+ # @yield A block that yields an IO object to write output to
298
+ # @yieldparam [IO] the IO object to write to
299
+ def use_pager
300
+ command = determine_pager_command
301
+ output_buffer = StringIO.new
302
+ yield output_buffer
303
+ messages = output_buffer.string
304
+ Kramdown::ANSI::Pager.pager(command:, lines: messages.count(?\n)) do |output|
305
+ output.puts messages
306
+ end
307
+ end
308
+
293
309
  private
294
310
 
295
311
  # The config method provides access to the chat configuration object.
@@ -311,22 +327,6 @@ class OllamaChat::MessageList
311
327
  OllamaChat::EnvConfig::PAGER?
312
328
  end
313
329
 
314
- # The use_pager method wraps the given block with a pager context.
315
- # If the output would exceed the terminal's line capacity, it pipes the content
316
- # through an appropriate pager command (like 'less' or 'more').
317
- #
318
- # @yield A block that yields an IO object to write output to
319
- # @yieldparam [IO] the IO object to write to
320
- def use_pager
321
- command = determine_pager_command
322
- output_buffer = StringIO.new
323
- yield output_buffer
324
- messages = output_buffer.string
325
- Kramdown::ANSI::Pager.pager(command:, lines: messages.count(?\n)) do |output|
326
- output.puts messages
327
- end
328
- end
329
-
330
330
  # The message_text_for method generates formatted text representation of a
331
331
  # message including its role, content, thinking annotations, and associated
332
332
  # images.
@@ -1,6 +1,6 @@
1
1
  module OllamaChat
2
2
  # OllamaChat version
3
- VERSION = '0.0.43'
3
+ VERSION = '0.0.44'
4
4
  VERSION_ARRAY = VERSION.split('.').map(&:to_i) # :nodoc:
5
5
  VERSION_MAJOR = VERSION_ARRAY[0] # :nodoc:
6
6
  VERSION_MINOR = VERSION_ARRAY[1] # :nodoc:
data/ollama_chat.gemspec CHANGED
@@ -1,9 +1,9 @@
1
1
  # -*- encoding: utf-8 -*-
2
- # stub: ollama_chat 0.0.43 ruby lib
2
+ # stub: ollama_chat 0.0.44 ruby lib
3
3
 
4
4
  Gem::Specification.new do |s|
5
5
  s.name = "ollama_chat".freeze
6
- s.version = "0.0.43".freeze
6
+ s.version = "0.0.44".freeze
7
7
 
8
8
  s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
9
9
  s.require_paths = ["lib".freeze]
@@ -41,7 +41,7 @@ Gem::Specification.new do |s|
41
41
  s.add_runtime_dependency(%q<redis>.freeze, ["~> 5.0".freeze])
42
42
  s.add_runtime_dependency(%q<mime-types>.freeze, ["~> 3.0".freeze])
43
43
  s.add_runtime_dependency(%q<reverse_markdown>.freeze, ["~> 3.0".freeze])
44
- s.add_runtime_dependency(%q<kramdown-ansi>.freeze, ["~> 0.2".freeze])
44
+ s.add_runtime_dependency(%q<kramdown-ansi>.freeze, ["~> 0.3".freeze])
45
45
  s.add_runtime_dependency(%q<complex_config>.freeze, ["~> 0.22".freeze, ">= 0.22.2".freeze])
46
46
  s.add_runtime_dependency(%q<tins>.freeze, ["~> 1.47".freeze])
47
47
  s.add_runtime_dependency(%q<search_ui>.freeze, ["~> 0.0".freeze])
@@ -9,11 +9,11 @@ describe OllamaChat::FollowChat do
9
9
 
10
10
  let :chat do
11
11
  double('Chat', markdown: double(on?: false), think_loud?: true,
12
- think?: false, debug: false)
12
+ think?: false, debug: false, stream: double(on?: true))
13
13
  end
14
14
 
15
15
  let :follow_chat do
16
- described_class.new(chat:, messages:, output:)
16
+ described_class.new(chat:, messages:, output:).expose
17
17
  end
18
18
 
19
19
  let :output do
@@ -46,4 +46,33 @@ describe OllamaChat::FollowChat do
46
46
  expect(output).to receive(:puts).with("", /eval_duration/)
47
47
  follow_chat.call(response)
48
48
  end
49
+
50
+ context '#truncate_for_terminal' do
51
+ it 'can truncate text for 5 lines' do
52
+ text = (?A..?Z).to_a.join(?\n)
53
+ expect(follow_chat.truncate_for_terminal(text, max_lines: 5)).to eq(
54
+ (?V..?Z).to_a.join(?\n)
55
+ )
56
+ end
57
+
58
+ it 'can truncate text for -1 lines' do
59
+ text = (?A..?Z).to_a.join(?\n)
60
+ expect(follow_chat.truncate_for_terminal(text, max_lines: -1)).to eq(?Z)
61
+ end
62
+
63
+ it 'can truncate text for 0 lines' do
64
+ text = (?A..?Z).to_a.join(?\n)
65
+ expect(follow_chat.truncate_for_terminal(text, max_lines: 0)).to eq(?Z)
66
+ end
67
+
68
+ it 'can truncate text for 1 lines' do
69
+ text = (?A..?Z).to_a.join(?\n)
70
+ expect(follow_chat.truncate_for_terminal(text, max_lines: 1)).to eq(?Z)
71
+ end
72
+
73
+ it 'can truncate text for 42 lines' do
74
+ text = (?A..?Z).to_a.join(?\n)
75
+ expect(follow_chat.truncate_for_terminal(text, max_lines: 42)).to eq(text)
76
+ end
77
+ end
49
78
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: ollama_chat
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.43
4
+ version: 0.0.44
5
5
  platform: ruby
6
6
  authors:
7
7
  - Florian Frank
@@ -259,14 +259,14 @@ dependencies:
259
259
  requirements:
260
260
  - - "~>"
261
261
  - !ruby/object:Gem::Version
262
- version: '0.2'
262
+ version: '0.3'
263
263
  type: :runtime
264
264
  prerelease: false
265
265
  version_requirements: !ruby/object:Gem::Requirement
266
266
  requirements:
267
267
  - - "~>"
268
268
  - !ruby/object:Gem::Version
269
- version: '0.2'
269
+ version: '0.3'
270
270
  - !ruby/object:Gem::Dependency
271
271
  name: complex_config
272
272
  requirement: !ruby/object:Gem::Requirement