ollama_chat 0.0.10 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d7e85d325cc40d5cb5dbe01e9971c22da13b4afada35fbb17f130d9430b1f508
-  data.tar.gz: 275b88f332344e7d1664e58c60540243d6cf512b2ba78547fbe784cfd02a6acc
+  metadata.gz: c11573f3fe926d4b3a52336ae1fbae81587365725b1fe3724406b0d0d6fd2bf1
+  data.tar.gz: 756a07f1f4eb9056b5b2ff8441a034022d31df4c5b6c199c409c34438dad9aa7
 SHA512:
-  metadata.gz: 3b3a8bbf023960e38a33f4f2c4fc1e75e7e7be4c11213144975b1ad370efd9a71e4d61c76a7de3f3340d80eaa77d53882184f1f7635d7d5f678753e2e2c0dc6f
-  data.tar.gz: 69f57301a1308f75d55842630f7902172c8b67cf37d0ea2aad7564423be0567beb9e70bdff81b6e485cf2b3700040d6039e7c170d6a6bc4ebf65e1bf32bb32f9
+  metadata.gz: a58029a24c550ee8b1b934ea0c995fe95ce6015fd23af0a503b939dfa8fb681e451293b5b4437bd04fb036b7d6643b4c30468c1c129a5bb0a546247e3b51a0cf
+  data.tar.gz: 0bf0ea4532ea5dea7977cb5ef97505c336082c60f67f0be03ee1ffda815de76fd8fa23625b1e88b2595434476f6258a59e251150f1aa7705de8881a70f2a12f9
data/CHANGES.md CHANGED
@@ -1,5 +1,31 @@
 # Changes
 
+## 2025-06-01 v0.0.12
+
+* **API Compatibility**: Enforces Ollama API version `0.9.0` or higher to
+  support new features like the `thinking` attribute.
+* **Think Output Splitting**: When `think` is enabled, the API response is
+  split into `content` and `thinking` fields, enabled by the new API version.
+* **Think Mode Simplified**: The previous multi-mode `think_mode` system has
+  been replaced with a boolean `think` switch for cleaner, more intuitive
+  control.
+
+## 2025-06-01 v0.0.11
+
+* **Think Mode Implementation**:
+  + Introduced the `@think_mode` attribute to read the think mode setting from the config.
+  + Implemented the `remove_think_blocks` method to filter think blocks out of the chat messages sent to the LLM model.
+  + Added conditional logic based on the `@think_mode` value to handle the different modes.
+* **User Interface Improvements**:
+  + Added the `/think_mode` command to help users understand the think mode options.
+  + Updated the session output to include the current think mode.
+  + Added a think mode chooser to OllamaChat::Dialog, allowing users to select their preferred think mode.
+* **Output Handling Enhancements**:
+  + Improved markdown handling for think blocks in the OllamaChat::FollowChat class.
+  + Modified output to print clear screen, move home, and user info before printing content.
+* **Configuration Updates**:
+  + Added the `think_mode` key with the value `"display"` to `default_config.yml`.
+
 ## 2025-05-28 v0.0.10
 
 * Simplify and improve command handling logic.
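The API-compatibility entry above boils down to a semantic version comparison. As a minimal standalone sketch of that guard, assuming only the tins gem (the `0.9.3` value is an invented example):

```ruby
require 'tins/xt/string_version'

# String#version from tins yields comparable version objects, so '0.10.0'
# sorts above '0.9.0' where a plain lexicographic string comparison fails.
server_version = '0.9.3' # invented example value
if server_version.version < '0.9.0'.version
  raise ArgumentError, 'require ollama API version 0.9.0 or higher'
end
puts '0.10.0'.version > '0.9.0'.version # => true
puts '0.10.0' > '0.9.0'                 # => false (plain string comparison)
```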
data/README.md CHANGED
@@ -121,7 +121,7 @@ The following commands can be given inside the chat, if prefixed by a `/`:
 /markdown                           toggle markdown output
 /stream                             toggle stream output
 /location                           toggle location submission
-/voice( change)                     toggle voice output or change the voice
+/voice [change]                     toggle voice output or change the voice
 /list [n]                           list the last n / all conversation exchanges
 /clear [messages|links|history]     clear all messages, links, or the chat history (defaults to messages)
 /clobber                            clear the conversation, links, and collection
@@ -133,6 +133,7 @@ The following commands can be given inside the chat, if prefixed by a `/`:
 /info                               show information for current session
 /config                             output current configuration ("/Users/flori/.config/ollama_chat/config.yml")
 /document_policy                    pick a scan policy for document references
+/think                              enable ollama think setting for models
 /import source                      import the source's content
 /summarize [n] source               summarize the source's content in n words
 /embedding                          toggle embedding paused or not
data/Rakefile CHANGED
@@ -30,7 +30,7 @@ GemHadar do
   executables << 'ollama_chat' << 'ollama_chat_send'
 
   dependency 'excon', '~> 1.0'
-  dependency 'ollama-ruby', '~> 1.0'
+  dependency 'ollama-ruby', '~> 1.2'
   dependency 'documentrix', '~> 0.0', '>= 0.0.2'
   dependency 'rss', '~> 0.3'
   dependency 'term-ansicolor', '~> 1.11'
data/VERSION CHANGED
@@ -1 +1 @@
-0.0.10
+0.0.12
data/docker-compose.yml CHANGED
@@ -1,7 +1,7 @@
 services:
   redis:
     container_name: redis
-    image: valkey/valkey:7.2.9-alpine
+    image: valkey/valkey:8.1.1-alpine
     restart: unless-stopped
     ports: [ "127.0.0.1:9736:6379" ]
     volumes:
data/lib/ollama_chat/chat.rb CHANGED
@@ -1,5 +1,6 @@
 require 'tins'
 require 'tins/secure_write'
+require 'tins/xt/string_version'
 require 'json'
 require 'term/ansicolor'
 require 'reline'
@@ -27,7 +28,7 @@ class OllamaChat::Chat
   include OllamaChat::Dialog
   include OllamaChat::Information
   include OllamaChat::Clipboard
-  include OllamaChat::MessageType
+  include OllamaChat::MessageFormat
   include OllamaChat::History
   include OllamaChat::ServerSocket
 
@@ -44,7 +45,9 @@ class OllamaChat::Chat
       debug: config.debug,
       user_agent:
     )
-    server_version
+    if server_version.version < '0.9.0'.version
+      raise ArgumentError, 'require ollama API version 0.9.0 or higher'
+    end
     @document_policy = config.document_policy
     @model = choose_model(@opts[?m], config.model.name)
     @model_options = Ollama::Options[config.model.options]
@@ -153,7 +156,7 @@ class OllamaChat::Chat
     when %r(^/regenerate$)
       if content = messages.second_last&.content
         content.gsub!(/\nConsider these chunks for your answer.*\z/, '')
-        messages.drop(2)
+        messages.drop(1)
       else
         STDOUT.puts "Not enough messages in this conversation."
         return :redo
@@ -195,6 +198,9 @@ class OllamaChat::Chat
     when %r(^/document_policy$)
       choose_document_policy
       :next
+    when %r(^/think$)
+      think.toggle
+      :next
     when %r(^/import\s+(.+))
       @parse_content = false
       import($1) or :next
@@ -246,7 +252,6 @@ class OllamaChat::Chat
       fetch_source(url) { |url_io| embed_source(url_io, url) }
     end
     urls_summarized = urls.map { summarize(_1) }
-    query = $2.inspect
     results = urls.zip(urls_summarized).
       map { |u, s| "%s as \n:%s" % [ u, s ] } * "\n\n"
     config.prompts.web % { query:, results: }
@@ -388,10 +393,11 @@ class OllamaChat::Chat
       voice: (@current_voice if voice.on?)
     )
     ollama.chat(
-      model: @model,
-      messages:,
-      options: @model_options,
-      stream: stream.on?,
+      model:    @model,
+      messages:,
+      options:  @model_options,
+      stream:   stream.on?,
+      think:    think.on?,
       &handler
     )
     if embedding.on? && !records.empty?
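For orientation, a self-contained sketch of the dispatch pattern the new `/think` command plugs into above; `ToggleStub` is invented scaffolding standing in for the chat's real think switch (defined in switches.rb further down):

```ruby
# Invented stand-in for the think switch: it tracks a boolean and announces
# changes, mirroring the msg hash defined for @think in switches.rb below.
class ToggleStub
  def initialize
    @on = false
  end

  def on? = @on

  def toggle
    @on = !@on
    puts "Thinking #{@on ? 'enabled' : 'disabled'}."
  end
end

think = ToggleStub.new

# The command loop matches the slash command and flips the switch, like the
# new `when %r(^/think$)` branch in the hunk above.
case '/think'
when %r(^/think$)
  think.toggle # prints "Thinking enabled."
end

p think.on? # => true; later forwarded as `think:` in the ollama.chat call
```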
data/lib/ollama_chat/follow_chat.rb CHANGED
@@ -2,7 +2,7 @@ class OllamaChat::FollowChat
   include Ollama
   include Ollama::Handlers::Concern
   include Term::ANSIColor
-  include OllamaChat::MessageType
+  include OllamaChat::MessageFormat
 
   def initialize(chat:, messages:, voice: nil, output: STDOUT)
     super(output:)
@@ -14,34 +14,59 @@ class OllamaChat::FollowChat
   end
 
   def call(response)
-    OllamaChat::Chat.config.debug and jj response
+    debug_output(response)
+
     if response&.message&.role == 'assistant'
-      if @messages&.last&.role != 'assistant'
-        @messages << Message.new(role: 'assistant', content: '')
-        @user = message_type(@messages.last.images) + " " +
-          bold { color(111) { 'assistant:' } }
-        @output.puts @user unless @chat.markdown.on?
-      end
-      if content = response.message&.content
-        content = content.gsub(%r(<think>), "💭\n").gsub(%r(</think>), "\n💬")
-      end
-      @messages.last.content << content
-      if @chat.markdown.on? and content = @messages.last.content.full?
-        markdown_content = Kramdown::ANSI.parse(content)
-        @output.print clear_screen, move_home, @user, ?\n, markdown_content
-      else
-        @output.print content
-      end
+      ensure_assistant_response_exists
+      update_last_message(response)
+      display_formatted_terminal_output
       @say.call(response)
     end
-    if response.done
-      @output.puts "", eval_stats(response)
-    end
+
+    output_eval_stats(response)
+
     self
   end
 
+  private
+
+  def ensure_assistant_response_exists
+    if @messages&.last&.role != 'assistant'
+      @messages << Message.new(
+        role: 'assistant',
+        content: '',
+        thinking: ('' if @chat.think.on?)
+      )
+      @user = message_type(@messages.last.images) + " " +
+        bold { color(111) { 'assistant:' } }
+    end
+  end
+
+  def update_last_message(response)
+    @messages.last.content << response.message&.content
+    if @chat.think.on? and response_thinking = response.message&.thinking.full?
+      @messages.last.thinking << response_thinking
+    end
+  end
+
+  def display_formatted_terminal_output
+    content, thinking = @messages.last.content, @messages.last.thinking
+    if @chat.markdown.on?
+      content = talk_annotate { Kramdown::ANSI.parse(content) }
+      if @chat.think.on?
+        thinking = think_annotate { Kramdown::ANSI.parse(thinking) }
+      end
+    else
+      content = talk_annotate { content }
+      @chat.think.on? and thinking = think_annotate { @messages.last.thinking.full? }
+    end
+    @output.print(*([
+      clear_screen, move_home, @user, ?\n, thinking, content
+    ].compact))
+  end
+
   def eval_stats(response)
-    eval_duration = response.eval_duration / 1e9
+    eval_duration        = response.eval_duration / 1e9
     prompt_eval_duration = response.prompt_eval_duration / 1e9
     stats_text = {
       eval_duration: Tins::Duration.new(eval_duration),
@@ -57,4 +82,13 @@ class OllamaChat::FollowChat
       Kramdown::ANSI::Width.wrap(stats_text, percentage: 90).gsub(/(?<!\A)^/, '  ')
     }
   end
+
+  def output_eval_stats(response)
+    response.done or return
+    @output.puts "", eval_stats(response)
+  end
+
+  def debug_output(response)
+    OllamaChat::Chat.config.debug and jj response
+  end
 end
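The refactored handler above consumes the split response shape that the changelog describes for Ollama 0.9+ with `think` enabled: `content` and `thinking` arrive as separate message fields instead of inline `<think>` tags, which is why the old gsub-based tag rewriting could be deleted. A hedged sketch of that shape (field names follow the diff; the JSON payload itself is invented):

```ruby
require 'json'
require 'ostruct'

# Invented example chunk: with think enabled, answer text and reasoning text
# are separate fields, unlike the <think>...</think> tags handled in v0.0.10.
chunk = JSON.parse(
  '{"message":{"role":"assistant","content":"Hi!","thinking":"Greet back."}}',
  object_class: OpenStruct
)
p chunk.message.content  # => "Hi!"         (appended to @messages.last.content)
p chunk.message.thinking # => "Greet back." (appended to @messages.last.thinking)
```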
data/lib/ollama_chat/information.rb CHANGED
@@ -48,6 +48,7 @@ module OllamaChat::Information
     stream.show
     location.show
     STDOUT.puts "Document policy for references in user text: #{bold{@document_policy}}"
+    STDOUT.puts "Thinking is #{bold(think.on? ? 'enabled' : 'disabled')}."
     STDOUT.puts "Currently selected search engine is #{bold(search_engine)}."
     if @voice.on?
       STDOUT.puts "Using voice #{bold{@current_voice}} to speak."
@@ -75,12 +76,13 @@ module OllamaChat::Information
     /info                               show information for current session
     /config                             output current configuration (#{@ollama_chat_config.filename.to_s.inspect})
     /document_policy                    pick a scan policy for document references
+    /think                              enable ollama think setting for models
     /import source                      import the source's content
     /summarize [n] source               summarize the source's content in n words
     /embedding                          toggle embedding paused or not
     /embed source                       embed the source's content
     /web [n] query                      query web search & return n or 1 results
-    /links( clear)                      display (or clear) links used in the chat
+    /links [clear]                      display (or clear) links used in the chat
     /save filename                      store conversation messages
     /load filename                      load conversation messages
     /quit                               to quit
data/lib/ollama_chat/message_format.rb ADDED
@@ -0,0 +1,23 @@
+module OllamaChat::MessageFormat
+  def message_type(images)
+    images.present? ? ?📸 : ?📨
+  end
+
+  def think_annotate(&block)
+    string = block.()
+    string.to_s.size == 0 and return
+    if @chat.think.on?
+      "💭\n#{string}\n"
+    end
+  end
+
+  def talk_annotate(&block)
+    string = block.()
+    string.to_s.size == 0 and return
+    if @chat.think.on?
+      "💬\n#{string}\n"
+    else
+      string
+    end
+  end
+end
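A usage sketch of the annotators in this new module; the stub classes are invented scaffolding, needed because the helpers read the including object's `@chat.think.on?`:

```ruby
require 'ollama_chat'

# Invented scaffolding: MessageFormat expects an @chat whose think switch
# responds to on?, so we fake the smallest object graph that satisfies it.
class ThinkSwitchStub
  def on? = true # pretend /think is enabled
end

class ChatStub
  def think = ThinkSwitchStub.new
end

class AnnotateDemo
  include OllamaChat::MessageFormat

  def initialize
    @chat = ChatStub.new
  end

  def run
    print think_annotate { "Weighing the options..." } # 💭-prefixed block
    print talk_annotate  { "Here is my answer." }      # 💬-prefixed block
    p(talk_annotate { "" }) # => nil; empty strings yield no annotation
  end
end

AnnotateDemo.new.run
```

With think disabled, `talk_annotate` passes content through unchanged and `think_annotate` returns nil, so plaintext output looks exactly as before.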
data/lib/ollama_chat/message_list.rb CHANGED
@@ -1,6 +1,6 @@
 class OllamaChat::MessageList
   include Term::ANSIColor
-  include OllamaChat::MessageType
+  include OllamaChat::MessageFormat
 
   # The initialize method sets up the message list for an OllamaChat session.
   #
data/lib/ollama_chat/ollama_chat_config/default_config.yml CHANGED
@@ -32,6 +32,7 @@ voice:
 markdown: true
 stream: true
 document_policy: importing
+think: false
 embedding:
   enabled: true
   model:
data/lib/ollama_chat/switches.rb CHANGED
@@ -49,10 +49,12 @@ module OllamaChat::Switches
     include CheckSwitch
   end
 
-  attr_reader :markdown
-
   attr_reader :stream
 
+  attr_reader :think
+
+  attr_reader :markdown
+
   attr_reader :voice
 
   attr_reader :embedding
@@ -64,21 +66,30 @@ module OllamaChat::Switches
   attr_reader :location
 
   def setup_switches(config)
-    @markdown = Switch.new(
-      :markdown,
+    @stream = Switch.new(
+      :stream,
       config:,
       msg: {
-        true => "Using #{italic{'ANSI'}} markdown to output content.",
-        false => "Using plaintext for outputting content.",
+        true => "Streaming enabled.",
+        false => "Streaming disabled.",
       }
     )
 
-    @stream = Switch.new(
-      :stream,
+    @think = Switch.new(
+      :think,
       config:,
       msg: {
-        true => "Streaming enabled.",
-        false => "Streaming disabled.",
+        true => "Thinking enabled.",
+        false => "Thinking disabled.",
+      }
+    )
+
+    @markdown = Switch.new(
+      :markdown,
+      config:,
+      msg: {
+        true => "Using #{italic{'ANSI'}} markdown to output content.",
+        false => "Using plaintext for outputting content.",
       }
     )
 
data/lib/ollama_chat/version.rb CHANGED
@@ -1,6 +1,6 @@
 module OllamaChat
   # OllamaChat version
-  VERSION = '0.0.10'
+  VERSION = '0.0.12'
   VERSION_ARRAY = VERSION.split('.').map(&:to_i) # :nodoc:
   VERSION_MAJOR = VERSION_ARRAY[0] # :nodoc:
   VERSION_MINOR = VERSION_ARRAY[1] # :nodoc:
data/lib/ollama_chat.rb CHANGED
@@ -5,7 +5,7 @@ require 'ollama'
 require 'documentrix'
 require 'ollama_chat/version'
 require 'ollama_chat/utils'
-require 'ollama_chat/message_type'
+require 'ollama_chat/message_format'
 require 'ollama_chat/ollama_chat_config'
 require 'ollama_chat/follow_chat'
 require 'ollama_chat/switches'
data/ollama_chat.gemspec CHANGED
@@ -1,9 +1,9 @@
 # -*- encoding: utf-8 -*-
-# stub: ollama_chat 0.0.10 ruby lib
+# stub: ollama_chat 0.0.12 ruby lib
 
 Gem::Specification.new do |s|
   s.name = "ollama_chat".freeze
-  s.version = "0.0.10".freeze
+  s.version = "0.0.12".freeze
 
   s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
   s.require_paths = ["lib".freeze]
@@ -12,8 +12,8 @@ Gem::Specification.new do |s|
   s.description = "The app provides a command-line interface (CLI) to an Ollama AI model,\nallowing users to engage in text-based conversations and generate\nhuman-like responses. Users can import data from local files or web pages,\nwhich are then processed through three different modes: fully importing the\ncontent into the conversation context, summarizing the information for\nconcise reference, or storing it in an embedding vector database for later\nretrieval based on the conversation.\n".freeze
   s.email = "flori@ping.de".freeze
   s.executables = ["ollama_chat".freeze, "ollama_chat_send".freeze]
-  s.extra_rdoc_files = ["README.md".freeze, "lib/ollama_chat.rb".freeze, "lib/ollama_chat/chat.rb".freeze, "lib/ollama_chat/clipboard.rb".freeze, "lib/ollama_chat/dialog.rb".freeze, "lib/ollama_chat/document_cache.rb".freeze, "lib/ollama_chat/follow_chat.rb".freeze, "lib/ollama_chat/history.rb".freeze, "lib/ollama_chat/information.rb".freeze, "lib/ollama_chat/message_list.rb".freeze, "lib/ollama_chat/message_type.rb".freeze, "lib/ollama_chat/model_handling.rb".freeze, "lib/ollama_chat/ollama_chat_config.rb".freeze, "lib/ollama_chat/parsing.rb".freeze, "lib/ollama_chat/server_socket.rb".freeze, "lib/ollama_chat/source_fetching.rb".freeze, "lib/ollama_chat/switches.rb".freeze, "lib/ollama_chat/utils.rb".freeze, "lib/ollama_chat/utils/cache_fetcher.rb".freeze, "lib/ollama_chat/utils/chooser.rb".freeze, "lib/ollama_chat/utils/fetcher.rb".freeze, "lib/ollama_chat/utils/file_argument.rb".freeze, "lib/ollama_chat/version.rb".freeze, "lib/ollama_chat/web_searching.rb".freeze]
-  s.files = [".all_images.yml".freeze, ".envrc".freeze, ".gitignore".freeze, "CHANGES.md".freeze, "Gemfile".freeze, "README.md".freeze, "Rakefile".freeze, "VERSION".freeze, "bin/ollama_chat".freeze, "bin/ollama_chat_send".freeze, "config/searxng/settings.yml".freeze, "docker-compose.yml".freeze, "lib/ollama_chat.rb".freeze, "lib/ollama_chat/chat.rb".freeze, "lib/ollama_chat/clipboard.rb".freeze, "lib/ollama_chat/dialog.rb".freeze, "lib/ollama_chat/document_cache.rb".freeze, "lib/ollama_chat/follow_chat.rb".freeze, "lib/ollama_chat/history.rb".freeze, "lib/ollama_chat/information.rb".freeze, "lib/ollama_chat/message_list.rb".freeze, "lib/ollama_chat/message_type.rb".freeze, "lib/ollama_chat/model_handling.rb".freeze, "lib/ollama_chat/ollama_chat_config.rb".freeze, "lib/ollama_chat/ollama_chat_config/default_config.yml".freeze, "lib/ollama_chat/parsing.rb".freeze, "lib/ollama_chat/server_socket.rb".freeze, "lib/ollama_chat/source_fetching.rb".freeze, "lib/ollama_chat/switches.rb".freeze, "lib/ollama_chat/utils.rb".freeze, "lib/ollama_chat/utils/cache_fetcher.rb".freeze, "lib/ollama_chat/utils/chooser.rb".freeze, "lib/ollama_chat/utils/fetcher.rb".freeze, "lib/ollama_chat/utils/file_argument.rb".freeze, "lib/ollama_chat/version.rb".freeze, "lib/ollama_chat/web_searching.rb".freeze, "ollama_chat.gemspec".freeze, "redis/redis.conf".freeze, "spec/assets/api_show.json".freeze, "spec/assets/api_tags.json".freeze, "spec/assets/api_version.json".freeze, "spec/assets/conversation.json".freeze, "spec/assets/duckduckgo.html".freeze, "spec/assets/example.atom".freeze, "spec/assets/example.csv".freeze, "spec/assets/example.html".freeze, "spec/assets/example.pdf".freeze, "spec/assets/example.ps".freeze, "spec/assets/example.rb".freeze, "spec/assets/example.rss".freeze, "spec/assets/example.xml".freeze, "spec/assets/kitten.jpg".freeze, "spec/assets/prompt.txt".freeze, "spec/assets/searxng.json".freeze, "spec/ollama_chat/chat_spec.rb".freeze, "spec/ollama_chat/clipboard_spec.rb".freeze, "spec/ollama_chat/follow_chat_spec.rb".freeze, "spec/ollama_chat/information_spec.rb".freeze, "spec/ollama_chat/message_list_spec.rb".freeze, "spec/ollama_chat/model_handling_spec.rb".freeze, "spec/ollama_chat/parsing_spec.rb".freeze, "spec/ollama_chat/source_fetching_spec.rb".freeze, "spec/ollama_chat/switches_spec.rb".freeze, "spec/ollama_chat/utils/cache_fetcher_spec.rb".freeze, "spec/ollama_chat/utils/fetcher_spec.rb".freeze, "spec/ollama_chat/utils/file_argument_spec.rb".freeze, "spec/ollama_chat/web_searching_spec.rb".freeze, "spec/spec_helper.rb".freeze, "tmp/.keep".freeze]
+  s.extra_rdoc_files = ["README.md".freeze, "lib/ollama_chat.rb".freeze, "lib/ollama_chat/chat.rb".freeze, "lib/ollama_chat/clipboard.rb".freeze, "lib/ollama_chat/dialog.rb".freeze, "lib/ollama_chat/document_cache.rb".freeze, "lib/ollama_chat/follow_chat.rb".freeze, "lib/ollama_chat/history.rb".freeze, "lib/ollama_chat/information.rb".freeze, "lib/ollama_chat/message_format.rb".freeze, "lib/ollama_chat/message_list.rb".freeze, "lib/ollama_chat/model_handling.rb".freeze, "lib/ollama_chat/ollama_chat_config.rb".freeze, "lib/ollama_chat/parsing.rb".freeze, "lib/ollama_chat/server_socket.rb".freeze, "lib/ollama_chat/source_fetching.rb".freeze, "lib/ollama_chat/switches.rb".freeze, "lib/ollama_chat/utils.rb".freeze, "lib/ollama_chat/utils/cache_fetcher.rb".freeze, "lib/ollama_chat/utils/chooser.rb".freeze, "lib/ollama_chat/utils/fetcher.rb".freeze, "lib/ollama_chat/utils/file_argument.rb".freeze, "lib/ollama_chat/version.rb".freeze, "lib/ollama_chat/web_searching.rb".freeze]
+  s.files = [".all_images.yml".freeze, ".envrc".freeze, ".gitignore".freeze, "CHANGES.md".freeze, "Gemfile".freeze, "README.md".freeze, "Rakefile".freeze, "VERSION".freeze, "bin/ollama_chat".freeze, "bin/ollama_chat_send".freeze, "config/searxng/settings.yml".freeze, "docker-compose.yml".freeze, "lib/ollama_chat.rb".freeze, "lib/ollama_chat/chat.rb".freeze, "lib/ollama_chat/clipboard.rb".freeze, "lib/ollama_chat/dialog.rb".freeze, "lib/ollama_chat/document_cache.rb".freeze, "lib/ollama_chat/follow_chat.rb".freeze, "lib/ollama_chat/history.rb".freeze, "lib/ollama_chat/information.rb".freeze, "lib/ollama_chat/message_format.rb".freeze, "lib/ollama_chat/message_list.rb".freeze, "lib/ollama_chat/model_handling.rb".freeze, "lib/ollama_chat/ollama_chat_config.rb".freeze, "lib/ollama_chat/ollama_chat_config/default_config.yml".freeze, "lib/ollama_chat/parsing.rb".freeze, "lib/ollama_chat/server_socket.rb".freeze, "lib/ollama_chat/source_fetching.rb".freeze, "lib/ollama_chat/switches.rb".freeze, "lib/ollama_chat/utils.rb".freeze, "lib/ollama_chat/utils/cache_fetcher.rb".freeze, "lib/ollama_chat/utils/chooser.rb".freeze, "lib/ollama_chat/utils/fetcher.rb".freeze, "lib/ollama_chat/utils/file_argument.rb".freeze, "lib/ollama_chat/version.rb".freeze, "lib/ollama_chat/web_searching.rb".freeze, "ollama_chat.gemspec".freeze, "redis/redis.conf".freeze, "spec/assets/api_show.json".freeze, "spec/assets/api_tags.json".freeze, "spec/assets/api_version.json".freeze, "spec/assets/conversation.json".freeze, "spec/assets/duckduckgo.html".freeze, "spec/assets/example.atom".freeze, "spec/assets/example.csv".freeze, "spec/assets/example.html".freeze, "spec/assets/example.pdf".freeze, "spec/assets/example.ps".freeze, "spec/assets/example.rb".freeze, "spec/assets/example.rss".freeze, "spec/assets/example.xml".freeze, "spec/assets/kitten.jpg".freeze, "spec/assets/prompt.txt".freeze, "spec/assets/searxng.json".freeze, "spec/ollama_chat/chat_spec.rb".freeze, "spec/ollama_chat/clipboard_spec.rb".freeze, "spec/ollama_chat/follow_chat_spec.rb".freeze, "spec/ollama_chat/information_spec.rb".freeze, "spec/ollama_chat/message_list_spec.rb".freeze, "spec/ollama_chat/model_handling_spec.rb".freeze, "spec/ollama_chat/parsing_spec.rb".freeze, "spec/ollama_chat/source_fetching_spec.rb".freeze, "spec/ollama_chat/switches_spec.rb".freeze, "spec/ollama_chat/utils/cache_fetcher_spec.rb".freeze, "spec/ollama_chat/utils/fetcher_spec.rb".freeze, "spec/ollama_chat/utils/file_argument_spec.rb".freeze, "spec/ollama_chat/web_searching_spec.rb".freeze, "spec/spec_helper.rb".freeze, "tmp/.keep".freeze]
   s.homepage = "https://github.com/flori/ollama_chat".freeze
   s.licenses = ["MIT".freeze]
   s.rdoc_options = ["--title".freeze, "OllamaChat - A command-line interface (CLI) for interacting with an Ollama AI model.".freeze, "--main".freeze, "README.md".freeze]
@@ -32,7 +32,7 @@ Gem::Specification.new do |s|
   s.add_development_dependency(%q<debug>.freeze, [">= 0".freeze])
   s.add_development_dependency(%q<simplecov>.freeze, [">= 0".freeze])
   s.add_runtime_dependency(%q<excon>.freeze, ["~> 1.0".freeze])
-  s.add_runtime_dependency(%q<ollama-ruby>.freeze, ["~> 1.0".freeze])
+  s.add_runtime_dependency(%q<ollama-ruby>.freeze, ["~> 1.2".freeze])
   s.add_runtime_dependency(%q<documentrix>.freeze, ["~> 0.0".freeze, ">= 0.0.2".freeze])
   s.add_runtime_dependency(%q<rss>.freeze, ["~> 0.3".freeze])
   s.add_runtime_dependency(%q<term-ansicolor>.freeze, ["~> 1.11".freeze])
data/spec/ollama_chat/chat_spec.rb CHANGED
@@ -297,6 +297,7 @@ RSpec.describe OllamaChat::Chat do
         Streaming|
         Location|
         Document\ policy|
+        Thinking\ is|
         Currently\ selected\ search\ engine
       /x
     ).at_least(1)
data/spec/ollama_chat/follow_chat_spec.rb CHANGED
@@ -8,7 +8,7 @@ RSpec.describe OllamaChat::FollowChat do
   end
 
   let :chat do
-    double('Chat', markdown: double(on?: false))
+    double('Chat', markdown: double(on?: false), think: double(on?: false))
   end
 
   let :follow_chat do
@@ -31,8 +31,10 @@ RSpec.describe OllamaChat::FollowChat do
   it 'can follow without markdown' do
     message = Ollama::Message.new(role: 'assistant', content: 'world')
     response = double(message:, done: false)
-    expect(output).to receive(:puts).with(/assistant/)
-    expect(output).to receive(:print).with(/world/)
+    expect(output).to receive(:print).with(
+      "\e[2J", "\e[1;1H", "📨 \e[1m\e[38;5;111massistant:\e[0m\e[0m", "\n",
+      "world"
+    )
     follow_chat.call(response)
     response = double(
       message: nil,
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ollama_chat
 version: !ruby/object:Gem::Version
-  version: 0.0.10
+  version: 0.0.12
 platform: ruby
 authors:
 - Florian Frank
@@ -127,14 +127,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1.0'
+        version: '1.2'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: '1.0'
+        version: '1.2'
 - !ruby/object:Gem::Dependency
   name: documentrix
   requirement: !ruby/object:Gem::Requirement
@@ -372,8 +372,8 @@ extra_rdoc_files:
 - lib/ollama_chat/follow_chat.rb
 - lib/ollama_chat/history.rb
 - lib/ollama_chat/information.rb
+- lib/ollama_chat/message_format.rb
 - lib/ollama_chat/message_list.rb
-- lib/ollama_chat/message_type.rb
 - lib/ollama_chat/model_handling.rb
 - lib/ollama_chat/ollama_chat_config.rb
 - lib/ollama_chat/parsing.rb
@@ -408,8 +408,8 @@ files:
 - lib/ollama_chat/follow_chat.rb
 - lib/ollama_chat/history.rb
 - lib/ollama_chat/information.rb
+- lib/ollama_chat/message_format.rb
 - lib/ollama_chat/message_list.rb
-- lib/ollama_chat/message_type.rb
 - lib/ollama_chat/model_handling.rb
 - lib/ollama_chat/ollama_chat_config.rb
 - lib/ollama_chat/ollama_chat_config/default_config.yml
data/lib/ollama_chat/message_type.rb DELETED
@@ -1,5 +0,0 @@
-module OllamaChat::MessageType
-  def message_type(images)
-    images.present? ? ?📸 : ?📨
-  end
-end