ollama-ruby 0.0.1 → 0.1.0

data/bin/ollama_chat CHANGED
@@ -7,6 +7,75 @@ include Term::ANSIColor
 require 'tins/go'
 include Tins::GO
 require 'reline'
+require 'reverse_markdown'
+require 'complex_config'
+require 'fileutils'
+
+class OllamaChatConfig
+  include ComplexConfig
+  include FileUtils
+
+  DEFAULT_CONFIG = <<~end
+    ---
+    url: <%= ENV['OLLAMA_URL'] || 'http://%s' % ENV.fetch('OLLAMA_HOST') %>
+    model:
+      name: <%= ENV.fetch('OLLAMA_CHAT_MODEL', 'llama3.1') %>
+      options:
+        num_ctx: 8192
+    system: <%= ENV.fetch('OLLAMA_CHAT_SYSTEM', 'null') %>
+    voice: Samantha
+    markdown: true
+    embedding:
+      enabled: true
+      model:
+        name: mxbai-embed-large
+        options: {}
+        # Retrieval prompt template:
+        prompt: 'Represent this sentence for searching relevant passages: %s'
+      collection: <%= ENV.fetch('OLLAMA_CHAT_COLLECTION', 'ollama_chat') %>
+      found_texts_size: 4096
+      splitter:
+        name: RecursiveCharacter
+        chunk_size: 1024
+    cache: Ollama::Documents::RedisCache
+    redis:
+      url: <%= ENV.fetch('REDIS_URL', 'null') %>
+    debug: <%= ENV['OLLAMA_CHAT_DEBUG'].to_i == 1 ? true : false %>
+  end
+
+  def initialize(filename = nil)
+    @filename = filename || default_path
+    @config = Provider.config(@filename)
+    retried = false
+  rescue ConfigurationFileMissing
+    if @filename == default_path && !retried
+      retried = true
+      mkdir_p File.dirname(default_path)
+      File.secure_write(default_path, DEFAULT_CONFIG)
+      retry
+    else
+      raise
+    end
+  end
+
+  attr_reader :filename
+
+  attr_reader :config
+
+  def default_path
+    File.join(config_dir_path, 'config.yml')
+  end
+
+  def config_dir_path
+    File.join(
+      ENV.fetch(
+        'XDG_CONFIG_HOME',
+        File.join(ENV.fetch('HOME'), '.config')
+      ),
+      'ollama_chat'
+    )
+  end
+end
 
 class FollowChat
   include Ollama::Handlers::Concern
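
Note: the OllamaChatConfig class above bootstraps its own configuration. On first run, Provider.config raises ConfigurationFileMissing, the rescue writes DEFAULT_CONFIG to the XDG default path, and the load is retried; the template is ERB, so variables like OLLAMA_CHAT_MODEL are expanded by complex_config at load time. A minimal sketch of the resulting behaviour, assuming a fresh setup (return values are illustrative):

    config = OllamaChatConfig.new  # writes ~/.config/ollama_chat/config.yml if missing
    config.filename                         # path the settings were read from
    config.config.model.name                # "llama3.1" unless OLLAMA_CHAT_MODEL is set
    config.config.embedding.splitter.name   # "RecursiveCharacter"
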
@@ -16,16 +85,16 @@ class FollowChat
     super(output:)
     @output.sync = true
     @markdown = markdown
-    @say = voice ? Ollama::Handlers::Say.new(voice:) : NOP
+    @say = voice ? Handlers::Say.new(voice:) : NOP
     @messages = messages
     @user = nil
   end
 
   def call(response)
-    ENV['DEBUG'].to_i == 1 and jj response
+    $config.debug and jj response
     if response&.message&.role == 'assistant'
       if @messages.last.role != 'assistant'
-        @messages << Ollama::Message.new(role: 'assistant', content: '')
+        @messages << Message.new(role: 'assistant', content: '')
         @user = message_type(@messages.last.images) + " " +
           bold { color(111) { 'assistant:' } }
         puts @user unless @markdown
@@ -33,7 +102,7 @@ class FollowChat
       content = response.message&.content
       @messages.last.content << content
       if @markdown and @messages.last.content.present?
-        markdown_content = Ollama::Utils::ANSIMarkdown.parse(@messages.last.content)
+        markdown_content = Utils::ANSIMarkdown.parse(@messages.last.content)
         @output.print clear_screen, move_home, @user, ?\n, markdown_content
       else
         @output.print content
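
Note: FollowChat re-parses the entire accumulated message on every streamed chunk and redraws the screen, so markdown constructs that span chunk boundaries render correctly. The core of that strategy as a standalone sketch, with raw escape codes standing in for Term::ANSIColor's clear_screen and move_home:

    accumulated = ''
    on_chunk = ->(chunk) do
      accumulated << chunk                                  # grow the message
      print "\e[2J\e[H"                                     # clear screen, move cursor home
      print Ollama::Utils::ANSIMarkdown.parse(accumulated)  # re-render the whole message
    end
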
@@ -45,37 +114,28 @@ class FollowChat
   end
 end
 
-def pull_model_unless_present(client, model, options)
-  retried = false
-  begin
-    client.show(name: model) { |response|
-      puts green {
-        "Model with architecture #{response.model_info['general.architecture']} found."
-      }
-      if options
-        puts "Model options are:"
-        jj options
-      end
-      if system = response.system
-        puts "Configured model system prompt is:\n#{italic { system }}"
-        return system
-      else
-        return
-      end
-    }
-  rescue Errors::NotFoundError
-    puts "Model #{model} not found, attempting to pull it now…"
-    client.pull(name: model)
-    if retried
-      exit 1
+def pull_model_unless_present(model, options, retried = false)
+  ollama.show(name: model) { |response|
+    puts "Model #{bold{model}} with architecture #{response.model_info['general.architecture']} found."
+    if system = response.system
+      puts "Configured model system prompt is:\n#{italic { system }}"
+      return system
     else
-      retried = true
-      retry
+      return
     end
-  rescue Errors::Error => e
-    warn "Caught #{e.class}: #{e} => Exiting."
+  }
+rescue Errors::NotFoundError
+  puts "Model #{bold{model}} not found, attempting to pull it now…"
+  ollama.pull(name: model)
+  if retried
     exit 1
+  else
+    retried = true
+    retry
   end
+rescue Errors::Error => e
+  warn "Caught #{e.class}: #{e} => Exiting."
+  exit 1
 end
 
 def load_conversation(filename)
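
Note: pull_model_unless_present was rewritten from an explicit begin/retry loop to a method-level rescue: a missing model triggers one pull, the retried parameter flips to true, and retry re-runs the method body once; a second NotFoundError exits. The pattern in isolation, as a sketch (ensure_model is a hypothetical name):

    def ensure_model(name, retried = false)
      ollama.show(name:)                  # raises Errors::NotFoundError if absent
    rescue Ollama::Errors::NotFoundError
      exit 1 if retried                   # second failure: give up
      ollama.pull(name:)                  # fetch the model once
      retried = true
      retry                               # re-run the method body
    end
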
@@ -115,27 +175,198 @@ def list_conversation(messages, markdown)
                  else 210
                  end
     content = if markdown && m.content.present?
-                Ollama::Utils::ANSIMarkdown.parse(m.content)
+                Utils::ANSIMarkdown.parse(m.content)
               else
                 m.content
               end
-    puts message_type(m.images) + " " +
-      bold { color(role_color) { m.role } } + ":\n#{content}"
+    message_text = message_type(m.images) + " "
+    message_text += bold { color(role_color) { m.role } }
+    message_text += ":\n#{content}"
+    if m.images.present?
+      message_text += "\nImages: " + italic { m.images.map(&:path) * ', ' }
+    end
+    puts message_text
+  end
+end
+
+def parse_source(source_io)
+  case source_io&.content_type&.sub_type
+  when 'html'
+    ReverseMarkdown.convert(
+      source_io.read,
+      unknown_tags: :bypass,
+      github_flavored: true,
+      tag_border: ''
+    )
+  when 'plain', 'csv', 'xml'
+    source_io.read
+  else
+    STDERR.puts "Cannot import #{source_io.content_type} document."
+    return
+  end
+end
+
+def import_document(source_io, source)
+  unless $config.embedding.enabled
+    STDOUT.puts "Embedding disabled, I won't import any documents, try: /summarize"
+    return
+  end
+  STDOUT.puts "Importing #{source_io.content_type} document #{source.to_s.inspect}."
+  text = parse_source(source_io) or return
+  text.downcase!
+  splitter_config = $config.embedding.splitter
+  inputs = case splitter_config.name
+           when 'Character'
+             Ollama::Documents::Splitters::Character.new(
+               chunk_size: splitter_config.chunk_size,
+             ).split(text)
+           when 'RecursiveCharacter'
+             Ollama::Documents::Splitters::RecursiveCharacter.new(
+               chunk_size: splitter_config.chunk_size,
+             ).split(text)
+           when 'Semantic'
+             Ollama::Documents::Splitters::Semantic.new(
+               ollama:, model: $config.embedding.model.name,
+               chunk_size: splitter_config.chunk_size,
+             ).split(
+               text,
+               breakpoint: splitter_config.breakpoint.to_sym,
+               percentage: splitter_config.percentage?,
+               percentile: splitter_config.percentile?,
+             )
+           end
+  $documents.add(inputs, source: source.to_s)
+end
+
+def add_image(images, source_io, source)
+  STDERR.puts "Adding #{source_io.content_type} image #{source.to_s.inspect}."
+  image = Image.for_io(source_io, path: source.to_s)
+  (images << image).uniq!
+end
+
+def fetch_source(source, &block)
+  case source
+  when %r(\Ahttps?://\S+)
+    Utils::Fetcher.get(source) do |tmp|
+      block.(tmp)
+    end
+  when %r(\Afile://(?:(?:[.-]|[[:alnum:]])*)(/\S*)|([~.]?/\S*))
+    filename = $~.captures.compact.first
+    filename = File.expand_path(filename)
+    Utils::Fetcher.read(filename) do |tmp|
+      block.(tmp)
+    end
+  else
+    raise "invalid source"
+  end
+rescue => e
+  STDERR.puts "Cannot add source #{source.to_s.inspect}: #{e}\n#{e.backtrace * ?\n}"
+end
+
+def summarize(source)
+  puts "Now summarizing #{source.inspect}."
+  source_content =
+    fetch_source(source) do |source_io|
+      parse_source(source_io) or return
+    end
+  <<~end
+    # Generate an abstract summary of the content in this document:
+
+    #{source_content}
+  end
+end
+
+def parse_content(content, images)
+  images.clear
+  tags = Utils::Tags.new
+
+  content.scan(%r([.~]?/\S+|https?://\S+|#\S+)).each do |source|
+    case source
+    when /\A#(\S+)/
+      tags << $1
+    else
+      source = source.sub(/(["')]|\*+)\z/, '')
+      fetch_source(source) do |source_io|
+        case source_io&.content_type&.media_type
+        when 'image'
+          add_image(images, source_io, source)
+        when 'text'
+          import_document(source_io, source)
+        else
+          STDERR.puts(
+            "Cannot fetch #{source.to_s.inspect} with content type "\
+            "#{source_io&.content_type.inspect}"
+          )
+        end
+      end
+    end
+  end
+
+  return content, (tags unless tags.empty?)
+end
+
+def choose_model(cli_model, default_model)
+  models = ollama.tags.models.map(&:name).sort
+  model = if cli_model == ''
+            Ollama::Utils::Chooser.choose(models) || default_model
+          else
+            cli_model || default_model
+          end
+ensure
+  puts green { "Connecting to #{model}@#{ollama.base_url} now…" }
+end
+
+def choose_collection(default_collection)
+  collections = [ default_collection ] + $documents.collections
+  collections = collections.uniq.sort
+  $documents.collection = collection =
+    Ollama::Utils::Chooser.choose(collections) || default_collection
+ensure
+  puts "Changing to collection #{bold{collection}}."
+  collection_stats
+end
+
+def collection_stats
+  puts <<~end
+    Collection
+      Name: #{bold{$documents.collection}}
+      #Embeddings: #{$documents.size}
+      Tags: #{$documents.tags}
+  end
+end
+
+def configure_cache
+  Object.const_get($config.cache)
+rescue => e
+  STDERR.puts "Caught #{e.class}: #{e} => Falling back to MemoryCache."
+  Ollama::Documents::MemoryCache
+end
+
+def set_markdown(value)
+  if value
+    puts "Using ANSI markdown to output content."
+    true
+  else
+    puts "Using plaintext for outputting content."
+    false
   end
 end
 
 def display_chat_help
   puts <<~end
-    /paste                    to paste content
-    /list                     list the messages of the conversation
-    /clear                    clear the conversation messages
-    /pop n                    pop the last n message, defaults to 1
-    /regenerate               the last answer message
-    /save filename            store conversation messages
-    /load filename            load conversation messages
-    /image filename           attach image to the next message
-    /quit                     to quit.
-    /help                     to view this help.
+    /paste                             to paste content
+    /markdown                          toggle markdown output
+    /list                              list the messages of the conversation
+    /clear                             clear the conversation messages
+    /pop [n]                           pop the last n exchanges, defaults to 1
+    /model                             change the model
+    /regenerate                        the last answer message
+    /collection clear|stats|change|new clear or show stats of current collection
+    /summarize source                  summarize the URL/file source's content
+    /save filename                     store conversation messages
+    /load filename                     load conversation messages
+    /quit                              to quit
+    /help                              to view this help
   end
 end
 
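Note: the new parse_content helper recognizes three kinds of tokens in a prompt with a single scan: local paths, URLs, and #tags. What that scan yields for a typical input, using illustrative values:

    content = "Look at ~/notes.txt and https://example.com/page.html #research"
    content.scan(%r([.~]?/\S+|https?://\S+|#\S+))
    # => ["~/notes.txt", "https://example.com/page.html", "#research"]

Paths and URLs are then routed through fetch_source (text documents are embedded, images attached), while "#research" is collected into Utils::Tags and attached to the query.
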
@@ -143,36 +374,90 @@ def usage
   puts <<~end
     #{File.basename($0)} [OPTIONS]
 
-      -u URL         the ollama base url, OLLAMA_URL
-      -m MODEL       the ollama model to chat with, OLLAMA_MODEL
-      -M OPTIONS     the model options as JSON file, see Ollama::Options
-      -s SYSTEM      the system prompt to use as a file
-      -c CHAT        a saved chat conversation to load
-      -v VOICE       use VOICE (e. g. Samantha) to speak with say command
-      -d             use markdown to display the chat messages
-      -h             this help
+      -f CONFIG      config file to read
+      -u URL         the ollama base url, OLLAMA_URL
+      -m MODEL       the ollama model to chat with, OLLAMA_CHAT_MODEL
+      -s SYSTEM      the system prompt to use as a file, OLLAMA_CHAT_SYSTEM
+      -c CHAT        a saved chat conversation to load
+      -C COLLECTION  name of the collection used in this conversation
+      -D DOCUMENT    load document and add to collection (multiple)
+      -d             use markdown to display the chat messages
+      -v             use voice output
+      -h             this help
 
   end
   exit 0
 end
 
-opts = go 'u:m:M:s:c:v:dh'
+def ollama
+  $ollama
+end
+
+opts = go 'f:u:m:s:c:C:D:dvh'
+
+config = OllamaChatConfig.new(opts[?f])
+$config = config.config
 
 opts[?h] and usage
 
-base_url = opts[?u] || ENV['OLLAMA_URL'] || 'http://%s' % ENV.fetch('OLLAMA_HOST')
-model = opts[?m] || ENV.fetch('OLLAMA_MODEL', 'llama3.1')
-options = if options_file = opts[?M]
-  JSON(File.read(options_file), create_additions: true)
-end
+puts "Configuration read from #{config.filename.inspect} is:"
+y $config.to_h
+
+base_url = opts[?u] || $config.url
+$ollama = Client.new(base_url:, debug: $config.debug)
+
+model = choose_model(opts[?m], $config.model.name)
+options = $config.model.options
+model_system = pull_model_unless_present(model, options)
+messages = []
 
-client = Client.new(base_url:)
+if $config.embedding.enabled
+  embedding_model = $config.embedding.model.name
+  embedding_model_options = $config.embedding.model.options
+  pull_model_unless_present(embedding_model, embedding_model_options)
+  collection = opts[?C] || $config.embedding.collection
+  $documents = Documents.new(
+    ollama:,
+    model: $config.embedding.model.name,
+    model_options: $config.embedding.model.options,
+    collection:,
+    cache: configure_cache,
+    redis_url: $config.redis.url?,
+  )
 
-model_system = pull_model_unless_present(client, model, options)
+  document_list = opts[?D].to_a
+  if document_list.any?(&:empty?)
+    puts "Clearing collection #{bold{collection}}."
+    $documents.clear
+    document_list.reject!(&:empty?)
+  end
+  unless document_list.empty?
+    document_list.map! do |doc|
+      if doc =~ %r(\Ahttps?://)
+        doc
+      else
+        File.expand_path(doc)
+      end
+    end
+    infobar.puts "Collection #{bold{collection}}: Adding #{document_list.size} documents…"
+    document_list.each_slice(25) do |docs|
+      docs.each do |doc|
+        fetch_source(doc) do |doc_io|
+          import_document(doc_io, doc)
+        end
+      end
+    end
+  end
+  collection_stats
+else
+  $documents = Documents.new(ollama:, model:)
+end
 
-puts green { "Connecting to #{model}@#{base_url} now…" }
+if voice = ($config.voice if opts[?v])
+  puts "Using voice #{bold{voice}} to speak."
+end
 
-messages = []
+markdown = set_markdown(opts[?d] || $config.markdown)
 
 if opts[?c]
   messages.concat load_conversation(opts[?c])
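
Note: when embedding is enabled, startup wires a Documents store to the configured model, collection and cache, and any -D documents are fetched, split, and embedded in slices of 25. A reduced sketch of that wiring with illustrative values, using MemoryCache to avoid the Redis dependency:

    documents = Ollama::Documents.new(
      ollama:     Ollama::Client.new(base_url: 'http://localhost:11434'),
      model:      'mxbai-embed-large',
      collection: 'ollama_chat',
      cache:      Ollama::Documents::MemoryCache
    )
    documents.add([ 'a chunk of text' ], source: 'README.md')
    documents.size  # number of stored embeddings
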
@@ -181,7 +466,7 @@ else
   if system_prompt_file = opts[?s]
     system = File.read(system_prompt_file)
   end
-  system ||= ENV['OLLAMA_SYSTEM']
+  system ||= $config.system
 
   if system
     messages << Message.new(role: 'system', content: system)
@@ -191,68 +476,116 @@ else
   end
 end
 
-puts "Type /help to display the chat help."
+puts "\nType /help to display the chat help."
 
-images = nil
+images = []
loop do
-  prompt = bold { color(172) { message_type(images) + " user" } } + bold { "> " }
-  case content = Reline.readline(prompt, true)&.chomp
+  parse_content = true
+
+  input_prompt = bold { color(172) { message_type(images) + " user" } } + bold { "> " }
+  case content = Reline.readline(input_prompt, true)&.chomp
   when %r(^/paste$)
     puts bold { "Paste your content and then press C-d!" }
     content = STDIN.read
   when %r(^/quit$)
     puts "Goodbye."
     exit 0
+  when %r(^/markdown)
+    markdown = set_markdown(!markdown)
+    next
   when %r(^/list$)
-    list_conversation(messages, opts[?d])
+    list_conversation(messages, markdown)
     next
   when %r(^/clear$)
     messages.clear
     puts "Cleared messages."
     next
-  when %r(^/pop\s*(\d*)$)
+  when %r(^/collection (clear|stats|change|new)$)
+    case $1
+    when 'clear'
+      $documents.clear
+      puts "Cleared collection #{bold{collection}}."
+    when 'stats'
+      collection_stats
+    when 'change'
+      choose_collection(collection)
+    when 'new'
+      print "Enter name of the new collection: "
+      $documents.collection = collection = STDIN.gets.chomp
+      collection_stats
+    end
+    next
+  when %r(^/pop?(?:\s+(\d*))?$)
     n = $1.to_i.clamp(1, Float::INFINITY)
-    messages.pop(n)
-    puts "Popped the last #{n} messages."
+    r = messages.pop(2 * n)
+    m = r.size
+    puts "Popped the last #{m} exchanges."
+    next
+  when %r(^/model$)
+    model = choose_model('', model)
     next
   when %r(^/regenerate$)
     if content = messages[-2]&.content
-      images = messages[-2]&.images
+      content.gsub!(/\nConsider these chunks for your answer.*\z/, '')
       messages.pop(2)
     else
       puts "Not enough messages in this conversation."
       redo
     end
-  when %r(^/save (.+)$)
+  when %r(^/summarize\s+(.+))
+    parse_content = false
+    content = summarize($1)
+  when %r(^/save\s+(.+)$)
     save_conversation($1, messages)
     puts "Saved conversation to #$1."
     next
-  when %r(^/load (.+)$)
+  when %r(^/load\s+(.+)$)
     messages = load_conversation($1)
     puts "Loaded conversation from #$1."
     next
-  when %r(^/image (.+)$)
-    filename = File.expand_path($1)
-    if File.exist?(filename)
-      images = Image.for_filename(filename)
-      puts "Attached image #$1 to the next message."
-      redo
-    else
-      puts "Filename #$1 doesn't exist. Choose another one."
-      next
-    end
   when %r(^/help$)
     display_chat_help
     next
-  when nil
+  when nil, ''
     puts "Type /quit to quit."
     next
   end
+
+  content, tags = if parse_content
+                    parse_content(content, images.clear)
+                  else
+                    [ content, Utils::Tags.new ]
+                  end
+
+  if $config.embedding.enabled
+    records = $documents.find(
+      content.downcase,
+      tags:,
+      prompt: $config.embedding.model.prompt?
+    )
+    s, found_texts_size = 0, $config.embedding.found_texts_size
+    records = records.take_while {
+      (s += _1.text.size) <= found_texts_size
+    }
+    found_texts = records.map(&:text)
+    unless found_texts.empty?
+      content += "\nConsider these chunks for your answer:\n#{found_texts.join("\n\n---\n\n")}"
+    end
+  end
+
   messages << Message.new(role: 'user', content:, images:)
-  handler = FollowChat.new(messages:, markdown: opts[?d], voice: opts[?v])
-  client.chat(model:, messages:, options:, stream: true, &handler)
-  ENV['DEBUG'].to_i == 1 and jj messages
-  images = nil
+  handler = FollowChat.new(messages:, markdown:, voice:)
+  ollama.chat(model:, messages:, options:, stream: true, &handler)
+
+  puts records.map { |record|
+    link = if record.source =~ %r(\Ahttps?://)
+             record.source
+           else
+             'file://%s' % File.expand_path(record.source)
+           end
+    [ link, record.tags.first ]
+  }.uniq.map { |l, t| hyperlink(l, t) }.join(' ')
+  $config.debug and jj messages
 rescue Interrupt
   puts "Type /quit to quit."
 end
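
Note: retrieval inside the chat loop is budgeted by size rather than by a fixed number of hits: matching records are kept only while their cumulative text length stays within embedding.found_texts_size (4096 by default), then joined onto the prompt under "Consider these chunks for your answer". The truncation step in isolation:

    s, budget = 0, 4096  # $config.embedding.found_texts_size
    records = records.take_while { |r| (s += r.text.size) <= budget }
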
data/bin/ollama_console CHANGED
@@ -6,7 +6,7 @@ require 'irb'
 require 'irb/history'
 
 base_url = ENV['OLLAMA_URL'] || 'http://%s' % ENV.fetch('OLLAMA_HOST')
-client = Client.new(base_url:)
+ollama = Client.new(base_url:)
 IRB.setup nil
 IRB.conf[:MAIN_CONTEXT] = IRB::Irb.new.context
 IRB.conf[:HISTORY_FILE] = File.join(ENV.fetch('HOME'), '.ollama_console-history')
@@ -16,5 +16,5 @@ if io = IRB.conf[:MAIN_CONTEXT].io and io.support_history_saving?
   io.load_history
   at_exit { io.save_history }
 end
-client.help
-IRB.irb nil, client
+ollama.help
+IRB.irb nil, ollama
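
Note: because IRB.irb is handed the client as its main object, the renamed ollama instance responds to receiverless calls in the console. A sketch of what can be typed at the prompt (model name illustrative):

    tags.models.map(&:name)   # list the locally available models
    pull(name: 'llama3.1')    # pull a model through the client
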
data/bin/ollama_update ADDED
@@ -0,0 +1,17 @@
+#!/usr/bin/env ruby
+
+require 'time'
+require 'term/ansicolor'
+include Term::ANSIColor
+require 'ollama'
+include Ollama
+
+base_url = ENV['OLLAMA_URL'] || 'http://%s' % ENV.fetch('OLLAMA_HOST')
+ollama = Client.new(base_url:)
+ollama.tags.models.each do |model|
+  name, modified_at = model.name, Time.parse(model.modified_at)
+  infobar.puts(
+    "Updating model #{bold {name}} (last modified at #{modified_at.iso8601}):"
+  )
+  ollama.pull(name:)
+end
data/config/redis.conf ADDED
@@ -0,0 +1,5 @@
+save 60 1000
+dbfilename dump.rdb
+appendonly yes
+appendfilename "appendonly.aof"
+appendfsync always
@@ -0,0 +1,11 @@
+services:
+  redis:
+    image: redis:7.2.5-alpine
+    restart: unless-stopped
+    ports:
+      - "9736:6379"
+    volumes:
+      - "redis-data:/data:delegated"
+      - "./config/redis.conf:/etc/redis.conf"
+volumes:
+  redis-data:
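
Note: this compose service maps host port 9736 to Redis's 6379 and mounts the persistence settings from config/redis.conf above; the chat reaches it through REDIS_URL, which the default config template reads for redis.url, backing Ollama::Documents::RedisCache. A sketch of the wiring, assuming the service is up locally:

    # Host port 9736 comes from the ports mapping above.
    ENV['REDIS_URL'] = 'redis://localhost:9736'
    # With that set, cache: Ollama::Documents::RedisCache in config.yml
    # persists embeddings across chat sessions.
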