scout-ai 1.1.0 → 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/scout/llm/mcp.rb CHANGED
@@ -6,7 +6,7 @@ module Workflow
  tasks = self.tasks.keys if tasks.empty?
 
  tools = tasks.collect do |task,inputs=nil|
- tool_definition = LLM.task_tool_definition(self, task, inputs)[:function]
+ tool_definition = LLM.task_tool_definition(self, task, inputs)
  description = tool_definition[:description]
  input_schema = tool_definition[:parameters].slice(:properties, :required)
  annotations = tool_definition.slice(:title)
@@ -1,4 +1,7 @@
  module LLM
+ @max_content_length = Scout::Config.get(:max_content_length, :llm_tools, :tools, :llm, :ask, default: 5_000)
+ self.singleton_class.attr_accessor :max_content_length
+
  def self.call_id_name_and_arguments(tool_call)
  tool_call_id = tool_call.dig("call_id") || tool_call.dig("id")
  if tool_call['function']
@@ -15,6 +18,7 @@ module LLM
  end
 
  def self.process_calls(tools, calls, &block)
+ max_content_length = LLM.max_content_length
  IndiferentHash.setup tools
  calls.collect do |tool_call|
  tool_call_id, function_name, function_arguments = call_id_name_and_arguments(tool_call)
@@ -54,10 +58,17 @@ module LLM
  else
  function_response.to_json
  end
+
  content = content.to_s if Numeric === content
 
  Log.high "Called #{function_name}: " + Log.fingerprint(content)
 
+ if content.length > max_content_length
+ exception_msg = "Function #{function_name} called with parameters #{Log.fingerprint function_arguments} returned #{content.length} characters, which is more than the maximum set of #{max_content_length}."
+ Log.high exception_msg
+ content = {exception: exception_msg, stack: caller}.to_json
+ end
+
  response_message = {
  id: tool_call_id,
  role: "tool",
@@ -3,6 +3,10 @@ require 'mcp_client'
 
  module LLM
  def self.mcp_tools(url, options = {})
+ timeout = Scout::Config.get :timeout, :mcp, :tools
+
+ options = IndiferentHash.add_defaults options, read_timeout: timeout.to_i if timeout && timeout != ""
+
  if url == 'stdio'
  client = MCPClient.create_client(mcp_server_configs: [options.merge(type: 'stdio')])
  else
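The mcp_tools change reads a :timeout configuration key (scoped under :mcp and :tools) and, when set, uses it as the default read_timeout for the MCP client; options given by the caller still win because add_defaults only fills missing keys. A sketch of a per-call override, assuming the options hash is forwarded to MCPClient as above; the URL and the 120-second value are placeholders:

    # An explicit read_timeout takes precedence over the configured :timeout default
    tools = LLM.mcp_tools('http://localhost:8808/mcp', read_timeout: 120)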
@@ -97,7 +97,9 @@ module LLM
  raise ScoutException, 'Potential recursive call' if parameters[:allow_recursive] != 'true' &&
  (job.running? and job.info[:pid] == Process.pid)
 
- job.run
+ Workflow.produce(job)
+ job.join
+ job.load
  end
  rescue ScoutException
  return $!
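Replacing job.run with Workflow.produce, job.join and job.load routes the job through the regular workflow machinery (dependencies, already-produced results) instead of running it inline, then waits for completion and loads the result. A sketch of the same pattern in isolation; the workflow and task names are purely illustrative:

    job = SomeWorkflow.job(:some_task, nil, param: 1)  # hypothetical workflow/task

    Workflow.produce(job)   # produce the job and its dependencies
    job.join                # block until it finishes
    result = job.load       # load the computed result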
@@ -1,21 +1,4 @@
  module LLM
-
- def self.tag(tag, content, name = nil)
- if name
- <<-EOF.strip
- <#{tag} name="#{name}">
- #{content}
- </#{tag}>
- EOF
- else
- <<-EOF.strip
- <#{tag}>
- #{content}
- </#{tag}>
- EOF
- end
- end
-
  def self.get_url_server_tokens(url, prefix=nil)
  return get_url_server_tokens(url).collect{|e| prefix.to_s + "." + e } if prefix
 
@@ -39,4 +22,6 @@ module LLM
  end
  Scout::Config.get(key, *all_tokens, hash)
  end
+
+
  end
data/scout-ai.gemspec CHANGED
@@ -2,11 +2,11 @@
  # DO NOT EDIT THIS FILE DIRECTLY
  # Instead, edit Juwelier::Tasks in Rakefile, and run 'rake gemspec'
  # -*- encoding: utf-8 -*-
- # stub: scout-ai 1.1.0 ruby lib
+ # stub: scout-ai 1.1.2 ruby lib
 
  Gem::Specification.new do |s|
  s.name = "scout-ai".freeze
- s.version = "1.1.0".freeze
+ s.version = "1.1.2".freeze
 
  s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
  s.require_paths = ["lib".freeze]
@@ -49,9 +49,15 @@ Gem::Specification.new do |s|
  "lib/scout/llm/backends/relay.rb",
  "lib/scout/llm/backends/responses.rb",
  "lib/scout/llm/chat.rb",
+ "lib/scout/llm/chat/annotation.rb",
+ "lib/scout/llm/chat/parse.rb",
+ "lib/scout/llm/chat/process.rb",
+ "lib/scout/llm/chat/process/clear.rb",
+ "lib/scout/llm/chat/process/files.rb",
+ "lib/scout/llm/chat/process/options.rb",
+ "lib/scout/llm/chat/process/tools.rb",
  "lib/scout/llm/embed.rb",
  "lib/scout/llm/mcp.rb",
- "lib/scout/llm/parse.rb",
  "lib/scout/llm/rag.rb",
  "lib/scout/llm/tools.rb",
  "lib/scout/llm/tools/call.rb",
@@ -105,6 +111,8 @@ Gem::Specification.new do |s|
  "test/scout/llm/backends/test_openwebui.rb",
  "test/scout/llm/backends/test_relay.rb",
  "test/scout/llm/backends/test_responses.rb",
+ "test/scout/llm/chat/test_parse.rb",
+ "test/scout/llm/chat/test_process.rb",
  "test/scout/llm/test_agent.rb",
  "test/scout/llm/test_ask.rb",
  "test/scout/llm/test_chat.rb",
@@ -138,6 +146,8 @@ Gem::Specification.new do |s|
 
  s.add_runtime_dependency(%q<scout-rig>.freeze, [">= 0".freeze])
  s.add_runtime_dependency(%q<ruby-openai>.freeze, [">= 0".freeze])
+ s.add_runtime_dependency(%q<ollama-ai>.freeze, [">= 0".freeze])
  s.add_runtime_dependency(%q<ruby-mcp-client>.freeze, [">= 0".freeze])
+ s.add_runtime_dependency(%q<hnswlib>.freeze, [">= 0".freeze])
  end
 
@@ -21,7 +21,8 @@ characters '???', if they are present.
  -h--help Print this help
  -t--template* Use a template
  -c--chat* Follow a conversation
- -i--inline* Ask inline questions about a file
+ -i--imports* Chat files to import, separated by comma
+ -in--inline* Ask inline questions about a file
  -f--file* Incorporate file at the start
  -m--model* Model to use
  -e--endpoint* Endpoint to use
@@ -39,7 +40,9 @@ end
 
  Log.severity = options.delete(:log).to_i if options.include? :log
 
- file, chat, inline, template, dry_run = IndiferentHash.process_options options, :file, :chat, :inline, :template, :dry_run
+ file, chat, inline, imports, template, dry_run = IndiferentHash.process_options options, :file, :chat, :inline, :imports, :template, :dry_run
+
+ imports = imports.split(/,\s*/) if imports
 
  question = ARGV * " "
 
@@ -70,7 +73,8 @@ elsif file
  end
 
  if chat
- conversation = Open.exist?(chat)? LLM.chat(chat) : []
+ conversation = Open.exist?(chat)? LLM.chat(chat) : Chat.setup([])
+ imports.each{|import| conversation.import import } if imports
  convo_options = LLM.options conversation
  conversation = question.empty? ? conversation : conversation + LLM.chat(question)
 
@@ -80,15 +84,15 @@ if chat
  end
 
  new = LLM.ask(conversation, convo_options.merge(options.merge(return_messages: true)))
- conversation = Open.read(chat) + LLM.print(new)
- Open.write(chat, conversation)
+ Open.open(chat, mode: 'a'){|f| f.puts LLM.print(new) }
+ puts LLM.purge(new).last[:content]
  elsif inline
 
  file = Open.read inline
 
  new_file = ""
  while true
- pre, question, post =
+ pre, question, post =
  file.partition(/^\s*#\s*ask:(?:.*?)(?=^\s*[^\s#]|\z)/smu)
 
  break if question.empty?
@@ -97,10 +101,15 @@ elsif inline
  new_file << question
  clean_question = question.gsub('#', '').gsub(/\s+/,' ').sub(/.*ask:\s*/,'').strip
  chat = [
- {role: :system, content: "Write a succint reply with no commentary and no formatting."},
+ {role: :system, content: "Write a succint reply with no commentary and no formatting."},
  {role: :user, content: "Find the following question as a comment in the file give a response to be placed inline: #{question}"},
  LLM.tag('file', file, inline)
  ]
+
+ chat = Chat.chat(chat)
+
+ imports.each{|import| chat.import import } if imports
+
  response = LLM.ask(LLM.chat(chat))
  new_file << <<-EOF
  # Response start
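The chat handling above now appends only the new messages to the chat file (opened in append mode), prints the purged reply, and lets --imports pull other chat files into the conversation before asking. A rough equivalent in plain Ruby using the same helpers the script calls; the file names are placeholders:

    conversation = Chat.setup([])
    ['context.chat', 'notes.chat'].each{|f| conversation.import f }  # placeholder chat files

    conversation = conversation + LLM.chat("Summarize the imported context")
    new = LLM.ask(conversation, return_messages: true)

    Open.open('session.chat', mode: 'a'){|f| f.puts LLM.print(new) }  # append only the new turns
    puts LLM.purge(new).last[:content]                                # show the final reply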
@@ -35,10 +35,10 @@ while true
  files = directory.glob('*.json')
 
  files.each do |file|
+ id = File.basename(file, '.json')
  target = directory.reply[id + '.json']
 
  if ! File.exist?(target)
- id = File.basename(file, '.json')
  options = IndiferentHash.setup(JSON.parse(Open.read(file)))
  question = options.delete(:question)
  reply = LLM.ask(question, options)
@@ -2,7 +2,7 @@ require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
  require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
 
  class TestLLMAnthropic < Test::Unit::TestCase
- def _test_say_hi
+ def test_say_hi
  prompt =<<-EOF
  user: say hi
  EOF
@@ -121,7 +121,7 @@ What is the weather in London. Should I take my umbrella?
  ppp respose
  end
 
- def test_json_output
+ def _test_json_output
  prompt =<<-EOF
  user:
 
@@ -20,7 +20,7 @@ Some text
  assert(Float === emb.first)
  end
 
- def test_tool_call_output_weather
+ def _test_tool_call_output_weather
  Log.severity = 0
  prompt =<<-EOF
  function_call:
@@ -38,7 +38,7 @@ should i take an umbrella?
  ppp LLM::Responses.ask prompt, model: 'gpt-4.1-nano'
  end
 
- def test_tool
+ def _test_tool
  prompt =<<-EOF
  user:
  What is the weather in London. Should I take my umbrella?
@@ -75,7 +75,7 @@ What is the weather in London. Should I take my umbrella?
  ppp respose
  end
 
- def test_news
+ def _test_news
  prompt =<<-EOF
  websearch: true
 
@@ -86,7 +86,7 @@ What was the top new in the US today?
  ppp LLM::Responses.ask prompt
  end
 
- def test_image
+ def _test_image
  prompt =<<-EOF
  image: #{datafile_test 'cat.jpg'}
 
@@ -98,7 +98,7 @@ What animal is represented in the image?
  ppp LLM::Responses.ask prompt
  end
 
- def test_json_output
+ def _test_json_output
  prompt =<<-EOF
  system:
 
@@ -112,7 +112,7 @@ What other movies have the protagonists of the original gost busters played on,
  ppp LLM::Responses.ask prompt, format: :json
  end
 
- def test_json_format
+ def _test_json_format
  prompt =<<-EOF
  user:
 
@@ -130,7 +130,7 @@ Name each actor and the top movie they took part of
  ppp LLM::Responses.ask prompt, format: format
  end
 
- def test_json_format_list
+ def _test_json_format_list
  prompt =<<-EOF
  user:
 
@@ -148,7 +148,7 @@ Name each actor as keys and the top 3 movies they took part of as values
  ppp LLM::Responses.ask prompt, format: format
  end
 
- def test_json_format_actor_list
+ def _test_json_format_actor_list
  prompt =<<-EOF
  user:
 
@@ -199,7 +199,7 @@ Name each actor as keys and the top 3 movies they took part of as values
  ppp LLM::Responses.ask prompt, format: schema
  end
 
- def test_tool_gpt5
+ def _test_tool_gpt5
  prompt =<<-EOF
  user:
  What is the weather in London. Should I take my umbrella?
@@ -0,0 +1,126 @@
+ require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+ require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+ class TestParse < Test::Unit::TestCase
+ def test_parse_simple_text
+ text = "Hello\nWorld"
+ msgs = Chat.parse(text)
+ assert_equal 1, msgs.size
+ assert_equal 'user', msgs[0][:role]
+ assert_equal "Hello\nWorld", msgs[0][:content]
+ end
+
+ def test_parse_block_and_inline_headers
+ text = <<~TXT
+ assistant:
+ This is a block
+ with lines
+ user: inline reply
+ another line
+ TXT
+
+ msgs = Chat.parse(text)
+
+ # Expect a few messages: initial empty user, assistant block, inline user, and final user block
+ assert_equal 'user', msgs[0][:role]
+ assert_equal '', msgs[0][:content]
+
+ assert_equal 'assistant', msgs[1][:role]
+ assert_equal "This is a block\nwith lines", msgs[1][:content]
+
+ assert_equal 'user', msgs[2][:role]
+ assert_equal 'inline reply', msgs[2][:content]
+
+ assert_equal 'assistant', msgs[3][:role]
+ assert_equal 'another line', msgs[3][:content]
+ end
+
+ def test_parse_code_fence_protection
+ text = <<~TXT
+ assistant:
+ Here is code:
+ ```
+ def foo
+ end
+ ```
+ Done
+ TXT
+
+ msgs = Chat.parse(text)
+ assert_equal 2, msgs.size # initial empty + assistant
+
+ assistant_msg = msgs[1]
+ assert_equal 'assistant', assistant_msg[:role]
+
+ expected = "Here is code:\n```\ndef foo\nend\n```\nDone"
+ assert_equal expected, assistant_msg[:content]
+ end
+
+ def test_parse_xml_protection
+ text = <<~TXT
+ assistant:
+ Before xml
+ <note>
+ This is protected
+ </note>
+ After
+ TXT
+
+ msgs = Chat.parse(text)
+ assistant_msg = msgs.find { |m| m[:role] == 'assistant' }
+ assert assistant_msg
+ assert_equal "Before xml\n<note>\nThis is protected\n</note>\nAfter", assistant_msg[:content]
+ end
+
+ def test_parse_square_brackets_protection
+ text = <<~TXT
+ assistant:
+ Start
+ [[This: has colon
+ and lines]]
+ End
+ TXT
+
+ msgs = Chat.parse(text)
+ assistant_msg = msgs.find { |m| m[:role] == 'assistant' }
+ assert assistant_msg
+ assert_equal "Start\nThis: has colon\nand lines\nEnd", assistant_msg[:content]
+ end
+
+ def test_parse_cmd_output_protection
+ text = <<~TXT
+ assistant:
+ Before
+ shell:-- ls {{{
+ file1
+ shell:-- ls }}}
+ After
+ TXT
+
+ msgs = Chat.parse(text)
+ assistant_msg = msgs.find { |m| m[:role] == 'assistant' }
+ assert assistant_msg
+
+ expected = "Before\n<cmd_output cmd=\"ls\">\nfile1\n</cmd_output>\nAfter"
+ assert_equal expected, assistant_msg[:content]
+ end
+
+ def test_previous_response_id_behavior
+ text = <<~TXT
+ previous_response_id:abc123
+ Some block
+ assistant: Got it
+ TXT
+
+ msgs = Chat.parse(text)
+
+ # Find the previous_response_id message
+ idx = msgs.index { |m| m[:role] == 'previous_response_id' }
+ assert idx, 'previous_response_id message not found'
+ assert_equal 'abc123', msgs[idx][:content]
+
+ # The message after previous_response_id should be a user block containing "Some block"
+ assert_equal 'user', msgs[idx + 1][:role]
+ assert_equal 'Some block', msgs[idx + 1][:content]
+ end
+ end
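These tests exercise the new Chat.parse, which turns role-prefixed text into an array of message hashes while protecting code fences, XML-like blocks and captured shell output. A minimal usage sketch based only on the behaviour the tests assert; the prompt text is illustrative:

    msgs = Chat.parse("user: say hi\nassistant: hi there")
    msgs.each{|m| puts "#{m[:role]}: #{m[:content]}" }  # one hash per role-tagged message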
@@ -0,0 +1,123 @@
+ require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+ require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+ require 'scout/llm/chat'
+ class TestProcess < Test::Unit::TestCase
+ def setup
+ super
+ @tmp = tmpdir
+ end
+
+ def _test_imports_basic_and_continue_last
+ TmpFile.with_file do |file|
+ Open.write(file, "assistant: hello\nuser: from_import\n")
+
+ messages = [{role: 'import', content: file}]
+ out = Chat.imports(messages)
+
+ # Should have replaced import with the messages from the file
+ roles = out.collect{|m| m[:role]}
+ assert_includes roles, 'assistant'
+ assert_includes roles, 'user'
+
+ # Test continue: only last non-empty message
+ messages = [{role: 'continue', content: file}]
+ out = Chat.imports(messages)
+ assert_equal 1, out.size
+ assert_equal 'user', out[0][:role]
+ assert_equal 'from_import', out[0][:content].strip
+
+ # Test last: should behave similarly but using purge
+ messages = [{role: 'last', content: file}]
+ out = Chat.imports(messages)
+ assert_equal 1, out.size
+ end
+ end
+
+ def _test_files_file_reads_and_tags_content
+ TmpFile.with_file do |tmp|
+ file = File.join(tmp, 'afile.txt')
+ Open.write(file, "SOME_UNIQUE_CONTENT_12345")
+
+ messages = [{role: 'file', content: file}]
+ out = Chat.files(messages)
+
+ assert_equal 1, out.size
+ msg = out[0]
+ assert_equal 'user', msg[:role]
+ # content should include the file content and the filename
+ assert_match /SOME_UNIQUE_CONTENT_12345/, msg[:content]
+ assert_match /afile.txt/, msg[:content]
+ end
+ end
+
+ def _test_options_extracts_and_resets
+ chat = [
+ {role: 'endpoint', content: 'http://api.example'},
+ {role: 'option', content: 'k1 v1'},
+ {role: 'sticky_option', content: 'sk sv'},
+ {role: 'assistant', content: 'ok'},
+ {role: 'option', content: 'k2 v2'},
+ {role: 'user', content: 'do something'}
+ ]
+
+ opts = Chat.options(chat)
+
+ # endpoint should be sticky
+ assert_equal 'http://api.example', opts['endpoint']
+ # sticky_option should be present
+ assert_equal 'sv', opts['sk']
+ # first option k1 should have been cleared after assistant
+ assert_nil opts['k1']
+ # second option should remain
+ assert_equal 'v2', opts['k2']
+
+ # chat should have been replaced and should not include option messages
+ roles = chat.collect{|m| m[:role]}
+ assert_includes roles, 'assistant'
+ assert_includes roles, 'user'
+ assert_not_includes roles, 'option'
+ assert_not_includes roles, 'sticky_option'
+ end
+
+ def test_tasks_creates_jobs_and_calls_workflow_produce
+ # define a minimal workflow class to be resolved by Kernel.const_get
+ klass = Class.new do
+ def self.job(task_name, jobname=nil, options={})
+ # return a simple object with a path that responds to find
+ path = Struct.new(:p) do
+ def find; p; end
+ end
+ job = Struct.new(:path).new(path.new("/tmp/fake_job_#{task_name}"))
+ job
+ end
+ end
+
+ Object.const_set('TestWorkflow', klass)
+
+ produced = nil
+ # stub Workflow.produce to capture
+ orig = Workflow.method(:produce)
+ Workflow.define_singleton_method(:produce) do |jobs|
+ produced = jobs
+ end
+
+ begin
+ messages = [ {role: 'task', content: 'TestWorkflow mytask jobname=jn param=1'} ]
+ out = Chat.tasks(messages)
+
+ # Should have returned a job message pointing to our fake path
+ assert_equal 1, out.size
+ assert_equal 'job', out[0][:role]
+ assert_match /fake_job_mytask/, out[0][:content]
+
+ # produce should have been called with the job
+ assert_not_nil produced
+ assert_equal 1, produced.size
+ ensure
+ # restore original
+ Workflow.define_singleton_method(:produce, orig)
+ Object.send(:remove_const, 'TestWorkflow') rescue nil
+ end
+ end
+ end
@@ -1,7 +1,7 @@
  require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
  require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
 
- require 'rbbt-util'
+ require 'scout/knowledge_base'
  class TestLLMAgent < Test::Unit::TestCase
  def test_system
  TmpFile.with_dir do |dir|
@@ -15,7 +15,6 @@ class TestLLMAgent < Test::Unit::TestCase
 
  sss 0
  ppp agent.ask "Who is Miguel's brother-in-law. Brother in law is your spouses sibling or your sibling's spouse"
- #ppp agent.ask "Who is Guille's brother-in-law. Brother in law is your spouses sibling or your sibling's spouse"
  end
  end
  end