scout-ai 0.2.0 → 1.0.0

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (80)
  1. checksums.yaml +4 -4
  2. data/.vimproject +91 -10
  3. data/Rakefile +1 -0
  4. data/VERSION +1 -1
  5. data/bin/scout-ai +2 -0
  6. data/lib/scout/llm/agent/chat.rb +24 -0
  7. data/lib/scout/llm/agent.rb +13 -13
  8. data/lib/scout/llm/ask.rb +26 -16
  9. data/lib/scout/llm/backends/bedrock.rb +129 -0
  10. data/lib/scout/llm/backends/huggingface.rb +6 -21
  11. data/lib/scout/llm/backends/ollama.rb +69 -36
  12. data/lib/scout/llm/backends/openai.rb +85 -35
  13. data/lib/scout/llm/backends/openwebui.rb +1 -1
  14. data/lib/scout/llm/backends/relay.rb +3 -2
  15. data/lib/scout/llm/backends/responses.rb +272 -0
  16. data/lib/scout/llm/chat.rb +547 -0
  17. data/lib/scout/llm/parse.rb +70 -13
  18. data/lib/scout/llm/tools.rb +126 -5
  19. data/lib/scout/llm/utils.rb +17 -10
  20. data/lib/scout/model/base.rb +19 -0
  21. data/lib/scout/model/python/base.rb +25 -0
  22. data/lib/scout/model/python/huggingface/causal/next_token.rb +23 -0
  23. data/lib/scout/model/python/huggingface/causal.rb +29 -0
  24. data/lib/scout/model/python/huggingface/classification +0 -0
  25. data/lib/scout/model/python/huggingface/classification.rb +50 -0
  26. data/lib/scout/model/python/huggingface.rb +112 -0
  27. data/lib/scout/model/python/torch/dataloader.rb +57 -0
  28. data/lib/scout/model/python/torch/helpers.rb +84 -0
  29. data/lib/scout/model/python/torch/introspection.rb +34 -0
  30. data/lib/scout/model/python/torch/load_and_save.rb +47 -0
  31. data/lib/scout/model/python/torch.rb +94 -0
  32. data/lib/scout/model/util/run.rb +181 -0
  33. data/lib/scout/model/util/save.rb +81 -0
  34. data/lib/scout-ai.rb +3 -1
  35. data/python/scout_ai/__init__.py +35 -0
  36. data/python/scout_ai/__pycache__/__init__.cpython-310.pyc +0 -0
  37. data/python/scout_ai/__pycache__/__init__.cpython-311.pyc +0 -0
  38. data/python/scout_ai/__pycache__/huggingface.cpython-310.pyc +0 -0
  39. data/python/scout_ai/__pycache__/huggingface.cpython-311.pyc +0 -0
  40. data/python/scout_ai/__pycache__/util.cpython-310.pyc +0 -0
  41. data/python/scout_ai/__pycache__/util.cpython-311.pyc +0 -0
  42. data/python/scout_ai/atcold/__init__.py +0 -0
  43. data/python/scout_ai/atcold/plot_lib.py +141 -0
  44. data/python/scout_ai/atcold/spiral.py +27 -0
  45. data/python/scout_ai/huggingface/data.py +48 -0
  46. data/python/scout_ai/huggingface/eval.py +60 -0
  47. data/python/scout_ai/huggingface/model.py +29 -0
  48. data/python/scout_ai/huggingface/rlhf.py +83 -0
  49. data/python/scout_ai/huggingface/train/__init__.py +34 -0
  50. data/python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc +0 -0
  51. data/python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc +0 -0
  52. data/python/scout_ai/huggingface/train/next_token.py +315 -0
  53. data/python/scout_ai/language_model.py +70 -0
  54. data/python/scout_ai/util.py +32 -0
  55. data/scout-ai.gemspec +130 -0
  56. data/scout_commands/agent/ask +133 -15
  57. data/scout_commands/agent/kb +15 -0
  58. data/scout_commands/llm/ask +71 -12
  59. data/scout_commands/llm/process +4 -2
  60. data/test/data/cat.jpg +0 -0
  61. data/test/scout/llm/agent/test_chat.rb +14 -0
  62. data/test/scout/llm/backends/test_bedrock.rb +60 -0
  63. data/test/scout/llm/backends/test_huggingface.rb +3 -3
  64. data/test/scout/llm/backends/test_ollama.rb +48 -10
  65. data/test/scout/llm/backends/test_openai.rb +96 -11
  66. data/test/scout/llm/backends/test_responses.rb +115 -0
  67. data/test/scout/llm/test_ask.rb +1 -0
  68. data/test/scout/llm/test_chat.rb +214 -0
  69. data/test/scout/llm/test_parse.rb +81 -2
  70. data/test/scout/model/python/huggingface/causal/test_next_token.rb +59 -0
  71. data/test/scout/model/python/huggingface/test_causal.rb +33 -0
  72. data/test/scout/model/python/huggingface/test_classification.rb +30 -0
  73. data/test/scout/model/python/test_base.rb +44 -0
  74. data/test/scout/model/python/test_huggingface.rb +9 -0
  75. data/test/scout/model/python/test_torch.rb +71 -0
  76. data/test/scout/model/python/torch/test_helpers.rb +14 -0
  77. data/test/scout/model/test_base.rb +117 -0
  78. data/test/scout/model/util/test_save.rb +31 -0
  79. metadata +72 -5
  80. data/questions/coach +0 -2
data/scout-ai.gemspec ADDED
@@ -0,0 +1,130 @@
+ # Generated by juwelier
+ # DO NOT EDIT THIS FILE DIRECTLY
+ # Instead, edit Juwelier::Tasks in Rakefile, and run 'rake gemspec'
+ # -*- encoding: utf-8 -*-
+ # stub: scout-ai 1.0.0 ruby lib
+
+ Gem::Specification.new do |s|
+   s.name = "scout-ai".freeze
+   s.version = "1.0.0".freeze
+
+   s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
+   s.require_paths = ["lib".freeze]
+   s.authors = ["Miguel Vazquez".freeze]
+   s.date = "2025-06-05"
+   s.description = "assorted functionalities to help scouts use AI".freeze
+   s.email = "mikisvaz@gmail.com".freeze
+   s.executables = ["scout-ai".freeze]
+   s.extra_rdoc_files = [
+     "LICENSE",
+     "LICENSE.txt",
+     "README.rdoc"
+   ]
+   s.files = [
+     ".document",
+     ".vimproject",
+     "LICENSE",
+     "LICENSE.txt",
+     "README.rdoc",
+     "Rakefile",
+     "VERSION",
+     "bin/scout-ai",
+     "lib/scout-ai.rb",
+     "lib/scout/llm/agent.rb",
+     "lib/scout/llm/agent/chat.rb",
+     "lib/scout/llm/ask.rb",
+     "lib/scout/llm/backends/bedrock.rb",
+     "lib/scout/llm/backends/huggingface.rb",
+     "lib/scout/llm/backends/ollama.rb",
+     "lib/scout/llm/backends/openai.rb",
+     "lib/scout/llm/backends/openwebui.rb",
+     "lib/scout/llm/backends/relay.rb",
+     "lib/scout/llm/backends/responses.rb",
+     "lib/scout/llm/chat.rb",
+     "lib/scout/llm/embed.rb",
+     "lib/scout/llm/parse.rb",
+     "lib/scout/llm/rag.rb",
+     "lib/scout/llm/tools.rb",
+     "lib/scout/llm/utils.rb",
+     "lib/scout/model/base.rb",
+     "lib/scout/model/python/base.rb",
+     "lib/scout/model/python/huggingface.rb",
+     "lib/scout/model/python/huggingface/causal.rb",
+     "lib/scout/model/python/huggingface/causal/next_token.rb",
+     "lib/scout/model/python/huggingface/classification",
+     "lib/scout/model/python/huggingface/classification.rb",
+     "lib/scout/model/python/torch.rb",
+     "lib/scout/model/python/torch/dataloader.rb",
+     "lib/scout/model/python/torch/helpers.rb",
+     "lib/scout/model/python/torch/introspection.rb",
+     "lib/scout/model/python/torch/load_and_save.rb",
+     "lib/scout/model/util/run.rb",
+     "lib/scout/model/util/save.rb",
+     "python/scout_ai/__init__.py",
+     "python/scout_ai/__pycache__/__init__.cpython-310.pyc",
+     "python/scout_ai/__pycache__/__init__.cpython-311.pyc",
+     "python/scout_ai/__pycache__/huggingface.cpython-310.pyc",
+     "python/scout_ai/__pycache__/huggingface.cpython-311.pyc",
+     "python/scout_ai/__pycache__/util.cpython-310.pyc",
+     "python/scout_ai/__pycache__/util.cpython-311.pyc",
+     "python/scout_ai/atcold/__init__.py",
+     "python/scout_ai/atcold/plot_lib.py",
+     "python/scout_ai/atcold/spiral.py",
+     "python/scout_ai/huggingface/data.py",
+     "python/scout_ai/huggingface/eval.py",
+     "python/scout_ai/huggingface/model.py",
+     "python/scout_ai/huggingface/rlhf.py",
+     "python/scout_ai/huggingface/train/__init__.py",
+     "python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc",
+     "python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc",
+     "python/scout_ai/huggingface/train/next_token.py",
+     "python/scout_ai/language_model.py",
+     "python/scout_ai/util.py",
+     "scout-ai.gemspec",
+     "scout_commands/agent/ask",
+     "scout_commands/agent/kb",
+     "scout_commands/llm/ask",
+     "scout_commands/llm/process",
+     "scout_commands/llm/template",
+     "test/data/cat.jpg",
+     "test/data/person/brothers",
+     "test/data/person/identifiers",
+     "test/data/person/marriages",
+     "test/data/person/parents",
+     "test/scout/llm/agent/test_chat.rb",
+     "test/scout/llm/backends/test_bedrock.rb",
+     "test/scout/llm/backends/test_huggingface.rb",
+     "test/scout/llm/backends/test_ollama.rb",
+     "test/scout/llm/backends/test_openai.rb",
+     "test/scout/llm/backends/test_openwebui.rb",
+     "test/scout/llm/backends/test_relay.rb",
+     "test/scout/llm/backends/test_responses.rb",
+     "test/scout/llm/test_agent.rb",
+     "test/scout/llm/test_ask.rb",
+     "test/scout/llm/test_chat.rb",
+     "test/scout/llm/test_embed.rb",
+     "test/scout/llm/test_parse.rb",
+     "test/scout/llm/test_rag.rb",
+     "test/scout/llm/test_tools.rb",
+     "test/scout/llm/test_utils.rb",
+     "test/scout/model/python/huggingface/causal/test_next_token.rb",
+     "test/scout/model/python/huggingface/test_causal.rb",
+     "test/scout/model/python/huggingface/test_classification.rb",
+     "test/scout/model/python/test_base.rb",
+     "test/scout/model/python/test_huggingface.rb",
+     "test/scout/model/python/test_torch.rb",
+     "test/scout/model/python/torch/test_helpers.rb",
+     "test/scout/model/test_base.rb",
+     "test/scout/model/util/test_save.rb",
+     "test/test_helper.rb"
+   ]
+   s.homepage = "http://github.com/mikisvaz/scout-ai".freeze
+   s.licenses = ["MIT".freeze]
+   s.rubygems_version = "3.6.6".freeze
+   s.summary = "AI gear for scouts".freeze
+
+   s.specification_version = 4
+
+   s.add_runtime_dependency(%q<scout-rig>.freeze, [">= 0".freeze])
+ end
+
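The gemspec declares a single runtime dependency, scout-rig (>= 0), so installing the gem pulls in the rest of the Scout stack transitively. A hypothetical Gemfile entry (the version constraint is illustrative):

    # Hypothetical Gemfile entry; scout-rig comes in as the only
    # declared runtime dependency.
    gem 'scout-ai', '~> 1.0'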
data/scout_commands/agent/ask CHANGED
@@ -19,8 +19,11 @@ Use STDIN to add context to the question
  -h--help Print this help
  -l--log* Log level
  -t--template* Use a template
+ -c--chat* Follow a conversation
  -m--model* Model to use
+ -e--endpoint* Endpoint to use
  -f--file* Incorporate file at the start
+ -wt--workflow_tasks* Export these tasks to the agent
  EOF
  if options[:help]
    if defined? scout_usage
@@ -37,34 +40,149 @@ file = options.delete(:file)

  agent, *question_parts = ARGV

+ agent_path = Scout.var.Agent[agent].find_with_extension(:rb)
+
+ agent = LLM::Agent.load agent_path
+
+ #workflow_tasks = options.delete(:workflow_tasks)
+ #
+ #workflow = begin
+ # if agent_dir.workflow.set_extension('rb').exists?
+ # Workflow.require_workflow agent_dir.workflow.set_extension('rb').find
+ # else
+ # Misc.with_env "SCOUT_WORKFLOW_AUTOINSTALL", false do
+ # Workflow.require_workflow agent
+ # end
+ # end
+ # rescue
+ # end
+ #
+ #if workflow_tasks and workflow
+ # workflow.clear_exports
+ # workflow.export_asynchronous *workflow_tasks.split(',')
+ #end
+ #
+ #knowledge_base = KnowledgeBase.load(agent_dir.knowledge_base) if agent_dir.knowledge_base.exists?
+ #knowledge_base ||= begin workflow.knowledge_base rescue nil end || KnowledgeBase.new(agent_dir.knowledge_base)
+ #
+ #agent = LLM::Agent.new **options.merge(workflow: workflow, knowledge_base: knowledge_base)
+
+ #question = question_parts * " "
+ #
+ #if template = options.delete(:template)
+ # if Open.exists?(template)
+ # template_question = Open.read(template)
+ # else
+ # template_question = Scout.questions[template].read
+ # end
+ # if template_question.include?('???')
+ # question = template_question.sub('???', question)
+ # else
+ # question = template_question
+ # end
+ #end
+ #
+ #if question.include?('...')
+ # context = file ? Open.read(file) : STDIN.read
+ # question = question.sub('...', context)
+ #end
+ #
+ #if chat
+ # conversation = Open.exist?(chat)? Open.read(chat) : ""
+ # question = question.empty? ? conversation : conversation + "\nuser:\n" + question
+ # new = agent.ask(question, options)
+ # conversation = question + "\nassistant:\n" + new
+ # Open.write(chat, conversation)
+ #else
+ # puts agent.ask(question, options)
+ #end
+
+ file, chat, inline, template, dry_run = IndiferentHash.process_options options, :file, :chat, :inline, :template, :dry_run
+
+ agent, question = ARGV * " "
+
+ agent_name ||= 'default'
+ agent_file = Scout.chats[agent_name]
+
+ agent_file = agent_file.find_with_extension('rb')
+
+ if agent_file.exists?
+   if agent_file.directory?
+     agent = load agent_file.agent.find_with_extension('rb')
+   else
+     agent = load agent_file
+   end
+ else
+   raise ParameterException agent_file
+ end

- workflow = begin
-   Workflow.require_workflow agent
- rescue
- end
-
- knowledge_base = begin workflow.knowledge_base rescue nil end || KnowledgeBase.new(Scout.var.Agent[agent])
-
- agent = LLM::Agent.new workflow: workflow, knowledge_base: knowledge_base
-
- question = question_parts * " "
-
- if template = options.delete(:template)
+ if template
    if Open.exists?(template)
      template_question = Open.read(template)
-   else
+   elsif Scout.questions[template].exists?
      template_question = Scout.questions[template].read
+   elsif Scout.chats.system[template].exists?
+     template_question = Scout.chats.system[template].read
+   elsif Scout.chats[template].exists?
+     template_question = Scout.chats[template].read
    end
    if template_question.include?('???')
      question = template_question.sub('???', question)
+   elsif not question.empty?
+     question = template_question + "\nuser: #{question}"
    else
      question = template_question
    end
  end

  if question.include?('...')
-   context = file ? Open.read(file) : STDIN.read
+   context = file ? Open.read(file) : STDIN.read
    question = question.sub('...', context)
+ elsif file
+   question = "<file basename=#{File.basename file}>[[[\n" + Open.read(file) + "\n]]]</file>"
  end

- puts LLM.ask(question, options)
+ if chat
+   conversation = Open.exist?(chat)? LLM.chat(chat) : []
+   convo_options = LLM.options conversation
+   conversation = question.empty? ? conversation : conversation + LLM.chat(question)
+
+   if dry_run
+     ppp LLM.print conversation
+     exit 0
+   end
+   new = agent.ask(conversation, convo_options.merge(options.merge(return_messages: true)))
+   conversation = Open.read(chat) + LLM.print(new)
+   Open.write(chat, conversation)
+ elsif inline
+
+   file = Open.read inline
+
+   new_file = ""
+   while true
+     pre, question, post =
+       file.partition(/^\s*#\s*ask:(?:.*?)(?=^\s*[^\s#])/smu)
+
+     break if post.empty?
+
+     new_file << pre
+     new_file << question
+     clean_question = question.gsub('#', '').gsub(/\s+/,' ').sub(/.*ask:\s*/,'').strip
+     chat = [
+       {role: :system, content: "Write a succint reply with no commentary and no formatting."},
+       {role: :user, content: "Find the following question as a comment in the file give a response to be placed inline: #{question}"},
+       LLM.tag('file', file, inline)
+     ]
+     response = LLM.ask(LLM.chat(chat))
+     new_file << <<-EOF
+ # Response start
+ #{response}
+ # Response end
+     EOF
+     file = post
+   end
+   new_file << file
+   Open.write(inline, new_file)
+ else
+   puts agent.ask(question, options)
+ end
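The new --chat flow keeps the conversation in a plain-text file: LLM.chat parses the existing messages, the new question is appended, and the assistant's reply is written back with LLM.print, so the file accumulates the whole exchange. Judging by the role-labelled prompts used in the test suite, a conversation file is a sequence of role blocks; a hypothetical example (the exact format LLM.chat accepts is an assumption based on those tests):

    system:

    you are a helpful assistant

    user:

    say hi

    assistant:

    hi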
data/scout_commands/agent/kb ADDED
@@ -0,0 +1,15 @@
+ #!/usr/bin/env ruby
+
+ agent = ARGV.shift
+
+ agent_dir = Scout.var.Agent[agent]
+
+ if ARGV.any?
+   ARGV.push "--knowledge_base"
+   ARGV.push agent_dir.knowledge_base
+   ARGV.push "--log"
+   ARGV.push Log.severity.to_s
+ end
+ ARGV.unshift 'kb'
+
+ load Scout.bin.scout.find
data/scout_commands/llm/ask CHANGED
@@ -3,22 +3,30 @@
  require 'scout'
  require 'scout-ai'

- $0 = "scout #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ File.basename(__FILE__) }" if $previous_commands
+ $0 = "scout-ai #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ File.basename(__FILE__) }" if $previous_commands

  options = SOPT.setup <<EOF
+ Ask an LLM model

- Ask GPT
+ $ #{$0} [<options>] [<question>]

- $ #{$0} [<options>] [question]
-
- Use STDIN to add context to the question
+ Use STDIN to add context to the question. The context can be referenced using
+ three dots '...'. The model will be prompted with the question, unless the
+ inline option is used. If the chat option is used, the response will be added
+ to the end of the file. If the file option is used the file contents will be
+ prepended before the question. With the template option, the file will be read
+ as if it were the question, and the actual question will be placed under the
+ characters '???', if they are present.

  -h--help Print this help
- -l--log* Log level
  -t--template* Use a template
+ -c--chat* Follow a conversation
+ -i--inline* Ask inline questions about a file
+ -f--file* Incorporate file at the start
  -m--model* Model to use
  -e--endpoint* Endpoint to use
- -f--file* Incorporate file at the start
+ -b--backend* Backend to use
+ -d--dry_run Dry run, don't ask
  EOF
  if options[:help]
    if defined? scout_usage
@@ -31,26 +39,77 @@ end

  Log.severity = options.delete(:log).to_i if options.include? :log

- file = options.delete(:file)
+ file, chat, inline, template, dry_run = IndiferentHash.process_options options, :file, :chat, :inline, :template, :dry_run

  question = ARGV * " "

- if template = options.delete(:template)
+ if template
    if Open.exists?(template)
      template_question = Open.read(template)
-   else
+   elsif Scout.questions[template].exists?
      template_question = Scout.questions[template].read
+   elsif Scout.chats.system[template].exists?
+     template_question = Scout.chats.system[template].read
+   elsif Scout.chats[template].exists?
+     template_question = Scout.chats[template].read
    end
    if template_question.include?('???')
      question = template_question.sub('???', question)
+   elsif not question.empty?
+     question = template_question + "\nuser: #{question}"
    else
      question = template_question
    end
  end

  if question.include?('...')
-   context = file ? Open.read(file) : STDIN.read
+   context = file ? Open.read(file) : STDIN.read
    question = question.sub('...', context)
+ elsif file
+   question = "<file basename=#{File.basename file}>[[[\n" + Open.read(file) + "\n]]]</file>"
  end

- puts LLM.ask(question, options)
+ if chat
+   conversation = Open.exist?(chat)? LLM.chat(chat) : []
+   convo_options = LLM.options conversation
+   conversation = question.empty? ? conversation : conversation + LLM.chat(question)
+
+   if dry_run
+     ppp LLM.print conversation
+     exit 0
+   end
+   new = LLM.ask(conversation, convo_options.merge(options.merge(return_messages: true)))
+   conversation = Open.read(chat) + LLM.print(new)
+   Open.write(chat, conversation)
+ elsif inline
+
+   file = Open.read inline
+
+   new_file = ""
+   while true
+     pre, question, post =
+       file.partition(/^\s*#\s*ask:(?:.*?)(?=^\s*[^\s#])/smu)
+
+     break if post.empty?
+
+     new_file << pre
+     new_file << question
+     clean_question = question.gsub('#', '').gsub(/\s+/,' ').sub(/.*ask:\s*/,'').strip
+     chat = [
+       {role: :system, content: "Write a succint reply with no commentary and no formatting."},
+       {role: :user, content: "Find the following question as a comment in the file give a response to be placed inline: #{question}"},
+       LLM.tag('file', file, inline)
+     ]
+     response = LLM.ask(LLM.chat(chat))
+     new_file << <<-EOF
+ # Response start
+ #{response}
+ # Response end
+     EOF
+     file = post
+   end
+   new_file << file
+   Open.write(inline, new_file)
+ else
+   puts LLM.ask(question, options)
+ end
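The --inline mode scans the file for comments of the form `# ask: ...`, asks the model each question with the whole file attached via LLM.tag, and splices the answer back in between response markers. A hypothetical before/after on a small Ruby file (the question and answer are invented for illustration):

    # before
    # ask: what does this return for an empty array?
    def head(list)
      list.first
    end

    # after running with --inline
    # ask: what does this return for an empty array?
    # Response start
    nil
    # Response end
    def head(list)
      list.first
    end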
data/scout_commands/llm/process CHANGED
@@ -32,7 +32,9 @@ directory = ARGV.first || Scout.var.ask.find
  directory = Path.setup directory

  while true
-   directory.glob('*.json').each do |file|
+   files = directory.glob('*.json')
+
+   files.each do |file|
      target = directory.reply[id + '.json']

      if ! File.exist?(target)
@@ -46,5 +48,5 @@ while true
      Open.rm(file)
    end

-   sleep 1
+   sleep 1 if files.empty?
  end
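Capturing the glob result means the loop only sleeps when the queue was empty, so bursts of pending .json requests are drained back-to-back instead of pausing a second between each one. The pattern in isolation (process is a hypothetical stand-in for the reply-writing body above):

    loop do
      files = directory.glob('*.json')
      files.each { |file| process(file) } # hypothetical handler
      sleep 1 if files.empty?             # back off only when idle
    end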
data/test/data/cat.jpg ADDED
Binary file
data/test/scout/llm/agent/test_chat.rb ADDED
@@ -0,0 +1,14 @@
+ require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+ require 'scout/llm/agent'
+ require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+ require 'scout/llm/agent'
+ class TestAgent < Test::Unit::TestCase
+   def test_true
+     a = LLM::Agent.new
+     a.start_chat.system 'you are a robot'
+     a.user "hi"
+     ppp a.print
+   end
+ end
+
data/test/scout/llm/backends/test_bedrock.rb ADDED
@@ -0,0 +1,60 @@
+ require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+ require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+ class TestLLMBedrock < Test::Unit::TestCase
+   def _test_ask
+     prompt =<<-EOF
+ say hi
+     EOF
+     ppp LLM::Bedrock.ask prompt, model: "anthropic.claude-3-sonnet-20240229-v1:0", model_max_tokens: 100, model_anthropic_version: 'bedrock-2023-05-31'
+   end
+
+
+   def _test_embeddings
+     Log.severity = 0
+     text =<<-EOF
+ Some text
+     EOF
+     emb = LLM::Bedrock.embed text, log_errors: true
+     assert(Float === emb.first)
+   end
+
+   def test_tool
+     prompt =<<-EOF
+ What is the weather in London. Should I take my umbrella? Use the provided tool
+     EOF
+
+     tools = [
+       {
+         "type": "function",
+         "function": {
+           "name": "get_weather",
+           "description": "Get the current temperature and raining conditions for a specific location",
+           "parameters": {
+             "type": "object",
+             "properties": {
+               "location": {
+                 "type": "string",
+                 "description": "The city and state, e.g., San Francisco, CA"
+               },
+               "unit": {
+                 "type": "string",
+                 "enum": ["Celsius", "Fahrenheit"],
+                 "description": "The temperature unit to use. Infer this from the user's location."
+               }
+             },
+             "required": ["location", "unit"]
+           }
+         }
+       },
+     ]
+
+     sss 0
+     response = LLM::Bedrock.ask prompt, tool_choice: 'required', tools: tools, model: "anthropic.claude-3-sonnet-20240229-v1:0", model_max_tokens: 100, model_anthropic_version: 'bedrock-2023-05-31' do |name,arguments|
+       "It's 15 degrees and raining."
+     end
+
+     ppp response
+   end
+ end
+
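As the test above shows, the block passed to LLM::Bedrock.ask acts as the tool dispatcher: when the model emits a function call, the block receives the function name and parsed arguments, and its return value is fed back to the model as the tool output. A minimal sketch of a non-stubbed dispatcher (weather_lookup is a hypothetical helper):

    response = LLM::Bedrock.ask prompt, tools: tools do |name, arguments|
      case name
      when "get_weather"
        # Look up real conditions instead of the test's canned string
        weather_lookup(arguments["location"], arguments["unit"])
      end
    end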
data/test/scout/llm/backends/test_huggingface.rb CHANGED
@@ -13,7 +13,7 @@ user: write a script that sorts files in a directory
    ppp LLM::Huggingface.ask prompt, model: 'HuggingFaceTB/SmolLM2-135M-Instruct'
  end

- def test_embeddings
+ def _test_embeddings
    Log.severity = 0
    text =<<-EOF
  Some text
@@ -22,7 +22,7 @@ Some text
    assert(Float === emb.first)
  end

- def test_embedding_array
+ def _test_embedding_array
    Log.severity = 0
    text =<<-EOF
  Some text
@@ -31,7 +31,7 @@ Some text
    assert(Float === emb.first.first)
  end

- def test_tool
+ def _test_tool
    prompt =<<-EOF
  What is the weather in London. Should I take an umbrella?
    EOF
data/test/scout/llm/backends/test_ollama.rb CHANGED
@@ -13,22 +13,42 @@ user: write a script that sorts files in a directory
    ppp LLM::OLlama.ask prompt, model: 'mistral', mode: 'chat'
  end

- def test_embeddings
+ def test_tool_call_output
    Log.severity = 0
-   text =<<-EOF
- Some text
+   prompt =<<-EOF
+ function_call:
+
+ {"type":"function","function":{"name":"Baking-bake_muffin_tray","arguments":{}},"id":"Baking_bake_muffin_tray_Default"}
+
+ function_call_output:
+
+ {"id":"Baking_bake_muffin_tray_Default","content":"Baking batter (Mixing base (Whisking eggs from share/pantry/eggs) with mixer (share/pantry/flour))"}
+
+ user:
+
+ How do you bake muffins, according to the tool I provided you. Don't
+ tell me the recipe you already know, use the tool call output. Let me
+ know if you didn't get it.
    EOF
-   emb = LLM::OLlama.embed text, model: 'mistral'
-   assert(Float === emb.first)
+   ppp LLM::OLlama.ask prompt, model: 'mistral', mode: 'chat'
  end

- def test_embedding_array
+ def test_tool_call_output_weather
    Log.severity = 0
-   text =<<-EOF
- Some text
+   prompt =<<-EOF
+ function_call:
+
+ {"name":"get_current_temperature", "arguments":{"location":"London","unit":"Celsius"},"id":"tNTnsQq2s6jGh0npOh43AwDD"}
+
+ function_call_output:
+
+ {"id":"tNTnsQq2s6jGh0npOh43AwDD", "content":"It's 15 degrees and raining."}
+
+ user:
+
+ should i take an umbrella?
    EOF
-   emb = LLM::OLlama.embed [text], model: 'mistral'
-   assert(Float === emb.first.first)
+   ppp LLM::OLlama.ask prompt, model: 'mistral'
  end

  def test_tool
@@ -68,5 +88,23 @@ What is the weather in London. Should I take an umbrella?

    ppp respose
  end
+
+ def test_embeddings
+   Log.severity = 0
+   text =<<-EOF
+ Some text
+   EOF
+   emb = LLM::OLlama.embed text, model: 'mistral'
+   assert(Float === emb.first)
+ end
+
+ def test_embedding_array
+   Log.severity = 0
+   text =<<-EOF
+ Some text
+   EOF
+   emb = LLM::OLlama.embed [text], model: 'mistral'
+   assert(Float === emb.first.first)
+ end
  end
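These new tests replay a tool exchange by hand: a function_call block records a call the model made (name, arguments, id), and the matching function_call_output block supplies the tool's result under the same id, so the final user turn can be answered from the tool output alone. A sketch of the message array such a prompt presumably parses into (field names follow the test payloads; the exact schema is an assumption):

    messages = [
      { role: 'function_call',
        content: '{"name":"get_current_temperature","arguments":{"location":"London","unit":"Celsius"},"id":"tNTnsQq2s6jGh0npOh43AwDD"}' },
      { role: 'function_call_output',
        content: '{"id":"tNTnsQq2s6jGh0npOh43AwDD","content":"It\'s 15 degrees and raining."}' },
      { role: 'user', content: 'should i take an umbrella?' }
    ]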