scout-ai 0.2.0
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- checksums.yaml +7 -0
- data/.document +5 -0
- data/.vimproject +61 -0
- data/LICENSE +20 -0
- data/LICENSE.txt +20 -0
- data/README.rdoc +18 -0
- data/Rakefile +44 -0
- data/VERSION +1 -0
- data/bin/scout-ai +5 -0
- data/lib/scout/llm/agent.rb +78 -0
- data/lib/scout/llm/ask.rb +50 -0
- data/lib/scout/llm/backends/huggingface.rb +67 -0
- data/lib/scout/llm/backends/ollama.rb +103 -0
- data/lib/scout/llm/backends/openai.rb +86 -0
- data/lib/scout/llm/backends/openwebui.rb +63 -0
- data/lib/scout/llm/backends/relay.rb +36 -0
- data/lib/scout/llm/embed.rb +31 -0
- data/lib/scout/llm/parse.rb +33 -0
- data/lib/scout/llm/rag.rb +16 -0
- data/lib/scout/llm/tools.rb +104 -0
- data/lib/scout/llm/utils.rb +35 -0
- data/lib/scout-ai.rb +7 -0
- data/questions/coach +2 -0
- data/scout_commands/agent/ask +70 -0
- data/scout_commands/llm/ask +56 -0
- data/scout_commands/llm/process +50 -0
- data/scout_commands/llm/template +26 -0
- data/test/data/person/brothers +4 -0
- data/test/data/person/identifiers +10 -0
- data/test/data/person/marriages +3 -0
- data/test/data/person/parents +6 -0
- data/test/scout/llm/backends/test_huggingface.rb +73 -0
- data/test/scout/llm/backends/test_ollama.rb +72 -0
- data/test/scout/llm/backends/test_openai.rb +68 -0
- data/test/scout/llm/backends/test_openwebui.rb +57 -0
- data/test/scout/llm/backends/test_relay.rb +10 -0
- data/test/scout/llm/test_agent.rb +114 -0
- data/test/scout/llm/test_ask.rb +63 -0
- data/test/scout/llm/test_embed.rb +0 -0
- data/test/scout/llm/test_parse.rb +19 -0
- data/test/scout/llm/test_rag.rb +30 -0
- data/test/scout/llm/test_tools.rb +54 -0
- data/test/scout/llm/test_utils.rb +10 -0
- data/test/test_helper.rb +68 -0
- metadata +86 -0

data/lib/scout/llm/embed.rb
@@ -0,0 +1,31 @@
+require 'scout'
+require_relative 'backends/ollama'
+require_relative 'backends/openai'
+require_relative 'backends/openwebui'
+require_relative 'backends/relay'
+
+module LLM
+  def self.embed(text, options = {})
+    endpoint = IndiferentHash.process_options options, :endpoint
+    endpoint ||= Scout::Config.get :endpoint, :embed, :llm, env: 'EMBED_ENDPOINT,LLM_ENDPOINT', default: :openai
+    if endpoint && Scout.etc.AI[endpoint].exists?
+      options = IndiferentHash.add_defaults options, Scout.etc.AI[endpoint].yaml
+    end
+
+    backend = IndiferentHash.process_options options, :backend
+    backend ||= Scout::Config.get :backend, :embed, :llm, env: 'EMBED_BACKEND,LLM_BACKEND', default: :openai
+
+    case backend
+    when :openai, "openai"
+      LLM::OpenAI.embed(text, options)
+    when :ollama, "ollama"
+      LLM::OLlama.embed(text, options)
+    when :openwebui, "openwebui"
+      LLM::OpenWebUI.embed(text, options)
+    when :relay, "relay"
+      LLM::Relay.embed(text, options)
+    else
+      raise "Unknown backend: #{backend}"
+    end
+  end
+end
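
As a quick orientation, a minimal sketch of calling the dispatcher above; it assumes OpenAI credentials are already configured the way embed.rb reads them (for example under Scout.etc.AI), and the return shape follows the backend tests later in this diff:

    require 'scout-ai'

    # Route explicitly to the OpenAI backend; options not consumed by the
    # dispatcher are passed through to LLM::OpenAI.embed.
    vector = LLM.embed("Some text", backend: :openai)
    vector.first # => a Float, one component of the embedding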

data/lib/scout/llm/parse.rb
@@ -0,0 +1,33 @@
+module LLM
+  def self.parse(question, role = nil)
+    role = :user if role.nil?
+
+    if Array === question
+      question.collect do |q|
+        Hash === q ? q : {role: role, content: q}
+      end
+    else
+      if m = question.match(/(.*?)\[\[(.*?)\]\](.*)/m)
+        pre = m[1]
+        inside = m[2]
+        post = m[3]
+        messages = parse(pre, role)
+        messages.last[:content] += "\n" + inside
+        messages.concat parse(post, role)
+      else
+        question.split("\n").collect do |line|
+          if line.include?("\t")
+            question_role, _sep, q = line.partition("\t")
+          elsif m = line.match(/^([^\s]*): ?(.*)/)
+            question_role, q = m.values_at 1, 2
+          else
+            question_role = role
+            q = line
+          end
+          next if q.empty?
+          {role: question_role, content: q}
+        end.compact
+      end
+    end
+  end
+end
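
The parser above turns role-prefixed lines into chat messages and folds a [[...]] block into the message that precedes it. A sketch of the expected output, traced from the code alone:

    messages = LLM.parse("system: be terse\nuser: summarize this [[a long document]]")
    # => [{role: "system", content: "be terse"},
    #     {role: "user",   content: "summarize this \na long document"}]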

data/lib/scout/llm/rag.rb
@@ -0,0 +1,16 @@
+module LLM
+  class RAG
+    def self.index(data)
+      require 'hnswlib'
+
+      dim = data.first.length
+      t = Hnswlib::HierarchicalNSW.new(space: 'l2', dim: dim)
+      t.init_index(max_elements: data.length)
+
+      data.each_with_index do |vector,i|
+        t.add_point vector, i
+      end
+      t
+    end
+  end
+end
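
To show what the index is for, a sketch that pairs it with the embedder; the sample sentences are illustrative, and search_knn is assumed to be the hnswlib gem's k-nearest-neighbour query (an assumption about that gem's API, not something this file uses):

    vectors = ["the cat sat on the mat", "stock prices fell"].collect { |text| LLM.embed(text) }
    index = LLM::RAG.index(vectors)

    # ids of the closest stored vector and its distance to the query
    ids, distances = index.search_knn(LLM.embed("a kitten on a rug"), 1)
    ids.first # => 0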

data/lib/scout/llm/tools.rb
@@ -0,0 +1,104 @@
+require 'scout/workflow'
+require 'scout/knowledge_base'
+module LLM
+  def self.tool_response(tool_call, &block)
+    tool_call_id = tool_call.dig("id")
+    function_name = tool_call.dig("function", "name")
+    function_arguments = tool_call.dig("function", "arguments")
+    function_arguments = JSON.parse(function_arguments, { symbolize_names: true }) if String === function_arguments
+    function_response = block.call function_name, function_arguments
+
+    #content = String === function_response ? function_response : function_response.to_json,
+    content = case function_response
+              when String
+                function_response
+              when nil
+                "success"
+              else
+                function_response.to_json
+              end
+    {
+      tool_call_id: tool_call_id,
+      role: "tool",
+      content: content
+    }
+  end
+
+  def self.task_tool_definition(workflow, task_name)
+    task_info = workflow.task_info(task_name)
+
+    properties = task_info[:inputs].inject({}) do |acc,input|
+      type = task_info[:input_types][input]
+      description = task_info[:input_descriptions][input]
+
+      type = :string if type == :select
+
+      acc[input] = {
+        "type": type,
+        "description": description
+      }
+
+      if input_options = task_info[:input_options][input]
+        if select_options = input_options[:select_options]
+          select_options = select_options.values if Hash === select_options
+          acc[input]["enum"] = select_options
+        end
+      end
+
+      acc
+    end
+
+    required_inputs = task_info[:inputs].select do |input|
+      task_info[:input_options].include?(input) && task_info[:input_options][:required]
+    end
+
+    {
+      type: "function",
+      function: {
+        name: task_name,
+        description: task_info[:description],
+        parameters: {
+          type: "object",
+          properties: properties,
+          required: required_inputs
+        }
+      }
+    }
+  end
+
+  def self.workflow_tools(workflow, tasks = nil)
+    tasks ||= workflow.tasks.keys
+    tasks.collect{|task_name| self.task_tool_definition(workflow, task_name) }
+  end
+
+  def self.knowledge_base_tool_definition(knowledge_base)
+
+    databases = knowledge_base.all_databases.collect{|d| d.to_s }
+
+    properties = {
+      database: {
+        type: "string",
+        enum: databases,
+        description: "Database to traverse"
+      },
+      entities: {
+        type: "array",
+        items: { type: :string },
+        description: "Parent entities to find children for"
+      }
+    }
+
+    [{
+      type: "function",
+      function: {
+        name: 'children',
+        description: "Find the graph children for a list of entities in a format like parent~child. Returns a list.",
+        parameters: {
+          type: "object",
+          properties: properties,
+          required: ['database', 'entities']
+        }
+      }
+    }]
+  end
+end
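
A sketch of the tool_response contract, using a hand-built tool call shaped like the OpenAI response structure the dig calls expect; the id and arguments are made up:

    tool_call = {
      "id" => "call_1",
      "function" => {
        "name"      => "children",
        "arguments" => '{"database": "parents", "entities": ["Miki"]}'
      }
    }

    message = LLM.tool_response(tool_call) do |name, arguments|
      ["Miki~Guille"] # stand-in for a real knowledge-base traversal
    end
    # => {tool_call_id: "call_1", role: "tool", content: '["Miki~Guille"]'}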

data/lib/scout/llm/utils.rb
@@ -0,0 +1,35 @@
+module LLM
+  def self.get_url_server_tokens(url, prefix=nil)
+    return get_url_server_tokens(url).collect{|e| prefix.to_s + "." + e } if prefix
+
+    server = url.match(/(?:https?:\/\/)?([^\/:]*)/)[1] || "NOSERVER"
+    parts = server.split(".")
+    parts.pop if parts.last.length <= 3
+    combinations = []
+    (1..parts.length).each do |l|
+      parts.each_cons(l){|p| combinations << p*"."}
+    end
+    (parts + combinations + [server]).uniq
+  end
+
+  def self.get_url_config(key, url = nil, *tokens)
+    hash = tokens.pop if Hash === tokens.last
+    if url
+      url_tokens = tokens.inject([]){|acc,prefix| acc.concat(get_url_server_tokens(url, prefix))}
+      all_tokens = url_tokens + tokens
+    else
+      all_tokens = tokens
+    end
+    Scout::Config.get(key, *all_tokens, hash)
+  end
+
+  def self
+    if workflow.root.etc.AI[@model || 'default'].exists?
+      workflow.root.etc.AI[@model || 'default'].json
+    elsif Scout.etc.AI[@model || 'default'].exists?
+      Scout.etc.AI[@model || 'default'].json
+    else
+      {}
+    end
+  end
+end
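
A worked trace of get_url_server_tokens, following the code above; the trailing com is dropped because its length is three or less:

    LLM.get_url_server_tokens("https://api.openai.com/v1")
    # => ["api", "openai", "api.openai", "api.openai.com"]

    LLM.get_url_server_tokens("https://api.openai.com/v1", :key)
    # => ["key.api", "key.openai", "key.api.openai", "key.api.openai.com"]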

data/lib/scout-ai.rb ADDED
data/questions/coach ADDED

data/scout_commands/agent/ask
@@ -0,0 +1,70 @@
+#!/usr/bin/env ruby
+
+require 'scout'
+require 'scout-ai'
+require 'scout/workflow'
+require 'scout/knowledge_base'
+require 'scout/llm/agent'
+
+$0 = "scout #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ File.basename(__FILE__) }" if $previous_commands
+
+options = SOPT.setup <<EOF
+
+Ask GPT
+
+$ #{$0} [<options>] [question]
+
+Use STDIN to add context to the question
+
+-h--help Print this help
+-l--log* Log level
+-t--template* Use a template
+-m--model* Model to use
+-f--file* Incorporate file at the start
+EOF
+if options[:help]
+  if defined? scout_usage
+    scout_usage
+  else
+    puts SOPT.doc
+  end
+  exit 0
+end
+
+Log.severity = options.delete(:log).to_i if options.include? :log
+
+file = options.delete(:file)
+
+agent, *question_parts = ARGV
+
+
+workflow = begin
+  Workflow.require_workflow agent
+rescue
+end
+
+knowledge_base = begin workflow.knowledge_base rescue nil end || KnowledgeBase.new(Scout.var.Agent[agent])
+
+agent = LLM::Agent.new workflow: workflow, knowledge_base: knowledge_base
+
+question = question_parts * " "
+
+if template = options.delete(:template)
+  if Open.exists?(template)
+    template_question = Open.read(template)
+  else
+    template_question = Scout.questions[template].read
+  end
+  if template_question.include?('???')
+    question = template_question.sub('???', question)
+  else
+    question = template_question
+  end
+end
+
+if question.include?('...')
+  context = file ? Open.read(file) : STDIN.read
+  question = question.sub('...', context)
+end
+
+puts LLM.ask(question, options)
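
An illustrative invocation (the workflow name and question are hypothetical): the first argument names the workflow whose tasks and knowledge base back the agent, and a ... in the question is replaced by STDIN or the --file contents:

    scout agent ask MyWorkflow "given this context, what should I run next? ..." < context.txt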

data/scout_commands/llm/ask
@@ -0,0 +1,56 @@
+#!/usr/bin/env ruby
+
+require 'scout'
+require 'scout-ai'
+
+$0 = "scout #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ File.basename(__FILE__) }" if $previous_commands
+
+options = SOPT.setup <<EOF
+
+Ask GPT
+
+$ #{$0} [<options>] [question]
+
+Use STDIN to add context to the question
+
+-h--help Print this help
+-l--log* Log level
+-t--template* Use a template
+-m--model* Model to use
+-e--endpoint* Endpoint to use
+-f--file* Incorporate file at the start
+EOF
+if options[:help]
+  if defined? scout_usage
+    scout_usage
+  else
+    puts SOPT.doc
+  end
+  exit 0
+end
+
+Log.severity = options.delete(:log).to_i if options.include? :log
+
+file = options.delete(:file)
+
+question = ARGV * " "
+
+if template = options.delete(:template)
+  if Open.exists?(template)
+    template_question = Open.read(template)
+  else
+    template_question = Scout.questions[template].read
+  end
+  if template_question.include?('???')
+    question = template_question.sub('???', question)
+  else
+    question = template_question
+  end
+end
+
+if question.include?('...')
+  context = file ? Open.read(file) : STDIN.read
+  question = question.sub('...', context)
+end
+
+puts LLM.ask(question, options)
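
Illustrative invocations of the plain llm ask command; summary is a hypothetical template under the questions directory containing a ??? placeholder, and the ... in the second call is filled from STDIN:

    scout llm ask "What does this gem do?"
    cat README.rdoc | scout llm ask -t summary "..."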

data/scout_commands/llm/process
@@ -0,0 +1,50 @@
+#!/usr/bin/env ruby
+
+require 'scout'
+require 'scout-ai'
+require 'scout/llm/backends/relay'
+
+$0 = "scout #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ File.basename(__FILE__) }" if $previous_commands
+
+options = SOPT.setup <<EOF
+
+Ask GPT
+
+$ #{$0} [<options>] [<directory>]
+
+Use STDIN to add context to the question
+
+-h--help Print this help
+EOF
+if options[:help]
+  if defined? scout_usage
+    scout_usage
+  else
+    puts SOPT.doc
+  end
+  exit 0
+end
+
+Log.severity = options.delete(:log).to_i if options.include? :log
+
+directory = ARGV.first || Scout.var.ask.find
+
+directory = Path.setup directory
+
+while true
+  directory.glob('*.json').each do |file|
+    target = directory.reply[id + '.json']
+
+    if ! File.exist?(target)
+      id = File.basename(file, '.json')
+      options = IndiferentHash.setup(JSON.parse(Open.read(file)))
+      question = options.delete(:question)
+      reply = LLM.ask(question, options)
+      Open.write(target, reply.to_json)
+    end
+
+    Open.rm(file)
+  end
+
+  sleep 1
+end
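
The loop above implements a file-based relay: a request is a JSON file whose question key is extracted and whose remaining keys are passed as options, and the answer is written under the directory's reply subfolder with the same id (note that, as published, the loop appears to reference id one line before assigning it). A minimal client sketch, assuming the default var/ask layout:

    require 'json'

    # Drop a request; "123" is an arbitrary id chosen by the client.
    File.write("var/ask/123.json", {question: "say hi", model: "mistral"}.to_json)

    # ...after the process loop has answered and removed the request...
    reply = JSON.parse(File.read("var/ask/reply/123.json"))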

data/scout_commands/llm/template
@@ -0,0 +1,26 @@
+#!/usr/bin/env ruby
+
+require 'scout'
+
+$0 = "scout #{$previous_commands.any? ? $previous_commands*" " + " " : "" }#{ File.basename(__FILE__) }" if $previous_commands
+
+options = SOPT.setup <<EOF
+
+List all ask templates
+
+$ #{$0} [<options>] <filename> [<other|->]*
+
+-h--help Print this help
+EOF
+if options[:help]
+  if defined? scout_usage
+    scout_usage
+  else
+    puts SOPT.doc
+  end
+  exit 0
+end
+
+puts Scout.questions.glob_all("*") * "\n"
+
+

data/test/scout/llm/backends/test_huggingface.rb
@@ -0,0 +1,73 @@
+require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+class TestLLMHF < Test::Unit::TestCase
+
+  def test_ask
+    Log.severity = 0
+    prompt =<<-EOF
+system: you are a coding helper that only write code and inline comments. No extra explanations or comentary
+system: Avoid using backticks ``` to format code.
+user: write a script that sorts files in a directory
+    EOF
+    ppp LLM::Huggingface.ask prompt, model: 'HuggingFaceTB/SmolLM2-135M-Instruct'
+  end
+
+  def test_embeddings
+    Log.severity = 0
+    text =<<-EOF
+Some text
+    EOF
+    emb = LLM::Huggingface.embed text, model: 'distilbert-base-uncased-finetuned-sst-2-english'
+    assert(Float === emb.first)
+  end
+
+  def test_embedding_array
+    Log.severity = 0
+    text =<<-EOF
+Some text
+    EOF
+    emb = LLM::Huggingface.embed [text], model: 'distilbert-base-uncased-finetuned-sst-2-english'
+    assert(Float === emb.first.first)
+  end
+
+  def test_tool
+    prompt =<<-EOF
+What is the weather in London. Should I take an umbrella?
+    EOF
+
+    tools = [
+      {
+        "type": "function",
+        "function": {
+          "name": "get_current_temperature",
+          "description": "Get the current temperature for a specific location",
+          "parameters": {
+            "type": "object",
+            "properties": {
+              "location": {
+                "type": "string",
+                "description": "The city and state, e.g., San Francisco, CA"
+              },
+              "unit": {
+                "type": "string",
+                "enum": ["Celsius", "Fahrenheit"],
+                "description": "The temperature unit to use. Infer this from the user's location."
+              }
+            },
+            "required": ["location", "unit"]
+          }
+        }
+      },
+    ]
+
+    sss 0
+    respose = LLM::Huggingface.ask prompt, model: 'HuggingFaceTB/SmolLM2-135M-Instruct', tool_choice: 'required', tools: tools do |name,arguments|
+      "It's raining cats and dogs"
+    end
+
+    ppp respose
+  end
+
+end
+

data/test/scout/llm/backends/test_ollama.rb
@@ -0,0 +1,72 @@
+require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+class TestLLMOllama < Test::Unit::TestCase
+
+  def test_ask
+    Log.severity = 0
+    prompt =<<-EOF
+system: you are a coding helper that only write code and inline comments. No extra explanations or comentary
+system: Avoid using backticks ``` to format code.
+user: write a script that sorts files in a directory
+    EOF
+    ppp LLM::OLlama.ask prompt, model: 'mistral', mode: 'chat'
+  end
+
+  def test_embeddings
+    Log.severity = 0
+    text =<<-EOF
+Some text
+    EOF
+    emb = LLM::OLlama.embed text, model: 'mistral'
+    assert(Float === emb.first)
+  end
+
+  def test_embedding_array
+    Log.severity = 0
+    text =<<-EOF
+Some text
+    EOF
+    emb = LLM::OLlama.embed [text], model: 'mistral'
+    assert(Float === emb.first.first)
+  end
+
+  def test_tool
+    prompt =<<-EOF
+What is the weather in London. Should I take an umbrella?
+    EOF
+
+    tools = [
+      {
+        "type": "function",
+        "function": {
+          "name": "get_current_temperature",
+          "description": "Get the current temperature for a specific location",
+          "parameters": {
+            "type": "object",
+            "properties": {
+              "location": {
+                "type": "string",
+                "description": "The city and state, e.g., San Francisco, CA"
+              },
+              "unit": {
+                "type": "string",
+                "enum": ["Celsius", "Fahrenheit"],
+                "description": "The temperature unit to use. Infer this from the user's location."
+              }
+            },
+            "required": ["location", "unit"]
+          }
+        }
+      },
+    ]
+
+    sss 0
+    respose = LLM::OLlama.ask prompt, model: 'mistral', tool_choice: 'required', tools: tools do |name,arguments|
+      "It's raining cats and dogs"
+    end
+
+    ppp respose
+  end
+end
+

data/test/scout/llm/backends/test_openai.rb
@@ -0,0 +1,68 @@
+require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+class TestLLMOpenAI < Test::Unit::TestCase
+  def test_ask
+    prompt =<<-EOF
+system: you are a coding helper that only write code and comments without formatting so that it can work directly, avoid the initial and end commas ```.
+user: write a script that sorts files in a directory
+    EOF
+    sss 0
+    ppp LLM::OpenAI.ask prompt
+  end
+
+  def _test_argonne
+    prompt =<<-EOF
+user: write a script that sorts files in a directory
+    EOF
+    sss 0
+  end
+
+  def _test_embeddings
+    Log.severity = 0
+    text =<<-EOF
+Some text
+    EOF
+    emb = LLM::OpenAI.embed text, log_errors: true
+    assert(Float === emb.first)
+  end
+
+  def _test_tool
+    prompt =<<-EOF
+What is the weather in London. Should I take my umbrella?
+    EOF
+
+    tools = [
+      {
+        "type": "function",
+        "function": {
+          "name": "get_current_temperature",
+          "description": "Get the current temperature and raining conditions for a specific location",
+          "parameters": {
+            "type": "object",
+            "properties": {
+              "location": {
+                "type": "string",
+                "description": "The city and state, e.g., San Francisco, CA"
+              },
+              "unit": {
+                "type": "string",
+                "enum": ["Celsius", "Fahrenheit"],
+                "description": "The temperature unit to use. Infer this from the user's location."
+              }
+            },
+            "required": ["location", "unit"]
+          }
+        }
+      },
+    ]
+
+    sss 0
+    respose = LLM::OpenAI.ask prompt, tool_choice: 'required', tools: tools, model: "gpt-4o" do |name,arguments|
+      "It's 15 degrees and raining."
+    end
+
+    ppp respose
+  end
+end
+