glim_ai 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +25 -0
- data/Gemfile.lock +49 -0
- data/LICENSE.txt +21 -0
- data/README.md +125 -0
- data/Rakefile +31 -0
- data/examples/autocode/autocode.rb +166 -0
- data/examples/autocode/solargraph_test.rb +59 -0
- data/examples/autocode/templates/changed_files_now_evaluate_output.erb +29 -0
- data/examples/autocode/templates/task.erb +16 -0
- data/examples/calc/calc.rb +50 -0
- data/examples/code_competition/code_competition.rb +78 -0
- data/examples/code_competition/output/python_claude-2.rb +33 -0
- data/examples/code_competition/output/python_claude-instant-1.rb +18 -0
- data/examples/code_competition/output/python_gpt-3.5-turbo-16k.rb +69 -0
- data/examples/code_competition/output/python_gpt-3.5-turbo.rb +43 -0
- data/examples/code_competition/output/python_gpt-4.rb +34 -0
- data/examples/code_competition/output/ruby_claude-2.rb +22 -0
- data/examples/code_competition/output/ruby_claude-instant-1.rb +20 -0
- data/examples/code_competition/output/ruby_gpt-3.5-turbo-16k.rb +27 -0
- data/examples/code_competition/output/ruby_gpt-3.5-turbo.rb +30 -0
- data/examples/code_competition/output/ruby_gpt-4.rb +31 -0
- data/examples/code_competition/output/ruby_human.rb +41 -0
- data/examples/code_competition/templates/analyze_code.erb +33 -0
- data/examples/code_competition/templates/write_code.erb +26 -0
- data/examples/glim_demo/ask_all.rb +35 -0
- data/examples/glim_demo/templates/rate_all.erb +24 -0
- data/examples/improve_prompt/improve_prompt.rb +62 -0
- data/examples/improve_prompt/templates/stashed/prompt_attempt_explicit_steps.erb +15 -0
- data/examples/improve_prompt/templates/stashed/prompt_attempt_explicit_steps_user_message.erb +15 -0
- data/examples/improve_prompt/templates/stashed/prompt_attempt_initial.erb +8 -0
- data/examples/improve_prompt/templates/stashed/prompt_attempt_nothing.erb +19 -0
- data/examples/improve_prompt/templates/try_code_first.erb +13 -0
- data/examples/improve_prompt/templates/try_code_first_system.erb +22 -0
- data/examples/old/econ/discounting.rb +27 -0
- data/examples/old/econ/templates/discounting.erb +10 -0
- data/examples/old/generate_glim_code/generate_glim_code.rb +34 -0
- data/examples/old/generate_glim_code/templates/generate_glim_code.erb +17 -0
- data/examples/old/generate_glim_code/templates/improve_code.erb +27 -0
- data/examples/old/glim_dev_tools/ask_code_question.rb +38 -0
- data/examples/old/glim_dev_tools/templates/ask_code_question.erb +12 -0
- data/examples/old/glim_dev_tools/templates/write_globals_test.erb +28 -0
- data/examples/old/glim_dev_tools/write_globals_test.rb +20 -0
- data/examples/old/linguistics/nine.rb +0 -0
- data/examples/old/rewrite_code/input/hello.py +1 -0
- data/examples/old/rewrite_code/input/subdir/hello.py +1 -0
- data/examples/old/rewrite_code/input/world.py +1 -0
- data/examples/old/rewrite_code/rewrite_code.rb +18 -0
- data/examples/old/rewrite_code/templates/rewrite_code.erb +32 -0
- data/examples/window_check/data.rb +1260 -0
- data/examples/window_check/fruits.rb +118 -0
- data/examples/window_check/tools.rb +56 -0
- data/examples/window_check/window_check.rb +214 -0
- data/glim_generated_tests/make_special_code_with_fixed_length_test.rb +44 -0
- data/glim_generated_tests/old-20230831120513-make_special_code_with_fixed_length_test.rb +1 -0
- data/glim_generated_tests/old-20230831121222-make_special_code_with_fixed_length_test.rb +55 -0
- data/glim_generated_tests/old-20230831124501-make_special_code_with_fixed_length_test.rb +33 -0
- data/glim_generated_tests/test/make_special_code_with_fixed_length_test.rb +58 -0
- data/lib/anthropic_request_details.rb +37 -0
- data/lib/anthropic_response.rb +101 -0
- data/lib/chat_request_details.rb +140 -0
- data/lib/chat_response.rb +303 -0
- data/lib/glim_ai/version.rb +5 -0
- data/lib/glim_ai.rb +8 -0
- data/lib/glim_ai_callable.rb +151 -0
- data/lib/glim_context.rb +62 -0
- data/lib/glim_helpers.rb +54 -0
- data/lib/glim_request.rb +266 -0
- data/lib/glim_response.rb +155 -0
- data/lib/globals.rb +255 -0
- data/lib/html_templates/chat_request.erb +86 -0
- data/sample.env +9 -0
- metadata +131 -0
@@ -0,0 +1,27 @@
|
|
1
|
+
# Ask an LLM, role-playing an average person from each country, a classic
# time-discounting question (templates/discounting.erb) and print each answer.
require_relative '../../lib/globals'

glim = GlimContext.new

#llm_name = "gpt-3.5-turbo"
llm_name = "claude-instant-1" # was "claude-1-instant" — not a valid model id (cf. output/ruby_claude-instant-1.rb)
#llm_name = "gpt-4"

temperature = 0.1

# WOW, this is something LLMs seem to be useless for!

# NOTE: %w() splits on whitespace only, so the original commas became part of
# every element ("Germany," etc.) and leaked into the rendered prompts.
countries = %w(Germany China Panama USA Brazil Ghana Philippines)
responses = countries.to_h { |country| [country, []] }

# Bump the count to collect multiple samples per country.
1.times do
  countries.each do |country|
    req = glim.request(llm_name:, temperature:)
    req.no_cache = true # force a fresh completion on every run
    req.process_template("discounting", country:)
    responses[country] << req.response
  end
end

countries.each do |country|
  puts "Country: #{country} = #{responses[country].first.completion}"
end
|
@@ -0,0 +1,10 @@
|
|
1
|
+
Scenario:
|
2
|
+
You are an average person from <%= country %>.
|
3
|
+
|
4
|
+
|
5
|
+
Question:
|
6
|
+
Imagine that I will give you either $50 today or $100 in the future.
|
7
|
+
|
8
|
+
How soon in the future would my $100 promise need to be paid out for you to prefer to wait?
|
9
|
+
|
10
|
+
Respond only with the number of centuries, nothing else. Decimals are ok.
|
@@ -0,0 +1,34 @@
|
|
1
|
+
# Generate a small Glim demo program from the generator template, then run it
# in a loop, feeding its output back to the LLM until the LLM approves the
# result by answering "YES!!!".
require_relative '../../lib/globals'
include GlimFileHelper

generator_name = "generate_glim_code"

input_root = "examples/glim_demo"
glim = GlimContext.new

program_name = "analogies.rb"

response = glim.response_from_template(generator_name, input_root:, program_name:)
completion = response.completion
outpath = File.join("generated_code", generator_name)
text, _ = extract_and_save_files(completion, outpath)

puts "\nResponse from LLM:\n #{text}\n\n"

generated_program_path = File.join(outpath, program_name)
command = "ruby #{generated_program_path}"
puts "\nCommand: #{command}\n\n"

loop do
  puts "Running command..."
  # run command and store output in string
  output = `#{command}`
  puts output
  response = glim.response_from_template("improve_code", input_root:, program_name:, output:)
  completion = response.completion
  break if completion.include?("YES!!!")
  puts "\nResponse from LLM:\n #{completion}\n\n"
  # extract_and_save_files returns [text, files] (see the call above); the
  # original dropped the destructuring here and printed the raw array.
  # NOTE(review): files are extracted into "." rather than outpath — confirm intended.
  text, _ = extract_and_save_files(completion, ".")
  puts "\nAfter extraction:\n #{text}\n\n"
end
|
@@ -0,0 +1,17 @@
|
|
1
|
+
<%
|
2
|
+
#req.llm_name = "gpt-3.5-turbo"
|
3
|
+
req.llm_name = "gpt-4"
|
4
|
+
|
5
|
+
req.temperature = 0
|
6
|
+
%>
|
7
|
+
<%= prompt_output_files %>
|
8
|
+
|
9
|
+
Glim is a new ruby library that makes it easy to use various large language models, such as GPT-3, in ruby.
|
10
|
+
|
11
|
+
Here is some example code:
|
12
|
+
<%= include_files(input_root) %>
|
13
|
+
|
14
|
+
---
|
15
|
+
Write a ruby program "<%= program_name %>" that generates 3 random analogy problems.
|
16
|
+
You can do this by first asking an LLM to generate a list of 10 abstract nouns. Then, sample from that list.
|
17
|
+
<%= program_name %> should output a list of 3 analogy problems, one per line.
|
@@ -0,0 +1,27 @@
|
|
1
|
+
<%
|
2
|
+
#req.llm_name = "gpt-3.5-turbo"
|
3
|
+
req.llm_name = "gpt-4"
|
4
|
+
|
5
|
+
req.temperature = 0
|
6
|
+
%>
|
7
|
+
<%= prompt_output_files %>
|
8
|
+
|
9
|
+
Glim is a new ruby library that makes it easy to use various large language models, such as GPT-3, in ruby.
|
10
|
+
|
11
|
+
Here is some example code:
|
12
|
+
<%= include_files(input_root) %>
|
13
|
+
|
14
|
+
---
|
15
|
+
The ruby program "<%= program_name %>" is supposed to generate 3 random analogy problems.
|
16
|
+
<%= program_name %> should output a list of 3 analogy problems, one per line.
|
17
|
+
|
18
|
+
Here is the code for it:
|
19
|
+
<%= include_file(File.join("generated_code","generate_glim_code", program_name)) %>
|
20
|
+
|
21
|
+
Here is the output it produces:
|
22
|
+
---
|
23
|
+
<%= output %>
|
24
|
+
---
|
25
|
+
|
26
|
+
If this looks like what one would expect, respond with "YES!!!".
|
27
|
+
Otherwise, please fix it.
|
@@ -0,0 +1,38 @@
|
|
1
|
+
|
2
|
+
|
3
|
+
# Ask the LLM a debugging question about this codebase: the template bundles
# the listed source files together with the error transcript below.

require_relative '../../lib/globals'
include GlimFileHelper

glim = GlimContext.new(log_name: "ask_code_question")

# Verbatim console transcript of the failure being asked about; sent to the
# model as the `question` template variable. Runtime data — do not edit.
question = <<-EOF

I'm getting this error:

First question to GPT: What is the resistance of a 100m long copper cable that has a 6mm^2 cross section?
0.001: Computing cache key based on:
0.001: Cache key was: 4232e625fd593d6251d74b6bbdec1c5631106df6
0.001: Computing cache key based on:
0.001: Cache key was: 4232e625fd593d6251d74b6bbdec1c5631106df6
0.001: Cached ChatResponse found for key: 4232e625fd593d6251d74b6bbdec1c5631106df6
0.001: Computing cache key based on:
0.001: Cache key was: 4232e625fd593d6251d74b6bbdec1c5631106df6
0.001: Using cached response for key: 4232e625fd593d6251d74b6bbdec1c5631106df6
######### evaluate_expression
{
"expression_to_evaluate": "(1.68e-8) * (100 / (6 * 1e-6))"
}
examples/calc/calc.rb:53:in `evaluate_expression': wrong number of arguments (given 1, expected 0; required keyword: expression_to_evaluate) (ArgumentError)
from /Users/ulrichgall/code/ruby-llm/lib/models_openai.rb:242:in `process_response_from_api'
from /Users/ulrichgall/code/ruby-llm/lib/glim_response.rb:25:in `initialize'
from /Users/ulrichgall/code/ruby-llm/lib/glim_request.rb:209:in `new'
from /Users/ulrichgall/code/ruby-llm/lib/glim_request.rb:209:in `response'
from examples/calc/calc.rb:79:in `<main>'

EOF


# The template receives the three relevant source files plus the question.
req = glim.request_from_template("ask_code_question", files: ['lib/glim_ai_callable.rb', 'lib/models_openai.rb', 'examples/calc/calc.rb'], question: )
response = req.response
completion = response.completion
puts completion
|
@@ -0,0 +1,28 @@
|
|
1
|
+
<%
|
2
|
+
#req.llm_name = "gpt-3.5-turbo"
|
3
|
+
req.llm_name = "gpt-4"
|
4
|
+
|
5
|
+
%>
|
6
|
+
|
7
|
+
<%= prompt_output_files %>
|
8
|
+
|
9
|
+
Scenario: You are an experienced software developer who is meticulous when writing automated tests.
|
10
|
+
|
11
|
+
Consider the following code:
|
12
|
+
|
13
|
+
<%= include_file "lib/globals.rb" %>
|
14
|
+
|
15
|
+
Think step by step and describe what the function <%= function_to_test %> is supposed to do.
|
16
|
+
Then list ways in which it could fail, and on what kind of inputs it might fail.
|
17
|
+
Include not just invalid arguments, but also test whether the algorithm works for what it's supposed to do.
|
18
|
+
|
19
|
+
Then, write a comprehensive minitest compliant test for <%= function_to_test %>.
|
20
|
+
<%= special_instructions %>
|
21
|
+
|
22
|
+
When you generate the code for the test, remember that the first line should be:
|
23
|
+
# File: <%= function_to_test %>_test.rb
|
24
|
+
|
25
|
+
The code should include the following:
|
26
|
+
require_relative "../lib/globals"
|
27
|
+
|
28
|
+
<%= prompt_output_files %>
|
@@ -0,0 +1,20 @@
|
|
1
|
+
|
2
|
+
|
3
|
+
# For each (function, special instructions) pair, ask the LLM to write a
# minitest test for that function in lib/globals.rb and save the generated
# file(s) under glim_generated_tests/.

require_relative '../../lib/globals'
include GlimFileHelper


tests = [
  # ["levenshtein_distance",""],
  ["make_special_code_with_fixed_length", "Use levenshtein_distance in the test."]
]

# each instead of for: for-loops leak their variables into the enclosing scope.
tests.each do |function_to_test, special_instructions|
  glim = GlimContext.new(log_name: "write_minitest")
  req = glim.request_from_template("write_globals_test", function_to_test:, special_instructions:)
  response = req.response
  completion = response.completion
  outpath = File.join("glim_generated_tests") # single-arg File.join kept for symmetry with sibling scripts
  # extract_and_save_files returns [text, files] (see generate_glim_code.rb);
  # destructure so we print the summary text, not the raw array.
  text, _ = extract_and_save_files(completion, outpath)
  puts text
end
|
File without changes
|
@@ -0,0 +1 @@
|
|
1
|
+
puts("Hello World!")
|
@@ -0,0 +1 @@
|
|
1
|
+
puts("Hello World!")
|
@@ -0,0 +1 @@
|
|
1
|
+
puts("Hello World!")
|
@@ -0,0 +1,18 @@
|
|
1
|
+
# Rewrite an external codebase via the rewrite_code template and dump the LLM
# completion into rewritten_code.rb.
require_relative '../../lib/globals'

# The original default is a developer-specific absolute path; allow overriding
# via the environment so the published example is runnable elsewhere.
input_root = ENV.fetch("GLIM_REWRITE_INPUT_ROOT", "/Users/ulrichgall/code/developer/smol_dev/")
#input_root = "private/input/"

# content = read_all_files(input_root)
# puts content

glim = GlimContext.new(log_name: "glim_log")
# response = glim.response_from_spec("rewrite_code", input_root:)
# puts response.req.prompt

response = glim.response_from_template("rewrite_code", input_root:)
completion = response.completion
File.write("rewritten_code.rb", completion)
puts completion

# TODO -- lots more to do here
|
@@ -0,0 +1,32 @@
|
|
1
|
+
<%
|
2
|
+
req.llm_name = "gpt-3.5-turbo-16k"
|
3
|
+
req.temperature = 0
|
4
|
+
|
5
|
+
%>
|
6
|
+
|
7
|
+
<%
|
8
|
+
|
9
|
+
# Write a ruby program that captures the essence of the code below.
|
10
|
+
# There is a lot of useless boilerplate there, no need to convert that to ruby.
|
11
|
+
|
12
|
+
# Just capture the essence of the code, and make it work in ruby, using the following API:
|
13
|
+
|
14
|
+
# For making a request to OpenAI in your code, you can do this:
|
15
|
+
# req = LLMRequest.new(messages)
|
16
|
+
# response = LLMResponse.compute(req)
|
17
|
+
|
18
|
+
# Focus on the OpenAI API invocations. Which messages and prompts are used, and how does the completion from one request affect the next prompt?
|
19
|
+
# Are there any loops where the output from one request is a list of items, and then a request is made for each item in the list?
|
20
|
+
# Reflect this accurately in your ruby code.
|
21
|
+
|
22
|
+
# Whenever you encounter a prompt or an array of messages, do not describe what it does.
|
23
|
+
# Instead, give it a filename and then write it at the end in a separate section, marked with the filename you gave it.
|
24
|
+
|
25
|
+
# Describe this precisely, including the prompts.
|
26
|
+
%>
|
27
|
+
|
28
|
+
Review the code below.
|
29
|
+
|
30
|
+
Then, describe what it does.
|
31
|
+
|
32
|
+
<%= include_files(input_root) %>
|