llmed 0.6.1 → 0.6.2

This diff covers the publicly released contents of the two package versions as published to their registry, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 87fc5d9fa3991a03d2055b3da5f8ec1203af3e987c6ff711353a3d617f3a5add
-  data.tar.gz: e4b535f0353bc4c575d84cb0648a7d8d38bc62b3d585f87f9eb4fde18b36d835
+  metadata.gz: d9bc1517e8b12805cae255599e37f2f6cb17ee1aae43043c2d36e232291dc63f
+  data.tar.gz: 2d82a1d9ff3fc1585b8fe952800786cc25ee107f9a1cf679e1ada541c4b105df
 SHA512:
-  metadata.gz: bf87dca2cc6babed42917e624511032e5f9747679244199a8340f6f2cbeca95bfa9a3fbf839a5ede32051138f7ae18affe154fe20177a9ae1ca20b03052e6cd7
-  data.tar.gz: 6243e74686ffc0aabfe27310216fab81a1dd5a6a7ea987da095b579eb6ba23323e6f68f26b948f4899d3b52c7f01ecf6ceb3ada1eaa8512c26b897c06eccf89a
+  metadata.gz: 347c5b9e439ceedbb8de5a850185d1a4e3a8aec23564d9f379747abda2d5a5921047d2891563d9205077476f29910ffe511819d729b24ef1ee3a94002ad1fbe5
+  data.tar.gz: 2b5053b7389eee52215e44799737f74fdb9fc43acf02c2a1036b2f052a1bef339f13427c6bc92606a1b7fb7d31b80b44fa6e6b4c94f3284d16d5550dfc1e321a
data/exe/llmed CHANGED
@@ -1,23 +1,24 @@
 #!/bin/env ruby
 # Copyright 2025 Jovany Leandro G.C <bit4bit@riseup.net>
 # frozen_string_literal: true
+
 require 'optparse'
 require 'llmed'
 
 logger = Logger.new(STDERR)
 output_dir = './llmed-out'
 release_dir = output_dir
-template = <<-TMP
-set_llm provider: :openai, api_key: ENV['OPENAI_API_KEY'], model: 'gpt-4o'
-
-# Increment the RELEASE number once you approve the output.
-application "hi world", release: nil, language: '<HERE LANGUAGE>', output_file: "<HERE NAME>.ollmed" do
-  context "main" do
-    <<-LLM
-    Show to user 'hi world!'.
-    LLM
+template = <<~TMP
+  set_llm provider: :openai, api_key: ENV['OPENAI_API_KEY'], model: 'gpt-4o'
+
+  # Increment the RELEASE number once you approve the output.
+  application "hi world", release: nil, language: '<HERE LANGUAGE>', output_file: "<HERE NAME>.ollmed" do
+    context "main" do
+      <<-LLM
+      Show to user 'hi world!'.
+      LLM
+    end
   end
-end
 TMP
 
 OptionParser.new do |parser|
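
Note on the template change above: `<<-TMP` only allows the closing delimiter to be indented, so every line of the old template body had to stay flush left, while `<<~TMP` (a "squiggly" heredoc, Ruby 2.3+) also strips the common leading indentation from the body. That is why the template lines can now be indented under `template =` without the indentation leaking into the generated file. A minimal illustration of the two forms:

    # <<- preserves each line's leading whitespace; only the terminator may be indented.
    plain = <<-TXT
      hello
    TXT
    plain    # => "      hello\n"

    # <<~ removes the smallest common indentation from the body.
    squiggly = <<~TXT
      hello
    TXT
    squiggly # => "hello\n"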
data/lib/llm.rb CHANGED
@@ -18,18 +18,16 @@ class LLMed
 
     Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
 
-    class OpenAI
-
-      DEFAULT_URI_BASE = "https://api.openai.com/".freeze
-      MAX_TOKENS = 8192
-
-      def initialize(**args)
-        @logger = args.delete(:logger)
-        @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
+    class Agent
+      def initialize(llm_instance)
+        @llm_instance = llm_instance
+        @assistant = Langchain::Assistant.new(
+          llm: @llm_instance.llm
+        )
       end
 
       def chat(messages: [])
-        messages = messages.map do |m|
+        llm_messages = messages.map do |m|
           case m
           when Message::System
             { role: 'system', content: m.content }
@@ -39,26 +37,40 @@ class LLMed
         end
 
         start = Time.now
-        llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
-        warn_token_limits(llm_response)
+
+        @assistant.add_messages(messages: llm_messages)
+        assistant_messages = @assistant.run!
+
+        assistant_response = assistant_messages.select { |m| m.role == 'assistant' }.last
 
         stop = Time.now
-        Response.new({ provider: provider,
-                       model: @llm.chat_parameters[:model],
+        Response.new({ provider: @llm_instance.provider,
+                       model: @llm_instance.model,
                        duration_seconds: stop.to_i - start.to_i,
-                       source_code: source_code(llm_response.chat_completion),
-                       total_tokens: llm_response.total_tokens })
+                       source_code: @llm_instance.source_code(assistant_response.content),
+                       total_tokens: @assistant.total_tokens })
       end
+    end
 
-      private
-      def warn_token_limits(llm_response)
-        if llm_response.completion_tokens >= MAX_TOKENS
-          @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
-        end
+    class OpenAI
+      DEFAULT_URI_BASE = "https://api.openai.com/".freeze
+      MAX_TOKENS = 8192
+
+      attr_reader :llm
+
+      def initialize(**args)
+        @logger = args.delete(:logger)
+        @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
       end
 
-      def llm_arguments(args)
-        args
+      def chat(messages: [])
+        llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
+        warn_token_limits(llm_response)
+        llm_response
+      end
+
+      def model
+        @llm.chat_parameters[:model]
       end
 
       def provider
@@ -68,31 +80,43 @@ class LLMed
       def source_code(content)
         content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
       end
-    end
 
-    class Anthropic < OpenAI
       private
 
+      def warn_token_limits(llm_response)
+        if llm_response.completion_tokens >= MAX_TOKENS
+          @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
+        end
+      end
+
       def llm_arguments(args)
-        @logger = args.delete(:logger)
-        args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
+        args
       end
+    end
 
+    class Anthropic < OpenAI
       def provider
         :anthropic
       end
-    end
 
-    class LikeOpenAI < OpenAI
       private
 
       def llm_arguments(args)
-        args
+        @logger = args.delete(:logger)
+        args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
       end
+    end
 
+    class LikeOpenAI < OpenAI
       def provider
         :like_openai
       end
+
+      private
+
+      def llm_arguments(args)
+        args
+      end
     end
 
     class Test
@@ -100,14 +124,25 @@ class LLMed
         @output = ''
       end
 
+      def chat_parameters
+        { model: 'test' }
+      end
+
+      def model
+        'test'
+      end
+
       def chat(messages: [])
-        @output = messages.map { |m| m[:content] }.join("\n")
+        output = messages.map { |m| m[:content] }.join("\n")
+        Struct.new(:chat_completion, :total_tokens).new(output, 0)
+      end
+
+      def source_code(code)
+        code
+      end
 
-        Response.new({ provider: :test,
-                       model: 'test',
-                       duration_seconds: 0,
-                       source_code: @output,
-                       total_tokens: 0 })
+      def provider
+        :test
       end
     end
   end
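
Net effect of the four hunks above: the old monolithic `OpenAI#chat` is split into provider objects (`OpenAI`, `Anthropic`, `LikeOpenAI`, `Test`) that expose `chat`, `model`, `provider`, and `source_code` (plus an `llm` reader on the Langchain-backed providers), and a new `Agent` that feeds a provider into a `Langchain::Assistant` run and assembles the `Response` struct itself. A sketch of the duck type a provider now satisfies, modeled on the updated `Test` class (the `EchoProvider` name is hypothetical, not part of the gem):

    # Hypothetical provider: anything with this surface can stand in where
    # LLMed::LLM::Test is used by Agent-style callers.
    class EchoProvider
      def chat(messages: [])
        output = messages.map { |m| m[:content] }.join("\n")
        # Same ad-hoc response shape Test returns: chat_completion + total_tokens.
        Struct.new(:chat_completion, :total_tokens).new(output, 0)
      end

      def model
        'echo'
      end

      def provider
        :echo
      end

      def source_code(code)
        code
      end
    end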
@@ -49,6 +49,7 @@ class LLMed
 
   # Example:
   # application { context "demo" { "content" } }
+  # DEPRECATED: Use spec instead of context
   def context(name, **opts, &block)
     opts[:release_dir] = @release_dir
     ctx = Context.new(name: name, options: opts)
@@ -58,6 +59,12 @@ class LLMed
     @contexts << ctx
   end
 
+  # Example:
+  # application { spec "demo" { "content" } }
+  def spec(name, **opts, &block)
+    context(name, **opts, &block)
+  end
+
   def achieve(name, **opts, &block)
     opts[:release_dir] = @release_dir
     goal = Goal.new(name: name, options: opts)
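
`spec` is a straight alias that forwards its name, options, and block to the now-deprecated `context`, so existing definition files keep working unchanged. A sketch of the new spelling, adapted from the template in exe/llmed (the `language: 'ruby'` value is illustrative, not from the diff):

    application "hi world", release: nil, language: 'ruby', output_file: "hi.ollmed" do
      spec "main" do  # formerly: context "main" do
        <<-LLM
        Show to user 'hi world!'.
        LLM
      end
    end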
@@ -85,7 +92,7 @@ class LLMed
   end
 
   def refresh(contexts)
-    @contexts = contexts.map{ |ctx| [ctx.name, {'name' => ctx.name, 'message' => ctx.raw}]}.to_h
+    @contexts = contexts.map { |ctx| [ctx.name, { 'name' => ctx.name, 'message' => ctx.raw }] }.to_h
     dump
   end
 
@@ -98,7 +105,7 @@ class LLMed
       else
         line_diff(current_ctx['message'], other_ctx.raw)
       end
-      if !result.all?{|op, line| op == '=:'}
+      if !result.all? { |op, line| op == '=:' }
        diffs[other_ctx.name] = result
       end
     end
@@ -133,8 +140,8 @@ class LLMed
         i2 += 1
       else
         # Try to find if one of the lines matches later
-        idx1 = lines1[i1+1..-1]&.index(line2)
-        idx2 = lines2[i2+1..-1]&.index(line1)
+        idx1 = lines1[i1 + 1..-1]&.index(line2)
+        idx2 = lines2[i2 + 1..-1]&.index(line1)
 
         if !idx1.nil? && (idx2.nil? || idx1 <= idx2)
           result << ["-:", line1]
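
For orientation: the hunks around `line_diff` show it comparing two texts line by line and emitting op-tagged pairs, where `'=:'` marks an unchanged line and `'-:'` a line removed from the first text; the tag for added lines is not visible in this diff, so the `'+:'` below is a guess. The reformatted lookahead `lines1[i1 + 1..-1]&.index(line2)` checks whether the mismatched line reappears later on the other side before deciding which cursor to advance. Assumed output shape:

    # Inferred from result.all? { |op, line| op == '=:' } and
    # result << ["-:", line1] in the surrounding hunks; not an exact transcript.
    line_diff("a\nb", "a\nc")
    # => [["=:", "a"], ["-:", "b"], ["+:", "c"]]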
@@ -161,13 +168,13 @@ class LLMed
         @contexts = JSON.load(f.read)['contexts']
       end
     else
-      @contexts = default.map{ |ctx| [ctx.name, {'name' => ctx.name, 'message' => ctx.raw}]}.to_h
+      @contexts = default.map { |ctx| [ctx.name, { 'name' => ctx.name, 'message' => ctx.raw }] }.to_h
     end
   end
 
   def dump
     File.open(@snapshot_file, 'w') do |file|
-      file.write(JSON.dump({'contexts' => @contexts}))
+      file.write(JSON.dump({ 'contexts' => @contexts }))
     end
   end
 end
@@ -216,7 +223,6 @@ class LLMed
   end
 
   def patch_or_create(output)
-
     output_content = output
 
     if @release && File.exist?(release_source_code) && !release_contexts.empty?
@@ -246,6 +252,7 @@ class LLMed
     end
 
     return unless @output_file.is_a?(String)
+
     output_file = Pathname.new(@output_dir) + @output_file
     FileUtils.cp(output_file, release_source_code)
     FileUtils.cp(output_file, release_main_source_code)
@@ -60,7 +60,8 @@ class LLMed
     ", input_variables: %w[language source_code code_comment_begin code_comment_end update_context_digests changes_of_contexts goals])
   end
 
-  def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [], changes_of_contexts: '', goals: '')
+  def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [],
+             changes_of_contexts: '', goals: '')
     @prompt.format(language: language,
                    source_code: source_code,
                    code_comment_begin: code_comment_begin,
@@ -102,33 +103,35 @@ class LLMed
   end
 
   def llm
-    case @provider
-    when :openai
-      LLMed::LLM::OpenAI.new(
-        logger: @logger,
-        api_key: @provider_api_key,
-        default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
-      )
-    when :anthropic
-      LLMed::LLM::Anthropic.new(
-        logger: @logger,
-        api_key: @provider_api_key,
-        default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
-      )
-    when :like_openai
-      LLMed::LLM::LikeOpenAI.new(
-        logger: @logger,
-        api_key: @provider_api_key,
-        default_options: { temperature: 0.7, chat_model: @provider_model },
-        llm_options: @provider_options
-      )
-    when :test
-      LLMed::LLM::Test.new
-    when nil
-      raise 'Please set the provider with `set_llm(provider, api_key, model)`'
-    else
-      raise "not implemented provider #{@provider}"
-    end
+    llm_instance = case @provider
+                   when :openai
+                     LLMed::LLM::OpenAI.new(
+                       logger: @logger,
+                       api_key: @provider_api_key,
+                       default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
+                     )
+                   when :anthropic
+                     LLMed::LLM::Anthropic.new(
+                       logger: @logger,
+                       api_key: @provider_api_key,
+                       default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
+                     )
+                   when :like_openai
+                     LLMed::LLM::LikeOpenAI.new(
+                       logger: @logger,
+                       api_key: @provider_api_key,
+                       default_options: { temperature: 0.7, chat_model: @provider_model },
+                       llm_options: @provider_options
+                     )
+                   when :test
+                     LLMed::LLM::Test.new
+                   when nil
+                     raise 'Please set the provider with `set_llm(provider, api_key, model)`'
+                   else
+                     raise "not implemented provider #{@provider}"
+                   end
+
+    LLMed::LLM::Agent.new(llm_instance)
   end
 end
 end
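
Beyond the reindentation, this hunk carries the one behavioral change in the factory: `llm` still selects a provider from `@provider`, but the chosen instance is now wrapped in `LLMed::LLM::Agent` before being returned, so callers transparently get the assistant-driven `chat` while the `Response` contract stays the same. A sketch of the resulting call path (the `configuration` variable and message texts are illustrative; class and method names come from the diff):

    agent = configuration.llm  # => LLMed::LLM::Agent wrapping e.g. the OpenAI provider
    response = agent.chat(messages: [
      LLMed::LLM::Message::System.new('You are a code generator.'),
      LLMed::LLM::Message::User.new("Show to user 'hi world!'.")
    ])
    response.source_code  # code fences already stripped via the provider's source_code
    response.total_tokens # now reported by the Langchain::Assistant run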
data/lib/llmed/goal.rb CHANGED
@@ -8,7 +8,6 @@ class LLMed
    def initialize(name:, options: {})
      @name = name
      @options = options
-
    end
 
    def llm(message)
@@ -20,7 +19,7 @@
    end
 
    def message?
-       !(@message.nil? || @message.empty?)
+      !(@message.nil? || @message.empty?)
    end
  end
 end
@@ -1,9 +1,9 @@
-#<llmed-code context='Library LLMed::LiterateProgramming::Markdown' digest='9c0e3f61ab4cdc3c56c29230a800487dd1a7ef0d929c843fd2461907d0831ab2' after=''>
+# <llmed-code context='Library LLMed::LiterateProgramming::Markdown' digest='9c0e3f61ab4cdc3c56c29230a800487dd1a7ef0d929c843fd2461907d0831ab2' after=''>
 class LLMed::LiterateProgramming::Markdown
   def parse(input)
     contexts = []
     current_context = { type: :context, title: "_default", content: [] }
-    
+
     input.each_line do |line|
       if line.strip =~ /^# (.+)$/
         contexts << current_context unless current_context[:content].empty?
@@ -23,4 +23,4 @@ class LLMed::LiterateProgramming::Markdown
     contexts
   end
 end
-#</llmed-code>
+# </llmed-code>
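
These two hunks mostly adjust spacing inside the generated `llmed-code` comment markers, but their context lines document the literate `Markdown#parse` contract: parsing starts in a `_default` context and every `# title` line begins a new one, with subsequent lines accumulated as its content. A hedged usage sketch (the return shape is inferred from the visible fragment, not an exact transcript):

    input = <<~MD
      # main
      Show to user 'hi world!'.
    MD
    LLMed::LiterateProgramming::Markdown.new.parse(input)
    # => [{ type: :context, title: "main", content: [...] }]  (inferred shape)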
@@ -41,7 +41,7 @@ class LLMed
           context[:content] += "#{URI.open(item_content[:reference]).read}\n"
         end
       end
-        contexts << context
+      contexts << context
     end
   end
 
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llmed
 version: !ruby/object:Gem::Version
-  version: 0.6.1
+  version: 0.6.2
 platform: ruby
 authors:
 - Jovany Leandro G.C
@@ -93,9 +93,7 @@ files:
 - README.md
 - exe/llmed
 - exe/llmed.literate
-- lib/#llm.rb#
 - lib/llm.rb
-- lib/llm.rb~
 - lib/llmed.rb
 - lib/llmed/application.rb
 - lib/llmed/configuration.rb
data/lib/#llm.rb# DELETED
@@ -1,114 +0,0 @@
-require 'openai'
-require 'langchain'
-
-Langchain.logger.level = Logger::ERROR
-
-class LLMed
-  module LLM
-    module Message
-      System = Struct.new(:content)
-      User = Struct.new(:content)
-    end
-
-    module Template
-      def self.build(template:, input_variables:)
-        Langchain::Prompt::PromptTemplate.new(template: template, input_variables: input_variables)
-      end
-    end
-
-    Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
-
-    class OpenAI
-
-      DEFAULT_URI_BASE = "https://api.openai.com/".freeze
-      MAX_TOKENS = 8192
-
-      def initialize(**args)
-        @logger = args.delete(:logger)
-        @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
-      end
-
-      def chat(messages: [])
-        messages = messages.map do |m|
-          case messages
-          when Message::System
-            { role: 'system', content: m.content }
-          when Message::User
-            { role: 'user', content: m.content }
-          end
-        end
-        messages.e
-        start = Time.now
-        llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
-        warn_token_limits(llm_response)
-
-        stop = Time.now
-        Response.new({ provider: provider,
-                       model: @llm.chat_parameters[:model],
-                       duration_seconds: stop.to_i - start.to_i,
-                       source_code: source_code(llm_response.chat_completion),
-                       total_tokens: llm_response.total_tokens })
-      end
-
-      private
-      def warn_token_limits(llm_response)
-        if llm_response.completion_tokens >= MAX_TOKENS
-          @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
-        end
-      end
-
-      def llm_arguments(args)
-        args
-      end
-
-      def provider
-        :openai
-      end
-
-      def source_code(content)
-        content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
-      end
-    end
-
-    class Anthropic < OpenAI
-      private
-
-      def llm_arguments(args)
-        @logger = args.delete(:logger)
-        args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
-      end
-
-      def provider
-        :anthropic
-      end
-    end
-
-    class LikeOpenAI < OpenAI
-      private
-
-      def llm_arguments(args)
-        args
-      end
-
-      def provider
-        :like_openai
-      end
-    end
-
-    class Test
-      def initialize
-        @output = ''
-      end
-
-      def chat(messages: [])
-        @output = messages.map { |m| m[:content] }.join("\n")
-
-        Response.new({ provider: :test,
-                       model: 'test',
-                       duration_seconds: 0,
-                       source_code: @output,
-                       total_tokens: 0 })
-      end
-    end
-  end
-end
data/lib/llm.rb~ DELETED
@@ -1,114 +0,0 @@
-require 'openai'
-require 'langchain'
-
-Langchain.logger.level = Logger::ERROR
-
-class LLMed
-  module LLM
-    module Message
-      System = Struct.new(:content)
-      User = Struct.new(:content)
-    end
-
-    module Template
-      def self.build(template:, input_variables:)
-        Langchain::Prompt::PromptTemplate.new(template: template, input_variables: input_variables)
-      end
-    end
-
-    Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
-
-    class OpenAI
-
-      DEFAULT_URI_BASE = "https://api.openai.com/".freeze
-      MAX_TOKENS = 8192
-
-      def initialize(**args)
-        @logger = args.delete(:logger)
-        @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
-      end
-
-      def chat(messages: [])
-        messages = messages.map do |m|
-          case m
-          when Message::System
-            { role: 'system', content: m.content }
-          when Message::User
-            { role: 'user', content: m.content }
-          end
-        end
-
-        start = Time.now
-        llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
-        warn_token_limits(llm_response)
-
-        stop = Time.now
-        Response.new({ provider: provider,
-                       model: @llm.chat_parameters[:model],
-                       duration_seconds: stop.to_i - start.to_i,
-                       source_code: source_code(llm_response.chat_completion),
-                       total_tokens: llm_response.total_tokens })
-      end
-
-      private
-      def warn_token_limits(llm_response)
-        if llm_response.completion_tokens >= MAX_TOKENS
-          @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
-        end
-      end
-
-      def llm_arguments(args)
-        args
-      end
-
-      def provider
-        :openai
-      end
-
-      def source_code(content)
-        content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
-      end
-    end
-
-    class Anthropic < OpenAI
-      private
-
-      def llm_arguments(args)
-        @logger = args.delete(:logger)
-        args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
-      end
-
-      def provider
-        :anthropic
-      end
-    end
-
-    class LikeOpenAI < OpenAI
-      private
-
-      def llm_arguments(args)
-        args
-      end
-
-      def provider
-        :like_openai
-      end
-    end
-
-    class Test
-      def initialize
-        @output = ''
-      end
-
-      def chat(messages: [])
-        @output = messages.map { |m| m[:content] }.join("\n")
-
-        Response.new({ provider: :test,
-                       model: 'test',
-                       duration_seconds: 0,
-                       source_code: @output,
-                       total_tokens: 0 })
-      end
-    end
-  end
-end