llmed 0.4.4 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 48b4c352f5fd54aa841fe411fd7f5025f1c98f40b8a287589aeb46ad213def68
4
- data.tar.gz: c53e2efec6164261d6985f9297b101b363d1e9da432123ddeac1e369a7f045d2
3
+ metadata.gz: 87fc5d9fa3991a03d2055b3da5f8ec1203af3e987c6ff711353a3d617f3a5add
4
+ data.tar.gz: e4b535f0353bc4c575d84cb0648a7d8d38bc62b3d585f87f9eb4fde18b36d835
5
5
  SHA512:
6
- metadata.gz: 1b228350f691be1762449160bafde3f3c3eba0cc180fd918207f09a855939d0ec784f53861f72304a61d6c4c2adfd525d4e29cf868c1bc1d1dbfb9aa0ff7ad15
7
- data.tar.gz: 7eea2cc52879f589c1b4612409c7c0923aa88135bcb705817966fb4827d449032cc528891140e37a088ff0145fe792982f068162b716785d41e5c3ac736a8bfd
6
+ metadata.gz: bf87dca2cc6babed42917e624511032e5f9747679244199a8340f6f2cbeca95bfa9a3fbf839a5ede32051138f7ae18affe154fe20177a9ae1ca20b03052e6cd7
7
+ data.tar.gz: 6243e74686ffc0aabfe27310216fab81a1dd5a6a7ea987da095b579eb6ba23323e6f68f26b948f4899d3b52c7f01ecf6ceb3ada1eaa8512c26b897c06eccf89a
data/README.md CHANGED
@@ -17,6 +17,8 @@ In classic terms the LLM is the Compiler, Source Code is the Binary, the Program
17
17
  set_llm provider: :like_openai, api_key: ENV['TOGETHERAI_API_KEY'], model: 'Qwen/Qwen2.5-Coder-32B-Instruct', options: {uri_base: 'https://api.together.xyz/v1'}
18
18
 
19
19
  application "MINI COUNTER", release: nil, language: :node, output_file: "minicounter.ollmed" do
20
+ achieve('coding guidelines') { 'Add top comment with a technical summary of the implementation' }
21
+
20
22
  # Most stable context: if this changes, all subsequent context will be recompiled.
21
23
  context "dependencies" do
22
24
  <<-LLM
@@ -66,12 +68,43 @@ add CORS endpoints.
66
68
  ```
67
69
  then compile using command `llmed.literate`.
68
70
 
71
+ ## HOWTO Programming using LLMED
72
+
73
+ Programming with LLMED involves breaking down the problem into smaller contexts, where each context must be connected to the next, creating a chain of contexts that expresses the final solution (program/application/software). The final code will map each context to a block of code (module, function, or statements—this is determined by the LLM), so any changes to a context will be reflected in the source code. This is important to keep in mind. For example, it is not the same to write:
74
+
75
+ ```
76
+ # Dependencies
77
+ ...
78
+ # Application
79
+ ...
80
+ ```
81
+
82
+ as
83
+
84
+ ```
85
+ # Application
86
+ ...
87
+ # Dependencies
88
+ ...
89
+ ```
90
+
91
+ !!The LLM can produce unexpected source code when the contexts are ordered incorrectly.
92
+
93
+ At the top of the document, write the most stable concepts (the contexts that don't change frequently), going down to the most unstable concepts (the contexts that are expected to change more frequently). This ordering matters for two reasons:
94
+
95
+ 1. The map between context and code block.
96
+ 2. Rebuilding of contexts: LLMed assumes that there is a unique chain, so it will recompile from the changed context to the end of the chain.
97
+
98
+ So, programming with LLMed means being aware of the technology (programming language, libraries, software architecture, tools, etc.). LLMed's job is to provide a free-form natural language programming compiler.
99
+
100
+
69
101
  ## Programming flow
70
102
 
71
- * Cycle
103
+ 1. Cycle
72
104
  * Edit application.
73
105
  * Once you agree with the current state of the application, increase the value of the `release` attribute
74
- * Commit the release file (.release) and the source code (.llmed).
106
+ 2. Commit the release file (.release), the source code (.llmed), and the snapshot (.snapshot).
107
+ 3. Go to 1.
75
108
 
76
109
  # Usage
77
110
 
data/lib/#llm.rb# ADDED
@@ -0,0 +1,114 @@
1
+ require 'openai'
2
+ require 'langchain'
3
+
4
+ Langchain.logger.level = Logger::ERROR
5
+
6
+ class LLMed
7
+ module LLM
8
+ module Message
9
+ System = Struct.new(:content)
10
+ User = Struct.new(:content)
11
+ end
12
+
13
+ module Template
14
+ def self.build(template:, input_variables:)
15
+ Langchain::Prompt::PromptTemplate.new(template: template, input_variables: input_variables)
16
+ end
17
+ end
18
+
19
+ Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
20
+
21
+ class OpenAI
22
+
23
+ DEFAULT_URI_BASE = "https://api.openai.com/".freeze
24
+ MAX_TOKENS = 8192
25
+
26
+ def initialize(**args)
27
+ @logger = args.delete(:logger)
28
+ @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
29
+ end
30
+
31
+ def chat(messages: [])
32
+ messages = messages.map do |m|
33
+ case messages
34
+ when Message::System
35
+ { role: 'system', content: m.content }
36
+ when Message::User
37
+ { role: 'user', content: m.content }
38
+ end
39
+ end
40
+ messages.e
41
+ start = Time.now
42
+ llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
43
+ warn_token_limits(llm_response)
44
+
45
+ stop = Time.now
46
+ Response.new({ provider: provider,
47
+ model: @llm.chat_parameters[:model],
48
+ duration_seconds: stop.to_i - start.to_i,
49
+ source_code: source_code(llm_response.chat_completion),
50
+ total_tokens: llm_response.total_tokens })
51
+ end
52
+
53
+ private
54
+ def warn_token_limits(llm_response)
55
+ if llm_response.completion_tokens >= MAX_TOKENS
56
+ @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
57
+ end
58
+ end
59
+
60
+ def llm_arguments(args)
61
+ args
62
+ end
63
+
64
+ def provider
65
+ :openai
66
+ end
67
+
68
+ def source_code(content)
69
+ content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
70
+ end
71
+ end
72
+
73
+ class Anthropic < OpenAI
74
+ private
75
+
76
+ def llm_arguments(args)
77
+ @logger = args.delete(:logger)
78
+ args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
79
+ end
80
+
81
+ def provider
82
+ :anthropic
83
+ end
84
+ end
85
+
86
+ class LikeOpenAI < OpenAI
87
+ private
88
+
89
+ def llm_arguments(args)
90
+ args
91
+ end
92
+
93
+ def provider
94
+ :like_openai
95
+ end
96
+ end
97
+
98
+ class Test
99
+ def initialize
100
+ @output = ''
101
+ end
102
+
103
+ def chat(messages: [])
104
+ @output = messages.map { |m| m[:content] }.join("\n")
105
+
106
+ Response.new({ provider: :test,
107
+ model: 'test',
108
+ duration_seconds: 0,
109
+ source_code: @output,
110
+ total_tokens: 0 })
111
+ end
112
+ end
113
+ end
114
+ end
data/lib/llm.rb~ ADDED
@@ -0,0 +1,114 @@
1
+ require 'openai'
2
+ require 'langchain'
3
+
4
+ Langchain.logger.level = Logger::ERROR
5
+
6
+ class LLMed
7
+ module LLM
8
+ module Message
9
+ System = Struct.new(:content)
10
+ User = Struct.new(:content)
11
+ end
12
+
13
+ module Template
14
+ def self.build(template:, input_variables:)
15
+ Langchain::Prompt::PromptTemplate.new(template: template, input_variables: input_variables)
16
+ end
17
+ end
18
+
19
+ Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
20
+
21
+ class OpenAI
22
+
23
+ DEFAULT_URI_BASE = "https://api.openai.com/".freeze
24
+ MAX_TOKENS = 8192
25
+
26
+ def initialize(**args)
27
+ @logger = args.delete(:logger)
28
+ @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
29
+ end
30
+
31
+ def chat(messages: [])
32
+ messages = messages.map do |m|
33
+ case m
34
+ when Message::System
35
+ { role: 'system', content: m.content }
36
+ when Message::User
37
+ { role: 'user', content: m.content }
38
+ end
39
+ end
40
+
41
+ start = Time.now
42
+ llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
43
+ warn_token_limits(llm_response)
44
+
45
+ stop = Time.now
46
+ Response.new({ provider: provider,
47
+ model: @llm.chat_parameters[:model],
48
+ duration_seconds: stop.to_i - start.to_i,
49
+ source_code: source_code(llm_response.chat_completion),
50
+ total_tokens: llm_response.total_tokens })
51
+ end
52
+
53
+ private
54
+ def warn_token_limits(llm_response)
55
+ if llm_response.completion_tokens >= MAX_TOKENS
56
+ @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
57
+ end
58
+ end
59
+
60
+ def llm_arguments(args)
61
+ args
62
+ end
63
+
64
+ def provider
65
+ :openai
66
+ end
67
+
68
+ def source_code(content)
69
+ content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
70
+ end
71
+ end
72
+
73
+ class Anthropic < OpenAI
74
+ private
75
+
76
+ def llm_arguments(args)
77
+ @logger = args.delete(:logger)
78
+ args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
79
+ end
80
+
81
+ def provider
82
+ :anthropic
83
+ end
84
+ end
85
+
86
+ class LikeOpenAI < OpenAI
87
+ private
88
+
89
+ def llm_arguments(args)
90
+ args
91
+ end
92
+
93
+ def provider
94
+ :like_openai
95
+ end
96
+ end
97
+
98
+ class Test
99
+ def initialize
100
+ @output = ''
101
+ end
102
+
103
+ def chat(messages: [])
104
+ @output = messages.map { |m| m[:content] }.join("\n")
105
+
106
+ Response.new({ provider: :test,
107
+ model: 'test',
108
+ duration_seconds: 0,
109
+ source_code: @output,
110
+ total_tokens: 0 })
111
+ end
112
+ end
113
+ end
114
+ end
@@ -1,6 +1,8 @@
1
1
  # Copyright 2025 Jovany Leandro G.C <bit4bit@riseup.net>
2
2
  # frozen_string_literal: true
3
3
 
4
+ require 'set'
5
+
4
6
  class LLMed
5
7
  class Application
6
8
  attr_reader :contexts, :name, :language
@@ -29,16 +31,20 @@ class LLMed
29
31
  end
30
32
 
31
33
  def initialize(name:, language:, output_file:, block:, logger:, release:, release_dir:, output_dir:)
34
+ snapshot_file = Pathname.new(release_dir) + "#{output_file}.snapshot"
35
+
32
36
  @name = name
33
37
  @output_file = output_file
34
38
  @language = language.to_sym
35
39
  @code_comment = CodeComment.new(language)
36
40
  @block = block
37
41
  @contexts = []
42
+ @goals = []
38
43
  @logger = logger
39
44
  @release = release
40
45
  @release_dir = release_dir
41
46
  @output_dir = output_dir
47
+ @snapshot = Snapshot.new(snapshot_file)
42
48
  end
43
49
 
44
50
  # Example:
@@ -52,11 +58,127 @@ class LLMed
52
58
  @contexts << ctx
53
59
  end
54
60
 
61
+ def achieve(name, **opts, &block)
62
+ opts[:release_dir] = @release_dir
63
+ goal = Goal.new(name: name, options: opts)
64
+ output = goal.instance_eval(&block)
65
+ goal.llm(output) unless goal.message?
66
+
67
+ @goals << goal
68
+ end
69
+
55
70
  def evaluate
56
71
  instance_eval(&@block)
57
72
  end
58
73
 
59
- def prepare
74
+ class Snapshot
75
+ attr_reader :snapshot_file
76
+
77
+ def initialize(snapshot_file)
78
+ @snapshot_file = snapshot_file
79
+ @contexts = []
80
+ end
81
+
82
+ def sync(default)
83
+ load(default)
84
+ dump
85
+ end
86
+
87
+ def refresh(contexts)
88
+ @contexts = contexts.map{ |ctx| [ctx.name, {'name' => ctx.name, 'message' => ctx.raw}]}.to_h
89
+ dump
90
+ end
91
+
92
+ def diff(other_contexts)
93
+ diffs = {}
94
+ other_contexts.each do |other_ctx|
95
+ current_ctx = @contexts[other_ctx.name]
96
+ result = if current_ctx.nil?
97
+ other_ctx.raw.split("\n").map { |line| ["+:", line] }
98
+ else
99
+ line_diff(current_ctx['message'], other_ctx.raw)
100
+ end
101
+ if !result.all?{|op, line| op == '=:'}
102
+ diffs[other_ctx.name] = result
103
+ end
104
+ end
105
+
106
+ diffs
107
+ end
108
+
109
+ private
110
+
111
+ def line_diff(text1, text2)
112
+ lines1 = text1.split("\n")
113
+ lines2 = text2.split("\n")
114
+
115
+ result = []
116
+
117
+ i1 = 0
118
+ i2 = 0
119
+
120
+ while i1 < lines1.size || i2 < lines2.size
121
+ line1 = lines1[i1]
122
+ line2 = lines2[i2]
123
+
124
+ if i1 < lines1.size && i2 < lines2.size && line1 == line2
125
+ result << ["=:", line1]
126
+ i1 += 1
127
+ i2 += 1
128
+ elsif i1 < lines1.size && (i2 >= lines2.size || !lines2[i2..-1].include?(line1))
129
+ result << ["-:", line1]
130
+ i1 += 1
131
+ elsif i2 < lines2.size && (i1 >= lines1.size || !lines1[i1..-1].include?(line2))
132
+ result << ["+:", line2]
133
+ i2 += 1
134
+ else
135
+ # Try to find if one of the lines matches later
136
+ idx1 = lines1[i1+1..-1]&.index(line2)
137
+ idx2 = lines2[i2+1..-1]&.index(line1)
138
+
139
+ if !idx1.nil? && (idx2.nil? || idx1 <= idx2)
140
+ result << ["-:", line1]
141
+ i1 += 1
142
+ elsif !idx2.nil?
143
+ result << ["+:", line2]
144
+ i2 += 1
145
+ else
146
+ # Lines differ, treat both as deleted and added
147
+ result << ["-:", line1]
148
+ result << ["+:", line2]
149
+ i1 += 1
150
+ i2 += 1
151
+ end
152
+ end
153
+ end
154
+
155
+ result
156
+ end
157
+
158
+ def load(default)
159
+ if File.exist?(@snapshot_file)
160
+ File.open(@snapshot_file, 'r') do |f|
161
+ @contexts = JSON.load(f.read)['contexts']
162
+ end
163
+ else
164
+ @contexts = default.map{ |ctx| [ctx.name, {'name' => ctx.name, 'message' => ctx.raw}]}.to_h
165
+ end
166
+ end
167
+
168
+ def dump
169
+ File.open(@snapshot_file, 'w') do |file|
170
+ file.write(JSON.dump({'contexts' => @contexts}))
171
+ end
172
+ end
173
+ end
174
+
175
+ def prepare_snapshot
176
+ @logger.info("APPLICATION #{@name} PREPARING SNAPSHOT #{@snapshot.snapshot_file}")
177
+
178
+ @snapshot.sync(@contexts)
179
+ end
180
+
181
+ def prepare_release
60
182
  @logger.info("APPLICATION #{@name} COMPILING FOR #{@language} RELEASE #{@release}")
61
183
  return unless @output_file.is_a?(String)
62
184
  return unless @release
@@ -64,9 +186,7 @@ class LLMed
64
186
  output_file = Pathname.new(@output_dir) + @output_file
65
187
 
66
188
  if @release && File.exist?(output_file) && !File.exist?(release_source_code)
67
- FileUtils.cp(output_file, release_source_code)
68
- FileUtils.cp(output_file, release_main_source_code)
69
- @logger.info("APPLICATION #{@name} RELEASE FILE #{release_source_code}")
189
+
70
190
  elsif @release && !File.exist?(output_file) && File.exist?(release_main_source_code)
71
191
  FileUtils.mkdir_p(File.dirname(output_file))
72
192
  FileUtils.cp(release_main_source_code, output_file)
@@ -96,6 +216,7 @@ class LLMed
96
216
  end
97
217
 
98
218
  def patch_or_create(output)
219
+
99
220
  output_content = output
100
221
 
101
222
  if @release && File.exist?(release_source_code) && !release_contexts.empty?
@@ -117,14 +238,37 @@ class LLMed
117
238
  output_file(@output_dir) do |file|
118
239
  file.write(output_content)
119
240
  end
241
+
242
+ # only update snapshot if changes are made
243
+ if !File.exist?(release_source_code)
244
+ @snapshot.refresh(@contexts)
245
+ @logger.info("APPLICATION #{@name} SNAPSHOT REFRESHED")
246
+ end
247
+
248
+ return unless @output_file.is_a?(String)
249
+ output_file = Pathname.new(@output_dir) + @output_file
250
+ FileUtils.cp(output_file, release_source_code)
251
+ FileUtils.cp(output_file, release_main_source_code)
252
+ @logger.info("APPLICATION #{@name} RELEASE FILE #{release_source_code}")
120
253
  end
121
254
 
122
255
  def system_prompt(configuration)
256
+ contexts_diffs = @snapshot.diff(contexts)
257
+ changes_of_contexts = ''
258
+ if contexts_diffs.any?
259
+ contexts_diffs.each do |context_name, diffs|
260
+ changes_of_contexts += "# Context: #{context_name}\n"
261
+ changes_of_contexts += diffs.map { |op, line| "#{op} #{line}" }.join("\n")
262
+ end
263
+ end
264
+
123
265
  configuration.prompt(language: language,
124
266
  source_code: source_code,
125
267
  code_comment_begin: @code_comment.begin,
126
268
  code_comment_end: @code_comment.end,
127
- update_context_digests: digests_of_context_to_update)
269
+ update_context_digests: digests_of_context_to_update,
270
+ changes_of_contexts: changes_of_contexts,
271
+ goals: goals)
128
272
  end
129
273
 
130
274
  def rebuild?
@@ -169,6 +313,10 @@ class LLMed
169
313
 
170
314
  private
171
315
 
316
+ def goals
317
+ @goals.map(&:message).join("\n")
318
+ end
319
+
172
320
  def digests_of_context_to_update
173
321
  update_context_digest = []
174
322
 
@@ -7,36 +7,67 @@ class LLMed
7
7
  @logger = logger
8
8
  # Manual tested, pass 5 times execution
9
9
  @prompt = LLMed::LLM::Template.build(template: "
10
- You are a software developer with knowledge only of the programming language {language}, following the SOLID principles strictly, you always use only imperative and functional programming, design highly isolated components.
11
- Don't make any assumptions/expectations or wait for implementations, always implement the necessary.
12
- The contexts are declarations of how the source code will be (not a file) ensure to follow this always.
13
- The contexts are connected as a flat linked list.
14
- All the contexts represent one source code.
15
- Always exists one-to-one correspondence between context and source code.
16
- Always include the properly escaped comment: LLMED-COMPILED.
17
-
18
- You must only modify the following source code:
19
- ```{language}
20
- {source_code}
21
- ```
22
-
23
- Only generate source code of the context who digest belongs to {update_context_digests}.
24
-
25
- Wrap with comment every code that belongs to the indicated context, example in {language}:
26
- {code_comment_begin}<llmed-code context='here context name' digest='....' link-digest='next digest' after='here same value of attribute link-digest'>{code_comment_end}
27
- ...
28
- {code_comment_begin}</llmed-code>{code_comment_end}
29
-
30
- !!Your response must contain only the generated source code with all indicated contexts, with no additional text or comments, and you must ensure that runs correctly on the first attempt.
31
- ", input_variables: %w[language source_code code_comment_begin code_comment_end update_context_digests])
10
+ You are a software developer specialized in the programming language {language}. Follow SOLID principles strictly. Use only imperative and functional programming styles and design highly isolated components. You have full access to the standard library and third-party packages only if explicitly allowed.
11
+ Hard requirements: produce complete, executable, and compilable source code — no placeholders, no pseudo-código, no partial implementations, no explanations. If anything below cannot be satisfied, produce a runtime error implementation that clearly fails fast.
12
+
13
+ The input contexts are functional sections of a single large source file (not separate files). Contexts are linked as a flat linked list. There must be a one-to-one correspondence between each context and the code generated for that context. You must only generate source code for the context(s) whose digest is listed in {update_context_digests}.
14
+
15
+ Always include the escaped literal comment token LLMED-COMPILED somewhere in the generated code.
16
+
17
+ You must only modify the following source code that is provided between the code fences:
18
+ ```{language}
19
+ {source_code}
20
+ ```
21
+
22
+ Strict formatting for context wrappers: wrap every context implementation with the exact comment markers below. Use the literal placeholders {code_comment_begin} and {code_comment_end} replaced with the exact comment open/close strings for the target language. Example wrapper (replace placeholders when running the prompt):
23
+ {code_comment_begin}<llmed-code context='context name' digest='CURRENT_DIGEST' link-digest='NEXT_DIGEST' after='NEXT_DIGEST'>{code_comment_end}
24
+ ... COMPLETE, RUNNABLE implementation for that context ...
25
+ {code_comment_begin}</llmed-code>{code_comment_end}
26
+
27
+ Behavioral rules (must be obeyed):
28
+ 1. No comments-only outputs. If your natural answer would be comments, instead implement executable code that performs the described behavior. Do not output explanatory text outside code blocks — output only source code for the indicated contexts.
29
+ 2. All functions/methods must have bodies implementing the intended behavior. If external information is missing, implement a reasonable, deterministic default rather than leaving a stub.
30
+ 3. Fail-fast fallback: if the requested context genuinely cannot be implemented, include a clear runtime failure function implementation_impossible() that raises/prints a single machine-readable error (e.g. throws an exception with message IMPLEMENTATION-IMPOSSIBLE) also a technical description with the reasons and still compiles.
31
+ 4. One-to-one mapping: produce exactly one code block per digest requested. Do not add unrelated helper contexts unless they are wrapped and linked to an indicated digest; if helpers are necessary, include them inside the same context wrapper.
32
+ 5. Include the literal LLMED-COMPILED comment somewhere inside the code.
33
+ 6. Do not output any text outside the source code. The assistant response must be only source code for the requested context(s).
34
+ 7. Absolute goal precedence: Before generating code, always merge the goals with the context. If any context conflicts with a goal, the goal must override the context's intended purpose, not its implementation details.
35
+ 8. All user-facing strings, messages, and output must comply with the goals. This overrides literal instructions in context descriptions if there is a conflict.
36
+ 9. Step-by-step plan to follow:
37
+ a. Read goals and extract all global rules (e.g., language, style, behavior).
38
+ b. Read the context descriptions.
39
+ c. Merge context with the goal; if there is any conflict, modify the context output to satisfy the goal.
40
+ d. Generate code fully complying with both the merged specification and the required context wrapper format.
41
+
42
+ All behavior described by contexts marked with '-:' in <changes> must be completely removed from the generated source code.
43
+ Do not leave any code, print statements, functions, or references implementing deleted contexts.
44
+ Each context listed in '+:' or '=:' must be implemented exactly according to its description.
45
+ Behavior from removed contexts must not appear anywhere in the output, even indirectly.
46
+ <changes>
47
+ {changes_of_contexts}
48
+ </changes>
49
+
50
+
51
+ <goals>
52
+ {goals}
53
+ <goals>
54
+
55
+ Goals and context are orthogonal: goals orient the context.
56
+
57
+ Plan: Before writing any code, ensure you fully integrate both the overarching specifications (goal sections) and the context-level descriptions during code generation.
58
+
59
+ Output requirement: your response must contain only the generated source code for the indicated context(s), with the required wrapper comments and the test harness; nothing else.
60
+ ", input_variables: %w[language source_code code_comment_begin code_comment_end update_context_digests changes_of_contexts goals])
32
61
  end
33
62
 
34
- def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [])
63
+ def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [], changes_of_contexts: '', goals: '')
35
64
  @prompt.format(language: language,
36
65
  source_code: source_code,
37
66
  code_comment_begin: code_comment_begin,
38
67
  code_comment_end: code_comment_end,
39
- update_context_digests: update_context_digests.join(','))
68
+ update_context_digests: update_context_digests.join(','),
69
+ changes_of_contexts: changes_of_contexts,
70
+ goals: goals)
40
71
  end
41
72
 
42
73
  # Change the default prompt, input variables: language, source_code
data/lib/llmed/context.rb CHANGED
@@ -67,6 +67,10 @@ class LLMed
67
67
  "# Context: \"#{@name}\" Digest: #{digest}\n\n#{@message}"
68
68
  end
69
69
 
70
+ def raw
71
+ @message
72
+ end
73
+
70
74
  def llm(message)
71
75
  @message = message
72
76
  end
data/lib/llmed/goal.rb ADDED
@@ -0,0 +1,26 @@
1
+ # Copyright 2025 Jovany Leandro G.C <bit4bit@riseup.net>
2
+ # frozen_string_literal: true
3
+
4
+ class LLMed
5
+ class Goal
6
+ attr_reader :name, :options
7
+
8
+ def initialize(name:, options: {})
9
+ @name = name
10
+ @options = options
11
+
12
+ end
13
+
14
+ def llm(message)
15
+ @message = message
16
+ end
17
+
18
+ def message
19
+ "<goal name=\"#{@name}\">#{@message}</goal>"
20
+ end
21
+
22
+ def message?
23
+ !(@message.nil? || @message.empty?)
24
+ end
25
+ end
26
+ end
data/lib/llmed/release.rb CHANGED
@@ -120,13 +120,19 @@ class LLMed
120
120
  else
121
121
  user_contexts_iter = user_contexts.dup
122
122
  contexts_iter = contexts.dup
123
- rewire_code_contexts(contexts_iter, user_contexts_iter)
124
- contexts_sorted = contexts_iter
123
+ order_digests = rewire_code_contexts(contexts_iter, user_contexts_iter)
124
+
125
+ contexts_on_digests = order_digests.map { |digest| contexts_iter.find { |ctx| ctx.digest == digest } }
126
+ contexts_missing_digests = contexts_iter.select { |ctx| !order_digests.include?(ctx.digest) }
127
+ contexts_sorted = contexts_on_digests + contexts_missing_digests
125
128
  end
126
129
 
130
+ # Sort contexts so that the latest digest (the one whose 'after' is empty) comes last
127
131
  @contexts = contexts_sorted.sort do |a, b|
128
- if a.digest == b.after
132
+ if a.after.empty? && !b.after.empty?
129
133
  1
134
+ elsif !a.after.empty? && b.after.empty?
135
+ -1
130
136
  else
131
137
  0
132
138
  end
@@ -138,13 +144,18 @@ class LLMed
138
144
  private
139
145
 
140
146
  def rewire_code_contexts(code_contexts, user_contexts)
147
+ order_digests = []
141
148
  user_contexts.each_with_next do |user_context, next_user_context|
142
- ctx = code_contexts.find { |ctx| ctx.digest == user_context.digest }
149
+ ctx = code_contexts.find { |ctx| ctx.name == user_context.name }
143
150
  if ctx
151
+ ctx.digest = user_context.digest
152
+ order_digests << user_context.digest
144
153
  ctx.after = '' if user_contexts.count > 1
145
154
  ctx.after = next_user_context.digest if next_user_context
146
155
  end
147
156
  end
157
+
158
+ order_digests
148
159
  end
149
160
 
150
161
  def initialize(origin, code_comment)
data/lib/llmed.rb CHANGED
@@ -66,10 +66,12 @@ class LLMed
66
66
  def compile_application(app)
67
67
  app.notify('COMPILE START')
68
68
 
69
- app.prepare
69
+ app.prepare_release
70
70
  app.evaluate
71
+ app.prepare_snapshot
71
72
  if app.rebuild?
72
73
  llm = @configuration.llm
74
+
73
75
  messages = [LLMed::LLM::Message::System.new(app.system_prompt(@configuration))]
74
76
  app.contexts.each do |ctx|
75
77
  next if ctx.skip?
@@ -91,6 +93,7 @@ end
91
93
  require_relative 'llm'
92
94
  require_relative 'llmed/configuration'
93
95
  require_relative 'llmed/context'
96
+ require_relative 'llmed/goal'
94
97
  require_relative 'llmed/release'
95
98
  require_relative 'llmed/application'
96
99
  require_relative 'llmed/deployment'
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llmed
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.4.4
4
+ version: 0.6.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Jovany Leandro G.C
@@ -93,12 +93,15 @@ files:
93
93
  - README.md
94
94
  - exe/llmed
95
95
  - exe/llmed.literate
96
+ - lib/#llm.rb#
96
97
  - lib/llm.rb
98
+ - lib/llm.rb~
97
99
  - lib/llmed.rb
98
100
  - lib/llmed/application.rb
99
101
  - lib/llmed/configuration.rb
100
102
  - lib/llmed/context.rb
101
103
  - lib/llmed/deployment.rb
104
+ - lib/llmed/goal.rb
102
105
  - lib/llmed/literate_programming.rb
103
106
  - lib/llmed/literate_programming/markdown.rb
104
107
  - lib/llmed/literate_programming/markdown.rb.release