llmed 0.6.0 → 0.6.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c47ebb44059bae3a1ffc2d27dd064098036ebd390cb677a488761845e13d4b6b
- data.tar.gz: 1442dec8d3c594b8610af95d7edfbdbdee8881ac00a046001cc6fd4eb9c2e68c
+ metadata.gz: 87fc5d9fa3991a03d2055b3da5f8ec1203af3e987c6ff711353a3d617f3a5add
+ data.tar.gz: e4b535f0353bc4c575d84cb0648a7d8d38bc62b3d585f87f9eb4fde18b36d835
  SHA512:
- metadata.gz: 80a7c93eed1106305b848bae99b0fcce744c55bc933538465041e19329ed1e6fe01f398c38ec849f2ae4981ec260024d3a9ded96f1465224a0b9f60cc8924b53
- data.tar.gz: 7c3df54bec3ee2e2f02bd7108c92004e6969bb8019ab5209bda32327cc916492ac2946948b2ce835325b93d33238292187870e68bc35bfa50feefb0b38164263
+ metadata.gz: bf87dca2cc6babed42917e624511032e5f9747679244199a8340f6f2cbeca95bfa9a3fbf839a5ede32051138f7ae18affe154fe20177a9ae1ca20b03052e6cd7
+ data.tar.gz: 6243e74686ffc0aabfe27310216fab81a1dd5a6a7ea987da095b579eb6ba23323e6f68f26b948f4899d3b52c7f01ecf6ceb3ada1eaa8512c26b897c06eccf89a
data/README.md CHANGED
@@ -17,6 +17,8 @@ In classic terms the LLM is the Compiler, Source Code is the Binary, the Program
  set_llm provider: :like_openai, api_key: ENV['TOGETHERAI_API_KEY'], model: 'Qwen/Qwen2.5-Coder-32B-Instruct', options: {uri_base: 'https://api.together.xyz/v1'}

  application "MINI COUNTER", release: nil, language: :node, output_file: "minicounter.ollmed" do
+ achieve('coding guidelines') { 'Add top comment with a technical summary of the implementation' }
+
  # Most stable context: if this changes, all subsequent contexts will be recompiled.
  context "dependencies" do
  <<-LLM
@@ -66,12 +68,43 @@ add CORS endpoints.
  ```
  then compile using command `llmed.literate`.

+ ## HOWTO: Programming with LLMed
+
+ Programming with LLMed means breaking the problem down into smaller contexts, where each context connects to the next, forming a chain of contexts that expresses the final solution (program/application/software). The final code maps each context to a block of code (a module, a function, or plain statements; the LLM decides), so any change to a context is reflected in the source code. This is important to keep in mind. For example, it is not the same to write:
+
+ ```
+ # Dependencies
+ ...
+ # Application
+ ...
+ ```
+
+ as
+
+ ```
+ # Application
+ ...
+ # Dependencies
+ ...
+ ```
+
+ !!The LLM can do unpredictable things when trying to create working source code for the second ordering.
+
+ At the top of the document, write the most stable concepts (the contexts that change least often), moving down to the most unstable ones (the contexts expected to change most often). This ordering serves two purposes:
+
+ 1. It preserves the mapping between each context and its code block.
+ 2. It bounds recompilation: LLMed assumes a single chain, so it recompiles from the changed context to the end of the chain.
+
+ So, programming with LLMed still means being aware of the technology (programming language, libraries, software architecture, tools, etc.). LLMed's job is to provide a free-form natural-language programming compiler.
+
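
A minimal sketch of such a chain (the application name, language, and prompt text are illustrative, following the DSL shown above):

```ruby
# Most stable context first; the most volatile context last.
application "GREETER", release: nil, language: :ruby, output_file: "greeter.rb" do
  # Stable: editing this recompiles every context below it.
  context "dependencies" do
    <<-LLM
    Use only the Ruby standard library.
    LLM
  end

  # Volatile: editing this recompiles only from here to the end of the chain.
  context "application" do
    <<-LLM
    Print a greeting for the name passed as the first command line argument.
    LLM
  end
end
```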
  ## Programming flow

- * Cycle
+ 1. Cycle
  * Edit application.
  * Once you agree with the current state of the application, increase the value of the `release` attribute.
- * Commit the release file (.release) and the source code (.llmed).
+ 2. Commit the release file (.release), the source code (.llmed), and the snapshot (.snapshot); see the sketch after this list.
+ 3. Go to 1.
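
A minimal sketch of one cycle (the application name and release number are illustrative):

```ruby
# Step 1: edit the application; release: nil means still under development.
application "GREETER", release: nil, language: :ruby, output_file: "greeter.rb" do
  # ... contexts ...
end

# Step 2: once you agree with the current state, bump the release attribute,
# recompile, then commit the .llmed, .release, and .snapshot files.
application "GREETER", release: 1, language: :ruby, output_file: "greeter.rb" do
  # ... contexts ...
end
```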

  # Usage
 
data/lib/#llm.rb# ADDED
@@ -0,0 +1,114 @@
+ require 'openai'
+ require 'langchain'
+
+ Langchain.logger.level = Logger::ERROR
+
+ class LLMed
+ module LLM
+ module Message
+ System = Struct.new(:content)
+ User = Struct.new(:content)
+ end
+
+ module Template
+ def self.build(template:, input_variables:)
+ Langchain::Prompt::PromptTemplate.new(template: template, input_variables: input_variables)
+ end
+ end
+
+ Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
+
+ class OpenAI
+
+ DEFAULT_URI_BASE = "https://api.openai.com/".freeze
+ MAX_TOKENS = 8192
+
+ def initialize(**args)
+ @logger = args.delete(:logger)
+ @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
+ end
+
+ def chat(messages: [])
+ messages = messages.map do |m|
+ case m
+ when Message::System
+ { role: 'system', content: m.content }
+ when Message::User
+ { role: 'user', content: m.content }
+ end
+ end
+
+ start = Time.now
+ llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
+ warn_token_limits(llm_response)
+
+ stop = Time.now
+ Response.new({ provider: provider,
+ model: @llm.chat_parameters[:model],
+ duration_seconds: stop.to_i - start.to_i,
+ source_code: source_code(llm_response.chat_completion),
+ total_tokens: llm_response.total_tokens })
+ end
+
+ private
+ def warn_token_limits(llm_response)
+ if llm_response.completion_tokens >= MAX_TOKENS
+ @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
+ end
+ end
+
+ def llm_arguments(args)
+ args
+ end
+
+ def provider
+ :openai
+ end
+
+ def source_code(content)
+ content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
+ end
+ end
+
+ class Anthropic < OpenAI
+ private
+
+ def llm_arguments(args)
+ @logger = args.delete(:logger)
+ args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
+ end
+
+ def provider
+ :anthropic
+ end
+ end
+
+ class LikeOpenAI < OpenAI
+ private
+
+ def llm_arguments(args)
+ args
+ end
+
+ def provider
+ :like_openai
+ end
+ end
+
+ class Test
+ def initialize
+ @output = ''
+ end
+
+ def chat(messages: [])
+ @output = messages.map { |m| m[:content] }.join("\n")
+
+ Response.new({ provider: :test,
+ model: 'test',
+ duration_seconds: 0,
+ source_code: @output,
+ total_tokens: 0 })
+ end
+ end
+ end
+ end
data/lib/llm.rb~ ADDED
@@ -0,0 +1,114 @@
+ require 'openai'
+ require 'langchain'
+
+ Langchain.logger.level = Logger::ERROR
+
+ class LLMed
+ module LLM
+ module Message
+ System = Struct.new(:content)
+ User = Struct.new(:content)
+ end
+
+ module Template
+ def self.build(template:, input_variables:)
+ Langchain::Prompt::PromptTemplate.new(template: template, input_variables: input_variables)
+ end
+ end
+
+ Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
+
+ class OpenAI
+
+ DEFAULT_URI_BASE = "https://api.openai.com/".freeze
+ MAX_TOKENS = 8192
+
+ def initialize(**args)
+ @logger = args.delete(:logger)
+ @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
+ end
+
+ def chat(messages: [])
+ messages = messages.map do |m|
+ case m
+ when Message::System
+ { role: 'system', content: m.content }
+ when Message::User
+ { role: 'user', content: m.content }
+ end
+ end
+
+ start = Time.now
+ llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
+ warn_token_limits(llm_response)
+
+ stop = Time.now
+ Response.new({ provider: provider,
+ model: @llm.chat_parameters[:model],
+ duration_seconds: stop.to_i - start.to_i,
+ source_code: source_code(llm_response.chat_completion),
+ total_tokens: llm_response.total_tokens })
+ end
+
+ private
+ def warn_token_limits(llm_response)
+ if llm_response.completion_tokens >= MAX_TOKENS
+ @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
+ end
+ end
+
+ def llm_arguments(args)
+ args
+ end
+
+ def provider
+ :openai
+ end
+
+ def source_code(content)
+ content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
+ end
+ end
+
+ class Anthropic < OpenAI
+ private
+
+ def llm_arguments(args)
+ @logger = args.delete(:logger)
+ args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
+ end
+
+ def provider
+ :anthropic
+ end
+ end
+
+ class LikeOpenAI < OpenAI
+ private
+
+ def llm_arguments(args)
+ args
+ end
+
+ def provider
+ :like_openai
+ end
+ end
+
+ class Test
+ def initialize
+ @output = ''
+ end
+
+ def chat(messages: [])
+ @output = messages.map { |m| m[:content] }.join("\n")
+
+ Response.new({ provider: :test,
+ model: 'test',
+ duration_seconds: 0,
+ source_code: @output,
+ total_tokens: 0 })
+ end
+ end
+ end
+ end
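
As a rough usage sketch of the client classes above (normally llmed drives them through `set_llm`; the env var, logger setup, and prompts here are assumptions):

```ruby
require 'logger'
require_relative 'llm'

# Build the OpenAI-backed client; remaining keyword args flow into Langchain.
llm = LLMed::LLM::OpenAI.new(api_key: ENV['OPENAI_API_KEY'], logger: Logger.new($stderr))

# Messages are plain structs that #chat converts into role/content hashes.
response = llm.chat(messages: [
  LLMed::LLM::Message::System.new('You are a code generator.'),
  LLMed::LLM::Message::User.new('Print hello world in Ruby.')
])

puts response.source_code  # completion with code fences and language tag stripped
puts response.total_tokens # token usage reported by the provider
```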
@@ -39,6 +39,7 @@ class LLMed
  @code_comment = CodeComment.new(language)
  @block = block
  @contexts = []
+ @goals = []
  @logger = logger
  @release = release
  @release_dir = release_dir
@@ -57,6 +58,15 @@ class LLMed
  @contexts << ctx
  end

+ def achieve(name, **opts, &block)
+ opts[:release_dir] = @release_dir
+ goal = Goal.new(name: name, options: opts)
+ output = goal.instance_eval(&block)
+ goal.llm(output) unless goal.message?
+
+ @goals << goal
+ end
+
  def evaluate
  instance_eval(&@block)
  end
@@ -83,8 +93,11 @@ class LLMed
  diffs = {}
  other_contexts.each do |other_ctx|
  current_ctx = @contexts[other_ctx.name]
- result = line_diff(current_ctx['message'], other_ctx.raw)
- # omit not changes
+ result = if current_ctx.nil?
+ other_ctx.raw.split("\n").map { |line| ["+:", line] }
+ else
+ line_diff(current_ctx['message'], other_ctx.raw)
+ end
  if !result.all?{|op, line| op == '=:'}
  diffs[other_ctx.name] = result
  end
@@ -160,8 +173,6 @@ class LLMed
  end

  def prepare_snapshot
- raise "snapshot preparation require contexts" if @contexts.empty?
-
  @logger.info("APPLICATION #{@name} PREPARING SNAPSHOT #{@snapshot.snapshot_file}")

  @snapshot.sync(@contexts)
@@ -205,6 +216,7 @@ class LLMed
  end

  def patch_or_create(output)
+
  output_content = output

  if @release && File.exist?(release_source_code) && !release_contexts.empty?
@@ -233,6 +245,7 @@ class LLMed
  @logger.info("APPLICATION #{@name} SNAPSHOT REFRESHED")
  end

+ return unless @output_file.is_a?(String)
  output_file = Pathname.new(@output_dir) + @output_file
  FileUtils.cp(output_file, release_source_code)
  FileUtils.cp(output_file, release_main_source_code)
@@ -254,7 +267,8 @@ class LLMed
  code_comment_begin: @code_comment.begin,
  code_comment_end: @code_comment.end,
  update_context_digests: digests_of_context_to_update,
- changes_of_contexts: changes_of_contexts)
+ changes_of_contexts: changes_of_contexts,
+ goals: goals)
  end

  def rebuild?
@@ -299,6 +313,10 @@ class LLMed

  private

+ def goals
+ @goals.map(&:message).join("\n")
+ end
+
  def digests_of_context_to_update
  update_context_digest = []

@@ -27,10 +27,17 @@ class LLMed
  Behavioral rules (must be obeyed):
  1. No comments-only outputs. If your natural answer would be comments, instead implement executable code that performs the described behavior. Do not output explanatory text outside code blocks — output only source code for the indicated contexts.
  2. All functions/methods must have bodies implementing the intended behavior. If external information is missing, implement a reasonable, deterministic default rather than leaving a stub.
- 3. Fail-fast fallback: if the requested context genuinely cannot be implemented, include a clear runtime failure function implementation_impossible() that raises/prints a single machine-readable error (e.g. throws an exception with message IMPLEMENTATION-IMPOSSIBLE) and still compiles.
+ 3. Fail-fast fallback: if the requested context genuinely cannot be implemented, include a clear runtime failure function implementation_impossible() that raises/prints a single machine-readable error (e.g. throws an exception with message IMPLEMENTATION-IMPOSSIBLE) plus a technical description of the reasons, and still compiles.
  4. One-to-one mapping: produce exactly one code block per digest requested. Do not add unrelated helper contexts unless they are wrapped and linked to an indicated digest; if helpers are necessary, include them inside the same context wrapper.
  5. Include the literal LLMED-COMPILED comment somewhere inside the code.
  6. Do not output any text outside the source code. The assistant response must be only source code for the requested context(s).
+ 7. Absolute goal precedence: before generating code, always merge the goals with the context. If any context conflicts with a goal, the goal must override the context's intended purpose, not its implementation details.
+ 8. All user-facing strings, messages, and output must comply with the goals. This overrides literal instructions in context descriptions if there is a conflict.
+ 9. Step-by-step plan to follow:
+ a. Read the goals and extract all global rules (e.g., language, style, behavior).
+ b. Read the context descriptions.
+ c. Merge each context with the goals; if there is any conflict, modify the context output to satisfy the goal.
+ d. Generate code fully complying with both the merged specification and the required context wrapper format.

  All behavior described by contexts marked with '-:' in <changes> must be completely removed from the generated source code.
  Do not leave any code, print statements, functions, or references implementing deleted contexts.
@@ -40,17 +47,27 @@ class LLMed
  {changes_of_contexts}
  </changes>

+
+ <goals>
+ {goals}
+ </goals>
+
+ Goals and contexts are orthogonal: the goals orient the contexts.
+
+ Plan: before writing any code, ensure you fully integrate both the overarching specifications (the goal sections) and the context-level descriptions during code generation.
+
  Output requirement: your response must contain only the generated source code for the indicated context(s), with the required wrapper comments and the test harness; nothing else.
- ", input_variables: %w[language source_code code_comment_begin code_comment_end update_context_digests changes_of_contexts])
+ ", input_variables: %w[language source_code code_comment_begin code_comment_end update_context_digests changes_of_contexts goals])
  end

- def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [], changes_of_contexts: '')
+ def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [], changes_of_contexts: '', goals: '')
  @prompt.format(language: language,
  source_code: source_code,
  code_comment_begin: code_comment_begin,
  code_comment_end: code_comment_end,
  update_context_digests: update_context_digests.join(','),
- changes_of_contexts: changes_of_contexts)
+ changes_of_contexts: changes_of_contexts,
+ goals: goals)
  end

  # Change the default prompt, input variables: language, source_code
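
An illustrative call of the prompt builder above (the receiver name and all argument values are assumptions; `goals` is the joined output of Goal#message):

```ruby
prompt_text = configuration.prompt(
  language: 'ruby',
  source_code: "# LLMED-COMPILED\n# ...current contexts rendered for the LLM...",
  code_comment_begin: '#',
  code_comment_end: '',
  update_context_digests: %w[a1b2c3],         # digests of contexts to recompile
  changes_of_contexts: '+: Print a greeting', # content of the <changes> block
  goals: '<goal name="coding guidelines">Add top comment with a technical summary of the implementation</goal>'
)
```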
data/lib/llmed/goal.rb ADDED
@@ -0,0 +1,26 @@
+ # Copyright 2025 Jovany Leandro G.C <bit4bit@riseup.net>
+ # frozen_string_literal: true
+
+ class LLMed
+ class Goal
+ attr_reader :name, :options
+
+ def initialize(name:, options: {})
+ @name = name
+ @options = options
+
+ end
+
+ def llm(message)
+ @message = message
+ end
+
+ def message
+ "<goal name=\"#{@name}\">#{@message}</goal>"
+ end
+
+ def message?
+ !(@message.nil? || @message.empty?)
+ end
+ end
+ end
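
A minimal sketch of the Goal lifecycle as driven by `achieve` above:

```ruby
goal = LLMed::Goal.new(name: 'coding guidelines')
goal.llm('Add top comment with a technical summary of the implementation')

goal.message? # => true
goal.message  # => "<goal name=\"coding guidelines\">Add top comment with a technical summary of the implementation</goal>"
```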
data/lib/llmed.rb CHANGED
@@ -93,6 +93,7 @@ end
  require_relative 'llm'
  require_relative 'llmed/configuration'
  require_relative 'llmed/context'
+ require_relative 'llmed/goal'
  require_relative 'llmed/release'
  require_relative 'llmed/application'
  require_relative 'llmed/deployment'
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llmed
  version: !ruby/object:Gem::Version
- version: 0.6.0
+ version: 0.6.1
  platform: ruby
  authors:
  - Jovany Leandro G.C
@@ -93,12 +93,15 @@ files:
  - README.md
  - exe/llmed
  - exe/llmed.literate
+ - lib/#llm.rb#
  - lib/llm.rb
+ - lib/llm.rb~
  - lib/llmed.rb
  - lib/llmed/application.rb
  - lib/llmed/configuration.rb
  - lib/llmed/context.rb
  - lib/llmed/deployment.rb
+ - lib/llmed/goal.rb
  - lib/llmed/literate_programming.rb
  - lib/llmed/literate_programming/markdown.rb
  - lib/llmed/literate_programming/markdown.rb.release