llmed 0.6.0 → 0.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: c47ebb44059bae3a1ffc2d27dd064098036ebd390cb677a488761845e13d4b6b
4
- data.tar.gz: 1442dec8d3c594b8610af95d7edfbdbdee8881ac00a046001cc6fd4eb9c2e68c
3
+ metadata.gz: d9bc1517e8b12805cae255599e37f2f6cb17ee1aae43043c2d36e232291dc63f
4
+ data.tar.gz: 2d82a1d9ff3fc1585b8fe952800786cc25ee107f9a1cf679e1ada541c4b105df
5
5
  SHA512:
6
- metadata.gz: 80a7c93eed1106305b848bae99b0fcce744c55bc933538465041e19329ed1e6fe01f398c38ec849f2ae4981ec260024d3a9ded96f1465224a0b9f60cc8924b53
7
- data.tar.gz: 7c3df54bec3ee2e2f02bd7108c92004e6969bb8019ab5209bda32327cc916492ac2946948b2ce835325b93d33238292187870e68bc35bfa50feefb0b38164263
6
+ metadata.gz: 347c5b9e439ceedbb8de5a850185d1a4e3a8aec23564d9f379747abda2d5a5921047d2891563d9205077476f29910ffe511819d729b24ef1ee3a94002ad1fbe5
7
+ data.tar.gz: 2b5053b7389eee52215e44799737f74fdb9fc43acf02c2a1036b2f052a1bef339f13427c6bc92606a1b7fb7d31b80b44fa6e6b4c94f3284d16d5550dfc1e321a
data/README.md CHANGED
@@ -17,6 +17,8 @@ In classic terms the LLM is the Compiler, Source Code is the Binary, the Program
17
17
  set_llm provider: :like_openai, api_key: ENV['TOGETHERAI_API_KEY'], model: 'Qwen/Qwen2.5-Coder-32B-Instruct', options: {uri_base: 'https://api.together.xyz/v1'}
18
18
 
19
19
  application "MINI COUNTER", release: nil, language: :node, output_file: "minicounter.ollmed" do
20
+ achieve('coding guidelines') { 'Add top comment with a technical summary of the implementation' }
21
+
20
22
  # Most stable context: if this changes, all subsequent context will be recompiled.
21
23
  context "dependencies" do
22
24
  <<-LLM
@@ -66,12 +68,43 @@ add CORS endpoints.
66
68
  ```
67
69
  then compile using command `llmed.literate`.
68
70
 
71
+ ## HOWTO Programming using LLMED
72
+
73
+ Programming with LLMED involves breaking down the problem into smaller contexts, where each context must be connected to the next, creating a chain of contexts that expresses the final solution (program/application/software). The final code will map each context to a block of code (module, function, or statements—this is determined by the LLM), so any changes to a context will be reflected in the source code. This is important to keep in mind. For example, it is not the same to write:
74
+
75
+ ```
76
+ # Dependencies
77
+ ...
78
+ # Application
79
+ ...
80
+ ```
81
+
82
+ as
83
+
84
+ ```
85
+ # Application
86
+ ...
87
+ # Dependencies
88
+ ...
89
+ ```
90
+
91
+ !!The LLM can do crazy things when trying to create working source code for that.
92
+
93
+ At the top of the document, write the most stable concepts (the contexts that don't change frequently), going down to the most unstable concepts (the contexts that are expected to change more frequently). This ordering matters for two reasons:
94
+
95
+ 1. The map between context and code block.
96
+ 2. Rebuilding of contexts: LLMed assumes that there is a unique chain, so it will recompile from the changed context to the end of the chain.
97
+
98
+ So, programming with LLMed means being aware of the technology (programming language, libraries, software architecture, tools, etc.). LLMed's job is to provide a free-form natural language programming compiler.
99
+
100
+
69
101
  ## Programming flow
70
102
 
71
- * Cycle
103
+ 1. Cycle
72
104
  * Edit application.
73
105
  * Once you agree with the current state of the application, increase the value of the `release` attribute
74
- * Commit the release file (.release) and the source code (.llmed).
106
+ 2. Commit the release file (.release), the source code (.llmed), and the snapshot (.snapshot).
107
+ 3. Go to 1.
75
108
 
76
109
  # Usage
77
110
 
data/exe/llmed CHANGED
@@ -1,23 +1,24 @@
1
1
  #!/bin/env ruby
2
2
  # Copyright 2025 Jovany Leandro G.C <bit4bit@riseup.net>
3
3
  # frozen_string_literal: true
4
+
4
5
  require 'optparse'
5
6
  require 'llmed'
6
7
 
7
8
  logger = Logger.new(STDERR)
8
9
  output_dir = './llmed-out'
9
10
  release_dir = output_dir
10
- template = <<-TMP
11
- set_llm provider: :openai, api_key: ENV['OPENAI_API_KEY'], model: 'gpt-4o'
12
-
13
- # Increment the RELEASE number once you approve the output.
14
- application "hi world", release: nil, language: '<HERE LANGUAGE>', output_file: "<HERE NAME>.ollmed" do
15
- context "main" do
16
- <<-LLM
17
- Show to user 'hi world!'.
18
- LLM
11
+ template = <<~TMP
12
+ set_llm provider: :openai, api_key: ENV['OPENAI_API_KEY'], model: 'gpt-4o'
13
+
14
+ # Increment the RELEASE number once you approve the output.
15
+ application "hi world", release: nil, language: '<HERE LANGUAGE>', output_file: "<HERE NAME>.ollmed" do
16
+ context "main" do
17
+ <<-LLM
18
+ Show to user 'hi world!'.
19
+ LLM
20
+ end
19
21
  end
20
- end
21
22
  TMP
22
23
 
23
24
  OptionParser.new do |parser|
data/lib/llm.rb CHANGED
@@ -18,18 +18,16 @@ class LLMed
18
18
 
19
19
  Response = Struct.new(:provider, :model, :source_code, :duration_seconds, :total_tokens, keyword_init: true)
20
20
 
21
- class OpenAI
22
-
23
- DEFAULT_URI_BASE = "https://api.openai.com/".freeze
24
- MAX_TOKENS = 8192
25
-
26
- def initialize(**args)
27
- @logger = args.delete(:logger)
28
- @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
21
+ class Agent
22
+ def initialize(llm_instance)
23
+ @llm_instance = llm_instance
24
+ @assistant = Langchain::Assistant.new(
25
+ llm: @llm_instance.llm
26
+ )
29
27
  end
30
28
 
31
29
  def chat(messages: [])
32
- messages = messages.map do |m|
30
+ llm_messages = messages.map do |m|
33
31
  case m
34
32
  when Message::System
35
33
  { role: 'system', content: m.content }
@@ -39,26 +37,40 @@ class LLMed
39
37
  end
40
38
 
41
39
  start = Time.now
42
- llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
43
- warn_token_limits(llm_response)
40
+
41
+ @assistant.add_messages(messages: llm_messages)
42
+ assistant_messages = @assistant.run!
43
+
44
+ assistant_response = assistant_messages.select { |m| m.role == 'assistant' }.last
44
45
 
45
46
  stop = Time.now
46
- Response.new({ provider: provider,
47
- model: @llm.chat_parameters[:model],
47
+ Response.new({ provider: @llm_instance.provider,
48
+ model: @llm_instance.model,
48
49
  duration_seconds: stop.to_i - start.to_i,
49
- source_code: source_code(llm_response.chat_completion),
50
- total_tokens: llm_response.total_tokens })
50
+ source_code: @llm_instance.source_code(assistant_response.content),
51
+ total_tokens: @assistant.total_tokens })
51
52
  end
53
+ end
52
54
 
53
- private
54
- def warn_token_limits(llm_response)
55
- if llm_response.completion_tokens >= MAX_TOKENS
56
- @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
57
- end
55
+ class OpenAI
56
+ DEFAULT_URI_BASE = "https://api.openai.com/".freeze
57
+ MAX_TOKENS = 8192
58
+
59
+ attr_reader :llm
60
+
61
+ def initialize(**args)
62
+ @logger = args.delete(:logger)
63
+ @llm = Langchain::LLM::OpenAI.new(**llm_arguments(args))
58
64
  end
59
65
 
60
- def llm_arguments(args)
61
- args
66
+ def chat(messages: [])
67
+ llm_response = @llm.chat(messages: messages, max_tokens: MAX_TOKENS)
68
+ warn_token_limits(llm_response)
69
+ llm_response
70
+ end
71
+
72
+ def model
73
+ @llm.chat_parameters[:model]
62
74
  end
63
75
 
64
76
  def provider
@@ -68,31 +80,43 @@ class LLMed
68
80
  def source_code(content)
69
81
  content.gsub('```', '').sub(/^(node(js)?|javascript|ruby|python(\d*)|elixir|bash|html|go|c(pp)?)([ \n])/, '')
70
82
  end
71
- end
72
83
 
73
- class Anthropic < OpenAI
74
84
  private
75
85
 
86
+ def warn_token_limits(llm_response)
87
+ if llm_response.completion_tokens >= MAX_TOKENS
88
+ @logger.warn("POSSIBLE INCONSISTENCY COMPLETED TOKENS REACHED MAX TOKENS #{MAX_TOKENS}")
89
+ end
90
+ end
91
+
76
92
  def llm_arguments(args)
77
- @logger = args.delete(:logger)
78
- args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
93
+ args
79
94
  end
95
+ end
80
96
 
97
+ class Anthropic < OpenAI
81
98
  def provider
82
99
  :anthropic
83
100
  end
84
- end
85
101
 
86
- class LikeOpenAI < OpenAI
87
102
  private
88
103
 
89
104
  def llm_arguments(args)
90
- args
105
+ @logger = args.delete(:logger)
106
+ args.merge({ llm_options: { uri_base: 'https://api.anthropic.com/v1/' } })
91
107
  end
108
+ end
92
109
 
110
+ class LikeOpenAI < OpenAI
93
111
  def provider
94
112
  :like_openai
95
113
  end
114
+
115
+ private
116
+
117
+ def llm_arguments(args)
118
+ args
119
+ end
96
120
  end
97
121
 
98
122
  class Test
@@ -100,14 +124,25 @@ class LLMed
100
124
  @output = ''
101
125
  end
102
126
 
127
+ def chat_parameters
128
+ { model: 'test' }
129
+ end
130
+
131
+ def model
132
+ 'test'
133
+ end
134
+
103
135
  def chat(messages: [])
104
- @output = messages.map { |m| m[:content] }.join("\n")
136
+ output = messages.map { |m| m[:content] }.join("\n")
137
+ Struct.new(:chat_completion, :total_tokens).new(output, 0)
138
+ end
139
+
140
+ def source_code(code)
141
+ code
142
+ end
105
143
 
106
- Response.new({ provider: :test,
107
- model: 'test',
108
- duration_seconds: 0,
109
- source_code: @output,
110
- total_tokens: 0 })
144
+ def provider
145
+ :test
111
146
  end
112
147
  end
113
148
  end
@@ -39,6 +39,7 @@ class LLMed
39
39
  @code_comment = CodeComment.new(language)
40
40
  @block = block
41
41
  @contexts = []
42
+ @goals = []
42
43
  @logger = logger
43
44
  @release = release
44
45
  @release_dir = release_dir
@@ -48,6 +49,7 @@ class LLMed
48
49
 
49
50
  # Example:
50
51
  # application { context "demo" { "content" } }
52
+ # DEPRECATED: Use spec instead of context
51
53
  def context(name, **opts, &block)
52
54
  opts[:release_dir] = @release_dir
53
55
  ctx = Context.new(name: name, options: opts)
@@ -57,6 +59,21 @@ class LLMed
57
59
  @contexts << ctx
58
60
  end
59
61
 
62
+ # Example:
63
+ # application { spec "demo" { "content" } }
64
+ def spec(name, **opts, &block)
65
+ context(name, **opts, &block)
66
+ end
67
+
68
+ def achieve(name, **opts, &block)
69
+ opts[:release_dir] = @release_dir
70
+ goal = Goal.new(name: name, options: opts)
71
+ output = goal.instance_eval(&block)
72
+ goal.llm(output) unless goal.message?
73
+
74
+ @goals << goal
75
+ end
76
+
60
77
  def evaluate
61
78
  instance_eval(&@block)
62
79
  end
@@ -75,7 +92,7 @@ class LLMed
75
92
  end
76
93
 
77
94
  def refresh(contexts)
78
- @contexts = contexts.map{ |ctx| [ctx.name, {'name' => ctx.name, 'message' => ctx.raw}]}.to_h
95
+ @contexts = contexts.map { |ctx| [ctx.name, { 'name' => ctx.name, 'message' => ctx.raw }] }.to_h
79
96
  dump
80
97
  end
81
98
 
@@ -83,9 +100,12 @@ class LLMed
83
100
  diffs = {}
84
101
  other_contexts.each do |other_ctx|
85
102
  current_ctx = @contexts[other_ctx.name]
86
- result = line_diff(current_ctx['message'], other_ctx.raw)
87
- # omit not changes
88
- if !result.all?{|op, line| op == '=:'}
103
+ result = if current_ctx.nil?
104
+ other_ctx.raw.split("\n").map { |line| ["+:", line] }
105
+ else
106
+ line_diff(current_ctx['message'], other_ctx.raw)
107
+ end
108
+ if !result.all? { |op, line| op == '=:' }
89
109
  diffs[other_ctx.name] = result
90
110
  end
91
111
  end
@@ -120,8 +140,8 @@ class LLMed
120
140
  i2 += 1
121
141
  else
122
142
  # Try to find if one of the lines matches later
123
- idx1 = lines1[i1+1..-1]&.index(line2)
124
- idx2 = lines2[i2+1..-1]&.index(line1)
143
+ idx1 = lines1[i1 + 1..-1]&.index(line2)
144
+ idx2 = lines2[i2 + 1..-1]&.index(line1)
125
145
 
126
146
  if !idx1.nil? && (idx2.nil? || idx1 <= idx2)
127
147
  result << ["-:", line1]
@@ -148,20 +168,18 @@ class LLMed
148
168
  @contexts = JSON.load(f.read)['contexts']
149
169
  end
150
170
  else
151
- @contexts = default.map{ |ctx| [ctx.name, {'name' => ctx.name, 'message' => ctx.raw}]}.to_h
171
+ @contexts = default.map { |ctx| [ctx.name, { 'name' => ctx.name, 'message' => ctx.raw }] }.to_h
152
172
  end
153
173
  end
154
174
 
155
175
  def dump
156
176
  File.open(@snapshot_file, 'w') do |file|
157
- file.write(JSON.dump({'contexts' => @contexts}))
177
+ file.write(JSON.dump({ 'contexts' => @contexts }))
158
178
  end
159
179
  end
160
180
  end
161
181
 
162
182
  def prepare_snapshot
163
- raise "snapshot preparation require contexts" if @contexts.empty?
164
-
165
183
  @logger.info("APPLICATION #{@name} PREPARING SNAPSHOT #{@snapshot.snapshot_file}")
166
184
 
167
185
  @snapshot.sync(@contexts)
@@ -233,6 +251,8 @@ class LLMed
233
251
  @logger.info("APPLICATION #{@name} SNAPSHOT REFRESHED")
234
252
  end
235
253
 
254
+ return unless @output_file.is_a?(String)
255
+
236
256
  output_file = Pathname.new(@output_dir) + @output_file
237
257
  FileUtils.cp(output_file, release_source_code)
238
258
  FileUtils.cp(output_file, release_main_source_code)
@@ -254,7 +274,8 @@ class LLMed
254
274
  code_comment_begin: @code_comment.begin,
255
275
  code_comment_end: @code_comment.end,
256
276
  update_context_digests: digests_of_context_to_update,
257
- changes_of_contexts: changes_of_contexts)
277
+ changes_of_contexts: changes_of_contexts,
278
+ goals: goals)
258
279
  end
259
280
 
260
281
  def rebuild?
@@ -299,6 +320,10 @@ class LLMed
299
320
 
300
321
  private
301
322
 
323
+ def goals
324
+ @goals.map(&:message).join("\n")
325
+ end
326
+
302
327
  def digests_of_context_to_update
303
328
  update_context_digest = []
304
329
 
@@ -27,10 +27,17 @@ class LLMed
27
27
  Behavioral rules (must be obeyed):
28
28
  1. No comments-only outputs. If your natural answer would be comments, instead implement executable code that performs the described behavior. Do not output explanatory text outside code blocks — output only source code for the indicated contexts.
29
29
  2. All functions/methods must have bodies implementing the intended behavior. If external information is missing, implement a reasonable, deterministic default rather than leaving a stub.
30
- 3. Fail-fast fallback: if the requested context genuinely cannot be implemented, include a clear runtime failure function implementation_impossible() that raises/prints a single machine-readable error (e.g. throws an exception with message IMPLEMENTATION-IMPOSSIBLE) and still compiles.
30
+ 3. Fail-fast fallback: if the requested context genuinely cannot be implemented, include a clear runtime failure function implementation_impossible() that raises/prints a single machine-readable error (e.g. throws an exception with message IMPLEMENTATION-IMPOSSIBLE) also a technical description with the reasons and still compiles.
31
31
  4. One-to-one mapping: produce exactly one code block per digest requested. Do not add unrelated helper contexts unless they are wrapped and linked to an indicated digest; if helpers are necessary, include them inside the same context wrapper.
32
32
  5. Include the literal LLMED-COMPILED comment somewhere inside the code.
33
33
  6. Do not output any text outside the source code. The assistant response must be only source code for the requested context(s).
34
+ 7. Absolute goal precedence: Before generating code, always merge the goals with the context. If any context conflicts with a goal, the goal must override the context's intended purpose, not its implementation details.
35
+ 8. All user-facing strings, messages, and output must comply with the goals. This overrides literal instructions in context descriptions if there is a conflict.
36
+ 9. Step-by-step plan to follow:
37
+ a. Read goals and extract all global rules (e.g., language, style, behavior).
38
+ b. Read the context descriptions.
39
+ c. Merge context with the goal; if there is any conflict, modify the context output to satisfy the goal.
40
+ d. Generate code fully complying with both the merged specification and the required context wrapper format.
34
41
 
35
42
  All behavior described by contexts marked with '-:' in <changes> must be completely removed from the generated source code.
36
43
  Do not leave any code, print statements, functions, or references implementing deleted contexts.
@@ -40,17 +47,28 @@ class LLMed
40
47
  {changes_of_contexts}
41
48
  </changes>
42
49
 
50
+
51
+ <goals>
52
+ {goals}
53
+ </goals>
54
+
55
+ Goals and context are orthogonal: goals orient the context.
56
+
57
+ Plan: Before writing any code, ensure you fully integrate both the overarching specifications (goal sections) and the context-level descriptions during code generation.
58
+
43
59
  Output requirement: your response must contain only the generated source code for the indicated context(s), with the required wrapper comments and the test harness; nothing else.
44
- ", input_variables: %w[language source_code code_comment_begin code_comment_end update_context_digests changes_of_contexts])
60
+ ", input_variables: %w[language source_code code_comment_begin code_comment_end update_context_digests changes_of_contexts goals])
45
61
  end
46
62
 
47
- def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [], changes_of_contexts: '')
63
+ def prompt(language:, source_code:, code_comment_begin:, code_comment_end:, update_context_digests: [],
64
+ changes_of_contexts: '', goals: '')
48
65
  @prompt.format(language: language,
49
66
  source_code: source_code,
50
67
  code_comment_begin: code_comment_begin,
51
68
  code_comment_end: code_comment_end,
52
69
  update_context_digests: update_context_digests.join(','),
53
- changes_of_contexts: changes_of_contexts)
70
+ changes_of_contexts: changes_of_contexts,
71
+ goals: goals)
54
72
  end
55
73
 
56
74
  # Change the default prompt, input variables: language, source_code
@@ -85,33 +103,35 @@ class LLMed
85
103
  end
86
104
 
87
105
  def llm
88
- case @provider
89
- when :openai
90
- LLMed::LLM::OpenAI.new(
91
- logger: @logger,
92
- api_key: @provider_api_key,
93
- default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
94
- )
95
- when :anthropic
96
- LLMed::LLM::Anthropic.new(
97
- logger: @logger,
98
- api_key: @provider_api_key,
99
- default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
100
- )
101
- when :like_openai
102
- LLMed::LLM::LikeOpenAI.new(
103
- logger: @logger,
104
- api_key: @provider_api_key,
105
- default_options: { temperature: 0.7, chat_model: @provider_model },
106
- llm_options: @provider_options
107
- )
108
- when :test
109
- LLMed::LLM::Test.new
110
- when nil
111
- raise 'Please set the provider with `set_llm(provider, api_key, model)`'
112
- else
113
- raise "not implemented provider #{@provider}"
114
- end
106
+ llm_instance = case @provider
107
+ when :openai
108
+ LLMed::LLM::OpenAI.new(
109
+ logger: @logger,
110
+ api_key: @provider_api_key,
111
+ default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
112
+ )
113
+ when :anthropic
114
+ LLMed::LLM::Anthropic.new(
115
+ logger: @logger,
116
+ api_key: @provider_api_key,
117
+ default_options: { max_tokens: nil, temperature: 0.7, chat_model: @provider_model }
118
+ )
119
+ when :like_openai
120
+ LLMed::LLM::LikeOpenAI.new(
121
+ logger: @logger,
122
+ api_key: @provider_api_key,
123
+ default_options: { temperature: 0.7, chat_model: @provider_model },
124
+ llm_options: @provider_options
125
+ )
126
+ when :test
127
+ LLMed::LLM::Test.new
128
+ when nil
129
+ raise 'Please set the provider with `set_llm(provider, api_key, model)`'
130
+ else
131
+ raise "not implemented provider #{@provider}"
132
+ end
133
+
134
+ LLMed::LLM::Agent.new(llm_instance)
115
135
  end
116
136
  end
117
137
  end
data/lib/llmed/goal.rb ADDED
@@ -0,0 +1,25 @@
1
+ # Copyright 2025 Jovany Leandro G.C <bit4bit@riseup.net>
2
+ # frozen_string_literal: true
3
+
4
+ class LLMed
5
+ class Goal
6
+ attr_reader :name, :options
7
+
8
+ def initialize(name:, options: {})
9
+ @name = name
10
+ @options = options
11
+ end
12
+
13
+ def llm(message)
14
+ @message = message
15
+ end
16
+
17
+ def message
18
+ "<goal name=\"#{@name}\">#{@message}</goal>"
19
+ end
20
+
21
+ def message?
22
+ !(@message.nil? || @message.empty?)
23
+ end
24
+ end
25
+ end
@@ -1,9 +1,9 @@
1
- #<llmed-code context='Library LLMed::LiterateProgramming::Markdown' digest='9c0e3f61ab4cdc3c56c29230a800487dd1a7ef0d929c843fd2461907d0831ab2' after=''>
1
+ # <llmed-code context='Library LLMed::LiterateProgramming::Markdown' digest='9c0e3f61ab4cdc3c56c29230a800487dd1a7ef0d929c843fd2461907d0831ab2' after=''>
2
2
  class LLMed::LiterateProgramming::Markdown
3
3
  def parse(input)
4
4
  contexts = []
5
5
  current_context = { type: :context, title: "_default", content: [] }
6
-
6
+
7
7
  input.each_line do |line|
8
8
  if line.strip =~ /^# (.+)$/
9
9
  contexts << current_context unless current_context[:content].empty?
@@ -23,4 +23,4 @@ class LLMed::LiterateProgramming::Markdown
23
23
  contexts
24
24
  end
25
25
  end
26
- #</llmed-code>
26
+ # </llmed-code>
@@ -41,7 +41,7 @@ class LLMed
41
41
  context[:content] += "#{URI.open(item_content[:reference]).read}\n"
42
42
  end
43
43
  end
44
- contexts << context
44
+ contexts << context
45
45
  end
46
46
  end
47
47
 
data/lib/llmed.rb CHANGED
@@ -93,6 +93,7 @@ end
93
93
  require_relative 'llm'
94
94
  require_relative 'llmed/configuration'
95
95
  require_relative 'llmed/context'
96
+ require_relative 'llmed/goal'
96
97
  require_relative 'llmed/release'
97
98
  require_relative 'llmed/application'
98
99
  require_relative 'llmed/deployment'
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llmed
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.6.0
4
+ version: 0.6.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Jovany Leandro G.C
@@ -99,6 +99,7 @@ files:
99
99
  - lib/llmed/configuration.rb
100
100
  - lib/llmed/context.rb
101
101
  - lib/llmed/deployment.rb
102
+ - lib/llmed/goal.rb
102
103
  - lib/llmed/literate_programming.rb
103
104
  - lib/llmed/literate_programming/markdown.rb
104
105
  - lib/llmed/literate_programming/markdown.rb.release