releasehx 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/README.adoc +2915 -0
- data/bin/releasehx +7 -0
- data/bin/rhx +7 -0
- data/bin/rhx-mcp +7 -0
- data/bin/sourcerer +32 -0
- data/build/docs/CNAME +1 -0
- data/build/docs/Gemfile.lock +95 -0
- data/build/docs/_config.yml +36 -0
- data/build/docs/config-reference.adoc +4104 -0
- data/build/docs/config-reference.json +1546 -0
- data/build/docs/index.adoc +2915 -0
- data/build/docs/landing.adoc +21 -0
- data/build/docs/manpage.adoc +68 -0
- data/build/docs/releasehx.1 +281 -0
- data/build/docs/releasehx_readme.html +367 -0
- data/build/docs/sample-config.adoc +9 -0
- data/build/docs/sample-config.yml +251 -0
- data/build/docs/schemagraphy_readme.html +0 -0
- data/build/docs/sourcerer_readme.html +46 -0
- data/build/snippets/helpscreen.txt +29 -0
- data/lib/docopslab/mcp/asset_packager.rb +30 -0
- data/lib/docopslab/mcp/manifest.rb +67 -0
- data/lib/docopslab/mcp/resource_pack.rb +46 -0
- data/lib/docopslab/mcp/server.rb +92 -0
- data/lib/docopslab/mcp.rb +6 -0
- data/lib/releasehx/cli.rb +937 -0
- data/lib/releasehx/configuration.rb +215 -0
- data/lib/releasehx/generated.rb +17 -0
- data/lib/releasehx/helpers.rb +58 -0
- data/lib/releasehx/mcp/asset_packager.rb +21 -0
- data/lib/releasehx/mcp/assets/agent-config-guide.md +178 -0
- data/lib/releasehx/mcp/assets/config-def.yml +1426 -0
- data/lib/releasehx/mcp/assets/config-reference.adoc +4104 -0
- data/lib/releasehx/mcp/assets/config-reference.json +1546 -0
- data/lib/releasehx/mcp/assets/sample-config.yml +251 -0
- data/lib/releasehx/mcp/manifest.rb +18 -0
- data/lib/releasehx/mcp/resource_pack.rb +26 -0
- data/lib/releasehx/mcp/server.rb +57 -0
- data/lib/releasehx/mcp.rb +7 -0
- data/lib/releasehx/ops/check_ops.rb +136 -0
- data/lib/releasehx/ops/draft_ops.rb +173 -0
- data/lib/releasehx/ops/enrich_ops.rb +221 -0
- data/lib/releasehx/ops/template_ops.rb +61 -0
- data/lib/releasehx/ops/write_ops.rb +124 -0
- data/lib/releasehx/rest/clients/github.yml +46 -0
- data/lib/releasehx/rest/clients/gitlab.yml +31 -0
- data/lib/releasehx/rest/clients/jira.yml +31 -0
- data/lib/releasehx/rest/yaml_client.rb +418 -0
- data/lib/releasehx/rhyml/adapter.rb +740 -0
- data/lib/releasehx/rhyml/change.rb +167 -0
- data/lib/releasehx/rhyml/liquid.rb +13 -0
- data/lib/releasehx/rhyml/loaders.rb +37 -0
- data/lib/releasehx/rhyml/mappings/github.yaml +60 -0
- data/lib/releasehx/rhyml/mappings/gitlab.yaml +73 -0
- data/lib/releasehx/rhyml/mappings/jira.yaml +29 -0
- data/lib/releasehx/rhyml/mappings/verb_past_tenses.yml +98 -0
- data/lib/releasehx/rhyml/release.rb +144 -0
- data/lib/releasehx/rhyml.rb +15 -0
- data/lib/releasehx/sgyml/helpers.rb +45 -0
- data/lib/releasehx/transforms/adf_to_markdown.rb +307 -0
- data/lib/releasehx/version.rb +7 -0
- data/lib/releasehx.rb +69 -0
- data/lib/schemagraphy/attribute_resolver.rb +48 -0
- data/lib/schemagraphy/cfgyml/definition.rb +90 -0
- data/lib/schemagraphy/cfgyml/doc_builder.rb +52 -0
- data/lib/schemagraphy/cfgyml/path_reference.rb +24 -0
- data/lib/schemagraphy/data_query/json_pointer.rb +42 -0
- data/lib/schemagraphy/loader.rb +59 -0
- data/lib/schemagraphy/regexp_utils.rb +215 -0
- data/lib/schemagraphy/safe_expression.rb +189 -0
- data/lib/schemagraphy/schema_utils.rb +124 -0
- data/lib/schemagraphy/tag_utils.rb +32 -0
- data/lib/schemagraphy/templating.rb +104 -0
- data/lib/schemagraphy.rb +17 -0
- data/lib/sourcerer/builder.rb +120 -0
- data/lib/sourcerer/jekyll/bootstrapper.rb +78 -0
- data/lib/sourcerer/jekyll/liquid/file_system.rb +74 -0
- data/lib/sourcerer/jekyll/liquid/filters.rb +215 -0
- data/lib/sourcerer/jekyll/liquid/tags.rb +44 -0
- data/lib/sourcerer/jekyll/monkeypatches.rb +73 -0
- data/lib/sourcerer/jekyll.rb +26 -0
- data/lib/sourcerer/plaintext_converter.rb +75 -0
- data/lib/sourcerer/templating.rb +190 -0
- data/lib/sourcerer.rb +322 -0
- data/specs/data/api-client-schema.yaml +160 -0
- data/specs/data/config-def.yml +1426 -0
- data/specs/data/mcp-manifest.yml +50 -0
- data/specs/data/rhyml-mapping-schema.yaml +410 -0
- data/specs/data/rhyml-schema.yaml +152 -0
- metadata +376 -0
|
@@ -0,0 +1,307 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module ReleaseHx
|
|
4
|
+
module Transforms
|
|
5
|
+
# Converts Atlassian Document Format (ADF) to Markdown.
|
|
6
|
+
# Focused on extracting "Release Note" sections from Jira issue descriptions
|
|
7
|
+
# and converting them to clean Markdown for use in release documentation.
|
|
8
|
+
module AdfToMarkdown
|
|
9
|
+
# Determines whether +obj+ looks like a top-level ADF document: a Hash
# whose type is "doc", whose version is 1, and whose 'content' key holds
# an Array.
#
# @param obj [Object] The candidate value to check
# @return [Boolean] true if obj is an ADF document
def self.adf? obj
  obj.is_a?(Hash) &&
    obj['type'] == 'doc' &&
    obj['version'] == 1 &&
    obj['content'].is_a?(Array)
end
|
|
20
|
+
|
|
21
|
+
# Extracts a specific section from an ADF document by heading text.
#
# Finds the first heading whose plain text matches +heading+
# (case-insensitively, ignoring surrounding whitespace) and collects every
# following node until the next heading of the same or higher level.
#
# @param adf_doc [Hash] The ADF document (returned unchanged if not ADF)
# @param heading [String] The heading text to search for (case-insensitive)
# @return [Hash] A new ADF document containing only the extracted section
#   (empty content when the heading is not found)
def self.extract_section adf_doc, heading: 'Release Note'
  return adf_doc unless adf?(adf_doc)

  nodes = adf_doc['content'] || []
  wanted = heading.strip.downcase

  start = nodes.index do |n|
    n['type'] == 'heading' &&
      extract_text_from_node(n).strip.downcase == wanted
  end
  return { 'type' => 'doc', 'version' => 1, 'content' => [] } if start.nil?

  level = nodes[start].dig('attrs', 'level') || 1

  # Collect until the next heading at the same or a higher level.
  body = nodes[(start + 1)..].take_while do |n|
    n['type'] != 'heading' || (n.dig('attrs', 'level') || 1) > level
  end

  { 'type' => 'doc', 'version' => 1, 'content' => body }
end
|
|
58
|
+
|
|
59
|
+
# Converts an ADF document (or fragment) to Markdown.
#
# Non-ADF input yields an empty string. Node types listed in
# +:exclude_nodes+ (or default_excluded_nodes when absent) are dropped.
#
# @param adf_doc [Hash] The ADF document to convert
# @param options [Hash] Conversion options
# @option options [Array<String>] :exclude_nodes Node types to exclude
# @return [String] The Markdown representation (leading/trailing whitespace stripped)
def self.convert adf_doc, options = {}
  return '' unless adf?(adf_doc)

  skip = options[:exclude_nodes] || default_excluded_nodes

  (adf_doc['content'] || [])
    .map { |node| convert_node(node, skip) }
    .join
    .strip
end
|
|
74
|
+
|
|
75
|
+
# Default node types to exclude from conversion: headings, all media
# variants, and inline Jira widgets (mentions, emoji, status, cards, dates).
#
# @return [Array<String>] excluded ADF node type names
def self.default_excluded_nodes
  'heading media mediaGroup mediaSingle mediaInline ' \
    'mention emoji status inlineCard blockCard date'.split(' ')
end
|
|
79
|
+
|
|
80
|
+
# Converts a single ADF node to Markdown
#
# Dispatches on node['type']; nodes whose type appears in +excluded+ are
# dropped entirely (rendered as ''). Unknown node types are logged at debug
# level and reduced to their plain-text content.
#
# @param node [Hash] The ADF node
# @param excluded [Array<String>] Node types to exclude
# @param depth [Integer] Current nesting depth for lists
# @return [String] The Markdown representation
def self.convert_node node, excluded = [], depth = 0
  return '' unless node.is_a?(Hash)
  return '' if excluded.include?(node['type'])

  case node['type']
  when 'doc'
    content = node['content'] || []
    content.map { |n| convert_node(n, excluded, depth) }.join
  when 'paragraph'
    "#{convert_paragraph(node, excluded)}\n\n"
  when 'bulletList'
    convert_list(node, excluded, depth, unordered: true)
  when 'orderedList'
    convert_list(node, excluded, depth, unordered: false)
  when 'listItem'
    convert_list_item(node, excluded, depth)
  when 'codeBlock'
    convert_code_block(node)
  when 'blockquote'
    convert_blockquote(node, excluded)
  when 'panel'
    convert_panel(node, excluded)
  when 'rule'
    # Horizontal rule
    "\n---\n\n"
  when 'table'
    convert_table(node, excluded)
  when 'tableRow'
    convert_table_row(node, excluded)
  when 'tableHeader', 'tableCell'
    convert_table_cell(node, excluded)
  when 'text'
    apply_marks(node)
  when 'hardBreak'
    # Markdown hard line break: trailing spaces + newline
    "  \n"
  when 'taskList'
    convert_task_list(node, excluded, depth)
  when 'taskItem'
    convert_task_item(node, excluded, depth)
  else
    # For unknown nodes, try to extract text content
    ReleaseHx.logger.debug "Skipping unsupported ADF node type: #{node['type']}"
    extract_text_from_node(node)
  end
end
|
|
130
|
+
|
|
131
|
+
# Converts a paragraph node by rendering each inline child in order.
#
# @param node [Hash] the paragraph node
# @param excluded [Array<String>] node types to exclude
# @return [String] the inline Markdown (no trailing blank line)
def self.convert_paragraph node, excluded
  children = node['content'] || []
  children.map { |child| convert_node(child, excluded) }.join
end
|
|
136
|
+
|
|
137
|
+
# Converts a list (bullet or ordered) by rendering each child at depth + 1.
#
# NOTE(review): the +unordered+ flag is currently unused — ordered lists are
# rendered with the same markers as bullet lists (see convert_list_item);
# confirm whether numbered output is intended.
#
# @param node [Hash] the bulletList/orderedList node
# @param excluded [Array<String>] node types to exclude
# @param depth [Integer] current nesting depth
# @return [String] the rendered list followed by a newline
def self.convert_list node, excluded, depth, unordered: true
  entries = node['content'] || []
  rendered = entries.map { |entry| convert_node(entry, excluded, depth + 1) }
  "#{rendered.join}\n"
end
|
|
143
|
+
|
|
144
|
+
# Converts a list item
#
# Inline/paragraph children are joined onto a single marker line; nested
# bullet/ordered lists are appended on following lines (their own recursion
# handles deeper indentation).
#
# NOTE(review): every item uses the '- ' marker, so orderedList items are
# also rendered as bullets — confirm whether numbered markers are wanted.
#
# @param node [Hash] the listItem node
# @param excluded [Array<String>] node types to exclude
# @param depth [Integer] nesting depth (1-based when called via convert_list)
# @return [String] the Markdown list item, ending in a newline
def self.convert_list_item node, excluded, depth
  content = node['content'] || []
  indent = '  ' * (depth - 1)
  marker = '- '

  # Separate paragraph content from nested lists
  paragraphs = []
  nested_lists = []

  content.each do |n|
    if n['type'] == 'paragraph'
      paragraphs << convert_paragraph(n, excluded).strip
    elsif %w[bulletList orderedList].include?(n['type'])
      nested_lists << convert_node(n, excluded, depth)
    else
      paragraphs << convert_node(n, excluded, depth).strip
    end
  end

  # Build the list item line
  result = "#{indent}#{marker}#{paragraphs.join(' ')}\n"

  # Add nested lists on new lines with proper indentation
  nested_lists.each do |nested|
    result += nested
  end

  result
end
|
|
174
|
+
|
|
175
|
+
# Converts a codeBlock node into a fenced Markdown code block, carrying the
# language attribute onto the opening fence when present.
#
# @param node [Hash] the codeBlock node
# @return [String] a fenced code block followed by a blank line
def self.convert_code_block node
  language = node.dig('attrs', 'language') || ''
  body = (node['content'] || [])
         .filter_map { |child| child['text'] if child['type'] == 'text' }
         .join

  "```#{language}\n#{body}\n```\n\n"
end
|
|
183
|
+
|
|
184
|
+
# Converts a blockquote node: each child is rendered, then every resulting
# line is prefixed with "> ".
#
# @param node [Hash] the blockquote node
# @param excluded [Array<String>] node types to exclude
# @return [String] the quoted block followed by a blank line
def self.convert_blockquote node, excluded
  body = (node['content'] || [])
         .map { |child| convert_node(child, excluded).strip }
         .join("\n")
  prefixed = body.split("\n").map { |line| "> #{line}" }.join("\n")

  "#{prefixed}\n\n"
end
|
|
191
|
+
|
|
192
|
+
# Converts a panel to a blockquote carrying an admonition label derived from
# the panel's panelType attribute (default 'info' -> NOTE).
#
# @param node [Hash] the panel node
# @param excluded [Array<String>] node types to exclude
# @return [String] a labelled quote followed by a blank line
def self.convert_panel node, excluded
  kind = node.dig('attrs', 'panelType') || 'info'
  body = (node['content'] || [])
         .map { |child| convert_node(child, excluded).strip }
         .join("\n")

  "> **#{panel_type_to_label(kind)}:** #{body}\n\n"
end
|
|
202
|
+
|
|
203
|
+
# Maps ADF panel types to Markdown admonition labels. Unknown types fall
# back to NOTE.
#
# @param panel_type [String] the panelType attribute value
# @return [String] the admonition label
def self.panel_type_to_label panel_type
  case panel_type
  when 'warning' then 'WARNING'
  when 'error'   then 'CAUTION'
  when 'success' then 'TIP'
  else 'NOTE' # covers 'info', 'note', and any unrecognized type
  end
end
|
|
213
|
+
|
|
214
|
+
# Converts a table (basic GFM table support)
#
# Emits a GitHub-flavored Markdown table. A header separator row is inserted
# only when the first row contains at least one tableHeader cell.
#
# @param node [Hash] the table node
# @param excluded [Array<String>] node types to exclude
# @return [String] the Markdown table, or '' for an empty table
def self.convert_table node, excluded
  content = node['content'] || []
  return '' if content.empty?

  # Check if first row contains headers
  first_row = content[0]
  has_header = first_row && first_row['content']&.any? { |cell| cell['type'] == 'tableHeader' }

  rows = content.map { |row| convert_node(row, excluded) }

  if has_header
    header = rows[0]
    # Create separator row — one ' --- ' column per cell in the header row
    col_count = first_row['content']&.length || 0
    separator = "|#{' --- |' * col_count}\n"
    table_body = rows[1..].join

    "#{header}#{separator}#{table_body}\n"
  else
    "#{rows.join}\n"
  end
end
|
|
237
|
+
|
|
238
|
+
# Converts a table row into a single pipe-delimited Markdown line.
#
# @param node [Hash] the tableRow node
# @param excluded [Array<String>] node types to exclude
# @return [String] "| cell | cell |\n"
def self.convert_table_row node, excluded
  cells = (node['content'] || []).map { |cell| convert_node(cell, excluded) }

  "| #{cells.join(' | ')} |\n"
end
|
|
244
|
+
|
|
245
|
+
# Converts a table cell (header or body) by flattening its children onto one
# space-separated line.
#
# @param node [Hash] the tableHeader/tableCell node
# @param excluded [Array<String>] node types to exclude
# @return [String] the cell's inline Markdown
def self.convert_table_cell node, excluded
  children = node['content'] || []
  children.map { |child| convert_node(child, excluded).strip }.join(' ')
end
|
|
250
|
+
|
|
251
|
+
# Converts a taskList node by rendering each task item one level deeper.
#
# @param node [Hash] the taskList node
# @param excluded [Array<String>] node types to exclude
# @param depth [Integer] current nesting depth
# @return [String] the rendered checklist
def self.convert_task_list node, excluded, depth
  items = node['content'] || []
  items.map { |item| convert_node(item, excluded, depth + 1) }.join
end
|
|
256
|
+
|
|
257
|
+
# Converts a taskItem node into a GFM checkbox line; state 'DONE' renders a
# checked box.
#
# @param node [Hash] the taskItem node
# @param excluded [Array<String>] node types to exclude
# @param depth [Integer] nesting depth (1-based via convert_task_list)
# @return [String] "- [x] text\n" or "- [ ] text\n", indented by depth
def self.convert_task_item node, excluded, depth
  checkbox = node.dig('attrs', 'state') == 'DONE' ? '[x]' : '[ ]'
  pad = '  ' * (depth - 1)

  label = (node['content'] || [])
          .map { |child| convert_node(child, excluded, depth).strip }
          .join(' ')

  "#{pad}- #{checkbox} #{label}\n"
end
|
|
268
|
+
|
|
269
|
+
# Applies a text node's marks (bold, italic, code, link, strike, underline)
# to its text, wrapping cumulatively in the order the marks appear.
# Underline has no native Markdown form and falls back to an HTML <u> tag.
# Unrecognized mark types leave the text unchanged.
#
# @param node [Hash] the text node
# @return [String] the marked-up text ('' when the node has no text)
def self.apply_marks node
  initial = node['text'] || ''

  (node['marks'] || []).reduce(initial) do |text, mark|
    case mark['type']
    when 'strong'    then "**#{text}**"
    when 'em'        then "_#{text}_"
    when 'code'      then "`#{text}`"
    when 'link'      then "[#{text}](#{mark.dig('attrs', 'href') || ''})"
    when 'strike'    then "~~#{text}~~"
    when 'underline' then "<u>#{text}</u>"
    else text
    end
  end
end
|
|
295
|
+
|
|
296
|
+
# Recursively extracts the plain text of any ADF node: text nodes contribute
# their 'text' value; everything else contributes the concatenation of its
# children's text. Non-Hash input yields ''.
#
# @param node [Object] the ADF node (or anything else)
# @return [String] the concatenated plain text
def self.extract_text_from_node node
  return '' unless node.is_a?(Hash)
  return node['text'] || '' if node['type'] == 'text'

  children = node['content'] || []
  children.map { |child| extract_text_from_node(child) }.join
end
|
|
305
|
+
end
|
|
306
|
+
end
|
|
307
|
+
end
|
data/lib/releasehx.rb
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'thor'
|
|
4
|
+
require 'logger'
|
|
5
|
+
require 'liquid'
|
|
6
|
+
require 'yaml'
|
|
7
|
+
require_relative 'sourcerer'
|
|
8
|
+
require_relative 'schemagraphy'
|
|
9
|
+
begin
|
|
10
|
+
require_relative 'releasehx/generated'
|
|
11
|
+
rescue LoadError
|
|
12
|
+
raise LoadError, 'ReleaseHx prebuild artifacts missing. Run `bundle exec rake prebuild`.'
|
|
13
|
+
end
|
|
14
|
+
require_relative 'releasehx/helpers'
|
|
15
|
+
require_relative 'releasehx/configuration'
|
|
16
|
+
require_relative 'releasehx/rhyml'
|
|
17
|
+
require_relative 'releasehx/version'
|
|
18
|
+
require_relative 'releasehx/sgyml/helpers'
|
|
19
|
+
require_relative 'releasehx/ops/template_ops'
|
|
20
|
+
require_relative 'releasehx/ops/check_ops'
|
|
21
|
+
require_relative 'releasehx/ops/draft_ops'
|
|
22
|
+
require_relative 'releasehx/ops/write_ops'
|
|
23
|
+
require_relative 'releasehx/ops/enrich_ops'
|
|
24
|
+
require_relative 'releasehx/rest/yaml_client'
|
|
25
|
+
require_relative 'releasehx/transforms/adf_to_markdown'
|
|
26
|
+
|
|
27
|
+
# The ReleaseHx module provides a CLI and a library for generating release
# histories and changelogs from various sources like Jira, GitHub, and YAML files.
module ReleaseHx
  # Returns the global attribute set used for configuration/templating.
  #
  # With RELEASEHX_DEV_RELOAD=true, attributes are re-parsed from the source
  # README on every call; otherwise the pre-generated ATTRIBUTES constant
  # (built by `rake prebuild`) is used.
  #
  # @return [Hash] the global attributes
  def self.attrs
    if ENV['RELEASEHX_DEV_RELOAD'] == 'true'
      # Development-only reload from source document
      require 'asciidoctor' # explicitly required here for dev-only reload
      Sourcerer.load_attributes(File.expand_path('../README.adoc', __dir__))
    else
      # Always use pre-generated attributes at runtime
      ReleaseHx::ATTRIBUTES[:globals]
    end
  end

  DUMP = Logger::DEBUG - 1 # Custom log level, lower than DEBUG

  class << self
    # Provides a singleton logger instance for the application.
    #
    # The logger writes to $stdout at INFO level and gains a custom `dump`
    # method that logs at the DUMP level (below DEBUG).
    #
    # @return [Logger] The application-wide logger instance.
    def logger
      return @logger if @logger

      $stdout.sync = true
      log = Logger.new($stdout)
      log.level = Logger::INFO
      log.formatter = proc do |severity, _datetime, _progname, msg|
        # Logger passes the formatter a severity *label* String; levels
        # outside the built-in range (such as DUMP) are labelled 'ANY', so
        # map that label back to 'DUMP'. (The previous comparison
        # `severity == DUMP` compared a String with an Integer and never
        # matched, so dump messages printed as "ANY: ...".)
        sev = severity == 'ANY' ? 'DUMP' : severity
        "#{sev}: #{msg}\n"
      end

      log.singleton_class.class_eval do
        # Log +msg+ at the custom DUMP level.
        define_method(:dump) do |msg|
          add(DUMP, msg)
        end
      end

      @logger = log
    end
  end

  # Base error class for all ReleaseHx failures.
  class Error < StandardError; end
end
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module SchemaGraphy
  # Resolves AsciiDoc-style `{attribute_name}` placeholder references inside
  # a schema Hash, substituting values supplied by the caller.
  #
  # NOTE(review): only Hash values are traversed — placeholders inside
  # Arrays are not resolved; confirm that schemas never nest 'dflt' under
  # an Array.
  module AttributeResolver
    # Recursively walks +schema+ (mutating it in place) and resolves
    # `{attr}` references in every String-valued 'dflt' key found in nested
    # Hashes.
    #
    # @param schema [Hash] The schema or definition hash to process.
    # @param attrs [Hash] Attribute name => value pairs used for resolution.
    # @return [Hash] The same schema, mutated.
    def self.resolve_attributes! schema, attrs
      return schema unless schema.is_a?(Hash)

      schema.each_key do |key|
        value = schema[key]
        next unless value.is_a?(Hash)

        dflt = value['dflt']
        value['dflt'] = resolve_attribute_reference(dflt, attrs) if dflt.is_a?(String)
        resolve_attributes!(value, attrs)
      end
      schema
    end

    # Replaces every `{attribute_name}` pattern in +value+ with the matching
    # entry from +attrs+. Names with no matching attribute are left intact.
    #
    # @param value [String] The string to process.
    # @param attrs [Hash] The attributes to use for resolution.
    # @return [String] The processed string.
    def self.resolve_attribute_reference value, attrs
      return value unless value.match?(/\{[^}]+\}/)

      value.gsub(/\{([^}]+)\}/) do |whole|
        attrs[::Regexp.last_match(1)] || whole # keep original if unresolved
      end
    end
  end
end
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative '../loader'
|
|
4
|
+
require_relative '../schema_utils'
|
|
5
|
+
|
|
6
|
+
module SchemaGraphy
  # A module for handling CFGYML, a schema-driven configuration system.
  module CFGYML
    # Represents a configuration definition loaded from a schema file and
    # provides access to schema defaults plus Liquid-based rendering of
    # configuration documentation.
    class Definition
      # @return [Hash] the loaded schema hash
      attr_reader :schema

      # @return [Hash] attributes used for resolving placeholders in the schema
      attr_reader :attributes

      # @param schema_path [String] path to the schema YAML file
      # @param attrs [Hash] attributes for placeholder resolution
      def initialize schema_path, attrs = {}
        @schema = Loader.load_yaml_with_attributes(schema_path, attrs)
        @attributes = attrs
      end

      # Extract default values from the loaded schema.
      # @return [Hash] a hash of default values
      def defaults
        SchemaUtils.crawl_defaults(@schema)
      end

      # Template search paths, built-in templates first.
      # @return [Array<String>] template directories
      def template_paths
        @template_paths ||= [
          File.join(File.dirname(__FILE__), '..', 'templates', 'cfgyml'),
          *additional_template_paths
        ]
      end

      # Render a configuration reference (:adoc) or sample config (:yaml).
      #
      # @param format [Symbol] the output format (:adoc or :yaml)
      # @return [String] the rendered output
      # @raise [ArgumentError] if the format is unsupported
      def render_reference format = :adoc
        template_name =
          case format
          when :adoc then 'config-reference.adoc.liquid'
          when :yaml then 'sample-config.yaml.liquid'
          else raise ArgumentError, "Unsupported format: #{format}"
          end

        render_template(template_name)
      end

      private

      # Render a named template through the Liquid engine, exposing the
      # schema as 'config_def' and the attributes as 'attrs'.
      def render_template template_name
        template_path = find_template(template_name)
        raise "Template not found: #{template_name}" unless template_path

        require 'liquid'
        parsed = Liquid::Template.parse(File.read(template_path))
        parsed.render(
          'config_def' => @schema,
          'attrs' => @attributes)
      end

      # First existing file named +name+ across template_paths, or nil.
      def find_template name
        template_paths
          .map { |dir| File.join(dir, name) }
          .find { |candidate| File.exist?(candidate) }
      end

      # Extension point for subclasses to contribute extra template paths.
      def additional_template_paths
        []
      end
    end
  end
end
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'json'
|
|
4
|
+
|
|
5
|
+
module SchemaGraphy
  module CFGYML
    # Builds documentation-friendly CFGYML references for machine consumption.
    module DocBuilder
      module_function

      # Serialize a schema's property reference as JSON.
      #
      # @param schema [Hash] the CFGYML schema
      # @param options [Hash] :pretty (default true) toggles pretty-printing
      # @return [String] the JSON document
      def call schema, options = {}
        data = reference_hash(schema)
        options.fetch(:pretty, true) ? JSON.pretty_generate(data) : JSON.generate(data)
      end

      # Envelope with format marker, version, and the recursive properties tree.
      # @return [Hash]
      def reference_hash schema
        {
          'format' => 'releasehx-config-reference',
          'version' => 1,
          'properties' => build_properties(schema['properties'], [])
        }
      end

      # Recursively builds entries for a 'properties' Hash. Non-Hash input
      # and non-Hash property definitions are skipped.
      def build_properties properties, path
        return {} unless properties.is_a?(Hash)

        result = {}
        properties.each do |key, definition|
          next unless definition.is_a?(Hash)

          child_path = path + [key]
          node = build_entry(child_path, definition)
          nested = build_properties(definition['properties'], child_path)
          node['properties'] = nested unless nested.empty?
          result[key] = node
        end
        result
      end

      # Builds one property entry; fields whose value is nil are dropped
      # (a 'dflt' of nil therefore yields no 'default' key).
      def build_entry path, definition
        {
          'path' => path.join('.'),
          'desc' => definition['desc'],
          'docs' => definition['docs'],
          'type' => definition['type'],
          'templating' => definition['templating'],
          'default' => definition.key?('dflt') ? definition['dflt'] : nil
        }.compact
      end
    end
  end
end
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'json'
|
|
4
|
+
|
|
5
|
+
module SchemaGraphy
  module CFGYML
    # Loads a JSON config reference and answers JSON Pointer queries over it.
    class PathReference
      # @param data [Hash, Array] the parsed reference document
      def initialize data
        @data = data
      end

      # Parse a JSON file from disk into a PathReference.
      #
      # @param path [String] filesystem path to the JSON document
      # @return [PathReference]
      def self.load path
        raw = File.read(path)
        new(JSON.parse(raw))
      end

      # Resolve a JSON Pointer (e.g. "/properties/foo") against the document.
      #
      # @param pointer [String] the JSON Pointer
      # @return [Object] the referenced value
      def get pointer
        SchemaGraphy::DataQuery::JSONPointer.resolve(@data, pointer)
      end
    end

    # Backwards-compatible alias.
    Reference = PathReference
  end
end
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module SchemaGraphy
  module DataQuery
    # Resolves JSON Pointer (RFC 6901) queries against a Hash or Array.
    module JSONPointer
      module_function

      # Resolve +pointer+ against +data+.
      #
      # @param data [Hash, Array] the document to query
      # @param pointer [String, nil] JSON Pointer; nil or "" yields data itself
      # @return [Object] the referenced value
      # @raise [ArgumentError] when a non-empty pointer does not start with '/'
      # @raise [KeyError] when the pointer cannot be resolved
      def resolve data, pointer
        return data if pointer.nil? || pointer == ''
        raise ArgumentError, "Invalid JSON Pointer: #{pointer}" unless pointer.start_with?('/')

        pointer.split('/').drop(1).inject(data) do |node, raw_token|
          resolve_token(node, unescape(raw_token), pointer)
        end
      end

      # Step one token into +current+. Hashes are tried with the String key
      # first, then its Symbol form; Arrays require an integer token. Any
      # lookup failure is normalized to KeyError.
      def resolve_token current, key, pointer
        case current
        when Array
          current.fetch(Integer(key, 10))
        when Hash
          return current.fetch(key) if current.key?(key)
          return current.fetch(key.to_sym) if current.key?(key.to_sym)

          raise KeyError, "JSON Pointer not found: #{pointer}"
        else
          raise KeyError, "JSON Pointer not found: #{pointer}"
        end
      rescue ArgumentError, IndexError, KeyError
        raise KeyError, "JSON Pointer not found: #{pointer}"
      end

      # Unescape per RFC 6901: '~1' => '/' first, then '~0' => '~'.
      def unescape token
        token.gsub('~1', '/').gsub('~0', '~')
      end
    end
  end
end
|