gai-cli-docx 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +57 -0
- package/package.json +37 -0
- package/src/convert_md_to_docx.py +227 -0
- package/src/index.js +491 -0
- package/test/hdwallet.md +314 -0
- package/test/test.sh +29 -0
package/README.md
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: gai-docx
|
|
3
|
+
description: "CLI tool for converting Markdown to Word documents. Use when: user wants to convert a Markdown file to a .docx Word document."
|
|
4
|
+
homepage: https://github.com/kakkoii1337/gai-cli-docx
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# md2word
|
|
8
|
+
|
|
9
|
+
CLI tool for converting Markdown files to Word documents (.docx).
|
|
10
|
+
|
|
11
|
+
## Installation
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
npm install -g gai-cli-docx
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
Or run directly:
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
npx gai-cli-docx "document.md" "output.docx"
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Usage
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
md2word <source-md-file> <dest-docx-file>
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
### Arguments
|
|
30
|
+
|
|
31
|
+
- `source-md-file` - Path to the source Markdown file (required)
|
|
32
|
+
- `dest-docx-file` - Path to the output Word document (required)
|
|
33
|
+
|
|
34
|
+
### Options
|
|
35
|
+
|
|
36
|
+
- `--help, -h` - Show help message
|
|
37
|
+
|
|
38
|
+
### Examples
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
# Basic conversion
|
|
42
|
+
md2word "README.md" "output.docx"
|
|
43
|
+
|
|
44
|
+
# Save to a subdirectory
|
|
45
|
+
md2word "docs/guide.md" "dist/guide.docx"
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## Output
|
|
49
|
+
|
|
50
|
+
Writes a `.docx` Word document to the specified destination path. Prints the output path to stdout on success.
|
|
51
|
+
|
|
52
|
+
## Notes
|
|
53
|
+
|
|
54
|
+
- Supports headings (H1–H6), paragraphs, bullet lists, numbered lists, checkboxes, code blocks, inline formatting (bold, italic, code), images, and page breaks
|
|
55
|
+
- Images are resolved relative to the source Markdown file's directory
|
|
56
|
+
- Uses Calibri 11pt with 1.15 line spacing by default
|
|
57
|
+
- Code blocks use Consolas 10pt with grey background shading
|
package/package.json
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "gai-cli-docx",
|
|
3
|
+
"version": "1.0.1",
|
|
4
|
+
"description": "CLI tool for converting Markdown to Word documents",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "src/index.js",
|
|
7
|
+
"bin": {
|
|
8
|
+
"md2word": "src/index.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"start": "node src/index.js"
|
|
12
|
+
},
|
|
13
|
+
"keywords": [
|
|
14
|
+
"cli",
|
|
15
|
+
"markdown",
|
|
16
|
+
"word",
|
|
17
|
+
"docx",
|
|
18
|
+
"converter"
|
|
19
|
+
],
|
|
20
|
+
"author": "kakkoii1337 <kakkoii1337@gmail.com>",
|
|
21
|
+
"license": "MIT",
|
|
22
|
+
"repository": {
|
|
23
|
+
"type": "git",
|
|
24
|
+
"url": "git+https://github.com/kakkoii1337/gai-cli-docx.git"
|
|
25
|
+
},
|
|
26
|
+
"bugs": {
|
|
27
|
+
"url": "https://github.com/kakkoii1337/gai-cli-docx/issues"
|
|
28
|
+
},
|
|
29
|
+
"homepage": "https://github.com/kakkoii1337/gai-cli-docx#readme",
|
|
30
|
+
"engines": {
|
|
31
|
+
"node": ">=18.0.0"
|
|
32
|
+
},
|
|
33
|
+
"dependencies": {
|
|
34
|
+
"docx": "^9.5.1",
|
|
35
|
+
"image-size": "^2.0.2"
|
|
36
|
+
}
|
|
37
|
+
}
|
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
convert_md_to_docx.py - Convert markdown to .docx files
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python convert_md_to_docx.py <input.md>
|
|
7
|
+
|
|
8
|
+
Output:
|
|
9
|
+
<input>.docx
|
|
10
|
+
|
|
11
|
+
Features:
|
|
12
|
+
- Bold (**text**) -> Bold formatting
|
|
13
|
+
- Italic (*text*) -> Italic formatting
|
|
14
|
+
- Inline code (`text`) -> Monospace font
|
|
15
|
+
- Code blocks (```) -> Bordered box with code font
|
|
16
|
+
- --- -> Page break
|
|
17
|
+
- Headings -> Document headings (H1-H4)
|
|
18
|
+
- Lists -> Bullet/numbered lists
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
import re
|
|
22
|
+
import sys
|
|
23
|
+
import os
|
|
24
|
+
from docx import Document
|
|
25
|
+
from docx.shared import Pt, RGBColor, Inches
|
|
26
|
+
from docx.oxml.ns import qn
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def parse_markdown(content):
    """Parse markdown text into a list of structured section dicts.

    Each section dict has keys:
        level (int): heading level (0 for the implicit leading section)
        title (str): heading text ("" if none)
        content (list[str]): plain paragraph lines
        items (list[str]): bullet / numbered list item texts
        code_blocks (list[dict]): {"lang": str, "code": str} fenced blocks
        has_page_break (bool): True if a horizontal rule preceded this section
    """

    def _new_section(level=0, title=""):
        # Build a fresh, independent section record (no shared mutable state).
        return {
            "level": level,
            "title": title,
            "content": [],
            "items": [],
            "code_blocks": [],
            "has_page_break": False,
        }

    def _is_nonempty(section):
        # A section is worth keeping if it carries any renderable payload.
        # NOTE: "items" is included so list-only sections are not dropped
        # (previously a section containing only bullets was silently lost).
        return bool(
            section["title"]
            or section["content"]
            or section["code_blocks"]
            or section["items"]
        )

    sections = []
    current_section = _new_section()

    in_code_block = False
    code_lines = []
    code_lang = ""
    # Set when a horizontal rule is seen; applied to the *next* section so
    # create_docx inserts the page break before the content that follows the
    # rule (previously the flag landed on the preceding section).
    pending_page_break = False

    for line in content.split('\n'):
        # Toggle fenced code blocks (``` with optional language tag).
        if line.strip().startswith('```'):
            if in_code_block:
                # End of code block.
                if code_lines:
                    current_section["code_blocks"].append({
                        "lang": code_lang,
                        "code": '\n'.join(code_lines)
                    })
                code_lines = []
                code_lang = ""
                in_code_block = False
            else:
                # Start of code block.
                in_code_block = True
                code_lang = line.strip()[3:].strip()
            continue

        if in_code_block:
            code_lines.append(line)  # keep raw line, indentation included
            continue

        # Horizontal rule -> page break before the next section.
        if re.match(r'^---+\s*$', line):
            pending_page_break = True
            continue

        # Headings start a new section.
        heading_match = re.match(r'^(#{1,6})\s+(.+)$', line)
        if heading_match:
            if _is_nonempty(current_section):
                sections.append(current_section)
            current_section = _new_section(
                level=len(heading_match.group(1)),
                title=heading_match.group(2).strip(),
            )
            current_section["has_page_break"] = pending_page_break
            pending_page_break = False
        # Bullet list items.
        elif re.match(r'^\s*[-*]\s+', line):
            current_section["items"].append(
                re.sub(r'^\s*[-*]\s+', '', line).strip())
        # Numbered list items (rendered the same as bullets downstream).
        elif re.match(r'^\s*\d+\.\s+', line):
            current_section["items"].append(
                re.sub(r'^\s*\d+\.\s+', '', line).strip())
        # Regular content.
        elif line.strip():
            current_section["content"].append(line.strip())

    if _is_nonempty(current_section):
        sections.append(current_section)

    return sections
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def add_formatted_paragraph(paragraph, text):
    """Append *text* to *paragraph*, honouring **bold**, *italic* and `code` spans.

    Plain text between markers is emitted unstyled; inline code is set in
    Consolas 9pt.
    """
    token_re = re.compile(r'(\*\*(.+?)\*\*|\*(.+?)\*|`(.+?)`)')

    cursor = 0
    for token in token_re.finditer(text):
        # Emit any plain text sitting between the previous token and this one.
        leading = text[cursor:token.start()]
        if leading:
            paragraph.add_run(leading)

        bold_text = token.group(2)
        italic_text = token.group(3)
        code_text = token.group(4)

        if bold_text:
            paragraph.add_run(bold_text).bold = True
        elif italic_text:
            paragraph.add_run(italic_text).italic = True
        elif code_text:
            code_run = paragraph.add_run(code_text)
            code_run.font.name = 'Consolas'
            code_run.font.size = Pt(9)

        cursor = token.end()

    # Flush any trailing plain text after the last token.
    trailing = text[cursor:]
    if trailing:
        paragraph.add_run(trailing)
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def add_code_block(doc, code_text, lang=""):
    """Render *code_text* as a bordered, shaded monospace paragraph in *doc*.

    An optional *lang* label is shown in small grey italics above the code.
    """
    p = doc.add_paragraph()
    pPr = p._p.get_or_add_pPr()

    # Thin grey single border on all four sides of the paragraph.
    pBdr = pPr.makeelement(qn('w:pBdr'), {})
    for side in ('top', 'left', 'bottom', 'right'):
        side_el = pBdr.makeelement(qn(f'w:{side}'), {
            qn('w:val'): 'single',
            qn('w:sz'): '4',
            qn('w:space'): '4',
            qn('w:color'): '999999',
        })
        pBdr.append(side_el)
    pPr.append(pBdr)

    # Light grey background shading behind the whole paragraph.
    shd = pPr.makeelement(qn('w:shd'), {
        qn('w:val'): 'clear',
        qn('w:color'): 'auto',
        qn('w:fill'): 'F5F5F5',
    })
    pPr.append(shd)

    # Optional language label on its own line, small grey italics.
    if lang:
        label_run = p.add_run(f"{lang}\n")
        label_run.font.name = 'Consolas'
        label_run.font.size = Pt(8)
        label_run.font.color.rgb = RGBColor(128, 128, 128)
        label_run.italic = True

    # The code itself, monospace.
    code_run = p.add_run(code_text)
    code_run.font.name = 'Consolas'
    code_run.font.size = Pt(9)

    # Slight left indent for padding, small gap below the block.
    p.paragraph_format.left_indent = Inches(0.2)
    p.paragraph_format.space_after = Pt(6)
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def create_docx(sections, output_path):
    """Create a Word document at *output_path* from parse_markdown() sections.

    The first section's title (if any) becomes the document title; its body
    and every following section are rendered in document order.
    """
    doc = Document()

    remaining = sections
    if sections and sections[0]["title"]:
        # Promote the leading heading to the document title, but still render
        # its paragraphs/code/lists (previously that content was dropped).
        doc.add_heading(sections[0]["title"], level=0)
        _render_section_body(doc, sections[0])
        remaining = sections[1:]

    for section in remaining:
        # Insert the page break before the section it was parsed with.
        if section["has_page_break"]:
            doc.add_page_break()

        if section["title"]:
            # Word heading levels are capped at 4 for readability.
            doc.add_heading(section["title"], level=min(section["level"], 4))

        # Untitled sections (e.g. text before the first heading) are no
        # longer skipped -- their content is rendered without a heading.
        _render_section_body(doc, section)

    doc.save(output_path)
    print(f"Created: {output_path}")


def _render_section_body(doc, section):
    """Append one section's paragraphs, code blocks and list items to *doc*."""
    for para in section["content"]:
        if para:
            p = doc.add_paragraph()
            add_formatted_paragraph(p, para)

    for code_block in section["code_blocks"]:
        add_code_block(doc, code_block["code"], code_block["lang"])

    for item in section["items"]:
        if item:
            p = doc.add_paragraph(style='List Bullet')
            add_formatted_paragraph(p, item)
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def main():
    """CLI entry point: convert <input.md> into a sibling <input>.docx file."""
    if len(sys.argv) < 2:
        print("Usage: python convert_md_to_docx.py <input.md>")
        sys.exit(1)

    input_path = sys.argv[1]
    if not os.path.isfile(input_path):
        # Fail with a clear message instead of an unhandled traceback.
        print(f"Error: input file not found: {input_path}")
        sys.exit(1)

    # Output lands next to the input file, same base name, .docx extension.
    output_dir = os.path.dirname(input_path) or "."
    base_name = os.path.splitext(os.path.basename(input_path))[0]

    with open(input_path, 'r', encoding='utf-8') as f:
        content = f.read()

    sections = parse_markdown(content)

    docx_path = os.path.join(output_dir, base_name + ".docx")
    create_docx(sections, docx_path)
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
# Run as a script: delegate to main() so importing this module has no side effects.
if __name__ == '__main__':
    main()
|
package/src/index.js
ADDED
|
@@ -0,0 +1,491 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* gai-cli-docx - CLI tool for converting Markdown to Word documents
|
|
4
|
+
*
|
|
5
|
+
* Usage: md2word <source-md-file> <dest-docx-file>
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
|
|
9
|
+
import { dirname, resolve } from "path";
|
|
10
|
+
import sizeOf from "image-size";
|
|
11
|
+
import {
|
|
12
|
+
Document,
|
|
13
|
+
Packer,
|
|
14
|
+
Paragraph,
|
|
15
|
+
TextRun,
|
|
16
|
+
HeadingLevel,
|
|
17
|
+
CheckBox,
|
|
18
|
+
ImageRun,
|
|
19
|
+
BorderStyle,
|
|
20
|
+
} from "docx";
|
|
21
|
+
|
|
22
|
+
/**
 * Read and validate the CLI arguments.
 * Exits the process on --help/-h, wrong arity, or a missing source file.
 * @returns {{sourcePath: string, destPath: string}} validated paths
 */
function parseArgs() {
  const cliArgs = process.argv.slice(2);

  // A help flag anywhere on the command line wins over everything else.
  if (cliArgs.some((a) => a === "--help" || a === "-h")) {
    printHelp();
    process.exit(0);
  }

  if (cliArgs.length !== 2) {
    console.error("Error: Exactly two arguments required");
    printHelp();
    process.exit(1);
  }

  const sourcePath = cliArgs[0];
  const destPath = cliArgs[1];

  if (!existsSync(sourcePath)) {
    console.error(`Error: Source file "${sourcePath}" does not exist`);
    process.exit(1);
  }

  return { sourcePath, destPath };
}
|
|
47
|
+
|
|
48
|
+
/** Print CLI usage information to stdout. */
function printHelp() {
  console.log(`
md2word - CLI tool for converting Markdown to Word documents

Usage: md2word <source-md-file> <dest-docx-file>

Arguments:
  source-md-file    Path to the source Markdown file (required)
  dest-docx-file    Path to the output Word document (required)

Options:
  --help, -h        Show this help message

Examples:
  md2word "README.md" "output.docx"
  md2word "docs/guide.md" "dist/guide.docx"
`);
}
|
|
66
|
+
|
|
67
|
+
/**
 * Parse markdown text into a flat list of typed element descriptors
 * ({type, content, ...}) consumed by createDocxElements().
 * @param {string} markdown - Raw markdown source.
 * @returns {Array<object>} Parsed elements in document order.
 */
function parseMarkdownToDocx(markdown) {
  const lines = markdown.split("\n");
  const elements = [];
  let currentCodeBlock = [];
  let inCodeBlock = false;
  let codeBlockLanguage = "";
  let codeBlockIndent = 0;

  for (let i = 0; i < lines.length; i++) {
    const line = lines[i];

    // Horizontal rule (3+ dashes) -> page break, matching the README.
    // FIXES: (1) previously only exactly "---" matched, so "----" rendered
    // as a paragraph; (2) the rule check ran even inside fenced code blocks,
    // turning a literal "---" line of code into a page break.
    if (!inCodeBlock && /^-{3,}$/.test(line.trim())) {
      // Drop blank lines just before the break so the new page starts clean.
      while (
        elements.length > 0 &&
        elements[elements.length - 1].type === "emptyLine"
      ) {
        elements.pop();
      }

      elements.push({ type: "pageBreak", content: "" });

      // Swallow blank lines after the rule as well.
      while (i + 1 < lines.length && lines[i + 1].trim() === "") {
        i++;
      }
      continue;
    }

    // Fenced code block delimiters (``` with optional language tag),
    // including fences indented inside list items.
    if (line.trim().startsWith("```")) {
      if (inCodeBlock) {
        elements.push({
          type: "codeBlock",
          content: currentCodeBlock.join("\n"),
          language: codeBlockLanguage,
          indent: codeBlockIndent,
        });
        currentCodeBlock = [];
        inCodeBlock = false;
        codeBlockLanguage = "";
        codeBlockIndent = 0;
      } else {
        const trimmedLine = line.trim();
        codeBlockLanguage = trimmedLine.substring(3).trim();
        inCodeBlock = true;
        const leadingSpaces = line.match(/^(\s*)/)[1].length;
        codeBlockIndent = Math.floor(leadingSpaces / 4);
      }
      continue;
    }

    if (inCodeBlock) {
      currentCodeBlock.push(line);
      continue;
    }

    // Headings: 1-6 '#' followed by whitespace (CommonMark ATX heading).
    // FIX: previously any line starting with '#' became a heading, so
    // e.g. "#!/usr/bin/env node" or "#tag" were misparsed.
    const headingMatch = line.match(/^(#{1,6})\s+(.*)$/);
    if (headingMatch) {
      elements.push({
        type: "heading",
        level: headingMatch[1].length,
        content: headingMatch[2].trim(),
      });
      continue;
    }

    // Bullet list items (-, *, +), optionally GitHub-style checkboxes.
    if (line.match(/^\s*[-*+]\s+/)) {
      const indent = line.match(/^\s*/)[0].length;
      let text = line.replace(/^\s*[-*+]\s+/, "");

      let isCheckbox = false;
      let isChecked = false;

      if (text.startsWith("[ ] ")) {
        isCheckbox = true;
        isChecked = false;
        text = text.substring(4);
      } else if (text.startsWith("[x] ") || text.startsWith("[X] ")) {
        isCheckbox = true;
        isChecked = true;
        text = text.substring(4);
      }

      elements.push({
        type: isCheckbox ? "checkboxItem" : "listItem",
        content: text,
        indent: Math.floor(indent / 4),
        checked: isChecked,
      });
      continue;
    }

    // Numbered list items; the original number is preserved for rendering.
    if (line.match(/^\s*\d+\.\s+/)) {
      const indent = line.match(/^\s*/)[0].length;
      const numberMatch = line.match(/^\s*(\d+)\.\s+/);
      const originalNumber = numberMatch[1];
      const text = line.replace(/^\s*\d+\.\s+/, "");
      elements.push({
        type: "numberedListItem",
        content: text,
        indent: Math.floor(indent / 4),
        originalNumber,
      });
      continue;
    }

    // Standalone image lines: ![alt](path)
    if (line.match(/^\s*!\[[^\]]*\]\([^)]+\)\s*$/)) {
      const indent = line.match(/^\s*/)[0].length;
      const imageMatch = line.match(/^\s*!\[([^\]]*)\]\(([^)]+)\)\s*$/);
      if (imageMatch) {
        elements.push({
          type: "image",
          content: imageMatch[1],
          path: imageMatch[2],
          indent: Math.floor(indent / 4),
        });
      }
      continue;
    }

    // Everything else: regular paragraph or a blank spacer line.
    if (line.trim() !== "") {
      elements.push({ type: "paragraph", content: line.trim() });
    } else {
      elements.push({ type: "emptyLine", content: "" });
    }
  }

  return elements;
}
|
|
198
|
+
|
|
199
|
+
/**
 * Convert parsed markdown elements into docx Paragraph objects.
 * @param {Array<object>} parsedElements - Output of parseMarkdownToDocx().
 * @param {string} sourcePath - Path of the source .md file; image paths are
 *   resolved relative to its directory.
 * @returns {Paragraph[]} Paragraphs ready to be placed in a Document section.
 */
function createDocxElements(parsedElements, sourcePath) {
  const docxElements = [];

  for (const element of parsedElements) {
    switch (element.type) {
      case "heading": {
        // Clamp to HEADING_6; parse may emit higher levels.
        const headingLevel = Math.min(element.level, 6);
        docxElements.push(
          new Paragraph({
            children: [new TextRun(element.content)],
            heading:
              headingLevel === 1 ? HeadingLevel.HEADING_1
              : headingLevel === 2 ? HeadingLevel.HEADING_2
              : headingLevel === 3 ? HeadingLevel.HEADING_3
              : headingLevel === 4 ? HeadingLevel.HEADING_4
              : headingLevel === 5 ? HeadingLevel.HEADING_5
              : HeadingLevel.HEADING_6,
            spacing: { before: 240, after: 120 },
          })
        );
        break;
      }

      case "paragraph": {
        const runs = parseInlineFormatting(element.content);
        docxElements.push(
          new Paragraph({ children: runs, spacing: { after: 120 } })
        );
        break;
      }

      case "listItem": {
        // Manual bullet glyph + tab; indent/tabStops are in twips (360 = 0.25").
        const runs = parseInlineFormatting(element.content);
        docxElements.push(
          new Paragraph({
            children: [new TextRun("•\t"), ...runs],
            indent: { left: (element.indent + 1) * 360, hanging: 360 },
            tabStops: [{ type: "left", position: (element.indent + 1) * 360 }],
            spacing: { after: 60 },
          })
        );
        break;
      }

      case "checkboxItem": {
        // Real Word checkbox control followed by the item text.
        const runs = parseInlineFormatting(element.content);
        docxElements.push(
          new Paragraph({
            children: [
              new CheckBox({ checked: element.checked }),
              new TextRun("\t"),
              ...runs,
            ],
            indent: { left: (element.indent + 1) * 360, hanging: 360 },
            tabStops: [{ type: "left", position: (element.indent + 1) * 360 }],
            spacing: { after: 60 },
          })
        );
        break;
      }

      case "numberedListItem": {
        // The number typed in the markdown source is reproduced verbatim.
        const runs = parseInlineFormatting(element.content);
        docxElements.push(
          new Paragraph({
            children: [new TextRun(`${element.originalNumber}.\t`), ...runs],
            indent: { left: (element.indent + 1) * 360, hanging: 360 },
            tabStops: [{ type: "left", position: (element.indent + 1) * 360 }],
            spacing: { after: 60 },
          })
        );
        break;
      }

      case "codeBlock": {
        // One bordered/shaded paragraph; lines separated by explicit breaks.
        const codeLines = element.content.split("\n");
        const children = [];

        if (element.language) {
          // Small grey italic language label above the code (size is in
          // half-points: 16 = 8pt).
          children.push(new TextRun({
            text: element.language,
            font: "Consolas",
            size: 16,
            color: "808080",
            italics: true,
          }));
          children.push(new TextRun({ break: 1 }));
        }

        codeLines.forEach((codeLine, idx) => {
          if (idx > 0) children.push(new TextRun({ break: 1 }));
          // Empty lines become a single space so the break is preserved.
          children.push(new TextRun({
            text: codeLine || " ",
            font: "Consolas",
            size: 18,
          }));
        });

        const borderOpts = { style: BorderStyle.SINGLE, size: 4, space: 4, color: "999999" };
        docxElements.push(
          new Paragraph({
            children,
            border: { top: borderOpts, bottom: borderOpts, left: borderOpts, right: borderOpts },
            shading: { fill: "F5F5F5" },
            indent: { left: 288 },
            spacing: { after: 120 },
          })
        );
        break;
      }

      case "pageBreak": {
        // Empty paragraph that forces a page break before itself.
        docxElements.push(
          new Paragraph({
            children: [],
            pageBreakBefore: true,
            spacing: { before: 0, after: 0 },
          })
        );
        break;
      }

      case "emptyLine": {
        docxElements.push(new Paragraph({ text: "", spacing: { after: 60 } }));
        break;
      }

      case "image": {
        try {
          // Resolve the image path relative to the markdown file's directory.
          const markdownDir = dirname(resolve(sourcePath));
          const absoluteImagePath = resolve(markdownDir, element.path);

          if (existsSync(absoluteImagePath)) {
            const imageBuffer = readFileSync(absoluteImagePath);

            // Scale down to a max width of 400px, preserving aspect ratio;
            // fall back to a fixed 400x300 if dimensions can't be read.
            let scaledWidth, scaledHeight;
            try {
              const dimensions = sizeOf(imageBuffer);
              const maxWidth = 400;
              const scaleFactor = Math.min(maxWidth / dimensions.width, 1);
              scaledWidth = Math.round(dimensions.width * scaleFactor);
              scaledHeight = Math.round(dimensions.height * scaleFactor);
            } catch {
              scaledWidth = 400;
              scaledHeight = 300;
            }

            docxElements.push(
              new Paragraph({
                children: [
                  new ImageRun({
                    data: imageBuffer,
                    transformation: { width: scaledWidth, height: scaledHeight },
                    type: "png",
                  }),
                ],
                indent: { left: element.indent * 360 },
              })
            );
          }
        } catch {
          // Silently skip image on error
        }
        break;
      }
    }
  }

  return docxElements;
}
|
|
369
|
+
|
|
370
|
+
/**
 * Split a text line into docx TextRun objects, interpreting **bold**,
 * *italic* and `inline code` markers. Unterminated markers are emitted
 * as literal text.
 * @param {string} text
 * @returns {TextRun[]} At least one run (an empty one for empty input).
 */
function parseInlineFormatting(text) {
  const runs = [];
  let pending = "";
  let pos = 0;

  // Emit any accumulated plain text as an unstyled run.
  const flushPending = () => {
    if (pending) {
      runs.push(new TextRun(pending));
      pending = "";
    }
  };

  while (pos < text.length) {
    // Bold: ** ... ** (checked before italic so "**" is never read as "*").
    if (text.slice(pos, pos + 2) === "**") {
      flushPending();
      pos += 2;
      const close = text.indexOf("**", pos);
      if (close === -1) {
        pending += "**"; // no closing marker: keep the literal asterisks
      } else {
        runs.push(new TextRun({ text: text.substring(pos, close), bold: true }));
        pos = close + 2;
      }
      continue;
    }

    // Italic: * ... *
    if (text[pos] === "*") {
      flushPending();
      pos += 1;
      const close = text.indexOf("*", pos);
      if (close === -1) {
        pending += "*";
      } else {
        runs.push(new TextRun({ text: text.substring(pos, close), italics: true }));
        pos = close + 1;
      }
      continue;
    }

    // Inline code: ` ... ` rendered monospace with light grey shading.
    if (text[pos] === "`") {
      flushPending();
      pos += 1;
      const close = text.indexOf("`", pos);
      if (close === -1) {
        pending += "`";
      } else {
        runs.push(
          new TextRun({
            text: text.substring(pos, close),
            font: "Consolas",
            shading: { fill: "F5F5F5" },
          })
        );
        pos = close + 1;
      }
      continue;
    }

    pending += text[pos];
    pos += 1;
  }

  flushPending();

  return runs.length > 0 ? runs : [new TextRun("")];
}
|
|
443
|
+
|
|
444
|
+
/**
 * CLI entry point: parse arguments, convert the markdown file and write
 * the resulting .docx (creating the destination directory if needed).
 * Progress goes to stderr; only the output path is printed to stdout.
 */
async function main() {
  const { sourcePath, destPath } = parseArgs();

  // stderr so scripted callers can capture stdout as just the output path.
  console.error(`Converting ${sourcePath} to ${destPath}...`);

  const markdownContent = readFileSync(sourcePath, "utf-8");
  const parsedElements = parseMarkdownToDocx(markdownContent);
  const docxElements = createDocxElements(parsedElements, sourcePath);

  const doc = new Document({
    styles: {
      paragraphStyles: [
        {
          id: "default",
          name: "Default",
          basedOn: "Normal",
          next: "default",
          run: {
            // docx sizes are half-points: 22 = 11pt Calibri.
            size: 22,
            font: "Calibri",
          },
          paragraph: {
            spacing: {
              // 276/240 = 1.15 line spacing.
              line: 276,
              lineRule: "auto",
            },
          },
        },
      ],
    },
    sections: [{ children: docxElements }],
  });

  const buffer = await Packer.toBuffer(doc);

  // Ensure the destination directory exists before writing.
  const destDir = dirname(resolve(destPath));
  if (!existsSync(destDir)) {
    mkdirSync(destDir, { recursive: true });
  }

  writeFileSync(destPath, buffer);
  console.log(destPath);
}
|
|
487
|
+
|
|
488
|
+
// Top-level error boundary: report the failure and exit non-zero so shell
// scripts can detect a failed conversion.
main().catch((error) => {
  console.error("Error:", error.message);
  process.exit(1);
});
|
package/test/hdwallet.md
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
# Hierarchical Deterministic (HD) Wallets
|
|
2
|
+
|
|
3
|
+
📌 **NOTE: This assignment is compulsory. You need to complete this in order to generate your own mnemonic phrase in a .env file which will be used for future labs.**
|
|
4
|
+
|
|
5
|
+
## 1. From Keys to Wallets
|
|
6
|
+
|
|
7
|
+
We have learnt that an Ethereum account is represented by an address derived from a private key. In practice, however, users often need many addresses — for privacy, account separation, or interacting with different apps. Since private keys are cryptic, managing dozens of unrelated private keys would be cumbersome and risky. To solve this, modern wallets use a system called Hierarchical Deterministic (HD) wallets, which can generate and manage unlimited addresses from a single master seed.
|
|
8
|
+
|
|
9
|
+
### Core Concepts
|
|
10
|
+
|
|
11
|
+
HD Wallets use cryptographic principles to generate multiple addresses from a single seed phrase (mnemonic). This system provides:
|
|
12
|
+
|
|
13
|
+
- **Deterministic Generation**: Same seed always produces same address sequence
|
|
14
|
+
- **Infinite Addresses**: Can generate unlimited addresses from one seed
|
|
15
|
+
- **Backup Simplicity**: One mnemonic backs up entire wallet
|
|
16
|
+
- **Cross-Wallet Compatibility**: Standard ensures wallet interoperability
|
|
17
|
+
|
|
18
|
+
### Mnemonic Seed Phrases
|
|
19
|
+
|
|
20
|
+
Typically consist of 12 or 24 words that encode the master seed for address generation. Each word comes from a standardized list of 2048 words (BIP39 standard).
|
|
21
|
+
|
|
22
|
+
### Protecting Your Mnemonic
|
|
23
|
+
|
|
24
|
+
This mnemonic phrase is imported into wallet software to generate your private keys and addresses, for example, Metamask. But sometimes, we may need to save the mnemonic in server-side applications such as Hardhat Network for automated tasks like contract deployment or scheduled transactions.
|
|
25
|
+
|
|
26
|
+
The mnemonic we generated from the Lab Practice is sensitive information that should not be stored as plain text in the config file or hard-coded in your code base.
|
|
27
|
+
|
|
28
|
+
- **Single Point of Failure**: Compromised mnemonic exposes all derived addresses
|
|
29
|
+
- **Backup Critical**: Loss of mnemonic means loss of all funds
|
|
30
|
+
- **Storage Best Practices**: Never store digitally, use secure physical storage
|
|
31
|
+
|
|
32
|
+
In the following lab, we will learn how to use the `dotenv` package to securely manage environment variables like mnemonics.
|
|
33
|
+
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## 🛠️ Lab Practice: Using Mnemonic Phrase
|
|
37
|
+
|
|
38
|
+
In this lab, we will learn how to configure HD wallets in Hardhat Network and manage mnemonics securely using environment variables.
|
|
39
|
+
|
|
40
|
+
Hardhat uses a well-known default mnemonic for its local network:
|
|
41
|
+
|
|
42
|
+
```txt
|
|
43
|
+
test test test test test test test test test test test junk
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
We will prove that this is indeed the default mnemonic by comparing the addresses generated from this mnemonic with the accounts provided by Hardhat Network.
|
|
47
|
+
|
|
48
|
+
### Step 1: Start Hardhat Local Node
|
|
49
|
+
|
|
50
|
+
- **Install packages**
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
cd /workspace/day-1/home-assignments/06-hd-wallet
|
|
54
|
+
npm i
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
- **Startup Hardhat Standalone Network**
|
|
58
|
+
|
|
59
|
+
If the node is already running from the previous lab, press `Ctrl+C` to stop it first before starting again.
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
hh node
|
|
63
|
+
|
|
64
|
+
# Output:
|
|
65
|
+
|
|
66
|
+
# Accounts
|
|
67
|
+
# ========
|
|
68
|
+
#
|
|
69
|
+
# WARNING: These accounts, and their private keys, are publicly known.
|
|
70
|
+
# Any funds sent to them on Mainnet or any other live network WILL BE # LOST.
|
|
71
|
+
#
|
|
72
|
+
# Account #0: 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 (10000 ETH)
|
|
73
|
+
# Private Key: # 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
Keep this running till the end of this lab as we want to compare the addresses generated from the mnemonic after we set it in the config file.
|
|
77
|
+
|
|
78
|
+
### Step 2: Configure Hardhat Network with Default Mnemonic
|
|
79
|
+
|
|
80
|
+
- **Set Mnemonic Phrase for Hardhat Network**
|
|
81
|
+
|
|
82
|
+
Open hardhat.config.js and add a **hardhat** network configuration with
|
|
83
|
+
the mnemonic "test test test test test test test test test test test junk"
|
|
84
|
+
|
|
85
|
+
```javascript
|
|
86
|
+
module.exports = {
|
|
87
|
+
solidity: "0.8.20",
|
|
88
|
+
networks: {
|
|
89
|
+
localhost: {
|
|
90
|
+
url: "http://localhost:8545",
|
|
91
|
+
},
|
|
92
|
+
hardhat: {
|
|
93
|
+
accounts: {
|
|
94
|
+
mnemonic:
|
|
95
|
+
"test test test test test test test test test test test junk",
|
|
96
|
+
},
|
|
97
|
+
},
|
|
98
|
+
},
|
|
99
|
+
};
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
- **Start Hardhat Console**
|
|
103
|
+
|
|
104
|
+
Open a parallel terminal window and run:
|
|
105
|
+
|
|
106
|
+
```bash
|
|
107
|
+
hh console
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
NOTE: Do not connect to localhost because we want to use the Hardhat Network built-in provider which uses the mnemonic we just set in the config file.
|
|
111
|
+
|
|
112
|
+
- **Get account addresses**
|
|
113
|
+
|
|
114
|
+
In the Hardhat console, run the following command to get the address of the first account:
|
|
115
|
+
|
|
116
|
+
```js
|
|
117
|
+
> const { ethers } = require("hardhat");
|
|
118
|
+
> accounts = await ethers.getSigners();
|
|
119
|
+
> accounts[0].address
|
|
120
|
+
// '0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266'
|
|
121
|
+
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
Compare this address with the first account address printed in the Hardhat Node terminal. They should match.
|
|
125
|
+
|
|
126
|
+
### Step 3: Generate Custom Mnemonic and Configure Hardhat Network
|
|
127
|
+
|
|
128
|
+
**NOTE:📌** This is the most important part of this lab. Please follow the instructions carefully.
|
|
129
|
+
|
|
130
|
+
- **Generate New Mnemonic Phrase**
|
|
131
|
+
|
|
132
|
+
In the Hardhat console, run the following commands line by line after the `>` prompt to generate a new mnemonic phrase:
|
|
133
|
+
|
|
134
|
+
```js
|
|
135
|
+
> mnemonic = ethers.Wallet.createRandom().mnemonic.phrase;
|
|
136
|
+
|
|
137
|
+
// Sample Output:
|
|
138
|
+
// 'hill drive sure whip bargain horn raven sunny claw example merit income'
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
Record the generated mnemonic as we will need it in the next step.
|
|
142
|
+
|
|
143
|
+
Type "CTRL+C" to exit the Hardhat console.
|
|
144
|
+
|
|
145
|
+
- **Install dotenv package**
|
|
146
|
+
|
|
147
|
+
We need to install a package called `dotenv` that allows us to load environment variables from a `.env` file.
|
|
148
|
+
|
|
149
|
+
```bash
|
|
150
|
+
npm i dotenv
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
- **Create .env file**
|
|
154
|
+
Create a file named `.env` in the lesson directory and add the following content:
|
|
155
|
+
|
|
156
|
+
```env
|
|
157
|
+
FIN556_MNEMONIC="your mnemonic phrase here"
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
Replace `your mnemonic phrase here` with the mnemonic you generated earlier.
|
|
161
|
+
|
|
162
|
+
- **Update hardhat.config.js**
|
|
163
|
+
|
|
164
|
+
Open `hardhat.config.js`.
|
|
165
|
+
|
|
166
|
+
Replace the mnemonic in the **hardhat** network configuration from this:
|
|
167
|
+
|
|
168
|
+
```javascript
|
|
169
|
+
mnemonic:
|
|
170
|
+
"test test test test test test test test test test test junk",
|
|
171
|
+
```
|
|
172
|
+
|
|
173
|
+
to this:
|
|
174
|
+
|
|
175
|
+
```javascript
|
|
176
|
+
mnemonic: process.env.FIN556_MNEMONIC,
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
### Step 4: Verify New Mnemonic is Used
|
|
180
|
+
|
|
181
|
+
- **Restart Hardhat Console**
|
|
182
|
+
|
|
183
|
+
```bash
|
|
184
|
+
hh console
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
- **Get account addresses again**
|
|
188
|
+
|
|
189
|
+
In the Hardhat console, run the following command to get the address of the first account:
|
|
190
|
+
|
|
191
|
+
```js
|
|
192
|
+
> const { ethers } = require("hardhat");
|
|
193
|
+
> accounts = await ethers.getSigners();
|
|
194
|
+
> accounts[0].address
|
|
195
|
+
|
|
196
|
+
// '0x...' Your new address from the new mnemonic will show here
|
|
197
|
+
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
Compare this address with the first account address printed in the Hardhat Node terminal. They will not match because we have changed the mnemonic.
|
|
201
|
+
|
|
202
|
+
**NOTE:📌** Pay attention to the **".env"** file and **hardhat.config.js** changes you made in this lab. It will be used in future labs.
|
|
203
|
+
|
|
204
|
+
---
|
|
205
|
+
|
|
206
|
+
## 2. Derivation Paths
|
|
207
|
+
|
|
208
|
+
You may have noticed that Hardhat Local Node generates 20 accounts by default. And each time you run the following commands:
|
|
209
|
+
|
|
210
|
+
```js
|
|
211
|
+
const accounts = await ethers.getSigners();
|
|
212
|
+
accounts[0].address
|
|
213
|
+
accounts[1].address
|
|
214
|
+
accounts[2].address
|
|
215
|
+
...
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
You get the same 20 addresses.
|
|
219
|
+
|
|
220
|
+
That is because unlike a single private key wallet, HD wallets can generate multiple addresses from the same mnemonic using a concept called derivation paths.
|
|
221
|
+
|
|
222
|
+
Addresses are generated using derivation paths like `m/44'/60'/0'/0` where:
|
|
223
|
+
|
|
224
|
+
- `m`: Master key
|
|
225
|
+
- `44'`: Purpose (HD wallets)
|
|
226
|
+
- `60'`: Coin type (Ethereum)
|
|
227
|
+
- `0'`: Account index
|
|
228
|
+
- `0`: Change index (external addresses)
|
|
229
|
+
|
|
230
|
+
By changing the last segment of the derivation path, we can generate different addresses from the same mnemonic.
|
|
231
|
+
|
|
232
|
+
For example, the first three addresses are derived using the following paths:
|
|
233
|
+
|
|
234
|
+
- First address: `m/44'/60'/0'/0/0`
|
|
235
|
+
- Second address: `m/44'/60'/0'/0/1`
|
|
236
|
+
- Third address: `m/44'/60'/0'/0/2`
|
|
237
|
+
|
|
238
|
+
Notice how only the last segment changes to generate different addresses.
|
|
239
|
+
|
|
240
|
+
---
|
|
241
|
+
|
|
242
|
+
## 🛠️ Lab Practice: Using Derivation Paths
|
|
243
|
+
|
|
244
|
+
- **Start Hardhat Console**
|
|
245
|
+
|
|
246
|
+
```bash
|
|
247
|
+
hh console
|
|
248
|
+
```
|
|
249
|
+
|
|
250
|
+
- **Generate New Mnemonic Phrase**
|
|
251
|
+
|
|
252
|
+
Generate a new mnemonic phrase and save it into a variable:
|
|
253
|
+
|
|
254
|
+
```js
|
|
255
|
+
> const { ethers } = require("ethers");
|
|
256
|
+
> mnemonic = ethers.Wallet.createRandom().mnemonic.phrase;
|
|
257
|
+
|
|
258
|
+
// Sample Output:
|
|
259
|
+
// 'hill drive sure whip bargain horn raven sunny claw example merit income'
|
|
260
|
+
```
|
|
261
|
+
|
|
262
|
+
- **Generate Accounts**
|
|
263
|
+
|
|
264
|
+
Use the generated mnemonic to derive the first three Ethereum accounts using the standard derivation path `m/44'/60'/0'/0/n` where `n` is the account index (0, 1, 2).
|
|
265
|
+
|
|
266
|
+
```js
|
|
267
|
+
|
|
268
|
+
// Derive the first account (Ethereum derivation path: m/44'/60'/0'/0/0)
|
|
269
|
+
|
|
270
|
+
> const wallet0 = ethers.HDNodeWallet.fromPhrase(
|
|
271
|
+
mnemonic,
|
|
272
|
+
null,
|
|
273
|
+
"m/44'/60'/0'/0/0"
|
|
274
|
+
);
|
|
275
|
+
> wallet0.address
|
|
276
|
+
|
|
277
|
+
// Sample Output:
|
|
278
|
+
// '0x18b2Ba693Fc01A6e7e6031e5a31936AC8ED8Aef5'
|
|
279
|
+
|
|
280
|
+
// ------------------------------------------------------------------
|
|
281
|
+
|
|
282
|
+
// Derive the second account (m/44'/60'/0'/0/1)
|
|
283
|
+
|
|
284
|
+
> const wallet1 = ethers.HDNodeWallet.fromPhrase(
|
|
285
|
+
mnemonic,
|
|
286
|
+
null,
|
|
287
|
+
"m/44'/60'/0'/0/1"
|
|
288
|
+
);
|
|
289
|
+
> wallet1.address
|
|
290
|
+
|
|
291
|
+
// Sample Output:
|
|
292
|
+
// '0x1B1256AD2F06d73F44C211660124c3d1ad706369'
|
|
293
|
+
|
|
294
|
+
// ------------------------------------------------------------------
|
|
295
|
+
|
|
296
|
+
// Derive the third account (m/44'/60'/0'/0/2)
|
|
297
|
+
|
|
298
|
+
> const wallet2 = ethers.HDNodeWallet.fromPhrase(
|
|
299
|
+
mnemonic,
|
|
300
|
+
null,
|
|
301
|
+
"m/44'/60'/0'/0/2"
|
|
302
|
+
);
|
|
303
|
+
> wallet2.address
|
|
304
|
+
|
|
305
|
+
// Sample Output:
|
|
306
|
+
//'0x56EDa570299e4e28B8dA016E1eFABc2FB8872A4f'
|
|
307
|
+
|
|
308
|
+
```
|
|
309
|
+
|
|
310
|
+
Record the generated mnemonic and the first three account addresses.
|
|
311
|
+
|
|
312
|
+
You can see that by changing the last segment of the derivation path, we can generate different addresses from the same mnemonic.
|
|
313
|
+
|
|
314
|
+
- **Task Completed ✅**
|
package/test/test.sh
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Smoke test for the md2word CLI: converts test/hdwallet.md to a .docx file
# and verifies that the output file was actually created and is non-empty-ish.
#
# -e: abort on any command failure; -u: error on unset variables;
# -o pipefail: a failure anywhere in a pipeline fails the pipeline.
set -euo pipefail

# Resolve paths relative to this script so the test works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
INPUT="$SCRIPT_DIR/hdwallet.md"
OUTPUT="$SCRIPT_DIR/hdwallet.docx"

# Install dependencies if needed
if [ ! -d "$PROJECT_DIR/node_modules" ]; then
    echo "Installing dependencies..."
    (cd "$PROJECT_DIR" && npm install)
fi

# Remove any previous output so a stale file cannot mask a failed conversion.
rm -f "$OUTPUT"

# Run the conversion. Echo the actual resolved paths (the original message
# hardcoded "test/hdwallet.md test/hdwallet.docx", which did not match the
# command being executed).
echo "Running: node src/index.js $INPUT $OUTPUT"
node "$PROJECT_DIR/src/index.js" "$INPUT" "$OUTPUT"

# Verify the output file was created
if [ -f "$OUTPUT" ]; then
    SIZE=$(wc -c < "$OUTPUT")
    echo "OK: $OUTPUT created ($SIZE bytes)"
else
    echo "FAIL: $OUTPUT was not created"
    exit 1
fi
|