medium2md-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- medium2md/__init__.py +0 -0
- medium2md/cli.py +102 -0
- medium2md/main.py +6 -0
- medium2md/pipeline.py +229 -0
- medium2md_cli-0.1.0.dist-info/METADATA +237 -0
- medium2md_cli-0.1.0.dist-info/RECORD +9 -0
- medium2md_cli-0.1.0.dist-info/WHEEL +4 -0
- medium2md_cli-0.1.0.dist-info/entry_points.txt +2 -0
- medium2md_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
medium2md/__init__.py
ADDED
|
File without changes
|
medium2md/cli.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import zipfile
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
import tempfile
|
|
4
|
+
import typer
|
|
5
|
+
|
|
6
|
+
from medium2md.pipeline import (
|
|
7
|
+
find_post_html_files,
|
|
8
|
+
get_title_canonical,
|
|
9
|
+
convert_html_file,
|
|
10
|
+
slug_from_post,
|
|
11
|
+
write_bundle,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
# Single-command Typer application; the `medium2md` console script dispatches here.
app = typer.Typer(help="Convert a Medium export ZIP into Hugo page bundles.")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@app.command()
def convert(
    export_zip: Path = typer.Argument(..., exists=True, file_okay=True, dir_okay=False),
    out: Path = typer.Option(Path("content/posts"), "--out", "-o"),
) -> None:
    """Convert a Medium export ZIP into Hugo page bundles under *out*.

    The ZIP is extracted to a temporary directory, likely post HTML files
    are located, and each one is converted into a page bundle
    (``<out>/<slug>/index.md`` plus localized images). Progress is echoed
    per post and a summary is printed at the end. Exits with status 1 when
    the output path is invalid, no posts are found, or nothing was written.
    """
    out = out.resolve()
    if not out.exists():
        # Interactive safety net: offer to create a missing output directory
        # instead of failing outright.
        if not typer.confirm(
            f"Output directory does not exist: {out}\nCreate it?",
            default=True,
        ):
            raise typer.Exit(1)
        out.mkdir(parents=True, exist_ok=True)
        typer.echo(f"Created {out}")
    elif not out.is_dir():
        typer.echo(f"Error: Output path is not a directory: {out}", err=True)
        raise typer.Exit(1)

    typer.echo(f"Export: {export_zip}")
    typer.echo(f"Out: {out}")

    # Extract into a temp dir that is cleaned up automatically when done.
    with tempfile.TemporaryDirectory(prefix="medium2md_") as td:
        tmp_dir = Path(td)

        with zipfile.ZipFile(export_zip, "r") as z:
            z.extractall(tmp_dir)

        # all_html is only used for the informational count below;
        # post_files is the filtered set that actually gets converted.
        all_html = sorted(tmp_dir.rglob("*.html"))
        post_files = find_post_html_files(tmp_dir)

        typer.echo(f"Found {len(all_html)} HTML file(s) in export, {len(post_files)} post(s) to convert.")

        if not post_files:
            typer.echo(
                typer.style(
                    "Warning: No post HTML files found. "
                    "Expected posts under a 'posts/' folder in the export, or excluded README/blocks/bookmarks/claps/highlights/interests.",
                    fg="yellow",
                ),
                err=True,
            )
            raise typer.Exit(1)

        used_slugs: set[str] = set()
        written = 0
        errors = 0

        for i, html_path in enumerate(post_files, 1):
            rel = html_path.relative_to(tmp_dir)
            try:
                # NOTE(review): each post is parsed twice — once here for the
                # slug, once inside convert_html_file. Acceptable for clarity;
                # confirm before optimizing.
                title, canonical = get_title_canonical(html_path)
                slug = slug_from_post(title, canonical)
                base_slug = slug
                while slug in used_slugs:
                    # Simple collision: append -2, -3, ...
                    # Once a suffix has been appended, the last "-N" segment is
                    # always numeric, so int() is safe on the second pass.
                    suffix = 2 if slug == base_slug else int(slug.split("-")[-1]) + 1
                    slug = f"{base_slug}-{suffix}"
                used_slugs.add(slug)
                # The bundle dir must exist before conversion so that image
                # localization can write into <bundle>/images/.
                bundle_dir = out / slug
                bundle_dir.mkdir(parents=True, exist_ok=True)
                title, canonical, body_md, num_images = convert_html_file(html_path, tmp_dir, bundle_dir)
                write_bundle(out, slug, title, canonical, body_md)
                written += 1
                img_info = f" ({num_images} image(s))" if num_images else ""
                typer.echo(f" [{i}/{len(post_files)}] {slug} → {out / slug / 'index.md'}{img_info}")
            except Exception as e:
                # One bad post must not abort the whole run; report and move on.
                # NOTE(review): a partially written bundle dir (and its claimed
                # slug) is left behind on failure — confirm whether cleanup is
                # desired.
                errors += 1
                typer.echo(
                    typer.style(f" [{i}/{len(post_files)}] Failed {rel}: {e}", fg="red"),
                    err=True,
                )

    typer.echo("")
    if written:
        typer.echo(typer.style(f"Done. {written} post(s) written to {out}", fg="green"))
    if errors:
        typer.echo(typer.style(f"Failed: {errors} post(s) could not be converted.", fg="red"), err=True)
    if written == 0:
        typer.echo(
            typer.style(
                "No posts were written. Check errors above or export structure (expected 'posts/' folder).",
                fg="yellow",
            ),
            err=True,
        )
        raise typer.Exit(1)
|
medium2md/main.py
ADDED
medium2md/pipeline.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
1
|
+
"""Minimal conversion pipeline: find post HTML, parse, convert to Markdown, write Hugo bundles."""
|
|
2
|
+
|
|
3
|
+
import shutil
|
|
4
|
+
import time
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from urllib.parse import urlparse
|
|
7
|
+
|
|
8
|
+
import yaml
|
|
9
|
+
from bs4 import BeautifulSoup
|
|
10
|
+
from markdownify import markdownify as md
|
|
11
|
+
from slugify import slugify
|
|
12
|
+
|
|
13
|
+
# httpx is declared as a dependency of the wheel, but image downloading
# degrades gracefully (remote URLs are left as-is) if it is unavailable.
try:
    import httpx
except ImportError:
    httpx = None  # type: ignore[assignment]

# Request like a browser so Medium's CDN serves images rather than rejecting
# the default HTTP client User-Agent.
IMAGE_REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:109.0) Gecko/20100101 Firefox/119.0",
    "Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*,*/*;q=0.8",
}


# Directories in a Medium export that hold utility pages (not posts);
# used to filter HTML files when no posts/ folder exists.
NON_POST_DIRS = {"blocks", "bookmarks", "claps", "highlights", "interests"}
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def find_post_html_files(root: Path) -> list[Path]:
    """Locate the HTML files under *root* that look like Medium posts.

    The standard export keeps posts under ``root/posts/``; when that folder
    exists, everything inside it is returned. Otherwise all HTML files are
    considered, minus the known utility directories and the top-level
    ``README.html``.
    """
    posts_dir = root / "posts"
    if posts_dir.is_dir():
        return sorted(posts_dir.rglob("*.html"))
    # No posts/ folder: filter out utility pages by directory name.
    posts: list[Path] = []
    for candidate in sorted(root.rglob("*.html")):
        if not candidate.is_file():
            continue
        if candidate.name == "README.html":
            continue
        if NON_POST_DIRS.intersection(candidate.parts):
            continue
        posts.append(candidate)
    return posts
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _extract_canonical(soup: BeautifulSoup) -> str | None:
    """Return the canonical URL from a ``<link rel="canonical">`` tag, if any."""
    tag = soup.find("link", rel="canonical", href=True)
    if not tag:
        return None
    return tag["href"].strip()
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def _extract_title(soup: BeautifulSoup) -> str:
    """Return the post title: first ``<h1>``, else ``<title>``, else a placeholder."""
    for tag_name in ("h1", "title"):
        el = soup.find(tag_name)
        if el:
            return el.get_text(strip=True)
    return "Untitled"
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _extract_article_html(soup: BeautifulSoup) -> str:
    """Extract the main article content as an HTML string.

    Tries ``<article>`` first, then ``<main>``, then ``<body>``; returns an
    empty string when none of those elements exist.
    """
    for tag_name in ("article", "main", "body"):
        el = soup.find(tag_name)
        if el:
            return str(el)
    return ""
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def get_title_canonical(html_path: Path) -> tuple[str, str | None]:
    """Light parse of one post: return (title, canonical URL) for slug/bundle naming."""
    markup = html_path.read_text(encoding="utf-8", errors="replace")
    parsed = BeautifulSoup(markup, "lxml")
    title = _extract_title(parsed)
    canonical = _extract_canonical(parsed)
    return title, canonical
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
# Content-Type → file extension map, used when the image URL's path does not
# carry a recognizable extension (see _extension_for_url).
_CT_EXT = {
    "image/jpeg": ".jpg",
    "image/jpg": ".jpg",
    "image/png": ".png",
    "image/gif": ".gif",
    "image/webp": ".webp",
    "image/svg+xml": ".svg",
}
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def _extension_for_url(url: str, content_type: str | None = None) -> str:
|
|
96
|
+
if content_type and content_type.split(";")[0].strip().lower() in _CT_EXT:
|
|
97
|
+
return _CT_EXT[content_type.split(";")[0].strip().lower()]
|
|
98
|
+
path = urlparse(url).path
|
|
99
|
+
if path and "." in path:
|
|
100
|
+
ext = path.rsplit(".", 1)[-1].lower()
|
|
101
|
+
if ext in ("png", "jpg", "jpeg", "gif", "webp", "svg"):
|
|
102
|
+
return f".{ext}"
|
|
103
|
+
return ".png"
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _localize_images(
    article_soup: BeautifulSoup,
    html_path: Path,
    tmp_dir: Path,
    bundle_dir: Path,
) -> int:
    """Rewrite every ``<img src>`` in *article_soup* to a bundle-local file.

    Relative sources are resolved against the HTML file's directory and copied
    from the extracted export (sources resolving outside *tmp_dir* are
    skipped). Remote http(s) sources are downloaded with browser-like headers
    and one retry so Medium's CDN does not reject the request. Localized
    images are written to ``bundle_dir/images/<n>.<ext>`` and the tag's
    ``src`` is rewritten to ``images/<n>.<ext>``; sources that cannot be
    localized are left untouched so the Markdown keeps the original URL.

    Returns the number of images localized.
    """
    images_dir = bundle_dir / "images"
    imgs = article_soup.find_all("img", src=True)
    localized = 0
    for i, img in enumerate(imgs, 1):
        src = img["src"].strip()
        if not src:
            continue
        if not src.startswith(("http://", "https://")):
            # Relative or file path: resolve against the HTML file's directory.
            resolved = (html_path.parent / src).resolve()
            try:
                resolved.relative_to(tmp_dir)
            except ValueError:
                continue  # points outside the extracted export; skip
            if not resolved.is_file():
                continue
            ext = resolved.suffix.lower() or ".png"
            dest_name = f"{i}{ext}"
            # Create images/ lazily so posts without localizable images do not
            # end up with an empty directory in their bundle.
            images_dir.mkdir(parents=True, exist_ok=True)
            shutil.copy2(resolved, images_dir / dest_name)
            img["src"] = f"images/{dest_name}"
            localized += 1
            continue
        # Remote URL: download (with User-Agent and retry so CDNs don't block).
        if not httpx:
            continue  # httpx unavailable: leave the remote URL in place
        for attempt in range(2):
            try:
                r = httpx.get(
                    src,
                    follow_redirects=True,
                    timeout=45,
                    headers=IMAGE_REQUEST_HEADERS,
                )
                r.raise_for_status()
                if len(r.content) == 0:
                    raise ValueError("empty response")
                ct = r.headers.get("content-type", "")
                ext = _extension_for_url(src, ct)
                dest_name = f"{i}{ext}"
                images_dir.mkdir(parents=True, exist_ok=True)
                (images_dir / dest_name).write_bytes(r.content)
                img["src"] = f"images/{dest_name}"
                localized += 1
                break
            except Exception:
                if attempt < 1:
                    time.sleep(0.5 + attempt)
                # Final failure: src stays unchanged so the Markdown still
                # carries the original URL; the user can fix it manually.
    return localized
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def convert_html_file(
    html_path: Path,
    tmp_dir: Path,
    bundle_dir: Path,
) -> tuple[str, str | None, str, int]:
    """Convert one post HTML file to Markdown.

    Localizes images into ``bundle_dir/images/`` and returns
    ``(title, canonical_url, markdown_body, num_images_localized)``.
    """
    markup = html_path.read_text(encoding="utf-8", errors="replace")
    soup = BeautifulSoup(markup, "lxml")
    title = _extract_title(soup)
    canonical = _extract_canonical(soup)

    article_html = _extract_article_html(soup)
    if not article_html:
        # No recognizable content element: emit an empty body.
        return title, canonical, "", 0

    article_soup = BeautifulSoup(article_html, "lxml")
    num_images = _localize_images(article_soup, html_path, tmp_dir, bundle_dir)
    markdown = md(
        str(article_soup),
        heading_style="ATX",
        strip=["script", "style"],
        escape_asterisks=False,
        escape_underscores=False,
    )
    return title, canonical, (markdown or "").strip(), num_images
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def slug_from_post(title: str, canonical: str | None) -> str:
    """Derive a Hugo-friendly slug, preferring the canonical URL's last segment."""
    if canonical and "/" in canonical:
        # e.g. https://medium.com/@user/some-post-slug-123 -> some-post-slug-123
        tail = canonical.rstrip("/").rsplit("/", 1)[-1]
        if tail and tail != "medium.com":
            candidate = slugify(tail, max_length=80)
            if candidate:
                return candidate
    # Fall back to the title; guarantee a non-empty result.
    return slugify(title, max_length=80) or "untitled"
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def write_bundle(out_root: Path, slug: str, title: str, canonical: str | None, body_md: str) -> Path:
    """Write one Hugo page bundle at ``out_root/<slug>/index.md``.

    Front matter contains title, draft flag, slug, and (when available) the
    original Medium canonical URL. Returns the path to the written index.md.
    """
    bundle_dir = out_root / slug
    bundle_dir.mkdir(parents=True, exist_ok=True)

    front: dict = {
        "title": title,
        "draft": True,
        "slug": slug,
    }
    if canonical:
        front["medium"] = {"canonical": canonical}

    front_text = yaml.dump(front, default_flow_style=False, allow_unicode=True, sort_keys=False)
    pieces = ["---\n", front_text, "---\n\n", body_md]
    if body_md and not body_md.endswith("\n"):
        pieces.append("\n")  # ensure the file ends with a newline

    index_md = bundle_dir / "index.md"
    index_md.write_text("".join(pieces), encoding="utf-8")
    return index_md
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: medium2md-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Convert a Medium export ZIP into Hugo-ready Markdown page bundles with localized images.
|
|
5
|
+
Project-URL: Homepage, https://github.com/edgarbc/medium2md
|
|
6
|
+
Project-URL: Repository, https://github.com/edgarbc/medium2md
|
|
7
|
+
Project-URL: Documentation, https://github.com/edgarbc/medium2md#readme
|
|
8
|
+
Author: Edgar Bermudez
|
|
9
|
+
License: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: export,hugo,markdown,medium,migration,static-site
|
|
12
|
+
Classifier: Development Status :: 4 - Beta
|
|
13
|
+
Classifier: Environment :: Console
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
18
|
+
Classifier: Topic :: Text Processing :: Markup
|
|
19
|
+
Requires-Python: >=3.13
|
|
20
|
+
Requires-Dist: beautifulsoup4>=4.14.3
|
|
21
|
+
Requires-Dist: httpx>=0.28.1
|
|
22
|
+
Requires-Dist: lxml>=6.0.2
|
|
23
|
+
Requires-Dist: markdownify>=1.2.2
|
|
24
|
+
Requires-Dist: python-dateutil>=2.9.0.post0
|
|
25
|
+
Requires-Dist: python-slugify>=8.0.4
|
|
26
|
+
Requires-Dist: pyyaml>=6.0.3
|
|
27
|
+
Requires-Dist: rich>=14.3.3
|
|
28
|
+
Requires-Dist: typer>=0.24.1
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: twine>=6.0; extra == 'dev'
|
|
31
|
+
Description-Content-Type: text/markdown
|
|
32
|
+
|
|
33
|
+
# medium2md
|
|
34
|
+
|
|
35
|
+
[](https://pypi.org/project/medium2md-cli/)
|
|
36
|
+
[](https://pypi.org/project/medium2md-cli/)
|
|
37
|
+
[](https://opensource.org/licenses/MIT)
|
|
38
|
+
|
|
39
|
+
> Convert a Medium export ZIP into clean, Hugo-ready Markdown page bundles.
|
|
40
|
+
|
|
41
|
+
**medium2md** is a CLI tool that transforms Medium's HTML export into properly structured [Hugo](https://gohugo.io/) content using page bundles — enabling full ownership of your content and a clean, reproducible migration from Medium to Hugo.
|
|
42
|
+
|
|
43
|
+
---
|
|
44
|
+
|
|
45
|
+
## Table of Contents
|
|
46
|
+
|
|
47
|
+
- [Why This Exists](#why-this-exists)
|
|
48
|
+
- [Features](#features)
|
|
49
|
+
- [Installation](#installation)
|
|
50
|
+
- [Usage](#usage)
|
|
51
|
+
- [Output Structure](#output-structure)
|
|
52
|
+
- [Project Structure](#project-structure)
|
|
53
|
+
- [Development Roadmap](#development-roadmap)
|
|
54
|
+
- [Contributing](#contributing)
|
|
55
|
+
- [License](#license)
|
|
56
|
+
|
|
57
|
+
---
|
|
58
|
+
|
|
59
|
+
## Why This Exists
|
|
60
|
+
|
|
61
|
+
Medium allows you to export your account data as a ZIP archive, but the raw export:
|
|
62
|
+
|
|
63
|
+
- Contains unstructured HTML
|
|
64
|
+
- Includes inconsistent metadata
|
|
65
|
+
- References remote image URLs
|
|
66
|
+
|
|
67
|
+
**medium2md** solves this by providing:
|
|
68
|
+
|
|
69
|
+
| Feature | Description |
|
|
70
|
+
|---|---|
|
|
71
|
+
| HTML → Markdown | Converts Medium HTML posts to clean Markdown |
|
|
72
|
+
| Hugo front matter | Generates YAML front matter from post metadata |
|
|
73
|
+
| Image localization | Downloads remote images into each bundle; copies local images when present in the export |
|
|
74
|
+
| Canonical URL | Preserves the original Medium URL |
|
|
75
|
+
| Conversion reports | Summarizes what was converted and what was skipped |
|
|
76
|
+
| Incremental re-runs | *(planned)* Re-run only changed posts |
|
|
77
|
+
|
|
78
|
+
This tool is designed to be **deterministic**, **reproducible**, and **CI-friendly**.
|
|
79
|
+
|
|
80
|
+
---
|
|
81
|
+
|
|
82
|
+
## Features
|
|
83
|
+
|
|
84
|
+
### MVP (current)
|
|
85
|
+
|
|
86
|
+
- Convert Medium export ZIP (posts under `posts/` in the export)
|
|
87
|
+
- Extract title and canonical URL; generate slug
|
|
88
|
+
- Convert HTML to Markdown
|
|
89
|
+
- Create Hugo page bundles with `index.md` and optional `images/`
|
|
90
|
+
- Image localization: download remote images into the bundle; copy local images when present in the export
|
|
91
|
+
- Basic slug collision handling (`slug-2`, `slug-3`, …)
|
|
92
|
+
- Terminal progress and summary; per-post image count; prompt to create missing output dir
|
|
93
|
+
|
|
94
|
+
### Planned
|
|
95
|
+
|
|
96
|
+
- Extract date and optional metadata (tags, etc.) into front matter
|
|
97
|
+
- Incremental runs via state file
|
|
98
|
+
- Embed detection and shortcode conversion (YouTube, Twitter, Gist)
|
|
99
|
+
- Pandoc backend option
|
|
100
|
+
- Verification command
|
|
101
|
+
- Theme-specific front matter mapping
|
|
102
|
+
- Conversion report (e.g. JSON/file)
|
|
103
|
+
|
|
104
|
+
---
|
|
105
|
+
|
|
106
|
+
## Installation
|
|
107
|
+
|
|
108
|
+
This project uses [uv](https://github.com/astral-sh/uv) for dependency management.
|
|
109
|
+
|
|
110
|
+
```bash
|
|
111
|
+
git clone https://github.com/edgarbc/medium2md.git
|
|
112
|
+
cd medium2md
|
|
113
|
+
uv sync
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
Once published to PyPI, install with:
|
|
117
|
+
|
|
118
|
+
```bash
|
|
119
|
+
pip install medium2md-cli
|
|
120
|
+
# or with uv:
|
|
121
|
+
uv tool install medium2md-cli
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
The installed CLI command is `medium2md` in either case (only the PyPI package name is `medium2md-cli`).
|
|
125
|
+
|
|
126
|
+
---
|
|
127
|
+
|
|
128
|
+
## Usage
|
|
129
|
+
|
|
130
|
+
Copy your Medium export ZIP into the `input/` directory (present after a fresh clone; its contents are git-ignored):
|
|
131
|
+
|
|
132
|
+
```bash
|
|
133
|
+
cp ~/Downloads/medium-export.zip input/
|
|
134
|
+
uv run medium2md input/medium-export.zip --out ../blog/content/posts
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
> **Note:** The `input/` directory is tracked by git (via `.gitkeep`) so it exists after a fresh clone, but its contents are ignored — your ZIP files will never be accidentally committed.
|
|
138
|
+
|
|
139
|
+
### Front Matter Example
|
|
140
|
+
|
|
141
|
+
Each converted post produces an `index.md` with Hugo-compatible YAML front matter. Current output:
|
|
142
|
+
|
|
143
|
+
```yaml
|
|
144
|
+
---
|
|
145
|
+
title: "My Post Title"
|
|
146
|
+
draft: true
|
|
147
|
+
slug: "my-post-slug"
|
|
148
|
+
medium:
|
|
149
|
+
canonical: "https://medium.com/@you/post-slug"
|
|
150
|
+
---
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
Additional keys (e.g. `date`, `lastmod`, `tags`) are planned.
|
|
154
|
+
|
|
155
|
+
---
|
|
156
|
+
|
|
157
|
+
## Output Structure
|
|
158
|
+
|
|
159
|
+
Each Medium post becomes a Hugo page bundle. Image links in the Markdown point into the bundle’s `images/` folder (remote images are downloaded; local images from the export are copied):
|
|
160
|
+
|
|
161
|
+
```
|
|
162
|
+
content/posts/
|
|
163
|
+
└── my-post-slug/
|
|
164
|
+
├── index.md
|
|
165
|
+
└── images/
|
|
166
|
+
├── 1.png
|
|
167
|
+
├── 2.jpg
|
|
168
|
+
└── …
|
|
169
|
+
```
|
|
170
|
+
|
|
171
|
+
---
|
|
172
|
+
|
|
173
|
+
## Project Structure
|
|
174
|
+
|
|
175
|
+
```
|
|
176
|
+
medium2md/
|
|
177
|
+
├── medium2md/
|
|
178
|
+
│ ├── __init__.py
|
|
179
|
+
│ ├── cli.py
|
|
180
|
+
│ ├── pipeline.py
|
|
181
|
+
│ └── main.py
|
|
182
|
+
├── pyproject.toml
|
|
183
|
+
├── README.md
|
|
184
|
+
├── project-plan.md
|
|
185
|
+
└── input/
|
|
186
|
+
└── medium-export.zip
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
### Pipeline Architecture
|
|
190
|
+
|
|
191
|
+
medium2md follows a layered pipeline:
|
|
192
|
+
|
|
193
|
+
```
|
|
194
|
+
ZIP → extract → find posts → parse HTML → localize images (copy/download) → Markdown conversion → front matter + Hugo bundle write
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
> **Philosophy:** Correctness first, cleverness later.
|
|
198
|
+
|
|
199
|
+
---
|
|
200
|
+
|
|
201
|
+
## Development Roadmap
|
|
202
|
+
|
|
203
|
+
| Milestone | Focus | Status |
|
|
204
|
+
|---|---|---|
|
|
205
|
+
| 1 — MVP | ZIP ingestion, HTML→Markdown, Hugo bundle writing, image localization | ✅ Done |
|
|
206
|
+
| 2 — Robustness | Incremental state tracking, metadata fallback, verify command | 📋 Planned |
|
|
207
|
+
| 3 — Polish | Embed conversion, theme config mapping, Pandoc backend, internal link rewriting | 📋 Planned |
|
|
208
|
+
|
|
209
|
+
---
|
|
210
|
+
|
|
211
|
+
## Contributing
|
|
212
|
+
|
|
213
|
+
Contributions are welcome! To get started:
|
|
214
|
+
|
|
215
|
+
1. Fork the repository
|
|
216
|
+
2. Create a feature branch (`git checkout -b feat/my-feature`)
|
|
217
|
+
3. Make your changes
|
|
218
|
+
4. Open a pull request (run `uv run medium2md --help` to confirm the CLI works)
|
|
219
|
+
|
|
220
|
+
---
|
|
221
|
+
|
|
222
|
+
## Publishing to PyPI (maintainers)
|
|
223
|
+
|
|
224
|
+
1. Bump `version` in `pyproject.toml`.
|
|
225
|
+
2. Build: `uv build` (creates `dist/`).
|
|
226
|
+
3. Install dev deps and upload: `uv sync --extra dev` then `uv run twine upload dist/*` (requires a [PyPI API token](https://pypi.org/help/#apitoken); use `__token__` as username).
|
|
227
|
+
4. Optionally tag the release: `git tag v0.1.0 && git push --tags`.
|
|
228
|
+
|
|
229
|
+
## License
|
|
230
|
+
|
|
231
|
+
This project is licensed under the [MIT License](LICENSE).
|
|
232
|
+
|
|
233
|
+
---
|
|
234
|
+
|
|
235
|
+
> Built by [Edgar Bermudez](https://github.com/edgarbc) and [GitHub Copilot](https://github.com/features/copilot) with 💖 to enable long-term content ownership and reproducible publishing workflows.
|
|
236
|
+
>
|
|
237
|
+
> Not affiliated with Medium or any of its subsidiaries.
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
medium2md/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
medium2md/cli.py,sha256=B22AXFzgL7lSZv1NCMmyYQewe0gWkqeYpRyzV2aoDlQ,3711
|
|
3
|
+
medium2md/main.py,sha256=8Yg-1EqgELUgB_nTPV4TZYm5ZjFkdiIjfm0UlJluc7Y,87
|
|
4
|
+
medium2md/pipeline.py,sha256=qpVA9wJ3rlOxq1e_nUcEBmb04IAteLXJgBsp_SK_AAE,7875
|
|
5
|
+
medium2md_cli-0.1.0.dist-info/METADATA,sha256=PxztbcnfLwld8xQUCLnUP9FBBsZKVLKIZM97rD9L7QE,7215
|
|
6
|
+
medium2md_cli-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
7
|
+
medium2md_cli-0.1.0.dist-info/entry_points.txt,sha256=RZBS8UcYhjELVwc0XTwboCZIPMcI5uH8rYxTpQ8mswQ,48
|
|
8
|
+
medium2md_cli-0.1.0.dist-info/licenses/LICENSE,sha256=sIy7BqZo1LLk4ZVHX15j86tqhU4CihZHu-FKdyLWZyA,1071
|
|
9
|
+
medium2md_cli-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Edgar Bermudez
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|