ai-dockpack 0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Kuan-Peng
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,14 @@
+ Metadata-Version: 2.4
+ Name: ai-dockpack
+ Version: 0.1.0
+ Summary: AI-readable doc pack generator (CLI)
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: typer>=0.12
+ Requires-Dist: rich>=13
+ Requires-Dist: markdown-it-py>=3.0.0
+ Dynamic: license-file
+
+ # ai-dockpack
+ AI-readable doc pack generator (CLI)
@@ -0,0 +1,2 @@
+ # ai-dockpack
+ AI-readable doc pack generator (CLI)
@@ -0,0 +1,20 @@
+ [build-system]
+ requires = ["setuptools>=68", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "ai-dockpack"
+ version = "0.1.0"
+ description = "AI-readable doc pack generator (CLI)"
+ readme = "README.md"
+ requires-python = ">=3.9"
+ dependencies = ["typer>=0.12", "rich>=13", "markdown-it-py>=3.0.0"]
+
+ [project.scripts]
+ ai-dockpack = "ai_dockpack.cli:app"
+
+ [tool.setuptools]
+ package-dir = {"" = "src"}
+
+ [tool.setuptools.packages.find]
+ where = ["src"]
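
The `[project.scripts]` table above is what turns the package into a command-line tool: installing it generates an `ai-dockpack` launcher that imports `ai_dockpack.cli:app` and calls it. A minimal sketch of what that generated launcher effectively does (illustrative only, not a file shipped in the package):

    # Rough equivalent of the console script produced from [project.scripts]
    from ai_dockpack.cli import app  # Typer application object

    if __name__ == "__main__":
        app()  # dispatches to the build / about / init subcommands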
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
File without changes
@@ -0,0 +1,423 @@
+ from __future__ import annotations
+
+ import datetime
+ import json
+ import hashlib
+ import posixpath
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import List, Dict, Any, Optional
+ import zipfile
+ import re
+ from importlib import metadata as importlib_metadata
+ from markdown_it import MarkdownIt
+
+
+ @dataclass
+ class DocFile:
+     relpath: str
+     abspath: Path
+     text: str
+     links: List[str]
+
+
+ def _read_text(path: Path) -> str:
+     return path.read_text(encoding="utf-8", errors="ignore")
+
+
+ def _sha256_text(text: str) -> str:
+     return hashlib.sha256(text.encode("utf-8", errors="ignore")).hexdigest()
+
+
+ def _get_attr(token, key: str) -> Optional[str]:
+     """Handle both markdown-it-py attrs layouts: a dict, or a list of (k, v) pairs."""
+     attrs = getattr(token, "attrs", None)
+     if not attrs:
+         return None
+
+     if isinstance(attrs, dict):
+         v = attrs.get(key)
+         return v if isinstance(v, str) and v else None
+
+     # Fall back to a list/tuple of (key, value) pairs
+     try:
+         for item in attrs:
+             if isinstance(item, (list, tuple)) and len(item) >= 2:
+                 k, v = item[0], item[1]
+                 if k == key and isinstance(v, str) and v:
+                     return v
+     except TypeError:
+         return None
+
+     return None
+
+
+ def _extract_links_md(text: str) -> List[str]:
+     """Extract links from Markdown (MVP: collect hrefs only, no validation)."""
+     md = MarkdownIt()
+     tokens = md.parse(text)
+     links: List[str] = []
+
+     def walk(ts):
+         for t in ts:
+             if t.type == "link_open":
+                 href = _get_attr(t, "href")
+                 if href:
+                     links.append(href)
+
+             # Important: Markdown links usually sit in the children of inline tokens
+             children = getattr(t, "children", None)
+             if children:
+                 walk(children)
+
+     walk(tokens)
+     return links
+
+
+ def _collect_doc_files(repo: Path) -> List[Path]:
+     """MVP: collect only README*.md plus docs/**/*.md."""
+     files: List[Path] = []
+
+     for name in ["README.md", "README.MD", "readme.md", "Readme.md"]:
+         p = repo / name
+         if p.exists() and p.is_file():
+             files.append(p)
+
+     docs_dir = repo / "docs"
+     if docs_dir.exists() and docs_dir.is_dir():
+         files.extend(sorted(docs_dir.rglob("*.md")))
+
+     uniq: List[Path] = []
+     seen = set()
+     for f in files:
+         rp = str(f.resolve())
+         if rp not in seen:
+             uniq.append(f)
+             seen.add(rp)
+     return uniq
+
+
+ def _make_tree_text(relpaths: List[str]) -> str:
+     tree: Dict[str, Any] = {}
+
+     def add_path(path: str):
+         parts = path.split("/")
+         node = tree
+         for part in parts[:-1]:
+             node = node.setdefault(part, {})
+         node.setdefault("__files__", []).append(parts[-1])
+
+     def render(node: Dict[str, Any], indent: int = 0) -> List[str]:
+         lines: List[str] = []
+         dirs = sorted([k for k in node.keys() if k != "__files__"])
+         files = sorted(node.get("__files__", []))
+         for d in dirs:
+             lines.append(" " * indent + f"- {d}/")
+             lines.extend(render(node[d], indent + 1))
+         for f in files:
+             lines.append(" " * indent + f"- {f}")
+         return lines
+
+     for rp in relpaths:
+         add_path(rp)
+
+     lines = render(tree, 0)
+     return ("\n".join(lines) + "\n") if lines else ""
+
+
+ def _anchors_in_markdown(text: str) -> set[str]:
+     """Very rough anchor extraction: turn '# Title' into a 'title'-style slug (good enough for the MVP)."""
+     anchors: set[str] = set()
+     for line in text.splitlines():
+         s = line.strip()
+         if s.startswith("#"):
+             # Drop the leading '#' characters and surrounding whitespace
+             title = s.lstrip("#").strip()
+             if not title:
+                 continue
+             slug = (
+                 title.lower()
+                 .replace(" ", "-")
+                 .replace("/", "-")
+             )
+             anchors.add(slug)
+     return anchors
+
+
+ def _resolve_relative_link(doc_rel: str, href: str) -> str:
+     """
+     Resolve a relative link such as './x.md' into a repo-relative POSIX-style path.
+     doc_rel: the current document's relative path, e.g. 'docs/setup.md'.
+     """
+     base_dir = str(Path(doc_rel).parent).replace("\\", "/")
+     # base_dir may be '.', treat that as empty
+     if base_dir == ".":
+         base_dir = ""
+     joined = (Path(base_dir) / href).as_posix()
+     # Normalize './' and '../' segments
+     norm = posixpath.normpath(joined)
+     return norm
+
+
+ def _health_check(repo: Path, docfiles: List[DocFile], links_map: Dict[str, List[str]]) -> Dict[str, Any]:
+     """MVP: check only two issue types, missing files and missing anchors (intra-repo Markdown links only)."""
+     # Fast lookup of which doc files exist
+     existing_paths = {d.relpath for d in docfiles}
+     # Fast lookup of the anchors defined in each document
+     anchors_map = {d.relpath: _anchors_in_markdown(d.text) for d in docfiles}
+
+     issues: List[Dict[str, Any]] = []
+
+     for doc_rel, hrefs in links_map.items():
+         for href in hrefs:
+             if href.startswith("#"):
+                 # Anchor check
+                 anchor = href[1:]
+                 if anchor and anchor not in anchors_map.get(doc_rel, set()):
+                     issues.append(
+                         {"type": "missing_anchor", "doc": doc_rel, "target": href}
+                     )
+                 continue
+             # Only handle repo-internal relative Markdown links (MVP: ends with .md or starts with ./ or ../)
+             if href.startswith("http://") or href.startswith("https://"):
+                 continue
+             if not (href.endswith(".md") or href.startswith("./") or href.startswith("../")):
+                 continue
+
+             resolved = _resolve_relative_link(doc_rel, href)
+             # Normalize to POSIX separators
+             resolved = resolved.replace("\\", "/")
+
+             if resolved not in existing_paths:
+                 issues.append(
+                     {
+                         "type": "missing_file",
+                         "doc": doc_rel,
+                         "target": href,
+                         "resolved_path": resolved,
+                     }
+                 )
+
+     return {"schema": "ai-dockpack/health@0.1", "issues": issues}
+
+
+ def _read_tool_version() -> str:
+     """
+     Read the version of the tool itself (the ai-dockpack project version).
+     Preferred source: pyproject.toml at the tool's project root.
+     Fallback: the installed package version via importlib.metadata.
+     """
+     tool_root = Path(__file__).resolve().parents[2]  # .../ai-dockpack/
+     pyproject = tool_root / "pyproject.toml"
+     if pyproject.exists():
+         text = pyproject.read_text(encoding="utf-8", errors="ignore")
+         m = re.search(r'^\s*version\s*=\s*"([^"]+)"\s*$', text, flags=re.M)
+         if m:
+             return m.group(1).strip()
+
+     try:
+         return importlib_metadata.version("ai-dockpack")
+     except Exception:
+         return ""
+
+
+ def _parse_semver(v: str):
+     """Parse '1.2.3' into (1, 2, 3); return None if parsing fails (MVP)."""
+     m = re.match(r"^\s*(\d+)\.(\d+)\.(\d+)\s*$", v)
+     if not m:
+         return None
+     return tuple(int(x) for x in m.groups())
+
+
+ def _freshness_check(repo: Path, docfiles: List[DocFile]) -> Dict[str, Any]:
+     """
+     MVP: a single rule.
+     - Find occurrences of `ai-dockpack==X.Y.Z` in the docs.
+     - If X.Y.Z is lower than the current tool version (see _read_tool_version), report outdated_pinned_version.
+     """
+     current = _read_tool_version()
+     current_sem = _parse_semver(current) if current else None
+
+     issues: List[Dict[str, Any]] = []
+     if not current_sem:
+         return {"schema": "ai-dockpack/freshness@0.1", "current_version": current, "issues": issues}
+
+     pattern = re.compile(r"ai-dockpack==(\d+\.\d+\.\d+)")
+
+     for d in docfiles:
+         for m in pattern.finditer(d.text):
+             pinned = m.group(1)
+             pinned_sem = _parse_semver(pinned)
+             if pinned_sem and pinned_sem < current_sem:
+                 issues.append(
+                     {
+                         "type": "outdated_pinned_version",
+                         "doc": d.relpath,
+                         "found": pinned,
+                         "current": current,
+                     }
+                 )
+
+     return {"schema": "ai-dockpack/freshness@0.1", "current_version": current, "issues": issues}
+
+
+ def _slugify(title: str) -> str:
+     s = title.strip().lower()
+     s = s.replace("/", "-")
+     s = "-".join(s.split())
+     return s
+
+
+ def _split_markdown_by_headings(text: str) -> List[Dict[str, str]]:
+     """
+     Minimal chunking:
+     - A line starting with '#', '##', or '###' begins a new chunk.
+     - Each chunk carries: section (title), anchor (slug), and content.
+     """
+     lines = text.splitlines()
+     chunks: List[Dict[str, str]] = []
+
+     current_title = "Document"
+     current_anchor = "document"
+     buf: List[str] = []
+
+     def flush():
+         nonlocal buf
+         content = "\n".join(buf).strip()
+         if content:
+             chunks.append(
+                 {"section": current_title, "anchor": current_anchor, "content": content}
+             )
+         buf = []
+
+     for line in lines:
+         s = line.strip()
+         if s.startswith("#"):
+             # Heading line: flush the previous chunk first
+             flush()
+             title = s.lstrip("#").strip()
+             current_title = title if title else "Untitled"
+             current_anchor = _slugify(current_title)
+             # Keep the heading line in the chunk content (reads more naturally for an AI)
+             buf.append(line)
+         else:
+             buf.append(line)
+
+     flush()
+     return chunks
+
+
+ def _render_report(manifest: Dict[str, Any], health: Dict[str, Any], freshness: Dict[str, Any]) -> str:
+     doc_count = manifest.get("doc_count", 0)
+     chunk_count = manifest.get("chunk_count", 0)
+
+     health_issues = health.get("issues", []) or []
+     fresh_issues = freshness.get("issues", []) or []
+
+     lines: List[str] = []
+     lines.append("# AI DocPack Report")
+     lines.append("")
+     lines.append("## Summary")
+     lines.append(f"- docs: **{doc_count}**")
+     lines.append(f"- chunks: **{chunk_count}**")
+     lines.append(f"- health issues: **{len(health_issues)}**")
+     lines.append(f"- freshness issues: **{len(fresh_issues)}**")
+     lines.append("")
+
+     lines.append("## Doc Health (broken / missing)")
+     if not health_issues:
+         lines.append("- ✅ no issues")
+     else:
+         for it in health_issues:
+             t = it.get("type", "")
+             doc = it.get("doc", "")
+             target = it.get("target", "")
+             if t == "missing_file":
+                 rp = it.get("resolved_path", "")
+                 lines.append(f"- ❌ **missing_file** in `{doc}` → `{target}` (resolved: `{rp}`)")
+             elif t == "missing_anchor":
+                 lines.append(f"- ❌ **missing_anchor** in `{doc}` → `{target}`")
+             else:
+                 lines.append(f"- ❌ **{t}** in `{doc}` → `{target}`")
+     lines.append("")
+
+     lines.append("## Freshness (possibly outdated)")
+     if not fresh_issues:
+         lines.append("- ✅ no issues")
+     else:
+         for it in fresh_issues:
+             t = it.get("type", "")
+             doc = it.get("doc", "")
+             found = it.get("found", "")
+             current = it.get("current", freshness.get("current_version", ""))
+             if t == "outdated_pinned_version":
+                 lines.append(f"- ⚠️ **outdated_pinned_version** in `{doc}`: found `{found}`, current `{current}`")
+             else:
+                 lines.append(f"- ⚠️ **{t}** in `{doc}`")
+     lines.append("")
+     return "\n".join(lines)
+
+
+ def build_docpack(repo_path: str, out_zip: str) -> Path:
+     repo = Path(repo_path).resolve()
+     if not repo.exists():
+         raise FileNotFoundError(f"repo not found: {repo}")
+
+     doc_paths = _collect_doc_files(repo)
+
+     docfiles: List[DocFile] = []
+     all_links: Dict[str, List[str]] = {}
+
+     for p in doc_paths:
+         rel = str(p.relative_to(repo)).replace("\\", "/")
+         text = _read_text(p)
+         links = _extract_links_md(text)
+
+         docfiles.append(DocFile(relpath=rel, abspath=p, text=text, links=links))
+         if links:
+             all_links[rel] = links
+
+     chunk_total = sum(len(_split_markdown_by_headings(d.text)) for d in docfiles)
+     tree_txt = _make_tree_text([d.relpath for d in docfiles])
+
+     manifest: Dict[str, Any] = {
+         "schema": "ai-dockpack/manifest@0.1",
+         "repo": str(repo),
+         "doc_count": len(docfiles),
+         "chunk_count": chunk_total,
+         "docs": [
+             {
+                 "path": d.relpath,
+                 "bytes": len(d.text.encode("utf-8", errors="ignore")),
+                 "sha256": _sha256_text(d.text),
+                 "mtime": int(d.abspath.stat().st_mtime),
+                 "links_count": len(d.links),
+             }
+             for d in docfiles
+         ],
+         "generated_at": datetime.datetime.utcnow().isoformat() + "Z",
+     }
+
+     out = Path(out_zip).resolve()
+
+     with zipfile.ZipFile(out, "w", compression=zipfile.ZIP_DEFLATED) as z:
+         z.writestr("manifest.json", json.dumps(manifest, ensure_ascii=False, indent=2))
+         z.writestr("tree.txt", tree_txt)
+         z.writestr("links.json", json.dumps(all_links, ensure_ascii=False, indent=2))
+         health = _health_check(repo, docfiles, all_links)
+         z.writestr("health.json", json.dumps(health, ensure_ascii=False, indent=2))
+         freshness = _freshness_check(repo, docfiles)
+         z.writestr("freshness.json", json.dumps(freshness, ensure_ascii=False, indent=2))
+         report_md = _render_report(manifest, health, freshness)
+         z.writestr("report.md", report_md)
+
+         for d in docfiles:
+             parts = _split_markdown_by_headings(d.text)
+             for idx, part in enumerate(parts, start=1):
+                 chunk_name = f"{d.relpath.replace('/', '__')}__{idx:03d}.txt"
+                 chunk_text = part["content"]
+                 chunk_sha = _sha256_text(chunk_text)
+
+                 header = (
+                     "---\n"
+                     f"source_path: {d.relpath}\n"
+                     f"section: {part['section']}\n"
+                     f"anchor: {part['anchor']}\n"
+                     f"bytes: {len(chunk_text.encode('utf-8', errors='ignore'))}\n"
+                     f"sha256: {chunk_sha}\n"
+                     "---\n\n"
+                 )
+
+                 z.writestr(f"chunks/{chunk_name}", header + chunk_text + "\n")
+
+     return out
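
For reference, a minimal usage sketch of `build_docpack` as defined above; the repository path is a placeholder, and the archive members named here (`manifest.json`, `chunks/`, ...) are the ones the function writes:

    import json
    import zipfile

    from ai_dockpack.build_impl import build_docpack

    # Build a doc pack for a local checkout (path is illustrative).
    out = build_docpack("path/to/some/repo", "docpack.zip")

    # The archive contains manifest.json, tree.txt, links.json, health.json,
    # freshness.json, report.md and one chunks/*.txt file per Markdown section.
    with zipfile.ZipFile(out) as z:
        manifest = json.loads(z.read("manifest.json"))
        print(manifest["doc_count"], manifest["chunk_count"])
        print([n for n in z.namelist() if n.startswith("chunks/")][:3])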
@@ -0,0 +1,35 @@
+ import typer
+ from rich import print
+
+ app = typer.Typer(help="ai-dockpack: AI-readable doc pack generator (CLI)")
+
+
+ @app.command()
+ def build(
+     repo: str = typer.Option(".", help="Repository path to scan (defaults to the current directory)"),
+     out: str = typer.Option("docpack.zip", help="Output archive filename"),
+ ):
+     """Generate docpack.zip for the given repository."""
+     from .build_impl import build_docpack
+     out_path = build_docpack(repo, out)
+     print(f"[green]OK[/green] generated: {out_path}")
+
+
+ @app.command()
+ def about():
+     """Show tool information (also keeps Typer in multi-command mode)."""
+     print("[cyan]ai-dockpack[/cyan] - AI-readable doc pack generator (CLI)")
+
+
+ @app.command("init")
+ def init_cmd(
+     repo: str = typer.Option(".", help="Repository path to write the workflow into (defaults to the current directory)"),
+     release: bool = typer.Option(True, "--release/--no-release", help="Whether to publish to GitHub Releases"),
+     force: bool = typer.Option(False, "--force", help="Overwrite an existing docpack.yml"),
+ ):
+     """Generate .github/workflows/docpack.yml so the repository builds docpack.zip automatically."""
+     from .init_impl import init_workflow
+
+     wf = init_workflow(repo, publish_release=release, force=force)
+     print(f"[green]OK[/green] workflow written: {wf}")
+
+
+ if __name__ == "__main__":
+     app()
@@ -0,0 +1,84 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+ from textwrap import dedent, indent
+
+
+ def init_workflow(repo_path: str, *, publish_release: bool = True, force: bool = False) -> Path:
+     """
+     Generate .github/workflows/docpack.yml in the target repository.
+     - publish_release=True: publish a Release (docpack.zip is downloadable from its Assets)
+     - publish_release=False: only upload an Actions artifact
+     """
+     repo = Path(repo_path).resolve()
+     if not repo.exists():
+         raise FileNotFoundError(f"repo not found: {repo}")
+
+     wf_dir = repo / ".github" / "workflows"
+     wf_dir.mkdir(parents=True, exist_ok=True)
+
+     wf_file = wf_dir / "docpack.yml"
+     if wf_file.exists() and not force:
+         raise FileExistsError(f"{wf_file} already exists (use --force to overwrite)")
+
+     # permissions block (only needed when publishing a release)
+     permissions_block = ""
+     if publish_release:
+         permissions_block = "permissions:\n  contents: write\n\n"
+
+     # Release step: written flush-left first, then indented to the steps level (6 spaces)
+     release_step = ""
+     if publish_release:
+         release_step = indent(
+             dedent(
+                 """\
+                 - name: Publish Release (docpack.zip)
+                   uses: softprops/action-gh-release@v2
+                   with:
+                     tag_name: docpack-${{ github.run_number }}
+                     name: DocPack ${{ github.run_number }}
+                     prerelease: true
+                     files: docpack.zip
+                   env:
+                     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+                 """
+             ).rstrip(),
+             "      ",
+         )
+
+     yml = (
+         "name: Build DocPack\n\n"
+         "on:\n"
+         "  push:\n"
+         '    branches: ["main"]\n'
+         "  workflow_dispatch:\n\n"
+         f"{permissions_block}"
+         "jobs:\n"
+         "  build-docpack:\n"
+         "    runs-on: ubuntu-latest\n"
+         "    steps:\n"
+         "      - name: Checkout\n"
+         "        uses: actions/checkout@v4\n\n"
+         "      - name: Setup Python\n"
+         "        uses: actions/setup-python@v5\n"
+         "        with:\n"
+         '          python-version: "3.11"\n\n'
+         "      - name: Install ai-dockpack (from GitHub)\n"
+         "        run: |\n"
+         "          python -m pip install --upgrade pip\n"
+         '          pip install --no-cache-dir --force-reinstall "git+https://github.com/Kuan-Peng/ai-dockpack.git@main"\n\n'
+         "      - name: Build docpack.zip\n"
+         "        run: |\n"
+         "          ai-dockpack build --repo . --out docpack.zip\n\n"
+         "      - name: Upload artifact\n"
+         "        uses: actions/upload-artifact@v4\n"
+         "        with:\n"
+         "          name: docpack\n"
+         "          path: docpack.zip\n"
+     )
+
+     if publish_release:
+         yml = yml + "\n" + release_step + "\n"
+
+     wf_file.write_text(yml, encoding="utf-8")
+     return wf_file
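
A minimal sketch of calling `init_workflow` directly (the target path is a placeholder; passing force=True overwrites an existing docpack.yml, as implemented above):

    from ai_dockpack.init_impl import init_workflow

    # Write .github/workflows/docpack.yml into a local checkout.
    wf_path = init_workflow("path/to/some/repo", publish_release=True, force=True)
    print(wf_path.read_text(encoding="utf-8"))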
@@ -0,0 +1,14 @@
+ Metadata-Version: 2.4
+ Name: ai-dockpack
+ Version: 0.1.0
+ Summary: AI-readable doc pack generator (CLI)
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: typer>=0.12
+ Requires-Dist: rich>=13
+ Requires-Dist: markdown-it-py>=3.0.0
+ Dynamic: license-file
+
+ # ai-dockpack
+ AI-readable doc pack generator (CLI)
@@ -0,0 +1,13 @@
+ LICENSE
+ README.md
+ pyproject.toml
+ src/ai_dockpack/__init__.py
+ src/ai_dockpack/build_impl.py
+ src/ai_dockpack/cli.py
+ src/ai_dockpack/init_impl.py
+ src/ai_dockpack.egg-info/PKG-INFO
+ src/ai_dockpack.egg-info/SOURCES.txt
+ src/ai_dockpack.egg-info/dependency_links.txt
+ src/ai_dockpack.egg-info/entry_points.txt
+ src/ai_dockpack.egg-info/requires.txt
+ src/ai_dockpack.egg-info/top_level.txt
@@ -0,0 +1,2 @@
+ [console_scripts]
+ ai-dockpack = ai_dockpack.cli:app
@@ -0,0 +1,3 @@
+ typer>=0.12
+ rich>=13
+ markdown-it-py>=3.0.0
@@ -0,0 +1 @@
+ ai_dockpack