agentpack-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. agentpack/__init__.py +3 -0
  2. agentpack/adapters/__init__.py +0 -0
  3. agentpack/adapters/base.py +22 -0
  4. agentpack/adapters/claude.py +32 -0
  5. agentpack/adapters/codex.py +26 -0
  6. agentpack/adapters/cursor.py +29 -0
  7. agentpack/adapters/generic.py +18 -0
  8. agentpack/adapters/windsurf.py +26 -0
  9. agentpack/analysis/__init__.py +0 -0
  10. agentpack/analysis/dependency_graph.py +80 -0
  11. agentpack/analysis/go_imports.py +32 -0
  12. agentpack/analysis/java_imports.py +19 -0
  13. agentpack/analysis/js_ts_imports.py +53 -0
  14. agentpack/analysis/python_imports.py +45 -0
  15. agentpack/analysis/ranking.py +400 -0
  16. agentpack/analysis/rust_imports.py +32 -0
  17. agentpack/analysis/symbols.py +154 -0
  18. agentpack/analysis/tests.py +30 -0
  19. agentpack/application/__init__.py +0 -0
  20. agentpack/application/pack_service.py +352 -0
  21. agentpack/cli.py +33 -0
  22. agentpack/commands/__init__.py +0 -0
  23. agentpack/commands/_shared.py +13 -0
  24. agentpack/commands/benchmark.py +302 -0
  25. agentpack/commands/claude_cmd.py +55 -0
  26. agentpack/commands/diff.py +46 -0
  27. agentpack/commands/doctor.py +185 -0
  28. agentpack/commands/explain.py +238 -0
  29. agentpack/commands/init.py +79 -0
  30. agentpack/commands/install.py +252 -0
  31. agentpack/commands/monitor.py +105 -0
  32. agentpack/commands/pack.py +188 -0
  33. agentpack/commands/scan.py +51 -0
  34. agentpack/commands/session.py +204 -0
  35. agentpack/commands/stats.py +138 -0
  36. agentpack/commands/status.py +37 -0
  37. agentpack/commands/summarize.py +64 -0
  38. agentpack/commands/watch.py +185 -0
  39. agentpack/core/__init__.py +0 -0
  40. agentpack/core/bootstrap.py +46 -0
  41. agentpack/core/cache.py +41 -0
  42. agentpack/core/config.py +101 -0
  43. agentpack/core/context_pack.py +222 -0
  44. agentpack/core/diff.py +40 -0
  45. agentpack/core/git.py +145 -0
  46. agentpack/core/git_hooks.py +8 -0
  47. agentpack/core/global_install.py +14 -0
  48. agentpack/core/ignore.py +66 -0
  49. agentpack/core/merkle.py +8 -0
  50. agentpack/core/models.py +115 -0
  51. agentpack/core/redactor.py +99 -0
  52. agentpack/core/scanner.py +150 -0
  53. agentpack/core/snapshot.py +60 -0
  54. agentpack/core/token_estimator.py +26 -0
  55. agentpack/core/vscode_tasks.py +5 -0
  56. agentpack/data/agentpack.md +160 -0
  57. agentpack/installers/__init__.py +0 -0
  58. agentpack/installers/claude.py +160 -0
  59. agentpack/installers/codex.py +54 -0
  60. agentpack/installers/cursor.py +76 -0
  61. agentpack/installers/windsurf.py +50 -0
  62. agentpack/integrations/__init__.py +0 -0
  63. agentpack/integrations/git_hooks.py +109 -0
  64. agentpack/integrations/global_install.py +221 -0
  65. agentpack/integrations/vscode_tasks.py +85 -0
  66. agentpack/renderers/__init__.py +3 -0
  67. agentpack/renderers/compact.py +75 -0
  68. agentpack/renderers/markdown.py +144 -0
  69. agentpack/renderers/receipts.py +10 -0
  70. agentpack/session/__init__.py +33 -0
  71. agentpack/session/state.py +105 -0
  72. agentpack/summaries/__init__.py +0 -0
  73. agentpack/summaries/base.py +42 -0
  74. agentpack/summaries/llm.py +100 -0
  75. agentpack/summaries/offline.py +97 -0
  76. agentpack_cli-0.1.0.dist-info/METADATA +1391 -0
  77. agentpack_cli-0.1.0.dist-info/RECORD +80 -0
  78. agentpack_cli-0.1.0.dist-info/WHEEL +4 -0
  79. agentpack_cli-0.1.0.dist-info/entry_points.txt +2 -0
  80. agentpack_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,41 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from pathlib import Path
5
+
6
+ from agentpack.core.models import FileSummary
7
+
8
+
9
+ def _cache_key(path: str, file_hash: str, provider: str, schema_version: int) -> str:
10
+ import hashlib
11
+ raw = f"{path}|{file_hash}|{provider}|{schema_version}"
12
+ return hashlib.sha256(raw.encode()).hexdigest()
13
+
14
+
15
+ def _cache_dir(root: Path) -> Path:
16
+ return root / ".agentpack" / "cache"
17
+
18
+
19
def load_summary(
    root: Path, path: str, file_hash: str, provider: str = "offline", schema_version: int = 1
) -> FileSummary | None:
    """Return the cached FileSummary for this (path, hash, provider, schema), or None.

    An entry that fails validation is treated as corrupt: it is removed
    (best effort) and None is returned so the caller regenerates it.
    """
    entry = _cache_dir(root) / f"{_cache_key(path, file_hash, provider, schema_version)}.json"
    if not entry.exists():
        return None
    try:
        return FileSummary.model_validate_json(entry.read_text())
    except Exception:
        # Corrupt or stale-schema entry — drop it and report a cache miss.
        try:
            entry.unlink(missing_ok=True)
        except OSError:
            pass
        return None
34
+
35
+
36
def save_summary(root: Path, summary: FileSummary) -> None:
    """Persist *summary* to the on-disk cache, creating the cache dir if needed."""
    target_dir = _cache_dir(root)
    target_dir.mkdir(parents=True, exist_ok=True)
    name = _cache_key(summary.path, summary.hash, summary.provider, summary.schema_version)
    (target_dir / f"{name}.json").write_text(summary.model_dump_json(indent=2))
@@ -0,0 +1,101 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+ from typing import Any
5
+
6
+ import tomllib
7
+ import tomli_w
8
+ from pydantic import BaseModel, Field
9
+
10
+
11
class ProjectConfig(BaseModel):
    """Filesystem layout of the project being packed."""

    # Project root directory, relative to where the CLI is invoked.
    root: str = "."
    # Name of the gitignore-style file whose patterns exclude files from packs.
    ignore_file: str = ".agentignore"
14
+
15
+
16
class ContextConfig(BaseModel):
    """Knobs controlling context-pack size and composition."""

    # Overall token budget for a generated pack.
    default_budget: int = 25000
    # Packing depth preset: "minimal", "balanced" or "deep".
    default_mode: str = "balanced"
    # Files with a larger token estimate are never included in full.
    max_file_tokens: int = 4000
    include_tests: bool = True
    include_configs: bool = True
    # Whether per-file inclusion/exclusion receipts are emitted with the pack.
    include_receipts: bool = True
23
+
24
+
25
class SummaryConfig(BaseModel):
    """How file summaries are produced and cached."""

    # Summary backend; "offline" presumably needs no LLM access — see agentpack.summaries.
    provider: str = "offline"
    # Part of the cache key: bumping it invalidates previously cached summaries.
    schema_version: int = 1
28
+
29
+
30
class AgentConfig(BaseModel):
    """Output settings for a single agent target."""

    # Path the rendered context file is written to.
    output: str
    # NOTE(review): presumably patches CLAUDE.md to reference the pack — confirm in installers.
    patch_claude_md: bool = False
33
+
34
+
35
class AgentsConfig(BaseModel):
    """Known agent targets with their default output locations."""

    # default_factory lambdas keep each Config instance's AgentConfig independent
    # (a shared default instance would be mutated across configs).
    claude: AgentConfig = Field(
        default_factory=lambda: AgentConfig(
            output=".agentpack/context.claude.md",
            patch_claude_md=True,
        )
    )
    generic: AgentConfig = Field(
        default_factory=lambda: AgentConfig(output=".agentpack/context.md")
    )
45
+
46
+
47
class ScoringWeights(BaseModel):
    """Configurable scoring weights. All values are additive points."""

    # Working-tree state.
    modified: float = 100
    staged: float = 90
    # Keyword matches: filename strongest, then symbol names, then raw content.
    filename_keyword: float = 80
    symbol_keyword: float = 70
    content_keyword_per_hit: float = 10
    content_keyword_max: float = 60  # cap on accumulated content-keyword points
    # Dependency-graph proximity and related artifacts.
    direct_dep: float = 50
    reverse_dep: float = 40
    related_test: float = 35
    config_file: float = 25
    recently_modified: float = 20
    # Penalties (negative points).
    large_unrelated_penalty: float = -50
    ignored_penalty: float = -100
62
+
63
+
64
class Config(BaseModel):
    """Root of agentpack's configuration, persisted at .agentpack/config.toml."""

    project: ProjectConfig = Field(default_factory=ProjectConfig)
    context: ContextConfig = Field(default_factory=ContextConfig)
    summary: SummaryConfig = Field(default_factory=SummaryConfig)
    agents: AgentsConfig = Field(default_factory=AgentsConfig)
    scoring: ScoringWeights = Field(default_factory=ScoringWeights)
70
+
71
+
72
# Module-level default configuration instance; treat as read-only.
DEFAULT_CONFIG = Config()
73
+
74
+
75
def config_path(root: Path) -> Path:
    """Location of the project's TOML config file under *root*."""
    return root.joinpath(".agentpack", "config.toml")
77
+
78
+
79
def load_config(root: Path) -> Config:
    """Load .agentpack/config.toml under *root*, falling back to defaults.

    Returns a fresh ``Config()`` on every fallback path. Previously the shared
    module-level ``DEFAULT_CONFIG`` singleton was returned, so a caller that
    mutated its config would silently change the defaults seen by all other
    callers. A fresh instance is value-equal, so callers are unaffected.

    A malformed config file is reported via ``warnings.warn`` and defaults are
    used — deliberately best-effort, not a crash.
    """
    path = config_path(root)
    if not path.exists():
        return Config()
    try:
        with path.open("rb") as f:
            data: dict[str, Any] = tomllib.load(f)
        return Config.model_validate(data)
    except Exception:
        import warnings
        warnings.warn(
            f"Failed to parse {path} — using defaults. Fix or delete the file.",
            stacklevel=2,
        )
        return Config()
94
+
95
+
96
def save_config(cfg: Config, root: Path) -> None:
    """Serialize *cfg* as TOML to .agentpack/config.toml, creating dirs as needed."""
    target = config_path(root)
    target.parent.mkdir(parents=True, exist_ok=True)
    with target.open("wb") as handle:
        tomli_w.dump(cfg.model_dump(), handle)
@@ -0,0 +1,222 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from datetime import datetime, timezone
5
+ from pathlib import Path
6
+ from typing import Any, Literal
7
+
8
+ from agentpack.core.config import Config
9
+ from agentpack.core.models import (
10
+ ContextPack,
11
+ FileInfo,
12
+ Receipt,
13
+ SelectedFile,
14
+ Symbol,
15
+ )
16
+ from agentpack.core.redactor import redact_secrets
17
+ from agentpack.core.token_estimator import estimate_tokens
18
+
19
+
20
# Context-pack depth presets.
Mode = Literal["minimal", "balanced", "deep"]

# Per-mode feature toggles: which categories of related files get pulled into
# the pack. Only "extra_full" is read in this module (see select_files); the
# other keys are presumably consumed by the pack service — confirm at callers.
_MODE_WEIGHTS: dict[str, dict[str, bool]] = {
    "minimal": {
        "include_unchanged_deps": False,
        "include_rev_deps": False,
        "include_tests": False,
        "include_docs": False,
        "extra_full": False,
    },
    "balanced": {
        "include_unchanged_deps": True,
        "include_rev_deps": True,
        "include_tests": True,
        "include_docs": False,
        "extra_full": False,
    },
    "deep": {
        "include_unchanged_deps": True,
        "include_rev_deps": True,
        "include_tests": True,
        "include_docs": True,
        "extra_full": True,
    },
}
45
+
46
+
47
+ def _metadata_path(root: Path) -> Path:
48
+ return root / ".agentpack" / "pack_metadata.json"
49
+
50
+
51
+ def save_pack_metadata(
52
+ root: Path,
53
+ context_path: str,
54
+ snapshot_root_hash: str,
55
+ task: str,
56
+ agent: str,
57
+ mode: str,
58
+ budget: int,
59
+ token_estimate: int = 0,
60
+ ) -> None:
61
+ meta = {
62
+ "context_path": context_path,
63
+ "generated_at": datetime.now(timezone.utc).isoformat(),
64
+ "snapshot_root_hash": snapshot_root_hash,
65
+ "task": task,
66
+ "agent": agent,
67
+ "mode": mode,
68
+ "budget": budget,
69
+ "token_estimate": token_estimate,
70
+ }
71
+ _metadata_path(root).write_text(json.dumps(meta, indent=2))
72
+
73
+
74
+ def load_pack_metadata(root: Path) -> dict[str, Any] | None:
75
+ path = _metadata_path(root)
76
+ if not path.exists():
77
+ return None
78
+ try:
79
+ return json.loads(path.read_text())
80
+ except (json.JSONDecodeError, OSError):
81
+ return None
82
+
83
+
84
def _extract_relevant_symbol_bodies(
    fi: FileInfo,
    syms: list[Symbol],
    keywords: set[str],
    budget_remaining: int,
) -> tuple[str | None, int]:
    """Assemble symbol bodies from Symbol.body (captured at extraction time — no file re-read).

    For each relevant symbol: include its full body when that fits the
    remaining budget; otherwise fall back to the signature when that fits.
    Returns the joined text (None when nothing fit) and the tokens consumed.

    Refactor: the signature fallback was previously duplicated verbatim in two
    branches (body-too-big and body-missing); both cases now share one
    candidate loop. Behavior is unchanged.
    """
    from agentpack.analysis.symbols import filter_symbols_by_keywords

    relevant = filter_symbols_by_keywords(syms, keywords) if keywords else syms[:5]
    if not relevant:
        return None, 0

    parts: list[str] = []
    tokens_used = 0
    for sym in relevant:
        # Try the body first, then the signature; take the first that exists
        # and fits, and never both for the same symbol.
        for candidate in (sym.body, sym.signature):
            if not candidate:
                continue
            tok = estimate_tokens(candidate)
            if tokens_used + tok <= budget_remaining:
                parts.append(candidate)
                tokens_used += tok
                break

    return "\n\n".join(parts) if parts else None, tokens_used
118
+
119
+
120
def select_files(
    files: list[FileInfo],
    scored: list[tuple[FileInfo, float, list[str]]],
    changed_paths: set[str],
    summaries: dict[str, Any],
    mode: Mode,
    budget: int,
    max_file_tokens: int,
    keywords: set[str] | None = None,
) -> tuple[list[SelectedFile], list[Receipt]]:
    """Choose pack contents, best-scored first, under a total token *budget*.

    Each scored candidate gets an inclusion mode:
      * "full"    — changed file that fits max_file_tokens (raw content);
      * "symbols" — changed-but-large file, or (with the mode's "extra_full"
                    toggle) a small unchanged file: relevant symbol bodies;
      * "summary" — everything else (cached summary text or a small stub).
    A Receipt is recorded for every candidate, excluded ones included, and
    secrets are redacted from any materialized text before it is returned.

    NOTE(review): the *files* parameter is never read here — only *scored*
    is iterated; confirm whether it can be dropped at the call sites.
    """
    opts = _MODE_WEIGHTS[mode]
    selected: list[SelectedFile] = []
    receipts: list[Receipt] = []
    tokens_used = 0
    kw = keywords or set()

    # Highest score first; Python's stable sort preserves input order on ties.
    for fi, score, reasons in sorted(scored, key=lambda x: -x[1]):
        if fi.ignored or fi.binary:
            receipts.append(Receipt(path=fi.path, action="excluded", reason="ignored or binary"))
            continue

        if score <= 0:
            receipts.append(Receipt(path=fi.path, action="excluded", reason="score too low"))
            continue

        is_changed = fi.path in changed_paths
        summary_data = summaries.get(fi.path)

        # Determine inclusion mode
        if is_changed and fi.estimated_tokens <= max_file_tokens:
            mode_str: Literal["full", "symbols", "summary"] = "full"
            # Prefer content already held in memory; else read from disk.
            content = fi.content if fi.content is not None else (
                fi.abs_path.read_text(errors="replace") if fi.abs_path.exists() else None
            )
            tok = fi.estimated_tokens
        elif is_changed or (opts["extra_full"] and fi.estimated_tokens <= max_file_tokens):
            mode_str = "symbols"
            content = None
            # Initial estimate: symbols cost at most half the per-file cap.
            tok = min(fi.estimated_tokens, max_file_tokens // 2)
        elif summary_data:
            mode_str = "summary"
            content = None
            tok = estimate_tokens(summary_data.get("summary", ""))
        else:
            mode_str = "summary"
            content = None
            # No cached summary: reserve a small flat allowance.
            tok = min(fi.estimated_tokens, 200)

        # "continue" rather than "break": a cheaper lower-scored file may still fit.
        if tokens_used + tok > budget:
            receipts.append(Receipt(path=fi.path, action="excluded", reason="budget exhausted"))
            continue

        tokens_used += tok

        # Build symbol list
        syms: list[Symbol] = []
        if summary_data and mode_str in ("symbols", "summary"):
            raw_syms = summary_data.get("symbols", [])
            for s in raw_syms:
                try:
                    # Cached entries may be raw dicts or already-built Symbols.
                    syms.append(Symbol(**s) if isinstance(s, dict) else s)
                except Exception as exc:
                    import warnings
                    warnings.warn(f"skipping malformed symbol in {fi.path}: {exc}", stacklevel=2)

        # Symbol body extraction for "symbols" mode
        sym_body_content: str | None = None
        if mode_str == "symbols" and syms and fi.abs_path.exists():
            budget_remaining = budget - tokens_used
            sym_body_content, extra_tok = _extract_relevant_symbol_bodies(
                fi, syms, kw, min(budget_remaining, max_file_tokens // 2)
            )
            # NOTE(review): when this guard fails, the extracted bodies stay in
            # sym_body_content while their token cost goes uncounted — confirm
            # that is intended.
            if extra_tok > 0 and tokens_used + extra_tok <= budget:
                tokens_used += extra_tok

        # Redact secrets at materialization — before content reaches any renderer or adapter
        materialized = content if mode_str == "full" else sym_body_content
        redaction_warnings: list[str] = []
        if materialized:
            materialized, redaction_warnings = redact_secrets(materialized, fi.path)

        selected.append(
            SelectedFile(
                path=fi.path,
                language=fi.language,
                score=score,
                include_mode=mode_str,
                reasons=reasons,
                content=materialized,
                summary=summary_data.get("summary") if summary_data else None,
                symbols=syms,
                redaction_warnings=redaction_warnings,
            )
        )

        # "symbols"-mode files are reported as "summarized" on the receipt.
        action: Literal["included", "excluded", "summarized"] = (
            "included" if mode_str == "full" else "summarized"
        )
        receipts.append(
            Receipt(path=fi.path, action=action, reason=", ".join(reasons[:2]))
        )

    return selected, receipts
agentpack/core/diff.py ADDED
@@ -0,0 +1,40 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Any
5
+
6
+
7
+ @dataclass
8
+ class SnapshotDiff:
9
+ added: list[str]
10
+ modified: list[str]
11
+ deleted: list[str]
12
+ unchanged: list[str]
13
+
14
+
15
+ def diff_snapshots(
16
+ old: dict[str, Any] | None,
17
+ new: dict[str, Any],
18
+ ) -> SnapshotDiff:
19
+ new_files: dict[str, str] = {
20
+ p: info["hash"] for p, info in new.get("files", {}).items() if info.get("hash")
21
+ }
22
+
23
+ if old is None:
24
+ return SnapshotDiff(
25
+ added=sorted(new_files),
26
+ modified=[],
27
+ deleted=[],
28
+ unchanged=[],
29
+ )
30
+
31
+ old_files: dict[str, str] = {
32
+ p: info["hash"] for p, info in old.get("files", {}).items() if info.get("hash")
33
+ }
34
+
35
+ added = sorted(p for p in new_files if p not in old_files)
36
+ deleted = sorted(p for p in old_files if p not in new_files)
37
+ modified = sorted(p for p in new_files if p in old_files and new_files[p] != old_files[p])
38
+ unchanged = sorted(p for p in new_files if p in old_files and new_files[p] == old_files[p])
39
+
40
+ return SnapshotDiff(added=added, modified=modified, deleted=deleted, unchanged=unchanged)
agentpack/core/git.py ADDED
@@ -0,0 +1,145 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess
4
+ from pathlib import Path
5
+
6
+
7
+ def _run(args: list[str], cwd: Path) -> str | None:
8
+ try:
9
+ result = subprocess.run(
10
+ args,
11
+ cwd=cwd,
12
+ capture_output=True,
13
+ text=True,
14
+ timeout=10,
15
+ )
16
+ if result.returncode == 0:
17
+ return result.stdout
18
+ return None
19
+ except (FileNotFoundError, subprocess.TimeoutExpired):
20
+ return None
21
+
22
+
23
def is_git_repo(root: Path) -> bool:
    """True when *root* lies inside a git working tree."""
    out = _run(["git", "rev-parse", "--is-inside-work-tree"], root)
    if out is None:
        return False
    return out.strip() == "true"
26
+
27
+
28
def changed_files(root: Path) -> set[str]:
    """Unstaged + staged modified/added files."""
    commands = (
        ["git", "diff", "--name-only"],
        ["git", "diff", "--cached", "--name-only"],
    )
    paths: set[str] = set()
    for cmd in commands:
        out = _run(cmd, root)
        if not out:
            continue
        paths.update(
            stripped for line in out.splitlines() if (stripped := line.strip())
        )
    return paths
42
+
43
+
44
def untracked_files(root: Path) -> set[str]:
    """Paths git reports as untracked ('??' entries in short status)."""
    out = _run(["git", "status", "--short"], root)
    if not out:
        return set()
    # Short-status lines are "XY <path>"; untracked entries start with "??".
    return {line[3:].strip() for line in out.splitlines() if line.startswith("??")}
53
+
54
+
55
def recently_modified_files(root: Path, n: int = 20) -> list[str]:
    """File paths touched by the last *n* modifying commits (may repeat)."""
    out = _run(
        ["git", "log", "--diff-filter=M", "--name-only", "--format=", f"-{n}"],
        root,
    )
    if not out:
        return []
    return [stripped for line in out.splitlines() if (stripped := line.strip())]
63
+
64
+
65
def changed_files_since(root: Path, ref: str) -> set[str]:
    """Files changed between ref and HEAD (e.g. ref='HEAD~1', ref='main')."""
    out = _run(["git", "diff", "--name-only", ref, "HEAD"], root)
    if not out:
        return set()
    return {stripped for line in out.splitlines() if (stripped := line.strip())}
75
+
76
+
77
def infer_task_from_git(root: Path) -> str:
    """Infer a task description from branch name, changed files, and recent commits.

    Priority: branch name (explicit intent) → changed file paths (current work) → recent commit.
    Falls back to the literal string "general development" when no signal exists.
    """
    branch: str | None = None
    branch_out = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"], root)
    if branch_out:
        b = branch_out.strip()
        # Default/long-lived branches carry no task intent; detached HEAD reports "HEAD".
        if b and b not in ("HEAD", "main", "master", "develop"):
            # Drop a "feature/"-style prefix, then de-slugify the remainder.
            slug = b.split("/", 1)[-1]
            branch = slug.replace("-", " ").replace("_", " ")

    # Changed files are the strongest signal for *current* work
    changed = changed_files(root)
    file_topic = _topic_from_paths(changed) if changed else None

    # Fallback: most recent non-merge commit
    commit: str | None = None
    log_out = _run(["git", "log", "--oneline", "-5"], root)
    if log_out:
        for line in log_out.splitlines():
            line = line.strip()
            if not line:
                continue
            # "--oneline" lines are "<sha> <subject>"; keep only the subject.
            msg = line.split(" ", 1)[1] if " " in line else line
            if not msg.lower().startswith("merge "):
                commit = msg
                break

    if branch and file_topic:
        return f"{branch}: {file_topic}"
    if branch:
        return branch
    if file_topic:
        return file_topic
    if commit:
        return commit
    return "general development"
116
+
117
+
118
+ def _topic_from_paths(paths: set[str]) -> str | None:
119
+ """Extract a short topic string from a set of file paths."""
120
+ _SKIP = {"__init__", "index", "main", "mod", "lib", "utils", "helpers", "types", "constants"}
121
+ words: list[str] = []
122
+ for path in sorted(paths):
123
+ parts = Path(path).parts
124
+ # Skip test dirs and generated dirs
125
+ stem = Path(path).stem
126
+ if stem in _SKIP:
127
+ continue
128
+ # Take the most specific meaningful directory + stem
129
+ for part in reversed(parts[:-1]):
130
+ if part not in ("src", "lib", "pkg", "app", "tests", "test", "__pycache__"):
131
+ words.append(part.replace("_", " ").replace("-", " "))
132
+ break
133
+ words.append(stem.replace("_", " ").replace("-", " "))
134
+ if not words:
135
+ return None
136
+ # Deduplicate preserving order, keep up to 5 words
137
+ seen: set[str] = set()
138
+ unique: list[str] = []
139
+ for w in words:
140
+ if w not in seen:
141
+ seen.add(w)
142
+ unique.append(w)
143
+ if len(unique) == 5:
144
+ break
145
+ return ", ".join(unique)
@@ -0,0 +1,8 @@
1
+ """Backward-compat shim — moved to agentpack.integrations.git_hooks."""
2
+ from agentpack.integrations.git_hooks import ( # noqa: F401
3
+ install_git_hooks,
4
+ remove_git_hooks,
5
+ _HOOK_EVENTS,
6
+ _AGENTPACK_MARKER,
7
+ _hook_script,
8
+ )
@@ -0,0 +1,14 @@
1
+ """Backward-compat shim — moved to agentpack.integrations.global_install."""
2
+ from agentpack.integrations.global_install import ( # noqa: F401
3
+ install_git_template_hooks,
4
+ configure_git_template_dir,
5
+ remove_git_template_hooks,
6
+ install_shell_hook,
7
+ remove_shell_hook,
8
+ _GIT_TEMPLATE_DIR,
9
+ _AGENTPACK_MARKER,
10
+ _SHELL_MARKER_START,
11
+ _SHELL_MARKER_END,
12
+ _HOOK_SCRIPTS,
13
+ _detect_rc_file,
14
+ )
@@ -0,0 +1,66 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+
5
+ import pathspec
6
+
7
+
8
# Built-in ignore patterns (gitignore syntax) used when the project has no
# .agentignore of its own. Runtime data — the '#' lines inside the string are
# pattern-file comments, not Python comments.
DEFAULT_AGENTIGNORE = """\
# dependencies
node_modules/
.venv/
venv/
__pycache__/

# builds
dist/
build/
.next/
coverage/

# caches
.pytest_cache/
.mypy_cache/
.ruff_cache/

# generated/noisy
generated/
*.generated.*
*.min.js
*.map
*.lock
*.log

# secrets
.env
.env.*
*.pem
*.key

# lock files
package-lock.json
yarn.lock
pnpm-lock.yaml
Pipfile.lock
poetry.lock
Cargo.lock
composer.lock
Gemfile.lock

# large data
*.csv
*.jsonl
*.parquet
"""
55
+
56
+
57
def load_spec(ignore_path: Path) -> pathspec.PathSpec:
    """Compile the ignore patterns at *ignore_path*, or the built-in defaults."""
    if ignore_path.exists():
        source = ignore_path.read_text()
    else:
        source = DEFAULT_AGENTIGNORE
    return pathspec.PathSpec.from_lines("gitignore", source.splitlines())
63
+
64
+
65
def is_ignored(spec: pathspec.PathSpec, path: str) -> bool:
    """True when *path* matches the compiled ignore spec."""
    matched = spec.match_file(path)
    return matched
@@ -0,0 +1,8 @@
1
+ import hashlib
2
+
3
+
4
+ def root_hash(file_hashes: dict[str, str]) -> str:
5
+ h = hashlib.sha256()
6
+ for path in sorted(file_hashes):
7
+ h.update(f"{path}:{file_hashes[path]}".encode())
8
+ return h.hexdigest()