agentpack-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentpack/__init__.py +3 -0
- agentpack/adapters/__init__.py +0 -0
- agentpack/adapters/base.py +22 -0
- agentpack/adapters/claude.py +32 -0
- agentpack/adapters/codex.py +26 -0
- agentpack/adapters/cursor.py +29 -0
- agentpack/adapters/generic.py +18 -0
- agentpack/adapters/windsurf.py +26 -0
- agentpack/analysis/__init__.py +0 -0
- agentpack/analysis/dependency_graph.py +80 -0
- agentpack/analysis/go_imports.py +32 -0
- agentpack/analysis/java_imports.py +19 -0
- agentpack/analysis/js_ts_imports.py +53 -0
- agentpack/analysis/python_imports.py +45 -0
- agentpack/analysis/ranking.py +400 -0
- agentpack/analysis/rust_imports.py +32 -0
- agentpack/analysis/symbols.py +154 -0
- agentpack/analysis/tests.py +30 -0
- agentpack/application/__init__.py +0 -0
- agentpack/application/pack_service.py +352 -0
- agentpack/cli.py +33 -0
- agentpack/commands/__init__.py +0 -0
- agentpack/commands/_shared.py +13 -0
- agentpack/commands/benchmark.py +302 -0
- agentpack/commands/claude_cmd.py +55 -0
- agentpack/commands/diff.py +46 -0
- agentpack/commands/doctor.py +185 -0
- agentpack/commands/explain.py +238 -0
- agentpack/commands/init.py +79 -0
- agentpack/commands/install.py +252 -0
- agentpack/commands/monitor.py +105 -0
- agentpack/commands/pack.py +188 -0
- agentpack/commands/scan.py +51 -0
- agentpack/commands/session.py +204 -0
- agentpack/commands/stats.py +138 -0
- agentpack/commands/status.py +37 -0
- agentpack/commands/summarize.py +64 -0
- agentpack/commands/watch.py +185 -0
- agentpack/core/__init__.py +0 -0
- agentpack/core/bootstrap.py +46 -0
- agentpack/core/cache.py +41 -0
- agentpack/core/config.py +101 -0
- agentpack/core/context_pack.py +222 -0
- agentpack/core/diff.py +40 -0
- agentpack/core/git.py +145 -0
- agentpack/core/git_hooks.py +8 -0
- agentpack/core/global_install.py +14 -0
- agentpack/core/ignore.py +66 -0
- agentpack/core/merkle.py +8 -0
- agentpack/core/models.py +115 -0
- agentpack/core/redactor.py +99 -0
- agentpack/core/scanner.py +150 -0
- agentpack/core/snapshot.py +60 -0
- agentpack/core/token_estimator.py +26 -0
- agentpack/core/vscode_tasks.py +5 -0
- agentpack/data/agentpack.md +160 -0
- agentpack/installers/__init__.py +0 -0
- agentpack/installers/claude.py +160 -0
- agentpack/installers/codex.py +54 -0
- agentpack/installers/cursor.py +76 -0
- agentpack/installers/windsurf.py +50 -0
- agentpack/integrations/__init__.py +0 -0
- agentpack/integrations/git_hooks.py +109 -0
- agentpack/integrations/global_install.py +221 -0
- agentpack/integrations/vscode_tasks.py +85 -0
- agentpack/renderers/__init__.py +3 -0
- agentpack/renderers/compact.py +75 -0
- agentpack/renderers/markdown.py +144 -0
- agentpack/renderers/receipts.py +10 -0
- agentpack/session/__init__.py +33 -0
- agentpack/session/state.py +105 -0
- agentpack/summaries/__init__.py +0 -0
- agentpack/summaries/base.py +42 -0
- agentpack/summaries/llm.py +100 -0
- agentpack/summaries/offline.py +97 -0
- agentpack_cli-0.1.0.dist-info/METADATA +1391 -0
- agentpack_cli-0.1.0.dist-info/RECORD +80 -0
- agentpack_cli-0.1.0.dist-info/WHEEL +4 -0
- agentpack_cli-0.1.0.dist-info/entry_points.txt +2 -0
- agentpack_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
# Labels identify the tasks agentpack owns inside .vscode/tasks.json.
# install_vscode_tasks/remove_vscode_tasks match on these exact strings,
# so changing them orphans tasks installed by earlier versions.
_TASK_LABEL = "AgentPack: Repack context"
_TASK_LABEL_AUTO = "AgentPack: Repack (auto task)"
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _agentpack_tasks(agent: str) -> list[dict]:
    """Build the two VS Code task definitions that agentpack manages.

    The first task is run manually; the second re-runs on folder open
    (``runOptions.runOn = "folderOpen"``) with a silent presentation.
    """
    repack_cmd = f"agentpack pack --agent {agent} --task auto --mode balanced"
    manual_task = {
        "label": _TASK_LABEL,
        "type": "shell",
        "command": repack_cmd,
        "group": "none",
        "presentation": {"reveal": "always", "panel": "shared"},
        "problemMatcher": [],
    }
    auto_task = {
        "label": _TASK_LABEL_AUTO,
        "type": "shell",
        "command": repack_cmd,
        "runOptions": {"runOn": "folderOpen"},
        "group": "none",
        "presentation": {"reveal": "silent", "panel": "shared"},
        "problemMatcher": [],
    }
    return [manual_task, auto_task]
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def install_vscode_tasks(root: Path, agent: str) -> str:
    """Merge agentpack tasks into .vscode/tasks.json. Returns action taken.

    Idempotent — safe to re-run. Existing tasks with matching labels are
    updated; other tasks are preserved.

    Args:
        root: Workspace root containing (or to contain) the .vscode directory.
        agent: Agent name interpolated into the ``agentpack pack`` command.

    Returns:
        "updated" when agentpack tasks already existed, else "created".
    """
    vscode_dir = root / ".vscode"
    vscode_dir.mkdir(exist_ok=True)
    tasks_path = vscode_dir / "tasks.json"

    existing: dict = {"version": "2.0.0", "tasks": []}
    if tasks_path.exists():
        try:
            loaded = json.loads(tasks_path.read_text())
            # Guard against valid-but-wrong JSON (e.g. a top-level list),
            # which would otherwise crash on .setdefault below.
            if isinstance(loaded, dict):
                existing = loaded
        except json.JSONDecodeError:
            pass  # best-effort: fall back to a fresh tasks structure

    existing.setdefault("version", "2.0.0")
    # A non-list "tasks" value would crash the filtering below; reset it.
    if not isinstance(existing.get("tasks"), list):
        existing["tasks"] = []

    new_tasks = _agentpack_tasks(agent)
    new_labels = {t["label"] for t in new_tasks}

    # Remove stale agentpack tasks, keep everything else. Non-dict entries
    # are preserved untouched instead of crashing on .get().
    kept = [
        t
        for t in existing["tasks"]
        if not (isinstance(t, dict) and t.get("label") in new_labels)
    ]
    had_any = len(kept) < len(existing["tasks"])

    existing["tasks"] = kept + new_tasks
    tasks_path.write_text(json.dumps(existing, indent=2) + "\n")

    return "updated" if had_any else "created"
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def remove_vscode_tasks(root: Path) -> str:
    """Remove agentpack tasks from .vscode/tasks.json. Returns action taken.

    Args:
        root: Workspace root containing the .vscode directory.

    Returns:
        "cleaned" when at least one agentpack task was removed, else "unchanged".
    """
    tasks_path = root / ".vscode" / "tasks.json"
    if not tasks_path.exists():
        return "unchanged"

    try:
        existing = json.loads(tasks_path.read_text())
    except json.JSONDecodeError:
        return "unchanged"
    # Valid JSON that is not an object (or has a non-list "tasks") holds
    # nothing we can safely edit — previously this crashed on .get().
    if not isinstance(existing, dict):
        return "unchanged"
    tasks = existing.get("tasks", [])
    if not isinstance(tasks, list):
        return "unchanged"

    labels = {_TASK_LABEL, _TASK_LABEL_AUTO}
    before = len(tasks)
    # Non-dict entries are kept untouched instead of crashing on .get().
    existing["tasks"] = [
        t for t in tasks if not (isinstance(t, dict) and t.get("label") in labels)
    ]
    after = len(existing["tasks"])

    if before == after:
        return "unchanged"

    tasks_path.write_text(json.dumps(existing, indent=2) + "\n")
    return "cleaned"
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from datetime import datetime, timezone
|
|
4
|
+
|
|
5
|
+
from agentpack.core.models import ContextPack, SelectedFile
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def _format_file_entry(sf: SelectedFile) -> str:
|
|
9
|
+
"""Format a single selected file entry for the compact format."""
|
|
10
|
+
lines: list[str] = [sf.path]
|
|
11
|
+
lines.append(f"score: {int(sf.score)}")
|
|
12
|
+
lines.append(f"include: {sf.include_mode}")
|
|
13
|
+
if sf.reasons:
|
|
14
|
+
lines.append(f"why: {', '.join(sf.reasons)}")
|
|
15
|
+
if sf.symbols:
|
|
16
|
+
symbol_names = ", ".join(s.name for s in sf.symbols)
|
|
17
|
+
lines.append(f"symbols: {symbol_names}")
|
|
18
|
+
return "\n".join(lines)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def render_compact(pack: ContextPack) -> str:
    """Render a ContextPack into a structured compact format."""
    primary = [sf for sf in pack.selected_files if sf.include_mode in ("full", "symbols")]
    summarized = [sf for sf in pack.selected_files if sf.include_mode not in ("full", "symbols")]

    generated_at = datetime.now(timezone.utc).isoformat()
    out: list[str] = [
        "# AgentPack Context",
        "",
        f"task: {pack.task}",
        f"mode: {pack.mode}",
        f"budget: {pack.token_estimate}/{pack.budget}",
        f"generated: {generated_at}",
        "",
        "## selected",
        "",
    ]

    if primary:
        for sf in primary:
            out.append(_format_file_entry(sf))
            out.append("")
    else:
        out.extend(["(none)", ""])

    out.extend(["## deps", ""])
    if summarized:
        for sf in summarized:
            # Dep entries always render as "summary" and show only the top reason.
            entry = [sf.path, f"score: {int(sf.score)}", "include: summary"]
            if sf.reasons:
                entry.append(f"why: {sf.reasons[0]}")
            out.append("\n".join(entry))
            out.append("")
    else:
        out.extend(["(none)", ""])

    out.extend(
        [
            "## instructions",
            "",
            "- Prefer selected files first.",
            "- If task changes significantly, update `.agentpack/task.md`.",
            "- Run `agentpack session refresh` if context seems stale.",
            "",
        ]
    )

    return "\n".join(out)
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from agentpack.core.models import ContextPack, SelectedFile, Symbol
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _lang_fence(lang: str | None) -> str:
|
|
7
|
+
return lang or ""
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _symbols_block(symbols: list[Symbol], lang: str | None) -> str:
    """Render symbol signatures (with optional summaries) as one fenced block."""
    if not symbols:
        return ""
    body: list[str] = ["```" + _lang_fence(lang)]
    for sym in symbols:
        if sym.signature:
            body.append(sym.signature)
        if sym.summary:
            body.append(f"  # {sym.summary}")
    body.append("```")
    return "\n".join(body)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _file_section(sf: SelectedFile) -> str:
    """Render one selected file as a Markdown section (heading, reasons, body).

    The body depends on `include_mode`: full/symbols with content gets a fenced
    source listing (plus a redaction warning line when secrets were stripped);
    "symbols" without content falls back to summary + symbol signatures;
    "summary" gets only the summary text.
    """
    # Content is already redacted at materialization time (context_pack.select_files)
    parts = [f"### {sf.path}", ""]
    parts.append(f"Included as: **{sf.include_mode}**")
    parts.append("")
    if sf.reasons:
        parts.append("Reasons:")
        for r in sf.reasons:
            parts.append(f"- {r}")
        parts.append("")

    if sf.include_mode in ("full", "symbols") and sf.content:
        parts.append("```" + _lang_fence(sf.language))
        parts.append(sf.content)
        parts.append("```")
        if sf.redaction_warnings:
            # Warnings look like "<path>: <type>"; keep only the type part.
            types = ", ".join(
                w.split(": ", 1)[1] if ": " in w else w for w in sf.redaction_warnings
            )
            parts.append(f"> ⚠ Secrets redacted: {types}")

    # NOTE(review): this branch only runs when include_mode == "symbols" AND
    # sf.content is empty — a "symbols" file WITH content takes the branch
    # above and renders its content instead of the symbol block. Presumably
    # intentional (content holds the extracted symbol bodies) — confirm.
    elif sf.include_mode == "symbols":
        if sf.summary:
            parts.append("Summary:")
            parts.append(sf.summary)
            parts.append("")
        if sf.symbols:
            parts.append("Relevant symbols:")
            parts.append("")
            parts.append(_symbols_block(sf.symbols, sf.language))

    elif sf.include_mode == "summary":
        if sf.summary:
            parts.append("Summary:")
            parts.append(sf.summary)

    return "\n".join(parts)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def render_claude(pack: ContextPack) -> str:
    """Render a ContextPack as Claude-oriented Markdown.

    Section order: title, optional staleness warning, task, agent
    instructions, token stats, optional security (redaction) notes,
    changed files, a selected-files table, optional context receipts,
    then the full per-file context sections.
    """
    sections: list[str] = []

    sections.append("# AgentPack Context for Claude")
    sections.append("")

    if pack.stale:
        sections.append("> **Warning:** This context pack may be stale. Run `agentpack pack` to regenerate.")
        sections.append("")

    sections.append("## Task")
    sections.append("")
    sections.append(pack.task)
    sections.append("")

    sections.append("## Instructions for Claude")
    sections.append("")
    sections.append(
        "This is a task-focused context pack. Act on it immediately:\n\n"
        "1. **Orient** — identify the changed files and key symbols relevant to the task.\n"
        "2. **Diagnose or plan** — find the root cause (bug fix) or outline the approach (feature).\n"
        "3. **Work** — edit files, write code, fix the issue. Do not wait for more instructions.\n\n"
        "Priority order: changed files → keyword-matched files → dependencies → summaries.\n"
        "Files marked `full` contain complete source. Files marked `symbols` contain relevant "
        "function/class bodies. Files marked `summary` are unchanged context.\n"
        "If the pack looks stale (changed files list is empty but you expect changes), "
        "ask the user to run `agentpack pack --task \"<task>\"` to refresh."
    )
    sections.append("")

    sections.append("## Token Stats")
    sections.append("")
    sections.append(f"Raw repo tokens: {pack.raw_repo_tokens:,}")
    sections.append(f"After ignore: {pack.after_ignore_tokens:,}")
    sections.append(f"Packed tokens: {pack.token_estimate:,}")
    sections.append(f"Estimated saving: {pack.estimated_savings_percent:.1f}%")
    sections.append("")

    # Security section appears only when the redactor actually fired.
    if pack.redaction_warnings:
        sections.append("## Security")
        sections.append("")
        sections.append("> The following secrets were redacted before packing:")
        sections.append("")
        for w in pack.redaction_warnings:
            sections.append(f"- {w}")
        sections.append("")

    sections.append("## Changed Files")
    sections.append("")
    if pack.changed_files:
        for f in pack.changed_files:
            sections.append(f"- {f}")
    else:
        sections.append("_No changed files detected._")
    sections.append("")

    # Overview table: one row per selected file, first reason only.
    sections.append("## Selected Files")
    sections.append("")
    sections.append("| File | Mode | Score | Why |")
    sections.append("|---|---|---:|---|")
    for sf in pack.selected_files:
        why = sf.reasons[0] if sf.reasons else ""
        sections.append(f"| `{sf.path}` | {sf.include_mode} | {sf.score:.0f} | {why} |")
    sections.append("")

    if pack.receipts:
        sections.append("## Context Receipts")
        sections.append("")
        for r in pack.receipts:
            sections.append(f"- `{r.path}` {r.action} because {r.reason}")
        sections.append("")

    # Full per-file bodies come last so the overview stays near the top.
    sections.append("## File Context")
    sections.append("")
    for sf in pack.selected_files:
        sections.append(_file_section(sf))
        sections.append("")

    return "\n".join(sections)
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def render_generic(pack: ContextPack) -> str:
    """Render for generic agents: the Claude output with a neutral title."""
    rendered = render_claude(pack)
    return rendered.replace("# AgentPack Context for Claude", "# AgentPack Context")
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from agentpack.session.state import (
|
|
4
|
+
SessionState,
|
|
5
|
+
load_session,
|
|
6
|
+
save_session,
|
|
7
|
+
create_session,
|
|
8
|
+
stop_session,
|
|
9
|
+
log_activity,
|
|
10
|
+
AGENTPACK_DIR,
|
|
11
|
+
SESSION_FILE,
|
|
12
|
+
TASK_FILE,
|
|
13
|
+
CONTEXT_FILE,
|
|
14
|
+
COMPACT_FILE,
|
|
15
|
+
ACTIVITY_LOG,
|
|
16
|
+
TASK_FILE_TEMPLATE,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
# Public API of agentpack.session: session-state helpers plus the well-known
# .agentpack/ file locations, re-exported so callers can import them from the
# package root instead of agentpack.session.state.
__all__ = [
    "SessionState",
    "load_session",
    "save_session",
    "create_session",
    "stop_session",
    "log_activity",
    "AGENTPACK_DIR",
    "SESSION_FILE",
    "TASK_FILE",
    "CONTEXT_FILE",
    "COMPACT_FILE",
    "ACTIVITY_LOG",
    "TASK_FILE_TEMPLATE",
]
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field, asdict
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Optional
|
|
7
|
+
import json
|
|
8
|
+
|
|
9
|
+
# All session artifacts live under .agentpack/ at the repository root.
# Paths are stored as POSIX-style strings relative to the repo root and
# joined onto a Path by the helpers below.
AGENTPACK_DIR = ".agentpack"
SESSION_FILE = ".agentpack/session.json"
TASK_FILE = ".agentpack/task.md"
CONTEXT_FILE = ".agentpack/context.md"
COMPACT_FILE = ".agentpack/context.compact.md"
ACTIVITY_LOG = ".agentpack/activity.log"

# Seed content written to task.md when a session starts and no task file
# exists yet; the user edits this file to steer repacking.
TASK_FILE_TEMPLATE = """\
# Current Task

Write or update the current coding task here.

AgentPack will refresh context based on this task.
"""
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@dataclass
class SessionState:
    """Persisted state of an agentpack session (stored as .agentpack/session.json)."""

    active: bool  # True while a session is running
    started_at: Optional[str]  # ISO-8601 UTC timestamp, or None
    agent: str = "generic"  # target agent flavor used for rendering
    mode: str = "balanced"  # packing mode passed to `agentpack pack`
    context_file: str = CONTEXT_FILE  # where the full context is written
    compact_context_file: str = COMPACT_FILE  # where the compact context is written
    task_file: str = TASK_FILE  # user-editable task description
    last_refresh_at: Optional[str] = None  # ISO-8601 timestamp of last repack
    last_task_hash: str = ""  # hash of task.md at last refresh (staleness check)
    last_git_hash: str = ""  # git state hash at last refresh (staleness check)
    refresh_count: int = 0  # number of repacks performed in this session
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def load_session(root: Path) -> Optional[SessionState]:
    """Load session state from .agentpack/session.json.

    Returns None when the file is missing, is not valid JSON, or does not
    hold a JSON object — a corrupt session file degrades to "no session"
    instead of crashing (consistent with how tasks.json corruption is
    tolerated elsewhere in the package). Unknown/missing fields fall back
    to the dataclass defaults.
    """
    session_path = root / SESSION_FILE
    try:
        data = json.loads(session_path.read_text(encoding="utf-8"))
    except (FileNotFoundError, json.JSONDecodeError):
        return None
    # A top-level list/string would crash the .get() calls below.
    if not isinstance(data, dict):
        return None
    return SessionState(
        active=data.get("active", False),
        started_at=data.get("started_at"),
        agent=data.get("agent", "generic"),
        mode=data.get("mode", "balanced"),
        context_file=data.get("context_file", CONTEXT_FILE),
        compact_context_file=data.get("compact_context_file", COMPACT_FILE),
        task_file=data.get("task_file", TASK_FILE),
        last_refresh_at=data.get("last_refresh_at"),
        last_task_hash=data.get("last_task_hash", ""),
        last_git_hash=data.get("last_git_hash", ""),
        refresh_count=data.get("refresh_count", 0),
    )
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def save_session(root: Path, state: SessionState) -> None:
    """Write session state to .agentpack/session.json."""
    target = root / SESSION_FILE
    target.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(asdict(state), indent=2, default=str)
    target.write_text(payload, encoding="utf-8")
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def create_session(root: Path, agent: str, mode: str) -> SessionState:
    """Create a new active session, write session.json, create task.md if missing."""
    (root / AGENTPACK_DIR).mkdir(parents=True, exist_ok=True)

    # Seed the task file only once; a user-edited task.md is never clobbered.
    task_path = root / TASK_FILE
    if not task_path.exists():
        task_path.write_text(TASK_FILE_TEMPLATE, encoding="utf-8")

    now = datetime.now(timezone.utc).isoformat()
    state = SessionState(active=True, started_at=now, agent=agent, mode=mode)
    save_session(root, state)
    return state
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def stop_session(root: Path) -> None:
    """Mark the active session as inactive and update session.json."""
    state = load_session(root)
    if state is None:
        # No session file — nothing to stop.
        return
    state.active = False
    save_session(root, state)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def log_activity(root: Path, message: str) -> None:
    """Append a timestamped line to .agentpack/activity.log."""
    log_path = root / ACTIVITY_LOG
    log_path.parent.mkdir(parents=True, exist_ok=True)
    stamp = datetime.now(timezone.utc).isoformat()
    with log_path.open("a", encoding="utf-8") as handle:
        handle.write(f"[{stamp}] {message}\n")
|
|
File without changes
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from agentpack.core.models import FileInfo, FileSummary
|
|
6
|
+
from agentpack.core import cache as summary_cache
|
|
7
|
+
from agentpack.summaries import offline
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# Providers routed through agentpack.summaries.llm; any other value falls
# back to the offline summarizer in get_or_build_summary.
_LLM_PROVIDERS = {"claude", "openai"}
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def get_or_build_summary(fi: FileInfo, root: Path, provider: str = "offline") -> FileSummary:
    """Return a summary for *fi*, using the on-disk cache when possible.

    Files without a content hash are summarized offline and never cached
    (there is no stable cache key for them).
    """
    if fi.hash is None:
        return offline.summarize(fi.path, fi.abs_path, fi.language, "")

    hit = summary_cache.load_summary(root, fi.path, fi.hash, provider)
    if hit:
        return hit

    if provider in _LLM_PROVIDERS:
        # Imported lazily so the optional LLM dependency is only needed when used.
        from agentpack.summaries import llm as llm_mod

        built = llm_mod.summarize(fi.path, fi.abs_path, fi.language, fi.hash, provider=provider)
    else:
        built = offline.summarize(fi.path, fi.abs_path, fi.language, fi.hash)

    summary_cache.save_summary(root, built)
    return built
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def build_all_summaries(
    files: list[FileInfo],
    root: Path,
    provider: str = "offline",
) -> dict[str, FileSummary]:
    """Build summaries for packable files. Skips ignored and binary entries defensively."""
    return {
        fi.path: get_or_build_summary(fi, root, provider)
        for fi in files
        if not (fi.ignored or fi.binary)
    }
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from agentpack.core.models import FileSummary
|
|
6
|
+
from agentpack.analysis.symbols import extract_symbols
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# System prompt sent with every summarization request; keeps LLM output
# short and factual so summaries stay cheap to embed in context packs.
_SYSTEM_PROMPT = """\
You are a code summarizer. Given source code, produce a concise summary (3-5 sentences) covering:
- What this file does and its likely responsibility
- Key classes, functions, or exports it provides
- Important dependencies or side effects

Be factual and terse. No filler phrases."""

# Cap on source characters sent to the API, bounding request cost per file.
_MAX_INPUT_CHARS = 12000
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def summarize_with_claude(
    path: str,
    abs_path: Path,
    language: str | None,
    file_hash: str,
    model: str = "claude-haiku-4-5-20251001",
) -> FileSummary:
    """Summarize one source file via the Anthropic Messages API.

    Args:
        path: Repo-relative path (used in the prompt and error messages).
        abs_path: Absolute path read for the file content.
        language: Language tag, or None when unknown.
        file_hash: Content hash recorded on the resulting FileSummary.
        model: Anthropic model id to use.

    Returns:
        A FileSummary with provider "claude" and locally extracted symbols.

    Raises:
        ImportError: When the optional anthropic package is not installed.
        EnvironmentError: When ANTHROPIC_API_KEY is unset or auth fails.
        RuntimeError: On rate-limit or other API errors.
    """
    import os

    try:
        import anthropic
    except ImportError as exc:
        # Chain the cause so the original import failure stays visible.
        raise ImportError(
            "Install agentpack[llm] to use LLM summaries: pip install 'agentpack[llm]'"
        ) from exc

    # Fail fast before any file I/O or client construction.
    if not os.environ.get("ANTHROPIC_API_KEY"):
        raise EnvironmentError(
            "ANTHROPIC_API_KEY is not set. "
            "Set it or use --summary-provider offline (the default)."
        )

    try:
        # Truncate input to bound request cost; decode errors are replaced.
        content = abs_path.read_text(errors="replace")[:_MAX_INPUT_CHARS]
    except OSError:
        content = ""  # best-effort: summarize from an empty body rather than crash

    try:
        client = anthropic.Anthropic()
        message = client.messages.create(
            model=model,
            max_tokens=300,
            system=_SYSTEM_PROMPT,
            messages=[
                {
                    "role": "user",
                    "content": f"File: {path}\nLanguage: {language or 'unknown'}\n\n```\n{content}\n```",
                }
            ],
        )
    except anthropic.AuthenticationError as exc:
        raise EnvironmentError(
            f"Anthropic authentication failed — check ANTHROPIC_API_KEY. ({exc})"
        ) from exc
    except anthropic.RateLimitError as exc:
        raise RuntimeError(
            f"Anthropic rate limit hit while summarising {path}. "
            "Wait and retry, or use --summary-provider offline."
        ) from exc
    except anthropic.APIStatusError as exc:
        raise RuntimeError(
            f"Anthropic API error while summarising {path}: {exc}"
        ) from exc

    # Guard against an empty response; assumes the first block is text.
    summary_text = message.content[0].text if message.content else ""

    # Symbols come from local static extraction, not from the LLM.
    symbols = extract_symbols(abs_path, language)

    return FileSummary(
        path=path,
        hash=file_hash,
        language=language,
        provider="claude",
        schema_version=1,
        summary=summary_text,
        imports=[],
        symbols=symbols,
    )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def summarize(
    path: str,
    abs_path: Path,
    language: str | None,
    file_hash: str,
    provider: str = "claude",
    model: str | None = None,
) -> FileSummary:
    """Dispatch to an LLM summarizer by provider name.

    Raises ValueError for providers this module does not implement.
    """
    if provider != "claude":
        raise ValueError(f"Unknown LLM provider: {provider}")
    return summarize_with_claude(
        path,
        abs_path,
        language,
        file_hash,
        model=model or "claude-haiku-4-5-20251001",
    )
|