@laitszkin/apollo-toolkit 3.13.2 → 3.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +7 -7
- package/CHANGELOG.md +27 -0
- package/CLAUDE.md +8 -8
- package/analyse-app-logs/SKILL.md +3 -3
- package/bin/apollo-toolkit.ts +7 -0
- package/codex/codex-memory-manager/SKILL.md +2 -2
- package/codex/learn-skill-from-conversations/SKILL.md +3 -3
- package/dist/bin/apollo-toolkit.d.ts +2 -0
- package/dist/bin/apollo-toolkit.js +7 -0
- package/dist/lib/cli.d.ts +41 -0
- package/dist/lib/cli.js +655 -0
- package/dist/lib/installer.d.ts +59 -0
- package/dist/lib/installer.js +404 -0
- package/dist/lib/tool-runner.d.ts +19 -0
- package/dist/lib/tool-runner.js +536 -0
- package/dist/lib/tools/architecture.d.ts +2 -0
- package/dist/lib/tools/architecture.js +34 -0
- package/dist/lib/tools/create-specs.d.ts +2 -0
- package/dist/lib/tools/create-specs.js +175 -0
- package/dist/lib/tools/docs-to-voice.d.ts +2 -0
- package/dist/lib/tools/docs-to-voice.js +705 -0
- package/dist/lib/tools/enforce-video-aspect-ratio.d.ts +2 -0
- package/dist/lib/tools/enforce-video-aspect-ratio.js +312 -0
- package/dist/lib/tools/extract-conversations.d.ts +2 -0
- package/dist/lib/tools/extract-conversations.js +105 -0
- package/dist/lib/tools/extract-pdf-text.d.ts +2 -0
- package/dist/lib/tools/extract-pdf-text.js +92 -0
- package/dist/lib/tools/filter-logs.d.ts +2 -0
- package/dist/lib/tools/filter-logs.js +94 -0
- package/dist/lib/tools/find-github-issues.d.ts +2 -0
- package/dist/lib/tools/find-github-issues.js +176 -0
- package/dist/lib/tools/generate-storyboard-images.d.ts +2 -0
- package/dist/lib/tools/generate-storyboard-images.js +419 -0
- package/dist/lib/tools/log-cli-utils.d.ts +35 -0
- package/dist/lib/tools/log-cli-utils.js +233 -0
- package/dist/lib/tools/open-github-issue.d.ts +2 -0
- package/dist/lib/tools/open-github-issue.js +750 -0
- package/dist/lib/tools/read-github-issue.d.ts +2 -0
- package/dist/lib/tools/read-github-issue.js +134 -0
- package/dist/lib/tools/render-error-book.d.ts +2 -0
- package/dist/lib/tools/render-error-book.js +265 -0
- package/dist/lib/tools/render-katex.d.ts +2 -0
- package/dist/lib/tools/render-katex.js +294 -0
- package/dist/lib/tools/review-threads.d.ts +2 -0
- package/dist/lib/tools/review-threads.js +491 -0
- package/dist/lib/tools/search-logs.d.ts +2 -0
- package/dist/lib/tools/search-logs.js +164 -0
- package/dist/lib/tools/sync-memory-index.d.ts +2 -0
- package/dist/lib/tools/sync-memory-index.js +113 -0
- package/dist/lib/tools/validate-openai-agent-config.d.ts +2 -0
- package/dist/lib/tools/validate-openai-agent-config.js +184 -0
- package/dist/lib/tools/validate-skill-frontmatter.d.ts +2 -0
- package/dist/lib/tools/validate-skill-frontmatter.js +118 -0
- package/dist/lib/types.d.ts +82 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/updater.d.ts +34 -0
- package/dist/lib/updater.js +112 -0
- package/dist/lib/utils/format.d.ts +2 -0
- package/dist/lib/utils/format.js +6 -0
- package/dist/lib/utils/terminal.d.ts +12 -0
- package/dist/lib/utils/terminal.js +26 -0
- package/docs-to-voice/SKILL.md +0 -1
- package/generate-spec/SKILL.md +1 -1
- package/katex/SKILL.md +1 -2
- package/lib/cli.ts +780 -0
- package/lib/installer.ts +466 -0
- package/lib/tool-runner.ts +561 -0
- package/lib/tools/architecture.ts +34 -0
- package/lib/tools/create-specs.ts +204 -0
- package/lib/tools/docs-to-voice.ts +799 -0
- package/lib/tools/enforce-video-aspect-ratio.ts +368 -0
- package/lib/tools/extract-conversations.ts +114 -0
- package/lib/tools/extract-pdf-text.ts +99 -0
- package/lib/tools/filter-logs.ts +118 -0
- package/lib/tools/find-github-issues.ts +211 -0
- package/lib/tools/generate-storyboard-images.ts +455 -0
- package/lib/tools/log-cli-utils.ts +262 -0
- package/lib/tools/open-github-issue.ts +930 -0
- package/lib/tools/read-github-issue.ts +179 -0
- package/lib/tools/render-error-book.ts +300 -0
- package/lib/tools/render-katex.ts +325 -0
- package/lib/tools/review-threads.ts +590 -0
- package/lib/tools/search-logs.ts +200 -0
- package/lib/tools/sync-memory-index.ts +114 -0
- package/lib/tools/validate-openai-agent-config.ts +209 -0
- package/lib/tools/validate-skill-frontmatter.ts +124 -0
- package/lib/types.ts +90 -0
- package/lib/updater.ts +165 -0
- package/lib/utils/format.ts +7 -0
- package/lib/utils/terminal.ts +22 -0
- package/open-github-issue/SKILL.md +2 -2
- package/optimise-skill/SKILL.md +1 -1
- package/package.json +13 -4
- package/resources/project-architecture/assets/architecture.css +764 -0
- package/resources/project-architecture/assets/viewer.client.js +144 -0
- package/resources/project-architecture/index.html +42 -0
- package/review-spec-related-changes/SKILL.md +1 -1
- package/solve-issues-found-during-review/SKILL.md +2 -1
- package/tsconfig.json +28 -0
- package/analyse-app-logs/scripts/__pycache__/filter_logs_by_time.cpython-312.pyc +0 -0
- package/analyse-app-logs/scripts/__pycache__/log_cli_utils.cpython-312.pyc +0 -0
- package/analyse-app-logs/scripts/__pycache__/search_logs.cpython-312.pyc +0 -0
- package/analyse-app-logs/scripts/filter_logs_by_time.py +0 -64
- package/analyse-app-logs/scripts/log_cli_utils.py +0 -112
- package/analyse-app-logs/scripts/search_logs.py +0 -137
- package/analyse-app-logs/tests/test_filter_logs_by_time.py +0 -95
- package/analyse-app-logs/tests/test_search_logs.py +0 -100
- package/codex/codex-memory-manager/scripts/extract_recent_conversations.py +0 -369
- package/codex/codex-memory-manager/scripts/sync_memory_index.py +0 -130
- package/codex/codex-memory-manager/tests/test_extract_recent_conversations.py +0 -177
- package/codex/codex-memory-manager/tests/test_memory_template.py +0 -37
- package/codex/codex-memory-manager/tests/test_sync_memory_index.py +0 -84
- package/codex/learn-skill-from-conversations/scripts/extract_recent_conversations.py +0 -369
- package/codex/learn-skill-from-conversations/tests/test_extract_recent_conversations.py +0 -177
- package/docs-to-voice/scripts/__pycache__/docs_to_voice.cpython-312.pyc +0 -0
- package/docs-to-voice/scripts/docs_to_voice.py +0 -1385
- package/docs-to-voice/scripts/docs_to_voice.sh +0 -11
- package/docs-to-voice/tests/test_docs_to_voice_api_max_chars.py +0 -210
- package/docs-to-voice/tests/test_docs_to_voice_sentence_timeline.py +0 -115
- package/docs-to-voice/tests/test_docs_to_voice_settings.py +0 -43
- package/docs-to-voice/tests/test_docs_to_voice_shell_wrapper.py +0 -51
- package/docs-to-voice/tests/test_docs_to_voice_speech_rate.py +0 -57
- package/generate-spec/scripts/__pycache__/create-specs.cpython-312.pyc +0 -0
- package/generate-spec/scripts/create-specs +0 -215
- package/generate-spec/tests/test_create_specs.py +0 -200
- package/init-project-html/scripts/architecture-bootstrap-render.js +0 -16
- package/init-project-html/scripts/architecture.js +0 -296
- package/katex/scripts/__pycache__/render_katex.cpython-312.pyc +0 -0
- package/katex/scripts/render_katex.py +0 -247
- package/katex/scripts/render_katex.sh +0 -11
- package/katex/tests/test_render_katex.py +0 -174
- package/learning-error-book/scripts/render_error_book_json_to_pdf.py +0 -590
- package/learning-error-book/tests/test_render_error_book_json_to_pdf.py +0 -134
- package/open-github-issue/scripts/__pycache__/open_github_issue.cpython-312.pyc +0 -0
- package/open-github-issue/scripts/open_github_issue.py +0 -705
- package/open-github-issue/tests/test_open_github_issue.py +0 -381
- package/openai-text-to-image-storyboard/scripts/generate_storyboard_images.py +0 -763
- package/openai-text-to-image-storyboard/tests/test_generate_storyboard_images.py +0 -177
- package/read-github-issue/scripts/__pycache__/find_issues.cpython-312.pyc +0 -0
- package/read-github-issue/scripts/__pycache__/read_issue.cpython-312.pyc +0 -0
- package/read-github-issue/scripts/find_issues.py +0 -148
- package/read-github-issue/scripts/read_issue.py +0 -108
- package/read-github-issue/tests/test_find_issues.py +0 -127
- package/read-github-issue/tests/test_read_issue.py +0 -109
- package/resolve-review-comments/scripts/__pycache__/review_threads.cpython-312.pyc +0 -0
- package/resolve-review-comments/scripts/review_threads.py +0 -425
- package/resolve-review-comments/tests/test_review_threads.py +0 -74
- package/scripts/validate_openai_agent_config.py +0 -209
- package/scripts/validate_skill_frontmatter.py +0 -131
- package/text-to-short-video/scripts/__pycache__/enforce_video_aspect_ratio.cpython-312.pyc +0 -0
- package/text-to-short-video/scripts/enforce_video_aspect_ratio.py +0 -350
- package/text-to-short-video/tests/test_enforce_video_aspect_ratio.py +0 -194
- package/weekly-financial-event-report/scripts/extract_pdf_text_pdfkit.swift +0 -99
- package/weekly-financial-event-report/tests/test_extract_pdf_text_pdfkit.py +0 -64
|
@@ -1,100 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
"""Tests for the search_logs CLI script, loaded dynamically by file path."""

from __future__ import annotations

import importlib.util
import io
import sys
import tempfile
import unittest
from argparse import Namespace
from pathlib import Path
from unittest.mock import patch

# The script under test is a standalone file (not an installed package), so it
# is imported via importlib from its location next to this test directory.
SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "search_logs.py"
SCRIPT_DIR = SCRIPT_PATH.parent

# Make sibling helper modules importable by the script itself.
# Fix: use a normal top-level `import sys` instead of the previous inline
# `__import__("sys")` hack, which obscured the dependency.
if str(SCRIPT_DIR) not in sys.path:
    sys.path.insert(0, str(SCRIPT_DIR))

SPEC = importlib.util.spec_from_file_location("search_logs", SCRIPT_PATH)
MODULE = importlib.util.module_from_spec(SPEC)
SPEC.loader.exec_module(MODULE)
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
class SearchLogsTests(unittest.TestCase):
    """Behavioural tests for the search_logs CLI module."""

    def _write_log(self, content: str) -> str:
        """Write *content* to a temporary log file and return its path.

        The file is registered for removal via addCleanup so the test run no
        longer leaks temp files (the original used delete=False and never
        unlinked the files it created).
        """
        with tempfile.NamedTemporaryFile("w+", encoding="utf-8", delete=False) as handle:
            handle.write(content)
            path = handle.name
        self.addCleanup(Path(path).unlink)
        return path

    def _run_main(self, args: Namespace) -> tuple[int, str]:
        """Run MODULE.main() with parse_args patched; return (exit code, stdout)."""
        with patch.object(MODULE, "parse_args", return_value=args), patch(
            "sys.stdout", new_callable=io.StringIO
        ) as stdout:
            code = MODULE.main()
        return code, stdout.getvalue()

    def test_keyword_search_respects_time_window(self) -> None:
        path = self._write_log(
            "2026-03-24T10:00:00Z INFO boot\n"
            "2026-03-24T10:05:00Z ERROR payment timeout\n"
            "2026-03-24T10:10:00Z ERROR payment timeout\n"
        )

        args = Namespace(
            paths=[path],
            keyword=["timeout"],
            regex=[],
            mode="any",
            ignore_case=False,
            start="2026-03-24T10:01:00Z",
            end="2026-03-24T10:06:00Z",
            assume_timezone="UTC",
            before_context=0,
            after_context=0,
            count_only=False,
        )

        code, output = self._run_main(args)

        # Only the 10:05 line falls inside [10:01, 10:06].
        self.assertEqual(code, 0)
        self.assertEqual(output.strip(), "2026-03-24T10:05:00Z ERROR payment timeout")

    def test_all_mode_requires_every_matcher(self) -> None:
        matchers = [
            lambda line: "timeout" in line,
            lambda line: "payment" in line,
        ]

        self.assertTrue(MODULE.line_matches("payment timeout", matchers, "all"))
        self.assertFalse(MODULE.line_matches("timeout only", matchers, "all"))

    def test_count_only_reports_match_total(self) -> None:
        path = self._write_log(
            "2026-03-24T10:00:00Z INFO boot\n"
            "2026-03-24T10:05:00Z ERROR payment timeout\n"
            "2026-03-24T10:06:00Z WARN retry timeout\n"
        )

        args = Namespace(
            paths=[path],
            keyword=["timeout"],
            regex=[],
            mode="any",
            ignore_case=True,
            start=None,
            end=None,
            assume_timezone="UTC",
            before_context=0,
            after_context=0,
            count_only=True,
        )

        code, output = self._run_main(args)

        self.assertEqual(code, 0)
        self.assertEqual(output.strip(), "2")
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
# Allow running this test module directly: `python test_search_logs.py`.
if __name__ == "__main__":
    unittest.main()
|
|
@@ -1,369 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
"""Extract recent Codex conversation history from Codex session stores."""

from __future__ import annotations

import argparse
import json
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Iterable, List, Optional, Sequence, Tuple

# Default scan window when looking for recent sessions: one day, in minutes.
DEFAULT_LOOKBACK_MINUTES = 24 * 60
# Sessions older than this many days are deleted after they have been read.
DEFAULT_RETENTION_DAYS = 7
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
@dataclass
class SessionRecord:
    """A single session file together with its parsed metadata."""

    # Location of the .jsonl session file on disk.
    path: Path
    # Session start time, normalised to UTC.
    timestamp_utc: datetime
    # Extracted (role, text) pairs; None until populated by the caller.
    messages: Optional[List[Tuple[str, str]]] = None
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
def parse_iso_timestamp(raw: Optional[str]) -> Optional[datetime]:
    """Parse an ISO-8601 string into an aware UTC datetime, or None on failure."""
    if raw is None:
        return None
    text = raw.strip()
    if not text:
        return None
    # datetime.fromisoformat does not accept a trailing "Z" on older Pythons;
    # rewrite it as an explicit UTC offset first.
    if text.endswith("Z"):
        text = f"{text[:-1]}+00:00"
    try:
        moment = datetime.fromisoformat(text)
    except ValueError:
        return None
    # Treat naive timestamps as UTC, then normalise everything to UTC.
    if moment.tzinfo is None:
        moment = moment.replace(tzinfo=timezone.utc)
    return moment.astimezone(timezone.utc)
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
def read_session_timestamp(path: Path) -> Optional[datetime]:
    """Return the UTC start time from a session file's meta header line, or None."""
    try:
        with path.open("r", encoding="utf-8") as handle:
            header = handle.readline().strip()
    except OSError:
        return None

    if not header:
        return None

    try:
        meta = json.loads(header)
    except json.JSONDecodeError:
        return None

    # Only the first line of a valid session file is a session_meta entry.
    if meta.get("type") != "session_meta":
        return None

    payload = meta.get("payload", {})
    if not isinstance(payload, dict):
        return None

    # Prefer the payload-level timestamp; fall back to the entry-level one.
    return parse_iso_timestamp(payload.get("timestamp")) or parse_iso_timestamp(meta.get("timestamp"))
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
def iter_session_paths(root: Path) -> Iterable[Path]:
    """Yield every .jsonl file under *root*; yields nothing if it is not a directory."""
    # Path.is_dir() is already False for nonexistent paths, so one check suffices.
    if root.is_dir():
        yield from root.rglob("*.jsonl")
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
def find_recent_sessions(
    session_roots: Sequence[Path],
    cutoff_utc: datetime,
    limit: Optional[int],
) -> List[SessionRecord]:
    """Collect sessions that started at/after *cutoff_utc*, newest first.

    Files reachable from more than one root are counted once. When *limit*
    is given, only the newest *limit* records are returned.
    """
    records: List[SessionRecord] = []
    visited = set()

    for root in session_roots:
        for candidate in iter_session_paths(root):
            real_path = candidate.resolve()
            # Roots may overlap (e.g. one nested in another); dedupe by real path.
            if real_path in visited:
                continue
            visited.add(real_path)

            started_at = read_session_timestamp(candidate)
            if started_at is not None and started_at >= cutoff_utc:
                records.append(SessionRecord(path=candidate, timestamp_utc=started_at))

    records.sort(key=lambda record: record.timestamp_utc, reverse=True)
    return records if limit is None else records[:limit]
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
def sanitize_text(text: str, max_chars: int) -> str:
    """Normalise newlines, strip whitespace, and truncate with a "..." suffix.

    A non-positive *max_chars* disables truncation entirely.
    """
    normalised = text.replace("\r\n", "\n").replace("\r", "\n").strip()
    if max_chars <= 0 or len(normalised) <= max_chars:
        return normalised
    # Cut one char short of the limit, drop trailing whitespace, mark the cut.
    return normalised[: max_chars - 1].rstrip() + "..."
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
def looks_like_wrapper_message(text: str) -> bool:
    """Return True for empty text or Codex harness boilerplate (not real chat)."""
    body = text.strip()
    if not body:
        return True
    # Harness-injected blocks start with one of these prefixes, or embed a
    # collaboration-mode tag anywhere in the (case-folded) text.
    wrapper_prefixes = (
        "# AGENTS.md instructions for",
        "<environment_context>",
        "<permissions instructions>",
        "<app-context>",
    )
    return body.startswith(wrapper_prefixes) or "<collaboration_mode>" in body.lower()
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
def extract_text_from_content(content: Sequence[object]) -> str:
    """Join the text of textual content parts, skipping non-dicts and blanks."""
    fragments = [
        part["text"]
        for part in content
        if isinstance(part, dict)
        and part.get("type") in {"input_text", "output_text", "text"}
        and isinstance(part.get("text"), str)
        and part["text"].strip()
    ]
    return "\n".join(fragments).strip()
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
def extract_messages_from_event_entries(entries: Iterable[dict], max_chars: int) -> List[Tuple[str, str]]:
    """Pull (role, text) pairs from event_msg entries (the newer session format)."""
    messages: List[Tuple[str, str]] = []
    for entry in entries:
        if entry.get("type") != "event_msg":
            continue
        payload = entry.get("payload", {})
        if not isinstance(payload, dict):
            continue

        payload_type = payload.get("type")
        if payload_type not in ("user_message", "agent_message"):
            continue

        # user_message → "user"; agent_message → "assistant".
        role = "user" if payload_type == "user_message" else "assistant"
        text = payload.get("message", "")
        if isinstance(text, str) and text.strip():
            messages.append((role, sanitize_text(text, max_chars)))
    return messages
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
def extract_messages_from_response_items(entries: Iterable[dict], max_chars: int) -> List[Tuple[str, str]]:
    """Pull (role, text) pairs from response_item entries (the older session format)."""
    messages: List[Tuple[str, str]] = []
    for entry in entries:
        if entry.get("type") != "response_item":
            continue
        payload = entry.get("payload", {})
        if not isinstance(payload, dict) or payload.get("type") != "message":
            continue

        role = payload.get("role")
        if role not in {"user", "assistant"}:
            continue

        text = extract_text_from_content(payload.get("content", []))
        # Skip empty bodies and harness-injected wrapper blocks.
        if text and not looks_like_wrapper_message(text):
            messages.append((role, sanitize_text(text, max_chars)))
    return messages
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
def extract_session_messages(path: Path, max_chars: int) -> List[Tuple[str, str]]:
    """Load a session file and return its (role, text) pairs.

    Event-format messages are preferred; the response_item format is used
    only when no event messages are present. Unreadable files yield [].
    """
    parsed_entries: List[dict] = []
    try:
        with path.open("r", encoding="utf-8") as handle:
            for raw_line in handle:
                stripped = raw_line.strip()
                if not stripped:
                    continue
                try:
                    parsed_entries.append(json.loads(stripped))
                except json.JSONDecodeError:
                    # Tolerate truncated/corrupt lines; keep whatever parses.
                    continue
    except OSError:
        return []

    # Newer sessions log event_msg entries; fall back to response_item format.
    return (
        extract_messages_from_event_entries(parsed_entries, max_chars)
        or extract_messages_from_response_items(parsed_entries, max_chars)
    )
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
def delete_matching_files(root: Path, predicate) -> int:
    """Delete every .jsonl under *root* for which predicate(path) is true.

    Returns the number of files actually removed; files that cannot be
    deleted are skipped (best effort).
    """
    if not root.is_dir():
        return 0

    deleted = 0
    for candidate in root.rglob("*.jsonl"):
        if not predicate(candidate):
            continue
        try:
            candidate.unlink()
        except OSError:
            # Leave undeletable files behind rather than failing the run.
            continue
        deleted += 1
    return deleted
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
def path_is_same_or_nested(path: Path, root: Optional[Path]) -> bool:
    """True when *path* equals *root* or lives somewhere beneath it."""
    if root is None:
        return False
    # relative_to raises ValueError when path is outside root.
    try:
        path.resolve().relative_to(root.resolve())
    except ValueError:
        return False
    return True
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
def cleanup_session_history(
    sessions_dir: Path,
    archived_sessions_dir: Path,
    retention_cutoff_utc: datetime,
) -> Tuple[int, int]:
    """Prune expired live sessions and stale archived copies.

    Returns (deleted live sessions, deleted archived sessions).
    """
    live_root = sessions_dir.resolve() if sessions_dir.exists() else None

    def _expired(path: Path) -> bool:
        # Only files with a readable timestamp older than the cutoff expire.
        started_at = read_session_timestamp(path)
        return started_at is not None and started_at < retention_cutoff_utc

    removed_live = delete_matching_files(sessions_dir, _expired)
    # Archived sessions are transient: drop any copy not also under sessions_dir.
    removed_archived = delete_matching_files(
        archived_sessions_dir,
        lambda path: not path_is_same_or_nested(path, live_root),
    )
    return removed_live, removed_archived
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
def render_text_output(
    records: Sequence[SessionRecord],
    lookback_minutes: int,
    max_message_chars: int,
    removed_old_sessions: int,
    removed_archived_sessions: int,
) -> str:
    """Format records as the machine-readable KEY=VALUE report printed by main()."""
    if not records:
        return "NO_RECENT_CONVERSATIONS"

    output: List[str] = [
        f"RECENT_CONVERSATIONS_FOUND={len(records)}",
        f"LOOKBACK_MINUTES={lookback_minutes}",
        "ARCHIVED_SESSIONS_INCLUDED=true",
        f"CLEANUP_REMOVED_OLD_SESSIONS={removed_old_sessions}",
        f"CLEANUP_REMOVED_ARCHIVED_SESSIONS={removed_archived_sessions}",
    ]

    for index, record in enumerate(records, start=1):
        output.append(f"=== SESSION {index} ===")
        output.append(f"TIMESTAMP_UTC={record.timestamp_utc.isoformat()}")
        output.append(f"FILE={record.path}")

        # Lazily extract transcripts for records that were not pre-populated.
        transcript = record.messages
        if transcript is None:
            transcript = extract_session_messages(record.path, max_message_chars)
        if not transcript:
            output.append("MESSAGES=NONE")
            continue

        for role, message in transcript:
            tag = "USER" if role == "user" else "ASSISTANT"
            output.extend((f"[{tag}]", message, f"[/{tag}]"))

    return "\n".join(output)
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
def parse_args() -> argparse.Namespace:
    """Define and parse the CLI flags for the conversation extractor."""
    parser = argparse.ArgumentParser(
        description="Extract the latest conversation history from Codex session stores",
    )
    parser.add_argument(
        "--sessions-dir",
        default="~/.codex/sessions",
        help="Path to the Codex sessions directory (default: ~/.codex/sessions)",
    )
    parser.add_argument(
        "--archived-sessions-dir",
        default="~/.codex/archived_sessions",
        help="Path to archived Codex sessions (default: ~/.codex/archived_sessions)",
    )
    parser.add_argument(
        "--lookback-minutes",
        type=int,
        default=DEFAULT_LOOKBACK_MINUTES,
        help=f"How far back to look for sessions (default: {DEFAULT_LOOKBACK_MINUTES})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Maximum number of sessions to return (default: all within lookback window)",
    )
    parser.add_argument(
        "--max-message-chars",
        type=int,
        default=1600,
        help="Maximum characters per extracted message (default: 1600)",
    )
    parser.add_argument(
        "--retention-days",
        type=int,
        default=DEFAULT_RETENTION_DAYS,
        help=f"Delete sessions older than this many days after reading (default: {DEFAULT_RETENTION_DAYS})",
    )
    return parser.parse_args()
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
def main() -> int:
    """Entry point: report recent sessions, then prune expired history files."""
    args = parse_args()

    sessions_dir = Path(args.sessions_dir).expanduser().resolve()
    archived_sessions_dir = Path(args.archived_sessions_dir).expanduser().resolve()
    # Clamp CLI numbers to sane minimums so zero/negative values cannot misbehave.
    lookback_minutes = max(args.lookback_minutes, 1)
    limit = args.limit if args.limit is not None and args.limit > 0 else None
    max_message_chars = max(args.max_message_chars, 100)
    retention_days = max(args.retention_days, 1)
    now_utc = datetime.now(timezone.utc)

    # With neither store present there is nothing to report or clean up.
    if (
        (not sessions_dir.exists() or not sessions_dir.is_dir())
        and (not archived_sessions_dir.exists() or not archived_sessions_dir.is_dir())
    ):
        print("NO_RECENT_CONVERSATIONS")
        return 0

    cutoff_utc = now_utc - timedelta(minutes=lookback_minutes)
    recent_records = find_recent_sessions((sessions_dir, archived_sessions_dir), cutoff_utc, limit)
    # Extract transcripts BEFORE cleanup may delete the underlying files.
    for record in recent_records:
        record.messages = extract_session_messages(record.path, max_message_chars)

    retention_cutoff_utc = now_utc - timedelta(days=retention_days)
    removed_old_sessions, removed_archived_sessions = cleanup_session_history(
        sessions_dir,
        archived_sessions_dir,
        retention_cutoff_utc,
    )

    print(
        render_text_output(
            recent_records,
            lookback_minutes,
            max_message_chars,
            removed_old_sessions,
            removed_archived_sessions,
        )
    )
    return 0
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
# Propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())
|
|
@@ -1,130 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
"""Synchronize a normalized memory index section into ~/.codex/AGENTS.md."""

from __future__ import annotations

import argparse
import re
from pathlib import Path
from typing import Iterable

# HTML comment markers delimiting the auto-managed section inside AGENTS.md.
START_MARKER = "<!-- codex-memory-manager:start -->"
END_MARKER = "<!-- codex-memory-manager:end -->"
# Heading used when --section-title is not supplied.
DEFAULT_SECTION_TITLE = "## User Memory Index"
# Guidance lines placed above the index when --instruction-line is not supplied.
DEFAULT_INSTRUCTIONS = [
    "Before starting work, review the index below and open any relevant user preference files.",
    "When a new preference category appears, create or update the matching memory file and refresh this index.",
]
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
def parse_args() -> argparse.Namespace:
    """Define and parse the CLI flags for the memory-index sync tool."""
    parser = argparse.ArgumentParser(
        description="Sync the Codex user memory index section inside AGENTS.md",
    )
    parser.add_argument(
        "--agents-file",
        default="~/.codex/AGENTS.md",
        help="Path to AGENTS.md (default: ~/.codex/AGENTS.md)",
    )
    parser.add_argument(
        "--memory-dir",
        default="~/.codex/memory",
        help="Directory that stores memory markdown files (default: ~/.codex/memory)",
    )
    parser.add_argument(
        "--section-title",
        default=DEFAULT_SECTION_TITLE,
        help=f"Heading to use for the index section (default: {DEFAULT_SECTION_TITLE!r})",
    )
    parser.add_argument(
        "--instruction-line",
        action="append",
        dest="instruction_lines",
        help="Instruction line to place before the index bullets. Repeat to add more lines.",
    )
    return parser.parse_args()
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
def title_from_memory_file(path: Path) -> str:
    """Return the first H1 heading in *path*, else a title-cased form of its stem."""
    fallback = path.stem.replace("-", " ").title()
    try:
        content = path.read_text(encoding="utf-8")
    except OSError:
        return fallback

    for line in content.splitlines():
        candidate = line.strip()
        if candidate.startswith("# "):
            # An empty heading ("# ") still falls back to the stem-based title.
            return candidate[2:].strip() or fallback

    return fallback
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
def iter_memory_files(memory_dir: Path) -> Iterable[Path]:
    """Return the .md files directly inside *memory_dir*, sorted case-insensitively."""
    # is_dir() is already False for nonexistent paths, so one check suffices.
    if not memory_dir.is_dir():
        return []
    markdown_files = [entry for entry in memory_dir.glob("*.md") if entry.is_file()]
    return sorted(markdown_files, key=lambda entry: entry.name.lower())
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
def render_section(memory_files: list[Path], section_title: str, instruction_lines: list[str]) -> str:
    """Build the marker-delimited index section to be inserted into AGENTS.md."""
    section = [START_MARKER, section_title.strip(), ""]

    instructions = [text.strip() for text in instruction_lines if text and text.strip()]
    section.extend(instructions)
    if instructions:
        section.append("")

    if memory_files:
        # Sort bullets by display title, then by resolved path for stability.
        indexed = sorted(
            ((title_from_memory_file(path), path.expanduser().resolve()) for path in memory_files),
            key=lambda item: (item[0].lower(), str(item[1]).lower()),
        )
        section.extend(f"- [{title}]({path})" for title, path in indexed)
    else:
        section.append("- No memory files are currently indexed.")

    section.append(END_MARKER)
    return "\n".join(section)
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
def remove_existing_section(content: str) -> str:
    """Strip any previously synced marker-delimited section from *content*."""
    section_pattern = re.compile(
        rf"\n*{re.escape(START_MARKER)}.*?{re.escape(END_MARKER)}\n*",
        re.DOTALL,
    )
    # Collapse the removed span to a blank separator, then trim the tail.
    return section_pattern.sub("\n\n", content).rstrip()
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
def sync_agents_file(agents_file: Path, section_text: str) -> None:
    """Rewrite *agents_file* so it ends with exactly one copy of *section_text*."""
    agents_file.parent.mkdir(parents=True, exist_ok=True)
    try:
        existing = agents_file.read_text(encoding="utf-8")
    except FileNotFoundError:
        existing = ""

    # Drop any earlier synced section, then append the fresh one.
    remainder = remove_existing_section(existing)
    updated = f"{remainder}\n\n{section_text}\n" if remainder else f"{section_text}\n"
    agents_file.write_text(updated, encoding="utf-8")
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
def main() -> int:
    """Entry point: rebuild the memory index section and report what was synced."""
    args = parse_args()
    agents_file = Path(args.agents_file).expanduser()
    memory_dir = Path(args.memory_dir).expanduser()
    instruction_lines = args.instruction_lines or DEFAULT_INSTRUCTIONS

    # Fix: scan the memory directory ONCE and reuse the listing for both the
    # rendered section and the reported count. The old code called
    # iter_memory_files twice, re-reading the directory and risking a count
    # inconsistent with the index actually written.
    memory_files = list(iter_memory_files(memory_dir))
    section_text = render_section(memory_files, args.section_title, instruction_lines)
    sync_agents_file(agents_file, section_text)

    print(f"SYNCED_AGENTS_FILE={agents_file.resolve()}")
    print(f"MEMORY_FILES_INDEXED={len(memory_files)}")
    return 0
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
# Propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())
|