lyingdocs 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lyingdocs/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ """LyingDocs: Documentation-Code Misalignment Detection."""
2
+
3
+ __version__ = "0.1.0"
lyingdocs/__main__.py ADDED
@@ -0,0 +1,5 @@
1
+ """Allow running as: python -m lyingdocs"""
2
+
3
+ from .cli import main
4
+
5
+ main()
lyingdocs/agent.py ADDED
@@ -0,0 +1,352 @@
1
+ """DocentAgent: autonomous documentation-code misalignment detection agent."""
2
+
3
+ import json
4
+ import logging
5
+ from concurrent.futures import ThreadPoolExecutor, as_completed
6
+ from dataclasses import asdict
7
+ from pathlib import Path
8
+
9
+ from .codex import find_codex_binary
10
+ from .doctree import DocTree
11
+ from .llm import call_llm, call_llm_with_tools, make_client
12
+ from .tools import TOOL_SCHEMAS, ToolExecutor
13
+ from .workspace import Workspace
14
+
15
+ logger = logging.getLogger("lyingdocs")
16
+
17
+ PROMPTS_DIR = Path(__file__).resolve().parent / "prompts"
18
+
19
+
20
class DocentAgent:
    """Autonomous agent that traverses documentation and dispatches code audits.

    The agent runs a tool-calling LLM loop: it reads the documentation tree,
    issues tool calls (including Codex CLI code-analysis dispatches), collects
    findings in a Workspace, and finally synthesizes a Markdown report.
    """

    def __init__(
        self,
        config: dict,
        doc_path: Path,
        code_path: Path,
        output_dir: Path,
    ):
        """Wire up the LLM client, doc index, workspace, and tool executor.

        Args:
            config: Resolved configuration mapping (model name, budgets,
                codex flags, resume flag, ...).
            doc_path: Root directory of the documentation to audit.
            code_path: Root directory of the code repository under audit.
            output_dir: Directory that receives reports, checkpoints, and
                per-task Codex output files.
        """
        self.config = config
        self.doc_path = doc_path
        self.code_path = code_path
        self.output_dir = output_dir

        self.client = make_client(config)
        self.model = config["model"]
        self.max_iterations = config.get("max_iterations", 50)
        # Rough context-size ceiling (estimated tokens) before compression.
        self.token_budget = config.get("token_budget", 524_288)

        self.doctree = DocTree(doc_path)
        self.workspace = Workspace(
            output_dir, max_dispatches=config.get("max_dispatches", 20)
        )

        # Resolve codex binary once at startup
        self.codex_bin = None
        if config.get("codex_enabled", True):
            self.codex_bin = find_codex_binary(config)
            if self.codex_bin:
                logger.info("Codex CLI found: %s", self.codex_bin)
            else:
                logger.warning(
                    "Codex CLI not found. Code analysis dispatches will be unavailable. "
                    "Install via: npm install -g @openai/codex"
                )
        else:
            logger.info("Codex CLI disabled by configuration.")

        self.tool_executor = ToolExecutor(
            doc_root=doc_path,
            code_path=code_path,
            output_dir=output_dir,
            workspace=self.workspace,
            config=config,
            codex_bin=self.codex_bin,
        )

        # Full chat transcript (system + user + assistant + tool messages);
        # seeded in run() and compressed in _compress_context().
        self.messages: list[dict] = []

    def run(self) -> str:
        """Execute the full agent loop. Returns the final report path."""
        # Resume from checkpoint if requested
        if self.config.get("resume"):
            self.workspace.load_state()

        # Build doc index
        self.doctree.build_index()
        self.doctree.save_index(self.output_dir)

        # Seed conversation
        system_prompt = self._load_prompt("agent_system.txt")
        kickoff = self._build_kickoff_message()

        self.messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": kickoff},
        ]

        logger.info("DocentAgent started — %d doc files indexed", len(self.doctree.files))

        # Agent loop
        iteration = 0
        while iteration < self.max_iterations:
            iteration += 1
            logger.info("--- Agent iteration %d/%d ---", iteration, self.max_iterations)

            # Call LLM with tools
            response = call_llm_with_tools(
                self.client, self.model, self.messages, TOOL_SCHEMAS
            )

            # Append assistant message
            assistant_msg = self._response_to_message(response)
            self.messages.append(assistant_msg)

            # Handle tool calls (parallel when multiple)
            if response.tool_calls:
                # Pre-parse all call arguments; malformed JSON degrades to {}
                # so a bad argument string cannot crash the loop.
                parsed_calls = []
                for tool_call in response.tool_calls:
                    name = tool_call.function.name
                    try:
                        args = json.loads(tool_call.function.arguments)
                    except json.JSONDecodeError:
                        args = {}
                    parsed_calls.append((tool_call, name, args))

                if len(parsed_calls) == 1:
                    # Single tool call — execute directly
                    tc, name, args = parsed_calls[0]
                    logger.info(" Tool call: %s(%s)", name, _truncate(str(args), 100))
                    result = self.tool_executor.execute(name, args)
                    logger.info(" Result: %s", _truncate(result, 200))
                    self.messages.append({
                        "role": "tool",
                        "tool_call_id": tc.id,
                        "content": result,
                    })
                else:
                    # Multiple tool calls — execute in parallel
                    # NOTE(review): assumes ToolExecutor.execute is safe to
                    # call from multiple threads — confirm.
                    def _exec(item):
                        tc, name, args = item
                        logger.info(" Tool call: %s(%s)", name, _truncate(str(args), 100))
                        result = self.tool_executor.execute(name, args)
                        logger.info(" Result: %s", _truncate(result, 200))
                        return tc, result

                    with ThreadPoolExecutor(max_workers=len(parsed_calls)) as pool:
                        futures = {
                            pool.submit(_exec, item): item[0].id
                            for item in parsed_calls
                        }
                        results_map = {}
                        for future in as_completed(futures):
                            tc, result = future.result()
                            results_map[tc.id] = result

                    # Append results in original order to keep conversation deterministic
                    for tc, _, _ in parsed_calls:
                        self.messages.append({
                            "role": "tool",
                            "tool_call_id": tc.id,
                            "content": results_map[tc.id],
                        })

                # Save state after each batch of tool calls
                self.workspace.save_state()
            else:
                # Text-only response — agent is thinking or done
                if response.content:
                    logger.info(" Agent: %s", _truncate(response.content, 200))

            # Check completion
            if self.workspace.is_complete():
                logger.info("Agent signaled completion.")
                break

            # Budget check
            # NOTE(review): this nudge is re-appended on every remaining
            # iteration while the budget stays exhausted — presumably
            # intentional pressure on the model; verify.
            if self.workspace.is_budget_exhausted():
                logger.warning("Codex dispatch budget exhausted — nudging agent to finalize.")
                self.messages.append({
                    "role": "user",
                    "content": (
                        "Your Codex dispatch budget is exhausted. Please call "
                        "finalize_report now to generate the final report with "
                        "the findings collected so far."
                    ),
                })

            # Context management
            if self._estimate_tokens() > self.token_budget:
                self._compress_context()

            # If no tool calls and not complete, nudge to continue
            if not response.tool_calls and not self.workspace.is_complete():
                self.messages.append({
                    "role": "user",
                    "content": "Continue with the audit. Use your tools to proceed.",
                })
        else:
            # while/else: this branch runs only when the loop exhausts
            # max_iterations WITHOUT hitting the completion `break` above.
            logger.warning(
                "Max iterations (%d) reached — auto-finalizing.", self.max_iterations
            )

        # Generate final report
        report_path = self._generate_report()
        self.workspace.save_state()

        return str(report_path)

    def _build_kickoff_message(self) -> str:
        """Build the initial message with doc tree overview."""
        overview = self.doctree.get_overview()
        progress = ""
        # On --resume, surface prior progress so the agent does not redo work.
        if self.workspace.findings or self.workspace.completed_sections:
            progress = (
                "\n\n## Resumed Session\n"
                + self.workspace.get_progress_summary()
            )

        codex_status = (
            f"Codex dispatches available: {self.workspace.dispatches_remaining()}"
            if self.codex_bin
            else "Codex CLI: NOT AVAILABLE — you must rely on documentation analysis only"
        )

        return (
            f"## Documentation to Audit\n\n{overview}\n\n"
            f"## Code Repository\nPath: {self.code_path}\n\n"
            f"## Your Budget\n"
            f"{codex_status}\n"
            f"Max iterations: {self.max_iterations}\n"
            f"{progress}\n\n"
            "Begin your audit. Start by examining the high-priority documentation "
            "files, then formulate targeted questions for Codex."
        )

    def _response_to_message(self, response) -> dict:
        """Convert an OpenAI response message to a serializable dict.

        The transcript must stay JSON-serializable so _estimate_tokens()
        and workspace checkpointing can dump it.
        """
        msg = {"role": "assistant"}
        if response.content:
            msg["content"] = response.content
        if response.tool_calls:
            msg["tool_calls"] = [
                {
                    "id": tc.id,
                    "type": "function",
                    "function": {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments,
                    },
                }
                for tc in response.tool_calls
            ]
        return msg

    def _estimate_tokens(self) -> int:
        """Rough token estimate: ~4 chars per token."""
        total_chars = sum(
            len(json.dumps(m)) for m in self.messages
        )
        return total_chars // 4

    def _compress_context(self) -> None:
        """Compress older messages to stay within token budget.

        Replaces everything between the kickoff and the last `keep_recent`
        messages with a single LLM-generated summary message.
        """
        logger.info(" Compressing context (estimated %d tokens)", self._estimate_tokens())

        keep_recent = 8  # Keep last 4 exchanges
        if len(self.messages) <= keep_recent + 1:
            return  # Not enough to compress

        # Extract messages to summarize (skip system prompt)
        # NOTE(review): messages[1] (the kickoff) is both summarized here and
        # kept verbatim below — redundant but harmless; confirm intent.
        old_messages = self.messages[1:-keep_recent]
        if not old_messages:
            return

        # Build a summary request — flatten each message to "[role] text",
        # rendering tool calls as "name(truncated-args)".
        summary_input = []
        for m in old_messages:
            role = m.get("role", "unknown")
            content = m.get("content", "")
            if m.get("tool_calls"):
                calls = [
                    f'{tc["function"]["name"]}({_truncate(tc["function"]["arguments"], 80)})'
                    for tc in m["tool_calls"]
                ]
                content = "Tool calls: " + ", ".join(calls)
            if content:
                summary_input.append(f"[{role}] {content}")

        summary_text = "\n".join(summary_input)

        summary = call_llm(
            self.client,
            self.model,
            (
                "Summarize the following agent conversation. Preserve ALL key findings, "
                "decisions, audit results, and which doc sections have been examined. "
                "Be concise but complete — this summary replaces the original messages."
            ),
            summary_text,
        )

        # Replace old messages with summary
        self.messages = [
            self.messages[0],  # system prompt
            self.messages[1],  # kickoff message
            {"role": "user", "content": f"[Context Summary from prior work]\n\n{summary}"},
            *self.messages[-keep_recent:],
        ]

        logger.info(
            " Context compressed: now %d messages (~%d tokens)",
            len(self.messages), self._estimate_tokens(),
        )

    def _generate_report(self) -> Path:
        """Generate the final Misalignment_Report.md from collected findings."""
        report_path = self.output_dir / "Misalignment_Report.md"

        # No findings: write a short static report without spending an LLM call.
        if not self.workspace.findings:
            report = (
                f"# Documentation-Code Misalignment Report: "
                f"{self.doc_path.name}\n\n"
                "## Executive Summary\n\n"
                "No misalignment findings were detected during the audit. "
                "The documentation appears to be well-aligned with the codebase.\n"
            )
            report_path.write_text(report, encoding="utf-8")
            logger.info("No findings — wrote empty report to %s", report_path)
            return report_path

        # Use LLM to synthesize the report
        synthesis_prompt = self._load_prompt("report_synthesis.txt")
        findings_json = json.dumps(
            [asdict(f) for f in self.workspace.findings], indent=2
        )

        user_content = (
            f"Project: {self.doc_path.name}\n\n"
            f"## Raw Findings ({len(self.workspace.findings)} total)\n\n"
            f"```json\n{findings_json}\n```\n\n"
            f"## Audit Coverage\n"
            f"Sections audited: {len(self.workspace.completed_sections)}\n"
            f"Codex dispatches used: {self.workspace.codex_dispatch_count}\n"
        )

        report = call_llm(
            self.client, self.model, synthesis_prompt, user_content
        )

        report_path.write_text(report, encoding="utf-8")
        logger.info("Final report written to %s (%d chars)", report_path, len(report))
        return report_path

    def _load_prompt(self, filename: str) -> str:
        """Read a bundled prompt template from the package prompts directory."""
        return (PROMPTS_DIR / filename).read_text(encoding="utf-8")
347
+
348
+
349
+ def _truncate(text: str, max_len: int) -> str:
350
+ if len(text) <= max_len:
351
+ return text
352
+ return text[:max_len] + "... (truncated)"
lyingdocs/cli.py ADDED
@@ -0,0 +1,149 @@
1
+ """CLI entry point for LyingDocs."""
2
+
3
+ import argparse
4
+ import logging
5
+ import sys
6
+ from pathlib import Path
7
+
8
+ from . import __version__
9
+ from .config import DEFAULTS
10
+
11
+
12
def setup_logging(output_dir: Path) -> None:
    """Configure the 'lyingdocs' logger with console + file handlers.

    Idempotent: any handlers left over from a previous call are removed
    first, so calling this twice in one process (re-runs, tests) does not
    produce duplicated log lines.

    Args:
        output_dir: Directory for ``pipeline.log``; created if missing.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    logger = logging.getLogger("lyingdocs")
    logger.setLevel(logging.INFO)

    # Fix: the original accumulated a new pair of handlers on every call.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
        handler.close()

    fmt = logging.Formatter(
        "%(asctime)s [%(levelname)s] %(message)s", datefmt="%H:%M:%S"
    )

    # Human-facing stream on stderr (keeps stdout clean for the report path).
    console = logging.StreamHandler(sys.stderr)
    console.setFormatter(fmt)
    logger.addHandler(console)

    # Persistent append-mode log alongside the run's outputs.
    fh = logging.FileHandler(output_dir / "pipeline.log", mode="a")
    fh.setFormatter(fmt)
    logger.addHandler(fh)
28
+
29
+
30
def cmd_analyze(args: argparse.Namespace) -> None:
    """Run the documentation-code misalignment analysis."""
    # Imported lazily so `lyingdocs version` stays fast and dependency-light.
    from .agent import DocentAgent
    from .config import load_config

    doc_root = Path(args.doc_path)
    repo_root = Path(args.code_path)

    # Fail fast on bad paths before any heavier setup.
    if not doc_root.is_dir():
        sys.exit(f"ERROR: Documentation directory not found: {doc_root}")
    if not repo_root.is_dir():
        sys.exit(f"ERROR: Code repository not found: {repo_root}")

    config = load_config(args)
    out_dir = config["output_dir"]

    setup_logging(out_dir)
    log = logging.getLogger("lyingdocs")
    log.info(
        "LyingDocs starting — doc=%s code=%s output=%s",
        doc_root, repo_root, out_dir,
    )

    report_path = DocentAgent(
        config=config,
        doc_path=doc_root,
        code_path=repo_root,
        output_dir=out_dir,
    ).run()

    log.info("Done. Report at: %s", report_path)
    print(f"\nReport generated: {report_path}")
63
+
64
+
65
def cmd_version(_args: argparse.Namespace) -> None:
    """Print version and exit."""
    print("lyingdocs " + __version__)
68
+
69
+
70
def main():
    """Parse CLI arguments and dispatch to the selected subcommand."""
    parser = argparse.ArgumentParser(
        prog="lyingdocs",
        description="LyingDocs: Documentation-Code Misalignment Detection",
    )
    subparsers = parser.add_subparsers(dest="command")

    # -- analyze subcommand --
    analyze = subparsers.add_parser(
        "analyze",
        help="Analyze documentation against code for misalignments",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\
examples:
  lyingdocs analyze --doc-path docs/ --code-path . -o output/audit
  lyingdocs analyze --doc-path docs/ --code-path . --no-codex
  lyingdocs analyze --doc-path docs/ --code-path . --config lyingdocs.toml
""",
    )

    # Required inputs.
    analyze.add_argument(
        "--doc-path", required=True,
        help="Path to documentation root directory",
    )
    analyze.add_argument(
        "--code-path", required=True,
        help="Path to code repository root",
    )

    # Output / model configuration.
    analyze.add_argument(
        "--output-dir", "-o", default="output",
        help="Output directory (default: output/)",
    )
    analyze.add_argument(
        "--model", "-m", default=None,
        help="LLM model name (overrides config/env)",
    )
    analyze.add_argument(
        "--base-url", default=None,
        help="API base URL (overrides config/env)",
    )

    # Codex CLI integration knobs.
    analyze.add_argument(
        "--codex-provider", default=None,
        help="Codex CLI model provider name",
    )
    analyze.add_argument(
        "--wire-api", default=None,
        help="Codex CLI provider wire_api setting (e.g. 'responses' or 'chat')",
    )
    analyze.add_argument(
        "--max-dispatches", type=int, default=DEFAULTS["max_dispatches"],
        help="Max Codex CLI dispatches (default: %(default)s)",
    )
    analyze.add_argument(
        "--max-iterations", type=int, default=DEFAULTS["max_iterations"],
        help="Max agent loop iterations (default: %(default)s)",
    )
    analyze.add_argument(
        "--no-codex", action="store_true",
        help="Disable Codex CLI integration (doc-only analysis)",
    )
    analyze.add_argument(
        "--config", default=None,
        help="Path to config file (default: auto-detect lyingdocs.toml)",
    )
    analyze.add_argument(
        "--resume", action="store_true",
        help="Resume from workspace checkpoint if available",
    )
    analyze.set_defaults(func=cmd_analyze)

    # -- version subcommand --
    version = subparsers.add_parser("version", help="Show version")
    version.set_defaults(func=cmd_version)

    args = parser.parse_args()

    # No subcommand given: show usage and signal error to the shell.
    if args.command is None:
        parser.print_help()
        sys.exit(1)

    args.func(args)
lyingdocs/codex.py ADDED
@@ -0,0 +1,150 @@
1
+ """Codex CLI wrapper for atomic code analysis task dispatch."""
2
+
3
+ import logging
4
+ import os
5
+ import shutil
6
+ import subprocess
7
+ from pathlib import Path
8
+
9
# Shared package logger — handlers are attached by cli.setup_logging().
logger = logging.getLogger("lyingdocs")

# Prompt templates bundled alongside the package source.
PROMPTS_DIR = Path(__file__).resolve().parent / "prompts"
12
+
13
+
14
+ def find_codex_binary(config: dict) -> str | None:
15
+ """Locate the codex CLI binary. Returns path string or None."""
16
+ # 1. Explicit path from config
17
+ explicit = config.get("codex_path")
18
+ if explicit:
19
+ p = Path(explicit)
20
+ if p.is_file() and os.access(str(p), os.X_OK):
21
+ return str(p)
22
+ logger.warning("Configured codex_path not found or not executable: %s", p)
23
+
24
+ # 2. System PATH (globally installed via npm install -g @openai/codex)
25
+ system_codex = shutil.which("codex")
26
+ if system_codex:
27
+ return system_codex
28
+
29
+ # 3. Local node_modules (dev setup / legacy)
30
+ for root in (Path.cwd(), Path(__file__).resolve().parent.parent):
31
+ local = root / "node_modules" / ".bin" / "codex"
32
+ if local.is_file():
33
+ return str(local)
34
+
35
+ return None
36
+
37
+
38
def codex_provider_flags(config: dict) -> list[str]:
    """Return the CLI flags that configure the model provider for codex."""
    provider = config.get("codex_provider", "openai")
    model_flags = ["-m", config["model"]]

    # The default OpenAI provider is known to codex natively — model only.
    if provider == "openai":
        return model_flags

    # Custom provider: inject the full provider configuration as -c overrides.
    wire_api = config.get("wire_api", "responses")
    overrides = [
        f'model_provider="{provider}"',
        f'model_providers.{provider}.name="{provider}"',
        f'model_providers.{provider}.base_url="{config["base_url"]}"',
        f'model_providers.{provider}.env_key="OPENAI_API_KEY"',
        f'model_providers.{provider}.wire_api="{wire_api}"',
        'model_reasoning_effort="high"',
    ]

    flags = list(model_flags)
    for override in overrides:
        flags.extend(["-c", override])
    return flags
56
+
57
+
58
def _load_codex_task_template() -> str:
    """Read the codex task prompt template shipped with the package."""
    template_path = PROMPTS_DIR / "codex_task.txt"
    return template_path.read_text(encoding="utf-8")
60
+
61
+
62
def run_codex_task(
    config: dict,
    task_description: str,
    code_path: Path,
    output_dir: Path,
    task_id: str,
    focus_paths: list[str] | None = None,
    codex_bin: str | None = None,
) -> str:
    """Run a single atomic Codex analysis task.

    Builds a prompt from the packaged template, pipes it to ``codex exec``
    over stdin, and collects the result from the ``-o`` output file (with a
    stdout fallback). Stderr is saved to a per-task file for debugging.

    Args:
        config: Resolved configuration (model, provider, timeout).
        task_description: The analysis question to ask Codex.
        code_path: Repository root Codex should operate in (``-C``).
        output_dir: Directory for per-task output and stderr files.
        task_id: Unique id used in output filenames and log lines.
        focus_paths: Optional priority files/dirs listed in the prompt.
        codex_bin: Resolved codex binary path; None disables dispatch.

    Returns the Codex output text, or an error message if codex is unavailable.
    """
    if not codex_bin:
        return (
            "[UNAVAILABLE] Codex CLI binary not found. "
            "Install it via 'npm install -g @openai/codex' to enable code analysis. "
            "You can also set codex.path in your config file."
        )

    template = _load_codex_task_template()

    focus_section = ""
    if focus_paths:
        paths_str = "\n".join(f"  - {p}" for p in focus_paths)
        focus_section = f"\nPriority files/directories to examine:\n{paths_str}\n"

    # Template is expected to expose {task_description} and
    # {focus_paths_section} placeholders.
    full_prompt = template.format(
        task_description=task_description,
        focus_paths_section=focus_section,
    )

    output_file = output_dir / f"codex_task_{task_id}.txt"
    stderr_file = output_dir / f"codex_stderr_{task_id}.txt"

    cmd = [
        codex_bin, "exec",
        "--dangerously-bypass-approvals-and-sandbox",
        "-C", str(code_path.resolve()),
        "--skip-git-repo-check",
        "-o", str(output_file.resolve()),
        *codex_provider_flags(config),
        "-",  # read prompt from stdin
    ]

    logger.info(" Codex task %s: dispatching ...", task_id)
    # Log only the flag prefix — the provider flags may repeat config values.
    logger.debug(" Command: %s", " ".join(cmd[:7]) + " ...")

    try:
        result = subprocess.run(
            cmd,
            input=full_prompt,
            capture_output=True,
            text=True,
            timeout=config.get("codex_task_timeout", 1200),
            env=os.environ.copy(),
        )

        # Save stderr for debugging
        stderr_file.write_text(result.stderr, encoding="utf-8")

        # Non-zero exit is logged but not fatal — useful output may still
        # have been written to the -o file.
        if result.returncode != 0:
            logger.warning(
                " Codex task %s exited with code %d", task_id, result.returncode
            )

        # Read output: -o file first, fallback to stdout
        output = ""
        if output_file.exists() and output_file.stat().st_size > 0:
            output = output_file.read_text(encoding="utf-8")
        elif result.stdout.strip():
            output = result.stdout.strip()
            # Mirror the stdout fallback into the output file for later runs.
            output_file.write_text(output, encoding="utf-8")

        if output:
            logger.info(
                " Codex task %s: completed (%d chars)", task_id, len(output)
            )
        else:
            logger.warning(" Codex task %s: no output produced", task_id)

        return output

    except subprocess.TimeoutExpired:
        logger.error(
            " Codex task %s timed out after %ds",
            task_id, config.get("codex_task_timeout", 1200),
        )
        return "[ERROR] Codex task timed out."