execforge 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- execforge-0.1.0.dist-info/METADATA +367 -0
- execforge-0.1.0.dist-info/RECORD +44 -0
- execforge-0.1.0.dist-info/WHEEL +5 -0
- execforge-0.1.0.dist-info/entry_points.txt +5 -0
- execforge-0.1.0.dist-info/licenses/LICENSE +21 -0
- execforge-0.1.0.dist-info/top_level.txt +1 -0
- orchestrator/__init__.py +4 -0
- orchestrator/__main__.py +5 -0
- orchestrator/backends/__init__.py +1 -0
- orchestrator/backends/base.py +29 -0
- orchestrator/backends/factory.py +53 -0
- orchestrator/backends/llm_cli_backend.py +87 -0
- orchestrator/backends/mock_backend.py +34 -0
- orchestrator/backends/shell_backend.py +49 -0
- orchestrator/cli/__init__.py +1 -0
- orchestrator/cli/main.py +971 -0
- orchestrator/config.py +272 -0
- orchestrator/domain/__init__.py +1 -0
- orchestrator/domain/types.py +77 -0
- orchestrator/exceptions.py +18 -0
- orchestrator/git/__init__.py +1 -0
- orchestrator/git/service.py +202 -0
- orchestrator/logging_setup.py +53 -0
- orchestrator/prompts/__init__.py +1 -0
- orchestrator/prompts/parser.py +91 -0
- orchestrator/reporting/__init__.py +1 -0
- orchestrator/reporting/console.py +197 -0
- orchestrator/reporting/events.py +44 -0
- orchestrator/reporting/selection_result.py +15 -0
- orchestrator/services/__init__.py +1 -0
- orchestrator/services/agent_runner.py +831 -0
- orchestrator/services/agent_service.py +122 -0
- orchestrator/services/project_service.py +47 -0
- orchestrator/services/prompt_source_service.py +65 -0
- orchestrator/services/run_service.py +42 -0
- orchestrator/services/step_executor.py +100 -0
- orchestrator/services/task_service.py +155 -0
- orchestrator/storage/__init__.py +1 -0
- orchestrator/storage/db.py +29 -0
- orchestrator/storage/models.py +95 -0
- orchestrator/utils/__init__.py +1 -0
- orchestrator/utils/process.py +44 -0
- orchestrator/validation/__init__.py +1 -0
- orchestrator/validation/pipeline.py +52 -0
orchestrator/config.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import asdict, dataclass
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
import logging
|
|
6
|
+
import os
|
|
7
|
+
import tempfile
|
|
8
|
+
import tomllib
|
|
9
|
+
from typing import Any, Callable
|
|
10
|
+
|
|
11
|
+
from platformdirs import user_data_dir
|
|
12
|
+
|
|
13
|
+
from orchestrator.exceptions import ConfigError
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
APP_NAME = "agent-orchestrator"
|
|
17
|
+
logger = logging.getLogger("orchestrator.config")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass(slots=True)
class AppPaths:
    """Filesystem layout of the application's persistent state.

    All paths are derived from a single root directory; see get_app_paths
    for how the root is resolved.
    """

    root: Path  # base directory holding everything below
    db_file: Path  # database file (root / "app.db")
    config_file: Path  # TOML config file (root / "config.toml")
    logs_dir: Path  # log files
    prompt_sources_dir: Path  # prompt source checkouts
    runs_dir: Path  # per-run working data
    cache_dir: Path  # cached artifacts
    lock_dir: Path  # lock files
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass(slots=True)
class AppConfig:
    """User-configurable settings persisted in config.toml.

    Field defaults mirror the defaults declared in get_config_schema().
    """

    log_level: str = "INFO"  # logging level name (DEBUG..CRITICAL)
    default_timeout_seconds: int = 900  # default run timeout, validated to 1..86400
    default_require_clean_tree: bool = True  # require a clean working tree before a run
    default_allow_push: bool = False  # allow git push by default
    claude_api_key: str | None = None  # optional API key; masked in display output
    codex_api_key: str | None = None  # optional API key; masked in display output
    opencode_api_key: str | None = None  # optional API key; masked in display output
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@dataclass(slots=True)
class ConfigFieldSpec:
    """Schema entry describing one AppConfig field.

    Drives casting (_cast_value), validation, masking of sensitive values,
    and TOML serialization.
    """

    key: str  # config key; matches the AppConfig attribute name
    value_type: type  # expected Python type (bool / int / str)
    default: Any  # value restored by reset_config_values
    sensitive: bool = False  # masked in config_to_display_dict when True
    validator: Callable[[Any], bool] | None = None  # returns False for invalid values
    description: str = ""  # human-readable description
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def _bool_from_text(value: str) -> bool:
|
|
54
|
+
lowered = value.strip().lower()
|
|
55
|
+
if lowered in {"true", "1", "yes", "y", "on"}:
|
|
56
|
+
return True
|
|
57
|
+
if lowered in {"false", "0", "no", "n", "off"}:
|
|
58
|
+
return False
|
|
59
|
+
raise ConfigError(f"Expected boolean value, got '{value}'")
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _cast_value(spec: ConfigFieldSpec, raw: Any) -> Any:
|
|
63
|
+
if raw is None:
|
|
64
|
+
return None
|
|
65
|
+
if spec.value_type is bool:
|
|
66
|
+
if isinstance(raw, bool):
|
|
67
|
+
return raw
|
|
68
|
+
if isinstance(raw, str):
|
|
69
|
+
return _bool_from_text(raw)
|
|
70
|
+
raise ConfigError(f"Config '{spec.key}' must be a boolean")
|
|
71
|
+
if spec.value_type is int:
|
|
72
|
+
try:
|
|
73
|
+
return int(raw)
|
|
74
|
+
except (TypeError, ValueError) as exc:
|
|
75
|
+
raise ConfigError(f"Config '{spec.key}' must be an integer") from exc
|
|
76
|
+
if spec.value_type is str:
|
|
77
|
+
return str(raw)
|
|
78
|
+
return raw
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def _validate_log_level(value: str) -> bool:
|
|
82
|
+
return value.upper() in {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def _validate_timeout(value: int) -> bool:
|
|
86
|
+
return 1 <= value <= 86400
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def get_config_schema() -> dict[str, ConfigFieldSpec]:
    """Return the config schema as an ordered mapping of key -> ConfigFieldSpec.

    Dict insertion order here determines serialization and display order
    (see _serialize_toml and config_to_display_dict). The defaults listed
    here match the AppConfig field defaults and must stay in sync with them.
    """
    return {
        "log_level": ConfigFieldSpec(
            key="log_level",
            value_type=str,
            default="INFO",
            validator=_validate_log_level,
            description="Logging level",
        ),
        "default_timeout_seconds": ConfigFieldSpec(
            key="default_timeout_seconds",
            value_type=int,
            default=900,
            validator=_validate_timeout,
            description="Default run timeout in seconds",
        ),
        "default_require_clean_tree": ConfigFieldSpec(
            key="default_require_clean_tree",
            value_type=bool,
            default=True,
            description="Require clean working tree before run",
        ),
        "default_allow_push": ConfigFieldSpec(
            key="default_allow_push",
            value_type=bool,
            default=False,
            description="Allow push by default",
        ),
        "claude_api_key": ConfigFieldSpec(
            key="claude_api_key",
            value_type=str,
            default=None,
            sensitive=True,
            description="Optional Claude API key",
        ),
        "codex_api_key": ConfigFieldSpec(
            key="codex_api_key",
            value_type=str,
            default=None,
            sensitive=True,
            description="Optional Codex API key",
        ),
        "opencode_api_key": ConfigFieldSpec(
            key="opencode_api_key",
            value_type=str,
            default=None,
            sensitive=True,
            description="Optional OpenCode API key",
        ),
    }
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def get_app_paths() -> AppPaths:
    """Resolve the application's directory layout.

    The root comes from AGENT_ORCHESTRATOR_HOME (or ORCHESTRATOR_HOME) when
    set, otherwise from the platform's per-user data directory.
    """
    home_override = os.environ.get("AGENT_ORCHESTRATOR_HOME") or os.environ.get("ORCHESTRATOR_HOME")
    if home_override:
        base = Path(home_override).expanduser()
    else:
        base = Path(user_data_dir(APP_NAME, APP_NAME))
    return AppPaths(
        root=base,
        db_file=base / "app.db",
        config_file=base / "config.toml",
        logs_dir=base / "logs",
        prompt_sources_dir=base / "prompt-sources",
        runs_dir=base / "runs",
        cache_dir=base / "cache",
        lock_dir=base / "locks",
    )
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def ensure_app_dirs(paths: AppPaths) -> None:
    """Create every directory referenced by *paths*, parents included."""
    directories = (
        paths.root,
        paths.logs_dir,
        paths.prompt_sources_dir,
        paths.runs_dir,
        paths.cache_dir,
        paths.lock_dir,
    )
    for directory in directories:
        directory.mkdir(parents=True, exist_ok=True)
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def _normalize_loaded_data(data: dict[str, Any]) -> dict[str, Any]:
    """Cast and validate schema keys found in raw TOML *data*.

    Keys not present in the schema are silently dropped; invalid values
    raise ConfigError.
    """
    result: dict[str, Any] = {}
    for key, spec in get_config_schema().items():
        if key not in data:
            continue
        cast = _cast_value(spec, data[key])
        is_valid = cast is None or spec.validator is None or spec.validator(cast)
        if not is_valid:
            raise ConfigError(f"Invalid value for '{key}': {cast}")
        result[key] = cast
    return result
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
def load_config(paths: AppPaths) -> AppConfig:
    """Load AppConfig from config.toml, returning pure defaults when absent.

    Unknown top-level keys in the file are ignored (see _normalize_loaded_data).
    """
    config_file = paths.config_file
    if not config_file.exists():
        return AppConfig()
    with config_file.open("rb") as handle:
        raw = tomllib.load(handle)
    return AppConfig(**_normalize_loaded_data(raw))
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def _serialize_toml(config: AppConfig) -> str:
    """Serialize *config* to TOML text.

    Only keys present in the schema are written, in schema order; None
    values are omitted entirely. String values are emitted as TOML basic
    strings with backslashes and double quotes escaped.
    """
    data = asdict(config)
    schema = get_config_schema()
    lines: list[str] = []
    for key, spec in schema.items():
        value = data.get(key)
        if value is None:
            continue
        if spec.value_type is bool:
            lines.append(f"{key} = {str(value).lower()}")
        elif spec.value_type is int:
            lines.append(f"{key} = {value}")
        else:
            # Escape backslashes BEFORE quotes: a lone backslash (e.g. in a
            # Windows path or key material) would otherwise yield an invalid
            # escape sequence in the TOML basic string.
            escaped = str(value).replace("\\", "\\\\").replace('"', '\\"')
            lines.append(f'{key} = "{escaped}"')
    lines.append("")
    return "\n".join(lines)
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def save_config(paths: AppPaths, config: AppConfig) -> None:
    """Atomically write *config* to paths.config_file.

    The TOML text is written to a temporary file in the same directory and
    then renamed over the config file, so readers never observe a partial
    write. If writing fails, the temporary file is removed instead of being
    left behind next to the config.
    """
    text = _serialize_toml(config)
    paths.config_file.parent.mkdir(parents=True, exist_ok=True)
    tmp = tempfile.NamedTemporaryFile(
        "w", delete=False, encoding="utf-8", dir=str(paths.config_file.parent)
    )
    tmp_path = Path(tmp.name)
    try:
        with tmp:
            tmp.write(text)
        tmp_path.replace(paths.config_file)
    except BaseException:
        # Clean up the orphaned temp file on any failure, then re-raise.
        tmp_path.unlink(missing_ok=True)
        raise
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def config_to_display_dict(config: AppConfig, mask_sensitive: bool = True) -> dict[str, Any]:
    """Return config values in schema order, masking sensitive non-empty ones.

    Sensitive values are replaced with "********" unless *mask_sensitive*
    is False or the value is falsy (None/empty).
    """
    values = asdict(config)
    display: dict[str, Any] = {}
    for key, spec in get_config_schema().items():
        value = values.get(key)
        hide = mask_sensitive and spec.sensitive and bool(value)
        display[key] = "********" if hide else value
    return display
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def update_config_values(paths: AppPaths, updates: dict[str, str]) -> AppConfig:
    """Apply string-valued *updates* to the stored config and persist it.

    A raw value of "null" clears the key back to None. Raises ConfigError
    for unknown keys or values that fail casting/validation. Returns the
    saved AppConfig.
    """
    schema = get_config_schema()
    unrecognized = [key for key in updates if key not in schema]
    if unrecognized:
        known = ", ".join(sorted(schema.keys()))
        raise ConfigError(f"Unknown config key(s): {', '.join(unrecognized)}. Known keys: {known}")

    state = asdict(load_config(paths))
    changed: list[str] = []

    for key, raw in updates.items():
        spec = schema[key]
        new_value = None if raw == "null" else _cast_value(spec, raw)
        if spec.validator and new_value is not None and not spec.validator(new_value):
            raise ConfigError(f"Invalid value for '{key}': {raw}")
        if state.get(key) != new_value:
            changed.append(key)
        state[key] = new_value

    result = AppConfig(**state)
    save_config(paths, result)
    if changed:
        logger.info("config updated: keys=%s", changed)
    return result
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def reset_config_values(paths: AppPaths, keys: list[str] | None = None) -> AppConfig:
    """Restore the given keys (or all keys when None/empty) to schema defaults.

    Raises ConfigError for unknown keys; persists and returns the result.
    """
    schema = get_config_schema()
    selected = keys or list(schema.keys())
    bad = [key for key in selected if key not in schema]
    if bad:
        raise ConfigError(f"Unknown config key(s): {', '.join(bad)}")

    state = asdict(load_config(paths))
    for key in selected:
        state[key] = schema[key].default
    refreshed = AppConfig(**state)
    save_config(paths, refreshed)
    logger.info("config reset: keys=%s", selected)
    return refreshed
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Domain models for runtime flow."""
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass(slots=True)
class TaskGitPolicy:
    """Optional git settings attached to a task.

    Every field defaults to None, meaning "no task-level override" —
    presumably resolved against run/config defaults by the caller (confirm).
    """

    base_branch: str | None = None  # branch to base work on, if overridden
    work_branch: str | None = None  # branch the work is done on, if overridden
    push_on_success: bool | None = None  # push after a successful run, if overridden
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass(slots=True)
class TaskStep:
    """One step of a task's execution plan.

    Which of prompt_file / prompt_inline / command applies depends on the
    step's type; all three default to None.
    """

    id: str  # step identifier
    type: str  # step kind; dispatched on elsewhere (see step executor)
    tool_preferences: list[str] = field(default_factory=list)  # preferred tools, in order
    prompt_file: str | None = None  # path to a prompt file
    prompt_inline: str | None = None  # inline prompt text
    command: str | None = None  # shell command for command-style steps
    metadata: dict[str, Any] = field(default_factory=dict)  # free-form extra data
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@dataclass(slots=True)
class PromptTask:
    """A task parsed from a prompt source document."""

    external_id: str | None  # identifier from the source document, if present
    source_path: str  # path of the document this task was parsed from
    title: str  # short task title
    description: str  # full task description
    priority: str = "medium"  # priority label
    status: str = "todo"  # lifecycle status label
    labels: list[str] = field(default_factory=list)  # free-form tags
    target_repo: str | None = None  # repository the task targets, if specified
    target_paths: list[str] = field(default_factory=list)  # paths within the repo, if specified
    depends_on: list[str] = field(default_factory=list)  # references to prerequisite tasks
    acceptance_criteria: list[str] = field(default_factory=list)  # done-criteria bullet points
    steps: list[TaskStep] = field(default_factory=list)  # ordered execution plan
    git: TaskGitPolicy = field(default_factory=TaskGitPolicy)  # per-task git overrides
    raw_content: str = ""  # original document text
    last_seen_hash: str = ""  # content hash when last observed (change detection, presumably)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass(slots=True)
class BackendContext:
    """Execution parameters handed to a backend for one run."""

    run_id: int  # identifier of the run this invocation belongs to
    timeout_seconds: int  # time budget for the backend invocation
    max_steps: int  # upper bound on steps the backend may take
    safety_settings: dict[str, Any]  # backend-specific safety options (schema unspecified here)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@dataclass(slots=True)
class BackendResult:
    """Outcome reported by a single backend invocation."""

    success: bool  # whether the backend reported success
    summary: str  # human-readable outcome summary
    stdout: str = ""  # captured standard output
    stderr: str = ""  # captured standard error
    tool_invocations: list[dict[str, Any]] = field(default_factory=list)  # structured tool-call records
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass(slots=True)
class StepExecutionResult:
    """Outcome of executing one task step through a backend."""

    step_id: str  # id of the TaskStep that was executed
    step_type: str  # the step's type at execution time
    backend: str  # name of the backend that ran the step
    success: bool  # whether the step succeeded
    summary: str  # human-readable outcome summary
    stdout: str = ""  # captured standard output
    stderr: str = ""  # captured standard error
    tool_invocations: list[dict[str, Any]] = field(default_factory=list)  # structured tool-call records
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@dataclass(slots=True)
class ValidationStepResult:
    """Outcome of a single validation step."""

    name: str  # name of the validation that ran
    success: bool  # whether the validation passed
    details: str  # human-readable details (e.g. failure output)
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
class OrchestratorError(Exception):
    """Base exception for expected orchestration failures.

    Catching this type covers every error this package raises deliberately
    (ConfigError, RepoError, BackendError, ValidationError).
    """
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class ConfigError(OrchestratorError):
    """Raised when configuration or application state is missing or invalid."""
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class RepoError(OrchestratorError):
    """Raised for git command failures or bad repository state."""
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BackendError(OrchestratorError):
    """Raised when invoking an execution backend fails."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ValidationError(OrchestratorError):
    """Raised when one or more validation steps fail."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Git integration services."""
|
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import re
|
|
5
|
+
|
|
6
|
+
from orchestrator.exceptions import RepoError
|
|
7
|
+
from orchestrator.utils.process import run_command
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _sanitize(name: str) -> str:
|
|
11
|
+
return re.sub(r"[^a-zA-Z0-9._/-]+", "-", name).strip("-").lower()
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class GitService:
    """Thin wrapper over the git CLI for repository and branch operations.

    Every git invocation goes through run_command with this service's
    timeout_seconds so a wedged git process cannot block indefinitely.
    Failures surface as RepoError carrying git's stderr when available.
    """

    def __init__(self, timeout_seconds: int = 900):
        # Timeout applied to every git subprocess this service spawns.
        self.timeout_seconds = timeout_seconds

    def ensure_git_repo(self, path: Path) -> None:
        """Raise RepoError unless *path* is inside a git work tree."""
        result = run_command(["git", "rev-parse", "--is-inside-work-tree"], cwd=path, timeout=self.timeout_seconds)
        if result.code != 0 or "true" not in result.stdout:
            raise RepoError(f"Not a git repository: {path}")

    def is_clean(self, path: Path) -> bool:
        """Return True when `git status --porcelain` reports no changes."""
        result = run_command(["git", "status", "--porcelain"], cwd=path, timeout=self.timeout_seconds)
        return result.code == 0 and result.stdout.strip() == ""

    def current_branch(self, path: Path) -> str:
        """Return the branch name from `git rev-parse --abbrev-ref HEAD`."""
        result = run_command(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=path, timeout=self.timeout_seconds)
        if result.code != 0:
            raise RepoError(result.stderr.strip() or "Failed to read current branch")
        return result.stdout.strip()

    def checkout_branch(self, path: Path, branch: str) -> None:
        """Check out an existing branch; raise RepoError on failure."""
        result = run_command(["git", "checkout", branch], cwd=path, timeout=self.timeout_seconds)
        if result.code != 0:
            raise RepoError(result.stderr.strip() or f"Failed to checkout branch {branch}")

    def checkout_or_create_branch(self, path: Path, branch: str, start_point: str, allow_create: bool) -> None:
        """Check out *branch*, creating it from *start_point* when permitted.

        Raises RepoError when the branch is missing and allow_create is False.
        """
        if self.local_branch_exists(path, branch):
            self.checkout_branch(path, branch)
            return
        if not allow_create:
            raise RepoError(f"Branch '{branch}' does not exist and branch creation is disabled")
        result = run_command(["git", "checkout", "-b", branch, start_point], cwd=path, timeout=self.timeout_seconds)
        if result.code != 0:
            raise RepoError(result.stderr.strip() or f"Failed to create branch {branch} from {start_point}")

    def local_branch_exists(self, path: Path, branch: str) -> bool:
        """Return True when refs/heads/<branch> exists locally."""
        result = run_command(["git", "show-ref", "--verify", f"refs/heads/{branch}"], cwd=path, timeout=self.timeout_seconds)
        return result.code == 0

    def remote_branch_exists(self, path: Path, branch: str) -> bool:
        """Return True when the primary remote advertises *branch*."""
        remote = self.primary_remote(path)
        result = run_command(["git", "ls-remote", "--heads", remote, branch], cwd=path, timeout=self.timeout_seconds)
        return result.code == 0 and bool(result.stdout.strip())

    def remotes(self, path: Path) -> list[str]:
        """Return the configured remote names; raise RepoError on git failure."""
        result = run_command(["git", "remote"], cwd=path, timeout=self.timeout_seconds)
        if result.code != 0:
            raise RepoError(result.stderr.strip() or "Failed to read git remotes")
        return [line.strip() for line in result.stdout.splitlines() if line.strip()]

    def primary_remote(self, path: Path) -> str:
        """Return "origin" when present, else the first remote; raise if none."""
        remotes = self.remotes(path)
        if "origin" in remotes:
            return "origin"
        if remotes:
            return remotes[0]
        raise RepoError("No git remote configured for repository")

    def has_commits(self, path: Path) -> bool:
        """Return True when HEAD resolves, i.e. at least one commit exists."""
        result = run_command(["git", "rev-parse", "--verify", "HEAD"], cwd=path, timeout=self.timeout_seconds)
        return result.code == 0

    def checkout_or_create_tracking_branch(self, path: Path, branch: str, create_and_push_if_missing: bool) -> None:
        """Check out *branch* tracking the primary remote, creating it if allowed.

        Attempts in order: existing local branch; remote branch (fetched,
        checked out, and set as upstream); finally — only when
        create_and_push_if_missing is True — a new branch, bootstrapped with
        an empty commit if the repo has no commits, then pushed with
        upstream set.
        """
        remote = self.primary_remote(path)
        if self.local_branch_exists(path, branch):
            self.checkout_branch(path, branch)
            return

        if self.remote_branch_exists(path, branch):
            fetch = run_command(["git", "fetch", remote, branch], cwd=path, timeout=self.timeout_seconds)
            if fetch.code != 0:
                raise RepoError(fetch.stderr.strip() or f"Failed to fetch {remote}/{branch}")
            result = run_command(
                ["git", "checkout", "-b", branch, f"{remote}/{branch}"],
                cwd=path,
                timeout=self.timeout_seconds,
            )
            if result.code != 0:
                raise RepoError(result.stderr.strip() or f"Failed to checkout remote branch {remote}/{branch}")
            upstream = run_command(
                ["git", "branch", "--set-upstream-to", f"{remote}/{branch}", branch],
                cwd=path,
                timeout=self.timeout_seconds,
            )
            if upstream.code != 0:
                raise RepoError(upstream.stderr.strip() or f"Failed to set upstream to {remote}/{branch}")
            return

        if not create_and_push_if_missing:
            raise RepoError(f"Remote branch '{branch}' not found")

        create_result = run_command(["git", "checkout", "-b", branch], cwd=path, timeout=self.timeout_seconds)
        if create_result.code != 0:
            raise RepoError(create_result.stderr.strip() or f"Failed to create branch {branch}")

        if not self.has_commits(path):
            # A branch with no commits cannot be pushed, so create an empty
            # bootstrap commit using a fixed local identity.
            bootstrap_commit = run_command(
                [
                    "git",
                    "-c",
                    "user.name=execforge",
                    "-c",
                    "user.email=execforge@local",
                    "commit",
                    "--allow-empty",
                    "-m",
                    f"chore(execforge): bootstrap branch {branch}",
                ],
                cwd=path,
                timeout=self.timeout_seconds,
            )
            if bootstrap_commit.code != 0:
                raise RepoError(
                    bootstrap_commit.stderr.strip()
                    or f"Created branch '{branch}' but failed to create bootstrap commit"
                )

        push_result = run_command(["git", "push", "-u", remote, branch], cwd=path, timeout=self.timeout_seconds)
        if push_result.code != 0:
            raise RepoError(
                push_result.stderr.strip()
                or f"Created local branch '{branch}' but failed to push it to {remote}"
            )

    def make_agent_branch_name(self, agent_name: str, task_ref: str) -> str:
        """Build a sanitized 'agent/<agent>/<task>' branch name."""
        return f"agent/{_sanitize(agent_name)}/{_sanitize(task_ref)}"

    def commit_all(self, path: Path, message: str) -> str | None:
        """Stage everything and commit; return the new sha, or None when clean."""
        if self.is_clean(path):
            return None
        add_result = run_command(["git", "add", "."], cwd=path, timeout=self.timeout_seconds)
        if add_result.code != 0:
            raise RepoError(add_result.stderr.strip() or "git add failed")
        commit_result = run_command(["git", "commit", "-m", message], cwd=path, timeout=self.timeout_seconds)
        if commit_result.code != 0:
            raise RepoError(commit_result.stderr.strip() or "git commit failed")
        sha_result = run_command(["git", "rev-parse", "HEAD"], cwd=path, timeout=self.timeout_seconds)
        if sha_result.code != 0:
            raise RepoError(sha_result.stderr.strip() or "Failed to read commit sha")
        return sha_result.stdout.strip()

    def push(self, path: Path, branch: str) -> None:
        """Push *branch* to the primary remote, setting upstream."""
        remote = self.primary_remote(path)
        result = run_command(["git", "push", "-u", remote, branch], cwd=path, timeout=self.timeout_seconds)
        if result.code != 0:
            raise RepoError(result.stderr.strip() or "git push failed")

    def clone(self, repo_url: str, clone_path: Path, branch: str, bootstrap_missing_branch: bool = False) -> None:
        """Clone *repo_url* checked out at *branch* into *clone_path*.

        When the remote branch does not exist, the repo is cloned at its
        default branch and — only if bootstrap_missing_branch is True — the
        missing branch is created and pushed via
        checkout_or_create_tracking_branch; otherwise RepoError is raised.
        """
        clone_path.parent.mkdir(parents=True, exist_ok=True)
        # FIX: both clone invocations previously omitted the timeout that every
        # other git call in this service passes, so a hung clone never timed out.
        result = run_command(
            ["git", "clone", "--branch", branch, repo_url, str(clone_path)],
            cwd=clone_path.parent,
            timeout=self.timeout_seconds,
        )
        if result.code == 0:
            return

        # Heuristic match on git's "Remote branch ... not found" message.
        missing_branch = "Remote branch" in result.stderr and "not found" in result.stderr
        if not missing_branch:
            raise RepoError(result.stderr.strip() or "git clone failed")

        fallback = run_command(
            ["git", "clone", repo_url, str(clone_path)],
            cwd=clone_path.parent,
            timeout=self.timeout_seconds,
        )
        if fallback.code != 0:
            raise RepoError(fallback.stderr.strip() or "git clone failed")

        if not bootstrap_missing_branch:
            raise RepoError(
                f"Remote branch '{branch}' not found. Re-run with missing-branch bootstrap enabled to create and push it."
            )
        self.checkout_or_create_tracking_branch(clone_path, branch, create_and_push_if_missing=True)

    def pull(
        self,
        repo_path: Path,
        strategy: str = "ff-only",
        branch: str | None = None,
        bootstrap_missing_branch: bool = False,
    ) -> None:
        """Update *repo_path* from its primary remote.

        When *branch* is given, it is checked out (and optionally
        bootstrapped) first. strategy "ff-only" pulls with --ff-only,
        "none" skips pulling entirely, and any other value pulls with
        --rebase.
        """
        remote = self.primary_remote(repo_path)
        if branch:
            self.checkout_or_create_tracking_branch(
                repo_path,
                branch,
                create_and_push_if_missing=bootstrap_missing_branch,
            )
        if strategy == "none":
            return
        if branch:
            cmd = ["git", "pull", "--ff-only", remote, branch] if strategy == "ff-only" else ["git", "pull", "--rebase", remote, branch]
        else:
            cmd = ["git", "pull", "--ff-only"] if strategy == "ff-only" else ["git", "pull", "--rebase"]
        result = run_command(cmd, cwd=repo_path, timeout=self.timeout_seconds)
        if result.code != 0:
            raise RepoError(result.stderr.strip() or "git pull failed")
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from datetime import datetime
|
|
4
|
+
import logging
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def configure_logging(logs_dir: Path, level: str = "INFO", console_debug: bool = False) -> Path:
    """Configure the root logger and return the path of the new log file.

    Creates *logs_dir* if needed, then attaches a FileHandler writing to
    orchestrator-<UTC timestamp>.log. When *console_debug* is True, a
    StreamHandler at the same level is attached as well.

    NOTE(review): the format string requires run_id/agent/task/base_branch/
    branch/step on every record (see ContextAdapter); records logged without
    that extra will not format cleanly.
    """
    from datetime import timezone  # local import: module only imports datetime

    logs_dir.mkdir(parents=True, exist_ok=True)
    # datetime.utcnow() is deprecated (and returns a naive datetime);
    # use an explicitly aware UTC timestamp instead.
    stamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
    log_file = logs_dir / f"orchestrator-{stamp}.log"

    root = logging.getLogger()
    root.setLevel(level.upper())
    # Close previous handlers before dropping them so reconfiguring does not
    # leak the file descriptors of earlier FileHandlers.
    for handler in root.handlers[:]:
        handler.close()
    root.handlers.clear()

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(name)s "
        "run=%(run_id)s agent=%(agent)s task=%(task)s "
        "base=%(base_branch)s branch=%(branch)s step=%(step)s "
        "%(message)s"
    )

    if console_debug:
        stream = logging.StreamHandler()
        stream.setFormatter(formatter)
        stream.setLevel(level.upper())
        root.addHandler(stream)

    file_handler = logging.FileHandler(log_file, encoding="utf-8")
    file_handler.setFormatter(formatter)
    file_handler.setLevel(level.upper())
    root.addHandler(file_handler)

    return log_file
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ContextAdapter(logging.LoggerAdapter):
    """LoggerAdapter that supplies the context fields the log format expects.

    process() merges three layers into kwargs["extra"], later layers
    winning: placeholder defaults ("-") for run_id/agent/task/base_branch/
    branch/step, then the adapter's own extra, then the per-call extra
    (ignored unless it is a dict).
    """

    def process(self, msg, kwargs):
        supplied = kwargs.get("extra")
        merged: dict[str, object] = {
            "run_id": "-",
            "agent": "-",
            "task": "-",
            "base_branch": "-",
            "branch": "-",
            "step": "-",
        }
        if self.extra:
            merged.update(self.extra)
        if isinstance(supplied, dict):
            merged.update(supplied)
        kwargs["extra"] = merged
        return msg, kwargs
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Prompt and task parsing helpers."""
|