agentweaver 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/README.md +226 -200
  2. package/dist/artifacts.js +101 -56
  3. package/dist/errors.js +7 -0
  4. package/dist/executors/{codex-local-executor.js → codex-executor.js} +4 -4
  5. package/dist/executors/configs/{codex-local-config.js → codex-config.js} +1 -1
  6. package/dist/executors/configs/jira-fetch-config.js +2 -0
  7. package/dist/executors/configs/telegram-notifier-config.js +3 -0
  8. package/dist/executors/fetch-gitlab-diff-executor.js +1 -1
  9. package/dist/executors/fetch-gitlab-review-executor.js +1 -1
  10. package/dist/executors/git-commit-executor.js +25 -0
  11. package/dist/executors/telegram-notifier-executor.js +54 -0
  12. package/dist/flow-state.js +46 -1
  13. package/dist/gitlab.js +13 -8
  14. package/dist/index.js +507 -520
  15. package/dist/interactive-ui.js +495 -87
  16. package/dist/jira.js +52 -5
  17. package/dist/pipeline/auto-flow.js +6 -6
  18. package/dist/pipeline/context.js +1 -0
  19. package/dist/pipeline/declarative-flows.js +7 -4
  20. package/dist/pipeline/flow-catalog.js +60 -23
  21. package/dist/pipeline/flow-model-settings.js +77 -0
  22. package/dist/pipeline/flow-specs/auto-common.json +446 -0
  23. package/dist/pipeline/flow-specs/auto-golang.json +563 -0
  24. package/dist/pipeline/flow-specs/{bug-analyze.json → bugz/bug-analyze.json} +43 -25
  25. package/dist/pipeline/flow-specs/{bug-fix.json → bugz/bug-fix.json} +5 -4
  26. package/dist/pipeline/flow-specs/git-commit.json +196 -0
  27. package/dist/pipeline/flow-specs/{gitlab-diff-review.json → gitlab/gitlab-diff-review.json} +20 -50
  28. package/dist/pipeline/flow-specs/gitlab/gitlab-review.json +165 -0
  29. package/dist/pipeline/flow-specs/{mr-description.json → gitlab/mr-description.json} +17 -10
  30. package/dist/pipeline/flow-specs/{run-go-linter-loop.json → go/run-go-linter-loop.json} +40 -14
  31. package/dist/pipeline/flow-specs/{run-go-tests-loop.json → go/run-go-tests-loop.json} +40 -14
  32. package/dist/pipeline/flow-specs/implement.json +5 -4
  33. package/dist/pipeline/flow-specs/plan.json +40 -148
  34. package/dist/pipeline/flow-specs/{review-fix.json → review/review-fix.json} +73 -13
  35. package/dist/pipeline/flow-specs/review/review-loop.json +280 -0
  36. package/dist/pipeline/flow-specs/review/review-project.json +87 -0
  37. package/dist/pipeline/flow-specs/review/review.json +126 -0
  38. package/dist/pipeline/flow-specs/task-describe.json +191 -11
  39. package/dist/pipeline/launch-profile-config.js +38 -0
  40. package/dist/pipeline/node-registry.js +75 -45
  41. package/dist/pipeline/nodes/build-failure-summary-node.js +16 -29
  42. package/dist/pipeline/nodes/build-review-fix-prompt-node.js +36 -0
  43. package/dist/pipeline/nodes/codex-prompt-node.js +41 -0
  44. package/dist/pipeline/nodes/commit-message-form-node.js +79 -0
  45. package/dist/pipeline/nodes/git-commit-form-node.js +138 -0
  46. package/dist/pipeline/nodes/git-commit-node.js +28 -0
  47. package/dist/pipeline/nodes/git-status-node.js +221 -0
  48. package/dist/pipeline/nodes/gitlab-review-artifacts-node.js +10 -6
  49. package/dist/pipeline/nodes/jira-context-node.js +10 -0
  50. package/dist/pipeline/nodes/llm-prompt-node.js +62 -0
  51. package/dist/pipeline/nodes/plan-codex-node.js +1 -1
  52. package/dist/pipeline/nodes/read-file-node.js +11 -0
  53. package/dist/pipeline/nodes/review-findings-form-node.js +18 -14
  54. package/dist/pipeline/nodes/select-files-form-node.js +72 -0
  55. package/dist/pipeline/nodes/telegram-notifier-node.js +28 -0
  56. package/dist/pipeline/nodes/user-input-node.js +29 -8
  57. package/dist/pipeline/nodes/write-selection-file-node.js +46 -0
  58. package/dist/pipeline/prompt-registry.js +2 -4
  59. package/dist/pipeline/prompt-runtime.js +13 -3
  60. package/dist/pipeline/registry.js +6 -8
  61. package/dist/pipeline/spec-compiler.js +5 -0
  62. package/dist/pipeline/spec-loader.js +18 -7
  63. package/dist/pipeline/spec-types.js +7 -3
  64. package/dist/pipeline/spec-validator.js +4 -0
  65. package/dist/pipeline/types.js +1 -0
  66. package/dist/pipeline/value-resolver.js +40 -38
  67. package/dist/prompts.js +104 -110
  68. package/dist/runtime/agentweaver-home.js +8 -0
  69. package/dist/runtime/command-resolution.js +0 -38
  70. package/dist/runtime/env-loader.js +43 -0
  71. package/dist/runtime/process-runner.js +45 -1
  72. package/dist/structured-artifact-schema-registry.js +53 -0
  73. package/dist/structured-artifact-schemas.json +0 -20
  74. package/dist/structured-artifacts.js +3 -43
  75. package/dist/user-input.js +30 -2
  76. package/package.json +2 -6
  77. package/Dockerfile.codex +0 -56
  78. package/dist/executors/claude-executor.js +0 -46
  79. package/dist/executors/codex-docker-executor.js +0 -27
  80. package/dist/executors/configs/claude-config.js +0 -12
  81. package/dist/executors/configs/codex-docker-config.js +0 -10
  82. package/dist/executors/configs/verify-build-config.js +0 -7
  83. package/dist/executors/verify-build-executor.js +0 -123
  84. package/dist/pipeline/flow-specs/auto.json +0 -979
  85. package/dist/pipeline/flow-specs/gitlab-review.json +0 -317
  86. package/dist/pipeline/flow-specs/plan-opencode.json +0 -603
  87. package/dist/pipeline/flow-specs/preflight.json +0 -206
  88. package/dist/pipeline/flow-specs/review-project.json +0 -243
  89. package/dist/pipeline/flow-specs/review.json +0 -312
  90. package/dist/pipeline/flow-specs/run-linter-loop.json +0 -155
  91. package/dist/pipeline/flow-specs/run-tests-loop.json +0 -155
  92. package/dist/pipeline/flows/preflight-flow.js +0 -19
  93. package/dist/pipeline/nodes/claude-prompt-node.js +0 -54
  94. package/dist/pipeline/nodes/codex-docker-prompt-node.js +0 -32
  95. package/dist/pipeline/nodes/codex-local-prompt-node.js +0 -32
  96. package/dist/pipeline/nodes/review-claude-node.js +0 -38
  97. package/dist/pipeline/nodes/review-reply-codex-node.js +0 -40
  98. package/dist/pipeline/nodes/verify-build-node.js +0 -15
  99. package/dist/runtime/docker-runtime.js +0 -51
  100. package/docker-compose.yml +0 -445
  101. package/verify_build.sh +0 -105
package/dist/prompts.js CHANGED
@@ -1,113 +1,107 @@
1
- export const BASE_PROMPT_HEADER = "Основная задача:";
2
- export const EXTRA_PROMPT_HEADER = "Дополнительные указания:";
3
- export const PLAN_PROMPT_TEMPLATE = "Посмотри и проанализируй задачу в {jira_task_file}. " +
4
- "Обязательно проанализируй дополнительные материалы из Jira attachments manifest {jira_attachments_manifest_file} и текстовый контекст {jira_attachments_context_file}; если attachment содержит более детальную постановку, ограничения, список файлов, migration strategy или инварианты, считай attachment source of truth для planning наравне с Jira issue. " +
5
- "Сначала создай структурированные JSON-артефакты, они являются source of truth для следующих flow. " +
6
- "Человекочитаемые markdown-файлы сделай как подробное производное представление этих JSON-артефактов для пользователя, а не как краткое summary. " +
7
- "Markdown не должен влиять на структуру JSON: сначала определи корректные JSON-типы, затем строй markdown как производное представление. " +
8
- "Не схлопывай конкретику из задачи и attachment: сохраняй явные файлы, методы, API, инварианты, migration steps, DB-ограничения, business rules и acceptance criteria. " +
9
- "Разработай системный дизайн решения и запиши JSON в {design_json_file}, затем markdown в {design_file}. " +
10
- 'Для {design_json_file} используй строго JSON-объект вида { "summary": "string", "goals": ["string"], "non_goals": ["string"], "components": ["string"], "current_state": ["string"], "target_state": ["string"], "affected_code": [{ "area": "string", "files": ["string"], "details": "string" }], "business_rules": ["string"], "decisions": [{ "component": "string", "decision": "string", "rationale": "string" }], "migration_strategy": ["string"], "database_changes": ["string"], "api_changes": ["string"], "risks": ["string"], "acceptance_criteria": ["string"], "open_questions": ["string"] }. ' +
11
- 'Строго соблюдай типы. В частности, current_state и target_state всегда должны быть массивами строк, даже если пункт только один: ["..."], а не "...". ' +
12
- 'Точно так же files, goals, non_goals, components, business_rules, migration_strategy, database_changes, api_changes, risks, acceptance_criteria и open_questions должны быть массивами, а не одиночными строками. ' +
13
- "Разработай подробный план реализации и запиши JSON в {plan_json_file}, затем markdown в {plan_file}. " +
14
- 'Для {plan_json_file} используй строго JSON-объект вида { "summary": "string", "prerequisites": ["string"], "workstreams": ["string"], "implementation_steps": [{ "id": "string", "title": "string", "details": "string", "affected_files": ["string"], "verification": ["string"], "dependencies": ["string"] }], "tests": ["string"], "rollout_notes": ["string"], "follow_up_items": ["string"] }. ' +
15
- 'Строго соблюдай типы. prerequisites, workstreams, tests, rollout_notes, follow_up_items, affected_files, verification и dependencies всегда должны быть массивами, даже если элемент только один. implementation_steps должен быть массивом объектов, а не одним объектом. ' +
16
- 'Каждый элемент implementation_steps должен иметь вид { "id": "step-1", "title": "string", "details": "string", "affected_files": ["string"], "verification": ["string"], "dependencies": ["string"] }. ' +
17
- 'Нельзя использовать "verification": "..." или "affected_files": "...". Нужно использовать массивы: ["..."]. ' +
18
- "Разработай план тестирования для QA и запиши JSON в {qa_json_file}, затем markdown в {qa_file}. " +
19
- 'Для {qa_json_file} используй строго JSON-объект вида { "summary": "string", "test_scenarios": [{ "id": "string", "title": "string", "expected_result": "string" }], "non_functional_checks": ["string"] }. ' +
20
- 'Строго соблюдай типы. test_scenarios должен быть массивом объектов, а non_functional_checks должен быть массивом строк, даже если пункт только один. ' +
21
- "Markdown для design и plan оформи развёрнуто, с отдельными секциями Summary, Current State, Target State, Affected Code, Decisions, Migration/DB Changes, Risks, Implementation Steps, Tests, Rollout. " +
22
- "JSON-файлы должны быть валидными и содержать только JSON без markdown-обёртки. ";
23
- export const PLAN_QUESTIONS_PROMPT_TEMPLATE = "Посмотри и проанализируй задачу в {jira_task_file}. " +
24
- "Обязательно проанализируй дополнительные материалы из Jira attachments manifest {jira_attachments_manifest_file} и текстовый контекст {jira_attachments_context_file}; если attachment содержит более детальную постановку, ограничения, список файлов, migration strategy или инварианты, считай attachment source of truth для planning наравне с Jira issue. " +
25
- "Перед финальным planning определи, нужны ли уточнения от пользователя. " +
26
- 'Если уточнения не нужны, запиши в {planning_questions_json_file} строго JSON-объект { "summary": "string", "questions": [] }. ' +
27
- 'Если уточнения нужны, запиши в {planning_questions_json_file} строго JSON-объект { "summary": "string", "questions": [{ "id": "string", "question": "string", "details": "string", "required": true, "multiline": false, "placeholder": "string" }] }. ' +
28
- 'questions всегда должен быть массивом. required и multiline должны быть boolean, а не строками "true"/"false". ' +
29
- "Задавай только вопросы, без ответа на которые design/plan могут оказаться неверными или слишком предположительными. " +
30
- "Не задавай очевидные, декоративные или дублирующие вопросы. " +
31
- "Обычно достаточно 1-5 вопросов. " +
32
- "JSON-файл должен быть валидным и содержать только JSON без markdown-обёртки. ";
33
- export const BUG_ANALYZE_PROMPT_TEMPLATE = "Посмотри и проанализируй баг в {jira_task_file}. " +
34
- "Сначала создай структурированные JSON-артефакты, они являются source of truth для следующих flow. " +
35
- "Человекочитаемые markdown-файлы сделай как краткое производное представление этих JSON-артефактов для пользователя. " +
36
- "Запиши структурированный анализ бага в {bug_analyze_json_file}, затем краткую markdown-версию в {bug_analyze_file}. " +
37
- "Запиши структурированный дизайн исправления в {bug_fix_design_json_file}, затем краткую markdown-версию в {bug_fix_design_file}. " +
38
- "Запиши структурированный план реализации в {bug_fix_plan_json_file}, затем краткую markdown-версию в {bug_fix_plan_file}. " +
39
- "JSON-файлы должны быть валидными и содержать только JSON без markdown-обёртки. " +
40
- 'Для {bug_analyze_json_file} используй строго JSON-объект { "summary": "string", "suspected_root_cause": { "hypothesis": "string", "confidence": "string" }, "reproduction_steps": ["string"], "affected_components": ["string"], "evidence": ["string"], "risks": ["string"], "open_questions": ["string"] }. ' +
41
- 'reproduction_steps, affected_components, evidence, risks и open_questions всегда должны быть массивами строк. suspected_root_cause всегда должен быть объектом, а не строкой. ' +
42
- 'Для {bug_fix_design_json_file} используй строго JSON-объект { "summary": "string", "goals": ["string"], "non_goals": ["string"], "target_components": ["string"], "proposed_changes": [{ "component": "string", "change": "string", "rationale": "string" }], "alternatives_considered": [{ "option": "string", "decision": "string", "rationale": "string" }], "risks": ["string"], "validation_strategy": ["string"] }. ' +
43
- 'goals, non_goals, target_components, risks и validation_strategy всегда должны быть массивами строк. proposed_changes и alternatives_considered всегда должны быть массивами объектов. ' +
44
- 'Для {bug_fix_plan_json_file} используй строго JSON-объект { "summary": "string", "prerequisites": ["string"], "implementation_steps": [{ "id": "string", "title": "string", "details": "string" }], "tests": ["string"], "rollout_notes": ["string"] }. ' +
45
- 'prerequisites, tests и rollout_notes всегда должны быть массивами строк. implementation_steps всегда должен быть массивом объектов. ';
46
- export const BUG_FIX_PROMPT_TEMPLATE = "Используй только структурированные артефакты как source of truth. " +
47
- "Проанализируй баг по {bug_analyze_json_file}. " +
48
- "Используй дизайн исправления из {bug_fix_design_json_file}. " +
49
- "Используй план реализации из {bug_fix_plan_json_file}. " +
50
- "Markdown-артефакты предназначены только для чтения человеком и не должны определять реализацию. " +
51
- "После этого приступай к реализации исправления в коде. ";
52
- export const MR_DESCRIPTION_PROMPT_TEMPLATE = "Посмотри задачу в {jira_task_file} и текущие изменения в репозитории. " +
53
- "Подготовь очень краткое intent-описание для merge request без подробностей реализации, списков файлов и технических деталей. " +
54
- "Сначала запиши source-of-truth JSON в {mr_description_json_file} в виде объекта { summary: string }, затем производную markdown-версию в {mr_description_file}. ";
55
- export const IMPLEMENT_PROMPT_TEMPLATE = "Используй только структурированные артефакты как source of truth. " +
56
- "Проанализируй системный дизайн {design_json_file}, план реализации {plan_json_file} и приступай к реализации по плану. " +
57
- "Markdown-артефакты предназначены только для чтения человеком и не должны определять реализацию. ";
58
- export const REVIEW_PROMPT_TEMPLATE = "Проведи код-ревью текущих изменений. " +
59
- "Используй только структурированные артефакты как source of truth: задачу в {jira_task_file}, дизайн в {design_json_file} и план в {plan_json_file}. " +
60
- 'Сначала запиши структурированный результат в {review_json_file} в виде строго JSON-объекта { "summary": "string", "ready_to_merge": true, "findings": [{ "severity": "string", "title": "string", "description": "string" }] }. ' +
61
- 'ready_to_merge должен быть boolean, а не строкой. findings всегда должен быть массивом объектов, даже если замечание одно или их нет. ' +
62
- "Затем запиши производную markdown-версию в {review_file}. " +
63
- "Если ready_to_merge=true и нет блокеров, препятствующих merge - создай файл ready-to-merge.md.";
64
- export const REVIEW_PROJECT_PROMPT_TEMPLATE = "Проведи код-ревью текущих изменений в проекте без Jira-контекста. " +
65
- "Оцени качество изменений по текущему коду, тестам, рискам регрессий и общему инженерному качеству. " +
66
- 'Сначала запиши структурированный результат в {review_json_file} в виде строго JSON-объекта { "summary": "string", "ready_to_merge": true, "findings": [{ "severity": "string", "title": "string", "description": "string" }] }. ' +
67
- 'ready_to_merge должен быть boolean, а findings всегда должен быть массивом объектов. ' +
68
- "Затем запиши производную markdown-версию в {review_file}. " +
69
- "Если ready_to_merge=true и нет блокеров, создай файл {ready_to_merge_file}.";
70
- export const GITLAB_DIFF_REVIEW_PROMPT_TEMPLATE = "Проведи код-ревью diff merge request из GitLab. " +
71
- "Используй structured diff artifact {gitlab_diff_json_file} как source of truth, а markdown {gitlab_diff_file} только как удобное представление для чтения человеком. " +
72
- "Оцени только изменения из diff: корректность, риски регрессий, отсутствие тестов, опасные edge cases, нарушения контрактов и поддерживаемость. " +
73
- 'Сначала запиши структурированный результат в {review_json_file} в виде строго JSON-объекта { "summary": "string", "ready_to_merge": true, "findings": [{ "severity": "string", "title": "string", "description": "string" }] }. ' +
74
- 'ready_to_merge должен быть boolean, а findings всегда должен быть массивом объектов. ' +
75
- "Затем запиши производную markdown-версию в {review_file}. " +
76
- "Если ready_to_merge=true и нет блокеров, создай файл {ready_to_merge_file}.";
77
- export const REVIEW_REPLY_PROMPT_TEMPLATE = "Твой коллега провёл код-ревью и записал структурированный результат в {review_json_file}. " +
78
- "Используй только структурированные артефакты как source of truth: задачу в {jira_task_file}, дизайн в {design_json_file}, план в {plan_json_file} и review в {review_json_file}. " +
79
- 'Сначала запиши структурированный ответ в {review_reply_json_file} в виде строго JSON-объекта { "summary": "string", "ready_to_merge": true, "responses": [{ "finding_title": "string", "disposition": "string", "action": "string" }] }. ' +
80
- 'ready_to_merge должен быть boolean, а responses всегда должен быть массивом объектов. ' +
81
- "Затем запиши производную markdown-версию в {review_reply_file}.";
82
- export const REVIEW_REPLY_PROJECT_PROMPT_TEMPLATE = "Твой коллега провёл код-ревью и записал структурированный результат в {review_json_file}. " +
83
- "Используй review в {review_json_file} как source of truth, разберись в замечаниях и подготовь структурированный ответ. " +
84
- 'Сначала запиши структурированный ответ в {review_reply_json_file} в виде строго JSON-объекта { "summary": "string", "ready_to_merge": true, "responses": [{ "finding_title": "string", "disposition": "string", "action": "string" }] }. ' +
85
- 'ready_to_merge должен быть boolean, а responses всегда должен быть массивом объектов. ' +
86
- "Затем запиши производную markdown-версию в {review_reply_file}.";
87
- export const REVIEW_SUMMARY_PROMPT_TEMPLATE = "Посмотри в {review_file}. " +
88
- "Сделай краткий список комментариев без подробностей, 3-7 пунктов. " +
89
- "Запиши результат в {review_summary_file}.";
90
- export const REVIEW_REPLY_SUMMARY_PROMPT_TEMPLATE = "Посмотри в {review_reply_file}. " +
91
- "Сделай краткий список ответов и итоговых действий без подробностей, 3-7 пунктов. " +
92
- "Запиши результат в {review_reply_summary_file}.";
93
- export const REVIEW_FIX_PROMPT_TEMPLATE = "Используй только структурированные артефакты как source of truth. " +
94
- "Проанализируй комментарии в {review_reply_json_file}. " +
95
- "Исправь то, что содержится в дополнительных указаниях, а если таковых нет - исправь все пункты. " +
96
- "По окончании обязательно прогони вне песочницы линтер, все тесты, сгенерируй make swagger. " +
97
- "Исправь ошибки линтера и тестов, если будут. " +
98
- 'По завершении сначала запиши структурированный отчёт в {review_fix_json_file} в виде строго JSON-объекта { "summary": "string", "completed_actions": ["string"], "validation_steps": ["string"] }, затем производную markdown-версию в {review_fix_file}. ' +
99
- 'completed_actions и validation_steps всегда должны быть массивами строк, даже если пункт только один.';
100
- export const TASK_SUMMARY_PROMPT_TEMPLATE = "Посмотри в {jira_task_file}. " +
101
- "Сделай краткое резюме задачи, на 1-2 абзаца. " +
102
- "Сначала запиши source-of-truth JSON в {task_summary_json_file} в виде объекта { summary: string }, затем markdown-версию в {task_summary_file}.";
103
- export const JIRA_DESCRIPTION_PROMPT_TEMPLATE = "Посмотри задачу в {jira_task_file}. " +
104
- "Проанализируй код и оформи краткое описание для Jira, упомяни ключевые точки, модели данных, сервисы, REST-методы. " +
105
- "Сначала запиши source-of-truth JSON в {jira_description_json_file} в виде объекта { summary: string }, затем markdown-версию в {jira_description_file}.";
106
- export const RUN_GO_TESTS_LOOP_FIX_PROMPT_TEMPLATE = "Используй структурированный результат последнего запуска run_go_tests.py из {tests_result_json_file} как source of truth. " +
107
- "Проанализируй последнюю ошибку проверки, исправь код и подготовь изменения так, чтобы следующий прогон run_go_tests.py прошёл успешно.";
108
- export const RUN_GO_LINTER_LOOP_FIX_PROMPT_TEMPLATE = "Используй структурированный результат последнего запуска run_go_linter.py из {linter_result_json_file} как source of truth. " +
109
- "Проанализируй последнюю ошибку линтера или генерации, исправь код и подготовь изменения так, чтобы следующий прогон run_go_linter.py прошёл успешно.";
110
- export const AUTO_REVIEW_FIX_EXTRA_PROMPT = "Исправлять только блокеры, критикалы и важные";
1
+ import { renderStructuredArtifactSchema, } from "./structured-artifact-schema-registry.js";
2
+ export const BASE_PROMPT_HEADER = "Primary task:";
3
+ export const EXTRA_PROMPT_HEADER = "Additional instructions:";
4
+ export const STRUCTURED_JSON_LANGUAGE_INSTRUCTION = "All structured JSON artifacts are machine-readable and must use English for all generated semantic string values. " +
5
+ "If a JSON artifact needs to preserve verbatim user-provided or external source text, keep that quoted source text unchanged, but write all generated summaries, titles, descriptions, decisions, and explanations in English. ";
6
+ function strictSchemaInstruction(outputFileVar, schemaId) {
7
+ return (`The artifact format for ${outputFileVar} must fully conform to schema ${schemaId} from the registry. ` +
8
+ "Do not skip required fields, do not rename fields, do not change types, do not replace arrays with objects or strings, and do not leave required strings empty. " +
9
+ "The final JSON must pass validation against this schema without manual corrections. " +
10
+ STRUCTURED_JSON_LANGUAGE_INSTRUCTION +
11
+ `Canonical schema:\n${renderStructuredArtifactSchema(schemaId)}\n`);
12
+ }
13
+ export const PLAN_PROMPT_TEMPLATE = "Review and analyze the task in {jira_task_file}. " +
14
+ "Be sure to analyze additional materials from Jira attachments manifest {jira_attachments_manifest_file} and text context {jira_attachments_context_file}; if an attachment contains more detailed requirements, constraints, file lists, migration strategy, or invariants, treat the attachment as source of truth for planning alongside the Jira issue. " +
15
+ "First create structured JSON artifacts - they are the source of truth for subsequent flows. " +
16
+ "Create human-readable markdown files as detailed derivative representations of these JSON artifacts for the user, not as brief summaries. " +
17
+ "Markdown should not influence JSON structure: first determine the correct JSON types, then build markdown as a derivative representation. " +
18
+ "Do not collapse specifics from the task and attachments: preserve explicit files, methods, APIs, invariants, migration steps, DB constraints, business rules, and acceptance criteria. " +
19
+ "Develop a system design for the solution and write JSON to {design_json_file}, then markdown to {design_file}. " +
20
+ strictSchemaInstruction("{design_json_file}", "implementation-design/v1") +
21
+ "Develop a detailed implementation plan and write JSON to {plan_json_file}, then markdown to {plan_file}. " +
22
+ strictSchemaInstruction("{plan_json_file}", "implementation-plan/v1") +
23
+ "Develop a QA test plan and write JSON to {qa_json_file}, then markdown to {qa_file}. " +
24
+ strictSchemaInstruction("{qa_json_file}", "qa-plan/v1") +
25
+ "Format markdown for design and plan comprehensively, with separate sections for Summary, Current State, Target State, Affected Code, Decisions, Migration/DB Changes, Risks, Implementation Steps, Tests, Rollout. " +
26
+ "JSON files must be valid and contain only JSON without markdown wrapping. ";
27
+ export const PLAN_QUESTIONS_PROMPT_TEMPLATE = "Review and analyze the task in {jira_task_file}. " +
28
+ "Be sure to analyze additional materials from Jira attachments manifest {jira_attachments_manifest_file} and text context {jira_attachments_context_file}; if an attachment contains more detailed requirements, constraints, file lists, migration strategy, or invariants, treat the attachment as source of truth for planning alongside the Jira issue. " +
29
+ "Before final planning, determine if any clarifications are needed from the user. " +
30
+ strictSchemaInstruction("{planning_questions_json_file}", "planning-questions/v1") +
31
+ "Ask only questions without which the design/plan could be incorrect or too speculative. " +
32
+ "Do not ask obvious, decorative, or duplicate questions. " +
33
+ "Usually 1-5 questions are sufficient. " +
34
+ "The JSON file must be valid and contain only JSON without markdown wrapping. ";
35
+ export const BUG_ANALYZE_PROMPT_TEMPLATE = "Review and analyze the bug in {jira_task_file}. " +
36
+ "First create structured JSON artifacts - they are the source of truth for subsequent flows. " +
37
+ "Create human-readable markdown files as brief derivative representations of these JSON artifacts for the user. " +
38
+ "Write structured bug analysis to {bug_analyze_json_file}, then a brief markdown version to {bug_analyze_file}. " +
39
+ "Write structured fix design to {bug_fix_design_json_file}, then a brief markdown version to {bug_fix_design_file}. " +
40
+ "Write structured implementation plan to {bug_fix_plan_json_file}, then a brief markdown version to {bug_fix_plan_file}. " +
41
+ "JSON files must be valid and contain only JSON without markdown wrapping. " +
42
+ strictSchemaInstruction("{bug_analyze_json_file}", "bug-analysis/v1") +
43
+ strictSchemaInstruction("{bug_fix_design_json_file}", "bug-fix-design/v1") +
44
+ strictSchemaInstruction("{bug_fix_plan_json_file}", "bug-fix-plan/v1");
45
+ export const BUG_FIX_PROMPT_TEMPLATE = "Use only structured artifacts as source of truth. " +
46
+ "Analyze the bug from {bug_analyze_json_file}. " +
47
+ "Use the fix design from {bug_fix_design_json_file}. " +
48
+ "Use the implementation plan from {bug_fix_plan_json_file}. " +
49
+ "Markdown artifacts are intended only for human reading and should not define the implementation. " +
50
+ "After that, proceed to implement the fix in code. ";
51
+ export const MR_DESCRIPTION_PROMPT_TEMPLATE = "Review the task in {jira_task_file} and the current changes in the repository. " +
52
+ "Prepare a very brief intent description for the merge request without implementation details, file lists, or technical details. " +
53
+ `First write the source-of-truth JSON to {mr_description_json_file}. ${strictSchemaInstruction("{mr_description_json_file}", "mr-description/v1")}Then write the derivative markdown version to {mr_description_file}. `;
54
+ export const IMPLEMENT_PROMPT_TEMPLATE = "Use only structured artifacts as source of truth. " +
55
+ "Analyze the system design {design_json_file}, implementation plan {plan_json_file}, and proceed with implementation according to the plan. " +
56
+ "Markdown artifacts are intended only for human reading and should not define the implementation. ";
57
+ export const REVIEW_PROMPT_TEMPLATE = "Conduct a code review of the current changes. " +
58
+ "Use only structured artifacts as source of truth: the task in {jira_task_file}, design in {design_json_file}, and plan in {plan_json_file}. " +
59
+ `First write the structured result to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "review-findings/v1")}` +
60
+ "Then write the derivative markdown version to {review_file}. " +
61
+ "If ready_to_merge=true and there are no blockers preventing merge - create the ready-to-merge.md file.";
62
+ export const REVIEW_PROJECT_PROMPT_TEMPLATE = "Conduct a code review of current changes in the project without Jira context. " +
63
+ "Evaluate the quality of changes based on current code, tests, regression risks, and overall engineering quality. " +
64
+ `First write the structured result to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "review-findings/v1")}` +
65
+ "Then write the derivative markdown version to {review_file}. " +
66
+ "If ready_to_merge=true and there are no blockers, create the {ready_to_merge_file} file.";
67
+ export const GITLAB_DIFF_REVIEW_PROMPT_TEMPLATE = "Conduct a code review of the GitLab merge request diff. " +
68
+ "Use the structured diff artifact {gitlab_diff_json_file} as source of truth, and markdown {gitlab_diff_file} only as a convenient human-readable representation. " +
69
+ "Evaluate only the changes from the diff: correctness, regression risks, missing tests, dangerous edge cases, contract violations, and maintainability. " +
70
+ `First write the structured result to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "review-findings/v1")}` +
71
+ "Then write the derivative markdown version to {review_file}. " +
72
+ "If ready_to_merge=true and there are no blockers, create the {ready_to_merge_file} file.";
73
+ export const REVIEW_SUMMARY_PROMPT_TEMPLATE = "Look at {review_file}. " +
74
+ "Create a brief list of comments without details, 3-7 items. " +
75
+ "Write the result to {review_summary_file}.";
76
+ export const REVIEW_FIX_PROMPT_TEMPLATE = "Use only structured artifacts as source of truth. " +
77
+ "Analyze the findings in {review_json_file}. " +
78
+ "Fix what is contained in the additional instructions, and if there are none - fix all items. " +
79
+ "After completion, be sure to run the linter outside the sandbox, all tests, generate make swagger. " +
80
+ "Fix any linter and test errors if they occur. " +
81
+ `Upon completion, first write the structured report to {review_fix_json_file}. ${strictSchemaInstruction("{review_fix_json_file}", "review-fix-report/v1")}Then write the derivative markdown version to {review_fix_file}.`;
82
+ export const TASK_SUMMARY_PROMPT_TEMPLATE = "Look at {jira_task_file}. " +
83
+ "Create a brief summary of the task, 1-2 paragraphs. " +
84
+ `First write the source-of-truth JSON to {task_summary_json_file}. ${strictSchemaInstruction("{task_summary_json_file}", "task-summary/v1")}Then write the markdown version to {task_summary_file}.`;
85
+ export const JIRA_DESCRIPTION_PROMPT_TEMPLATE = "Look at the task in {jira_task_file}. " +
86
+ "Formulate a typical Jira task description in simple product language, without overloading with technical details. " +
87
+ "Description structure: Problem, Context, What needs to be done, Acceptance criteria. " +
88
+ "Write only what helps understand the essence of the task and expected result; technical details, internal service names, data models, file names, REST methods, and implementation steps should only be mentioned if without them the task's meaning is lost. " +
89
+ `First write the source-of-truth JSON to {jira_description_json_file}. ${strictSchemaInstruction("{jira_description_json_file}", "jira-description/v1")}Then write the markdown version to {jira_description_file}.`;
90
+ export const RUN_GO_TESTS_LOOP_FIX_PROMPT_TEMPLATE = "Use the structured result of the last run of run_go_tests.py from {tests_result_json_file} as source of truth. " +
91
+ "Analyze the last test error, fix the code, and prepare changes so that the next run of run_go_tests.py succeeds.";
92
+ export const RUN_GO_LINTER_LOOP_FIX_PROMPT_TEMPLATE = "Use the structured result of the last run of run_go_linter.py from {linter_result_json_file} as source of truth. " +
93
+ "Analyze the last linter or generation error, fix the code, and prepare changes so that the next run of run_go_linter.py succeeds.";
94
+ export const COMMIT_MESSAGE_PROMPT_TEMPLATE = "Generate a commit message for the current changes. " +
95
+ "Task context (Jira): {jira_task_file}. " +
96
+ "Current changes (git diff): {git_diff_file}. " +
97
+ "List of changed files: {git_status_json_file}. " +
98
+ "Rules: " +
99
+ "1) Subject line ≤72 characters. " +
100
+ "2) Format: {taskKey}: {taskDescription} (e.g., DEMO-1234: Add user authentication). " +
101
+ "3) Include task key from Jira task. " +
102
+ "4) Commit message language: English. " +
103
+ "5) Write JSON to {commit_message_json_file}: {\"subject\": \"...\"}.";
104
+ export const AUTO_REVIEW_FIX_EXTRA_PROMPT = "Fix only blockers, criticals, and important issues";
111
105
  export function formatTemplate(template, values) {
112
106
  let result = template;
113
107
  for (const [key, value] of Object.entries(values)) {
@@ -0,0 +1,8 @@
1
import path from "node:path";

/**
 * Resolve the agentweaver home directory.
 *
 * A non-blank AGENTWEAVER_HOME environment variable (trimmed) takes
 * precedence and is resolved to an absolute path; otherwise the provided
 * package root is returned unchanged.
 *
 * @param {string} packageRoot - Fallback directory (the package install root).
 * @returns {string} The directory agentweaver should treat as its home.
 */
export function agentweaverHome(packageRoot) {
  const override = (process.env.AGENTWEAVER_HOME ?? "").trim();
  return override ? path.resolve(override) : packageRoot;
}
@@ -1,6 +1,5 @@
1
1
  import { accessSync, constants } from "node:fs";
2
2
  import { spawnSync } from "node:child_process";
3
- import path from "node:path";
4
3
  import { TaskRunnerError } from "../errors.js";
5
4
  function splitArgs(input) {
6
5
  const result = [];
@@ -100,40 +99,3 @@ export function resolveCmd(commandName, envVarName) {
100
99
  }
101
100
  throw new TaskRunnerError(`Missing required command: ${commandName}`);
102
101
  }
103
- export function requireDockerCompose() {
104
- if (!commandExists("docker")) {
105
- throw new TaskRunnerError("Missing required command: docker");
106
- }
107
- const result = spawnSync("docker", ["compose", "version"], { stdio: "ignore" });
108
- if (result.status !== 0) {
109
- throw new TaskRunnerError("Missing required docker compose plugin");
110
- }
111
- }
112
- export function resolveDockerComposeCmd() {
113
- const configured = process.env.DOCKER_COMPOSE_BIN?.trim() ?? "";
114
- if (configured) {
115
- const parts = splitArgs(configured);
116
- if (parts.length === 0) {
117
- throw new TaskRunnerError("DOCKER_COMPOSE_BIN is set but empty.");
118
- }
119
- const executable = parts[0] ?? "";
120
- try {
121
- if (path.isAbsolute(executable)) {
122
- accessSync(executable, constants.X_OK);
123
- return parts;
124
- }
125
- }
126
- catch {
127
- throw new TaskRunnerError(`Configured docker compose command is not executable: ${configured}`);
128
- }
129
- if (commandExists(executable)) {
130
- return parts;
131
- }
132
- throw new TaskRunnerError(`Configured docker compose command is not executable: ${configured}`);
133
- }
134
- if (commandExists("docker-compose")) {
135
- return ["docker-compose"];
136
- }
137
- requireDockerCompose();
138
- return ["docker", "compose"];
139
- }
@@ -0,0 +1,43 @@
1
+ import { existsSync, mkdirSync, readFileSync } from "node:fs";
2
+ import os from "node:os";
3
+ import path from "node:path";
4
/**
 * Load KEY=VALUE pairs from an env file into process.env.
 *
 * Blank lines, comment lines (#), and lines without "=" are skipped, as are
 * keys listed in protectedKeys (so higher-priority values are never
 * overridden). A leading "export " prefix is tolerated, and a matching pair
 * of surrounding single or double quotes is stripped from values.
 *
 * @param {string} envFilePath - Path to the .env file; a missing file is a no-op.
 * @param {Set<string>} protectedKeys - Keys that must not be overwritten.
 */
function parseEnvFile(envFilePath, protectedKeys) {
  if (!existsSync(envFilePath)) {
    return;
  }
  const lines = readFileSync(envFilePath, "utf8").split(/\r?\n/);
  for (const rawLine of lines) {
    let line = rawLine.trim();
    if (!line || line.startsWith("#")) {
      continue;
    }
    if (line.startsWith("export ")) {
      line = line.slice(7).trim();
    }
    const separatorIndex = line.indexOf("=");
    if (separatorIndex < 0) {
      continue;
    }
    const key = line.slice(0, separatorIndex).trim();
    if (!key || protectedKeys.has(key)) {
      continue;
    }
    let value = line.slice(separatorIndex + 1).trim();
    // Require length >= 2 before stripping quotes: for a lone quote character
    // startsWith and endsWith both match the same char, and slice(1, -1)
    // would silently turn it into an empty string.
    const isQuotedPair =
      value.length >= 2 &&
      ((value.startsWith('"') && value.endsWith('"')) ||
        (value.startsWith("'") && value.endsWith("'")));
    if (isQuotedPair) {
      value = value.slice(1, -1);
    }
    process.env[key] = value;
  }
}
32
// Name of the per-user configuration directory under the home directory.
const GLOBAL_CONFIG_DIR_NAME = ".agentweaver";

/** @returns {string} Absolute path of the per-user agentweaver config directory. */
function globalConfigDir() {
  return path.join(os.homedir(), GLOBAL_CONFIG_DIR_NAME);
}

/** Create the per-user config directory if it does not exist yet (idempotent). */
function ensureGlobalConfigDir() {
  const dir = globalConfigDir();
  mkdirSync(dir, { recursive: true });
}
38
/**
 * Load environment variables from tiered .env files.
 *
 * Precedence (highest first): the real shell environment, then the
 * project-level .agentweaver/.env, then the global ~/.agentweaver/.env.
 * Keys already present in the shell environment are snapshotted up front and
 * never overridden by either file; the project file is parsed last, so its
 * values win over the global file.
 *
 * @param {string} projectDir - Root directory of the current project.
 */
export function loadTieredEnv(projectDir) {
  ensureGlobalConfigDir();
  const shellKeys = new Set(Object.keys(process.env));
  const tieredEnvFiles = [
    path.join(globalConfigDir(), ".env"),
    path.join(projectDir, ".agentweaver", ".env"),
  ];
  for (const envFile of tieredEnvFiles) {
    parseEnvFile(envFile, shellKeys);
  }
}
@@ -1,6 +1,7 @@
1
1
  import path from "node:path";
2
2
  import process from "node:process";
3
3
  import { spawn } from "node:child_process";
4
+ import { FlowInterruptedError } from "../errors.js";
4
5
  import { getExecutionState, getOutputAdapter, printFramedBlock, printInfo, setCurrentExecutor } from "../tui.js";
5
6
  import { shellQuote } from "./command-resolution.js";
6
7
  export function formatCommand(argv, env) {
@@ -34,8 +35,11 @@ function formatLaunchDetails(statusLabel) {
34
35
  return lines.join("\n");
35
36
  }
36
37
  export async function runCommand(argv, options = {}) {
37
- const { env, dryRun = false, verbose = false, label, printFailureOutput = true } = options;
38
+ const { env, dryRun = false, verbose = false, label, printFailureOutput = true, signal } = options;
38
39
  const outputAdapter = getOutputAdapter();
40
+ if (signal?.aborted) {
41
+ throw new FlowInterruptedError();
42
+ }
39
43
  if (dryRun) {
40
44
  setCurrentExecutor(label ?? path.basename(argv[0] ?? argv.join(" ")));
41
45
  outputAdapter.writeStdout(`${formatCommand(argv, env)}\n`);
@@ -48,9 +52,29 @@ export async function runCommand(argv, options = {}) {
48
52
  stdio: "inherit",
49
53
  env,
50
54
  });
55
+ let abortTimer = null;
56
+ const abortHandler = () => {
57
+ child.kill("SIGTERM");
58
+ abortTimer = setTimeout(() => {
59
+ child.kill("SIGKILL");
60
+ }, 2000);
61
+ };
62
+ signal?.addEventListener("abort", abortHandler, { once: true });
51
63
  child.on("exit", (code) => (code === 0 ? resolve() : reject(new Error(String(code ?? 1)))));
52
64
  child.on("error", reject);
65
+ child.on("close", () => {
66
+ signal?.removeEventListener("abort", abortHandler);
67
+ if (abortTimer) {
68
+ clearTimeout(abortTimer);
69
+ }
70
+ });
53
71
  }).catch((error) => {
72
+ if (signal?.aborted) {
73
+ throw Object.assign(new FlowInterruptedError(), {
74
+ returnCode: 130,
75
+ output: "",
76
+ });
77
+ }
54
78
  const code = Number.parseInt(error.message, 10);
55
79
  throw Object.assign(new Error(`Command failed with exit code ${Number.isNaN(code) ? 1 : code}`), {
56
80
  returnCode: Number.isNaN(code) ? 1 : code,
@@ -86,12 +110,32 @@ export async function runCommand(argv, options = {}) {
86
110
  }
87
111
  try {
88
112
  const exitCode = await new Promise((resolve, reject) => {
113
+ let abortTimer = null;
114
+ const abortHandler = () => {
115
+ child.kill("SIGTERM");
116
+ abortTimer = setTimeout(() => {
117
+ child.kill("SIGKILL");
118
+ }, 2000);
119
+ };
120
+ signal?.addEventListener("abort", abortHandler, { once: true });
89
121
  child.on("error", reject);
90
122
  child.on("exit", (code) => resolve(code ?? 1));
123
+ child.on("close", () => {
124
+ signal?.removeEventListener("abort", abortHandler);
125
+ if (abortTimer) {
126
+ clearTimeout(abortTimer);
127
+ }
128
+ });
91
129
  });
92
130
  if (outputAdapter.renderAuxiliaryOutput !== false) {
93
131
  printInfo(`Закончили работу: ${statusLabel} (${formatDuration(Date.now() - startedAt)})`);
94
132
  }
133
+ if (signal?.aborted) {
134
+ throw Object.assign(new FlowInterruptedError(), {
135
+ returnCode: 130,
136
+ output,
137
+ });
138
+ }
95
139
  if (exitCode !== 0) {
96
140
  if (output && printFailureOutput && outputAdapter.supportsTransientStatus) {
97
141
  process.stderr.write(output);
@@ -0,0 +1,53 @@
1
+ import { existsSync, readFileSync } from "node:fs";
2
+ import path from "node:path";
3
+ import { fileURLToPath } from "node:url";
4
+ import { TaskRunnerError } from "./errors.js";
5
// Identifiers (name/version) of every structured artifact schema known to the
// runner; kept sorted alphabetically. Each id is expected to resolve through
// getStructuredArtifactSchema — TODO confirm the registry JSON contains them all.
export const STRUCTURED_ARTIFACT_SCHEMA_IDS = [
  "bug-analysis/v1",
  "bug-fix-design/v1",
  "bug-fix-plan/v1",
  "gitlab-mr-diff/v1",
  "gitlab-review/v1",
  "implementation-design/v1",
  "implementation-plan/v1",
  "jira-description/v1",
  "mr-description/v1",
  "planning-questions/v1",
  "qa-plan/v1",
  "review-findings/v1",
  "review-fix-report/v1",
  "task-summary/v1",
  "user-input/v1",
];
// Directory containing this module (ESM has no __dirname, hence fileURLToPath).
const MODULE_DIR = path.dirname(fileURLToPath(import.meta.url));
// The schema registry JSON ships next to this compiled module.
export const SCHEMA_REGISTRY_PATH = path.join(MODULE_DIR, "structured-artifact-schemas.json");
24
/** True when value is a plain object (non-null, non-array). */
function isRecord(value) {
  if (value === null || Array.isArray(value)) {
    return false;
  }
  return typeof value === "object";
}
27
/**
 * Read and parse the structured-artifact schema registry JSON from disk.
 *
 * @returns {Record<string, unknown>} Mapping of schema id to schema definition.
 * @throws {TaskRunnerError} When the registry file is missing, cannot be
 *   parsed as JSON, or does not contain a JSON object at the top level.
 */
export function loadStructuredArtifactSchemaRegistry() {
  if (!existsSync(SCHEMA_REGISTRY_PATH)) {
    throw new TaskRunnerError(`Structured artifact schema registry not found: ${SCHEMA_REGISTRY_PATH}`);
  }
  const raw = readFileSync(SCHEMA_REGISTRY_PATH, "utf8");
  let parsed;
  try {
    parsed = JSON.parse(raw);
  } catch (error) {
    throw new TaskRunnerError(`Failed to parse structured artifact schema registry: ${error.message}`);
  }
  if (!isRecord(parsed)) {
    throw new TaskRunnerError(`Structured artifact schema registry ${SCHEMA_REGISTRY_PATH} must be a JSON object.`);
  }
  return parsed;
}
43
// The registry is loaded eagerly once, at module import time; a broken or
// missing registry file therefore fails fast on startup.
const schemaRegistry = loadStructuredArtifactSchemaRegistry();

/**
 * Look up a schema definition by its id.
 *
 * @param {string} schemaId - Schema identifier, e.g. "task-summary/v1".
 * @returns {unknown} The registered schema definition.
 * @throws {TaskRunnerError} When the id has no (truthy) entry in the registry.
 */
export function getStructuredArtifactSchema(schemaId) {
  const schema = schemaRegistry[schemaId];
  if (schema) {
    return schema;
  }
  throw new TaskRunnerError(`Structured artifact schema is not registered: ${schemaId}`);
}

/**
 * Render the schema for schemaId as pretty-printed (2-space indent) JSON text.
 *
 * @param {string} schemaId - Schema identifier to render.
 * @returns {string} JSON text of the schema.
 */
export function renderStructuredArtifactSchema(schemaId) {
  const schema = getStructuredArtifactSchema(schemaId);
  return JSON.stringify(schema, null, 2);
}
@@ -521,26 +521,6 @@
521
521
  },
522
522
  "required": ["summary", "completed_actions", "validation_steps"]
523
523
  },
524
- "review-reply/v1": {
525
- "type": "object",
526
- "properties": {
527
- "summary": { "type": "string", "nonEmpty": true },
528
- "responses": {
529
- "type": "array",
530
- "items": {
531
- "type": "object",
532
- "properties": {
533
- "finding_title": { "type": "string", "nonEmpty": true },
534
- "disposition": { "type": "string", "nonEmpty": true },
535
- "action": { "type": "string", "nonEmpty": true }
536
- },
537
- "required": ["finding_title", "disposition", "action"]
538
- }
539
- },
540
- "ready_to_merge": { "type": "boolean" }
541
- },
542
- "required": ["summary", "responses", "ready_to_merge"]
543
- },
544
524
  "task-summary/v1": {
545
525
  "type": "object",
546
526
  "properties": {