agentweaver 0.1.14 → 0.1.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +29 -7
- package/dist/artifact-manifest.js +219 -0
- package/dist/artifacts.js +21 -1
- package/dist/doctor/checks/cwd-context.js +4 -3
- package/dist/doctor/checks/env-diagnostics.js +193 -71
- package/dist/doctor/checks/flow-readiness.js +212 -203
- package/dist/doctor/index.js +1 -1
- package/dist/doctor/orchestrator.js +18 -7
- package/dist/doctor/runner.js +9 -8
- package/dist/doctor/types.js +12 -0
- package/dist/flow-state.js +75 -15
- package/dist/index.js +499 -199
- package/dist/interactive/blessed-session.js +361 -0
- package/dist/interactive/controller.js +1293 -0
- package/dist/interactive/create-interactive-session.js +5 -0
- package/dist/interactive/ink/index.js +576 -0
- package/dist/interactive/progress.js +245 -0
- package/dist/interactive/selectors.js +14 -0
- package/dist/interactive/session.js +1 -0
- package/dist/interactive/state.js +34 -0
- package/dist/interactive/tree.js +155 -0
- package/dist/interactive/types.js +1 -0
- package/dist/interactive/view-model.js +1 -0
- package/dist/interactive-ui.js +159 -194
- package/dist/pipeline/context.js +1 -0
- package/dist/pipeline/declarative-flow-runner.js +212 -6
- package/dist/pipeline/declarative-flows.js +27 -0
- package/dist/pipeline/execution-routing-config.js +15 -0
- package/dist/pipeline/flow-catalog.js +23 -3
- package/dist/pipeline/flow-run-resume.js +29 -0
- package/dist/pipeline/flow-specs/auto-common.json +89 -360
- package/dist/pipeline/flow-specs/auto-golang.json +58 -363
- package/dist/pipeline/flow-specs/auto-simple.json +141 -0
- package/dist/pipeline/flow-specs/bugz/bug-analyze.json +2 -0
- package/dist/pipeline/flow-specs/bugz/bug-fix.json +1 -0
- package/dist/pipeline/flow-specs/design-review/design-review-loop.json +304 -0
- package/dist/pipeline/flow-specs/design-review.json +249 -0
- package/dist/pipeline/flow-specs/gitlab/gitlab-diff-review.json +11 -0
- package/dist/pipeline/flow-specs/gitlab/gitlab-review.json +2 -0
- package/dist/pipeline/flow-specs/gitlab/mr-description.json +1 -0
- package/dist/pipeline/flow-specs/go/run-go-linter-loop.json +2 -0
- package/dist/pipeline/flow-specs/go/run-go-tests-loop.json +2 -0
- package/dist/pipeline/flow-specs/implement.json +24 -5
- package/dist/pipeline/flow-specs/instant-task.json +177 -0
- package/dist/pipeline/flow-specs/normalize-task-source.json +311 -0
- package/dist/pipeline/flow-specs/plan-revise.json +267 -0
- package/dist/pipeline/flow-specs/plan.json +48 -70
- package/dist/pipeline/flow-specs/review/review-fix.json +24 -4
- package/dist/pipeline/flow-specs/review/review-loop.json +351 -45
- package/dist/pipeline/flow-specs/review/review-project-loop.json +590 -0
- package/dist/pipeline/flow-specs/review/review-project.json +12 -0
- package/dist/pipeline/flow-specs/review/review.json +37 -31
- package/dist/pipeline/flow-specs/task-describe.json +62 -2
- package/dist/pipeline/flow-specs/task-source/jira-fetch.json +70 -0
- package/dist/pipeline/flow-specs/task-source/manual-input.json +216 -0
- package/dist/pipeline/node-registry.js +49 -1
- package/dist/pipeline/node-runner.js +3 -2
- package/dist/pipeline/nodes/build-review-fix-prompt-node.js +5 -1
- package/dist/pipeline/nodes/clear-ready-to-merge-node.js +11 -0
- package/dist/pipeline/nodes/commit-message-form-node.js +8 -0
- package/dist/pipeline/nodes/design-review-verdict-node.js +36 -0
- package/dist/pipeline/nodes/ensure-summary-json-node.js +70 -0
- package/dist/pipeline/nodes/fetch-gitlab-diff-node.js +19 -2
- package/dist/pipeline/nodes/fetch-gitlab-review-node.js +19 -2
- package/dist/pipeline/nodes/flow-run-node.js +226 -7
- package/dist/pipeline/nodes/git-commit-form-node.js +8 -0
- package/dist/pipeline/nodes/gitlab-review-artifacts-node.js +19 -2
- package/dist/pipeline/nodes/jira-fetch-node.js +50 -4
- package/dist/pipeline/nodes/llm-prompt-node.js +32 -12
- package/dist/pipeline/nodes/planning-bundle-node.js +10 -0
- package/dist/pipeline/nodes/review-verdict-node.js +86 -0
- package/dist/pipeline/nodes/select-files-form-node.js +8 -0
- package/dist/pipeline/nodes/structured-summary-node.js +24 -0
- package/dist/pipeline/nodes/user-input-node.js +38 -3
- package/dist/pipeline/nodes/write-selection-file-node.js +20 -4
- package/dist/pipeline/prompt-registry.js +5 -1
- package/dist/pipeline/prompt-runtime.js +4 -1
- package/dist/pipeline/review-iteration.js +26 -0
- package/dist/pipeline/spec-compiler.js +2 -0
- package/dist/pipeline/spec-types.js +5 -0
- package/dist/pipeline/spec-validator.js +14 -0
- package/dist/pipeline/value-resolver.js +84 -1
- package/dist/prompts.js +82 -13
- package/dist/review-severity.js +45 -0
- package/dist/runtime/artifact-registry.js +402 -0
- package/dist/runtime/design-review-input-contract.js +113 -0
- package/dist/runtime/env-loader.js +3 -0
- package/dist/runtime/execution-routing-store.js +134 -0
- package/dist/runtime/execution-routing.js +227 -0
- package/dist/runtime/interactive-execution-routing.js +462 -0
- package/dist/runtime/plan-revise-input-contract.js +147 -0
- package/dist/runtime/planning-bundle.js +123 -0
- package/dist/runtime/ready-to-merge.js +31 -0
- package/dist/runtime/review-input-contract.js +100 -0
- package/dist/scope.js +11 -2
- package/dist/structured-artifact-schema-registry.js +10 -0
- package/dist/structured-artifact-schemas.json +257 -1
- package/dist/structured-artifacts.js +83 -6
- package/dist/user-input.js +70 -3
- package/package.json +6 -3
package/dist/prompts.js
CHANGED

@@ -10,12 +10,12 @@ function strictSchemaInstruction(outputFileVar, schemaId) {
     STRUCTURED_JSON_LANGUAGE_INSTRUCTION +
     `Canonical schema:\n${renderStructuredArtifactSchema(schemaId)}\n`);
 }
-export const PLAN_PROMPT_TEMPLATE = "Review and analyze the task in {
-    "
+export const PLAN_PROMPT_TEMPLATE = "Review and analyze the normalized task context in {task_context_json_file}. " +
+    "Use planning answers from {planning_answers_json_file} when they exist and treat them as structured user clarifications. " +
     "First create structured JSON artifacts - they are the source of truth for subsequent flows. " +
     "Create human-readable markdown files as detailed derivative representations of these JSON artifacts for the user, not as brief summaries. " +
     "Markdown should not influence JSON structure: first determine the correct JSON types, then build markdown as a derivative representation. " +
-    "Do not collapse specifics from the task
+    "Do not collapse specifics from the task context: preserve explicit files, methods, APIs, invariants, migration steps, DB constraints, business rules, acceptance criteria, and constraints. " +
     "Develop a system design for the solution and write JSON to {design_json_file}, then markdown to {design_file}. " +
     strictSchemaInstruction("{design_json_file}", "implementation-design/v1") +
     "Develop a detailed implementation plan and write JSON to {plan_json_file}, then markdown to {plan_file}. " +
@@ -24,14 +24,31 @@ export const PLAN_PROMPT_TEMPLATE = "Review and analyze the task in {jira_task_f
     strictSchemaInstruction("{qa_json_file}", "qa-plan/v1") +
     "Format markdown for design and plan comprehensively, with separate sections for Summary, Current State, Target State, Affected Code, Decisions, Migration/DB Changes, Risks, Implementation Steps, Tests, Rollout. " +
     "JSON files must be valid and contain only JSON without markdown wrapping. ";
-export const PLAN_QUESTIONS_PROMPT_TEMPLATE = "Review and analyze the task in {
-    "Be sure to analyze additional materials from Jira attachments manifest {jira_attachments_manifest_file} and text context {jira_attachments_context_file}; if an attachment contains more detailed requirements, constraints, file lists, migration strategy, or invariants, treat the attachment as source of truth for planning alongside the Jira issue. " +
+export const PLAN_QUESTIONS_PROMPT_TEMPLATE = "Review and analyze the normalized task context in {task_context_json_file}. " +
     "Before final planning, determine if any clarifications are needed from the user. " +
     strictSchemaInstruction("{planning_questions_json_file}", "planning-questions/v1") +
     "Ask only questions without which the design/plan could be incorrect or too speculative. " +
     "Do not ask obvious, decorative, or duplicate questions. " +
     "Usually 1-5 questions are sufficient. " +
     "The JSON file must be valid and contain only JSON without markdown wrapping. ";
+export const TASK_CONTEXT_FROM_JIRA_PROMPT_TEMPLATE = "Normalize the Jira task context into a connector-agnostic task context. " +
+    "Use Jira issue JSON {jira_task_file} as the primary source of truth. " +
+    "Also analyze Jira attachments manifest {jira_attachments_manifest_file} and attachments text context {jira_attachments_context_file}; if attachments contain more specific requirements, constraints, file lists, migration strategy, or invariants, preserve them in the normalized result. " +
+    "Write the source-of-truth JSON to {task_context_json_file}. " +
+    strictSchemaInstruction("{task_context_json_file}", "task-context/v1") +
+    "Set source_type to 'jira'. " +
+    "Write the derivative markdown version to {task_context_file}. " +
+    "Do not invent metadata that is not supported by Jira content or attachments. " +
+    "Keep JSON semantic fields in English. JSON files must contain only JSON without markdown wrapping.";
+export const TASK_CONTEXT_FROM_MANUAL_PROMPT_TEMPLATE = "Normalize the manual task input into a connector-agnostic task context. " +
+    "Use the structured user input in {task_input_file}. " +
+    "Use values.task_description as the primary source of truth and values.additional_instructions as additional context when present. " +
+    "Do not reshape the input into Jira-like fields or invent Jira metadata. " +
+    "Write the source-of-truth JSON to {task_context_json_file}. " +
+    strictSchemaInstruction("{task_context_json_file}", "task-context/v1") +
+    "Set source_type to 'manual'. " +
+    "Write the derivative markdown version to {task_context_file}. " +
+    "Keep JSON semantic fields in English. JSON files must contain only JSON without markdown wrapping.";
 export const BUG_ANALYZE_PROMPT_TEMPLATE = "Review and analyze the bug in {jira_task_file}. " +
     "First create structured JSON artifacts - they are the source of truth for subsequent flows. " +
     "Create human-readable markdown files as brief derivative representations of these JSON artifacts for the user. " +
@@ -52,28 +69,52 @@ export const MR_DESCRIPTION_PROMPT_TEMPLATE = "Review the task in {jira_task_fil
     "Prepare a very brief intent description for the merge request without implementation details, file lists, or technical details. " +
     `First write the source-of-truth JSON to {mr_description_json_file}. ${strictSchemaInstruction("{mr_description_json_file}", "mr-description/v1")}Then write the derivative markdown version to {mr_description_file}. `;
 export const IMPLEMENT_PROMPT_TEMPLATE = "Use only structured artifacts as source of truth. " +
-    "Analyze the system design {design_json_file}, implementation plan {plan_json_file}, and proceed with implementation according to
-    "
+    "Analyze the system design {design_json_file}, implementation plan {plan_json_file}, and QA plan {qa_json_file}, then proceed with implementation according to those artifacts. " +
+    "Treat the QA plan as the source of truth for the minimum required test scenarios, edge cases, regression checks, and validation behavior that the implementation must satisfy. " +
+    "When the repository contains automated tests, add or update tests for the key scenarios from the QA plan whenever it is practical in the current codebase. " +
+    "If some QA scenarios cannot be automated in the current change, still implement the code so those scenarios are satisfied and keep them explicit in your reasoning while editing. " +
+    "Markdown artifacts such as {design_file}, {plan_file}, and {qa_file} are intended only for human reading and should not define the implementation. ";
 export const REVIEW_PROMPT_TEMPLATE = "Conduct a code review of the current changes. " +
-    "Use only structured artifacts as source of truth
+    "Use only structured artifacts as source of truth. " +
+    "Required planning inputs: design markdown {design_file}, design JSON {design_json_file}, plan markdown {plan_file}, and plan JSON {plan_json_file}. " +
+    "Optional task context is provided through these variables and may contain the literal value 'not provided' when absent: normalized task context JSON {task_context_json_file}, Jira task JSON {jira_task_file}, instant-task input JSON {task_input_json_file}. " +
+    "When an optional variable is 'not provided', treat that source as unavailable and do not invent details from it. " +
+    "Evaluate the current code against the available task context and the structured planning artifacts. " +
+    "Use exactly one severity per finding from this list: blocker, critical, high, medium, low, info. " +
     `First write the structured result to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "review-findings/v1")}` +
+    "Then write the derivative markdown version to {review_file}. ";
+export const DESIGN_REVIEW_PROMPT_TEMPLATE = "Conduct a structured planning critique as a specification critic, not as an implementer. " +
+    "Use structured JSON artifacts as the source of truth for semantics. " +
+    "Required planning inputs: design markdown {design_file}, design JSON {design_json_file}, implementation plan markdown {plan_file}, implementation plan JSON {plan_json_file}. " +
+    "Review the markdown files as derivative human-readable renderings of the same planning run, but do not let markdown override the structured JSON. " +
+    "Optional supplemental context is provided through these variables and may contain the literal value 'not provided' when absent: normalized task context JSON {task_context_json_file}, QA markdown {qa_file}, QA JSON {qa_json_file}, Jira task JSON {jira_task_file}, Jira attachments manifest {jira_attachments_manifest_file}, Jira attachments context {jira_attachments_context_file}, planning answers JSON {planning_answers_json_file}, instant-task input JSON {task_input_json_file}. " +
+    "When an optional variable is 'not provided', treat that source as unavailable and do not invent details from it. " +
+    "Evaluate completeness, consistency, implementation readiness, risk coverage, QA coverage, and scope discipline across the available planning artifacts and optional context. " +
+    "Identify blocking findings, major non-blocking findings, warnings, missing information, consistency check results, QA coverage gaps, and concise recommended actions. " +
+    "Use exactly one status value: approved, approved_with_warnings, or needs_revision. " +
+    "Set status to needs_revision when any blocking finding exists or when required information is missing in a way that blocks safe implementation start. " +
+    "Set status to approved_with_warnings when there are no blocking findings, but there are major findings, warnings, non-blocking missing information items, QA coverage gaps, or non-blocking consistency issues. " +
+    "Set status to approved only when there are no unresolved blocking findings, major findings, warnings, missing information items, or QA coverage gaps. " +
+    `First write the structured design review to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "design-review/v1")}` +
     "Then write the derivative markdown version to {review_file}. " +
-    "
+    "Create ready-to-merge.md only when status is approved or approved_with_warnings. " +
+    "Do not create ready-to-merge.md when status is needs_revision.";
 export const REVIEW_PROJECT_PROMPT_TEMPLATE = "Conduct a code review of current changes in the project without Jira context. " +
     "Evaluate the quality of changes based on current code, tests, regression risks, and overall engineering quality. " +
+    "Use exactly one severity per finding from this list: blocker, critical, high, medium, low, info. " +
     `First write the structured result to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "review-findings/v1")}` +
-    "Then write the derivative markdown version to {review_file}. "
-    "If ready_to_merge=true and there are no blockers, create the {ready_to_merge_file} file.";
+    "Then write the derivative markdown version to {review_file}. ";
 export const GITLAB_DIFF_REVIEW_PROMPT_TEMPLATE = "Conduct a code review of the GitLab merge request diff. " +
     "Use the structured diff artifact {gitlab_diff_json_file} as source of truth, and markdown {gitlab_diff_file} only as a convenient human-readable representation. " +
     "Evaluate only the changes from the diff: correctness, regression risks, missing tests, dangerous edge cases, contract violations, and maintainability. " +
+    "Use exactly one severity per finding from this list: blocker, critical, high, medium, low, info. " +
     `First write the structured result to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "review-findings/v1")}` +
-    "Then write the derivative markdown version to {review_file}. "
-    "If ready_to_merge=true and there are no blockers, create the {ready_to_merge_file} file.";
+    "Then write the derivative markdown version to {review_file}. ";
 export const GITLAB_REVIEW_PROMPT_TEMPLATE = "Validate GitLab merge request review comments. " +
     "Use the structured GitLab review artifact {gitlab_review_json_file} as source of truth, and markdown {gitlab_review_file} only as a convenient human-readable representation. " +
     "Determine which comments are valid actionable findings that should be addressed in the current code. " +
     "Ignore comments that are obsolete, already resolved, duplicates, purely conversational, or not actionable. " +
+    "Use exactly one severity per finding from this list: blocker, critical, high, medium, low, info. " +
     "Normalize the remaining actionable findings into the review findings schema with accurate severities, concise titles, and concrete descriptions. " +
     "For each remaining finding, assess whether the complaint is fair in the current code and propose a concrete fix. " +
     `First write the structured result to {review_json_file}. ${strictSchemaInstruction("{review_json_file}", "review-findings/v1")}` +
@@ -116,6 +157,34 @@ export const COMMIT_MESSAGE_PROMPT_TEMPLATE = "Generate a commit message for the
     "3) Include task key from Jira task. " +
     "4) Commit message language: English. " +
     "5) Write JSON to {commit_message_json_file}: {\"subject\": \"...\"}.";
+export const PLAN_REVISE_PROMPT_TEMPLATE = "Revise the planning artifacts based on the design-review verdict. " +
+    "Use structured JSON artifacts as the source of truth for semantics. " +
+    "First revise the structured JSON artifacts; only after the JSON is complete and schema-valid should you write the derivative markdown files. " +
+    "Markdown must not influence JSON structure or types. " +
+    "The design-review verdict JSON {review_json_file} is the primary source of revision instructions — treat its blocking findings, major findings, and recommended actions as mandatory revision targets. " +
+    "The design-review markdown {review_file} is a derivative rendering only and must not override the structured verdict. " +
+    "Required source planning inputs: design JSON {design_json_file}, design markdown {design_file}, plan JSON {plan_json_file}, plan markdown {plan_file}. " +
+    "Optional source QA inputs (may be 'not provided'): QA JSON {qa_json_file}, QA markdown {qa_file}. " +
+    "When QA inputs are 'not provided', synthesize a new QA plan from the revised design and plan. " +
+    "Optional supplemental context (may be 'not provided'): normalized task context JSON {task_context_json_file}, Jira task JSON {jira_task_file}, Jira attachments manifest {jira_attachments_manifest_file}, Jira attachments context {jira_attachments_context_file}, planning answers JSON {planning_answers_json_file}, instant-task input JSON {task_input_json_file}. " +
+    "When an optional variable is 'not provided', treat that source as unavailable and do not invent details from it. " +
+    "For every blocking finding and major finding in the verdict, address it directly in the revised artifacts. " +
+    "Preserve all content from the original artifacts that is not directly addressed by findings in the verdict — do not drop details, sections, or decisions that remain valid. " +
+    "Preserve semantics, not source formatting conventions from the verdict: do not copy verdict-style nested objects into fields whose schemas require plain strings. " +
+    "For implementation-design/v1 specifically, goals, non_goals, components, current_state, target_state, business_rules, migration_strategy, database_changes, api_changes, risks, acceptance_criteria, and open_questions must remain arrays of non-empty strings. " +
+    "Only affected_code and decisions may contain nested objects in implementation-design/v1. " +
+    "If you need to preserve extra detail such as mitigation, resolution, or answer text for a string-array field, fold that detail into a single English sentence string instead of creating an object. " +
+    "Produce the following revised outputs: " +
+    `revised design JSON to {revised_design_json_file}. ${strictSchemaInstruction("{revised_design_json_file}", "implementation-design/v1")}` +
+    "Revised design markdown to {revised_design_file}. " +
+    `revised plan JSON to {revised_plan_json_file}. ${strictSchemaInstruction("{revised_plan_json_file}", "implementation-plan/v1")}` +
+    "Revised plan markdown to {revised_plan_file}. " +
+    `revised QA JSON to {revised_qa_json_file}. ${strictSchemaInstruction("{revised_qa_json_file}", "qa-plan/v1")}` +
+    "Revised QA markdown to {revised_qa_file}. " +
+    "Create ready-to-merge.md only when all blocking findings from the verdict have been addressed in the revised artifacts. " +
+    "Do not create ready-to-merge.md if any blocking finding remains unresolved. " +
+    "JSON files must be valid and contain only JSON without markdown wrapping. " +
+    "Markdown files must be comprehensive derivative representations of the corresponding JSON artifacts.";
 export const AUTO_REVIEW_FIX_EXTRA_PROMPT = "Fix only blockers, criticals, and important issues";
 export function formatTemplate(template, values) {
     let result = template;
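
All of these templates use {name} placeholders (for example {task_context_json_file} and {review_json_file}) that are filled in before the prompt is sent to an executor. The body of formatTemplate is truncated in this diff, so the sketch below only illustrates the assumed substitution semantics; fillTemplate and the file names are hypothetical and are not part of the package.

// Illustrative sketch only: assumes plain "{name}" -> values[name] substitution,
// which is what the templates above appear to expect.
function fillTemplate(template, values) {
    let result = template;
    for (const [name, value] of Object.entries(values)) {
        result = result.replaceAll(`{${name}}`, value);
    }
    return result;
}

const prompt = fillTemplate("Write the source-of-truth JSON to {task_context_json_file}. ", {
    task_context_json_file: "artifacts/task-context.json", // hypothetical path
});
// prompt === "Write the source-of-truth JSON to artifacts/task-context.json. "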

package/dist/review-severity.js
ADDED

@@ -0,0 +1,45 @@
+import { TaskRunnerError } from "./errors.js";
+export const REVIEW_SEVERITIES = ["blocker", "critical", "high", "medium", "low", "info"];
+export const AGENTWEAVER_REVIEW_BLOCKING_SEVERITIES_ENV = "AGENTWEAVER_REVIEW_BLOCKING_SEVERITIES";
+export const DEFAULT_REVIEW_BLOCKING_SEVERITIES = ["blocker", "critical", "high"];
+const REVIEW_SEVERITY_SET = new Set(REVIEW_SEVERITIES);
+export function normalizeReviewSeverity(value) {
+    if (typeof value !== "string") {
+        return null;
+    }
+    const normalized = value.trim().toLowerCase();
+    return REVIEW_SEVERITY_SET.has(normalized) ? normalized : null;
+}
+export function normalizeReviewSeverityList(values) {
+    const result = [];
+    for (const value of values) {
+        const normalized = normalizeReviewSeverity(value);
+        if (!normalized || result.includes(normalized)) {
+            continue;
+        }
+        result.push(normalized);
+    }
+    return result;
+}
+export function resolveBlockingReviewSeverities(values) {
+    const normalized = Array.isArray(values) ? normalizeReviewSeverityList(values) : [];
+    return normalized.length > 0 ? normalized : [...DEFAULT_REVIEW_BLOCKING_SEVERITIES];
+}
+export function parseReviewSeverityCsv(raw) {
+    const values = raw
+        .split(",")
+        .map((item) => item.trim())
+        .filter((item) => item.length > 0);
+    const normalized = normalizeReviewSeverityList(values);
+    if (values.length === 0 || normalized.length !== values.length) {
+        throw new TaskRunnerError(`Invalid review severity list '${raw}'. Allowed values: ${REVIEW_SEVERITIES.join(", ")}.`);
+    }
+    return normalized;
+}
+export function resolveReviewBlockingSeveritiesFromEnv(env = process.env) {
+    const raw = env[AGENTWEAVER_REVIEW_BLOCKING_SEVERITIES_ENV]?.trim();
+    if (!raw) {
+        return [...DEFAULT_REVIEW_BLOCKING_SEVERITIES];
+    }
+    return parseReviewSeverityCsv(raw);
+}
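
Based on the module above, the severity helpers normalize free-form severity strings and resolve the set of blocking severities, with AGENTWEAVER_REVIEW_BLOCKING_SEVERITIES as an optional override. The expected results below are inferred from that code; the relative import path is an assumption.

// Behavior inferred from review-severity.js above; the import path is an assumption.
import {
    normalizeReviewSeverityList,
    resolveReviewBlockingSeveritiesFromEnv,
} from "./review-severity.js";

// Invalid values are dropped, duplicates removed, casing and whitespace normalized.
normalizeReviewSeverityList(["HIGH", "high", "unknown", " medium "]); // ["high", "medium"]

// With the env var set, the CSV is parsed strictly (invalid entries throw TaskRunnerError).
process.env.AGENTWEAVER_REVIEW_BLOCKING_SEVERITIES = "blocker, critical";
resolveReviewBlockingSeveritiesFromEnv(); // ["blocker", "critical"]

// Without the env var, the default blocking set applies.
delete process.env.AGENTWEAVER_REVIEW_BLOCKING_SEVERITIES;
resolveReviewBlockingSeveritiesFromEnv(); // ["blocker", "critical", "high"]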

package/dist/runtime/artifact-registry.js
ADDED

@@ -0,0 +1,402 @@
+import { createHash } from "node:crypto";
+import { existsSync, mkdirSync, readFileSync, readdirSync, renameSync, rmSync, writeFileSync, } from "node:fs";
+import path from "node:path";
+import { buildArtifactId, buildLogicalKeyForPayload, buildPublicationKey, createProducerSummary, diagnosticsForManifest, inferPayloadContract, parseArtifactReference, validateArtifactManifest, } from "../artifact-manifest.js";
+import { artifactIndexFile, artifactManifestSidecarPath, ensureScopeWorkspaceDir, scopeArtifactsDir, scopeWorkspaceDir, } from "../artifacts.js";
+import { TaskRunnerError } from "../errors.js";
+import { isArtifactPayloadSchemaId, validateArtifactPayload, } from "../structured-artifacts.js";
+function nowIso8601() {
+    return new Date().toISOString();
+}
+function historyDir(scopeKey) {
+    return path.join(scopeArtifactsDir(scopeKey), "manifest-history");
+}
+function historyManifestPath(scopeKey, artifactId) {
+    return path.join(historyDir(scopeKey), `${encodeURIComponent(artifactId)}.manifest.json`);
+}
+function scopeKeyFromPayloadPath(payloadPath) {
+    const scopeMarker = `${path.sep}.agentweaver${path.sep}scopes${path.sep}`;
+    const markerIndex = payloadPath.lastIndexOf(scopeMarker);
+    if (markerIndex < 0) {
+        return null;
+    }
+    const scopePart = payloadPath.slice(markerIndex + scopeMarker.length);
+    return scopePart.split(path.sep)[0] || null;
+}
+function writeJsonAtomic(filePath, value) {
+    mkdirSync(path.dirname(filePath), { recursive: true });
+    const tempPath = `${filePath}.tmp-${process.pid}-${Date.now()}`;
+    writeFileSync(tempPath, `${JSON.stringify(value, null, 2)}\n`, "utf8");
+    renameSync(tempPath, filePath);
+}
+function computeContentHash(payloadPath) {
+    const hash = createHash("sha256");
+    hash.update(readFileSync(payloadPath));
+    return `sha256:${hash.digest("hex")}`;
+}
+function toIndexRecord(manifest) {
+    const producerSummary = manifest.producer.summary ?? createProducerSummary(manifest.producer);
+    return {
+        artifact_id: manifest.artifact_id,
+        logical_key: manifest.logical_key,
+        payload_path: manifest.payload_path,
+        manifest_path: manifest.manifest_path,
+        version: manifest.version,
+        status: manifest.status,
+        schema_id: manifest.schema_id,
+        schema_version: manifest.schema_version,
+        created_at: manifest.created_at,
+        content_hash: manifest.content_hash,
+        producer_summary: producerSummary,
+        ...(manifest.supersedes ? { supersedes: manifest.supersedes } : {}),
+        is_latest: manifest.status === "ready",
+    };
+}
+function tryLoadManifest(filePath) {
+    if (!existsSync(filePath)) {
+        return null;
+    }
+    try {
+        const parsed = JSON.parse(readFileSync(filePath, "utf8"));
+        validateArtifactManifest(parsed, filePath);
+        return parsed;
+    }
+    catch {
+        return null;
+    }
+}
+function collectManifestFiles(rootDir) {
+    if (!existsSync(rootDir)) {
+        return [];
+    }
+    const queue = [rootDir];
+    const files = [];
+    while (queue.length > 0) {
+        const current = queue.shift();
+        if (!current) {
+            continue;
+        }
+        const entries = readdirSync(current, { withFileTypes: true })
+            .sort((left, right) => left.name.localeCompare(right.name));
+        for (const entry of entries) {
+            const fullPath = path.join(current, entry.name);
+            if (entry.isDirectory()) {
+                queue.push(fullPath);
+                continue;
+            }
+            if (entry.isFile() && fullPath.endsWith(".manifest.json")) {
+                files.push(fullPath);
+            }
+        }
+    }
+    return files;
+}
+function collectScopeManifests(scopeKey) {
+    const allPaths = [
+        ...collectManifestFiles(scopeWorkspaceDir(scopeKey)),
+        ...collectManifestFiles(historyDir(scopeKey)),
+    ];
+    const manifests = new Map();
+    for (const filePath of allPaths) {
+        const manifest = tryLoadManifest(filePath);
+        if (!manifest) {
+            continue;
+        }
+        manifests.set(manifest.artifact_id, manifest);
+    }
+    return Array.from(manifests.values()).sort((left, right) => {
+        if (left.logical_key !== right.logical_key) {
+            return left.logical_key.localeCompare(right.logical_key);
+        }
+        return left.version - right.version;
+    });
+}
+function writeManifestSidecar(manifest) {
+    writeJsonAtomic(manifest.manifest_path, manifest);
+}
+function writeManifestHistory(scopeKey, manifest) {
+    writeJsonAtomic(historyManifestPath(scopeKey, manifest.artifact_id), manifest);
+}
+function isRegistryTempFile(scopeKey, filePath) {
+    const relativePath = path.relative(scopeWorkspaceDir(scopeKey), filePath);
+    if (relativePath.startsWith("..")) {
+        return false;
+    }
+    if (/\.manifest\.json\.tmp-[^/\\]+$/.test(relativePath)) {
+        return true;
+    }
+    return relativePath.startsWith(`${path.join(".artifacts", path.basename(artifactIndexFile(scopeKey)))}.tmp-`);
+}
+function removeStaleTempFiles(scopeKey) {
+    const workspaceDir = scopeWorkspaceDir(scopeKey);
+    if (!existsSync(workspaceDir)) {
+        return;
+    }
+    const queue = [workspaceDir];
+    while (queue.length > 0) {
+        const current = queue.shift();
+        if (!current) {
+            continue;
+        }
+        const entries = readdirSync(current, { withFileTypes: true })
+            .sort((left, right) => left.name.localeCompare(right.name));
+        for (const entry of entries) {
+            const fullPath = path.join(current, entry.name);
+            if (entry.isDirectory()) {
+                queue.push(fullPath);
+                continue;
+            }
+            if (entry.isFile() && isRegistryTempFile(scopeKey, fullPath)) {
+                rmSync(fullPath, { force: true });
+            }
+        }
+    }
+}
+function indexRecordEquals(left, right) {
+    return left.artifact_id === right.artifact_id
+        && left.logical_key === right.logical_key
+        && left.payload_path === right.payload_path
+        && left.manifest_path === right.manifest_path
+        && left.version === right.version
+        && left.status === right.status
+        && left.schema_id === right.schema_id
+        && left.schema_version === right.schema_version
+        && left.created_at === right.created_at
+        && left.content_hash === right.content_hash
+        && left.producer_summary === right.producer_summary
+        && left.supersedes === right.supersedes
+        && left.is_latest === right.is_latest;
+}
+function selectLatestReadyManifest(manifests) {
+    return manifests
+        .filter((manifest) => manifest.status === "ready")
+        .sort((left, right) => right.version - left.version)[0] ?? null;
+}
+function selectNewestManifest(manifests) {
+    return manifests
+        .slice()
+        .sort((left, right) => right.version - left.version)[0] ?? null;
+}
+function buildScopeRecords(scopeKey, computeDiagnostics) {
+    const manifests = collectScopeManifests(scopeKey).map((manifest) => ({
+        ...manifest,
+        diagnostics: computeDiagnostics(manifest),
+    }));
+    const latestByLogicalKey = new Map();
+    for (const manifest of manifests) {
+        if (manifest.status === "ready") {
+            latestByLogicalKey.set(manifest.logical_key, manifest.artifact_id);
+        }
+    }
+    return manifests.map((manifest) => ({
+        ...toIndexRecord(manifest),
+        is_latest: latestByLogicalKey.get(manifest.logical_key) === manifest.artifact_id,
+        manifest,
+    }));
+}
+function readIndexProjection(scopeKey) {
+    const indexPath = artifactIndexFile(scopeKey);
+    if (!existsSync(indexPath)) {
+        return null;
+    }
+    try {
+        const parsed = JSON.parse(readFileSync(indexPath, "utf8"));
+        return Array.isArray(parsed.records) ? parsed.records : null;
+    }
+    catch {
+        return null;
+    }
+}
+function syncIndexProjection(scopeKey, records) {
+    const projected = records.map(({ manifest: _manifest, ...record }) => record);
+    const current = readIndexProjection(scopeKey);
+    if (!current && projected.length === 0) {
+        return;
+    }
+    const needsRewrite = !current
+        || current.length !== projected.length
+        || current.some((record, index) => !indexRecordEquals(record, projected[index]));
+    if (!needsRewrite) {
+        return;
+    }
+    writeJsonAtomic(artifactIndexFile(scopeKey), {
+        scope: scopeKey,
+        generated_at: nowIso8601(),
+        records: projected,
+    });
+}
+export function createArtifactRegistry() {
+    return {
+        publish(input) {
+            ensureScopeWorkspaceDir(input.scopeKey);
+            removeStaleTempFiles(input.scopeKey);
+            const sidecarPath = artifactManifestSidecarPath(input.payloadPath);
+            const manifests = collectScopeManifests(input.scopeKey);
+            const logicalKey = input.logicalKey ?? buildLogicalKeyForPayload(input.scopeKey, input.payloadPath);
+            const publicationKey = buildPublicationKey({
+                runId: input.runId,
+                ...(input.publicationRunId ? { publicationRunId: input.publicationRunId } : {}),
+                flowId: input.flowId,
+                phaseId: input.phaseId,
+                stepId: input.stepId,
+                logicalKey,
+            });
+            const existing = manifests.find((candidate) => candidate.publication_key === publicationKey);
+            if (existing) {
+                if (existing.payload_path !== input.payloadPath) {
+                    throw new TaskRunnerError(`Manifest publication key collision for ${publicationKey}: ${existing.payload_path} and ${input.payloadPath} resolved to the same logical_key within one step.`);
+                }
+                return {
+                    ...toIndexRecord(existing),
+                    manifest: existing,
+                };
+            }
+            const contract = inferPayloadContract(input.scopeKey, input.payloadPath, {
+                ...(input.payloadFamily ? { payloadFamily: input.payloadFamily } : {}),
+                ...(input.schemaId ? { schemaId: input.schemaId } : {}),
+                ...(input.schemaVersion ? { schemaVersion: input.schemaVersion } : {}),
+            });
+            const versions = manifests
+                .filter((candidate) => candidate.logical_key === logicalKey)
+                .sort((left, right) => left.version - right.version);
+            const previousLatest = [...versions].reverse().find((candidate) => candidate.status === "ready") ?? versions.at(-1) ?? null;
+            const version = (versions.at(-1)?.version ?? 0) + 1;
+            const artifactId = buildArtifactId(input.scopeKey, logicalKey, version);
+            const manifest = {
+                artifact_id: artifactId,
+                logical_key: logicalKey,
+                scope: input.scopeKey,
+                run_id: input.runId,
+                flow_id: input.flowId,
+                phase_id: input.phaseId,
+                step_id: input.stepId,
+                kind: input.kind,
+                version,
+                payload_family: contract.payloadFamily,
+                schema_id: contract.schemaId,
+                schema_version: contract.schemaVersion,
+                created_at: nowIso8601(),
+                producer: {
+                    node: input.nodeKind,
+                    summary: createProducerSummary({
+                        node: input.nodeKind,
+                        ...(input.executor ? { executor: input.executor } : {}),
+                        ...(input.model ? { model: input.model } : {}),
+                    }),
+                    ...(input.executor ? { executor: input.executor } : {}),
+                    ...(input.model ? { model: input.model } : {}),
+                },
+                inputs: input.inputs,
+                content_hash: computeContentHash(input.payloadPath),
+                status: "ready",
+                payload_path: input.payloadPath,
+                manifest_path: sidecarPath,
+                publication_key: publicationKey,
+                ...(previousLatest ? { supersedes: previousLatest.artifact_id } : {}),
+            };
+            validateArtifactManifest(manifest, sidecarPath);
+            writeManifestHistory(input.scopeKey, manifest);
+            writeManifestSidecar(manifest);
+            if (previousLatest) {
+                const superseded = {
+                    ...previousLatest,
+                    status: "superseded",
+                    status_reason: `Superseded by ${artifactId}`,
+                };
+                writeManifestHistory(input.scopeKey, superseded);
+                if (previousLatest.manifest_path !== manifest.manifest_path) {
+                    writeManifestSidecar(superseded);
+                }
+            }
+            this.rebuildIndex(input.scopeKey);
+            return {
+                ...toIndexRecord(manifest),
+                manifest,
+            };
+        },
+        resolveArtifact(scopeKey, reference) {
+            removeStaleTempFiles(scopeKey);
+            const parsedReference = parseArtifactReference(reference);
+            if (!parsedReference) {
+                throw new TaskRunnerError(`Artifact reference '${reference}' is invalid. Expected an artifact_id or a logical reference in the form <logical_key>@latest or <logical_key>@vN.`);
+            }
+            const records = buildScopeRecords(scopeKey, this.computeDiagnostics);
+            syncIndexProjection(scopeKey, records);
+            const manifests = records.map((record) => record.manifest);
+            if (parsedReference.kind === "artifact-id") {
+                if (parsedReference.parsedId.scopeKey !== scopeKey) {
+                    throw new TaskRunnerError(`Artifact id '${reference}' belongs to scope '${parsedReference.parsedId.scopeKey}', expected '${scopeKey}'.`);
+                }
+                const manifest = manifests.find((candidate) => candidate.artifact_id === parsedReference.artifactId);
+                if (!manifest) {
+                    throw new TaskRunnerError(`Artifact '${reference}' was not found in scope '${scopeKey}'.`);
+                }
+                return manifest;
+            }
+            const candidates = manifests.filter((candidate) => candidate.logical_key === parsedReference.logicalKey);
+            if (parsedReference.version === "latest") {
+                const manifest = selectLatestReadyManifest(candidates);
+                if (!manifest) {
+                    throw new TaskRunnerError(`No ready artifact found for logical reference '${reference}' in scope '${scopeKey}'.`);
+                }
+                return manifest;
+            }
+            const manifest = candidates.find((candidate) => candidate.version === parsedReference.version);
+            if (!manifest) {
+                throw new TaskRunnerError(`Artifact reference '${reference}' was not found in scope '${scopeKey}'.`);
+            }
+            return manifest;
+        },
+        loadManifestByPayloadPath(payloadPath) {
+            const manifest = tryLoadManifest(artifactManifestSidecarPath(payloadPath));
+            const scopeKey = scopeKeyFromPayloadPath(payloadPath);
+            if (!scopeKey) {
+                return manifest;
+            }
+            removeStaleTempFiles(scopeKey);
+            const records = buildScopeRecords(scopeKey, this.computeDiagnostics);
+            syncIndexProjection(scopeKey, records);
+            const candidates = records
+                .map((record) => record.manifest)
+                .filter((candidate) => candidate.payload_path === payloadPath);
+            return selectLatestReadyManifest(candidates) ?? selectNewestManifest(candidates) ?? manifest;
+        },
+        listScopeArtifacts(scopeKey) {
+            removeStaleTempFiles(scopeKey);
+            const records = buildScopeRecords(scopeKey, this.computeDiagnostics);
+            syncIndexProjection(scopeKey, records);
+            return records;
+        },
+        rebuildIndex(scopeKey) {
+            removeStaleTempFiles(scopeKey);
+            const records = buildScopeRecords(scopeKey, this.computeDiagnostics);
+            syncIndexProjection(scopeKey, records);
+            return records;
+        },
+        resolveLineageInputFromPath(scopeKey, payloadPath) {
+            const manifest = this.loadManifestByPayloadPath(payloadPath);
+            if (!manifest) {
+                return {
+                    source: "external-path",
+                    path: payloadPath,
+                };
+            }
+            return {
+                source: "manifest",
+                path: payloadPath,
+                artifact_id: manifest.artifact_id,
+                logical_key: manifest.logical_key,
+                schema_id: manifest.schema_id,
+                schema_version: manifest.schema_version,
+            };
+        },
+        computeDiagnostics(manifest) {
+            return diagnosticsForManifest(manifest, (schemaId, payloadPath) => {
+                if (!isArtifactPayloadSchemaId(schemaId)) {
+                    throw new Error(`Structured artifact schema is not registered: ${schemaId}`);
+                }
+                validateArtifactPayload(payloadPath, schemaId);
+            });
+        },
+    };
+}
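
The registry above writes a manifest sidecar and a history entry for each published payload, marks the previous ready version of the same logical key as superseded, and keeps a per-scope index projection in sync. A rough usage sketch follows, assuming the payload file already exists inside the scope workspace; the scope key, ids, kind, node kind, and inputs shape are placeholders, not values taken from the package.

// Hypothetical usage sketch based only on the fields visible in the diff above.
import { createArtifactRegistry } from "./runtime/artifact-registry.js";

const registry = createArtifactRegistry();

const published = registry.publish({
    scopeKey: "DEMO-1",                                        // placeholder scope
    payloadPath: "/repo/.agentweaver/scopes/DEMO-1/plan.json", // placeholder path (must exist)
    runId: "run-001",
    flowId: "plan",
    phaseId: "planning",
    stepId: "write-plan",
    kind: "structured-json",                                   // placeholder kind
    nodeKind: "llm-prompt",                                    // placeholder producer node
    inputs: [],                                                // lineage inputs; exact shape not shown in this diff
});

// Resolve the newest ready version of the same logical artifact.
const latest = registry.resolveArtifact("DEMO-1", `${published.logical_key}@latest`);

// List everything the registry knows about in this scope (also refreshes the index file).
const records = registry.listScopeArtifacts("DEMO-1");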