prompt-language-shell 0.9.4 → 0.9.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +37 -6
- package/dist/components/Workflow.js +12 -1
- package/dist/components/controllers/Execute.js +3 -3
- package/dist/components/controllers/Schedule.js +6 -11
- package/dist/components/views/Debug.js +6 -1
- package/dist/components/views/Feedback.js +2 -13
- package/dist/components/views/Output.js +10 -8
- package/dist/components/views/Schedule.js +4 -3
- package/dist/components/views/Table.js +15 -0
- package/dist/configuration/io.js +10 -0
- package/dist/configuration/schema.js +6 -0
- package/dist/configuration/validation.js +5 -0
- package/dist/execution/processing.js +45 -14
- package/dist/execution/runner.js +1 -1
- package/dist/index.js +2 -0
- package/dist/services/anthropic.js +27 -31
- package/dist/services/colors.js +2 -1
- package/dist/services/filesystem.js +13 -1
- package/dist/services/logger.js +254 -21
- package/dist/services/messages.js +7 -4
- package/dist/services/monitor.js +288 -0
- package/dist/services/parser.js +13 -5
- package/dist/services/performance.js +14 -0
- package/dist/services/refinement.js +14 -11
- package/dist/services/router.js +159 -122
- package/dist/services/shell.js +32 -14
- package/dist/services/skills.js +35 -7
- package/dist/skills/execute.md +82 -3
- package/dist/skills/schedule.md +155 -0
- package/dist/tools/schedule.tool.js +1 -1
- package/package.json +5 -4
package/dist/services/refinement.js
CHANGED
@@ -1,5 +1,6 @@
-import {
-import {
+import { ComponentStatus, } from '../types/components.js';
+import { formatTaskAsYaml } from '../execution/processing.js';
+import { createCommand, createRefinement } from './components.js';
 import { formatErrorMessage, getRefiningMessage } from './messages.js';
 import { routeTasksWithConfirm } from './router.js';
 /**
@@ -7,6 +8,11 @@ import { routeTasksWithConfirm } from './router.js';
 * Called when user selects options from a plan with DEFINE tasks
 */
 export async function handleRefinement(selectedTasks, service, originalCommand, lifecycleHandlers, workflowHandlers, requestHandlers) {
+    // Display the resolved command (from user's selection)
+    // The first task's action contains the full resolved command
+    const resolvedCommand = selectedTasks[0]?.action || originalCommand;
+    const commandDisplay = createCommand({ command: resolvedCommand, service, onAborted: requestHandlers.onAborted }, ComponentStatus.Done);
+    workflowHandlers.addToTimeline(commandDisplay);
     // Create and add refinement component to queue
     const refinementDef = createRefinement({
         text: getRefiningMessage(),
@@ -16,18 +22,15 @@ export async function handleRefinement(selectedTasks, service, originalCommand,
     });
     workflowHandlers.addToQueue(refinementDef);
     try {
-        // Build refined command
+        // Build refined command with action line followed by YAML metadata
         const refinedCommand = selectedTasks
             .map((task) => {
-            const
-            return `${action} (shell execution)`;
-            }
-            return `${action} (type: ${type})`;
+            // Replace commas with dashes for cleaner LLM prompt formatting
+            const action = task.action.replace(/,/g, ' -');
+            const metadata = { ...task.params, type: task.type };
+            return formatTaskAsYaml(action, metadata);
         })
-            .join('
+            .join('\n\n');
         // Call LLM to refine plan with selected tasks
         const refinedResult = await service.processWithTool(refinedCommand, 'schedule');
         // Complete the Refinement component with success state
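The refined command passed to service.processWithTool is now one block per selected task: the comma-stripped action line plus YAML metadata, with blocks joined by blank lines. The sketch below only illustrates that shape for a single hypothetical task; formatTaskAsYaml lives in dist/execution/processing.js and is not shown in this diff, so the rendered YAML layout, the task field values, and the 'execute' type string are assumptions.

// Hypothetical selected task (field names follow the surrounding code; values are made up).
const task = {
    action: 'Build project Alpha, the legacy version',
    type: 'execute',              // assumed TaskType value
    params: { target: 'alpha' },  // hypothetical params
};
const action = task.action.replace(/,/g, ' -'); // 'Build project Alpha - the legacy version'
const metadata = { ...task.params, type: task.type };
// formatTaskAsYaml(action, metadata) is assumed to render something like:
//   Build project Alpha - the legacy version
//     target: alpha
//     type: execute
// Multiple blocks are then joined with '\n\n' before being sent to the LLM.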
package/dist/services/router.js
CHANGED
@@ -5,9 +5,74 @@ import { getConfigSchema } from '../configuration/schema.js';
 import { createConfigStepsFromSchema } from '../configuration/steps.js';
 import { unflattenConfig } from '../configuration/transformation.js';
 import { saveConfigLabels } from '../configuration/labels.js';
-import { createAnswer, createConfig, createConfirm, createExecute, createFeedback, createIntrospect,
-import { getCancellationMessage, getConfirmationMessage,
+import { createAnswer, createConfig, createConfirm, createExecute, createFeedback, createIntrospect, createSchedule, createValidate, } from './components.js';
+import { getCancellationMessage, getConfirmationMessage, getUnknownRequestMessage, } from './messages.js';
 import { validateExecuteTasks } from './validator.js';
+/**
+ * Flatten inner task structure completely - removes all nested groups.
+ * Used internally to flatten subtasks within a top-level group.
+ */
+function flattenInnerTasks(tasks) {
+    const result = [];
+    for (const task of tasks) {
+        if (task.type === TaskType.Group &&
+            task.subtasks &&
+            task.subtasks.length > 0) {
+            // Recursively flatten inner group
+            result.push(...flattenInnerTasks(task.subtasks));
+        }
+        else if (task.type !== TaskType.Group) {
+            // Leaf task - add as-is
+            const leafTask = {
+                action: task.action,
+                type: task.type,
+            };
+            if (task.params)
+                leafTask.params = task.params;
+            if (task.config)
+                leafTask.config = task.config;
+            result.push(leafTask);
+        }
+        // Skip empty groups
+    }
+    return result;
+}
+/**
+ * Flatten hierarchical task structure, preserving top-level groups.
+ * Top-level groups are kept with their subtasks flattened.
+ * Inner nested groups are removed and their subtasks extracted recursively.
+ */
+export function flattenTasks(tasks) {
+    const result = [];
+    for (const task of tasks) {
+        if (task.type === TaskType.Group &&
+            task.subtasks &&
+            task.subtasks.length > 0) {
+            // Preserve top-level group but flatten its subtasks
+            const flattenedSubtasks = flattenInnerTasks(task.subtasks);
+            const groupTask = {
+                action: task.action,
+                type: task.type,
+                subtasks: flattenedSubtasks,
+            };
+            result.push(groupTask);
+        }
+        else if (task.type !== TaskType.Group) {
+            // Non-group task - add as-is
+            const leafTask = {
+                action: task.action,
+                type: task.type,
+            };
+            if (task.params)
+                leafTask.params = task.params;
+            if (task.config)
+                leafTask.config = task.config;
+            result.push(leafTask);
+        }
+        // Skip empty groups (group with no subtasks)
+    }
+    return result;
+}
 /**
 * Determine the operation name based on task types
 */
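The new flattenTasks keeps top-level groups but removes any nesting below them, so downstream routing only ever sees one level of grouping. A small sketch of the input/output shape, derived from the function above; the TaskType values and the sample task fields are stand-ins, not the package's real enum values.

// Stand-in for the package's TaskType enum (actual values are not shown in this diff).
const TaskType = { Execute: 'execute', Answer: 'answer', Group: 'group' };

const tasks = [
    {
        action: 'Deploy services',
        type: TaskType.Group,
        subtasks: [
            { action: 'Build image', type: TaskType.Execute },
            {
                action: 'Nested group',
                type: TaskType.Group,
                subtasks: [{ action: 'Push image', type: TaskType.Execute }],
            },
        ],
    },
    { action: 'What changed?', type: TaskType.Answer },
];

// flattenTasks(tasks) keeps the top-level group and flattens its subtasks:
// [
//   { action: 'Deploy services', type: Group,
//     subtasks: [{ action: 'Build image', ... }, { action: 'Push image', ... }] },
//   { action: 'What changed?', type: Answer },
// ]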
@@ -31,8 +96,12 @@ export function routeTasksWithConfirm(tasks, message, service, userRequest, life
 const validTasks = tasks.filter((task) => task.type !== TaskType.Ignore && task.type !== TaskType.Discard);
 // Check if no valid tasks remain after filtering
 if (validTasks.length === 0) {
+    // Use action from first ignore task if available, otherwise generic message
+    const ignoreTask = tasks.find((task) => task.type === TaskType.Ignore);
+    const message = ignoreTask?.action
+        ? `${ignoreTask.action}.`
+        : getUnknownRequestMessage();
+    workflowHandlers.addToQueue(createFeedback({ type: FeedbackType.Warning, message }));
     return;
 }
 const operation = getOperationName(validTasks);
@@ -80,61 +149,44 @@ export function routeTasksWithConfirm(tasks, message, service, userRequest, life
     }
 }
 /**
- * Validate task
- *
+ * Validate task structure after flattening.
+ * Currently no-op since flattening removes Groups and mixed types are allowed.
 */
-function validateTaskTypes(
-    // Convert to ScheduledTask to access subtasks property
-    const scheduledTasks = asScheduledTasks(tasks);
-    // Check each Group task's subtasks for uniform types
-    for (const task of scheduledTasks) {
-        if (task.type === TaskType.Group &&
-            task.subtasks &&
-            task.subtasks.length > 0) {
-            const subtaskTypes = new Set(task.subtasks.map((t) => t.type));
-            if (subtaskTypes.size > 1) {
-                throw new Error(getMixedTaskTypesError(Array.from(subtaskTypes)));
-            }
-            // Recursively validate nested groups
-            validateTaskTypes(task.subtasks);
-        }
-    }
+function validateTaskTypes(_tasks) {
+    // After flattening, Groups are removed and mixed leaf types are allowed.
+    // The router handles different task types by routing each to its handler.
 }
 /**
 * Execute tasks after confirmation (internal helper)
- *
- * Supports mixed types at top level with Groups
+ * Flattens hierarchical structure, validates task types, and routes appropriately
 */
 function executeTasksAfterConfirm(tasks, context) {
     const { service, userRequest, workflowHandlers, requestHandlers } = context;
-    //
+    // Flatten hierarchical structure into flat list of leaf tasks
+    const scheduledTasks = asScheduledTasks(tasks);
+    const flatTasks = flattenTasks(scheduledTasks);
+    // Validate that all tasks have uniform type
     try {
-        validateTaskTypes(
+        validateTaskTypes(flatTasks);
     }
     catch (error) {
         requestHandlers.onError(error instanceof Error ? error.message : String(error));
         return;
     }
-    const
-    for (const task of scheduledTasks) {
+    // Collect all Execute tasks for validation (including those inside groups)
+    const executeTasks = [];
+    for (const task of flatTasks) {
         if (task.type === TaskType.Execute) {
+            executeTasks.push(task);
         }
         else if (task.type === TaskType.Group && task.subtasks) {
-            if (subtasks.length > 0 && subtasks[0].type === TaskType.Execute) {
-                allExecuteTasks.push(...subtasks);
-            }
+            executeTasks.push(...task.subtasks.filter((t) => t.type === TaskType.Execute));
         }
     }
-    // Validate
-    if (
+    // Validate Execute tasks to collect missing config upfront
+    if (executeTasks.length > 0) {
         try {
-            const validation = validateExecuteTasks(
+            const validation = validateExecuteTasks(executeTasks);
             if (validation.validationErrors.length > 0) {
                 // Show error feedback for invalid skills
                 const errorMessages = validation.validationErrors.map((error) => {
@@ -150,7 +202,7 @@ function executeTasksAfterConfirm(tasks, context)
                 return;
             }
             else if (validation.missingConfig.length > 0) {
-                // Missing config detected - create
+                // Missing config detected - create Validate component for all missing config
                 workflowHandlers.addToQueue(createValidate({
                     missingConfig: validation.missingConfig,
                     userRequest,
@@ -160,7 +212,7 @@ function executeTasksAfterConfirm(tasks, context)
                     },
                     onValidationComplete: () => {
                         // After config is complete, resume task routing
-                        routeTasksAfterConfig(
+                        routeTasksAfterConfig(flatTasks, context);
                     },
                     onAborted: (operation) => {
                         requestHandlers.onAborted(operation);
@@ -175,105 +227,90 @@ function executeTasksAfterConfirm(tasks, context)
         }
     }
     // No missing config - proceed with normal routing
-    routeTasksAfterConfig(
+    routeTasksAfterConfig(flatTasks, context);
 }
 /**
 * Task types that should appear in the upcoming display
 */
-const UPCOMING_TASK_TYPES = [TaskType.Execute, TaskType.Answer];
+const UPCOMING_TASK_TYPES = [TaskType.Execute, TaskType.Answer, TaskType.Group];
 /**
- * Collect names
- *
+ * Collect action names for tasks that appear in upcoming display.
+ * Groups are included with their group name (not individual subtask names).
 */
-function collectUpcomingNames(
-        const subtasks = task.subtasks;
-        if (UPCOMING_TASK_TYPES.includes(subtasks[0].type)) {
-            names.push(task.action);
-        }
-    }
-    else if (UPCOMING_TASK_TYPES.includes(task.type)) {
-        names.push(task.action);
-    }
-}
-return names;
+function collectUpcomingNames(tasks) {
+    return tasks
+        .filter((t) => UPCOMING_TASK_TYPES.includes(t.type))
+        .map((t) => t.action);
 }
 /**
 * Route tasks after config is complete (or when no config is needed)
- * Processes
+ * Processes task list, routing each task type to its handler.
+ * Top-level groups are preserved: their subtasks are routed with the group name.
+ * Config tasks are grouped together; Execute/Answer are routed individually.
 */
-function routeTasksAfterConfig(
-    const
-    const
+function routeTasksAfterConfig(tasks, context) {
+    if (tasks.length === 0)
+        return;
+    // Collect all upcoming names for display (Execute, Answer, and Group tasks)
+    const allUpcomingNames = collectUpcomingNames(tasks);
+    let upcomingIndex = 0;
+    // Task types that should be grouped together (one component for all tasks)
+    const groupedTypes = [TaskType.Config, TaskType.Introspect];
+    // Route grouped task types together (collect from all tasks including subtasks)
+    for (const groupedType of groupedTypes) {
+        const typeTasks = [];
+        for (const task of tasks) {
+            if (task.type === groupedType) {
+                typeTasks.push(task);
+            }
+            else if (task.type === TaskType.Group && task.subtasks) {
+                typeTasks.push(...task.subtasks.filter((t) => t.type === groupedType));
+            }
         }
+        if (typeTasks.length > 0) {
+            routeTasksByType(groupedType, typeTasks, context, []);
         }
+    }
+    // Process Execute, Answer, and Group tasks individually (with upcoming support)
+    for (let i = 0; i < tasks.length; i++) {
+        const task = tasks[i];
+        const taskType = task.type;
+        // Skip grouped task types (already routed above)
+        if (groupedTypes.includes(taskType))
+            continue;
+        if (taskType === TaskType.Group && task.subtasks) {
+            // Route group's subtasks - Execute tasks get group label, others routed normally
+            const upcoming = allUpcomingNames.slice(upcomingIndex + 1);
+            upcomingIndex++;
+            // Separate subtasks by type
+            const executeSubtasks = task.subtasks.filter((t) => t.type === TaskType.Execute);
+            const answerSubtasks = task.subtasks.filter((t) => t.type === TaskType.Answer);
+            // Route Execute subtasks with group name as label
+            if (executeSubtasks.length > 0) {
+                routeExecuteTasks(executeSubtasks, context, upcoming, task.action);
             }
+            // Route Answer subtasks individually
+            if (answerSubtasks.length > 0) {
+                routeAnswerTasks(answerSubtasks, context, upcoming);
             }
         }
-        //
-        // Calculate upcoming (all units after this one)
-        const upcoming = UPCOMING_TASK_TYPES.includes(taskType)
-            ? allUnitNames.slice(currentUnitIndex + 1)
-            : [];
-        if (UPCOMING_TASK_TYPES.includes(taskType)) {
-            currentUnitIndex++;
-        }
-        // Pass group name as label for Execute groups
-        if (taskType === TaskType.Execute) {
-            routeExecuteTasks(subtasks, context, upcoming, task.action);
-        }
-        else {
-            routeTasksByType(taskType, subtasks, context, upcoming);
-        }
-    }
+        else if (taskType === TaskType.Execute) {
+            // Calculate upcoming for this Execute task
+            const upcoming = allUpcomingNames.slice(upcomingIndex + 1);
+            upcomingIndex++;
+            routeExecuteTasks([task], context, upcoming);
+        }
+        else if (taskType === TaskType.Answer) {
+            // Calculate upcoming for this Answer task
+            const upcoming = allUpcomingNames.slice(upcomingIndex + 1);
+            upcomingIndex++;
+            routeTasksByType(taskType, [task], context, upcoming);
         }
         else {
-            //
+            // For other types (Report, etc.), route without upcoming
+            routeTasksByType(taskType, [task], context, []);
         }
     }
-    // Process any remaining standalone tasks
-    processStandaloneTasks();
 }
 /**
 * Route Answer tasks - creates separate Answer component for each question
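With Groups now included in UPCOMING_TASK_TYPES, the "upcoming" list shown for each component is computed from the top-level task order instead of from group subtasks. Below is a minimal, self-contained illustration of how the names advance during routing; the stubs only mirror collectUpcomingNames from the hunk above, and the TaskType string values are stand-ins.

// Stubs mirroring the routing logic above (not the real module).
const TaskType = { Execute: 'execute', Answer: 'answer', Group: 'group' };
const UPCOMING_TASK_TYPES = [TaskType.Execute, TaskType.Answer, TaskType.Group];
const collectUpcomingNames = (tasks) =>
    tasks.filter((t) => UPCOMING_TASK_TYPES.includes(t.type)).map((t) => t.action);

const tasks = [
    { action: 'Build project', type: TaskType.Execute },
    { action: 'Deploy services', type: TaskType.Group, subtasks: [] },
    { action: 'Summarize results', type: TaskType.Answer },
];
const names = collectUpcomingNames(tasks);
// names -> ['Build project', 'Deploy services', 'Summarize results']
// While iterating, each routed task receives the names after its own position:
// 'Build project'     -> upcoming ['Deploy services', 'Summarize results']
// 'Deploy services'   -> upcoming ['Summarize results'] (passed to its subtasks)
// 'Summarize results' -> upcoming []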
package/dist/services/shell.js
CHANGED
@@ -1,4 +1,5 @@
 import { spawn } from 'child_process';
+import { killGracefully, MemoryMonitor, } from './monitor.js';
 export var ExecutionStatus;
 (function (ExecutionStatus) {
     ExecutionStatus["Pending"] = "pending";
@@ -60,12 +61,12 @@ export class DummyExecutor {
     }
 }
 // Marker for extracting pwd from command output
-const PWD_MARKER = '__PWD_MARKER_7x9k2m__';
-const MAX_OUTPUT_LINES = 128;
+export const PWD_MARKER = '__PWD_MARKER_7x9k2m__';
+export const MAX_OUTPUT_LINES = 128;
 /**
 * Limit output to last MAX_OUTPUT_LINES lines.
 */
-function limitLines(output) {
+export function limitLines(output) {
     const lines = output.split('\n');
     return lines.slice(-MAX_OUTPUT_LINES).join('\n');
 }
@@ -73,7 +74,7 @@ function limitLines(output) {
 * Parse stdout to extract workdir and clean output.
 * Returns the cleaned output and the extracted workdir.
 */
-function parseWorkdir(rawOutput) {
+export function parseWorkdir(rawOutput) {
     const markerIndex = rawOutput.lastIndexOf(PWD_MARKER);
     if (markerIndex === -1) {
         return { output: rawOutput };
@@ -88,7 +89,7 @@ function parseWorkdir(rawOutput) {
 * Manages streaming output while filtering out the PWD marker.
 * Buffers output to avoid emitting partial markers to the callback.
 */
-class OutputStreamer {
+export class OutputStreamer {
     chunks = [];
     emittedLength = 0;
     callback;
@@ -183,18 +184,22 @@ export class RealExecutor {
         return;
     }
     // Handle timeout if specified
-    const SIGKILL_GRACE_PERIOD = 3000;
     let timeoutId;
     let killTimeoutId;
     if (cmd.timeout && cmd.timeout > 0) {
         timeoutId = setTimeout(() => {
-            child
-            // Escalate to SIGKILL if process doesn't terminate
-            killTimeoutId = setTimeout(() => {
-                child.kill('SIGKILL');
-            }, SIGKILL_GRACE_PERIOD);
+            killTimeoutId = killGracefully(child);
         }, cmd.timeout);
     }
+    // Handle memory limit monitoring
+    let memoryMonitor;
+    let memoryInfo;
+    if (cmd.memoryLimit) {
+        memoryMonitor = new MemoryMonitor(child, cmd.memoryLimit, (info) => {
+            memoryInfo = info;
+        });
+        memoryMonitor.start();
+    }
     // Use OutputStreamer for buffered stdout streaming
     const stdoutStreamer = new OutputStreamer(this.outputCallback);
     child.stdout.on('data', (data) => {
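killGracefully replaces the inline SIGTERM/SIGKILL escalation removed above and now lives in the new monitor.js. Its body is not part of this diff; the sketch below reconstructs the equivalent behavior from the removed lines, assuming the truncated "child" line was a SIGTERM kill and that the 3-second grace period was kept.

// Sketch only - the real implementation is in package/dist/services/monitor.js.
const SIGKILL_GRACE_PERIOD = 3000;
export function killGracefully(child) {
    child.kill('SIGTERM');
    // Escalate to SIGKILL if the process does not terminate in time;
    // return the timer so the caller can clear it (as shell.js does).
    return setTimeout(() => {
        child.kill('SIGKILL');
    }, SIGKILL_GRACE_PERIOD);
}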
@@ -217,6 +222,7 @@
         clearTimeout(timeoutId);
     if (killTimeoutId)
         clearTimeout(killTimeoutId);
+    memoryMonitor?.stop();
     const commandResult = {
         description: cmd.description,
         command: cmd.command,
@@ -228,20 +234,32 @@
     onProgress?.(ExecutionStatus.Failed);
     resolve(commandResult);
 });
-child.on('
+child.on('exit', (code) => {
     if (timeoutId)
         clearTimeout(timeoutId);
     if (killTimeoutId)
         clearTimeout(killTimeoutId);
+    memoryMonitor?.stop();
     const { output, workdir } = parseWorkdir(stdoutStreamer.getAccumulated());
+    // Check if terminated due to memory limit
+    const killedByMemoryLimit = memoryMonitor?.wasKilledByMemoryLimit();
+    const success = code === 0 && !killedByMemoryLimit;
+    let errorMessage;
+    if (killedByMemoryLimit && memoryInfo) {
+        errorMessage =
+            `Process exceeded ${memoryInfo.limit} MB memory limit, ` +
+            `${memoryInfo.used} MB was used.`;
+    }
+    else if (!success) {
+        errorMessage = `Exit code: ${code}`;
+    }
     const commandResult = {
         description: cmd.description,
         command: cmd.command,
         output,
         errors: limitLines(stderr.join('')),
         result: success ? ExecutionResult.Success : ExecutionResult.Error,
-        error:
+        error: errorMessage,
         workdir,
     };
     onProgress?.(success ? ExecutionStatus.Success : ExecutionStatus.Failed);
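monitor.js itself (288 added lines) is not included in this diff, but the call sites above pin down the MemoryMonitor surface: construct it with the child process, a limit in MB, and an info callback, then start(), stop(), and wasKilledByMemoryLimit(). The sketch below is inferred from those call sites only; the polling interval, the memory-sampling helper, and all internals are assumptions.

// Assumed shape of MemoryMonitor, inferred from its usage in shell.js above.
export class MemoryMonitor {
    constructor(child, limitMb, onInfo) {
        this.child = child;
        this.limitMb = limitMb;
        this.onInfo = onInfo;
        this.killed = false;
    }
    start() {
        // Periodically sample the child's memory usage (sampleRssMb is a hypothetical helper).
        this.timer = setInterval(() => {
            const usedMb = sampleRssMb(this.child.pid);
            if (usedMb > this.limitMb) {
                this.killed = true;
                this.onInfo({ used: usedMb, limit: this.limitMb });
                killGracefully(this.child);
                this.stop();
            }
        }, 1000);
    }
    stop() {
        if (this.timer)
            clearInterval(this.timer);
    }
    wasKilledByMemoryLimit() {
        return this.killed;
    }
}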
package/dist/services/skills.js
CHANGED
@@ -94,17 +94,47 @@ export function loadSkillDefinitions(fs = defaultFileSystem) {
     const skills = loadSkills(fs);
     return skills.map(({ key, content }) => parseSkillMarkdown(key, content));
 }
+/**
+ * Mark incomplete skill in markdown by appending (INCOMPLETE) to name
+ */
+function markIncompleteSkill(content) {
+    return content.replace(/^(#{1,6}\s+Name\s*\n+)(.+?)(\n|$)/im, `$1$2 (INCOMPLETE)$3`);
+}
+/**
+ * Load skills with both formatted prompt section and parsed definitions
+ * Single source of truth for both LLM prompts and debug display
+ * Parses each skill only once for efficiency
+ */
+export function loadSkillsForPrompt(fs = defaultFileSystem) {
+    const skills = loadSkills(fs);
+    // Parse each skill once and build both outputs
+    const definitions = [];
+    const markedContent = [];
+    for (const { key, content } of skills) {
+        const parsed = parseSkillMarkdown(key, content);
+        definitions.push(parsed);
+        // Mark incomplete skills in markdown for LLM
+        if (parsed.isIncomplete) {
+            markedContent.push(markIncompleteSkill(content));
+        }
+        else {
+            markedContent.push(content);
+        }
+    }
+    const formatted = formatSkillsForPrompt(markedContent);
+    return { formatted, definitions };
+}
 /**
 * Load skills and mark incomplete ones in their markdown
 * Returns array of skill markdown with status markers
+ * Uses loadSkillsForPrompt internally to avoid duplicating logic
 */
 export function loadSkillsWithValidation(fs = defaultFileSystem) {
     const skills = loadSkills(fs);
     return skills.map(({ key, content }) => {
         const parsed = parseSkillMarkdown(key, content);
-        // If skill is incomplete (either validation failed or needs more documentation), append (INCOMPLETE) to the name
         if (parsed.isIncomplete) {
-            return content
+            return markIncompleteSkill(content);
         }
         return content;
     });
@@ -127,6 +157,7 @@ export function createSkillLookup(definitions) {
 }
 /**
 * Format skills for inclusion in the planning prompt
+ * Skills are joined with double newlines (skill headers provide separation)
 */
 export function formatSkillsForPrompt(skills) {
     if (skills.length === 0) {
@@ -148,11 +179,8 @@ brackets for additional information. Use commas instead. For example:
 - WRONG: "Build project Alpha (the legacy version)"

 `;
-    const
-        .map((s) => s.trim())
-        .join('\n\n' + separator + '\n\n');
-    return header + separator + '\n\n' + skillsContent;
+    const skillsContent = skills.map((s) => s.trim()).join('\n\n');
+    return header + skillsContent;
 }
 /**
 * Parse skill reference from execution line