@output.ai/cli 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +27 -0
- package/dist/api/generated/api.d.ts +35 -27
- package/dist/api/generated/api.js +4 -4
- package/dist/assets/docker/docker-compose-dev.yml +2 -0
- package/dist/commands/workflow/debug.d.ts +15 -0
- package/dist/commands/workflow/debug.js +57 -0
- package/dist/commands/workflow/debug.spec.js +36 -0
- package/dist/commands/workflow/{output.d.ts → result.d.ts} +1 -1
- package/dist/commands/workflow/{output.js → result.js} +8 -8
- package/dist/commands/workflow/result.test.d.ts +1 -0
- package/dist/commands/workflow/result.test.js +23 -0
- package/dist/commands/workflow/start.js +1 -1
- package/dist/services/trace_reader.d.ts +14 -0
- package/dist/services/trace_reader.js +67 -0
- package/dist/services/trace_reader.spec.d.ts +1 -0
- package/dist/services/trace_reader.spec.js +164 -0
- package/dist/types/trace.d.ts +161 -0
- package/dist/types/trace.js +18 -0
- package/dist/utils/date_formatter.d.ts +8 -0
- package/dist/utils/date_formatter.js +19 -0
- package/dist/utils/trace_formatter.d.ts +11 -0
- package/dist/utils/trace_formatter.js +402 -0
- package/package.json +2 -1
- package/dist/commands/workflow/output.test.js +0 -23
- /package/dist/commands/workflow/{output.test.d.ts → debug.spec.d.ts} +0 -0
package/README.md
CHANGED
|
@@ -95,6 +95,33 @@ output workflow list --filter simple
|
|
|
95
95
|
|
|
96
96
|
The list command connects to the API server and retrieves all available workflows. By default, it displays a simple list of workflow names (like `ls`). Use `--format table` for detailed information.
|
|
97
97
|
|
|
98
|
+
### Debug a Workflow Execution
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
# Display trace information for a workflow run
|
|
102
|
+
output workflow debug <workflowId>
|
|
103
|
+
|
|
104
|
+
# Display trace in JSON format
|
|
105
|
+
output workflow debug <workflowId> --format json
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
#### What It Does
|
|
109
|
+
|
|
110
|
+
The `debug` command retrieves and displays detailed execution traces for debugging workflow runs. It shows:
|
|
111
|
+
- Complete execution timeline with all events
|
|
112
|
+
- Hierarchical execution tree showing workflow structure
|
|
113
|
+
- Step and activity inputs/outputs
|
|
114
|
+
- Error details and stack traces
|
|
115
|
+
- Performance metrics and durations
|
|
116
|
+
|
|
117
|
+
#### Command Options
|
|
118
|
+
|
|
119
|
+
- `--format, -f` - Output format: `text` (default, human-readable) or `json` (raw trace data)
|
|
120
|
+
|
|
121
|
+
#### Environment Variables
|
|
122
|
+
|
|
123
|
+
- `HOST_TRACE_PATH` - When running in Docker, this maps the container's trace log path to the host filesystem path. Set this to your project's `logs` directory (e.g., `${PWD}/logs`). Required for trace files to be accessible from the CLI when workflows run in containers.
|
|
124
|
+
|
|
98
125
|
### Generate a Workflow
|
|
99
126
|
|
|
100
127
|
```bash
|
|
@@ -32,6 +32,28 @@ export interface Workflow {
|
|
|
32
32
|
inputSchema?: JSONSchema;
|
|
33
33
|
outputSchema?: JSONSchema;
|
|
34
34
|
}
|
|
35
|
+
/**
|
|
36
|
+
* File destinations for trace data
|
|
37
|
+
*/
|
|
38
|
+
export type TraceInfoDestinations = {
|
|
39
|
+
/**
|
|
40
|
+
* Absolute path to local trace file, or null if not saved locally
|
|
41
|
+
* @nullable
|
|
42
|
+
*/
|
|
43
|
+
local: string | null;
|
|
44
|
+
/**
|
|
45
|
+
* Remote trace location (e.g., S3 URI), or null if not saved remotely
|
|
46
|
+
* @nullable
|
|
47
|
+
*/
|
|
48
|
+
remote: string | null;
|
|
49
|
+
};
|
|
50
|
+
/**
|
|
51
|
+
* An object with information about the trace generated by the execution
|
|
52
|
+
*/
|
|
53
|
+
export interface TraceInfo {
|
|
54
|
+
/** File destinations for trace data */
|
|
55
|
+
destinations?: TraceInfoDestinations;
|
|
56
|
+
}
|
|
35
57
|
export type PostWorkflowRunBody = {
|
|
36
58
|
/** The name of the workflow to execute */
|
|
37
59
|
workflowName: string;
|
|
@@ -42,19 +64,12 @@ export type PostWorkflowRunBody = {
|
|
|
42
64
|
/** The name of the task queue to send the workflow to */
|
|
43
65
|
taskQueue?: string;
|
|
44
66
|
};
|
|
45
|
-
/**
|
|
46
|
-
* An object with information about the trace generated by the execution
|
|
47
|
-
*/
|
|
48
|
-
export type PostWorkflowRun200Trace = {
|
|
49
|
-
[key: string]: unknown;
|
|
50
|
-
};
|
|
51
67
|
export type PostWorkflowRun200 = {
|
|
52
68
|
/** The workflow execution id */
|
|
53
69
|
workflowId?: string;
|
|
54
70
|
/** The output of the workflow */
|
|
55
71
|
output?: unknown;
|
|
56
|
-
|
|
57
|
-
trace?: PostWorkflowRun200Trace;
|
|
72
|
+
trace?: TraceInfo;
|
|
58
73
|
};
|
|
59
74
|
export type PostWorkflowStartBody = {
|
|
60
75
|
/** The name of the workflow to execute */
|
|
@@ -94,19 +109,12 @@ export type GetWorkflowIdStatus200 = {
|
|
|
94
109
|
/** An epoch timestamp representing when the workflow ended */
|
|
95
110
|
completedAt?: number;
|
|
96
111
|
};
|
|
97
|
-
|
|
98
|
-
* An object with information about the trace generated by the execution
|
|
99
|
-
*/
|
|
100
|
-
export type GetWorkflowIdOutput200Trace = {
|
|
101
|
-
[key: string]: unknown;
|
|
102
|
-
};
|
|
103
|
-
export type GetWorkflowIdOutput200 = {
|
|
112
|
+
export type GetWorkflowIdResult200 = {
|
|
104
113
|
/** The workflow execution id */
|
|
105
114
|
workflowId?: string;
|
|
106
|
-
/** The
|
|
115
|
+
/** The result of workflow */
|
|
107
116
|
output?: unknown;
|
|
108
|
-
|
|
109
|
-
trace?: GetWorkflowIdOutput200Trace;
|
|
117
|
+
trace?: TraceInfo;
|
|
110
118
|
};
|
|
111
119
|
export type GetWorkflowCatalogId200 = {
|
|
112
120
|
/** Each workflow available in this catalog */
|
|
@@ -201,25 +209,25 @@ export type patchWorkflowIdStopResponse = (patchWorkflowIdStopResponseSuccess |
|
|
|
201
209
|
export declare const getPatchWorkflowIdStopUrl: (id: string) => string;
|
|
202
210
|
export declare const patchWorkflowIdStop: (id: string, options?: ApiRequestOptions) => Promise<patchWorkflowIdStopResponse>;
|
|
203
211
|
/**
|
|
204
|
-
* @summary Return the
|
|
212
|
+
* @summary Return the result of a workflow
|
|
205
213
|
*/
|
|
206
|
-
export type
|
|
207
|
-
data:
|
|
214
|
+
export type getWorkflowIdResultResponse200 = {
|
|
215
|
+
data: GetWorkflowIdResult200;
|
|
208
216
|
status: 200;
|
|
209
217
|
};
|
|
210
|
-
export type
|
|
218
|
+
export type getWorkflowIdResultResponse404 = {
|
|
211
219
|
data: void;
|
|
212
220
|
status: 404;
|
|
213
221
|
};
|
|
214
|
-
export type
|
|
222
|
+
export type getWorkflowIdResultResponseSuccess = (getWorkflowIdResultResponse200) & {
|
|
215
223
|
headers: Headers;
|
|
216
224
|
};
|
|
217
|
-
export type
|
|
225
|
+
export type getWorkflowIdResultResponseError = (getWorkflowIdResultResponse404) & {
|
|
218
226
|
headers: Headers;
|
|
219
227
|
};
|
|
220
|
-
export type
|
|
221
|
-
export declare const
|
|
222
|
-
export declare const
|
|
228
|
+
export type getWorkflowIdResultResponse = (getWorkflowIdResultResponseSuccess | getWorkflowIdResultResponseError);
|
|
229
|
+
export declare const getGetWorkflowIdResultUrl: (id: string) => string;
|
|
230
|
+
export declare const getWorkflowIdResult: (id: string, options?: ApiRequestOptions) => Promise<getWorkflowIdResultResponse>;
|
|
223
231
|
/**
|
|
224
232
|
* @summary Get a specific workflow catalog by ID
|
|
225
233
|
*/
|
|
@@ -69,11 +69,11 @@ export const patchWorkflowIdStop = async (id, options) => {
|
|
|
69
69
|
method: 'PATCH'
|
|
70
70
|
});
|
|
71
71
|
};
|
|
72
|
-
export const
|
|
73
|
-
return `/workflow/${id}/
|
|
72
|
+
export const getGetWorkflowIdResultUrl = (id) => {
|
|
73
|
+
return `/workflow/${id}/result`;
|
|
74
74
|
};
|
|
75
|
-
export const
|
|
76
|
-
return customFetchInstance(
|
|
75
|
+
export const getWorkflowIdResult = async (id, options) => {
|
|
76
|
+
return customFetchInstance(getGetWorkflowIdResultUrl(id), {
|
|
77
77
|
...options,
|
|
78
78
|
method: 'GET'
|
|
79
79
|
});
|
|
@@ -80,6 +80,7 @@ services:
|
|
|
80
80
|
temporal:
|
|
81
81
|
condition: service_healthy
|
|
82
82
|
image: growthxteam/output-api:latest
|
|
83
|
+
pull_policy: always
|
|
83
84
|
networks:
|
|
84
85
|
- main
|
|
85
86
|
environment:
|
|
@@ -104,6 +105,7 @@ services:
|
|
|
104
105
|
- REDIS_URL=redis://redis:6379
|
|
105
106
|
- TEMPORAL_ADDRESS=temporal:7233
|
|
106
107
|
- TRACE_LOCAL_ON=true
|
|
108
|
+
- HOST_TRACE_PATH=${PWD}/logs
|
|
107
109
|
command: npm run start-worker
|
|
108
110
|
working_dir: /app
|
|
109
111
|
volumes:
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import { Command } from '@oclif/core';
|
|
2
|
+
export default class WorkflowDebug extends Command {
|
|
3
|
+
static description: string;
|
|
4
|
+
static examples: string[];
|
|
5
|
+
static args: {
|
|
6
|
+
workflowId: import("@oclif/core/lib/interfaces").Arg<string, Record<string, unknown>>;
|
|
7
|
+
};
|
|
8
|
+
static flags: {
|
|
9
|
+
format: import("@oclif/core/lib/interfaces").OptionFlag<string, import("@oclif/core/lib/interfaces").CustomOptions>;
|
|
10
|
+
};
|
|
11
|
+
run(): Promise<void>;
|
|
12
|
+
private outputJson;
|
|
13
|
+
private displayTextTrace;
|
|
14
|
+
catch(error: Error): Promise<void>;
|
|
15
|
+
}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import { Args, Command, Flags } from '@oclif/core';
|
|
2
|
+
import { OUTPUT_FORMAT } from '#utils/constants.js';
|
|
3
|
+
import { displayDebugTree } from '#utils/trace_formatter.js';
|
|
4
|
+
import { getTrace } from '#services/trace_reader.js';
|
|
5
|
+
import { handleApiError } from '#utils/error_handler.js';
|
|
6
|
+
export default class WorkflowDebug extends Command {
|
|
7
|
+
static description = 'Get and display workflow execution trace for debugging';
|
|
8
|
+
static examples = [
|
|
9
|
+
'<%= config.bin %> <%= command.id %> wf-12345',
|
|
10
|
+
'<%= config.bin %> <%= command.id %> wf-12345 --format json',
|
|
11
|
+
'<%= config.bin %> <%= command.id %> wf-12345 --format text'
|
|
12
|
+
];
|
|
13
|
+
static args = {
|
|
14
|
+
workflowId: Args.string({
|
|
15
|
+
description: 'The workflow ID to debug',
|
|
16
|
+
required: true
|
|
17
|
+
})
|
|
18
|
+
};
|
|
19
|
+
static flags = {
|
|
20
|
+
format: Flags.string({
|
|
21
|
+
char: 'f',
|
|
22
|
+
description: 'Output format',
|
|
23
|
+
options: [OUTPUT_FORMAT.JSON, OUTPUT_FORMAT.TEXT],
|
|
24
|
+
default: OUTPUT_FORMAT.TEXT
|
|
25
|
+
})
|
|
26
|
+
};
|
|
27
|
+
async run() {
|
|
28
|
+
const { args, flags } = await this.parse(WorkflowDebug);
|
|
29
|
+
const isJsonFormat = flags.format === OUTPUT_FORMAT.JSON;
|
|
30
|
+
if (!isJsonFormat) {
|
|
31
|
+
this.log(`Fetching debug information for workflow: ${args.workflowId}...`);
|
|
32
|
+
}
|
|
33
|
+
const traceData = await getTrace(args.workflowId);
|
|
34
|
+
// Output based on format
|
|
35
|
+
if (isJsonFormat) {
|
|
36
|
+
this.outputJson(traceData);
|
|
37
|
+
return;
|
|
38
|
+
}
|
|
39
|
+
// Display text format
|
|
40
|
+
this.displayTextTrace(traceData);
|
|
41
|
+
}
|
|
42
|
+
outputJson(data) {
|
|
43
|
+
this.log(JSON.stringify(data, null, 2));
|
|
44
|
+
}
|
|
45
|
+
displayTextTrace(traceData) {
|
|
46
|
+
this.log('\nTrace Log:');
|
|
47
|
+
this.log('─'.repeat(80));
|
|
48
|
+
this.log(displayDebugTree(traceData));
|
|
49
|
+
this.log('\n' + '─'.repeat(80));
|
|
50
|
+
this.log('Tip: Use --format json for the full untruncated trace');
|
|
51
|
+
}
|
|
52
|
+
async catch(error) {
|
|
53
|
+
return handleApiError(error, (...args) => this.error(...args), {
|
|
54
|
+
404: 'Workflow not found or trace not available. Check the workflow ID.'
|
|
55
|
+
});
|
|
56
|
+
}
|
|
57
|
+
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
|
2
|
+
// Mock the TraceReader service
|
|
3
|
+
vi.mock('../../services/trace_reader.js', () => ({
|
|
4
|
+
findTraceFile: vi.fn(),
|
|
5
|
+
readTraceFile: vi.fn(),
|
|
6
|
+
getTrace: vi.fn()
|
|
7
|
+
}));
|
|
8
|
+
// Mock the utilities
|
|
9
|
+
vi.mock('../../utils/trace_formatter.js', () => ({
|
|
10
|
+
displayDebugTree: vi.fn()
|
|
11
|
+
}));
|
|
12
|
+
describe('workflow debug command', () => {
|
|
13
|
+
beforeEach(() => {
|
|
14
|
+
vi.clearAllMocks();
|
|
15
|
+
});
|
|
16
|
+
describe('command definition', () => {
|
|
17
|
+
it('should export a valid OCLIF command', async () => {
|
|
18
|
+
const WorkflowDebug = (await import('./debug.js')).default;
|
|
19
|
+
expect(WorkflowDebug).toBeDefined();
|
|
20
|
+
expect(WorkflowDebug.description).toContain('Get and display workflow execution trace for debugging');
|
|
21
|
+
expect(WorkflowDebug.args).toHaveProperty('workflowId');
|
|
22
|
+
expect(WorkflowDebug.flags).toHaveProperty('format');
|
|
23
|
+
});
|
|
24
|
+
it('should have correct flag configuration', async () => {
|
|
25
|
+
const WorkflowDebug = (await import('./debug.js')).default;
|
|
26
|
+
// Format flag
|
|
27
|
+
expect(WorkflowDebug.flags.format.options).toEqual(['json', 'text']);
|
|
28
|
+
expect(WorkflowDebug.flags.format.default).toBe('text');
|
|
29
|
+
});
|
|
30
|
+
it('should have correct examples', async () => {
|
|
31
|
+
const WorkflowDebug = (await import('./debug.js')).default;
|
|
32
|
+
expect(WorkflowDebug.examples).toBeDefined();
|
|
33
|
+
expect(WorkflowDebug.examples.length).toBeGreaterThan(0);
|
|
34
|
+
});
|
|
35
|
+
});
|
|
36
|
+
});
|
|
@@ -1,17 +1,17 @@
|
|
|
1
1
|
import { Args, Command, Flags } from '@oclif/core';
|
|
2
|
-
import {
|
|
2
|
+
import { getWorkflowIdResult } from '#api/generated/api.js';
|
|
3
3
|
import { OUTPUT_FORMAT } from '#utils/constants.js';
|
|
4
4
|
import { formatOutput } from '#utils/output_formatter.js';
|
|
5
5
|
import { handleApiError } from '#utils/error_handler.js';
|
|
6
|
-
export default class
|
|
7
|
-
static description = 'Get workflow execution
|
|
6
|
+
export default class WorkflowResult extends Command {
|
|
7
|
+
static description = 'Get workflow execution result';
|
|
8
8
|
static examples = [
|
|
9
9
|
'<%= config.bin %> <%= command.id %> wf-12345',
|
|
10
10
|
'<%= config.bin %> <%= command.id %> wf-12345 --format json'
|
|
11
11
|
];
|
|
12
12
|
static args = {
|
|
13
13
|
workflowId: Args.string({
|
|
14
|
-
description: 'The workflow ID to get
|
|
14
|
+
description: 'The workflow ID to get result for',
|
|
15
15
|
required: true
|
|
16
16
|
})
|
|
17
17
|
};
|
|
@@ -24,9 +24,9 @@ export default class WorkflowOutput extends Command {
|
|
|
24
24
|
})
|
|
25
25
|
};
|
|
26
26
|
async run() {
|
|
27
|
-
const { args, flags } = await this.parse(
|
|
28
|
-
this.log(`Fetching
|
|
29
|
-
const response = await
|
|
27
|
+
const { args, flags } = await this.parse(WorkflowResult);
|
|
28
|
+
this.log(`Fetching result for workflow: ${args.workflowId}...`);
|
|
29
|
+
const response = await getWorkflowIdResult(args.workflowId);
|
|
30
30
|
if (!response || !response.data) {
|
|
31
31
|
this.error('API returned invalid response', { exit: 1 });
|
|
32
32
|
}
|
|
@@ -34,7 +34,7 @@ export default class WorkflowOutput extends Command {
|
|
|
34
34
|
const lines = [
|
|
35
35
|
`Workflow ID: ${result.workflowId || 'unknown'}`,
|
|
36
36
|
'',
|
|
37
|
-
'
|
|
37
|
+
'Result:',
|
|
38
38
|
JSON.stringify(result.output, null, 2)
|
|
39
39
|
];
|
|
40
40
|
return lines.join('\n');
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
|
2
|
+
vi.mock('../../api/generated/api.js', () => ({
|
|
3
|
+
getWorkflowIdResult: vi.fn()
|
|
4
|
+
}));
|
|
5
|
+
describe('workflow result command', () => {
|
|
6
|
+
beforeEach(() => {
|
|
7
|
+
vi.clearAllMocks();
|
|
8
|
+
});
|
|
9
|
+
describe('command definition', () => {
|
|
10
|
+
it('should export a valid OCLIF command', async () => {
|
|
11
|
+
const WorkflowResult = (await import('./result.js')).default;
|
|
12
|
+
expect(WorkflowResult).toBeDefined();
|
|
13
|
+
expect(WorkflowResult.description).toContain('Get workflow execution result');
|
|
14
|
+
expect(WorkflowResult.args).toHaveProperty('workflowId');
|
|
15
|
+
expect(WorkflowResult.flags).toHaveProperty('format');
|
|
16
|
+
});
|
|
17
|
+
it('should have correct flag configuration', async () => {
|
|
18
|
+
const WorkflowResult = (await import('./result.js')).default;
|
|
19
|
+
expect(WorkflowResult.flags.format.options).toEqual(['json', 'text']);
|
|
20
|
+
expect(WorkflowResult.flags.format.default).toBe('text');
|
|
21
|
+
});
|
|
22
|
+
});
|
|
23
|
+
});
|
|
@@ -45,7 +45,7 @@ export default class WorkflowStart extends Command {
|
|
|
45
45
|
`Workflow ID: ${result.workflowId || 'unknown'}`,
|
|
46
46
|
'',
|
|
47
47
|
`Use "workflow status ${result.workflowId || '<workflow-id>'}" to check the workflow status`,
|
|
48
|
-
`Use "workflow
|
|
48
|
+
`Use "workflow result ${result.workflowId || '<workflow-id>'}" to get the workflow result when complete`
|
|
49
49
|
].join('\n');
|
|
50
50
|
this.log(`\n${output}`);
|
|
51
51
|
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import type { TraceData } from '#types/trace.js';
|
|
2
|
+
export type { TraceData };
|
|
3
|
+
/**
|
|
4
|
+
* Find trace file from workflow metadata
|
|
5
|
+
*/
|
|
6
|
+
export declare function findTraceFile(workflowId: string): Promise<string>;
|
|
7
|
+
/**
|
|
8
|
+
* Read and parse trace file
|
|
9
|
+
*/
|
|
10
|
+
export declare function readTraceFile(path: string): Promise<TraceData>;
|
|
11
|
+
/**
|
|
12
|
+
* Get trace data from workflow ID
|
|
13
|
+
*/
|
|
14
|
+
export declare function getTrace(workflowId: string): Promise<TraceData>;
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
import { readFile, stat } from 'node:fs/promises';
|
|
2
|
+
import { getWorkflowIdResult } from '#api/generated/api.js';
|
|
3
|
+
import { getErrorCode } from '#utils/error_utils.js';
|
|
4
|
+
/**
|
|
5
|
+
* Check if a file exists with detailed error information
|
|
6
|
+
*/
|
|
7
|
+
async function fileExists(path) {
|
|
8
|
+
try {
|
|
9
|
+
await stat(path);
|
|
10
|
+
return { exists: true };
|
|
11
|
+
}
|
|
12
|
+
catch (error) {
|
|
13
|
+
const code = getErrorCode(error);
|
|
14
|
+
if (code === 'ENOENT') {
|
|
15
|
+
return { exists: false };
|
|
16
|
+
}
|
|
17
|
+
if (code === 'EACCES') {
|
|
18
|
+
return { exists: false, error: `Permission denied: ${path}` };
|
|
19
|
+
}
|
|
20
|
+
return { exists: false, error: `Cannot access file: ${path}` };
|
|
21
|
+
}
|
|
22
|
+
}
|
|
23
|
+
/**
|
|
24
|
+
* Find trace file from workflow metadata
|
|
25
|
+
*/
|
|
26
|
+
export async function findTraceFile(workflowId) {
|
|
27
|
+
const response = await getWorkflowIdResult(workflowId);
|
|
28
|
+
// Check if we got a successful response
|
|
29
|
+
if (response.status !== 200) {
|
|
30
|
+
throw new Error(`Failed to get workflow result for ${workflowId}`);
|
|
31
|
+
}
|
|
32
|
+
const tracePath = response.data.trace?.destinations?.local;
|
|
33
|
+
if (!tracePath) {
|
|
34
|
+
throw new Error(`No trace file path found for workflow ${workflowId}`);
|
|
35
|
+
}
|
|
36
|
+
const fileCheck = await fileExists(tracePath);
|
|
37
|
+
if (!fileCheck.exists) {
|
|
38
|
+
const errorDetail = fileCheck.error || `Trace file not found at path: ${tracePath}`;
|
|
39
|
+
throw new Error(errorDetail);
|
|
40
|
+
}
|
|
41
|
+
return tracePath;
|
|
42
|
+
}
|
|
43
|
+
/**
|
|
44
|
+
* Read and parse trace file
|
|
45
|
+
*/
|
|
46
|
+
export async function readTraceFile(path) {
|
|
47
|
+
try {
|
|
48
|
+
const content = await readFile(path, 'utf-8');
|
|
49
|
+
return JSON.parse(content);
|
|
50
|
+
}
|
|
51
|
+
catch (error) {
|
|
52
|
+
if (getErrorCode(error) === 'ENOENT') {
|
|
53
|
+
throw new Error(`Trace file not found at path: ${path}`);
|
|
54
|
+
}
|
|
55
|
+
if (error instanceof SyntaxError) {
|
|
56
|
+
throw new Error(`Invalid JSON in trace file: ${path}`);
|
|
57
|
+
}
|
|
58
|
+
throw error;
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
/**
|
|
62
|
+
* Get trace data from workflow ID
|
|
63
|
+
*/
|
|
64
|
+
export async function getTrace(workflowId) {
|
|
65
|
+
const tracePath = await findTraceFile(workflowId);
|
|
66
|
+
return readTraceFile(tracePath);
|
|
67
|
+
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
import { describe, it, expect, vi, afterEach } from 'vitest';
|
|
2
|
+
import { findTraceFile, readTraceFile } from './trace_reader.js';
|
|
3
|
+
// Mock file system operations
|
|
4
|
+
vi.mock('node:fs/promises', () => ({
|
|
5
|
+
readFile: vi.fn(),
|
|
6
|
+
stat: vi.fn()
|
|
7
|
+
}));
|
|
8
|
+
// Mock API
|
|
9
|
+
vi.mock('../api/generated/api.js', () => ({
|
|
10
|
+
getWorkflowIdResult: vi.fn()
|
|
11
|
+
}));
|
|
12
|
+
describe('TraceReader', () => {
|
|
13
|
+
const getMocks = async () => {
|
|
14
|
+
const fsModule = await import('node:fs/promises');
|
|
15
|
+
const apiModule = await import('../api/generated/api.js');
|
|
16
|
+
return {
|
|
17
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
18
|
+
mockReadFile: fsModule.readFile,
|
|
19
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
20
|
+
mockStat: fsModule.stat,
|
|
21
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
22
|
+
mockGetWorkflowIdResult: apiModule.getWorkflowIdResult
|
|
23
|
+
};
|
|
24
|
+
};
|
|
25
|
+
afterEach(() => {
|
|
26
|
+
vi.clearAllMocks();
|
|
27
|
+
});
|
|
28
|
+
describe('findTraceFile', () => {
|
|
29
|
+
it('should find trace file from workflow output metadata', async () => {
|
|
30
|
+
const { mockGetWorkflowIdResult, mockStat } = await getMocks();
|
|
31
|
+
const workflowId = 'test-workflow-123';
|
|
32
|
+
const expectedPath = '/app/logs/runs/test/2024-01-01_test-workflow-123.json';
|
|
33
|
+
mockGetWorkflowIdResult.mockResolvedValue({
|
|
34
|
+
status: 200,
|
|
35
|
+
data: {
|
|
36
|
+
workflowId,
|
|
37
|
+
output: { result: 'test result' },
|
|
38
|
+
trace: {
|
|
39
|
+
destinations: {
|
|
40
|
+
local: expectedPath,
|
|
41
|
+
remote: null
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
});
|
|
46
|
+
mockStat.mockResolvedValue({ isFile: () => true });
|
|
47
|
+
const result = await findTraceFile(workflowId);
|
|
48
|
+
expect(result).toBe(expectedPath);
|
|
49
|
+
expect(mockGetWorkflowIdResult).toHaveBeenCalledWith(workflowId);
|
|
50
|
+
expect(mockStat).toHaveBeenCalledWith(expectedPath);
|
|
51
|
+
});
|
|
52
|
+
it('should throw error when no trace path in metadata', async () => {
|
|
53
|
+
const { mockGetWorkflowIdResult } = await getMocks();
|
|
54
|
+
const workflowId = 'test-workflow-456';
|
|
55
|
+
mockGetWorkflowIdResult.mockResolvedValue({
|
|
56
|
+
status: 200,
|
|
57
|
+
data: {
|
|
58
|
+
workflowId,
|
|
59
|
+
output: { result: 'test result' },
|
|
60
|
+
trace: {
|
|
61
|
+
destinations: {
|
|
62
|
+
local: null,
|
|
63
|
+
remote: null
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
});
|
|
68
|
+
await expect(findTraceFile(workflowId))
|
|
69
|
+
.rejects.toThrow(`No trace file path found for workflow ${workflowId}`);
|
|
70
|
+
});
|
|
71
|
+
it('should throw error when trace file not on disk', async () => {
|
|
72
|
+
const { mockGetWorkflowIdResult, mockStat } = await getMocks();
|
|
73
|
+
const workflowId = 'test-workflow-789';
|
|
74
|
+
const expectedPath = '/app/logs/runs/test/2024-01-01_test-workflow-789.json';
|
|
75
|
+
mockGetWorkflowIdResult.mockResolvedValue({
|
|
76
|
+
status: 200,
|
|
77
|
+
data: {
|
|
78
|
+
workflowId,
|
|
79
|
+
output: { result: 'test result' },
|
|
80
|
+
trace: {
|
|
81
|
+
destinations: {
|
|
82
|
+
local: expectedPath,
|
|
83
|
+
remote: null
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
});
|
|
88
|
+
const enoentError = Object.assign(new Error('ENOENT: no such file or directory'), { code: 'ENOENT' });
|
|
89
|
+
mockStat.mockRejectedValue(enoentError);
|
|
90
|
+
await expect(findTraceFile(workflowId))
|
|
91
|
+
.rejects.toThrow(`Trace file not found at path: ${expectedPath}`);
|
|
92
|
+
});
|
|
93
|
+
it('should throw error when API call fails', async () => {
|
|
94
|
+
const { mockGetWorkflowIdResult } = await getMocks();
|
|
95
|
+
const workflowId = 'non-existent';
|
|
96
|
+
mockGetWorkflowIdResult.mockRejectedValue(new Error('Workflow not found'));
|
|
97
|
+
await expect(findTraceFile(workflowId))
|
|
98
|
+
.rejects.toThrow('Workflow not found');
|
|
99
|
+
});
|
|
100
|
+
it('should handle missing trace property gracefully', async () => {
|
|
101
|
+
const { mockGetWorkflowIdResult } = await getMocks();
|
|
102
|
+
const workflowId = 'test-workflow-no-trace';
|
|
103
|
+
mockGetWorkflowIdResult.mockResolvedValue({
|
|
104
|
+
status: 200,
|
|
105
|
+
data: {
|
|
106
|
+
workflowId,
|
|
107
|
+
output: { result: 'test result' }
|
|
108
|
+
// No trace property at all
|
|
109
|
+
}
|
|
110
|
+
});
|
|
111
|
+
await expect(findTraceFile(workflowId))
|
|
112
|
+
.rejects.toThrow(`No trace file path found for workflow ${workflowId}`);
|
|
113
|
+
});
|
|
114
|
+
it('should throw error when workflow not found (404)', async () => {
|
|
115
|
+
const { mockGetWorkflowIdResult } = await getMocks();
|
|
116
|
+
const workflowId = 'non-existent-workflow';
|
|
117
|
+
mockGetWorkflowIdResult.mockResolvedValue({
|
|
118
|
+
status: 404,
|
|
119
|
+
data: void 0
|
|
120
|
+
});
|
|
121
|
+
await expect(findTraceFile(workflowId))
|
|
122
|
+
.rejects.toThrow(`Failed to get workflow result for ${workflowId}`);
|
|
123
|
+
});
|
|
124
|
+
});
|
|
125
|
+
describe('readTraceFile', () => {
|
|
126
|
+
it('should read and parse JSON file successfully', async () => {
|
|
127
|
+
const { mockReadFile } = await getMocks();
|
|
128
|
+
const path = '/logs/test.json';
|
|
129
|
+
const traceData = {
|
|
130
|
+
root: { workflowName: 'test' },
|
|
131
|
+
events: []
|
|
132
|
+
};
|
|
133
|
+
mockReadFile.mockResolvedValue(JSON.stringify(traceData));
|
|
134
|
+
const result = await readTraceFile(path);
|
|
135
|
+
expect(result).toEqual(traceData);
|
|
136
|
+
expect(mockReadFile).toHaveBeenCalledWith(path, 'utf-8');
|
|
137
|
+
});
|
|
138
|
+
it('should throw error for non-existent file', async () => {
|
|
139
|
+
const { mockReadFile } = await getMocks();
|
|
140
|
+
const path = '/logs/missing.json';
|
|
141
|
+
const error = new Error('ENOENT');
|
|
142
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
143
|
+
error.code = 'ENOENT';
|
|
144
|
+
mockReadFile.mockRejectedValue(error);
|
|
145
|
+
await expect(readTraceFile(path))
|
|
146
|
+
.rejects.toThrow(`Trace file not found at path: ${path}`);
|
|
147
|
+
});
|
|
148
|
+
it('should throw error for invalid JSON', async () => {
|
|
149
|
+
const { mockReadFile } = await getMocks();
|
|
150
|
+
const path = '/logs/invalid.json';
|
|
151
|
+
mockReadFile.mockResolvedValue('invalid json {');
|
|
152
|
+
await expect(readTraceFile(path))
|
|
153
|
+
.rejects.toThrow(`Invalid JSON in trace file: ${path}`);
|
|
154
|
+
});
|
|
155
|
+
it('should rethrow other errors', async () => {
|
|
156
|
+
const { mockReadFile } = await getMocks();
|
|
157
|
+
const path = '/logs/test.json';
|
|
158
|
+
const error = new Error('Permission denied');
|
|
159
|
+
mockReadFile.mockRejectedValue(error);
|
|
160
|
+
await expect(readTraceFile(path))
|
|
161
|
+
.rejects.toThrow('Permission denied');
|
|
162
|
+
});
|
|
163
|
+
});
|
|
164
|
+
});
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Trace types for workflow execution data.
|
|
3
|
+
*
|
|
4
|
+
* These types represent the structure of trace files generated during workflow execution,
|
|
5
|
+
* used for debugging, visualization, and analysis of workflow runs.
|
|
6
|
+
*/
|
|
7
|
+
/** Node kind values for trace events */
|
|
8
|
+
export type NodeKind = 'workflow' | 'activity' | 'step' | 'internal_step' | string;
|
|
9
|
+
/** Phase of a trace event lifecycle */
|
|
10
|
+
export type NodePhase = 'start' | 'end' | 'error' | string;
|
|
11
|
+
/** Execution status of a node */
|
|
12
|
+
export type NodeStatus = 'completed' | 'failed' | 'running' | string;
|
|
13
|
+
/**
|
|
14
|
+
* Details associated with a trace event.
|
|
15
|
+
* Contains input/output data and identifying information for the event.
|
|
16
|
+
*/
|
|
17
|
+
export interface TraceDetails {
|
|
18
|
+
/** Input data passed to the step/activity */
|
|
19
|
+
input?: unknown;
|
|
20
|
+
/** Output data returned from the step/activity */
|
|
21
|
+
output?: unknown;
|
|
22
|
+
/** Name of the activity (for activity events) */
|
|
23
|
+
activityName?: string;
|
|
24
|
+
/** Name of the step (for step events) */
|
|
25
|
+
stepName?: string;
|
|
26
|
+
/** Generic name field */
|
|
27
|
+
name?: string;
|
|
28
|
+
}
|
|
29
|
+
/**
|
|
30
|
+
* A trace event representing a point in workflow execution.
|
|
31
|
+
* Used for timeline display and event-based analysis.
|
|
32
|
+
*/
|
|
33
|
+
export interface TraceEvent {
|
|
34
|
+
/** The type of event (workflow, activity, step, etc.) */
|
|
35
|
+
kind: NodeKind;
|
|
36
|
+
/** The lifecycle phase of the event */
|
|
37
|
+
phase: NodePhase;
|
|
38
|
+
/** Unix timestamp when the event occurred */
|
|
39
|
+
timestamp: number;
|
|
40
|
+
/** Unique identifier for the workflow run */
|
|
41
|
+
workflowId: string;
|
|
42
|
+
/** Name of the workflow being executed */
|
|
43
|
+
workflowName: string;
|
|
44
|
+
/** Additional event details including input/output */
|
|
45
|
+
details?: TraceDetails;
|
|
46
|
+
/** Child events for nested executions */
|
|
47
|
+
children?: TraceEvent[];
|
|
48
|
+
/** Error information if the event represents a failure */
|
|
49
|
+
error?: unknown;
|
|
50
|
+
/** Duration of the event in milliseconds */
|
|
51
|
+
duration?: number;
|
|
52
|
+
}
|
|
53
|
+
/**
|
|
54
|
+
* A node in the debug tree representation of workflow execution.
|
|
55
|
+
* Contains more detailed timing and state information than TraceEvent.
|
|
56
|
+
*/
|
|
57
|
+
export interface DebugNode {
|
|
58
|
+
/** The type of node (workflow, activity, step, internal_step) */
|
|
59
|
+
kind?: NodeKind;
|
|
60
|
+
/** Alternative type field */
|
|
61
|
+
type?: string;
|
|
62
|
+
/** The name of the step or activity */
|
|
63
|
+
name?: string;
|
|
64
|
+
/** Name of the workflow (for workflow nodes) */
|
|
65
|
+
workflowName?: string;
|
|
66
|
+
/** Name of the step (for step nodes) */
|
|
67
|
+
stepName?: string;
|
|
68
|
+
/** Name of the activity (for activity nodes) */
|
|
69
|
+
activityName?: string;
|
|
70
|
+
/** The lifecycle phase of the node */
|
|
71
|
+
phase?: NodePhase;
|
|
72
|
+
/** Execution status (completed, failed, running) */
|
|
73
|
+
status?: NodeStatus;
|
|
74
|
+
/** Unix timestamp or ISO string when execution started */
|
|
75
|
+
startedAt?: number | string;
|
|
76
|
+
/** Unix timestamp when the event occurred */
|
|
77
|
+
timestamp?: number | string;
|
|
78
|
+
/** Unix timestamp or ISO string when execution ended */
|
|
79
|
+
endedAt?: number | string;
|
|
80
|
+
/** Unix timestamp when execution started (alternative field) */
|
|
81
|
+
startTime?: number;
|
|
82
|
+
/** Unix timestamp when execution ended (alternative field) */
|
|
83
|
+
endTime?: number;
|
|
84
|
+
/** Execution duration in milliseconds */
|
|
85
|
+
duration?: number;
|
|
86
|
+
/** Input data passed to the step/activity */
|
|
87
|
+
input?: unknown;
|
|
88
|
+
/** Output data returned from the step/activity */
|
|
89
|
+
output?: unknown;
|
|
90
|
+
/** Additional execution details */
|
|
91
|
+
details?: Record<string, unknown>;
|
|
92
|
+
/** Error information if the node failed */
|
|
93
|
+
error?: unknown;
|
|
94
|
+
/** Child nodes representing nested executions */
|
|
95
|
+
children?: DebugNode[];
|
|
96
|
+
}
|
|
97
|
+
/**
|
|
98
|
+
* Root structure of a workflow trace.
|
|
99
|
+
* Contains the execution tree and optional flat event list.
|
|
100
|
+
*/
|
|
101
|
+
export interface TraceStructure {
|
|
102
|
+
/** Root node of the execution tree */
|
|
103
|
+
root?: TraceEvent | DebugNode;
|
|
104
|
+
/** Flat list of execution events for timeline display */
|
|
105
|
+
events?: TraceEvent[];
|
|
106
|
+
/** Hierarchical tree of execution nodes (alternative to root.children) */
|
|
107
|
+
children?: DebugNode[];
|
|
108
|
+
}
|
|
109
|
+
/**
|
|
110
|
+
* The structure of a workflow trace file generated by Output SDK workflow runs.
|
|
111
|
+
* This file is written to the local filesystem during workflow execution and contains
|
|
112
|
+
* the complete execution history including timing, inputs, outputs, and any errors.
|
|
113
|
+
*/
|
|
114
|
+
export interface TraceData {
|
|
115
|
+
/** Root workflow execution information */
|
|
116
|
+
root: {
|
|
117
|
+
/** The name of the workflow that was executed */
|
|
118
|
+
workflowName: string;
|
|
119
|
+
/** Unique identifier for this workflow run */
|
|
120
|
+
workflowId: string;
|
|
121
|
+
/** Unix timestamp when the workflow started */
|
|
122
|
+
startTime: number;
|
|
123
|
+
/** Unix timestamp when the workflow ended */
|
|
124
|
+
endTime?: number;
|
|
125
|
+
/** Total workflow duration in milliseconds */
|
|
126
|
+
duration?: number;
|
|
127
|
+
/** Final workflow status */
|
|
128
|
+
status?: string;
|
|
129
|
+
/** Error information if the workflow failed */
|
|
130
|
+
error?: unknown;
|
|
131
|
+
};
|
|
132
|
+
/** Flat list of execution events for timeline display */
|
|
133
|
+
events?: Array<{
|
|
134
|
+
name: string;
|
|
135
|
+
phase: string;
|
|
136
|
+
timestamp: number;
|
|
137
|
+
details?: unknown;
|
|
138
|
+
}>;
|
|
139
|
+
/** Hierarchical tree of execution nodes */
|
|
140
|
+
children?: DebugNode[];
|
|
141
|
+
}
|
|
142
|
+
/**
|
|
143
|
+
* Extracted node information for display formatting.
|
|
144
|
+
* Used internally by the trace formatter.
|
|
145
|
+
*/
|
|
146
|
+
export interface NodeInfo {
|
|
147
|
+
/** Display name for the node */
|
|
148
|
+
name: string;
|
|
149
|
+
/** Formatted phase indicator */
|
|
150
|
+
phase: string;
|
|
151
|
+
/** Formatted duration string */
|
|
152
|
+
duration: string;
|
|
153
|
+
}
|
|
154
|
+
/**
|
|
155
|
+
* Type guard to check if a node is a TraceEvent.
|
|
156
|
+
*/
|
|
157
|
+
export declare const isTraceEvent: (node: TraceEvent | DebugNode) => node is TraceEvent;
|
|
158
|
+
/**
|
|
159
|
+
* Type guard to check if a value is a valid timestamp.
|
|
160
|
+
*/
|
|
161
|
+
export declare const isValidTimestamp: (value: unknown) => value is number | string;
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Trace types for workflow execution data.
|
|
3
|
+
*
|
|
4
|
+
* These types represent the structure of trace files generated during workflow execution,
|
|
5
|
+
* used for debugging, visualization, and analysis of workflow runs.
|
|
6
|
+
*/
|
|
7
|
+
/**
|
|
8
|
+
* Type guard to check if a node is a TraceEvent.
|
|
9
|
+
*/
|
|
10
|
+
/**
 * Type guard to check if a node is a TraceEvent.
 * TraceEvents are distinguished from DebugNodes by carrying both a
 * `workflowId` and a `timestamp` property.
 */
export const isTraceEvent = (node) => ('workflowId' in node) && ('timestamp' in node);
|
|
13
|
+
/**
|
|
14
|
+
* Type guard to check if a value is a valid timestamp.
|
|
15
|
+
*/
|
|
16
|
+
/**
 * Type guard to check if a value is a valid timestamp.
 * Accepts either epoch numbers or ISO-style strings; anything else
 * (null, undefined, objects, ...) is rejected.
 */
export const isValidTimestamp = (value) => {
    const valueType = typeof value;
    return valueType === 'number' || valueType === 'string';
};
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Format a duration in milliseconds to a human-readable string.
|
|
3
|
+
* Uses date-fns for durations >= 1 minute, custom formatting for shorter durations.
|
|
4
|
+
*
|
|
5
|
+
* @param ms - Duration in milliseconds
|
|
6
|
+
* @returns Formatted duration string (e.g., "150ms", "2.50s", "3 minutes 45 seconds")
|
|
7
|
+
*/
|
|
8
|
+
export declare function formatDuration(ms: number): string;
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { formatDuration as formatDurationFns, intervalToDuration } from 'date-fns';
|
|
2
|
+
/**
|
|
3
|
+
* Format a duration in milliseconds to a human-readable string.
|
|
4
|
+
* Uses date-fns for durations >= 1 minute, custom formatting for shorter durations.
|
|
5
|
+
*
|
|
6
|
+
* @param ms - Duration in milliseconds
|
|
7
|
+
* @returns Formatted duration string (e.g., "150ms", "2.50s", "3 minutes 45 seconds")
|
|
8
|
+
*/
|
|
9
|
+
/**
 * Format a duration in milliseconds to a human-readable string.
 * Uses date-fns for durations >= 1 minute, custom formatting for shorter durations.
 *
 * @param ms - Duration in milliseconds
 * @returns Formatted duration string (e.g., "150ms", "2.50s", "3 minutes 45 seconds")
 */
export function formatDuration(ms) {
    // Sub-second: raw millisecond count.
    if (ms < 1000) {
        return `${ms}ms`;
    }
    // Sub-minute: seconds with two decimal places.
    if (ms < 60000) {
        return `${(ms / 1000).toFixed(2)}s`;
    }
    // Only build the date-fns Duration object when it is actually needed;
    // previously it was computed unconditionally and discarded on the two
    // early-return paths above.
    const duration = intervalToDuration({ start: 0, end: ms });
    return formatDurationFns(duration, { format: ['minutes', 'seconds'] });
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import { formatDuration } from '#utils/date_formatter.js';
|
|
2
|
+
export { formatDuration };
|
|
3
|
+
export declare function format(traceData: string | object, outputFormat?: 'json' | 'text'): string;
|
|
4
|
+
export declare function getSummary(traceData: string | object): {
|
|
5
|
+
totalDuration: number;
|
|
6
|
+
totalEvents: number;
|
|
7
|
+
totalSteps: number;
|
|
8
|
+
totalActivities: number;
|
|
9
|
+
hasErrors: boolean;
|
|
10
|
+
};
|
|
11
|
+
export declare function displayDebugTree(node: unknown): string;
|
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
import Table from 'cli-table3';
|
|
2
|
+
import { ux } from '@oclif/core';
|
|
3
|
+
import { formatOutput } from '#utils/output_formatter.js';
|
|
4
|
+
import { formatDuration } from '#utils/date_formatter.js';
|
|
5
|
+
import { getErrorMessage } from '#utils/error_utils.js';
|
|
6
|
+
import { isTraceEvent, isValidTimestamp } from '#types/trace.js';
|
|
7
|
+
export { formatDuration };
|
|
8
|
+
const TRUNCATION = {
|
|
9
|
+
SHORT: 50,
|
|
10
|
+
STANDARD: 120,
|
|
11
|
+
SUFFIX: '...',
|
|
12
|
+
SUFFIX_VERBOSE: '... (truncated)'
|
|
13
|
+
};
|
|
14
|
+
const TREE_CHARS = {
|
|
15
|
+
BRANCH: '├─ ',
|
|
16
|
+
LAST: '└─ ',
|
|
17
|
+
VERTICAL: '│ ',
|
|
18
|
+
SPACE: ' ',
|
|
19
|
+
DETAIL_BRANCH: '│ ',
|
|
20
|
+
DETAIL_LAST: ' '
|
|
21
|
+
};
|
|
22
|
+
const HEADER_DIVIDER = '═'.repeat(60);
|
|
23
|
+
const colors = {
|
|
24
|
+
workflowHeader: (text) => ux.colorize('bold', ux.colorize('cyan', text)),
|
|
25
|
+
stepHeader: (text) => ux.colorize('yellow', text),
|
|
26
|
+
internalStep: (text) => ux.colorize('dim', text),
|
|
27
|
+
label: (text) => ux.colorize('blue', text),
|
|
28
|
+
error: (text) => ux.colorize('red', text),
|
|
29
|
+
success: (text) => ux.colorize('green', text)
|
|
30
|
+
};
|
|
31
|
+
/**
 * Truncate a value for display.
 *
 * Strings are clipped to `maxLength` with `suffix` appended. Top-level
 * scalars are stringified; top-level objects/arrays are collapsed to a
 * (possibly clipped) JSON string. With `recursive` set, arrays/objects are
 * walked and each nested value truncated in place, while nested scalars
 * keep their original type.
 */
const truncate = (value, maxLength = TRUNCATION.STANDARD, suffix = TRUNCATION.SUFFIX_VERBOSE, recursive = false) => {
    if (value == null) {
        return value;
    }
    const clip = (text) => (text.length <= maxLength ? text : `${text.substring(0, maxLength)}${suffix}`);
    switch (typeof value) {
        case 'string':
            return clip(value);
        case 'number':
        case 'boolean':
            // Nested scalars keep their type; top-level ones are stringified.
            return recursive ? value : String(value);
        default:
            break;
    }
    if (!recursive) {
        // Top-level container: collapse to JSON and clip as one string.
        return clip(JSON.stringify(value));
    }
    if (Array.isArray(value)) {
        return value.map(entry => truncate(entry, maxLength, suffix, true));
    }
    if (typeof value === 'object') {
        const truncatedEntries = Object.entries(value).map(([key, entry]) => [key, truncate(entry, maxLength, suffix, true)]);
        return Object.fromEntries(truncatedEntries);
    }
    return value;
};
|
|
62
|
+
// Short-form, non-recursive truncation used for table cells and inline
// detail summaries.
const truncateShort = (value) => truncate(value, TRUNCATION.SHORT, TRUNCATION.SUFFIX, false);
|
|
65
|
+
// Standard-length truncation applied recursively through nested
// arrays/objects (used before pretty-printing values as JSON).
const truncateRecursive = (value) => truncate(value, TRUNCATION.STANDARD, TRUNCATION.SUFFIX_VERBOSE, true);
|
|
68
|
+
/**
 * Map a lifecycle phase to its bracketed display token.
 * Unknown phases are passed through unchanged.
 */
const formatPhase = (phase) => {
    const tokens = {
        start: '[START]',
        end: '[END]',
        error: '[ERROR]'
    };
    const token = tokens[phase];
    return token ?? phase;
};
|
|
76
|
+
// First non-empty of the node's various name fields; '' when none is set.
const getNodeName = (node) => {
    const candidates = [node.name, node.workflowName, node.stepName, node.activityName];
    return candidates.find(Boolean) || '';
};
|
|
79
|
+
// Resolve the node's kind, falling back to the legacy `type` field; ''
// when neither is set.
const getNodeKind = (node) => [node.kind, node.type].find(Boolean) || '';
|
|
82
|
+
/**
 * Build a display name for a timeline event based on its kind.
 * Falls back to the raw kind string, or 'Unknown Event' when absent.
 */
const getEventName = (event) => {
    const { kind, workflowName, details } = event;
    switch (kind) {
        case 'workflow':
            return `Workflow: ${workflowName}`;
        case 'activity':
            return `Activity: ${details?.activityName || 'unknown'}`;
        case 'step':
            return `Step: ${details?.stepName || details?.name || 'unknown'}`;
        default:
            return kind || 'Unknown Event';
    }
};
|
|
95
|
+
// Apply the color scheme associated with a node kind; unknown kinds are
// rendered uncolored.
const colorizeByKind = (kind, text) => {
    switch (kind) {
        case 'workflow':
            return colors.workflowHeader(text);
        case 'internal_step':
            return colors.internalStep(text);
        case 'step':
        case 'activity':
            return colors.stepHeader(text);
        default:
            return text;
    }
};
|
|
107
|
+
// Colored status badge derived from either the node's phase or its
// explicit status field; '' when neither indicates a terminal state.
const getStatusIndicator = (node) => {
    const hasFailed = node.phase === 'error' || node.status === 'failed';
    if (hasFailed) {
        return colors.error('[FAILED]');
    }
    const hasCompleted = node.phase === 'end' || node.status === 'completed';
    if (hasCompleted) {
        return colors.success('[COMPLETED]');
    }
    return node.status === 'running' ? colors.label('[RUNNING]') : '';
};
|
|
119
|
+
/**
 * Build a one-line colored label for a debug tree node: kind tag, name,
 * and status badge. Strings pass through; other non-objects are
 * stringified; objects with none of those fields fall back to a compact
 * key preview.
 */
const getDebugNodeInfo = (node) => {
    if (typeof node === 'string') {
        return node;
    }
    if (node === null || typeof node !== 'object') {
        return String(node);
    }
    const kind = getNodeKind(node);
    const name = getNodeName(node);
    const status = getStatusIndicator(node);
    const parts = [];
    if (kind) {
        parts.push(colorizeByKind(kind, `[${kind}]`));
    }
    if (name) {
        parts.push(colorizeByKind(kind, name));
    }
    if (status) {
        parts.push(status);
    }
    if (parts.length > 0) {
        return parts.join(' ');
    }
    // No recognizable fields: show up to three of the node's own keys
    // (children/parent are structural, not informative).
    const keys = Object.keys(node).filter(key => key !== 'children' && key !== 'parent');
    if (keys.length === 0) {
        return 'Node';
    }
    const preview = keys.slice(0, 3).join(', ');
    return `Node {${preview}${keys.length > 3 ? ', ...' : ''}}`;
};
|
|
149
|
+
// Extract the {name, phase, duration} display triple for a tree node,
// handling both TraceEvent and DebugNode shapes.
const extractNodeInfo = (node) => {
    const durationLabel = node.duration ? ` (${formatDuration(node.duration)})` : '';
    if (isTraceEvent(node)) {
        return {
            name: getEventName(node),
            phase: formatPhase(node.phase),
            duration: durationLabel
        };
    }
    return {
        name: getDebugNodeInfo(node),
        phase: node.phase ? formatPhase(node.phase) : '',
        duration: durationLabel
    };
};
|
|
163
|
+
/**
 * Summarize an event's details object into a single display string for the
 * timeline table. Strings pass through; objects are reduced to their
 * recognized fields (input/output/activity/step), falling back to a
 * truncated JSON rendering.
 *
 * Input/output use `!= null` rather than truthiness so legitimate falsy
 * values (0, '', false) are still shown — consistent with
 * formatTreeDetails, which already checks null/undefined explicitly.
 */
const formatDetails = (details) => {
    if (!details) {
        return '-';
    }
    if (typeof details === 'string') {
        return details;
    }
    const info = [];
    if (details.input != null) {
        info.push(`Input: ${truncateShort(details.input)}`);
    }
    if (details.output != null) {
        info.push(`Output: ${truncateShort(details.output)}`);
    }
    if (details.activityName) {
        info.push(`Activity: ${details.activityName}`);
    }
    if (details.stepName || details.name) {
        info.push(`Step: ${details.stepName || details.name}`);
    }
    if (info.length > 0) {
        return info.join(', ');
    }
    return truncateShort(details);
};
|
|
188
|
+
// Render indented Input/Output lines for a tree node's details; values
// that are null/undefined are omitted (falsy values like 0 are kept).
const formatTreeDetails = (details, depth) => {
    const indent = ' '.repeat(depth);
    const entries = [
        ['Input', details.input],
        ['Output', details.output]
    ];
    return entries
        .filter(([, value]) => value != null)
        .map(([label, value]) => `${indent}${label}: ${truncateShort(value)}`);
};
|
|
199
|
+
/**
 * Pretty-print a value for the debug tree. Scalars are truncated inline;
 * containers are recursively truncated then JSON-printed, with every line
 * after the first prefixed by `indentPrefix` so the block lines up under
 * its label.
 */
const formatValueWithIndent = (value, indentPrefix) => {
    if (value == null) {
        return String(value);
    }
    const valueType = typeof value;
    if (valueType === 'string' || valueType === 'number' || valueType === 'boolean') {
        return truncate(value, TRUNCATION.STANDARD, TRUNCATION.SUFFIX_VERBOSE, false);
    }
    const jsonStr = JSON.stringify(truncateRecursive(value), null, 2);
    const [firstLine, ...restLines] = jsonStr.split('\n');
    if (restLines.length === 0) {
        return jsonStr;
    }
    return [firstLine, ...restLines.map(line => indentPrefix + line)].join('\n');
};
|
|
219
|
+
/**
 * Render the banner block at the top of a text-format trace. Handles both
 * TraceEvent roots (workflowId/timestamp present) and DebugNode roots
 * (name/startTime/status fields).
 */
const formatHeader = (root) => {
    const isTrace = isTraceEvent(root);
    const workflowName = isTrace ? root.workflowName : (getNodeName(root) || 'Unknown');
    const workflowId = isTrace ? root.workflowId : 'N/A';
    const timestamp = isTrace ? root.timestamp : root.startTime;
    const phase = root.phase;
    // DebugNodes carry an explicit status field; TraceEvents only a phase.
    const status = isTrace ? undefined : root.status;
    const lines = [
        HEADER_DIVIDER,
        `Workflow: ${workflowName}`,
        `Workflow ID: ${workflowId}`
    ];
    if (isValidTimestamp(timestamp)) {
        lines.push(`Start Time: ${new Date(timestamp).toISOString()}`);
    }
    if (root.duration) {
        lines.push(`Duration: ${formatDuration(root.duration)}`);
    }
    const failedWithError = (phase === 'error' || status === 'failed') && root.error;
    if (failedWithError) {
        lines.push('Status: Failed', `Error: ${getErrorMessage(root.error)}`);
    } else if (phase === 'end' || status === 'completed') {
        lines.push('Status: Completed');
    } else {
        lines.push('Status: In Progress');
    }
    lines.push(HEADER_DIVIDER);
    return lines.join('\n');
};
|
|
250
|
+
// Render the flat event list as a fixed-width timeline table; the time
// column shows only the HH:mm:ss.SSS portion of the ISO timestamp.
const formatEventsTable = (events) => {
    const table = new Table({
        head: ['Time', 'Event', 'Phase', 'Duration', 'Details'],
        style: { head: ['cyan'] },
        colWidths: [20, 25, 10, 12, null],
        wordWrap: true
    });
    const rows = events.map(event => [
        new Date(event.timestamp).toISOString().substring(11, 23),
        getEventName(event),
        formatPhase(event.phase),
        event.duration ? formatDuration(event.duration) : '-',
        formatDetails(event.details)
    ]);
    table.push(...rows);
    return table.toString();
};
|
|
268
|
+
/**
 * Recursively render the execution tree rooted at `node`, one indented
 * line per node, with error and Input/Output detail lines interleaved.
 */
const formatTree = (node, depth) => {
    const indent = ' '.repeat(depth);
    const marker = depth === 0 ? '' : TREE_CHARS.BRANCH;
    const info = extractNodeInfo(node);
    const lines = [`${indent}${marker} ${info.name} ${info.phase}${info.duration}`];
    if (node.error) {
        lines.push(`${indent} ${TREE_CHARS.LAST.trim()} ERROR: ${getErrorMessage(node.error)}`);
    }
    if (node.details && typeof node.details === 'object') {
        lines.push(...formatTreeDetails(node.details, depth + 1));
    }
    for (const child of node.children ?? []) {
        lines.push(...formatTree(child, depth + 1));
    }
    return lines;
};
|
|
286
|
+
/**
 * Collect the indented detail lines (timing, duration, input/output,
 * error) for one debug node. Returns [] for non-object nodes; appends a
 * blank separator line whenever any details were produced.
 */
const getDebugNodeDetails = (node, prefix) => {
    if (node === null || typeof node !== 'object') {
        return [];
    }
    const lines = [];
    const pushTimestampLine = (label, raw) => {
        if (!isValidTimestamp(raw)) {
            return;
        }
        const date = new Date(raw);
        if (!Number.isNaN(date.getTime())) {
            lines.push(`${prefix}${colors.label(label)} ${date.toISOString()}`);
        }
    };
    // `timestamp` is a fallback start marker on some node shapes.
    pushTimestampLine('Started:', node.startedAt || node.timestamp);
    pushTimestampLine('Ended:', node.endedAt);
    if (typeof node.startedAt === 'number' && typeof node.endedAt === 'number') {
        // Prefer computing duration from the two numeric timestamps.
        lines.push(`${prefix}${colors.label('Duration:')} ${formatDuration(node.endedAt - node.startedAt)}`);
    } else if (node.duration) {
        lines.push(`${prefix}${colors.label('Duration:')} ${formatDuration(node.duration)}`);
    }
    if (node.input != null) {
        // Continuation lines align under the text after "Input: " (7 chars).
        lines.push(`${prefix}${colors.label('Input:')} ${formatValueWithIndent(node.input, prefix + ' '.repeat(7))}`);
    }
    if (node.output != null) {
        // "Output: " is 8 chars wide.
        lines.push(`${prefix}${colors.label('Output:')} ${formatValueWithIndent(node.output, prefix + ' '.repeat(8))}`);
    }
    if (node.error) {
        lines.push(`${prefix}${colors.error('Error:')} ${colors.error(getErrorMessage(node.error))}`);
    }
    if (lines.length > 0) {
        lines.push('');
    }
    return lines;
};
|
|
326
|
+
/**
 * Recursively build the box-drawing tree lines for a debug node. `prefix`
 * accumulates the vertical guides of the ancestors; `isLast` selects the
 * └─ vs ├─ connector and the matching continuation prefix.
 */
const buildDebugTreeLines = (node, depth, isLast, prefix) => {
    if (node == null) {
        return [];
    }
    const isRoot = depth === 0;
    const connector = isRoot ? '' : (isLast ? TREE_CHARS.LAST : TREE_CHARS.BRANCH);
    const lines = [(isRoot ? '' : prefix + connector) + getDebugNodeInfo(node)];
    if (typeof node === 'object') {
        // Detail lines sit under the label, aligned past the connector.
        const detailPrefix = isRoot ? ' ' : prefix + (isLast ? TREE_CHARS.DETAIL_LAST : TREE_CHARS.DETAIL_BRANCH);
        lines.push(...getDebugNodeDetails(node, detailPrefix));
        const children = Array.isArray(node.children) ? node.children : [];
        const childPrefix = isRoot ? '' : prefix + (isLast ? TREE_CHARS.SPACE : TREE_CHARS.VERTICAL);
        children.forEach((child, index) => {
            const isLastChild = index === children.length - 1;
            lines.push(...buildDebugTreeLines(child, depth + 1, isLastChild, childPrefix));
        });
    }
    return lines;
};
|
|
356
|
+
// Assemble the full text rendering: header banner, timeline table, and
// execution tree — each section emitted only when its data is present.
const formatAsText = (trace) => {
    const sections = [];
    if (trace.root) {
        sections.push(formatHeader(trace.root), '');
    }
    if (trace.events && trace.events.length > 0) {
        sections.push('Execution Timeline:', formatEventsTable(trace.events), '');
    }
    if (trace.root) {
        sections.push('Execution Tree:', ...formatTree(trace.root, 0));
    }
    return sections.join('\n');
};
|
|
369
|
+
/**
 * Format trace data (raw JSON string or already-parsed object) as either
 * pretty-printed JSON or the human-readable text rendering.
 */
export function format(traceData, outputFormat = 'text') {
    const trace = typeof traceData === 'string' ? JSON.parse(traceData) : traceData;
    return outputFormat === 'json' ? formatOutput(trace, 'json') : formatAsText(trace);
}
|
|
376
|
+
/**
 * Compute aggregate statistics for a trace (string or object): total
 * duration, event/step/activity counts, and whether any event errored.
 */
export function getSummary(traceData) {
    const trace = typeof traceData === 'string' ? JSON.parse(traceData) : traceData;
    const events = trace.events ?? [];
    const countByKind = (kind) => events.filter(event => event.kind === kind).length;
    return {
        totalDuration: trace.root?.duration || 0,
        totalEvents: events.length || 0,
        totalSteps: countByKind('step'),
        totalActivities: countByKind('activity'),
        hasErrors: events.some(event => event.phase === 'error')
    };
}
|
|
400
|
+
/**
 * Render a debug node (and its descendants) as a newline-joined
 * box-drawing tree string.
 */
export function displayDebugTree(node) {
    const treeLines = buildDebugTreeLines(node, 0, false, '');
    return treeLines.join('\n');
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@output.ai/cli",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.4.0",
|
|
4
4
|
"description": "CLI for Output.ai workflow generation",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/index.js",
|
|
@@ -30,6 +30,7 @@
|
|
|
30
30
|
"change-case": "5.4.4",
|
|
31
31
|
"cli-progress": "3.12.0",
|
|
32
32
|
"cli-table3": "0.6.5",
|
|
33
|
+
"date-fns": "4.1.0",
|
|
33
34
|
"dotenv": "16.4.7",
|
|
34
35
|
"handlebars": "4.7.8",
|
|
35
36
|
"json-schema-library": "10.3.0",
|
|
@@ -1,23 +0,0 @@
|
|
|
1
|
-
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
|
2
|
-
vi.mock('../../api/generated/api.js', () => ({
|
|
3
|
-
getWorkflowIdOutput: vi.fn()
|
|
4
|
-
}));
|
|
5
|
-
describe('workflow output command', () => {
|
|
6
|
-
beforeEach(() => {
|
|
7
|
-
vi.clearAllMocks();
|
|
8
|
-
});
|
|
9
|
-
describe('command definition', () => {
|
|
10
|
-
it('should export a valid OCLIF command', async () => {
|
|
11
|
-
const WorkflowOutput = (await import('./output.js')).default;
|
|
12
|
-
expect(WorkflowOutput).toBeDefined();
|
|
13
|
-
expect(WorkflowOutput.description).toContain('Get workflow execution output');
|
|
14
|
-
expect(WorkflowOutput.args).toHaveProperty('workflowId');
|
|
15
|
-
expect(WorkflowOutput.flags).toHaveProperty('format');
|
|
16
|
-
});
|
|
17
|
-
it('should have correct flag configuration', async () => {
|
|
18
|
-
const WorkflowOutput = (await import('./output.js')).default;
|
|
19
|
-
expect(WorkflowOutput.flags.format.options).toEqual(['json', 'text']);
|
|
20
|
-
expect(WorkflowOutput.flags.format.default).toBe('text');
|
|
21
|
-
});
|
|
22
|
-
});
|
|
23
|
-
});
|
|
File without changes
|