@output.ai/cli 0.0.6 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,92 +1,17 @@
  import { describe, it, expect, beforeEach, vi } from 'vitest';
- import { ensureOutputAIStructure, generatePlanName, writePlanFile, updateAgentTemplates } from './workflow_planner.js';
- import { checkAgentStructure, initializeAgentConfig } from './coding_agents.js';
+ import { generatePlanName, writePlanFile, updateAgentTemplates } from './workflow_planner.js';
+ import { initializeAgentConfig } from './coding_agents.js';
  import { generateText } from 'local_llm';
  import { loadPrompt } from 'local_prompt';
- import { confirm } from '@inquirer/prompts';
  import fs from 'node:fs/promises';
  vi.mock('./coding_agents.js');
  vi.mock('local_llm');
  vi.mock('local_prompt');
- vi.mock('@inquirer/prompts');
  vi.mock('node:fs/promises');
  describe('workflow-planner service', () => {
  beforeEach(() => {
  vi.clearAllMocks();
  });
- describe('ensureOutputAIStructure', () => {
- it('should call agents init when .outputai does not exist', async () => {
- vi.mocked(checkAgentStructure).mockResolvedValue({
- dirExists: false,
- missingFiles: [
- '.outputai/AGENTS.md',
- '.outputai/agents/workflow_planner.md',
- '.outputai/commands/plan_workflow.md',
- 'CLAUDE.md',
- '.claude/agents/workflow_planner.md',
- '.claude/commands/plan_workflow.md'
- ],
- isComplete: false
- });
- vi.mocked(initializeAgentConfig).mockResolvedValue();
- await ensureOutputAIStructure('/test/project');
- expect(checkAgentStructure).toHaveBeenCalledWith('/test/project');
- expect(initializeAgentConfig).toHaveBeenCalledWith(expect.objectContaining({
- projectRoot: expect.any(String),
- force: false,
- agentProvider: 'claude-code'
- }));
- });
- it('should skip initialization if .outputai exists', async () => {
- vi.mocked(checkAgentStructure).mockResolvedValue({
- dirExists: true,
- missingFiles: [],
- isComplete: true
- });
- await ensureOutputAIStructure('/test/project');
- expect(checkAgentStructure).toHaveBeenCalledWith('/test/project');
- expect(initializeAgentConfig).not.toHaveBeenCalled();
- });
- it('should throw error if agents init fails', async () => {
- vi.mocked(checkAgentStructure).mockResolvedValue({
- dirExists: false,
- missingFiles: [
- '.outputai/AGENTS.md',
- '.outputai/agents/workflow_planner.md',
- '.outputai/commands/plan_workflow.md',
- 'CLAUDE.md',
- '.claude/agents/workflow_planner.md',
- '.claude/commands/plan_workflow.md'
- ],
- isComplete: false
- });
- vi.mocked(initializeAgentConfig).mockRejectedValue(new Error('Init command failed'));
- await expect(ensureOutputAIStructure('/test/project')).rejects.toThrow('Init command failed');
- });
- it('should prompt user when some files are missing', async () => {
- vi.mocked(checkAgentStructure).mockResolvedValue({
- dirExists: true,
- missingFiles: ['.outputai/agents/workflow_planner.md'],
- isComplete: false
- });
- vi.mocked(confirm).mockResolvedValue(true);
- vi.mocked(initializeAgentConfig).mockResolvedValue();
- await ensureOutputAIStructure('/test/project');
- expect(confirm).toHaveBeenCalled();
- expect(initializeAgentConfig).toHaveBeenCalledWith(expect.objectContaining({
- force: true
- }));
- });
- it('should throw error when user declines partial init', async () => {
- vi.mocked(checkAgentStructure).mockResolvedValue({
- dirExists: true,
- missingFiles: ['.outputai/agents/workflow_planner.md'],
- isComplete: false
- });
- vi.mocked(confirm).mockResolvedValue(false);
- await expect(ensureOutputAIStructure('/test/project')).rejects.toThrow('Agent configuration incomplete');
- });
- });
  describe('generatePlanName', () => {
  it('should generate plan name with date prefix using LLM', async () => {
  const mockPrompt = {
@@ -0,0 +1,246 @@
+ ---
+ argument-hint: [workflow-plan-file-path] [workflow-name] [workflow-directory]
+ description: Workflow Implementation Command for Output SDK
+ version: 0.0.1
+ model: claude-opus-4-1
+ ---
+
+ Your task is to implement an Output.ai workflow based on a provided plan document.
+
+ The workflow skeleton has already been created at: `$3` (if not it should be)
+
+ Please read the plan file and implement the workflow according to its specifications.
+
+ Use the todo tool to track your progress through the implementation process.
+
+ # Implementation Rules
+
+ ## Overview
+
+ Implement the workflow described in the plan document, following Output SDK patterns and best practices.
+
+ <pre_flight_check>
+ EXECUTE: @.outputai/instructions/meta/pre_flight.md
+ </pre_flight_check>
+
+ <process_flow>
+
+ <step number="1" name="plan_analysis">
+
+ ### Step 1: Plan Analysis
+
+ Read and understand the plan document.
+
+ 1. Read the plan file from: `$1`
+ 2. Identify the workflow name, description, and purpose
+ 3. Extract input and output schema definitions
+ 4. List all required steps and their relationships
+ 5. Note any LLM-based steps that require prompt templates
+ 6. Understand error handling and retry requirements
+
+ </step>
+
+ <step number="2" name="workflow_implementation">
+
+ ### Step 2: Workflow Implementation
+
+ Update `$3/workflow.ts` with the workflow definition.
+
+ <implementation_checklist>
+ - Import required dependencies (workflow, z from 'output.ai/core')
+ - Define inputSchema based on plan specifications
+ - Define outputSchema based on plan specifications
+ - Import step functions from steps.ts
+ - Implement workflow function with proper orchestration
+ - Handle conditional logic if specified in plan
+ - Add proper error handling
+ </implementation_checklist>
+
+ <workflow_template>
+ ```typescript
+ import { workflow, z } from 'output.ai/core';
+ import { stepName } from './steps.js';
+
+ const inputSchema = z.object( {
+ // Define based on plan
+ } );
+
+ const outputSchema = z.object( {
+ // Define based on plan
+ } );
+
+ export default workflow( {
+ name: '$2',
+ description: 'Description from plan',
+ inputSchema,
+ outputSchema,
+ fn: async input => {
+ // Implement orchestration logic from plan
+ const result = await stepName( input );
+ return { result };
+ }
+ } );
+ ```
+ </workflow_template>
+
+ </step>
+
+ <step number="3" name="steps_implementation">
+
+ ### Step 3: Steps Implementation
+
+ Update `$3/steps.ts` with all step definitions from the plan.
+
+ <implementation_checklist>
+ - Import required dependencies (step, z from 'output.ai/core')
+ - Implement each step with proper schema validation
+ - Add error handling and retry logic as specified
+ - Ensure step names match plan specifications
+ - Add descriptive comments for complex logic
+ </implementation_checklist>
+
+ <step_template>
+ ```typescript
+ import { step, z } from 'output.ai/core';
+
+ export const stepName = step( {
+ name: 'stepName',
+ description: 'Description from plan',
+ inputSchema: z.object( {
+ // Define based on plan
+ } ),
+ outputSchema: z.object( {
+ // Define based on plan
+ } ),
+ fn: async input => {
+ // Implement step logic from plan
+ return output;
+ }
+ } );
+ ```
+ </step_template>
+
+ </step>
+
+ <step number="4" name="prompt_templates">
+
+ ### Step 4: Prompt Templates (if needed)
+
+ If the plan includes LLM-based steps, create prompt templates in `$3/prompts/`.
+
+ <decision_tree>
+ IF plan_includes_llm_steps:
+ CREATE prompt_templates
+ UPDATE steps.ts to use loadPrompt and generateText
+ ELSE:
+ SKIP to step 5
+ </decision_tree>
+
+ <llm_step_template>
+ ```typescript
+ import { step, z } from 'output.ai/core';
+ import { loadPrompt } from 'output.ai/prompt';
+ import { generateText } from 'output.ai/llm';
+
+ export const llmStep = step( {
+ name: 'llmStep',
+ description: 'LLM-based step',
+ inputSchema: z.object( {
+ param: z.string()
+ } ),
+ outputSchema: z.string(),
+ fn: async ( { param } ) => {
+ const prompt = loadPrompt( 'prompt_name@v1', { param } );
+ const response = await generateText( { prompt } );
+ return response;
+ }
+ } );
+ ```
+ </llm_step_template>
+
+ <prompt_file_template>
+ ```
+ ---
+ provider: anthropic
+ model: claude-sonnet-4-20250514
+ temperature: 0.7
+ ---
+
+ <assistant>
+ You are a helpful assistant.
+ </assistant>
+
+ <user>
+ {{ instructions }}
+ </user>
+ ```
+ </prompt_file_template>
+
+ </step>
+
+ <step number="5" name="readme_update">
+
+ ### Step 5: README Update
+
+ Update `$3/README.md` with workflow-specific documentation.
+
+ <documentation_requirements>
+ - Update workflow name and description
+ - Document input schema with examples
+ - Document output schema with examples
+ - Explain each step's purpose
+ - Provide usage examples
+ - Document any prerequisites or setup requirements
+ - Include testing instructions
+ </documentation_requirements>
+
+ </step>
+
+ <step number="6" name="validation">
+
+ ### Step 6: Implementation Validation
+
+ Verify the implementation is complete and correct.
+
+ <validation_checklist>
+ - All steps from plan are implemented
+ - Input/output schemas match plan specifications
+ - Workflow orchestration logic is correct
+ - Error handling is in place
+ - LLM prompts are created (if needed)
+ - README is updated with accurate information
+ - Code follows Output SDK patterns
+ - TypeScript types are properly defined
+ </validation_checklist>
+
+ </step>
+
+ <step number="7" name="post_flight_check">
+
+ ### Step 7: Post-Flight Check
+
+ Verify the implementation is ready for use.
+
+ <post_flight_check>
+ EXECUTE: @.outputai/instructions/meta/post_flight.md
+ </post_flight_check>
+
+ </step>
+
+ </process_flow>
+
+ <post_flight_check>
+ EXECUTE: @.outputai/instructions/meta/post_flight.md
+ </post_flight_check>
+
+ ---- START ----
+
+ Workflow Name: $2
+
+ Workflow Directory: $3
+
+ Plan File Path: $1
+
+ Additional Instructions:
+
+ $ARGUMENTS
@@ -1,75 +1,46 @@
- import { step } from '@output.ai/core';
+ import { step, z } from '@output.ai/core';
  import { loadPrompt } from '@output.ai/prompt';
  import { generateText } from '@output.ai/llm';
- import type { Prompt } from '@output.ai/llm';
-
- // Define input/output schemas for the LLM step
- const llmInputSchema = {
- type: 'object',
- properties: {
- userInput: { type: 'string' }
- },
- required: ['userInput']
- };
-
- const llmOutputSchema = {
- type: 'string'
- };

  // Example step using LLM
  export const exampleLLMStep = step( {
  name: 'exampleLLMStep',
- description: 'Example step that uses LLM',
- inputSchema: llmInputSchema,
- outputSchema: llmOutputSchema,
- fn: async ( input: { userInput: string } ): Promise<string> => {
- // Load the prompt template
- const prompt = loadPrompt( 'prompt@v1', {
- userInput: input.userInput
- } );
-
- // Generate text using LLM
- const response = await generateText( prompt as Prompt );
-
+ description: 'Example step that uses LLM to generate text',
+ inputSchema: z.object( {
+ userInput: z.string()
+ } ),
+ outputSchema: z.string(),
+ fn: async ( { userInput } ) => {
+ const prompt = loadPrompt( 'prompt@v1', { userInput } );
+ const response = await generateText( { prompt } );
  return response;
  }
  } );

- // Define schemas for data processing step
- const processDataInputSchema = {
- type: 'object',
- properties: {
- value: { type: 'number' },
- type: { type: 'string' }
- },
- required: ['value', 'type']
- };
-
- const processDataOutputSchema = {
- type: 'object',
- properties: {
- processed: { type: 'boolean' },
- timestamp: { type: 'string' },
- data: { type: 'object' }
- },
- required: ['processed', 'timestamp', 'data']
- };
-
  // Example data processing step
  export const processDataStep = step( {
  name: 'processDataStep',
- description: 'Process input data',
- inputSchema: processDataInputSchema,
- outputSchema: processDataOutputSchema,
- fn: async ( input: { value: number; type: string } ) => {
+ description: 'Process input data and return structured result',
+ inputSchema: z.object( {
+ value: z.number(),
+ type: z.string()
+ } ),
+ outputSchema: z.object( {
+ processed: z.boolean(),
+ timestamp: z.string(),
+ data: z.object( {
+ value: z.number(),
+ type: z.string()
+ } )
+ } ).strict(),
+ fn: async ( { value, type } ) => {
  // TODO: Implement your step logic here
- console.log( 'Processing data:', input );
+ console.log( 'Processing data:', { value, type } );

- // Example processing
  return {
  processed: true,
  timestamp: new Date().toISOString(),
- data: input
+ data: { value, type }
  };
  }
  } );
@@ -1,67 +1,41 @@
- import { workflow } from '@output.ai/core';
+ import { workflow, z } from '@output.ai/core';
  import { exampleLLMStep, processDataStep } from './steps.js';

- // Define the input schema for the workflow
- const inputSchema = {
- type: 'object',
- properties: {
- prompt: {
- type: 'string',
- description: 'The prompt to send to the LLM'
- },
- data: {
- type: 'object',
- properties: {
- value: { type: 'number' },
- type: { type: 'string' }
- },
- required: ['value', 'type']
- }
- },
- required: ['prompt']
- };
-
- // Define the output schema for the workflow
- const outputSchema = {
- type: 'object',
- properties: {
- llmResponse: { type: 'string' },
- processedData: {
- type: 'object',
- properties: {
- processed: { type: 'boolean' },
- timestamp: { type: 'string' },
- data: { type: 'object' }
- }
- },
- message: { type: 'string' }
- },
- required: ['llmResponse', 'processedData', 'message']
- };
+ const inputSchema = z.object( {
+ prompt: z.string().describe( 'The prompt to send to the LLM' ),
+ data: z.object( {
+ value: z.number(),
+ type: z.string()
+ } ).optional()
+ } );

- // Define the input type based on the schema
- interface WorkflowInput {
- prompt: string;
- data?: {
- value: number;
- type: string;
- };
- }
+ const outputSchema = z.object( {
+ llmResponse: z.string(),
+ processedData: z.object( {
+ processed: z.boolean(),
+ timestamp: z.string(),
+ data: z.object( {
+ value: z.number(),
+ type: z.string()
+ } )
+ } ),
+ message: z.string()
+ } ).strict();

  export default workflow( {
  name: '{{workflowName}}',
  description: '{{description}}',
  inputSchema,
  outputSchema,
- fn: async ( input: WorkflowInput ) => {
+ fn: async ( { prompt, data } ) => {
  // Call the LLM step with input from workflow
  const llmResponse = await exampleLLMStep( {
- userInput: input.prompt
+ userInput: prompt
  } );

  // Process data if provided, otherwise use defaults
  const processedData = await processDataStep(
- input.data || { value: 42, type: 'default' }
+ data || { value: 42, type: 'default' }
  );

  return {
@@ -0,0 +1,20 @@
+ /**
+ * Domain-specific types for improved type safety
+ */
+ export type TodoStatus = 'pending' | 'in_progress' | 'completed';
+ export type FileMappingType = 'template' | 'symlink' | 'copy';
+ export type InstructionsType = 'plan' | 'build';
+ export interface Todo {
+ content: string;
+ activeForm: string;
+ status: TodoStatus;
+ }
+ export interface ValidationResult {
+ isValid: boolean;
+ errors: string[];
+ warnings: string[];
+ }
+ export interface SystemValidation {
+ missingCommands: string[];
+ hasIssues: boolean;
+ }
@@ -0,0 +1,4 @@
+ /**
+ * Domain-specific types for improved type safety
+ */
+ export {};
@@ -9,7 +9,7 @@ export declare const TEMPLATE_DIRS: {
  * Default output directories
  */
  export declare const DEFAULT_OUTPUT_DIRS: {
- readonly workflows: "output-workflows/src";
+ readonly workflows: "src";
  };
  /**
  * Resolve the output directory path
@@ -13,7 +13,7 @@ export const TEMPLATE_DIRS = {
  * Default output directories
  */
  export const DEFAULT_OUTPUT_DIRS = {
- workflows: 'output-workflows/src'
+ workflows: 'src'
  };
  /**
  * Resolve the output directory path
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@output.ai/cli",
- "version": "0.0.6",
+ "version": "0.0.7",
  "description": "CLI for Output.ai workflow generation",
  "type": "module",
  "main": "dist/index.js",