@output.ai/cli 0.5.0 → 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/dist/services/env_configurator.d.ts +6 -2
  2. package/dist/services/env_configurator.js +11 -4
  3. package/dist/services/env_configurator.spec.js +89 -16
  4. package/dist/services/messages.js +4 -4
  5. package/dist/services/workflow_generator.spec.js +0 -1
  6. package/dist/templates/agent_instructions/AGENTS.md.template +7 -7
  7. package/dist/templates/agent_instructions/agents/workflow_debugger.md.template +11 -11
  8. package/dist/templates/agent_instructions/commands/debug_workflow.md.template +13 -13
  9. package/dist/templates/agent_instructions/commands/plan_workflow.md.template +1 -1
  10. package/dist/templates/agent_instructions/skills/output-error-direct-io/SKILL.md.template +2 -2
  11. package/dist/templates/agent_instructions/skills/output-error-http-client/SKILL.md.template +2 -2
  12. package/dist/templates/agent_instructions/skills/output-error-missing-schemas/SKILL.md.template +2 -2
  13. package/dist/templates/agent_instructions/skills/output-error-nondeterminism/SKILL.md.template +3 -3
  14. package/dist/templates/agent_instructions/skills/output-error-try-catch/SKILL.md.template +2 -2
  15. package/dist/templates/agent_instructions/skills/output-error-zod-import/SKILL.md.template +2 -2
  16. package/dist/templates/agent_instructions/skills/output-services-check/SKILL.md.template +3 -3
  17. package/dist/templates/agent_instructions/skills/output-workflow-list/SKILL.md.template +9 -9
  18. package/dist/templates/agent_instructions/skills/output-workflow-result/SKILL.md.template +33 -33
  19. package/dist/templates/agent_instructions/skills/output-workflow-run/SKILL.md.template +25 -25
  20. package/dist/templates/agent_instructions/skills/output-workflow-runs-list/SKILL.md.template +15 -15
  21. package/dist/templates/agent_instructions/skills/output-workflow-start/SKILL.md.template +35 -35
  22. package/dist/templates/agent_instructions/skills/output-workflow-status/SKILL.md.template +26 -26
  23. package/dist/templates/agent_instructions/skills/output-workflow-stop/SKILL.md.template +19 -19
  24. package/dist/templates/agent_instructions/skills/output-workflow-trace/SKILL.md.template +7 -7
  25. package/dist/templates/project/README.md.template +1 -1
  26. package/package.json +2 -2
  27. package/dist/templates/workflow/.env.template +0 -7
  28. package/dist/templates/project/{.env.template → .env.example.template} +0 -0
@@ -2,10 +2,14 @@
  * Interactively configures environment variables for a project by prompting the user
  * to provide values for empty variables or variables marked as secrets.
  *
- * @param projectPath - The absolute path to the project directory containing the .env file
+ * This function reads from .env.example and, when the user confirms configuration,
+ * copies it to .env before prompting for values. The .env.example file remains
+ * unchanged as a template for other developers.
+ *
+ * @param projectPath - The absolute path to the project directory containing the .env.example file
  * @param skipPrompt - If true, skips the configuration prompt and returns false immediately
  * @returns A promise that resolves to true if environment variables were successfully configured,
- * false if configuration was skipped (no .env file, user declined, no variables to configure,
+ * false if configuration was skipped (no .env.example file, user declined, no variables to configure,
  * or an error occurred)
  */
  export declare function configureEnvironmentVariables(projectPath: string, skipPrompt?: boolean): Promise<boolean>;
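From a caller's perspective, the updated declaration is used the same way as before; only the file it looks for changes. A minimal TypeScript sketch for orientation (the import specifier and surrounding logic are illustrative, not part of this diff):

```ts
// Illustrative usage only; the import path below is hypothetical.
import { configureEnvironmentVariables } from './services/env_configurator.js';

async function setUpProject(projectPath: string): Promise<void> {
  // skipPrompt defaults to false; pass true to bypass the interactive prompt.
  const configured = await configureEnvironmentVariables(projectPath);
  if (!configured) {
    // No .env.example found, the user declined, nothing needed configuring,
    // or an error occurred (see the @returns description above).
    console.warn('environment not configured');
  }
}
```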
@@ -115,10 +115,14 @@ async function writeEnvFile(filePath, variables) {
  * Interactively configures environment variables for a project by prompting the user
  * to provide values for empty variables or variables marked as secrets.
  *
- * @param projectPath - The absolute path to the project directory containing the .env file
+ * This function reads from .env.example and, when the user confirms configuration,
+ * copies it to .env before prompting for values. The .env.example file remains
+ * unchanged as a template for other developers.
+ *
+ * @param projectPath - The absolute path to the project directory containing the .env.example file
  * @param skipPrompt - If true, skips the configuration prompt and returns false immediately
  * @returns A promise that resolves to true if environment variables were successfully configured,
- * false if configuration was skipped (no .env file, user declined, no variables to configure,
+ * false if configuration was skipped (no .env.example file, user declined, no variables to configure,
  * or an error occurred)
  */
  export async function configureEnvironmentVariables(projectPath, skipPrompt = false) {
@@ -126,12 +130,13 @@ export async function configureEnvironmentVariables(projectPath, skipPrompt = fa
  return false;
  }
  ux.stdout('configuring environment variables...');
+ const envExamplePath = path.join(projectPath, '.env.example');
  const envPath = path.join(projectPath, '.env');
  try {
- await fs.access(envPath);
+ await fs.access(envExamplePath);
  }
  catch {
- ux.stdout(ux.colorize('red', '🔴 .env file does not exist, nothing to configure'));
+ ux.stdout(ux.colorize('red', '.env.example file does not exist, nothing to configure'));
  return false;
  }
  const shouldConfigure = await confirm({
@@ -142,6 +147,8 @@ export async function configureEnvironmentVariables(projectPath, skipPrompt = fa
  return false;
  }
  try {
+ // Copy .env.example to .env before configuring
+ await fs.copyFile(envExamplePath, envPath);
  const variables = await parseEnvFile(envPath);
  const variablesToConfigure = variables.filter(v => isEmpty(v.value) || v.isSecret);
  if (variablesToConfigure.length === 0) {
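Taken together, these hunks switch the configurator from editing an existing .env in place to treating .env.example as the template: it is checked for existence, copied to .env only after the user confirms, and left untouched as a reference for other developers. A minimal sketch of that flow, assuming Node's fs/promises and path plus confirm from @inquirer/prompts (anything not shown in the hunks above is hypothetical):

```js
import { promises as fs } from 'node:fs';
import path from 'node:path';
import { confirm } from '@inquirer/prompts';

// Simplified illustration of the 0.5.2 flow; not the package's actual code.
async function configureFromExample(projectPath) {
  const envExamplePath = path.join(projectPath, '.env.example');
  const envPath = path.join(projectPath, '.env');
  try {
    await fs.access(envExamplePath); // no template means nothing to configure
  } catch {
    return false;
  }
  const shouldConfigure = await confirm({ message: 'Configure environment variables now?' });
  if (!shouldConfigure) {
    return false; // declining leaves .env uncreated and .env.example untouched
  }
  await fs.copyFile(envExamplePath, envPath); // overwrites any existing .env
  // ...then prompt for empty or secret values and write them to .env only.
  return true;
}
```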
@@ -5,16 +5,18 @@ import { configureEnvironmentVariables } from './env_configurator.js';
  // Mock inquirer prompts
  vi.mock('@inquirer/prompts', () => ({
  input: vi.fn(),
- confirm: vi.fn()
+ confirm: vi.fn(),
+ password: vi.fn()
  }));
  describe('configureEnvironmentVariables', () => {
- const testState = { tempDir: '', envPath: '' };
+ const testState = { tempDir: '', envExamplePath: '', envPath: '' };
  beforeEach(async () => {
  // Clear all mocks before each test
  vi.clearAllMocks();
  // Create temporary directory for test files
  testState.tempDir = path.join('/tmp', `test-env-${Date.now()}`);
  await fs.mkdir(testState.tempDir, { recursive: true });
+ testState.envExamplePath = path.join(testState.tempDir, '.env.example');
  testState.envPath = path.join(testState.tempDir, '.env');
  });
  afterEach(async () => {
@@ -27,36 +29,74 @@ describe('configureEnvironmentVariables', () => {
  }
  });
  it('should return false if skipPrompt is true', async () => {
- // Create a minimal .env file
- await fs.writeFile(testState.envPath, 'KEY=value');
+ // Create a minimal .env.example file
+ await fs.writeFile(testState.envExamplePath, 'KEY=value');
  const result = await configureEnvironmentVariables(testState.tempDir, true);
  expect(result).toBe(false);
  });
- it('should return false if .env file does not exist', async () => {
+ it('should return false if .env.example file does not exist', async () => {
  const result = await configureEnvironmentVariables(testState.tempDir, false);
  expect(result).toBe(false);
  });
  it('should return false if user declines configuration', async () => {
  const { confirm } = await import('@inquirer/prompts');
  vi.mocked(confirm).mockResolvedValue(false);
- await fs.writeFile(testState.envPath, '# API key\nAPIKEY=');
+ await fs.writeFile(testState.envExamplePath, '# API key\nAPIKEY=');
  const result = await configureEnvironmentVariables(testState.tempDir, false);
  expect(result).toBe(false);
  expect(vi.mocked(confirm)).toHaveBeenCalled();
  });
+ it('should NOT create .env when user declines configuration', async () => {
+ const { confirm } = await import('@inquirer/prompts');
+ vi.mocked(confirm).mockResolvedValue(false);
+ await fs.writeFile(testState.envExamplePath, '# API key\nAPIKEY=');
+ await configureEnvironmentVariables(testState.tempDir, false);
+ // .env should NOT exist when user declines
+ await expect(fs.access(testState.envPath)).rejects.toThrow();
+ // .env.example should still exist
+ await expect(fs.access(testState.envExamplePath)).resolves.toBeUndefined();
+ });
  it('should return false if no empty variables exist', async () => {
  const { confirm } = await import('@inquirer/prompts');
  vi.mocked(confirm).mockResolvedValue(true);
- await fs.writeFile(testState.envPath, 'APIKEY=my-secret-key');
+ await fs.writeFile(testState.envExamplePath, 'APIKEY=my-secret-key');
  const result = await configureEnvironmentVariables(testState.tempDir, false);
  expect(result).toBe(false);
  });
+ it('should copy .env.example to .env when user confirms configuration', async () => {
+ const { input, confirm } = await import('@inquirer/prompts');
+ vi.mocked(confirm).mockResolvedValue(true);
+ vi.mocked(input).mockResolvedValueOnce('sk-proj-123');
+ const originalContent = `# API key
+ APIKEY=`;
+ await fs.writeFile(testState.envExamplePath, originalContent);
+ await configureEnvironmentVariables(testState.tempDir, false);
+ // Both files should exist
+ await expect(fs.access(testState.envExamplePath)).resolves.toBeUndefined();
+ await expect(fs.access(testState.envPath)).resolves.toBeUndefined();
+ });
+ it('should write configured values to .env while leaving .env.example unchanged', async () => {
+ const { input, confirm } = await import('@inquirer/prompts');
+ vi.mocked(confirm).mockResolvedValue(true);
+ vi.mocked(input).mockResolvedValueOnce('sk-proj-123');
+ const originalContent = `# API key
+ APIKEY=`;
+ await fs.writeFile(testState.envExamplePath, originalContent);
+ const result = await configureEnvironmentVariables(testState.tempDir, false);
+ expect(result).toBe(true);
+ // .env should have the configured value
+ const envContent = await fs.readFile(testState.envPath, 'utf-8');
+ expect(envContent).toContain('APIKEY=sk-proj-123');
+ // .env.example should remain unchanged
+ const envExampleContent = await fs.readFile(testState.envExamplePath, 'utf-8');
+ expect(envExampleContent).toBe(originalContent);
+ });
  it('should prompt for empty variables and update .env', async () => {
  const { input, confirm } = await import('@inquirer/prompts');
  vi.mocked(confirm).mockResolvedValue(true);
  vi.mocked(input).mockResolvedValueOnce('sk-proj-123');
  vi.mocked(input).mockResolvedValueOnce('');
- await fs.writeFile(testState.envPath, `# API key for Anthropic
+ await fs.writeFile(testState.envExamplePath, `# API key for Anthropic
  ANTHROPIC_API_KEY=

  # API key for OpenAI
@@ -78,7 +118,7 @@ APIKEY=

  # Another comment
  OTHER=value`;
- await fs.writeFile(testState.envPath, originalContent);
+ await fs.writeFile(testState.envExamplePath, originalContent);
  await configureEnvironmentVariables(testState.tempDir, false);
  const content = await fs.readFile(testState.envPath, 'utf-8');
  expect(content).toContain('# This is a comment');
@@ -90,7 +130,7 @@ OTHER=value`;
  const { input, confirm } = await import('@inquirer/prompts');
  vi.mocked(confirm).mockResolvedValue(true);
  vi.mocked(input).mockResolvedValueOnce('new-key');
- await fs.writeFile(testState.envPath, `APIKEY=your_api_key_here
+ await fs.writeFile(testState.envExamplePath, `APIKEY=your_api_key_here
  EMPTY_KEY=`);
  const result = await configureEnvironmentVariables(testState.tempDir, false);
  expect(result).toBe(true);
@@ -103,21 +143,54 @@ EMPTY_KEY=`);
  const { input, confirm } = await import('@inquirer/prompts');
  vi.mocked(confirm).mockResolvedValue(true);
  vi.mocked(input).mockResolvedValueOnce('new-key');
- await fs.writeFile(testState.envPath, `EXISTING_KEY=existing-value
+ await fs.writeFile(testState.envExamplePath, `EXISTING_KEY=existing-value

  EMPTY_KEY=`);
  await configureEnvironmentVariables(testState.tempDir, false);
  // Should only prompt for EMPTY_KEY, not EXISTING_KEY
  expect(vi.mocked(input)).toHaveBeenCalledTimes(1);
  });
+ it('should handle case where .env already exists (overwrite with copy)', async () => {
+ const { input, confirm } = await import('@inquirer/prompts');
+ vi.mocked(confirm).mockResolvedValue(true);
+ vi.mocked(input).mockResolvedValueOnce('new-configured-value');
+ // Create existing .env with old content
+ await fs.writeFile(testState.envPath, 'OLD_KEY=old-value');
+ // Create .env.example with new content
+ await fs.writeFile(testState.envExamplePath, 'NEW_KEY=');
+ const result = await configureEnvironmentVariables(testState.tempDir, false);
+ expect(result).toBe(true);
+ // .env should be overwritten with .env.example content and configured values
+ const envContent = await fs.readFile(testState.envPath, 'utf-8');
+ expect(envContent).toContain('NEW_KEY=new-configured-value');
+ expect(envContent).not.toContain('OLD_KEY');
+ });
  it('should return false if an error occurs during parsing', async () => {
- // This test verifies error handling by checking behavior when file operations fail
- // The try-catch in configureEnvironmentVariables should handle it gracefully
- await fs.writeFile(testState.envPath, 'KEY=');
- // Delete the file to cause parsing error
- await fs.rm(testState.envPath);
+ const { confirm } = await import('@inquirer/prompts');
+ vi.mocked(confirm).mockResolvedValue(true);
+ await fs.writeFile(testState.envExamplePath, 'KEY=');
+ // Delete the .env.example file after access check but before parsing would happen
+ // We simulate this by deleting during the copy operation
+ const originalCopyFile = fs.copyFile;
+ vi.spyOn(fs, 'copyFile').mockImplementation(async () => {
+ throw new Error('Copy failed');
+ });
  const result = await configureEnvironmentVariables(testState.tempDir, false);
  // Should return false when error occurs
  expect(result).toBe(false);
+ // Restore original function
+ vi.mocked(fs.copyFile).mockImplementation(originalCopyFile);
+ });
+ it('should prompt for SECRET marker values with password input', async () => {
+ const { password, confirm } = await import('@inquirer/prompts');
+ vi.mocked(confirm).mockResolvedValue(true);
+ vi.mocked(password).mockResolvedValueOnce('my-secret-api-key');
+ await fs.writeFile(testState.envExamplePath, `# API Key
+ ANTHROPIC_API_KEY=<SECRET>`);
+ const result = await configureEnvironmentVariables(testState.tempDir, false);
+ expect(result).toBe(true);
+ expect(vi.mocked(password)).toHaveBeenCalledTimes(1);
+ const envContent = await fs.readFile(testState.envPath, 'utf-8');
+ expect(envContent).toContain('ANTHROPIC_API_KEY=my-secret-api-key');
  });
  });
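The new spec cases also pin down the prompting rule implied by the `variablesToConfigure` filter: entries flagged as secrets (for example the `<SECRET>` placeholder) are collected with a masked `password()` prompt, while plain empty values use `input()`. A hedged sketch of that branching; `isSecret` mirrors the filter in the hunks above, while `key` and everything else is illustrative:

```js
import { input, password } from '@inquirer/prompts';

// Illustrative only: how one parsed .env.example entry maps to a prompt type.
async function promptForValue(variable) {
  const message = `Enter value for ${variable.key}:`;
  return variable.isSecret
    ? password({ message }) // masked prompt for <SECRET> placeholders
    : input({ message });   // plain prompt for empty values
}
```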
@@ -166,8 +166,8 @@ export const getProjectSuccessMessage = (folderName, installSuccess, envConfigur
  if (!envConfigured) {
  steps.push({
  step: 'Configure environment variables',
- command: 'Edit .env file',
- note: 'Add your Anthropic/OpenAI API keys'
+ command: 'cp .env.example .env',
+ note: 'Copy .env.example to .env and add your API keys'
  });
  }
  steps.push({
@@ -248,8 +248,8 @@ export const getWorkflowGenerateSuccessMessage = (workflowName, targetDir, files
  },
  {
  step: 'Configure environment',
- command: 'Edit .env file',
- note: 'Add your LLM provider credentials (ANTHROPIC_API_KEY or OPENAI_API_KEY)'
+ command: 'cp .env.example .env',
+ note: 'Copy .env.example to .env and add your LLM provider credentials'
  },
  {
  step: 'Test with example scenario',
@@ -65,7 +65,6 @@ describe('Workflow Generator', () => {
  'workflow.ts',
  'steps.ts',
  'README.md',
- '.env',
  'prompts/prompt@v1.prompt',
  'scenarios/test_input.json'
  ];
@@ -40,17 +40,17 @@ src/workflows/{name}/
  ## Commands

  ```bash
- output dev # Start dev (Temporal:8080, API:3001)
- output workflow list # List workflows
+ npx output dev # Start dev (Temporal:8080, API:3001)
+ npx output workflow list # List workflows

  # Sync execution (waits for result)
- output workflow run <name> --input <JSON|JSON_FILE> # Execute and wait
+ npx output workflow run <name> --input <JSON|JSON_FILE> # Execute and wait

  # Async execution
- output workflow start <name> --input <JSON|JSON_FILE> # Start workflow, returns ID
- output workflow status <workflowId> # Check execution status
- output workflow result <workflowId> # Get result when complete
- output workflow stop <workflowId> # Cancel running workflow
+ npx output workflow start <name> --input <JSON|JSON_FILE> # Start workflow, returns ID
+ npx output workflow status <workflowId> # Check execution status
+ npx output workflow result <workflowId> # Get result when complete
+ npx output workflow stop <workflowId> # Cancel running workflow
  ```

  ## Workflow Pattern
@@ -30,15 +30,15 @@ For detailed command usage, Claude will automatically invoke the relevant skill.

  | Command | Purpose |
  |---------|---------|
- | `output dev` | Start development services |
- | `output workflow list` | List available workflows |
- | `output workflow runs list` | List execution history |
- | `output workflow run` | Execute synchronously |
- | `output workflow start` | Start asynchronously |
- | `output workflow status` | Check execution status |
- | `output workflow result` | Get execution result |
- | `output workflow stop` | Stop running workflow |
- | `output workflow debug` | Analyze execution trace |
+ | `npx output dev` | Start development services |
+ | `npx output workflow list` | List available workflows |
+ | `npx output workflow runs list` | List execution history |
+ | `npx output workflow run` | Execute synchronously |
+ | `npx output workflow start` | Start asynchronously |
+ | `npx output workflow status` | Check execution status |
+ | `npx output workflow result` | Get execution result |
+ | `npx output workflow stop` | Stop running workflow |
+ | `npx output workflow debug` | Analyze execution trace |

  ## Temporal UI

@@ -73,7 +73,7 @@ Match error symptoms to solutions and verify the fix.
  **Agent**: Let me help you debug this workflow. First, let's get the execution trace:

  ```bash
- output workflow debug abc123 --format json
+ npx output workflow debug abc123 --format json
  ```

  This will show us exactly where the workflow failed and what error occurred. While I analyze the trace, you can also check the Temporal UI at http://localhost:8080 for a visual timeline of the execution.
@@ -92,7 +92,7 @@ To:
  import { z } from '@output.ai/core';
  ```

- After making this change, run the workflow again with `output workflow run <workflowName>` to verify the fix.
+ After making this change, run the workflow again with `npx output workflow run <workflowName>` to verify the fix.

  ---
  *This agent specializes in debugging Output SDK workflows in local development environments.*
@@ -47,7 +47,7 @@ curl -s http://localhost:8080 > /dev/null && echo "Temporal UI accessible" || ec
  RUN: docker compose up -d
  WAIT: for services to start (30-60 seconds)
  IF output_dev_not_running:
- RUN: output dev
+ RUN: npx output dev
  WAIT: for services to initialize
  IF all_services_running:
  PROCEED: to step 2
@@ -69,16 +69,16 @@ Identify the failing workflow execution by listing recent runs. The `output-work
  <list_commands>
  ```bash
  # List all recent workflow runs
- output workflow runs list
+ npx output workflow runs list

  # Filter by specific workflow type (if known)
- output workflow runs list <workflowName>
+ npx output workflow runs list <workflowName>

  # Get detailed JSON output for analysis
- output workflow runs list --format json
+ npx output workflow runs list --format json

  # Limit results to most recent
- output workflow runs list --limit 10
+ npx output workflow runs list --limit 10
  ```
  </list_commands>

@@ -98,12 +98,12 @@ Look for:
  NOTE: workflow ID from output
  PROCEED: to step 3
  IF no_runs_found:
- CHECK: workflow exists with `output workflow list`
+ CHECK: workflow exists with `npx output workflow list`
  IF workflow_not_found:
  REPORT: workflow doesn't exist
  SUGGEST: verify workflow name and location
  ELSE:
- SUGGEST: run the workflow with `output workflow run <name>`
+ SUGGEST: run the workflow with `npx output workflow run <name>`
  </decision_tree>

  </step>
@@ -117,10 +117,10 @@ Retrieve and analyze the execution trace for the identified workflow. The `outpu
  <debug_commands>
  ```bash
  # Display execution trace (text format)
- output workflow debug <workflowId>
+ npx output workflow debug <workflowId>

  # Display full untruncated trace (JSON format) - recommended for detailed analysis
- output workflow debug <workflowId> --format json
+ npx output workflow debug <workflowId> --format json
  ```
  </debug_commands>

@@ -174,12 +174,12 @@ Based on the trace analysis, identify the error pattern and suggest targeted fix
  After applying fix:
  ```bash
  # Re-run the workflow to verify
- output workflow run <workflowName> <input>
+ npx output workflow run <workflowName> <input>

  # Or start asynchronously and check result
- output workflow start <workflowName> <input>
- output workflow status <workflowId>
- output workflow result <workflowId>
+ npx output workflow start <workflowName> <input>
+ npx output workflow status <workflowId>
+ npx output workflow result <workflowId>
  ```
  </verification>

@@ -213,7 +213,7 @@ Design the testing strategy for the workflow.

  Generate the complete plan in markdown format.

- Note that every implementation should start with running the cli command `output workflow generate --skeleton` to create the workflow directory structure.
+ Note that every implementation should start with running the cli command `npx output workflow generate --skeleton` to create the workflow directory structure.

  <file_template>
  <header>
@@ -230,8 +230,8 @@ Workflow functions should NOT contain:

  After moving I/O to steps:

- 1. **Run the workflow**: `output workflow run <name> '<input>'`
- 2. **Check the trace**: `output workflow debug <id> --format json`
+ 1. **Run the workflow**: `npx output workflow run <name> '<input>'`
+ 2. **Check the trace**: `npx output workflow debug <id> --format json`
  3. **Verify steps appear**: Look for your I/O steps in the trace
  4. **Confirm no errors**: No determinism warnings or hangs

@@ -287,8 +287,8 @@ grep -rn "got\|node-fetch\|request\|superagent" src/

  After migrating to httpClient:

- 1. **Run the workflow**: `output workflow run <name> '<input>'`
- 2. **Check the trace**: `output workflow debug <id> --format json`
+ 1. **Run the workflow**: `npx output workflow run <name> '<input>'`
+ 2. **Check the trace**: `npx output workflow debug <id> --format json`
  3. **Verify tracing**: HTTP requests should appear in the step trace
  4. **Test retries**: Simulate failures to verify retry behavior

@@ -255,8 +255,8 @@ export const logEvent = step({

  After adding schemas:

- 1. **TypeScript check**: `npm run build` should pass without type errors
- 2. **Runtime test**: `output workflow run <name> '<input>'` should validate correctly
+ 1. **TypeScript check**: `npm run output:workflow:build` should pass without type errors
+ 2. **Runtime test**: `npx output workflow run <name> '<input>'` should validate correctly
  3. **Invalid input test**: Pass invalid data and verify validation errors appear

  ## Related Issues
@@ -216,7 +216,7 @@ Look at your workflow `fn` functions specifically. Non-deterministic code is onl
  ## Verification Steps

  1. **Fix the code** using solutions above
- 2. **Run the workflow**: `output workflow run <name> '<input>'`
+ 2. **Run the workflow**: `npx output workflow run <name> '<input>'`
  3. **Run again with same input**: Result should be identical
  4. **Check for errors**: No "non-deterministic" messages

@@ -238,10 +238,10 @@ If unsure whether code is causing issues:

  ```bash
  # Run the workflow
- output workflow start my-workflow '{"input": "test"}'
+ npx output workflow start my-workflow '{"input": "test"}'

  # Get the workflow ID and run debug to see replay behavior
- output workflow debug <workflowId> --format json
+ npx output workflow debug <workflowId> --format json
  ```

  Look for errors or warnings about non-determinism in the trace.
@@ -216,9 +216,9 @@ Do NOT use FatalError to wrap other errors unless you're certain they shouldn't

  After removing try-catch:

- 1. **Test normal operation**: `output workflow run <name> '<valid-input>'`
+ 1. **Test normal operation**: `npx output workflow run <name> '<valid-input>'`
  2. **Test failure scenarios**: Use input that causes step failures
- 3. **Check retry behavior**: Look for retry attempts in `output workflow debug <id>`
+ 3. **Check retry behavior**: Look for retry attempts in `npx output workflow debug <id>`

  ## Related Issues

@@ -139,13 +139,13 @@ grep -r 'from "zod"' src/
  ### 2. Build the project

  ```bash
- npm run build
+ npm run output:workflow:build
  ```

  ### 3. Run the workflow

  ```bash
- output workflow run <workflowName> '<input>'
+ npx output workflow run <workflowName> '<input>'
  ```

  ## Prevention
@@ -55,7 +55,7 @@ curl -s http://localhost:8080 > /dev/null && echo "Temporal UI accessible" || ec
  ### If services are not running:
  ```bash
  # Start all development services
- output dev
+ npx output dev
  ```

  Wait 30-60 seconds for all services to initialize, then re-run the checks.
@@ -80,7 +80,7 @@ IF docker_not_running:
  WAIT: for Docker to initialize

  IF no_output_containers:
- RUN: output dev
+ RUN: npx output dev
  WAIT: 30-60 seconds for services

  IF api_not_responding:
@@ -105,7 +105,7 @@ docker ps | grep output
  # Output: (empty - no containers)

  # Start services
- output dev
+ npx output dev

  # Wait and verify
  sleep 60
@@ -23,7 +23,7 @@ This skill helps you discover all available workflows in an Output SDK project.
  ### List All Workflows

  ```bash
- output workflow list
+ npx output workflow list
  ```

  This command scans the project and displays all available workflows.
@@ -67,7 +67,7 @@ ls -la src/workflows/<workflowName>/
  **Scenario**: Discover available workflows in a project

  ```bash
- output workflow list
+ npx output workflow list

  # Example output:
  # Name Description Location
@@ -81,18 +81,18 @@ output workflow list

  ```bash
  # Check if "email-sender" workflow exists
- output workflow list | grep email-sender
+ npx output workflow list | grep email-sender

  # If no output, the workflow doesn't exist
  # If found, proceed with running it
- output workflow run email-sender '{"to": "user@example.com"}'
+ npx output workflow run email-sender '{"to": "user@example.com"}'
  ```

  **Scenario**: Explore workflow implementation

  ```bash
  # List workflows
- output workflow list
+ npx output workflow list

  # Find the location and examine it
  cat src/workflows/simple/workflow.ts
@@ -108,10 +108,10 @@ cat src/workflows/simple/workflow.ts
  ### Workflow not showing
  - Check the file exports a valid workflow definition
  - Ensure the workflow file compiles without errors
- - Run `npm run build` to check for TypeScript errors
+ - Run `npm run output:workflow:build` to check for TypeScript errors

  ## Related Commands

- - `output workflow run <name>` - Execute a workflow synchronously
- - `output workflow start <name>` - Start a workflow asynchronously
- - `output workflow runs list` - View execution history
+ - `npx output workflow run <name>` - Execute a workflow synchronously
+ - `npx output workflow start <name>` - Start a workflow asynchronously
+ - `npx output workflow runs list` - View execution history