@output.ai/cli 0.0.6 → 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -3
- package/dist/api/generated/api.d.ts +0 -20
- package/dist/api/generated/api.js +1 -10
- package/dist/commands/workflow/generate.d.ts +1 -0
- package/dist/commands/workflow/generate.js +23 -4
- package/dist/commands/workflow/plan.js +4 -3
- package/dist/commands/workflow/plan.spec.js +5 -3
- package/dist/services/claude_client.d.ts +18 -1
- package/dist/services/claude_client.js +67 -21
- package/dist/services/coding_agents.d.ts +7 -0
- package/dist/services/coding_agents.js +64 -7
- package/dist/services/coding_agents.spec.js +155 -14
- package/dist/services/workflow_builder.d.ts +16 -0
- package/dist/services/workflow_builder.js +85 -0
- package/dist/services/workflow_builder.spec.d.ts +1 -0
- package/dist/services/workflow_builder.spec.js +165 -0
- package/dist/services/workflow_planner.d.ts +0 -5
- package/dist/services/workflow_planner.js +7 -44
- package/dist/services/workflow_planner.spec.js +8 -99
- package/dist/templates/agent_instructions/commands/build_workflow.md.template +247 -0
- package/dist/templates/agent_instructions/commands/plan_workflow.md.template +8 -7
- package/dist/templates/workflow/README.md.template +3 -5
- package/dist/templates/workflow/steps.ts.template +26 -54
- package/dist/templates/workflow/workflow.ts.template +23 -49
- package/dist/types/domain.d.ts +20 -0
- package/dist/types/domain.js +4 -0
- package/dist/utils/paths.d.ts +1 -1
- package/dist/utils/paths.js +1 -1
- package/package.json +18 -23
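
The change that runs through most of these files is the prompt-handling API: the templates and specs stop importing `loadPrompt` from `@output.ai/prompt` (and the `Prompt` type from `@output.ai/llm`) and instead call `generateText` from `@output.ai/llm` with a versioned prompt reference plus a `variables` object. A minimal sketch of the new call shape, assembled from the added lines in the diffs below (the `prompt@v1` reference and `userInput` variable come from the updated `steps.ts` template, not from separate documentation):

```typescript
import { step, z } from '@output.ai/core';
import { generateText } from '@output.ai/llm';

// 0.0.6 loaded a Prompt object via loadPrompt() and passed it to generateText();
// 0.0.8 passes a versioned prompt reference plus variables directly.
export const exampleLLMStep = step( {
  name: 'exampleLLMStep',
  description: 'Example step that uses LLM to generate text',
  inputSchema: z.object( { userInput: z.string() } ),
  outputSchema: z.string(),
  fn: async ( { userInput } ) => {
    const response = await generateText( {
      prompt: 'prompt@v1',
      variables: { userInput }
    } );
    return response;
  }
} );
```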
package/dist/services/workflow_planner.spec.js
CHANGED
@@ -1,113 +1,28 @@
 import { describe, it, expect, beforeEach, vi } from 'vitest';
-import {
-import {
-import { generateText } from '
-import { loadPrompt } from 'local_prompt';
-import { confirm } from '@inquirer/prompts';
+import { generatePlanName, writePlanFile, updateAgentTemplates } from './workflow_planner.js';
+import { initializeAgentConfig } from './coding_agents.js';
+import { generateText } from '@output.ai/llm';
 import fs from 'node:fs/promises';
 vi.mock('./coding_agents.js');
-vi.mock('
-vi.mock('local_prompt');
-vi.mock('@inquirer/prompts');
+vi.mock('@output.ai/llm');
 vi.mock('node:fs/promises');
 describe('workflow-planner service', () => {
   beforeEach(() => {
     vi.clearAllMocks();
   });
-  describe('ensureOutputAIStructure', () => {
-    it('should call agents init when .outputai does not exist', async () => {
-      vi.mocked(checkAgentStructure).mockResolvedValue({
-        dirExists: false,
-        missingFiles: [
-          '.outputai/AGENTS.md',
-          '.outputai/agents/workflow_planner.md',
-          '.outputai/commands/plan_workflow.md',
-          'CLAUDE.md',
-          '.claude/agents/workflow_planner.md',
-          '.claude/commands/plan_workflow.md'
-        ],
-        isComplete: false
-      });
-      vi.mocked(initializeAgentConfig).mockResolvedValue();
-      await ensureOutputAIStructure('/test/project');
-      expect(checkAgentStructure).toHaveBeenCalledWith('/test/project');
-      expect(initializeAgentConfig).toHaveBeenCalledWith(expect.objectContaining({
-        projectRoot: expect.any(String),
-        force: false,
-        agentProvider: 'claude-code'
-      }));
-    });
-    it('should skip initialization if .outputai exists', async () => {
-      vi.mocked(checkAgentStructure).mockResolvedValue({
-        dirExists: true,
-        missingFiles: [],
-        isComplete: true
-      });
-      await ensureOutputAIStructure('/test/project');
-      expect(checkAgentStructure).toHaveBeenCalledWith('/test/project');
-      expect(initializeAgentConfig).not.toHaveBeenCalled();
-    });
-    it('should throw error if agents init fails', async () => {
-      vi.mocked(checkAgentStructure).mockResolvedValue({
-        dirExists: false,
-        missingFiles: [
-          '.outputai/AGENTS.md',
-          '.outputai/agents/workflow_planner.md',
-          '.outputai/commands/plan_workflow.md',
-          'CLAUDE.md',
-          '.claude/agents/workflow_planner.md',
-          '.claude/commands/plan_workflow.md'
-        ],
-        isComplete: false
-      });
-      vi.mocked(initializeAgentConfig).mockRejectedValue(new Error('Init command failed'));
-      await expect(ensureOutputAIStructure('/test/project')).rejects.toThrow('Init command failed');
-    });
-    it('should prompt user when some files are missing', async () => {
-      vi.mocked(checkAgentStructure).mockResolvedValue({
-        dirExists: true,
-        missingFiles: ['.outputai/agents/workflow_planner.md'],
-        isComplete: false
-      });
-      vi.mocked(confirm).mockResolvedValue(true);
-      vi.mocked(initializeAgentConfig).mockResolvedValue();
-      await ensureOutputAIStructure('/test/project');
-      expect(confirm).toHaveBeenCalled();
-      expect(initializeAgentConfig).toHaveBeenCalledWith(expect.objectContaining({
-        force: true
-      }));
-    });
-    it('should throw error when user declines partial init', async () => {
-      vi.mocked(checkAgentStructure).mockResolvedValue({
-        dirExists: true,
-        missingFiles: ['.outputai/agents/workflow_planner.md'],
-        isComplete: false
-      });
-      vi.mocked(confirm).mockResolvedValue(false);
-      await expect(ensureOutputAIStructure('/test/project')).rejects.toThrow('Agent configuration incomplete');
-    });
-  });
   describe('generatePlanName', () => {
     it('should generate plan name with date prefix using LLM', async () => {
-      const mockPrompt = {
-        config: { model: 'claude-sonnet-4-20250514', provider: 'anthropic' },
-        messages: [
-          { role: 'system', content: 'You are a technical writing assistant...' },
-          { role: 'user', content: 'Generate a snake_case plan name...' }
-        ]
-      };
-      vi.mocked(loadPrompt).mockReturnValue(mockPrompt);
       vi.mocked(generateText).mockResolvedValue('customer_order_processing');
       const testDate = new Date(2025, 9, 6);
       const planName = await generatePlanName('A workflow that processes customer orders', testDate);
       expect(planName).toMatch(/^2025_10_06_/);
       expect(planName).toBe('2025_10_06_customer_order_processing');
-      expect(
-
+      expect(generateText).toHaveBeenCalledWith({
+        prompt: 'generate_plan_name@v1',
+        variables: { description: 'A workflow that processes customer orders' }
+      });
     });
     it('should clean and validate LLM response', async () => {
-      const mockPrompt = { config: { provider: 'anthropic', model: 'claude-sonnet-4-20250514' }, messages: [] };
-      vi.mocked(loadPrompt).mockReturnValue(mockPrompt);
       vi.mocked(generateText).mockResolvedValue(' User-Auth & Security!@# ');
       const testDate = new Date(2025, 9, 6);
       const planName = await generatePlanName('User authentication workflow', testDate);
@@ -115,14 +30,10 @@ describe('workflow-planner service', () => {
       expect(planName).toMatch(/^[0-9_a-z]+$/);
     });
     it('should handle LLM errors gracefully', async () => {
-      const mockPrompt = { config: { provider: 'anthropic', model: 'claude-sonnet-4-20250514' }, messages: [] };
-      vi.mocked(loadPrompt).mockReturnValue(mockPrompt);
       vi.mocked(generateText).mockRejectedValue(new Error('API rate limit exceeded'));
       await expect(generatePlanName('Test workflow')).rejects.toThrow('API rate limit exceeded');
     });
     it('should limit plan name length to 50 characters', async () => {
-      const mockPrompt = { config: { provider: 'anthropic', model: 'claude-sonnet-4-20250514' }, messages: [] };
-      vi.mocked(loadPrompt).mockReturnValue(mockPrompt);
       vi.mocked(generateText).mockResolvedValue('this_is_an_extremely_long_plan_name_that_exceeds_the_maximum_allowed_length_for_file_names');
       const testDate = new Date(2025, 9, 6);
       const planName = await generatePlanName('Long workflow description', testDate);
@@ -130,8 +41,6 @@ describe('workflow-planner service', () => {
       expect(namePart.length).toBeLessThanOrEqual(50);
     });
     it('should handle multiple underscores correctly', async () => {
-      const mockPrompt = { config: { provider: 'anthropic', model: 'claude-sonnet-4-20250514' }, messages: [] };
-      vi.mocked(loadPrompt).mockReturnValue(mockPrompt);
       vi.mocked(generateText).mockResolvedValue('user___auth___workflow');
       const testDate = new Date(2025, 9, 6);
       const planName = await generatePlanName('Test', testDate);
package/dist/templates/agent_instructions/commands/build_workflow.md.template
@@ -0,0 +1,247 @@
+---
+argument-hint: [workflow-plan-file-path] [workflow-name] [workflow-directory]
+description: Workflow Implementation Command for Output SDK
+version: 0.0.1
+model: claude-opus-4-1
+---
+
+Your task is to implement an Output.ai workflow based on a provided plan document.
+
+The workflow skeleton has already been created at: `$3` (if not it should be)
+
+Please read the plan file and implement the workflow according to its specifications.
+
+Use the todo tool to track your progress through the implementation process.
+
+# Implementation Rules
+
+## Overview
+
+Implement the workflow described in the plan document, following Output SDK patterns and best practices.
+
+<pre_flight_check>
+EXECUTE: @.outputai/instructions/meta/pre_flight.md
+</pre_flight_check>
+
+<process_flow>
+
+<step number="1" name="plan_analysis">
+
+### Step 1: Plan Analysis
+
+Read and understand the plan document.
+
+1. Read the plan file from: `$1`
+2. Identify the workflow name, description, and purpose
+3. Extract input and output schema definitions
+4. List all required steps and their relationships
+5. Note any LLM-based steps that require prompt templates
+6. Understand error handling and retry requirements
+
+</step>
+
+<step number="2" name="workflow_implementation">
+
+### Step 2: Workflow Implementation
+
+Update `$3/workflow.ts` with the workflow definition.
+
+<implementation_checklist>
+- Import required dependencies (workflow, z from '@output.ai/core')
+- Define inputSchema based on plan specifications
+- Define outputSchema based on plan specifications
+- Import step functions from steps.ts
+- Implement workflow function with proper orchestration
+- Handle conditional logic if specified in plan
+- Add proper error handling
+</implementation_checklist>
+
+<workflow_template>
+```typescript
+import { workflow, z } from '@output.ai/core';
+import { stepName } from './steps.js';
+
+const inputSchema = z.object( {
+  // Define based on plan
+} );
+
+const outputSchema = z.object( {
+  // Define based on plan
+} );
+
+export default workflow( {
+  name: '$2',
+  description: 'Description from plan',
+  inputSchema,
+  outputSchema,
+  fn: async input => {
+    // Implement orchestration logic from plan
+    const result = await stepName( input );
+    return { result };
+  }
+} );
+```
+</workflow_template>
+
+</step>
+
+<step number="3" name="steps_implementation">
+
+### Step 3: Steps Implementation
+
+Update `$3/steps.ts` with all step definitions from the plan.
+
+<implementation_checklist>
+- Import required dependencies (step, z from '@output.ai/core')
+- Implement each step with proper schema validation
+- Add error handling and retry logic as specified
+- Ensure step names match plan specifications
+- Add descriptive comments for complex logic
+</implementation_checklist>
+
+<step_template>
+```typescript
+import { step, z } from '@output.ai/core';
+
+export const stepName = step( {
+  name: 'stepName',
+  description: 'Description from plan',
+  inputSchema: z.object( {
+    // Define based on plan
+  } ),
+  outputSchema: z.object( {
+    // Define based on plan
+  } ),
+  fn: async input => {
+    // Implement step logic from plan
+    return output;
+  }
+} );
+```
+</step_template>
+
+</step>
+
+<step number="4" name="prompt_templates">
+
+### Step 4: Prompt Templates (if needed)
+
+If the plan includes LLM-based steps, create prompt templates in `$3/prompts/`.
+
+<decision_tree>
+IF plan_includes_llm_steps:
+  CREATE prompt_templates
+  UPDATE steps.ts to use loadPrompt and generateText
+ELSE:
+  SKIP to step 5
+</decision_tree>
+
+<llm_step_template>
+```typescript
+import { step, z } from '@output.ai/core';
+import { generateText } from '@output.ai/llm';
+
+export const llmStep = step( {
+  name: 'llmStep',
+  description: 'LLM-based step',
+  inputSchema: z.object( {
+    param: z.string()
+  } ),
+  outputSchema: z.string(),
+  fn: async ( { param } ) => {
+    const response = await generateText( {
+      prompt: 'prompt_name@v1',
+      variables: { param }
+    } );
+    return response;
+  }
+} );
+```
+</llm_step_template>
+
+<prompt_file_template>
+```
+---
+provider: anthropic
+model: claude-sonnet-4-20250514
+temperature: 0.7
+---
+
+<assistant>
+You are a helpful assistant.
+</assistant>
+
+<user>
+{{ instructions }}
+</user>
+```
+</prompt_file_template>
+
+</step>
+
+<step number="5" name="readme_update">
+
+### Step 5: README Update
+
+Update `$3/README.md` with workflow-specific documentation.
+
+<documentation_requirements>
+- Update workflow name and description
+- Document input schema with examples
+- Document output schema with examples
+- Explain each step's purpose
+- Provide usage examples
+- Document any prerequisites or setup requirements
+- Include testing instructions
+</documentation_requirements>
+
+</step>
+
+<step number="6" name="validation">
+
+### Step 6: Implementation Validation
+
+Verify the implementation is complete and correct.
+
+<validation_checklist>
+- All steps from plan are implemented
+- Input/output schemas match plan specifications
+- Workflow orchestration logic is correct
+- Error handling is in place
+- LLM prompts are created (if needed)
+- README is updated with accurate information
+- Code follows Output SDK patterns
+- TypeScript types are properly defined
+</validation_checklist>
+
+</step>
+
+<step number="7" name="post_flight_check">
+
+### Step 7: Post-Flight Check
+
+Verify the implementation is ready for use.
+
+<post_flight_check>
+EXECUTE: @.outputai/instructions/meta/post_flight.md
+</post_flight_check>
+
+</step>
+
+</process_flow>
+
+<post_flight_check>
+EXECUTE: @.outputai/instructions/meta/post_flight.md
+</post_flight_check>
+
+---- START ----
+
+Workflow Name: $2
+
+Workflow Directory: $3
+
+Plan File Path: $1
+
+Additional Instructions:
+
+$ARGUMENTS
package/dist/templates/agent_instructions/commands/plan_workflow.md.template
CHANGED
@@ -85,7 +85,7 @@ Design the workflow with clear single purpose steps and sound orchestration logi
 
 <workflow_template>
 ```typescript
-import { workflow, z } from 'output.ai/core';
+import { workflow, z } from '@output.ai/core';
 import { sumValues } from './steps.js';
 
 const inputSchema = z.object( {
@@ -116,7 +116,7 @@ Design the steps with clear boundaries.
 
 <step_template>
 ```typescript
-import { step, z } from 'output.ai/core';
+import { step, z } from '@output.ai/core';
 
 export const sumValues = step( {
   name: 'sumValues',
@@ -145,9 +145,8 @@ If any of the steps use an LLM, design the prompts for the steps.
 
 <prompt_step_template>
 ```typescript
-import { step, z } from 'output.ai/core';
-import {
-import { generateText } from 'output.ai/llm';
+import { step, z } from '@output.ai/core';
+import { generateText } from '@output.ai/llm';
 
 export const aiSdkPrompt = step( {
   name: 'aiSdkPrompt',
@@ -157,8 +156,10 @@ export const aiSdkPrompt = step( {
   } ),
   outputSchema: z.string(),
   fn: async ( { topic } ) => {
-    const
-
+    const response = await generateText( {
+      prompt: 'prompt@v1',
+      variables: { topic }
+    } );
     return response;
   }
 } );
package/dist/templates/workflow/README.md.template
CHANGED
@@ -144,9 +144,7 @@ const result = await myStep( { value: 42 } );
 The template includes an example of using LLM with prompts:
 
 ```typescript
-import { loadPrompt } from '@output.ai/prompt';
 import { generateText } from '@output.ai/llm';
-import type { Prompt } from '@output.ai/llm';
 
 export const llmStep = step( {
   name: 'llmStep',
@@ -159,10 +157,10 @@ export const llmStep = step( {
   },
   outputSchema: { type: 'string' },
   fn: async ( input: { userInput: string } ) => {
-    const
-
+    const response = await generateText( {
+      prompt: 'prompt@v1',
+      variables: { userInput: input.userInput }
     } );
-    const response = await generateText( prompt as Prompt );
     return response;
   }
 } );
package/dist/templates/workflow/steps.ts.template
CHANGED
@@ -1,75 +1,47 @@
-import { step } from '@output.ai/core';
-import { loadPrompt } from '@output.ai/prompt';
+import { step, z } from '@output.ai/core';
 import { generateText } from '@output.ai/llm';
-import type { Prompt } from '@output.ai/llm';
-
-// Define input/output schemas for the LLM step
-const llmInputSchema = {
-  type: 'object',
-  properties: {
-    userInput: { type: 'string' }
-  },
-  required: ['userInput']
-};
-
-const llmOutputSchema = {
-  type: 'string'
-};
 
 // Example step using LLM
 export const exampleLLMStep = step( {
   name: 'exampleLLMStep',
-  description: 'Example step that uses LLM',
-  inputSchema:
-
-
-
-
-
+  description: 'Example step that uses LLM to generate text',
+  inputSchema: z.object( {
+    userInput: z.string()
+  } ),
+  outputSchema: z.string(),
+  fn: async ( { userInput } ) => {
+    const response = await generateText( {
+      prompt: 'prompt@v1',
+      variables: { userInput }
     } );
-
-    // Generate text using LLM
-    const response = await generateText( prompt as Prompt );
-
     return response;
   }
 } );
 
-// Define schemas for data processing step
-const processDataInputSchema = {
-  type: 'object',
-  properties: {
-    value: { type: 'number' },
-    type: { type: 'string' }
-  },
-  required: ['value', 'type']
-};
-
-const processDataOutputSchema = {
-  type: 'object',
-  properties: {
-    processed: { type: 'boolean' },
-    timestamp: { type: 'string' },
-    data: { type: 'object' }
-  },
-  required: ['processed', 'timestamp', 'data']
-};
-
 // Example data processing step
 export const processDataStep = step( {
   name: 'processDataStep',
-  description: 'Process input data',
-  inputSchema:
-
-
+  description: 'Process input data and return structured result',
+  inputSchema: z.object( {
+    value: z.number(),
+    type: z.string()
+  } ),
+  outputSchema: z.object( {
+    processed: z.boolean(),
+    timestamp: z.string(),
+    data: z.object( {
+      value: z.number(),
+      type: z.string()
+    } )
+  } ).strict(),
+  fn: async ( { value, type } ) => {
     // TODO: Implement your step logic here
-    console.log( 'Processing data:',
+    console.log( 'Processing data:', { value, type } );
 
-    // Example processing
     return {
       processed: true,
       timestamp: new Date().toISOString(),
-      data:
+      data: { value, type }
     };
   }
 } );
package/dist/templates/workflow/workflow.ts.template
CHANGED
@@ -1,67 +1,41 @@
-import { workflow } from '@output.ai/core';
+import { workflow, z } from '@output.ai/core';
 import { exampleLLMStep, processDataStep } from './steps.js';
 
-
-
-
-
-
-
-
-    },
-    data: {
-      type: 'object',
-      properties: {
-        value: { type: 'number' },
-        type: { type: 'string' }
-      },
-      required: ['value', 'type']
-    }
-  },
-  required: ['prompt']
-};
-
-// Define the output schema for the workflow
-const outputSchema = {
-  type: 'object',
-  properties: {
-    llmResponse: { type: 'string' },
-    processedData: {
-      type: 'object',
-      properties: {
-        processed: { type: 'boolean' },
-        timestamp: { type: 'string' },
-        data: { type: 'object' }
-      }
-    },
-    message: { type: 'string' }
-  },
-  required: ['llmResponse', 'processedData', 'message']
-};
+const inputSchema = z.object( {
+  prompt: z.string().describe( 'The prompt to send to the LLM' ),
+  data: z.object( {
+    value: z.number(),
+    type: z.string()
+  } ).optional()
+} );
 
-
-
-
-
-
-
-
-
+const outputSchema = z.object( {
+  llmResponse: z.string(),
+  processedData: z.object( {
+    processed: z.boolean(),
+    timestamp: z.string(),
+    data: z.object( {
+      value: z.number(),
+      type: z.string()
+    } )
+  } ),
+  message: z.string()
+} ).strict();
 
 export default workflow( {
   name: '{{workflowName}}',
   description: '{{description}}',
   inputSchema,
   outputSchema,
-  fn: async (
+  fn: async ( { prompt, data } ) => {
     // Call the LLM step with input from workflow
     const llmResponse = await exampleLLMStep( {
-      userInput:
+      userInput: prompt
     } );
 
     // Process data if provided, otherwise use defaults
     const processedData = await processDataStep(
-
+      data || { value: 42, type: 'default' }
    );
 
     return {
package/dist/types/domain.d.ts
@@ -0,0 +1,20 @@
+/**
+ * Domain-specific types for improved type safety
+ */
+export type TodoStatus = 'pending' | 'in_progress' | 'completed';
+export type FileMappingType = 'template' | 'symlink' | 'copy';
+export type InstructionsType = 'plan' | 'build';
+export interface Todo {
+  content: string;
+  activeForm: string;
+  status: TodoStatus;
+}
+export interface ValidationResult {
+  isValid: boolean;
+  errors: string[];
+  warnings: string[];
+}
+export interface SystemValidation {
+  missingCommands: string[];
+  hasIssues: boolean;
+}
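
Since `domain.d.ts` only ships type declarations, a consumer-side sketch may help show how they compose; the import path and the validation logic below are hypothetical, and only the `Todo` and `ValidationResult` shapes come from the declarations above:

```typescript
// Hypothetical consumer code: the import path and validation logic are
// illustrative; only the Todo and ValidationResult shapes come from domain.d.ts.
import type { Todo, ValidationResult } from './types/domain.js';

const todo: Todo = {
  content: 'Implement workflow steps',
  activeForm: 'Implementing workflow steps',
  status: 'in_progress'
};

const result: ValidationResult = {
  isValid: todo.content.length > 0,
  errors: [],
  warnings: todo.status === 'pending' ? ['Todo has not been started yet'] : []
};
```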
package/dist/utils/paths.d.ts
CHANGED