@output.ai/cli 0.7.11 → 0.7.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. package/README.md +1 -1
  2. package/dist/generated/sdk_versions.json +3 -3
  3. package/dist/services/messages.js +2 -2
  4. package/dist/templates/agent_instructions/dotoutputai/AGENTS.md.template +1 -1
  5. package/dist/templates/project/README.md.template +38 -1
  6. package/dist/templates/project/src/shared/clients/jina.ts.template +30 -0
  7. package/dist/templates/project/src/shared/utils/string.ts.template +3 -0
  8. package/dist/templates/project/src/shared/utils/url.ts.template +15 -0
  9. package/dist/templates/project/src/workflows/blog_evaluator/evaluators.ts.template +33 -0
  10. package/dist/templates/project/src/workflows/blog_evaluator/prompts/signal_noise@v1.prompt.template +28 -0
  11. package/dist/templates/project/src/workflows/blog_evaluator/scenarios/paulgraham_hwh.json.template +3 -0
  12. package/dist/templates/project/src/workflows/blog_evaluator/steps.ts.template +27 -0
  13. package/dist/templates/project/src/workflows/blog_evaluator/types.ts.template +24 -0
  14. package/dist/templates/project/src/workflows/blog_evaluator/utils.ts.template +12 -0
  15. package/dist/templates/project/src/workflows/blog_evaluator/workflow.ts.template +25 -0
  16. package/dist/templates/workflow/README.md.template +96 -75
  17. package/dist/templates/workflow/evaluators.ts.template +23 -0
  18. package/package.json +1 -1
  19. package/dist/templates/project/src/workflows/example_question/prompts/answer_question@v1.prompt.template +0 -13
  20. package/dist/templates/project/src/workflows/example_question/scenarios/question_ada_lovelace.json.template +0 -3
  21. package/dist/templates/project/src/workflows/example_question/steps.ts.template +0 -16
  22. package/dist/templates/project/src/workflows/example_question/workflow.ts.template +0 -22
package/README.md CHANGED
@@ -18,7 +18,7 @@ cd <project-name>
18
18
  npx output dev
19
19
 
20
20
  # Run a workflow
21
- npx output workflow run example_question --input '{"question": "who is ada lovelace?"}'
21
+ npx output workflow run blog_evaluator --input src/workflows/blog_evaluator/scenarios/paulgraham_hwh.json
22
22
  ```
23
23
 
24
24
  ## Environment Configuration
@@ -1,6 +1,6 @@
1
1
  {
2
- "core": "0.2.4",
3
- "llm": "0.2.8",
2
+ "core": "0.3.0",
3
+ "llm": "0.2.9",
4
4
  "http": "0.1.1",
5
- "cli": "0.7.11"
5
+ "cli": "0.7.12"
6
6
  }
@@ -177,7 +177,7 @@ export const getProjectSuccessMessage = (folderName, installSuccess, envConfigur
177
177
  note: 'Launches Temporal, Redis, PostgreSQL, API, Worker, and UI'
178
178
  }, {
179
179
  step: 'Run example workflow',
180
- command: 'npx output workflow run example_question --input src/workflows/example_question/scenarios/question_ada_lovelace.json',
180
+ command: 'npx output workflow run blog_evaluator --input src/workflows/blog_evaluator/scenarios/paulgraham_hwh.json',
181
181
  note: 'Execute in a new terminal after services are running'
182
182
  }, {
183
183
  step: 'Monitor workflows',
@@ -321,7 +321,7 @@ ${createSectionHeader('RUN A WORKFLOW', '🚀')}
321
321
 
322
322
  ${ux.colorize('white', 'In a new terminal, execute:')}
323
323
 
324
- ${formatCommand('npx output workflow run example_question --input \'{"question": "Hello!"}\'')}
324
+ ${formatCommand('npx output workflow run blog_evaluator --input src/workflows/blog_evaluator/scenarios/paulgraham_hwh.json')}
325
325
 
326
326
  ${divider}
327
327
 
@@ -186,7 +186,7 @@ export default workflow({
186
186
  });
187
187
  ```
188
188
 
189
- **Allowed imports**: steps.ts, evaluators.ts, shared_steps.ts, types.ts, consts.ts, utils.ts
189
+ **Allowed imports**: steps.ts, evaluators.ts, ../../shared/steps/*.ts, types.ts, consts.ts, utils.ts
190
190
 
191
191
  **Forbidden in workflows**: Direct API calls, Math.random(), Date.now(), dynamic imports
192
192
 
@@ -7,6 +7,43 @@
7
7
  - Node.js >= 24.3
8
8
  - Docker and Docker Compose (for local development)
9
9
 
10
+ ## Project Structure
11
+
12
+ ```
13
+ src/
14
+ ├── shared/ # Shared code across workflows
15
+ │ ├── clients/ # API clients (e.g., jina.ts)
16
+ │ └── utils/ # Utility functions (e.g., string.ts)
17
+ └── workflows/ # Workflow definitions
18
+ └── blog_evaluator/ # Example workflow
19
+ ├── workflow.ts # Main workflow
20
+ ├── steps.ts # Workflow steps
21
+ ├── evaluators.ts # Quality evaluators
22
+ ├── utils.ts # Local utilities
23
+ ├── prompts/ # LLM prompts
24
+ └── scenarios/ # Test scenarios
25
+ ```
26
+
27
+ ### Shared Directory
28
+
29
+ The `src/shared/` directory contains code shared across multiple workflows:
30
+
31
+ - **`shared/clients/`** - API clients using `@output.ai/http` for external services
32
+ - **`shared/utils/`** - Helper functions and utilities
33
+
34
+ ### Import Rules
35
+
36
+ **Workflows** can import from:
37
+ - Local steps, evaluators, and utilities
38
+ - Shared steps, evaluators, clients, and utilities
39
+
40
+ **Steps and Evaluators** can import from:
41
+ - Local utilities and clients
42
+ - Shared utilities and clients
43
+
44
+ **Steps and Evaluators cannot** import from:
45
+ - Other steps or evaluators (Temporal activity isolation)
46
+
10
47
  ## Getting Started
11
48
 
12
49
  ### 1. Install Dependencies
@@ -44,7 +81,7 @@ This starts:
44
81
  In a new terminal:
45
82
 
46
83
  ```bash
47
- npx output workflow run example_question --input '{"question": "who really is ada lovelace?"}'
84
+ npx output workflow run blog_evaluator --input src/workflows/blog_evaluator/scenarios/paulgraham_hwh.json
48
85
  ```
49
86
 
50
87
  ### 5. Stop Services
@@ -0,0 +1,30 @@
1
+ import { httpClient } from '@output.ai/http';
2
+
3
+ export interface JinaReaderResponse {
4
+ code: number;
5
+ status: number;
6
+ data: {
7
+ title: string;
8
+ description: string;
9
+ url: string;
10
+ content: string;
11
+ usage: { tokens: number };
12
+ };
13
+ }
14
+
15
+ const jinaClient = httpClient( {
16
+ prefixUrl: 'https://r.jina.ai',
17
+ timeout: 30000
18
+ } );
19
+
20
+ export async function fetchBlogContent( url: string ): Promise<JinaReaderResponse> {
21
+ const response = await jinaClient.post( '', {
22
+ json: { url },
23
+ headers: {
24
+ 'Accept': 'application/json',
25
+ 'Content-Type': 'application/json',
26
+ 'X-Return-Format': 'markdown'
27
+ }
28
+ } );
29
+ return response.json() as Promise<JinaReaderResponse>;
30
+ }
@@ -0,0 +1,3 @@
1
+ export function lowercase( str: string ): string {
2
+ return str.toLowerCase();
3
+ }
@@ -0,0 +1,15 @@
1
+ export function isValidUrl( urlString: string ): boolean {
2
+ try {
3
+ const url = new URL( urlString );
4
+ return url.protocol === 'http:' || url.protocol === 'https:';
5
+ } catch {
6
+ return false;
7
+ }
8
+ }
9
+
10
+ export function validateUrl( urlString: string ): string {
11
+ if ( !isValidUrl( urlString ) ) {
12
+ throw new Error( `Invalid URL: ${urlString}` );
13
+ }
14
+ return urlString;
15
+ }
@@ -0,0 +1,33 @@
1
+ import { evaluator, z, EvaluationNumberResult } from '@output.ai/core';
2
+ import { generateObject } from '@output.ai/llm';
3
+ import type { BlogContent } from './types.js';
4
+
5
+ const blogContentSchema = z.object( {
6
+ title: z.string(),
7
+ url: z.string(),
8
+ content: z.string(),
9
+ tokenCount: z.number()
10
+ } );
11
+
12
+ export const evaluateSignalToNoise = evaluator( {
13
+ name: 'evaluate_signal_to_noise',
14
+ description: 'Evaluate the signal-to-noise ratio of blog content',
15
+ inputSchema: blogContentSchema,
16
+ fn: async ( input: BlogContent ) => {
17
+ const { result } = await generateObject( {
18
+ prompt: 'signal_noise@v1',
19
+ variables: {
20
+ title: input.title,
21
+ content: input.content
22
+ },
23
+ schema: z.object( {
24
+ score: z.number().min( 0 ).max( 100 ).describe( 'Signal-to-noise score 0-100' )
25
+ } )
26
+ } );
27
+
28
+ return new EvaluationNumberResult( {
29
+ value: result.score,
30
+ confidence: 0.85
31
+ } );
32
+ }
33
+ } );
@@ -0,0 +1,28 @@
1
+ ---
2
+ provider: anthropic
3
+ model: claude-haiku-4-5
4
+ temperature: 0.3
5
+ maxTokens: 256
6
+ ---
7
+
8
+ <system>
9
+ You are an expert content analyst. Evaluate blog posts for their signal-to-noise ratio.
10
+ </system>
11
+
12
+ <user>
13
+ Analyze this blog post for signal-to-noise ratio.
14
+
15
+ Title: \{{ title }}
16
+
17
+ Content:
18
+ \{{ content }}
19
+
20
+ Score 0-100 where:
21
+ - 0-20: Mostly filler/noise
22
+ - 21-40: More noise than signal
23
+ - 41-60: Balanced
24
+ - 61-80: Good signal, minimal noise
25
+ - 81-100: Exceptional, dense valuable content
26
+
27
+ Return only the score.
28
+ </user>
@@ -0,0 +1,3 @@
1
+ {
2
+ "url": "https://paulgraham.com/hwh.html"
3
+ }
@@ -0,0 +1,27 @@
1
+ import { step, z } from '@output.ai/core';
2
+ import { fetchBlogContent } from '../../shared/clients/jina.js';
3
+
4
+ const blogContentSchema = z.object( {
5
+ title: z.string(),
6
+ url: z.string(),
7
+ content: z.string(),
8
+ tokenCount: z.number()
9
+ } );
10
+
11
+ export const fetchContent = step( {
12
+ name: 'fetch_blog_content',
13
+ description: 'Fetch blog content from URL using Jina Reader API',
14
+ inputSchema: z.object( {
15
+ url: z.string().url()
16
+ } ),
17
+ outputSchema: blogContentSchema,
18
+ fn: async ( { url } ) => {
19
+ const response = await fetchBlogContent( url );
20
+ return {
21
+ title: response.data.title,
22
+ url: response.data.url,
23
+ content: response.data.content,
24
+ tokenCount: response.data.usage.tokens
25
+ };
26
+ }
27
+ } );
@@ -0,0 +1,24 @@
1
+ import { z } from '@output.ai/core';
2
+
3
+ export const blogContentSchema = z.object( {
4
+ title: z.string(),
5
+ url: z.string(),
6
+ content: z.string(),
7
+ tokenCount: z.number()
8
+ } );
9
+
10
+ export const workflowInputSchema = z.object( {
11
+ url: z.string().url().describe( 'URL of the blog post to evaluate' )
12
+ } );
13
+
14
+ export const workflowOutputSchema = z.object( {
15
+ url: z.string(),
16
+ title: z.string(),
17
+ signalToNoiseScore: z.number().min( 0 ).max( 100 ),
18
+ confidence: z.number().min( 0 ).max( 1 ),
19
+ summary: z.string()
20
+ } );
21
+
22
+ export type BlogContent = z.infer<typeof blogContentSchema>;
23
+ export type WorkflowInput = z.infer<typeof workflowInputSchema>;
24
+ export type WorkflowOutput = z.infer<typeof workflowOutputSchema>;
@@ -0,0 +1,12 @@
1
+ export function createWorkflowOutput(
2
+ blogContent: { url: string; title: string },
3
+ score: number
4
+ ) {
5
+ return {
6
+ url: blogContent.url,
7
+ title: blogContent.title,
8
+ signalToNoiseScore: score,
9
+ confidence: 0.85,
10
+ summary: `Signal-to-noise score: ${score}/100`
11
+ };
12
+ }
@@ -0,0 +1,25 @@
1
+ import { workflow, z } from '@output.ai/core';
2
+ import { validateUrl } from '../../shared/utils/url.js';
3
+ import { fetchContent } from './steps.js';
4
+ import { evaluateSignalToNoise } from './evaluators.js';
5
+ import { createWorkflowOutput } from './utils.js';
6
+ import { workflowInputSchema, workflowOutputSchema } from './types.js';
7
+
8
+ export default workflow( {
9
+ name: 'blog_evaluator',
10
+ description: '{{description}}',
11
+ inputSchema: workflowInputSchema,
12
+ outputSchema: workflowOutputSchema,
13
+ fn: async ( input ) => {
14
+ const validatedUrl = validateUrl( input.url );
15
+ const blogContent = await fetchContent( { url: validatedUrl } );
16
+ const evaluation = await evaluateSignalToNoise( blogContent );
17
+
18
+ return createWorkflowOutput( blogContent, evaluation.value );
19
+ },
20
+ options: {
21
+ retry: {
22
+ maximumAttempts: 3
23
+ }
24
+ }
25
+ } );
@@ -10,8 +10,50 @@ This workflow was generated using the Output SDK CLI. It provides a starting poi
10
10
 
11
11
  - `workflow.ts` - Main workflow definition with input/output schemas
12
12
  - `steps.ts` - Activity/step definitions with input/output schemas
13
- - `prompt@v1.prompt` - Example LLM prompt template
14
- - `.env` - Environment variables for API keys and configuration
13
+ - `evaluators.ts` - Quality evaluators for workflow outputs
14
+ - `prompts/` - LLM prompt templates
15
+
16
+ ## File Organization
17
+
18
+ You can organize your workflow files in two ways:
19
+
20
+ **Flat files:**
21
+ ```
22
+ workflow/
23
+ ├── workflow.ts
24
+ ├── steps.ts
25
+ ├── evaluators.ts
26
+ └── utils.ts
27
+ ```
28
+
29
+ **Folder-based:**
30
+ ```
31
+ workflow/
32
+ ├── workflow.ts
33
+ ├── steps/
34
+ │ ├── fetch_data.ts
35
+ │ └── process_data.ts
36
+ ├── evaluators/
37
+ │ └── quality.ts
38
+ └── utils/
39
+ └── helpers.ts
40
+ ```
41
+
42
+ ## Import Rules
43
+
44
+ **Important:** Steps and evaluators are Temporal activities. Activities cannot call other activities.
45
+
46
+ **Steps can import from:**
47
+ - Local utilities (`./utils.ts`, `./utils/*.ts`)
48
+ - Shared utilities (`../../shared/utils/*.ts`)
49
+ - Shared clients (`../../shared/clients/*.ts`)
50
+
51
+ **Steps cannot import from:**
52
+ - Other steps or evaluators (activity isolation)
53
+ - Workflow files
54
+
55
+ **Workflows can import from:**
56
+ - Steps, evaluators, and utilities (local and shared)
15
57
 
16
58
  ## Setup
17
59
 
@@ -63,121 +105,109 @@ Example:
63
105
 
64
106
  ### Workflow Structure
65
107
 
66
- The workflow follows the new Output SDK conventions:
108
+ The workflow follows the Output SDK conventions:
67
109
 
68
110
  ```typescript
69
- import { workflow } from '@output.ai/core';
70
- import { myStep, anotherStep } from './steps.js';
71
-
72
- const inputSchema = {
73
- type: 'object',
74
- properties: {
75
- // Define your input properties
76
- }
77
- };
78
-
79
- const outputSchema = {
80
- type: 'object',
81
- properties: {
82
- // Define your output properties
83
- }
84
- };
111
+ import { workflow, z } from '@output.ai/core';
112
+ import { myStep } from './steps.js';
113
+ import { evaluateQuality } from './evaluators.js';
85
114
 
86
115
  export default workflow( {
87
116
  name: 'workflowName',
88
117
  description: 'Workflow description',
89
- inputSchema,
90
- outputSchema,
118
+ inputSchema: z.object( { /* ... */ } ),
119
+ outputSchema: z.object( { /* ... */ } ),
91
120
  fn: async ( input ) => {
92
- // Call steps directly
93
121
  const result = await myStep( input );
94
- return result;
122
+ const { score } = await evaluateQuality( { input, output: result } );
123
+ return { result, qualityScore: score };
95
124
  }
96
125
  } );
97
126
  ```
98
127
 
99
128
  ### Adding New Steps
100
129
 
101
- 1. Define new steps in `steps.ts` with schemas:
130
+ Define steps in `steps.ts` with schemas:
102
131
 
103
132
  ```typescript
104
- import { step } from '@output.ai/core';
105
-
106
- const inputSchema = {
107
- type: 'object',
108
- properties: {
109
- value: { type: 'number' }
110
- },
111
- required: ['value']
112
- };
113
-
114
- const outputSchema = {
115
- type: 'object',
116
- properties: {
117
- result: { type: 'string' }
118
- }
119
- };
133
+ import { step, z } from '@output.ai/core';
120
134
 
121
135
  export const myStep = step( {
122
136
  name: 'myStep',
123
137
  description: 'Description of what this step does',
124
- inputSchema,
125
- outputSchema,
126
- fn: async ( input: { value: number } ) => {
127
- // Step implementation
138
+ inputSchema: z.object( {
139
+ value: z.number()
140
+ } ),
141
+ outputSchema: z.object( {
142
+ result: z.string()
143
+ } ),
144
+ fn: async ( input ) => {
128
145
  return { result: `Processed ${input.value}` };
129
146
  }
130
147
  } );
131
148
  ```
132
149
 
133
- 2. Import and use the step in your workflow (`workflow.ts`):
150
+ ### Adding Evaluators
151
+
152
+ Define evaluators in `evaluators.ts`:
134
153
 
135
154
  ```typescript
136
- import { myStep } from './steps.js';
155
+ import { evaluator, z } from '@output.ai/core';
156
+ import { generateText } from '@output.ai/llm';
137
157
 
138
- // Inside workflow fn:
139
- const result = await myStep( { value: 42 } );
158
+ export const evaluateQuality = evaluator( {
159
+ name: 'evaluate_quality',
160
+ description: 'Evaluate output quality',
161
+ inputSchema: z.object( {
162
+ input: z.any(),
163
+ output: z.any()
164
+ } ),
165
+ outputSchema: z.object( {
166
+ score: z.number().min( 0 ).max( 100 )
167
+ } ),
168
+ fn: async ( data ) => {
169
+ const { result } = await generateText( {
170
+ prompt: 'evaluate@v1',
171
+ variables: { input: data.input, output: data.output }
172
+ } );
173
+ return { score: parseInt( result, 10 ) };
174
+ }
175
+ } );
140
176
  ```
141
177
 
142
178
  ### Using LLM in Steps
143
179
 
144
- The template includes an example of using LLM with prompts:
145
-
146
180
  ```typescript
147
181
  import { generateText } from '@output.ai/llm';
148
182
 
149
183
  export const llmStep = step( {
150
184
  name: 'llmStep',
151
185
  description: 'Generate text using LLM',
152
- inputSchema: {
153
- type: 'object',
154
- properties: {
155
- userInput: { type: 'string' }
156
- }
157
- },
158
- outputSchema: { type: 'string' },
159
- fn: async ( input: { userInput: string } ) => {
160
- const response = await generateText( {
186
+ inputSchema: z.object( {
187
+ userInput: z.string()
188
+ } ),
189
+ outputSchema: z.string(),
190
+ fn: async ( input ) => {
191
+ const { result } = await generateText( {
161
192
  prompt: 'prompt@v1',
162
193
  variables: { userInput: input.userInput }
163
194
  } );
164
- return response;
195
+ return result;
165
196
  }
166
197
  } );
167
198
  ```
168
199
 
169
200
  ### Creating Prompt Templates
170
201
 
171
- Create new prompt files following the pattern:
202
+ Create prompt files in `prompts/` following the pattern:
172
203
  - File naming: `promptName@v1.prompt`
173
- - Include YAML frontmatter with provider and model
204
+ - Include YAML frontmatter with model
174
205
  - Use LiquidJS syntax for variables: `{{ variableName }}`
175
206
 
176
207
  Example prompt file:
177
208
  ```
178
209
  ---
179
- provider: anthropic
180
- model: claude-3-5-sonnet-latest
210
+ model: anthropic/claude-sonnet-4-20250514
181
211
  ---
182
212
 
183
213
  {{ userInput }}
@@ -195,19 +225,10 @@ To test your workflow:
195
225
 
196
226
  Example execution:
197
227
  ```bash
198
- curl -X POST http://localhost:3001/workflow \
199
- -H "Content-Type: application/json" \
200
- -d '{
201
- "workflowName": "{{workflowName}}",
202
- "input": {
203
- "prompt": "Tell me about workflows",
204
- "data": { "value": 42, "type": "example" }
205
- }
206
- }'
228
+ npx output workflow run {{workflowName}} --input '{"prompt": "Hello"}'
207
229
  ```
208
230
 
209
231
  ## Resources
210
232
 
211
- - [Output SDK Documentation](https://github.com/growthxai/output-sdk)
233
+ - [Output SDK Documentation](https://docs.output.ai)
212
234
  - [Temporal Documentation](https://docs.temporal.io)
213
- - [AI SDK Documentation](https://sdk.vercel.ai/docs)
@@ -0,0 +1,23 @@
1
+ import { evaluator, z } from '@output.ai/core';
2
+
3
+ // Example evaluator - customize for your workflow
4
+ export const evaluate{{WorkflowName}} = evaluator( {
5
+ name: 'evaluate_{{workflowName}}',
6
+ description: 'Evaluate the quality of {{workflowName}} output',
7
+ inputSchema: z.object( {
8
+ input: z.any(),
9
+ output: z.any()
10
+ } ),
11
+ outputSchema: z.object( {
12
+ score: z.number().min( 0 ).max( 100 ).describe( 'Quality score 0-100' ),
13
+ feedback: z.string().describe( 'Feedback on the output quality' )
14
+ } ),
15
+ fn: async () => {
16
+ // TODO: Implement evaluation logic
17
+ // Use LLM or custom logic to score the output quality
18
+ return {
19
+ score: 100,
20
+ feedback: 'Evaluation not yet implemented'
21
+ };
22
+ }
23
+ } );
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@output.ai/cli",
3
- "version": "0.7.11",
3
+ "version": "0.7.12",
4
4
  "description": "CLI for Output.ai workflow generation",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -1,13 +0,0 @@
1
- ---
2
- provider: anthropic
3
- model: claude-opus-4-1-20250805
4
- temperature: 0.7
5
- ---
6
-
7
- <system>
8
- You are a helpful assistant. Answer the user's question concisely and clearly.
9
- </system>
10
-
11
- <user>
12
- Answer the following question: \{{ question }}
13
- </user>
@@ -1,3 +0,0 @@
1
- {
2
- "question": "who really is ada lovelace?"
3
- }
@@ -1,16 +0,0 @@
1
- import { step, z } from '@output.ai/core';
2
- import { generateText } from '@output.ai/llm';
3
-
4
- export const answerQuestion = step( {
5
- name: 'answerQuestion',
6
- description: 'Answer a question using an LLM',
7
- inputSchema: z.string(),
8
- outputSchema: z.string(),
9
- fn: async question => {
10
- const { result } = await generateText( {
11
- prompt: 'answer_question@v1',
12
- variables: { question }
13
- } );
14
- return result;
15
- }
16
- } );
@@ -1,22 +0,0 @@
1
- import { workflow, z } from '@output.ai/core';
2
- import { answerQuestion } from './steps.js';
3
-
4
- export default workflow( {
5
- name: 'example_question',
6
- description: '{{description}}',
7
- inputSchema: z.object( {
8
- question: z.string().describe( 'A question to answer' )
9
- } ),
10
- outputSchema: z.object( {
11
- answer: z.string().describe( 'The answer to the question' )
12
- } ),
13
- fn: async input => {
14
- const answer = await answerQuestion( input.question );
15
- return { answer };
16
- },
17
- options: {
18
- retry: {
19
- maximumAttempts: 3
20
- }
21
- }
22
- } );