@output.ai/core 0.1.9-dev.pr156.0 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,247 +1,98 @@
1
- # Core
1
+ # @output.ai/core
2
2
 
3
- Provides tools to develop and run a workflow, which is a well defined logical unit of work.
3
+ Workflow orchestration and worker runtime for building durable LLM applications with Temporal.
4
4
 
5
- ## Structure
5
+ [![npm version](https://img.shields.io/npm/v/@output.ai/core)](https://www.npmjs.com/package/@output.ai/core)
6
+ [![Documentation](https://img.shields.io/badge/docs-docs.output.ai-blue)](https://docs.output.ai/packages/core)
6
7
 
7
- Workflows are defined using core functions ("workflow", "step", "evaluator"), these are defined in separate files, and must be placed within the same folder:
8
-
9
- ```
10
- └ workflows
11
- └ example
12
- ├ workflow.ts|js <- workflow entry point
13
- ├ steps.ts|js <- file containing steps used by the workflow
14
- ├ evaluators.ts|js <- file containing evaluating functions
15
- └ prompt.prompt <- a prompt file
16
- └ other-example
8
+ ## Installation
17
9
 
10
+ ```bash
11
+ npm install @output.ai/core
18
12
  ```
19
13
 
20
- Workflows are the orchestrator and steps are executors. So the workflow only call the steps and the steps call the IO operations, like APIs, DBs, LLMs, etc. Evaluators are just another different flavor for steps, they work the same, but must return an `EvaluationResult` object.
21
-
22
- ## Components
14
+ ## Quick Start
23
15
 
24
- ### Workflow
16
+ ```typescript
17
+ // workflow.ts
18
+ import { workflow, z } from '@output.ai/core';
19
+ import { processData } from './steps.js';
25
20
 
26
- The main code, must contain only deterministic orchestration code.
27
-
28
- File: `workflow.js`
29
-
30
- Example:
31
- ```js
32
- import { workflow, z } from '@output.ai/workflow';
33
- import { guessByName } from './steps.js';
34
-
35
- export default workflow( {
36
- name: 'guessMyProfession',
37
- description: 'Guess a person profession by its name',
38
- inputSchema: z.object( {
39
- name: z.string()
40
- } ),
41
- outputSchema: z.object( {
42
- profession: z.string()
43
- } ),
44
- fn: async input => {
45
- const profession = await guessByName( input.name );
46
- return { profession };
21
+ export default workflow({
22
+ name: 'myWorkflow',
23
+ inputSchema: z.object({ text: z.string() }),
24
+ outputSchema: z.object({ result: z.string() }),
25
+ fn: async (input) => {
26
+ const result = await processData(input.text);
27
+ return { result };
47
28
  }
48
- })
29
+ });
49
30
  ```
50
31
 
51
- Workflows can only import the following files
52
-
53
- #### Components
54
- - `evaluators.js`
55
- - `shared_steps.js`
56
- - `steps.js`
57
- - `workflow.js`
32
+ ```typescript
33
+ // steps.ts
34
+ import { step, z } from '@output.ai/core';
58
35
 
59
- #### Core library
60
- `@output.ai/core`
61
-
62
- #### Whitelisted files
63
- - `types.js`
64
- - `consts.js`
65
- - `constants.js`
66
- - `vars.js`
67
- - `variables.js`
68
- - `utils.js`
69
- - `tools.js`
70
- - `functions.js`
71
- - `shared.js`
72
-
73
- ### Step
74
-
75
- Re-usable units of work that can contain IO, used by the workflow.
76
-
77
- File: `steps.js`
78
-
79
- Example:
80
- ```js
81
- import { api } from './api.js'
82
-
83
- export const guessByName = step( {
84
- name: 'guessByName',
36
+ export const processData = step({
37
+ name: 'processData',
85
38
  inputSchema: z.string(),
86
39
  outputSchema: z.string(),
87
- fn: async name => {
88
- const res = await api.consumer( name );
89
- return res.body;
90
- }
91
- } )
92
- ```
93
-
94
- ### Shared Steps
95
-
96
- By default, steps are exclusive to the workflow, so it is not possible to use these steps from elsewhere. In order to have shared steps and make them accessible in different workflows, create a shared steps file. This file can be relatively imported anywhere.
97
-
98
- File: `shared_steps.js`
99
-
100
- Example:
101
- ```js
102
- export const mySharedStep = step( {
103
- name: 'mySharedStep',
104
- ...
105
- } )
106
- ```
107
-
108
- And the usage is the same as any step:
109
- `workflow.js`
110
- ```js
111
- import { mySharedStep } from '../../tools/shared_steps.js'
112
- ```
113
-
114
- ### Evaluators
115
-
116
- Steps that analyze LLM response, or take other measurements are contained in evaluators.
117
-
118
- File: `evaluators.js`
119
-
120
- Example:
121
- ```js
122
- import { evaluator, EvaluationStringResult } from './api.js'
123
-
124
- export const judgeResult = evaluator( {
125
- name: 'judgeResult',
126
- inputSchema: z.string(),
127
- fn: async name => {
128
- ...
129
- return new EvaluationStringResult({
130
- value: 'good',
131
- confidence: .95
132
- });
40
+ fn: async (text) => {
41
+ return text.toUpperCase();
133
42
  }
134
- } )
43
+ });
135
44
  ```
136
45
 
137
- Its usage is the same as steps:
138
- `workflow.js`
139
- ```js
140
- import { workflow, z } from '@output.ai/workflow';
141
- import { judgeResult } from './evaluators.js';
142
-
143
- export default workflow( {
144
- name: 'guessMyProfession',
145
- inputSchema: z.object( {
146
- name: z.string()
147
- } ),
148
- outputSchema: z.object( {
149
- result: z.string()
150
- } ),
151
- fn: async input => {
152
- const judgment = await judgeResult( input.name );
153
- return { result: judgment.value };
154
- }
155
- })
156
- ```
157
-
158
- ## Webhooks
159
-
160
- Workflows can call webhooks that will stop their execution until an answer is given back.
161
-
162
- ```js
163
- import { workflow, createWebhook } from '@output.ai/workflow';
164
- import { guessByName } from './steps.js';
165
-
166
- export default workflow( {
167
- ...
168
- fn: async input => {
169
- ...
170
-
171
- const result = await createWebhook( {
172
- url: 'http://xxx.xxx/feedback',
173
- payload: {
174
- progressSoFar: 'plenty'
175
- }
176
- } );
177
-
178
- }
179
- })
180
- ```
181
-
182
- The url of the example will receive the payload, plus the workflowId:
183
-
184
- ```js
185
- {
186
- workflowId: '', // alphanumerical id of the workflow execution,
187
- payload: { }, // the payload sent using tools.webhook()
188
- }
189
- ```
190
-
191
- To resume the workflow, a POST has to be made with a response payload and the workflowId.
192
-
193
- - Production: `https://output-api-production.onrender.com/workflow/feedback`
194
- - Staging: `https://output-api-staging.onrender.com/workflow/feedback`
195
- - Local: `http://localhost:3001/workflow/feedback`
196
-
197
- Example:
198
-
199
- ```bash
200
- POST http://localhost:3001/workflow/feedback
201
- {
202
- workflowId,
203
- payload: {}
204
- }
205
- ```
206
-
207
- ## Options
208
-
209
- All core interface functions: workflow, step, evaluator have similar signature, with the following options:
210
- - name: The function name, used to call it internally and identify it in the trace files, must be a code friendly string;
211
- - description: Human description of the workflow/step, used for the catalog;
212
- - inputSchema: a zod object indicating the type of the argument received by the `fn` function. It is validated. Omit if it doesn't have input arguments;
213
- - outputSchema: a zod object indicating the type that the `fn` function returns. It is validated. Omit if it is void. Evaluators do not have this option, since they must always return an EvaluationResult object;
214
- - fn: The actual implementation of the workflow/step, including all its logic.
215
- - options: Advanced options that will overwrite Temporal's ActivityOptions when calling activities.
216
-
217
- If used on `workflow()` it will apply for all activities. If used on `step()` or `evaluator()` it will apply only to that underlying activity. If changed in both places, the end value will be a merge between the initial values, workflow values and the step values.
218
-
219
- Order of precedence
220
- `step options > workflow options > default options`
221
-
222
- ## Developing
223
-
224
- To develop workflows you need the code, which will be called the worker, the API and the engine (Temporal).
225
-
226
- After having the API and the engine running, to start the worker just run:
227
-
228
- ```js
229
- `npm run outputai`
46
+ ## Key Exports
47
+
48
+ | Export | Description |
49
+ |--------|-------------|
50
+ | `workflow` | Define orchestration logic that coordinates steps |
51
+ | `step` | Define reusable units of work that handle I/O |
52
+ | `evaluator` | Define steps that return evaluation results |
53
+ | `createWebhook` | Pause workflow execution until external input |
54
+ | `z` | Zod schema library for input/output validation |
55
+
56
+ ## File Structure
57
+
58
+ Each workflow lives in its own directory:
59
+
60
+ ```text
61
+ src/workflows/
62
+ └── my-workflow/
63
+ ├── workflow.ts # Workflow definition
64
+ ├── steps.ts # Step implementations
65
+ ├── evaluators.ts # Evaluators (optional)
66
+ ├── prompts/ # LLM prompt templates
67
+ │ └── prompt@v1.prompt
68
+ └── scenarios/ # Test scenarios
69
+ └── test_input.json
230
70
  ```
231
71
 
232
- ## Env variables
233
-
234
- Necessary env variables to run the worker locally:
235
-
236
- - `TEMPORAL_ADDRESS`: The temporal backend address, prefer the remote;
237
- - `TEMPORAL_NAMESPACE`: The name of the namespace, if using remote, use: "output-staging.i0jzq";
238
- - `TEMPORAL_API_KEY`: The API key to access remote temporal. If using local temporal, leave it blank;
239
- - `CATALOG_ID`: The name of the local catalog, always set this. Use your email;
240
- - `API_AUTH_KEY`: The API key to access the Framework API. Local can be blank, remote use the proper API Key;
241
- - `TRACE_LOCAL_ON`: A "stringbool" value indicating if traces should be saved locally, needs REDIS_URL;
242
- - `TRACE_REMOTE_ON`: A "stringbool" value indicating if traces should be saved remotely, needs REDIS_URL and AWS_* secrets;
243
- - `REDIS_URL`: The redis address to connect. Only necessary when any type of trace is enabled;
244
- - `TRACE_REMOTE_S3_BUCKET`: The AWS S3 bucket to send the traces. Only necessary when remote trace is enabled;
245
- - `AWS_REGION`: AWS region to connect to send the traces, must match the bucket region. Only necessary when remote trace is enabled;
246
- - `AWS_ACCESS_KEY_ID`: AWS key id. Only necessary when remote trace is enabled;
247
- `AWS_SECRET_ACCESS_KEY`: AWS secret. Only necessary when remote trace is enabled;
72
+ ## Environment Variables
73
+
74
+ The worker reads these environment variables:
75
+
76
+ | Variable | Description |
77
+ |----------|-------------|
78
+ | `TEMPORAL_ADDRESS` | Temporal backend address |
79
+ | `TEMPORAL_NAMESPACE` | Temporal namespace name |
80
+ | `TEMPORAL_API_KEY` | API key for remote Temporal (leave blank for local) |
81
+ | `CATALOG_ID` | **Required.** Name of the local catalog (use your email) |
82
+ | `API_AUTH_KEY` | API key for Framework API (blank for local, required for remote) |
83
+ | `TRACE_LOCAL_ON` | Enable local trace saving (requires `REDIS_URL`) |
84
+ | `TRACE_REMOTE_ON` | Enable remote trace saving (requires `REDIS_URL` and AWS secrets) |
85
+ | `REDIS_URL` | Redis address (required when tracing is enabled) |
86
+ | `TRACE_REMOTE_S3_BUCKET` | AWS S3 bucket for traces (required for remote tracing) |
87
+ | `AWS_REGION` | AWS region matching the S3 bucket (required for remote tracing) |
88
+ | `AWS_ACCESS_KEY_ID` | AWS key ID (required for remote tracing) |
89
+ | `AWS_SECRET_ACCESS_KEY` | AWS secret key (required for remote tracing) |
90
+
91
+ ## Documentation
92
+
93
+ For comprehensive documentation, visit:
94
+
95
+ - [Package Reference](https://docs.output.ai/packages/core)
96
+ - [Workflows Guide](https://docs.output.ai/core/workflows)
97
+ - [Steps Guide](https://docs.output.ai/core/steps)
98
+ - [Getting Started](https://docs.output.ai/quickstart)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@output.ai/core",
3
- "version": "0.1.9-dev.pr156.0",
3
+ "version": "0.1.10",
4
4
  "description": "The core module of the output framework",
5
5
  "type": "module",
6
6
  "exports": {
@@ -40,7 +40,10 @@
40
40
  "stacktrace-parser": "0.1.11",
41
41
  "zod": "4.1.12"
42
42
  },
43
- "license": "UNLICENSED",
43
+ "license": "Apache-2.0",
44
+ "publishConfig": {
45
+ "access": "public"
46
+ },
44
47
  "imports": {
45
48
  "#consts": "./src/consts.js",
46
49
  "#errors": "./src/errors.js",
@@ -4,32 +4,67 @@ import { fileURLToPath } from 'url';
4
4
  import buildTraceTree from '../../tools/build_trace_tree.js';
5
5
  import { EOL } from 'node:os';
6
6
 
7
- const oneWeekInMS = 1000 * 60 * 60 * 24 * 7;
8
7
  const __dirname = dirname( fileURLToPath( import.meta.url ) );
9
- const tempDir = join( __dirname, 'temp', 'traces' );
8
+
9
+ const PURGE_TEMP_FILES_THRESHOLD = 1000 * 60 * 60 * 24 * 7; // 1 week in milliseconds
10
+
11
+ // The path to the project root
12
+ const LOCAL_PROJECT_ROOT_PATH = process.argv[2] || process.cwd();
13
+
14
+ // The path to the local trace logs
15
+ const LOCAL_TRACE_LOG_PATH = join( LOCAL_PROJECT_ROOT_PATH, 'logs' );
16
+
17
+ // The path to the temporary trace logs
18
+ const TMP_TRACE_LOG_PATH = join( __dirname, 'temp', 'traces' );
10
19
 
11
20
  const accumulate = ( { entry, executionContext: { workflowId, startTime } } ) => {
12
- const path = join( tempDir, `${startTime}_${workflowId}.trace` );
21
+ const path = join( TMP_TRACE_LOG_PATH, `${startTime}_${workflowId}.trace` );
13
22
  appendFileSync( path, JSON.stringify( entry ) + EOL, 'utf-8' );
14
23
  return readFileSync( path, 'utf-8' ).split( EOL ).slice( 0, -1 ).map( v => JSON.parse( v ) );
15
24
  };
16
25
 
17
- const cleanupOldTempFiles = ( threshold = Date.now() - oneWeekInMS ) =>
18
- readdirSync( tempDir )
26
+ const cleanupOldTempFiles = ( threshold = Date.now() - PURGE_TEMP_FILES_THRESHOLD ) =>
27
+ readdirSync( TMP_TRACE_LOG_PATH )
19
28
  .filter( f => +f.split( '_' )[0] < threshold )
20
- .forEach( f => rmSync( join( tempDir, f ) ) );
29
+ .forEach( f => rmSync( join( TMP_TRACE_LOG_PATH, f ) ) );
30
+
31
+ /**
32
+ * Get the host trace log path, which is used for reporting trace locations.
33
+ * In containerized environments (e.g., Docker), this can be different from the local path
34
+ * to map container paths to host filesystem paths.
35
+ * @returns {string} The host trace log path from HOST_TRACE_PATH env var, or local path as fallback
36
+ */
37
+ const getHostTraceLogPath = () => {
38
+ return process.env.HOST_TRACE_PATH || LOCAL_TRACE_LOG_PATH;
39
+ };
21
40
 
22
41
  /**
23
42
  * Init this processor
24
43
  */
25
44
  export const init = () => {
26
- mkdirSync( tempDir, { recursive: true } );
45
+ mkdirSync( TMP_TRACE_LOG_PATH, { recursive: true } );
27
46
  cleanupOldTempFiles();
28
47
  };
29
48
 
30
- const getOutputDir = workflowName => join( process.argv[2], 'logs', 'runs', workflowName );
49
+ /**
50
+ * Get the local file system path for ALL file I/O operations (read/write)
51
+ * Uses the project root path passed as argv[2], falls back to cwd
52
+ * @param {string} workflowName - The name of the workflow
53
+ * @returns {string} The local filesystem path for file operations
54
+ */
55
+ const getLocalOutputDir = workflowName => {
56
+ return join( LOCAL_PROJECT_ROOT_PATH, 'logs', 'runs', workflowName );
57
+ };
31
58
 
32
- const getRelativeOutputDir = workflowName => join( 'logs', 'runs', workflowName );
59
+ /**
60
+ * Get the host path for reporting trace file locations to users
61
+ * Uses HOST_TRACE_PATH if set (for Docker), otherwise uses project root
62
+ * @param {string} workflowName - The name of the workflow
63
+ * @returns {string} The path to report to users/API
64
+ */
65
+ const getReportOutputDir = workflowName => {
66
+ return join( getHostTraceLogPath(), 'runs', workflowName );
67
+ };
33
68
 
34
69
  const buildOutputFileName = ( { startTime, workflowId } ) => {
35
70
  const timestamp = new Date( startTime ).toISOString().replace( /[:T.]/g, '-' );
@@ -50,7 +85,8 @@ export const exec = ( { entry, executionContext } ) => {
50
85
  const { workflowId, workflowName, startTime } = executionContext;
51
86
  const content = buildTraceTree( accumulate( { entry, executionContext } ) );
52
87
 
53
- const dir = getOutputDir( workflowName );
88
+ // Always use local path for writing files
89
+ const dir = getLocalOutputDir( workflowName );
54
90
  const path = join( dir, buildOutputFileName( { startTime, workflowId } ) );
55
91
 
56
92
  mkdirSync( dir, { recursive: true } );
@@ -58,12 +94,14 @@ export const exec = ( { entry, executionContext } ) => {
58
94
  };
59
95
 
60
96
  /**
61
- * Returns where the trace is saved (as a relative path)
97
+ * Returns where the trace is saved as an absolute path
62
98
  * @param {object} args
63
99
  * @param {string} args.startTime - The start time of the workflow
64
100
  * @param {string} args.workflowId - The id of the workflow execution
65
101
  * @param {string} args.workflowName - The name of the workflow
66
- * @returns {string} The relative path where the trace will be saved
102
+ * @returns {string} The absolute path where the trace will be saved
67
103
  */
68
- export const getDestination = ( { startTime, workflowId, workflowName } ) =>
69
- join( getRelativeOutputDir( workflowName ), buildOutputFileName( { workflowId, startTime } ) );
104
+ export const getDestination = ( { startTime, workflowId, workflowName } ) => {
105
+ // Use report path for reporting to users/API
106
+ return join( getReportOutputDir( workflowName ), buildOutputFileName( { workflowId, startTime } ) );
107
+ };
@@ -29,6 +29,7 @@ describe( 'tracing/processors/local', () => {
29
29
  vi.clearAllMocks();
30
30
  store.files.clear();
31
31
  process.argv[2] = '/tmp/project';
32
+ delete process.env.HOST_TRACE_PATH; // Clear HOST_TRACE_PATH for clean tests
32
33
  } );
33
34
 
34
35
  it( 'init(): creates temp dir and cleans up old files', async () => {
@@ -39,6 +40,7 @@ describe( 'tracing/processors/local', () => {
39
40
 
40
41
  init();
41
42
 
43
+ // Should create temp dir relative to module location using __dirname
42
44
  expect( mkdirSyncMock ).toHaveBeenCalledWith( expect.stringMatching( /temp\/traces$/ ), { recursive: true } );
43
45
  expect( rmSyncMock ).toHaveBeenCalledTimes( 1 );
44
46
  } );
@@ -60,11 +62,12 @@ describe( 'tracing/processors/local', () => {
60
62
 
61
63
  expect( writeFileSyncMock ).toHaveBeenCalledTimes( 3 );
62
64
  const [ writtenPath, content ] = writeFileSyncMock.mock.calls.at( -1 );
63
- expect( writtenPath ).toMatch( /\/logs\/runs\/WF\// );
65
+ // Changed: the write path is derived from argv[2] (falling back to process.cwd()), so only the '/runs/WF/' suffix is asserted
66
+ expect( writtenPath ).toMatch( /\/runs\/WF\// );
64
67
  expect( JSON.parse( content.trim() ).count ).toBe( 3 );
65
68
  } );
66
69
 
67
- it( 'getDestination(): returns relative path', async () => {
70
+ it( 'getDestination(): returns absolute path', async () => {
68
71
  const { getDestination } = await import( './index.js' );
69
72
 
70
73
  const startTime = Date.parse( '2020-01-02T03:04:05.678Z' );
@@ -73,9 +76,74 @@ describe( 'tracing/processors/local', () => {
73
76
 
74
77
  const destination = getDestination( { startTime, workflowId, workflowName } );
75
78
 
76
- // Should return a relative path, not an absolute path
77
- expect( destination ).not.toMatch( /^\/|^[A-Z]:\\/i ); // Not starting with / or Windows drive letter
78
- expect( destination ).toBe( 'logs/runs/test-workflow/2020-01-02-03-04-05-678Z_workflow-id-123.json' );
79
+ // Should return an absolute path
80
+ expect( destination ).toMatch( /^\/|^[A-Z]:\\/i ); // Starting with / or Windows drive letter
81
+ expect( destination ).toContain( '/logs/runs/test-workflow/2020-01-02-03-04-05-678Z_workflow-id-123.json' );
82
+ } );
83
+
84
+ it( 'exec(): writes to container path regardless of HOST_TRACE_PATH', async () => {
85
+ const { exec, init } = await import( './index.js' );
86
+
87
+ // Set HOST_TRACE_PATH to simulate Docker environment
88
+ process.env.HOST_TRACE_PATH = '/host/path/logs';
89
+
90
+ init();
91
+
92
+ const startTime = Date.parse( '2020-01-02T03:04:05.678Z' );
93
+ const ctx = { executionContext: { workflowId: 'id1', workflowName: 'WF', startTime } };
94
+
95
+ exec( { ...ctx, entry: { name: 'A', phase: 'start', timestamp: startTime } } );
96
+
97
+ expect( writeFileSyncMock ).toHaveBeenCalledTimes( 1 );
98
+ const [ writtenPath ] = writeFileSyncMock.mock.calls.at( -1 );
99
+
100
+ // Should write to process.cwd()/logs, NOT to HOST_TRACE_PATH
101
+ expect( writtenPath ).not.toContain( '/host/path/logs' );
102
+ expect( writtenPath ).toMatch( /logs\/runs\/WF\// );
103
+ } );
104
+
105
+ it( 'getDestination(): returns HOST_TRACE_PATH when set', async () => {
106
+ const { getDestination } = await import( './index.js' );
107
+
108
+ // Set HOST_TRACE_PATH to simulate Docker environment
109
+ process.env.HOST_TRACE_PATH = '/host/path/logs';
110
+
111
+ const startTime = Date.parse( '2020-01-02T03:04:05.678Z' );
112
+ const workflowId = 'workflow-id-123';
113
+ const workflowName = 'test-workflow';
114
+
115
+ const destination = getDestination( { startTime, workflowId, workflowName } );
116
+
117
+ // Should return HOST_TRACE_PATH-based path for reporting
118
+ expect( destination ).toBe( '/host/path/logs/runs/test-workflow/2020-01-02-03-04-05-678Z_workflow-id-123.json' );
119
+ } );
120
+
121
+ it( 'separation of write and report paths works correctly', async () => {
122
+ const { exec, getDestination, init } = await import( './index.js' );
123
+
124
+ // Set HOST_TRACE_PATH to simulate Docker environment
125
+ process.env.HOST_TRACE_PATH = '/Users/ben/project/logs';
126
+
127
+ init();
128
+
129
+ const startTime = Date.parse( '2020-01-02T03:04:05.678Z' );
130
+ const workflowId = 'workflow-id-123';
131
+ const workflowName = 'test-workflow';
132
+ const ctx = { executionContext: { workflowId, workflowName, startTime } };
133
+
134
+ // Execute to write file
135
+ exec( { ...ctx, entry: { name: 'A', phase: 'start', timestamp: startTime } } );
136
+
137
+ // Get destination for reporting
138
+ const destination = getDestination( { startTime, workflowId, workflowName } );
139
+
140
+ // Verify write path is local
141
+ const [ writtenPath ] = writeFileSyncMock.mock.calls.at( -1 );
142
+ expect( writtenPath ).not.toContain( '/Users/ben/project' );
143
+ expect( writtenPath ).toMatch( /logs\/runs\/test-workflow\// );
144
+
145
+ // Verify report path uses HOST_TRACE_PATH
146
+ expect( destination ).toBe( '/Users/ben/project/logs/runs/test-workflow/2020-01-02-03-04-05-678Z_workflow-id-123.json' );
79
147
  } );
80
148
  } );
81
149
 
@@ -9,11 +9,13 @@ const traceBus = new EventEmitter();
9
9
  const processors = [
10
10
  {
11
11
  isOn: isStringboolTrue( process.env.TRACE_LOCAL_ON ),
12
+ name: 'LOCAL',
12
13
  init: localProcessor.init,
13
14
  exec: localProcessor.exec
14
15
  },
15
16
  {
16
17
  isOn: isStringboolTrue( process.env.TRACE_REMOTE_ON ),
18
+ name: 'REMOTE',
17
19
  init: s3Processor.init,
18
20
  exec: s3Processor.exec
19
21
  }
@@ -25,7 +27,13 @@ const processors = [
25
27
  export const init = async () => {
26
28
  for ( const p of processors.filter( p => p.isOn ) ) {
27
29
  await p.init();
28
- traceBus.addListener( 'entry', p.exec );
30
+ traceBus.addListener( 'entry', async ( ...args ) => {
31
+ try {
32
+ await p.exec( ...args );
33
+ } catch ( error ) {
34
+ console.error( `[Tracing] "${p.name}" processor execution error.`, error );
35
+ }
36
+ } );
29
37
  }
30
38
  };
31
39