@link-assistant/agent 0.5.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,174 @@
1
+ /**
2
+ * Echo Provider - A synthetic provider for testing dry-run mode
3
+ *
4
+ * This provider echoes back the user's input message without making actual API calls.
5
+ * It's designed to enable robust testing of round-trips and multi-turn conversations
6
+ * without incurring API costs.
7
+ *
8
+ * Usage:
9
+ * agent --dry-run -p "hello" # Uses echo provider automatically
10
+ * agent --model link-assistant/echo -p "hello" # Explicit usage
11
+ *
12
+ * The echo behavior follows the issue #89 specification:
13
+ * Input: "hi" -> Output: "hi"
14
+ * Input: "How are you?" -> Output: "How are you?"
15
+ */
16
+
17
+ import type { LanguageModelV2, LanguageModelV2CallOptions } from 'ai';
18
+ import { Log } from '../util/log';
19
+
20
+ const log = Log.create({ service: 'provider.echo' });
21
+
22
+ /**
23
+ * Extract text content from the prompt messages
24
+ */
25
+ function extractTextFromPrompt(
26
+ prompt: LanguageModelV2CallOptions['prompt']
27
+ ): string {
28
+ const textParts: string[] = [];
29
+
30
+ for (const message of prompt) {
31
+ if (message.role === 'user') {
32
+ for (const part of message.content) {
33
+ if (part.type === 'text') {
34
+ textParts.push(part.text);
35
+ }
36
+ }
37
+ }
38
+ }
39
+
40
+ // Return the last user message or a default response
41
+ return textParts.length > 0
42
+ ? textParts[textParts.length - 1]
43
+ : 'Echo: No user message found';
44
+ }
45
+
46
+ /**
47
+ * Generate a unique ID for streaming parts
48
+ */
49
+ function generatePartId(): string {
50
+ return `echo_${Date.now()}_${Math.random().toString(36).substring(2, 8)}`;
51
+ }
52
+
53
+ /**
54
+ * Creates an echo language model that echoes back the user's input
55
+ * Implements LanguageModelV2 interface for AI SDK 6.x compatibility
56
+ */
57
+ export function createEchoModel(modelId: string = 'echo'): LanguageModelV2 {
58
+ const model: LanguageModelV2 = {
59
+ specificationVersion: 'v2',
60
+ provider: 'link-assistant',
61
+ modelId,
62
+
63
+ // No external URLs are supported by this synthetic provider
64
+ supportedUrls: {},
65
+
66
+ async doGenerate(options: LanguageModelV2CallOptions) {
67
+ const echoText = extractTextFromPrompt(options.prompt);
68
+ log.info('echo generate', { modelId, echoText });
69
+
70
+ // Simulate token usage
71
+ const promptTokens = Math.ceil(echoText.length / 4);
72
+ const completionTokens = Math.ceil(echoText.length / 4);
73
+
74
+ return {
75
+ content: [
76
+ {
77
+ type: 'text' as const,
78
+ text: echoText,
79
+ },
80
+ ],
81
+ finishReason: 'stop' as const,
82
+ usage: {
83
+ promptTokens,
84
+ completionTokens,
85
+ },
86
+ warnings: [],
87
+ providerMetadata: undefined,
88
+ request: undefined,
89
+ response: undefined,
90
+ };
91
+ },
92
+
93
+ async doStream(options: LanguageModelV2CallOptions) {
94
+ const echoText = extractTextFromPrompt(options.prompt);
95
+ log.info('echo stream', { modelId, echoText });
96
+
97
+ // Simulate token usage
98
+ const promptTokens = Math.ceil(echoText.length / 4);
99
+ const completionTokens = Math.ceil(echoText.length / 4);
100
+
101
+ const textPartId = generatePartId();
102
+
103
+ // Create a ReadableStream with LanguageModelV2StreamPart format
104
+ // V2 format uses: text-start -> text-delta (with delta) -> text-end -> finish
105
+ const stream = new ReadableStream({
106
+ async start(controller) {
107
+ // Emit text-start
108
+ controller.enqueue({
109
+ type: 'text-start',
110
+ id: textPartId,
111
+ providerMetadata: undefined,
112
+ });
113
+
114
+ // Emit the text in chunks for realistic streaming behavior
115
+ const chunkSize = 10;
116
+ for (let i = 0; i < echoText.length; i += chunkSize) {
117
+ const chunk = echoText.slice(i, i + chunkSize);
118
+ controller.enqueue({
119
+ type: 'text-delta',
120
+ id: textPartId,
121
+ delta: chunk,
122
+ providerMetadata: undefined,
123
+ });
124
+ }
125
+
126
+ // Emit text-end
127
+ controller.enqueue({
128
+ type: 'text-end',
129
+ id: textPartId,
130
+ providerMetadata: undefined,
131
+ });
132
+
133
+ // Emit finish event with usage information
134
+ controller.enqueue({
135
+ type: 'finish',
136
+ finishReason: 'stop',
137
+ usage: {
138
+ promptTokens,
139
+ completionTokens,
140
+ },
141
+ providerMetadata: undefined,
142
+ });
143
+
144
+ controller.close();
145
+ },
146
+ });
147
+
148
+ return {
149
+ stream,
150
+ request: undefined,
151
+ response: undefined,
152
+ warnings: [],
153
+ };
154
+ },
155
+ };
156
+
157
+ return model;
158
+ }
159
+
160
+ /**
161
+ * Echo provider factory function - follows AI SDK provider pattern
162
+ */
163
+ export function createEchoProvider(options?: { name?: string }) {
164
+ return {
165
+ languageModel(modelId: string): LanguageModelV2 {
166
+ return createEchoModel(modelId);
167
+ },
168
+ textEmbeddingModel() {
169
+ throw new Error('Echo provider does not support text embeddings');
170
+ },
171
+ };
172
+ }
173
+
174
+ export const echoProvider = createEchoProvider();
@@ -78,18 +78,17 @@ export namespace ModelsDev {
78
78
 
79
79
  export async function refresh() {
80
80
  const file = Bun.file(filepath);
81
- log.info('refreshing', {
82
- file,
83
- });
81
+ log.info(() => ({ message: 'refreshing', file }));
84
82
  const result = await fetch('https://models.dev/api.json', {
85
83
  headers: {
86
84
  'User-Agent': 'agent-cli/1.0.0',
87
85
  },
88
86
  signal: AbortSignal.timeout(10 * 1000),
89
87
  }).catch((e) => {
90
- log.error('Failed to fetch models.dev', {
88
+ log.error(() => ({
89
+ message: 'Failed to fetch models.dev',
91
90
  error: e,
92
- });
91
+ }));
93
92
  });
94
93
  if (result && result.ok) await Bun.write(file, await result.text());
95
94
  }
@@ -14,6 +14,8 @@ import { Instance } from '../project/instance';
14
14
  import { Global } from '../global';
15
15
  import { Flag } from '../flag/flag';
16
16
  import { iife } from '../util/iife';
17
+ import { createEchoModel } from './echo';
18
+ import { createCacheModel } from './cache';
17
19
 
18
20
  export namespace Provider {
19
21
  const log = Log.create({ service: 'provider' });
@@ -35,7 +37,7 @@ export namespace Provider {
35
37
  // Check if OAuth credentials are available via the auth plugin
36
38
  const auth = await Auth.get('anthropic');
37
39
  if (auth?.type === 'oauth') {
38
- log.info('using anthropic oauth credentials');
40
+ log.info(() => ({ message: 'using anthropic oauth credentials' }));
39
41
  const loaderFn = await AuthPlugins.getLoader('anthropic');
40
42
  if (loaderFn) {
41
43
  const result = await loaderFn(() => Auth.get('anthropic'), input);
@@ -328,7 +330,7 @@ export namespace Provider {
328
330
  google: async (input) => {
329
331
  const auth = await Auth.get('google');
330
332
  if (auth?.type === 'oauth') {
331
- log.info('using google oauth credentials');
333
+ log.info(() => ({ message: 'using google oauth credentials' }));
332
334
  const loaderFn = await AuthPlugins.getLoader('google');
333
335
  if (loaderFn) {
334
336
  const result = await loaderFn(() => Auth.get('google'), input);
@@ -353,7 +355,9 @@ export namespace Provider {
353
355
  'github-copilot': async (input) => {
354
356
  const auth = await Auth.get('github-copilot');
355
357
  if (auth?.type === 'oauth') {
356
- log.info('using github copilot oauth credentials');
358
+ log.info(() => ({
359
+ message: 'using github copilot oauth credentials',
360
+ }));
357
361
  const loaderFn = await AuthPlugins.getLoader('github-copilot');
358
362
  if (loaderFn) {
359
363
  const result = await loaderFn(
@@ -381,7 +385,9 @@ export namespace Provider {
381
385
  'github-copilot-enterprise': async (input) => {
382
386
  const auth = await Auth.get('github-copilot-enterprise');
383
387
  if (auth?.type === 'oauth') {
384
- log.info('using github copilot enterprise oauth credentials');
388
+ log.info(() => ({
389
+ message: 'using github copilot enterprise oauth credentials',
390
+ }));
385
391
  const loaderFn = await AuthPlugins.getLoader('github-copilot');
386
392
  if (loaderFn) {
387
393
  const result = await loaderFn(
@@ -433,7 +439,10 @@ export namespace Provider {
433
439
  return { autoload: false };
434
440
  }
435
441
 
436
- log.info('using claude oauth credentials', { source: tokenSource });
442
+ log.info(() => ({
443
+ message: 'using claude oauth credentials',
444
+ source: tokenSource,
445
+ }));
437
446
 
438
447
  // Create authenticated fetch with Bearer token and OAuth beta header
439
448
  const customFetch = ClaudeOAuth.createAuthenticatedFetch(oauthToken);
@@ -452,6 +461,56 @@ export namespace Provider {
452
461
  },
453
462
  };
454
463
  },
464
+ /**
465
+ * Echo provider - synthetic provider for dry-run testing
466
+ * Echoes back the user's input without making actual API calls.
467
+ *
468
+ * This provider is automatically enabled when --dry-run mode is active.
469
+ * It can also be used explicitly with: --model link-assistant/echo
470
+ *
471
+ * @see https://github.com/link-assistant/agent/issues/89
472
+ */
473
+ 'link-assistant': async () => {
474
+ // Echo provider is always available - no external dependencies needed
475
+ return {
476
+ autoload: Flag.OPENCODE_DRY_RUN, // Auto-load only in dry-run mode
477
+ async getModel(_sdk: any, modelID: string) {
478
+ // Return our custom echo model that implements LanguageModelV2
479
+ return createEchoModel(modelID);
480
+ },
481
+ options: {},
482
+ };
483
+ },
484
+ /**
485
+ * Cache provider - synthetic provider for caching API responses
486
+ * Caches responses using links notation for deterministic testing.
487
+ *
488
+ * This provider caches API responses and falls back to echo behavior.
489
+ * It can be used explicitly with: --model link-assistant/cache/opencode/grok-code
490
+ *
491
+ * @see https://github.com/link-assistant/agent/issues/89
492
+ */
493
+ 'link-assistant/cache': async () => {
494
+ // Cache provider is always available - no external dependencies needed
495
+ return {
496
+ autoload: false, // Not auto-loaded
497
+ async getModel(_sdk: any, modelID: string) {
498
+ // modelID should be in format "provider/model" like "opencode/grok-code"
499
+ const parts = modelID.split('/');
500
+ if (parts.length < 2) {
501
+ throw new Error(
502
+ `Invalid cache model ID: ${modelID}. Expected format: provider/model`
503
+ );
504
+ }
505
+ const [providerId, ...modelParts] = parts;
506
+ const actualModelId = modelParts.join('/');
507
+
508
+ // Return our custom cache model that implements LanguageModelV2
509
+ return createCacheModel(providerId, actualModelId);
510
+ },
511
+ options: {},
512
+ };
513
+ },
455
514
  };
456
515
 
457
516
  const state = Instance.state(async () => {
@@ -485,7 +544,7 @@ export namespace Provider {
485
544
  // Maps `${provider}/${key}` to the provider’s actual model ID for custom aliases.
486
545
  const realIdByKey = new Map<string, string>();
487
546
 
488
- log.info('init');
547
+ log.info(() => ({ message: 'init' }));
489
548
 
490
549
  function mergeProvider(
491
550
  id: string,
@@ -555,6 +614,51 @@ export namespace Provider {
555
614
  realIdByKey.set('google/gemini-3-pro', 'gemini-3-pro-preview');
556
615
  }
557
616
 
617
+ // Add link-assistant echo provider for dry-run testing
618
+ // This synthetic provider echoes back user input without API calls
619
+ // @see https://github.com/link-assistant/agent/issues/89
620
+ database['link-assistant'] = {
621
+ id: 'link-assistant',
622
+ name: 'Link Assistant (Echo)',
623
+ env: [], // No environment variables needed - synthetic provider
624
+ models: {
625
+ echo: {
626
+ id: 'echo',
627
+ name: 'Echo Model',
628
+ release_date: '2024-01-01',
629
+ attachment: false,
630
+ reasoning: false,
631
+ temperature: false,
632
+ tool_call: true,
633
+ cost: {
634
+ input: 0,
635
+ output: 0,
636
+ cache_read: 0,
637
+ cache_write: 0,
638
+ },
639
+ limit: {
640
+ context: 1000000, // Virtually unlimited
641
+ output: 100000,
642
+ },
643
+ modalities: {
644
+ input: ['text'],
645
+ output: ['text'],
646
+ },
647
+ options: {},
648
+ },
649
+ },
650
+ };
651
+
652
+ // Add link-assistant/cache provider for caching API responses
653
+ // This synthetic provider caches responses and falls back to echo
654
+ // @see https://github.com/link-assistant/agent/issues/89
655
+ database['link-assistant/cache'] = {
656
+ id: 'link-assistant/cache',
657
+ name: 'Link Assistant (Cache)',
658
+ env: [], // No environment variables needed - synthetic provider
659
+ models: {}, // Models are dynamically created based on the provider/model syntax
660
+ };
661
+
558
662
  for (const [providerID, provider] of configProviders) {
559
663
  const existing = database[providerID];
560
664
  const parsed: ModelsDev.Provider = {
@@ -686,7 +790,7 @@ export namespace Provider {
686
790
  delete providers[providerID];
687
791
  continue;
688
792
  }
689
- log.info('found', { providerID });
793
+ log.info(() => ({ message: 'found', providerID }));
690
794
  }
691
795
 
692
796
  return {
@@ -721,19 +825,21 @@ export namespace Provider {
721
825
 
722
826
  let installedPath: string;
723
827
  if (!pkg.startsWith('file://')) {
724
- log.info('installing provider package', {
828
+ log.info(() => ({
829
+ message: 'installing provider package',
725
830
  providerID: provider.id,
726
831
  pkg,
727
832
  version: 'latest',
728
- });
833
+ }));
729
834
  installedPath = await BunProc.install(pkg, 'latest');
730
- log.info('provider package installed successfully', {
835
+ log.info(() => ({
836
+ message: 'provider package installed successfully',
731
837
  providerID: provider.id,
732
838
  pkg,
733
839
  installedPath,
734
- });
840
+ }));
735
841
  } else {
736
- log.info('loading local provider', { pkg });
842
+ log.info(() => ({ message: 'loading local provider', pkg }));
737
843
  installedPath = pkg;
738
844
  }
739
845
 
@@ -779,13 +885,14 @@ export namespace Provider {
779
885
  s.sdk.set(key, loaded);
780
886
  return loaded as SDK;
781
887
  })().catch((e) => {
782
- log.error('provider initialization failed', {
888
+ log.error(() => ({
889
+ message: 'provider initialization failed',
783
890
  providerID: provider.id,
784
891
  pkg: model.provider?.npm ?? provider.npm ?? provider.id,
785
892
  error: e instanceof Error ? e.message : String(e),
786
893
  stack: e instanceof Error ? e.stack : undefined,
787
894
  cause: e instanceof Error && e.cause ? String(e.cause) : undefined,
788
- });
895
+ }));
789
896
  throw new InitError({ providerID: provider.id }, { cause: e });
790
897
  });
791
898
  }
@@ -799,30 +906,45 @@ export namespace Provider {
799
906
  const s = await state();
800
907
  if (s.models.has(key)) return s.models.get(key)!;
801
908
 
802
- log.info('getModel', {
803
- providerID,
804
- modelID,
805
- });
909
+ log.info(() => ({ message: 'getModel', providerID, modelID }));
806
910
 
807
911
  const provider = s.providers[providerID];
808
912
  if (!provider) throw new ModelNotFoundError({ providerID, modelID });
809
- const info = provider.info.models[modelID];
810
- if (!info) throw new ModelNotFoundError({ providerID, modelID });
811
- const sdk = await getSDK(provider.info, info);
913
+
914
+ // For synthetic providers (like link-assistant/echo and link-assistant/cache), skip SDK loading
915
+ // These providers have a custom getModel function that creates the model directly
916
+ const isSyntheticProvider =
917
+ providerID === 'link-assistant' || providerID === 'link-assistant/cache';
918
+
919
+ // For synthetic providers, we don't need model info from the database
920
+ const info = isSyntheticProvider ? null : provider.info.models[modelID];
921
+ if (!isSyntheticProvider && !info)
922
+ throw new ModelNotFoundError({ providerID, modelID });
812
923
 
813
924
  try {
814
925
  const keyReal = `${providerID}/${modelID}`;
815
- const realID = s.realIdByKey.get(keyReal) ?? info.id;
816
- const language = provider.getModel
817
- ? await provider.getModel(sdk, realID, provider.options)
818
- : sdk.languageModel(realID);
819
- log.info('found', { providerID, modelID });
926
+ const realID = s.realIdByKey.get(keyReal) ?? (info ? info.id : modelID);
927
+
928
+ let language: LanguageModel;
929
+ if (isSyntheticProvider && provider.getModel) {
930
+ // For synthetic providers, call getModel directly without SDK
931
+ language = await provider.getModel(null, realID, provider.options);
932
+ } else {
933
+ // For regular providers, load the SDK first
934
+ const sdk = await getSDK(provider.info, info!);
935
+ language = provider.getModel
936
+ ? await provider.getModel(sdk, realID, provider.options)
937
+ : sdk.languageModel(realID);
938
+ }
939
+ log.info(() => ({ message: 'found', providerID, modelID }));
820
940
  s.models.set(key, {
821
941
  providerID,
822
942
  modelID,
823
943
  info,
824
944
  language,
825
- npm: info.provider?.npm ?? provider.info.npm,
945
+ npm: isSyntheticProvider
946
+ ? provider.info.npm
947
+ : (info.provider?.npm ?? provider.info.npm),
826
948
  });
827
949
  return {
828
950
  modelID,
@@ -897,6 +1019,18 @@ export namespace Provider {
897
1019
 
898
1020
  export async function defaultModel() {
899
1021
  const cfg = await Config.get();
1022
+
1023
+ // In dry-run mode, use the echo provider by default
1024
+ // This allows testing round-trips and multi-turn conversations without API costs
1025
+ // @see https://github.com/link-assistant/agent/issues/89
1026
+ if (Flag.OPENCODE_DRY_RUN) {
1027
+ log.info('dry-run mode enabled, using echo provider as default');
1028
+ return {
1029
+ providerID: 'link-assistant',
1030
+ modelID: 'echo',
1031
+ };
1032
+ }
1033
+
900
1034
  if (cfg.model) return parseModel(cfg.model);
901
1035
 
902
1036
  // Prefer opencode provider if available
@@ -905,10 +1039,11 @@ export namespace Provider {
905
1039
  if (opencodeProvider) {
906
1040
  const [model] = sort(Object.values(opencodeProvider.info.models));
907
1041
  if (model) {
908
- log.info('using opencode provider as default', {
1042
+ log.info(() => ({
1043
+ message: 'using opencode provider as default',
909
1044
  provider: opencodeProvider.info.id,
910
1045
  model: model.id,
911
- });
1046
+ }));
912
1047
  return {
913
1048
  providerID: opencodeProvider.info.id,
914
1049
  modelID: model.id,
@@ -55,9 +55,7 @@ export namespace Server {
55
55
  export const App = lazy(() =>
56
56
  app
57
57
  .onError((err, c) => {
58
- log.error('failed', {
59
- error: err,
60
- });
58
+ log.error(() => ({ message: 'failed', error: err }));
61
59
  if (err instanceof NamedError) {
62
60
  let status: ContentfulStatusCode;
63
61
  if (err instanceof Storage.NotFoundError) status = 404;
@@ -71,10 +69,11 @@ export namespace Server {
71
69
  });
72
70
  })
73
71
  .use(async (c, next) => {
74
- log.info('request', {
72
+ log.info(() => ({
73
+ message: 'request',
75
74
  method: c.req.method,
76
75
  path: c.req.path,
77
- });
76
+ }));
78
77
  const timer = log.time('request', {
79
78
  method: c.req.method,
80
79
  path: c.req.path,
@@ -96,8 +96,22 @@ export class Agent {
96
96
  const errorTime = Date.now();
97
97
  const callID = `call_${Math.floor(Math.random() * 100000000)}`;
98
98
 
99
- // Log full error to stderr for debugging
100
- console.error('Tool execution error:', error);
99
+ // Log full error to stderr for debugging in JSON format
100
+ console.error(
101
+ JSON.stringify({
102
+ log: {
103
+ level: 'error',
104
+ timestamp: new Date().toISOString(),
105
+ message: 'Tool execution error',
106
+ tool: tool.name,
107
+ error: {
108
+ name: error.name,
109
+ message: error.message,
110
+ stack: error.stack,
111
+ },
112
+ },
113
+ })
114
+ );
101
115
 
102
116
  // Emit tool_use event with error
103
117
  this.emitEvent('tool_use', {
@@ -52,7 +52,7 @@ export namespace SessionCompaction {
52
52
  // tool calls that are no longer relevant.
53
53
  export async function prune(input: { sessionID: string }) {
54
54
  if (Flag.OPENCODE_DISABLE_PRUNE) return;
55
- log.info('pruning');
55
+ log.info(() => ({ message: 'pruning' }));
56
56
  const msgs = await Session.messages({ sessionID: input.sessionID });
57
57
  let total = 0;
58
58
  let pruned = 0;
@@ -78,7 +78,7 @@ export namespace SessionCompaction {
78
78
  }
79
79
  }
80
80
  }
81
- log.info('found', { pruned, total });
81
+ log.info(() => ({ message: 'found', pruned, total }));
82
82
  if (pruned > PRUNE_MINIMUM) {
83
83
  for (const part of toPrune) {
84
84
  if (part.state.status === 'completed') {
@@ -86,7 +86,7 @@ export namespace SessionCompaction {
86
86
  await Session.updatePart(part);
87
87
  }
88
88
  }
89
- log.info('pruned', { count: toPrune.length });
89
+ log.info(() => ({ message: 'pruned', count: toPrune.length }));
90
90
  }
91
91
  }
92
92
 
@@ -139,9 +139,7 @@ export namespace SessionCompaction {
139
139
  const result = await processor.process(() =>
140
140
  streamText({
141
141
  onError(error) {
142
- log.error('stream error', {
143
- error,
144
- });
142
+ log.error(() => ({ message: 'stream error', error }));
145
143
  },
146
144
  // set to 0, we handle loop
147
145
  maxRetries: 0,
@@ -178,7 +178,7 @@ export namespace Session {
178
178
  updated: Date.now(),
179
179
  },
180
180
  };
181
- log.info('created', result);
181
+ log.info(() => ({ message: 'created', ...result }));
182
182
  await Storage.write(['session', Instance.project.id, result.id], result);
183
183
  Bus.publish(Event.Created, {
184
184
  info: result,
@@ -273,7 +273,7 @@ export namespace Session {
273
273
  info: session,
274
274
  });
275
275
  } catch (e) {
276
- log.error(e);
276
+ log.error(() => ({ error: e }));
277
277
  }
278
278
  });
279
279
 
@@ -39,7 +39,7 @@ export namespace SessionProcessor {
39
39
  return toolcalls[toolCallID];
40
40
  },
41
41
  async process(fn: () => StreamTextResult<Record<string, AITool>, never>) {
42
- log.info('process');
42
+ log.info(() => ({ message: 'process' }));
43
43
  while (true) {
44
44
  try {
45
45
  let currentText: MessageV2.TextPart | undefined;
@@ -305,16 +305,12 @@ export namespace SessionProcessor {
305
305
  break;
306
306
 
307
307
  default:
308
- log.info('unhandled', {
309
- ...value,
310
- });
308
+ log.info(() => ({ message: 'unhandled', ...value }));
311
309
  continue;
312
310
  }
313
311
  }
314
312
  } catch (e) {
315
- log.error('process', {
316
- error: e,
317
- });
313
+ log.error(() => ({ message: 'process', error: e }));
318
314
  const error = MessageV2.fromError(e, {
319
315
  providerID: input.providerID,
320
316
  });