@mastra/client-js 0.0.0-switch-to-core-20250424015131 → 0.0.0-taofeeqInngest-20250603090617
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +19 -0
- package/CHANGELOG.md +493 -2
- package/dist/index.cjs +770 -48
- package/dist/index.d.cts +358 -36
- package/dist/index.d.ts +358 -36
- package/dist/index.js +766 -48
- package/package.json +10 -7
- package/src/adapters/agui.test.ts +180 -0
- package/src/adapters/agui.ts +239 -0
- package/src/client.ts +122 -2
- package/src/example.ts +29 -30
- package/src/index.test.ts +125 -5
- package/src/resources/a2a.ts +88 -0
- package/src/resources/agent.ts +36 -36
- package/src/resources/base.ts +2 -2
- package/src/resources/index.ts +4 -1
- package/src/resources/legacy-workflow.ts +242 -0
- package/src/resources/mcp-tool.ts +48 -0
- package/src/resources/memory-thread.ts +13 -3
- package/src/resources/network.ts +6 -12
- package/src/resources/tool.ts +16 -3
- package/src/resources/workflow.ts +234 -96
- package/src/types.ts +106 -16
- package/src/utils/index.ts +11 -0
- package/src/utils/zod-to-json-schema.ts +10 -0
package/src/index.test.ts
CHANGED
@@ -1,7 +1,6 @@
-import type { MessageType } from '@mastra/core';
 import { describe, expect, beforeEach, it, vi } from 'vitest';
-
 import { MastraClient } from './client';
+import type { McpServerListResponse, ServerDetailInfo } from './types';
 
 // Mock fetch globally
 global.fetch = vi.fn();
@@ -237,6 +236,7 @@ describe('MastraClient Resources', () => {
         model: 'gpt-4',
         instructions: 'Test instructions',
         tools: {},
+        workflows: {},
       };
       mockFetchResponse(mockResponse);
 
@@ -489,7 +489,7 @@ describe('MastraClient Resources', () => {
       const result = await memoryThread.update({
         title: 'Updated Thread',
         metadata: { updated: true },
-
+        resourceId: 'test-resource',
       });
       expect(result).toEqual(mockResponse);
       expect(global.fetch).toHaveBeenCalledWith(
@@ -536,6 +536,7 @@ describe('MastraClient Resources', () => {
           content: 'test',
          role: 'user' as const,
          threadId: 'test-thread',
+          resourceId: 'test-resource',
          createdAt: new Date('2025-03-26T10:40:55.116Z'),
        },
      ];
@@ -552,6 +553,35 @@ describe('MastraClient Resources', () => {
        }),
      );
    });
+
+    it('should get thread messages with limit', async () => {
+      const mockResponse = {
+        messages: [
+          {
+            id: '1',
+            content: 'test',
+            threadId,
+            role: 'user',
+            type: 'text',
+            resourceId: 'test-resource',
+            createdAt: new Date(),
+          },
+        ],
+        uiMessages: [],
+      };
+      mockFetchResponse(mockResponse);
+
+      const limit = 5;
+      const result = await memoryThread.getMessages({ limit });
+
+      expect(result).toEqual(mockResponse);
+      expect(global.fetch).toHaveBeenCalledWith(
+        `${clientOptions.baseUrl}/api/memory/threads/${threadId}/messages?agentId=${agentId}&limit=${limit}`,
+        expect.objectContaining({
+          headers: expect.objectContaining(clientOptions.headers),
+        }),
+      );
+    });
  });

  describe('Tool Resource', () => {
@@ -584,10 +614,10 @@ describe('MastraClient Resources', () => {
    it('should execute tool', async () => {
      const mockResponse = { data: 'test' };
      mockFetchResponse(mockResponse);
-      const result = await tool.execute({ data: '' });
+      const result = await tool.execute({ data: '', runId: 'test-run-id' });
      expect(result).toEqual(mockResponse);
      expect(global.fetch).toHaveBeenCalledWith(
-        `${clientOptions.baseUrl}/api/tools/test-tool/execute`,
+        `${clientOptions.baseUrl}/api/tools/test-tool/execute?runId=test-run-id`,
        expect.objectContaining({
          method: 'POST',
          headers: expect.objectContaining({
@@ -707,4 +737,94 @@ describe('MastraClient Resources', () => {
      );
    });
  });
+
+  describe('MCP Server Registry Client Methods', () => {
+    const mockServerInfo1 = {
+      id: 'mcp-server-1',
+      name: 'Test MCP Server 1',
+      version_detail: { version: '1.0.0', release_date: '2023-01-01T00:00:00Z', is_latest: true },
+    };
+    const mockServerInfo2 = {
+      id: 'mcp-server-2',
+      name: 'Test MCP Server 2',
+      version_detail: { version: '1.1.0', release_date: '2023-02-01T00:00:00Z', is_latest: true },
+    };
+
+    const mockServerDetail1: ServerDetailInfo = {
+      ...mockServerInfo1,
+      description: 'Detailed description for server 1',
+      package_canonical: 'npm',
+      packages: [{ registry_name: 'npm', name: '@example/server1', version: '1.0.0' }],
+      remotes: [{ transport_type: 'sse', url: 'http://localhost/sse1' }],
+    };
+
+    describe('getMcpServers()', () => {
+      it('should fetch a list of MCP servers', async () => {
+        const mockResponse: McpServerListResponse = {
+          servers: [mockServerInfo1, mockServerInfo2],
+          total_count: 2,
+          next: null,
+        };
+        mockFetchResponse(mockResponse);
+
+        const result = await client.getMcpServers();
+        expect(result).toEqual(mockResponse);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+
+      it('should fetch MCP servers with limit and offset parameters', async () => {
+        const mockResponse: McpServerListResponse = {
+          servers: [mockServerInfo1],
+          total_count: 2,
+          next: '/api/mcp/v0/servers?limit=1&offset=1',
+        };
+        mockFetchResponse(mockResponse);
+
+        const result = await client.getMcpServers({ limit: 1, offset: 0 });
+        expect(result).toEqual(mockResponse);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers?limit=1&offset=0`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+    });
+
+    describe('getMcpServerDetails()', () => {
+      const serverId = 'mcp-server-1';
+
+      it('should fetch details for a specific MCP server', async () => {
+        mockFetchResponse(mockServerDetail1);
+
+        const result = await client.getMcpServerDetails(serverId);
+        expect(result).toEqual(mockServerDetail1);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers/${serverId}`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+
+      it('should fetch MCP server details with a version parameter', async () => {
+        mockFetchResponse(mockServerDetail1);
+        const version = '1.0.0';
+
+        const result = await client.getMcpServerDetails(serverId, { version });
+        expect(result).toEqual(mockServerDetail1);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers/${serverId}?version=${version}`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+    });
+  });
 });
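The tests above pin down the request shapes for the new MCP registry endpoints (`/api/mcp/v0/servers` and `/api/mcp/v0/servers/:id`). A minimal usage sketch, assuming a Mastra server at `http://localhost:4111` and an illustrative auth header:

```ts
import { MastraClient } from '@mastra/client-js';

const client = new MastraClient({
  baseUrl: 'http://localhost:4111', // assumed local dev server
  headers: { Authorization: 'Bearer dev-token' }, // illustrative header
});

async function exploreRegistry() {
  // GET /api/mcp/v0/servers?limit=1&offset=0
  const page = await client.getMcpServers({ limit: 1, offset: 0 });
  console.log(page.total_count, page.next);

  // GET /api/mcp/v0/servers/:id?version=1.0.0
  const first = page.servers[0];
  if (first) {
    const detail = await client.getMcpServerDetails(first.id, { version: '1.0.0' });
    console.log(detail.packages, detail.remotes);
  }
}

exploreRegistry().catch(console.error);
```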
package/src/resources/a2a.ts
ADDED

@@ -0,0 +1,88 @@
+import type { TaskSendParams, TaskQueryParams, TaskIdParams, Task, AgentCard, JSONRPCResponse } from '@mastra/core/a2a';
+import type { ClientOptions } from '../types';
+import { BaseResource } from './base';
+
+/**
+ * Class for interacting with an agent via the A2A protocol
+ */
+export class A2A extends BaseResource {
+  constructor(
+    options: ClientOptions,
+    private agentId: string,
+  ) {
+    super(options);
+  }
+
+  /**
+   * Get the agent card with metadata about the agent
+   * @returns Promise containing the agent card information
+   */
+  async getCard(): Promise<AgentCard> {
+    return this.request(`/.well-known/${this.agentId}/agent.json`);
+  }
+
+  /**
+   * Send a message to the agent and get a response
+   * @param params - Parameters for the task
+   * @returns Promise containing the task response
+   */
+  async sendMessage(params: TaskSendParams): Promise<{ task: Task }> {
+    const response = await this.request<JSONRPCResponse<Task>>(`/a2a/${this.agentId}`, {
+      method: 'POST',
+      body: {
+        method: 'tasks/send',
+        params,
+      },
+    });
+
+    return { task: response.result! };
+  }
+
+  /**
+   * Get the status and result of a task
+   * @param params - Parameters for querying the task
+   * @returns Promise containing the task response
+   */
+  async getTask(params: TaskQueryParams): Promise<Task> {
+    const response = await this.request<JSONRPCResponse<Task>>(`/a2a/${this.agentId}`, {
+      method: 'POST',
+      body: {
+        method: 'tasks/get',
+        params,
+      },
+    });
+
+    return response.result!;
+  }
+
+  /**
+   * Cancel a running task
+   * @param params - Parameters identifying the task to cancel
+   * @returns Promise containing the task response
+   */
+  async cancelTask(params: TaskIdParams): Promise<{ task: Task }> {
+    return this.request(`/a2a/${this.agentId}`, {
+      method: 'POST',
+      body: {
+        method: 'tasks/cancel',
+        params,
+      },
+    });
+  }
+
+  /**
+   * Send a message and subscribe to streaming updates (not fully implemented)
+   * @param params - Parameters for the task
+   * @returns Promise containing the task response
+   */
+  async sendAndSubscribe(params: TaskSendParams): Promise<Response> {
+    return this.request(`/a2a/${this.agentId}`, {
+      method: 'POST',
+      body: {
+        method: 'tasks/sendSubscribe',
+        params,
+      },
+      stream: true,
+    });
+  }
+}
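A rough sketch of how this resource could be driven from the client. The `getA2A` accessor and the exact `TaskSendParams` message shape are assumptions here (they come from `client.ts` and `@mastra/core/a2a`, neither of which is shown in this excerpt):

```ts
import { MastraClient } from '@mastra/client-js';

const client = new MastraClient({ baseUrl: 'http://localhost:4111' });
const a2a = client.getA2A('weather-agent'); // assumed accessor + hypothetical agent id

async function talkToAgent() {
  // GET /.well-known/weather-agent/agent.json
  const card = await a2a.getCard();
  console.log(card.name);

  // POST /a2a/weather-agent with a JSON-RPC style body { method: 'tasks/send', params }
  const { task } = await a2a.sendMessage({
    id: 'task-1',
    message: { role: 'user', parts: [{ type: 'text', text: 'What is the weather in Berlin?' }] },
  } as any); // params shape assumed; cast kept loose in this sketch

  // POST /a2a/weather-agent with { method: 'tasks/get', params }
  const status = await a2a.getTask({ id: task.id } as any);
  console.log(status);
}

talkToAgent().catch(console.error);
```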
package/src/resources/agent.ts
CHANGED
@@ -1,8 +1,8 @@
+import { processDataStream } from '@ai-sdk/ui-utils';
 import type { GenerateReturn } from '@mastra/core';
 import type { JSONSchema7 } from 'json-schema';
 import { ZodSchema } from 'zod';
-import { zodToJsonSchema } from 'zod-to-json-schema';
-import { processDataStream } from '@ai-sdk/ui-utils';
+import { zodToJsonSchema } from '../utils/zod-to-json-schema';
 
 import type {
   GenerateParams,
@@ -14,29 +14,8 @@ import type {
 } from '../types';
 
 import { BaseResource } from './base';
-
-
-  constructor(
-    options: ClientOptions,
-    private agentId: string,
-    private toolId: string,
-  ) {
-    super(options);
-  }
-
-  /**
-   * Executes a specific tool for an agent
-   * @param params - Parameters required for tool execution
-   * @returns Promise containing tool execution results
-   */
-  /** @deprecated use CreateRun/startRun */
-  execute(params: { data: any }): Promise<any> {
-    return this.request(`/api/agents/${this.agentId}/tools/${this.toolId}/execute`, {
-      method: 'POST',
-      body: params,
-    });
-  }
-}
+import type { RuntimeContext } from '@mastra/core/runtime-context';
+import { parseClientRuntimeContext } from '../utils';
 
 export class AgentVoice extends BaseResource {
   constructor(
@@ -70,7 +49,7 @@ export class AgentVoice extends BaseResource {
   * @param options - Optional provider-specific options
   * @returns Promise containing the transcribed text
   */
-  listen(audio: Blob, options?: Record<string, any>): Promise<
+  listen(audio: Blob, options?: Record<string, any>): Promise<{ text: string }> {
     const formData = new FormData();
     formData.append('audio', audio);
 
@@ -91,6 +70,14 @@ export class AgentVoice extends BaseResource {
   getSpeakers(): Promise<Array<{ voiceId: string; [key: string]: any }>> {
     return this.request(`/api/agents/${this.agentId}/voice/speakers`);
   }
+
+  /**
+   * Get the listener configuration for the agent's voice provider
+   * @returns Promise containing a check if the agent has listening capabilities
+   */
+  getListener(): Promise<{ enabled: boolean }> {
+    return this.request(`/api/agents/${this.agentId}/voice/listener`);
+  }
 }
 
 export class Agent extends BaseResource {
@@ -122,11 +109,9 @@ export class Agent extends BaseResource {
   ): Promise<GenerateReturn<T>> {
     const processedParams = {
       ...params,
-      output: params.output
-      experimental_output:
-
-        ? zodToJsonSchema(params.experimental_output)
-        : params.experimental_output,
+      output: params.output ? zodToJsonSchema(params.output) : undefined,
+      experimental_output: params.experimental_output ? zodToJsonSchema(params.experimental_output) : undefined,
+      runtimeContext: parseClientRuntimeContext(params.runtimeContext),
     };
 
     return this.request(`/api/agents/${this.agentId}/generate`, {
@@ -149,11 +134,9 @@ export class Agent extends BaseResource {
   > {
     const processedParams = {
       ...params,
-      output: params.output
-      experimental_output:
-
-        ? zodToJsonSchema(params.experimental_output)
-        : params.experimental_output,
+      output: params.output ? zodToJsonSchema(params.output) : undefined,
+      experimental_output: params.experimental_output ? zodToJsonSchema(params.experimental_output) : undefined,
+      runtimeContext: parseClientRuntimeContext(params.runtimeContext),
     };
 
     const response: Response & {
@@ -187,6 +170,23 @@ export class Agent extends BaseResource {
     return this.request(`/api/agents/${this.agentId}/tools/${toolId}`);
   }
 
+  /**
+   * Executes a tool for the agent
+   * @param toolId - ID of the tool to execute
+   * @param params - Parameters required for tool execution
+   * @returns Promise containing the tool execution results
+   */
+  executeTool(toolId: string, params: { data: any; runtimeContext?: RuntimeContext }): Promise<any> {
+    const body = {
+      data: params.data,
+      runtimeContext: params.runtimeContext ? Object.fromEntries(params.runtimeContext.entries()) : undefined,
+    };
+    return this.request(`/api/agents/${this.agentId}/tools/${toolId}/execute`, {
+      method: 'POST',
+      body,
+    });
+  }
+
   /**
   * Retrieves evaluation results for the agent
   * @returns Promise containing agent evaluations
package/src/resources/base.ts
CHANGED
@@ -1,4 +1,4 @@
-import type {
+import type { RequestOptions, ClientOptions } from '../types';
 
 export class BaseResource {
   readonly options: ClientOptions;
@@ -21,7 +21,7 @@ export class BaseResource {
 
     for (let attempt = 0; attempt <= retries; attempt++) {
       try {
-        const response = await fetch(`${baseUrl}${path}`, {
+        const response = await fetch(`${baseUrl.replace(/\/$/, '')}${path}`, {
          ...options,
          headers: {
            ...headers,
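The only behavioural change here is that a configured `baseUrl` may now carry a trailing slash without producing a double slash in the request URL, for example:

```ts
const path = '/api/agents';

// old behaviour
console.log(`${'http://localhost:4111/'}${path}`); // http://localhost:4111//api/agents

// new behaviour
console.log(`${'http://localhost:4111/'.replace(/\/$/, '')}${path}`); // http://localhost:4111/api/agents
```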
package/src/resources/index.ts
CHANGED
@@ -2,6 +2,9 @@ export * from './agent';
 export * from './network';
 export * from './memory-thread';
 export * from './vector';
-export * from './workflow';
+export * from './legacy-workflow';
 export * from './tool';
 export * from './base';
+export * from './workflow';
+export * from './a2a';
+export * from './mcp-tool';
package/src/resources/legacy-workflow.ts
ADDED

@@ -0,0 +1,242 @@
+import type {
+  ClientOptions,
+  LegacyWorkflowRunResult,
+  GetLegacyWorkflowRunsResponse,
+  GetWorkflowRunsParams,
+  GetLegacyWorkflowResponse,
+} from '../types';
+
+import { BaseResource } from './base';
+
+const RECORD_SEPARATOR = '\x1E';
+
+export class LegacyWorkflow extends BaseResource {
+  constructor(
+    options: ClientOptions,
+    private workflowId: string,
+  ) {
+    super(options);
+  }
+
+  /**
+   * Retrieves details about the legacy workflow
+   * @returns Promise containing legacy workflow details including steps and graphs
+   */
+  details(): Promise<GetLegacyWorkflowResponse> {
+    return this.request(`/api/workflows/legacy/${this.workflowId}`);
+  }
+
+  /**
+   * Retrieves all runs for a legacy workflow
+   * @param params - Parameters for filtering runs
+   * @returns Promise containing legacy workflow runs array
+   */
+  runs(params?: GetWorkflowRunsParams): Promise<GetLegacyWorkflowRunsResponse> {
+    const searchParams = new URLSearchParams();
+    if (params?.fromDate) {
+      searchParams.set('fromDate', params.fromDate.toISOString());
+    }
+    if (params?.toDate) {
+      searchParams.set('toDate', params.toDate.toISOString());
+    }
+    if (params?.limit) {
+      searchParams.set('limit', String(params.limit));
+    }
+    if (params?.offset) {
+      searchParams.set('offset', String(params.offset));
+    }
+    if (params?.resourceId) {
+      searchParams.set('resourceId', params.resourceId);
+    }
+
+    if (searchParams.size) {
+      return this.request(`/api/workflows/legacy/${this.workflowId}/runs?${searchParams}`);
+    } else {
+      return this.request(`/api/workflows/legacy/${this.workflowId}/runs`);
+    }
+  }
+
+  /**
+   * Creates a new legacy workflow run
+   * @returns Promise containing the generated run ID
+   */
+  createRun(params?: { runId?: string }): Promise<{ runId: string }> {
+    const searchParams = new URLSearchParams();
+
+    if (!!params?.runId) {
+      searchParams.set('runId', params.runId);
+    }
+
+    return this.request(`/api/workflows/legacy/${this.workflowId}/create-run?${searchParams.toString()}`, {
+      method: 'POST',
+    });
+  }
+
+  /**
+   * Starts a legacy workflow run synchronously without waiting for the workflow to complete
+   * @param params - Object containing the runId and triggerData
+   * @returns Promise containing success message
+   */
+  start(params: { runId: string; triggerData: Record<string, any> }): Promise<{ message: string }> {
+    return this.request(`/api/workflows/legacy/${this.workflowId}/start?runId=${params.runId}`, {
+      method: 'POST',
+      body: params?.triggerData,
+    });
+  }
+
+  /**
+   * Resumes a suspended legacy workflow step synchronously without waiting for the workflow to complete
+   * @param stepId - ID of the step to resume
+   * @param runId - ID of the legacy workflow run
+   * @param context - Context to resume the legacy workflow with
+   * @returns Promise containing the legacy workflow resume results
+   */
+  resume({
+    stepId,
+    runId,
+    context,
+  }: {
+    stepId: string;
+    runId: string;
+    context: Record<string, any>;
+  }): Promise<{ message: string }> {
+    return this.request(`/api/workflows/legacy/${this.workflowId}/resume?runId=${runId}`, {
+      method: 'POST',
+      body: {
+        stepId,
+        context,
+      },
+    });
+  }
+
+  /**
+   * Starts a workflow run asynchronously and returns a promise that resolves when the workflow is complete
+   * @param params - Object containing the optional runId and triggerData
+   * @returns Promise containing the workflow execution results
+   */
+  startAsync(params: { runId?: string; triggerData: Record<string, any> }): Promise<LegacyWorkflowRunResult> {
+    const searchParams = new URLSearchParams();
+
+    if (!!params?.runId) {
+      searchParams.set('runId', params.runId);
+    }
+
+    return this.request(`/api/workflows/legacy/${this.workflowId}/start-async?${searchParams.toString()}`, {
+      method: 'POST',
+      body: params?.triggerData,
+    });
+  }
+
+  /**
+   * Resumes a suspended legacy workflow step asynchronously and returns a promise that resolves when the workflow is complete
+   * @param params - Object containing the runId, stepId, and context
+   * @returns Promise containing the workflow resume results
+   */
+  resumeAsync(params: {
+    runId: string;
+    stepId: string;
+    context: Record<string, any>;
+  }): Promise<LegacyWorkflowRunResult> {
+    return this.request(`/api/workflows/legacy/${this.workflowId}/resume-async?runId=${params.runId}`, {
+      method: 'POST',
+      body: {
+        stepId: params.stepId,
+        context: params.context,
+      },
+    });
+  }
+
+  /**
+   * Creates an async generator that processes a readable stream and yields records
+   * separated by the Record Separator character (\x1E)
+   *
+   * @param stream - The readable stream to process
+   * @returns An async generator that yields parsed records
+   */
+  private async *streamProcessor(stream: ReadableStream): AsyncGenerator<LegacyWorkflowRunResult, void, unknown> {
+    const reader = stream.getReader();
+
+    // Track if we've finished reading from the stream
+    let doneReading = false;
+    // Buffer to accumulate partial chunks
+    let buffer = '';
+
+    try {
+      while (!doneReading) {
+        // Read the next chunk from the stream
+        const { done, value } = await reader.read();
+        doneReading = done;
+
+        // Skip processing if we're done and there's no value
+        if (done && !value) continue;
+
+        try {
+          // Decode binary data to text
+          const decoded = value ? new TextDecoder().decode(value) : '';
+
+          // Split the combined buffer and new data by record separator
+          const chunks = (buffer + decoded).split(RECORD_SEPARATOR);
+
+          // The last chunk might be incomplete, so save it for the next iteration
+          buffer = chunks.pop() || '';
+
+          // Process complete chunks
+          for (const chunk of chunks) {
+            if (chunk) {
+              // Only process non-empty chunks
+              if (typeof chunk === 'string') {
+                try {
+                  const parsedChunk = JSON.parse(chunk);
+                  yield parsedChunk;
+                } catch {
+                  // Silently ignore parsing errors to maintain stream processing
+                  // This allows the stream to continue even if one record is malformed
+                }
+              }
+            }
+          }
+        } catch {
+          // Silently ignore parsing errors to maintain stream processing
+          // This allows the stream to continue even if one record is malformed
+        }
+      }
+
+      // Process any remaining data in the buffer after stream is done
+      if (buffer) {
+        try {
+          yield JSON.parse(buffer);
+        } catch {
+          // Ignore parsing error for final chunk
+        }
+      }
+    } finally {
+      // Always ensure we clean up the reader
+      reader.cancel().catch(() => {
+        // Ignore cancel errors
+      });
+    }
+  }
+
+  /**
+   * Watches legacy workflow transitions in real-time
+   * @param runId - Optional run ID to filter the watch stream
+   * @returns AsyncGenerator that yields parsed records from the legacy workflow watch stream
+   */
+  async watch({ runId }: { runId?: string }, onRecord: (record: LegacyWorkflowRunResult) => void) {
+    const response: Response = await this.request(`/api/workflows/legacy/${this.workflowId}/watch?runId=${runId}`, {
+      stream: true,
+    });
+
+    if (!response.ok) {
+      throw new Error(`Failed to watch legacy workflow: ${response.statusText}`);
+    }
+
+    if (!response.body) {
+      throw new Error('Response body is null');
+    }
+
+    for await (const record of this.streamProcessor(response.body)) {
+      onRecord(record);
+    }
+  }
+}
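A sketch of driving the legacy workflow resource end to end. The `getLegacyWorkflow` accessor on `MastraClient` is assumed (it lives in `client.ts`, which is not shown in this excerpt), and the workflow id and trigger data are hypothetical:

```ts
import { MastraClient } from '@mastra/client-js';

const client = new MastraClient({ baseUrl: 'http://localhost:4111' });
const workflow = client.getLegacyWorkflow('my-legacy-workflow'); // assumed accessor + hypothetical id

async function runWorkflow() {
  // POST /api/workflows/legacy/my-legacy-workflow/create-run
  const { runId } = await workflow.createRun();

  // Stream watch records (\x1E-separated JSON) while the run progresses.
  void workflow.watch({ runId }, record => {
    console.log('watch record:', record);
  });

  // POST /api/workflows/legacy/my-legacy-workflow/start-async?runId=...
  const result = await workflow.startAsync({ runId, triggerData: { city: 'Berlin' } });
  console.log('final result:', result);
}

runWorkflow().catch(console.error);
```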