@mastra/client-js 0.1.22 → 0.2.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +8 -8
- package/CHANGELOG.md +43 -0
- package/dist/index.cjs +171 -89
- package/dist/index.d.cts +153 -75
- package/dist/index.d.ts +153 -75
- package/dist/index.js +171 -89
- package/package.json +6 -5
- package/src/client.ts +85 -19
- package/src/example.ts +29 -30
- package/src/index.test.ts +91 -1
- package/src/resources/agent.ts +8 -7
- package/src/resources/base.ts +1 -1
- package/src/resources/index.ts +3 -2
- package/src/resources/{vnext-workflow.ts → legacy-workflow.ts} +124 -143
- package/src/resources/mcp-tool.ts +48 -0
- package/src/resources/tool.ts +4 -3
- package/src/resources/workflow.ts +121 -109
- package/src/types.ts +55 -14
- package/src/utils/index.ts +11 -0
package/src/example.ts
CHANGED

@@ -1,40 +1,39 @@
-
+import { MastraClient } from './client';
 // import type { WorkflowRunResult } from './types';

 // Agent

-
-
-
-
+(async () => {
+  const client = new MastraClient({
+    baseUrl: 'http://localhost:4111',
+  });

-
+  console.log('Starting agent...');

-
-
-
-
-
+  try {
+    const agent = client.getAgent('weatherAgent');
+    const response = await agent.stream({
+      messages: 'what is the weather in new york?',
+    });

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-// })();
+    response.processDataStream({
+      onTextPart: text => {
+        process.stdout.write(text);
+      },
+      onFilePart: file => {
+        console.log(file);
+      },
+      onDataPart: data => {
+        console.log(data);
+      },
+      onErrorPart: error => {
+        console.error(error);
+      },
+    });
+  } catch (error) {
+    console.error(error);
+  }
+})();

 // Workflow
 // (async () => {

package/src/index.test.ts
CHANGED

@@ -1,6 +1,6 @@
 import { describe, expect, beforeEach, it, vi } from 'vitest';
-
 import { MastraClient } from './client';
+import type { McpServerListResponse, ServerDetailInfo } from './types';

 // Mock fetch globally
 global.fetch = vi.fn();

@@ -737,4 +737,94 @@ describe('MastraClient Resources', () => {
       );
     });
   });
+
+  describe('MCP Server Registry Client Methods', () => {
+    const mockServerInfo1 = {
+      id: 'mcp-server-1',
+      name: 'Test MCP Server 1',
+      version_detail: { version: '1.0.0', release_date: '2023-01-01T00:00:00Z', is_latest: true },
+    };
+    const mockServerInfo2 = {
+      id: 'mcp-server-2',
+      name: 'Test MCP Server 2',
+      version_detail: { version: '1.1.0', release_date: '2023-02-01T00:00:00Z', is_latest: true },
+    };
+
+    const mockServerDetail1: ServerDetailInfo = {
+      ...mockServerInfo1,
+      description: 'Detailed description for server 1',
+      package_canonical: 'npm',
+      packages: [{ registry_name: 'npm', name: '@example/server1', version: '1.0.0' }],
+      remotes: [{ transport_type: 'sse', url: 'http://localhost/sse1' }],
+    };
+
+    describe('getMcpServers()', () => {
+      it('should fetch a list of MCP servers', async () => {
+        const mockResponse: McpServerListResponse = {
+          servers: [mockServerInfo1, mockServerInfo2],
+          total_count: 2,
+          next: null,
+        };
+        mockFetchResponse(mockResponse);
+
+        const result = await client.getMcpServers();
+        expect(result).toEqual(mockResponse);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+
+      it('should fetch MCP servers with limit and offset parameters', async () => {
+        const mockResponse: McpServerListResponse = {
+          servers: [mockServerInfo1],
+          total_count: 2,
+          next: '/api/mcp/v0/servers?limit=1&offset=1',
+        };
+        mockFetchResponse(mockResponse);
+
+        const result = await client.getMcpServers({ limit: 1, offset: 0 });
+        expect(result).toEqual(mockResponse);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers?limit=1&offset=0`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+    });
+
+    describe('getMcpServerDetails()', () => {
+      const serverId = 'mcp-server-1';
+
+      it('should fetch details for a specific MCP server', async () => {
+        mockFetchResponse(mockServerDetail1);
+
+        const result = await client.getMcpServerDetails(serverId);
+        expect(result).toEqual(mockServerDetail1);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers/${serverId}`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+
+      it('should fetch MCP server details with a version parameter', async () => {
+        mockFetchResponse(mockServerDetail1);
+        const version = '1.0.0';
+
+        const result = await client.getMcpServerDetails(serverId, { version });
+        expect(result).toEqual(mockServerDetail1);
+        expect(global.fetch).toHaveBeenCalledWith(
+          `${clientOptions.baseUrl}/api/mcp/v0/servers/${serverId}?version=${version}`,
+          expect.objectContaining({
+            headers: expect.objectContaining(clientOptions.headers),
+          }),
+        );
+      });
+    });
+  });
 });
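
The tests above exercise two new client methods for the MCP server registry. A minimal usage sketch (not part of the diff), based only on the calls and routes shown in those tests; the server id, pagination values, and base URL are placeholders, and it assumes MastraClient is exported from the package root:

  import { MastraClient } from '@mastra/client-js';

  (async () => {
    const client = new MastraClient({ baseUrl: 'http://localhost:4111' });

    // GET /api/mcp/v0/servers (limit/offset are optional)
    const list = await client.getMcpServers({ limit: 10, offset: 0 });
    console.log(list.total_count, list.servers.map(s => s.name));

    // GET /api/mcp/v0/servers/:id (version is optional)
    const detail = await client.getMcpServerDetails('mcp-server-1', { version: '1.0.0' });
    console.log(detail.description);
  })();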
package/src/resources/agent.ts
CHANGED

@@ -14,7 +14,8 @@ import type {
 } from '../types';

 import { BaseResource } from './base';
-import type { RuntimeContext } from '@mastra/core/
+import type { RuntimeContext } from '@mastra/core/runtime-context';
+import { parseClientRuntimeContext } from '../utils';

 export class AgentVoice extends BaseResource {
   constructor(

@@ -100,9 +101,9 @@ export class Agent extends BaseResource {
   ): Promise<GenerateReturn<T>> {
     const processedParams = {
       ...params,
-      output: zodToJsonSchema(params.output),
-      experimental_output: zodToJsonSchema(params.experimental_output),
-      runtimeContext:
+      output: params.output ? zodToJsonSchema(params.output) : undefined,
+      experimental_output: params.experimental_output ? zodToJsonSchema(params.experimental_output) : undefined,
+      runtimeContext: parseClientRuntimeContext(params.runtimeContext),
     };

     return this.request(`/api/agents/${this.agentId}/generate`, {

@@ -125,9 +126,9 @@ export class Agent extends BaseResource {
   > {
     const processedParams = {
       ...params,
-      output: zodToJsonSchema(params.output),
-      experimental_output: zodToJsonSchema(params.experimental_output),
-      runtimeContext:
+      output: params.output ? zodToJsonSchema(params.output) : undefined,
+      experimental_output: params.experimental_output ? zodToJsonSchema(params.experimental_output) : undefined,
+      runtimeContext: parseClientRuntimeContext(params.runtimeContext),
     };

     const response: Response & {
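
The parseClientRuntimeContext helper imported here lives in package/src/utils/index.ts (+11 lines), whose body is not included in this diff view. A hypothetical sketch of what such a helper might look like, inferred only from its call sites above and from the Object.fromEntries(runtimeContext.entries()) pattern this release removes from the workflow resources:

  import { RuntimeContext } from '@mastra/core/runtime-context';

  // Hypothetical sketch: normalize either a RuntimeContext instance or a plain
  // record into a JSON-serializable object, returning undefined when absent.
  export function parseClientRuntimeContext(runtimeContext?: RuntimeContext | Record<string, any>) {
    if (!runtimeContext) return undefined;
    return runtimeContext instanceof RuntimeContext ? Object.fromEntries(runtimeContext.entries()) : runtimeContext;
  }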
package/src/resources/base.ts
CHANGED

@@ -21,7 +21,7 @@ export class BaseResource {

     for (let attempt = 0; attempt <= retries; attempt++) {
       try {
-        const response = await fetch(`${baseUrl}${path}`, {
+        const response = await fetch(`${baseUrl.replace(/\/$/, '')}${path}`, {
           ...options,
           headers: {
             ...headers,
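
In effect, a trailing slash in baseUrl no longer produces a double slash when the request path is appended. A small illustration (not part of the diff):

  // Before: http://localhost:4111//api/agents/...   After: http://localhost:4111/api/agents/...
  const client = new MastraClient({ baseUrl: 'http://localhost:4111/' });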
package/src/resources/index.ts
CHANGED

@@ -2,8 +2,9 @@ export * from './agent';
 export * from './network';
 export * from './memory-thread';
 export * from './vector';
-export * from './workflow';
+export * from './legacy-workflow';
 export * from './tool';
 export * from './base';
-export * from './
+export * from './workflow';
 export * from './a2a';
+export * from './mcp-tool';

package/src/resources/{vnext-workflow.ts → legacy-workflow.ts}
CHANGED

@@ -1,18 +1,16 @@
-import { RuntimeContext } from '@mastra/core/runtime-context';
 import type {
   ClientOptions,
-
+  LegacyWorkflowRunResult,
+  GetLegacyWorkflowRunsResponse,
   GetWorkflowRunsParams,
-
-  VNextWorkflowRunResult,
-  VNextWorkflowWatchResult,
+  GetLegacyWorkflowResponse,
 } from '../types';

 import { BaseResource } from './base';

 const RECORD_SEPARATOR = '\x1E';

-export class
+export class LegacyWorkflow extends BaseResource {
   constructor(
     options: ClientOptions,
     private workflowId: string,

@@ -21,90 +19,19 @@ export class VNextWorkflow extends BaseResource {
   }

   /**
-   *
-   *
-   *
-   * @param stream - The readable stream to process
-   * @returns An async generator that yields parsed records
+   * Retrieves details about the legacy workflow
+   * @returns Promise containing legacy workflow details including steps and graphs
    */
-
-
-
-    // Track if we've finished reading from the stream
-    let doneReading = false;
-    // Buffer to accumulate partial chunks
-    let buffer = '';
-
-    try {
-      while (!doneReading) {
-        // Read the next chunk from the stream
-        const { done, value } = await reader.read();
-        doneReading = done;
-
-        // Skip processing if we're done and there's no value
-        if (done && !value) continue;
-
-        try {
-          // Decode binary data to text
-          const decoded = value ? new TextDecoder().decode(value) : '';
-
-          // Split the combined buffer and new data by record separator
-          const chunks = (buffer + decoded).split(RECORD_SEPARATOR);
-
-          // The last chunk might be incomplete, so save it for the next iteration
-          buffer = chunks.pop() || '';
-
-          // Process complete chunks
-          for (const chunk of chunks) {
-            if (chunk) {
-              // Only process non-empty chunks
-              if (typeof chunk === 'string') {
-                try {
-                  const parsedChunk = JSON.parse(chunk);
-                  yield parsedChunk;
-                } catch {
-                  // Silently ignore parsing errors to maintain stream processing
-                  // This allows the stream to continue even if one record is malformed
-                }
-              }
-            }
-          }
-        } catch {
-          // Silently ignore parsing errors to maintain stream processing
-          // This allows the stream to continue even if one record is malformed
-        }
-      }
-
-      // Process any remaining data in the buffer after stream is done
-      if (buffer) {
-        try {
-          yield JSON.parse(buffer);
-        } catch {
-          // Ignore parsing error for final chunk
-        }
-      }
-    } finally {
-      // Always ensure we clean up the reader
-      reader.cancel().catch(() => {
-        // Ignore cancel errors
-      });
-    }
-  }
-
-  /**
-   * Retrieves details about the vNext workflow
-   * @returns Promise containing vNext workflow details including steps and graphs
-   */
-  details(): Promise<GetVNextWorkflowResponse> {
-    return this.request(`/api/workflows/v-next/${this.workflowId}`);
+  details(): Promise<GetLegacyWorkflowResponse> {
+    return this.request(`/api/workflows/legacy/${this.workflowId}`);
   }

   /**
-   * Retrieves all runs for a
+   * Retrieves all runs for a legacy workflow
    * @param params - Parameters for filtering runs
-   * @returns Promise containing
+   * @returns Promise containing legacy workflow runs array
    */
-  runs(params?: GetWorkflowRunsParams): Promise<
+  runs(params?: GetWorkflowRunsParams): Promise<GetLegacyWorkflowRunsResponse> {
     const searchParams = new URLSearchParams();
     if (params?.fromDate) {
       searchParams.set('fromDate', params.fromDate.toISOString());

@@ -123,16 +50,15 @@ export class VNextWorkflow extends BaseResource {
     }

     if (searchParams.size) {
-      return this.request(`/api/workflows/
+      return this.request(`/api/workflows/legacy/${this.workflowId}/runs?${searchParams}`);
     } else {
-      return this.request(`/api/workflows/
+      return this.request(`/api/workflows/legacy/${this.workflowId}/runs`);
     }
   }

   /**
-   * Creates a new
-   * @
-   * @returns Promise containing the runId of the created run
+   * Creates a new legacy workflow run
+   * @returns Promise containing the generated run ID
    */
   createRun(params?: { runId?: string }): Promise<{ runId: string }> {
     const searchParams = new URLSearchParams();

@@ -141,113 +67,168 @@ export class VNextWorkflow extends BaseResource {
       searchParams.set('runId', params.runId);
     }

-    return this.request(`/api/workflows/
+    return this.request(`/api/workflows/legacy/${this.workflowId}/create-run?${searchParams.toString()}`, {
       method: 'POST',
     });
   }

   /**
-   * Starts a
-   * @param params - Object containing the runId
+   * Starts a legacy workflow run synchronously without waiting for the workflow to complete
+   * @param params - Object containing the runId and triggerData
    * @returns Promise containing success message
    */
-  start(params: {
-    runId
-    inputData: Record<string, any>;
-    runtimeContext?: RuntimeContext;
-  }): Promise<{ message: string }> {
-    const runtimeContext = params.runtimeContext ? Object.fromEntries(params.runtimeContext.entries()) : undefined;
-    return this.request(`/api/workflows/v-next/${this.workflowId}/start?runId=${params.runId}`, {
+  start(params: { runId: string; triggerData: Record<string, any> }): Promise<{ message: string }> {
+    return this.request(`/api/workflows/legacy/${this.workflowId}/start?runId=${params.runId}`, {
       method: 'POST',
-      body:
+      body: params?.triggerData,
     });
   }

   /**
-   * Resumes a suspended
-   * @param
-   * @
+   * Resumes a suspended legacy workflow step synchronously without waiting for the workflow to complete
+   * @param stepId - ID of the step to resume
+   * @param runId - ID of the legacy workflow run
+   * @param context - Context to resume the legacy workflow with
+   * @returns Promise containing the legacy workflow resume results
    */
   resume({
-
+    stepId,
     runId,
-
-    ...rest
+    context,
   }: {
-
+    stepId: string;
     runId: string;
-
-    runtimeContext?: RuntimeContext;
+    context: Record<string, any>;
   }): Promise<{ message: string }> {
-
-    return this.request(`/api/workflows/v-next/${this.workflowId}/resume?runId=${runId}`, {
+    return this.request(`/api/workflows/legacy/${this.workflowId}/resume?runId=${runId}`, {
       method: 'POST',
-      stream: true,
       body: {
-
-
-        runtimeContext,
+        stepId,
+        context,
       },
     });
   }

   /**
-   * Starts a
-   * @param params - Object containing the optional runId
-   * @returns Promise containing the
+   * Starts a workflow run asynchronously and returns a promise that resolves when the workflow is complete
+   * @param params - Object containing the optional runId and triggerData
+   * @returns Promise containing the workflow execution results
    */
-  startAsync(params: {
-    runId?: string;
-    inputData: Record<string, any>;
-    runtimeContext?: RuntimeContext;
-  }): Promise<VNextWorkflowRunResult> {
+  startAsync(params: { runId?: string; triggerData: Record<string, any> }): Promise<LegacyWorkflowRunResult> {
     const searchParams = new URLSearchParams();

     if (!!params?.runId) {
       searchParams.set('runId', params.runId);
     }

-
-    return this.request(`/api/workflows/v-next/${this.workflowId}/start-async?${searchParams.toString()}`, {
+    return this.request(`/api/workflows/legacy/${this.workflowId}/start-async?${searchParams.toString()}`, {
       method: 'POST',
-      body:
+      body: params?.triggerData,
     });
   }

   /**
-   * Resumes a suspended
-   * @param params - Object containing the runId,
-   * @returns Promise containing the
+   * Resumes a suspended legacy workflow step asynchronously and returns a promise that resolves when the workflow is complete
+   * @param params - Object containing the runId, stepId, and context
+   * @returns Promise containing the workflow resume results
    */
   resumeAsync(params: {
     runId: string;
-
-
-
-
-    const runtimeContext = params.runtimeContext ? Object.fromEntries(params.runtimeContext.entries()) : undefined;
-    return this.request(`/api/workflows/v-next/${this.workflowId}/resume-async?runId=${params.runId}`, {
+    stepId: string;
+    context: Record<string, any>;
+  }): Promise<LegacyWorkflowRunResult> {
+    return this.request(`/api/workflows/legacy/${this.workflowId}/resume-async?runId=${params.runId}`, {
       method: 'POST',
       body: {
-
-
-        runtimeContext,
+        stepId: params.stepId,
+        context: params.context,
       },
     });
   }

   /**
-   *
+   * Creates an async generator that processes a readable stream and yields records
+   * separated by the Record Separator character (\x1E)
+   *
+   * @param stream - The readable stream to process
+   * @returns An async generator that yields parsed records
+   */
+  private async *streamProcessor(stream: ReadableStream): AsyncGenerator<LegacyWorkflowRunResult, void, unknown> {
+    const reader = stream.getReader();
+
+    // Track if we've finished reading from the stream
+    let doneReading = false;
+    // Buffer to accumulate partial chunks
+    let buffer = '';
+
+    try {
+      while (!doneReading) {
+        // Read the next chunk from the stream
+        const { done, value } = await reader.read();
+        doneReading = done;
+
+        // Skip processing if we're done and there's no value
+        if (done && !value) continue;
+
+        try {
+          // Decode binary data to text
+          const decoded = value ? new TextDecoder().decode(value) : '';
+
+          // Split the combined buffer and new data by record separator
+          const chunks = (buffer + decoded).split(RECORD_SEPARATOR);
+
+          // The last chunk might be incomplete, so save it for the next iteration
+          buffer = chunks.pop() || '';
+
+          // Process complete chunks
+          for (const chunk of chunks) {
+            if (chunk) {
+              // Only process non-empty chunks
+              if (typeof chunk === 'string') {
+                try {
+                  const parsedChunk = JSON.parse(chunk);
+                  yield parsedChunk;
+                } catch {
+                  // Silently ignore parsing errors to maintain stream processing
+                  // This allows the stream to continue even if one record is malformed
+                }
+              }
+            }
+          }
+        } catch {
+          // Silently ignore parsing errors to maintain stream processing
+          // This allows the stream to continue even if one record is malformed
+        }
+      }
+
+      // Process any remaining data in the buffer after stream is done
+      if (buffer) {
+        try {
+          yield JSON.parse(buffer);
+        } catch {
+          // Ignore parsing error for final chunk
+        }
+      }
+    } finally {
+      // Always ensure we clean up the reader
+      reader.cancel().catch(() => {
+        // Ignore cancel errors
+      });
+    }
+  }
+
+  /**
+   * Watches legacy workflow transitions in real-time
    * @param runId - Optional run ID to filter the watch stream
-   * @returns AsyncGenerator that yields parsed records from the
+   * @returns AsyncGenerator that yields parsed records from the legacy workflow watch stream
    */
-  async watch({ runId }: { runId?: string }, onRecord: (record:
-    const response: Response = await this.request(`/api/workflows/
+  async watch({ runId }: { runId?: string }, onRecord: (record: LegacyWorkflowRunResult) => void) {
+    const response: Response = await this.request(`/api/workflows/legacy/${this.workflowId}/watch?runId=${runId}`, {
       stream: true,
     });

     if (!response.ok) {
-      throw new Error(`Failed to watch
+      throw new Error(`Failed to watch legacy workflow: ${response.statusText}`);
     }

     if (!response.body) {
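
A minimal sketch (not part of the diff) of driving the renamed LegacyWorkflow resource through the methods shown above. The workflow id and payloads are placeholders, and it assumes the class is re-exported from the package root as the resources index above suggests:

  import { LegacyWorkflow } from '@mastra/client-js';

  (async () => {
    const workflow = new LegacyWorkflow({ baseUrl: 'http://localhost:4111' }, 'myLegacyWorkflow');

    // POST /api/workflows/legacy/:id/create-run, then /start-async with triggerData.
    const { runId } = await workflow.createRun();
    const result = await workflow.startAsync({ runId, triggerData: { city: 'new york' } });
    console.log(result);

    // If a step suspends, resume it with a context payload:
    // await workflow.resumeAsync({ runId, stepId: 'someStep', context: { approved: true } });
  })();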

package/src/resources/mcp-tool.ts
ADDED

@@ -0,0 +1,48 @@
+import type { RuntimeContext } from '@mastra/core/runtime-context';
+import type { ClientOptions, McpToolInfo } from '../types';
+import { BaseResource } from './base';
+
+/**
+ * Represents a specific tool available on a specific MCP server.
+ * Provides methods to get details and execute the tool.
+ */
+export class MCPTool extends BaseResource {
+  private serverId: string;
+  private toolId: string;
+
+  constructor(options: ClientOptions, serverId: string, toolId: string) {
+    super(options);
+    this.serverId = serverId;
+    this.toolId = toolId;
+  }
+
+  /**
+   * Retrieves details about this specific tool from the MCP server.
+   * @returns Promise containing the tool's information (name, description, schema).
+   */
+  details(): Promise<McpToolInfo> {
+    return this.request(`/api/mcp/${this.serverId}/tools/${this.toolId}`);
+  }
+
+  /**
+   * Executes this specific tool on the MCP server.
+   * @param params - Parameters for tool execution, including data/args and optional runtimeContext.
+   * @returns Promise containing the result of the tool execution.
+   */
+  execute(params: { data?: any; runtimeContext?: RuntimeContext }): Promise<any> {
+    const body: any = {};
+    if (params.data !== undefined) body.data = params.data;
+    // If none of data, args the body might be empty or just contain runtimeContext.
+    // The handler will look for these, so an empty args object might be appropriate if that's the intent.
+    // else body.data = {}; // Or let it be empty if no specific input fields are used
+
+    if (params.runtimeContext !== undefined) {
+      body.runtimeContext = params.runtimeContext;
+    }
+
+    return this.request(`/api/mcp/${this.serverId}/tools/${this.toolId}/execute`, {
+      method: 'POST',
+      body: Object.keys(body).length > 0 ? body : undefined,
+    });
+  }
+}
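
A minimal sketch (not part of the diff) of the new MCPTool resource; the server and tool ids are placeholders, and it assumes the class is re-exported from the package root via the resources index above:

  import { MCPTool } from '@mastra/client-js';

  (async () => {
    const tool = new MCPTool({ baseUrl: 'http://localhost:4111' }, 'mcp-server-1', 'weatherTool');

    // GET /api/mcp/:serverId/tools/:toolId
    const info = await tool.details();
    console.log(info.name, info.description);

    // POST /api/mcp/:serverId/tools/:toolId/execute
    const result = await tool.execute({ data: { location: 'new york' } });
    console.log(result);
  })();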
package/src/resources/tool.ts
CHANGED

@@ -1,7 +1,8 @@
-import type { RuntimeContext } from '@mastra/core/
+import type { RuntimeContext } from '@mastra/core/runtime-context';
 import type { GetToolResponse, ClientOptions } from '../types';

 import { BaseResource } from './base';
+import { parseClientRuntimeContext } from '../utils';

 export class Tool extends BaseResource {
   constructor(

@@ -24,7 +25,7 @@ export class Tool extends BaseResource {
    * @param params - Parameters required for tool execution
    * @returns Promise containing the tool execution results
    */
-  execute(params: { data: any; runId?: string; runtimeContext?: RuntimeContext }): Promise<any> {
+  execute(params: { data: any; runId?: string; runtimeContext?: RuntimeContext | Record<string, any> }): Promise<any> {
     const url = new URLSearchParams();

     if (params.runId) {

@@ -33,7 +34,7 @@ export class Tool extends BaseResource {

     const body = {
       data: params.data,
-      runtimeContext:
+      runtimeContext: parseClientRuntimeContext(params.runtimeContext),
     };

     return this.request(`/api/tools/${this.toolId}/execute?${url.toString()}`, {