@agents-at-scale/ark 0.1.43 → 0.1.44
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/arkServices.js +12 -9
- package/dist/commands/generate/generators/project.js +3 -3
- package/dist/commands/generate/index.js +2 -2
- package/dist/commands/query/index.js +2 -0
- package/dist/commands/query/index.spec.js +24 -0
- package/dist/components/ChatUI.js +12 -2
- package/dist/lib/chatClient.d.ts +4 -0
- package/dist/lib/chatClient.js +23 -7
- package/dist/lib/chatClient.spec.d.ts +1 -0
- package/dist/lib/chatClient.spec.js +108 -0
- package/dist/lib/constants.d.ts +3 -0
- package/dist/lib/constants.js +8 -0
- package/dist/lib/executeQuery.d.ts +1 -0
- package/dist/lib/executeQuery.js +6 -1
- package/dist/lib/executeQuery.spec.js +42 -0
- package/dist/lib/types.d.ts +22 -7
- package/package.json +1 -1
- package/templates/models/azure.yaml +1 -1
- package/templates/project/Makefile +1 -1
- package/templates/project/README.md +1 -1
- package/templates/project/scripts/setup.sh +2 -2
package/dist/arkServices.js
CHANGED
@@ -119,6 +119,18 @@ const defaultArkServices = {
         k8sDeploymentName: 'ark-mcp',
         k8sDevDeploymentName: 'ark-mcp-devspace',
     },
+    'ark-cluster-memory': {
+        name: 'ark-cluster-memory',
+        helmReleaseName: 'ark-cluster-memory',
+        description: 'In-memory storage service with streaming support for Ark queries',
+        enabled: true,
+        category: 'service',
+        // namespace: undefined - uses current context namespace
+        chartPath: `${REGISTRY_BASE}/ark-cluster-memory`,
+        installArgs: [],
+        k8sDeploymentName: 'ark-cluster-memory',
+        k8sDevDeploymentName: 'ark-cluster-memory-devspace',
+    },
     'mcp-filesystem': {
         name: 'mcp-filesystem',
         helmReleaseName: 'mcp-filesystem',
@@ -131,15 +143,6 @@ const defaultArkServices = {
         k8sDeploymentName: 'mcp-filesystem',
         k8sDevDeploymentName: 'mcp-filesystem-devspace',
     },
-    'agents-at-scale': {
-        name: 'agents-at-scale',
-        helmReleaseName: 'agents-at-scale',
-        description: 'Agents @ Scale Platform',
-        enabled: false,
-        category: 'service',
-        chartPath: 'oci://ghcr.io/mck-private/qb-fm-labs-legacyx/charts/legacyx',
-        installArgs: [],
-    },
     'localhost-gateway': {
         name: 'localhost-gateway',
         helmReleaseName: 'localhost-gateway',
package/dist/commands/generate/generators/project.js
CHANGED
@@ -727,16 +727,16 @@ Generated with ARK CLI generator`;
     },
 ];
 if (config.projectType === 'empty') {
-    steps.push({ desc: 'Add YAML files to agents/, teams/, queries/ directories' }, { desc: 'Copy model configurations from samples/models/' }, { desc: 'Edit .env file to set your API keys' }, { desc: 'Deploy your project', cmd: '
+    steps.push({ desc: 'Add YAML files to agents/, teams/, queries/ directories' }, { desc: 'Copy model configurations from samples/models/' }, { desc: 'Edit .env file to set your API keys' }, { desc: 'Deploy your project', cmd: 'devspace dev' });
 }
 else if (config.selectedModels && config.selectedModels !== 'none') {
-    steps.push({ desc: 'Edit .env file to set your API keys' }, { desc: 'Load environment variables', cmd: 'source .env' }, { desc: 'Deploy your project', cmd: '
+    steps.push({ desc: 'Edit .env file to set your API keys' }, { desc: 'Load environment variables', cmd: 'source .env' }, { desc: 'Deploy your project', cmd: 'devspace dev' }, {
         desc: 'Test your deployment',
         cmd: `kubectl get query sample-team-query -w --namespace ${config.namespace}`,
     });
 }
 else {
-    steps.push({ desc: 'Copy model configurations from samples/models/' }, { desc: 'Edit .env file to set your API keys' }, { desc: 'Deploy your project', cmd: '
+    steps.push({ desc: 'Copy model configurations from samples/models/' }, { desc: 'Edit .env file to set your API keys' }, { desc: 'Deploy your project', cmd: 'devspace dev' });
 }
 console.log(chalk.magenta.bold('🚀 NEXT STEPS:\n'));
 let stepNumber = 1;
package/dist/commands/generate/index.js
CHANGED
@@ -56,7 +56,7 @@ ${chalk.cyan('Getting started:')}
   1. ${chalk.yellow('ark generate project my-first-project')}  # Create project
   2. ${chalk.yellow('cd my-first-project')}                    # Enter directory
   3. ${chalk.yellow('source .env')}                            # Set environment
-  4. ${chalk.yellow('
+  4. ${chalk.yellow('devspace dev')}                           # Deploy to cluster
 `);
 // Register generators
 const generators = new Map();
@@ -345,7 +345,7 @@ ${chalk.cyan('Use Cases:')}
 console.log(chalk.cyan('\n🚀 Quick Start:'));
 console.log(chalk.gray('  1. ark generate project my-first-project'));
 console.log(chalk.gray('  2. cd my-first-project && source .env'));
-console.log(chalk.gray('  3. 
+console.log(chalk.gray('  3. devspace dev'));
 console.log(chalk.cyan('\n🚧 Usage:'));
 console.log(chalk.gray('  ark generate <type> [name] [options]'));
 console.log(chalk.gray('  ark g <type> [name] [options]'));
package/dist/commands/query/index.js
CHANGED
@@ -9,6 +9,7 @@ export function createQueryCommand(_) {
     .argument('<target>', 'Query target (e.g., model/default, agent/my-agent)')
     .argument('<message>', 'Message to send')
     .option('-o, --output <format>', 'Output format: yaml, json, or name (prints only resource name)')
+    .option('--session-id <sessionId>', 'Session ID to associate with the query for conversation continuity')
     .action(async (target, message, options) => {
     const parsed = parseTarget(target);
     if (!parsed) {
@@ -20,6 +21,7 @@ export function createQueryCommand(_) {
         targetName: parsed.name,
         message,
         outputFormat: options.output,
+        sessionId: options.sessionId,
     });
 });
 return queryCommand;
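The new `--session-id` flag is simply forwarded to `executeQuery`. A minimal sketch of the resulting call, mirroring the spec below; the target and session values are illustrative, not part of the package:

```js
// e.g. ark query agent/my-agent "Hello" --session-id my-session-123
await executeQuery({
    targetType: 'agent',          // from parseTarget('agent/my-agent')
    targetName: 'my-agent',
    message: 'Hello',
    outputFormat: options.output, // undefined unless -o/--output was passed
    sessionId: options.sessionId, // 'my-session-123' in this example
});
```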
package/dist/commands/query/index.spec.js
CHANGED
@@ -68,6 +68,30 @@ describe('createQueryCommand', () => {
             outputFormat: 'json',
         });
     });
+    it('should pass session-id option to executeQuery', async () => {
+        mockParseTarget.mockReturnValue({
+            type: 'agent',
+            name: 'test-agent',
+        });
+        mockExecuteQuery.mockResolvedValue(undefined);
+        const command = createQueryCommand({});
+        await command.parseAsync([
+            'node',
+            'test',
+            'agent/test-agent',
+            'Hello world',
+            '--session-id',
+            'my-session-123',
+        ]);
+        expect(mockParseTarget).toHaveBeenCalledWith('agent/test-agent');
+        expect(mockExecuteQuery).toHaveBeenCalledWith({
+            targetType: 'agent',
+            targetName: 'test-agent',
+            message: 'Hello world',
+            outputFormat: undefined,
+            sessionId: 'my-session-123',
+        });
+    });
     it('should error on invalid target format', async () => {
         mockParseTarget.mockReturnValue(null);
         const command = createQueryCommand({});
package/dist/components/ChatUI.js
CHANGED
@@ -61,6 +61,8 @@ const ChatUI = ({ initialTargetId, arkApiClient, arkApiProxy, config, }) => {
         streamingEnabled: config?.chat?.streaming ?? true,
         currentTarget: undefined,
     });
+    // Track A2A context ID for conversation continuity using ref
+    const a2aContextIdRef = React.useRef(undefined);
     React.useEffect(() => {
         if (showAgentSelector && agents.length === 0) {
             setSelectorLoading(true);
@@ -330,11 +332,13 @@ const ChatUI = ({ initialTargetId, arkApiClient, arkApiProxy, config, }) => {
         if (value.startsWith('/reset')) {
             // Clear all messages
             setMessages([]);
+            // Clear A2A context ID
+            a2aContextIdRef.current = undefined;
             // Add system message to show the reset
             const systemMessage = {
                 id: generateMessageId(),
                 type: 'system',
-                content: 'Message history cleared',
+                content: 'Message history and A2A context cleared',
                 timestamp: new Date(),
                 command: '/reset',
             };
@@ -462,7 +466,13 @@ const ChatUI = ({ initialTargetId, arkApiClient, arkApiProxy, config, }) => {
             setMessages((prev) => [...prev, agentMessage]);
         }
         // Send message and get response with abort signal
-        const fullResponse = await chatClientRef.current.sendMessage(target.id, apiMessages, chatConfig, (chunk, toolCalls, arkMetadata) => {
+        const fullResponse = await chatClientRef.current.sendMessage(target.id, apiMessages, { ...chatConfig, a2aContextId: a2aContextIdRef.current }, (chunk, toolCalls, arkMetadata) => {
+            // Extract A2A context ID from first response
+            // Chat TUI always queries a single target, so contextId is in responses[0]
+            if (arkMetadata?.completedQuery?.status?.responses?.[0]?.a2a?.contextId) {
+                a2aContextIdRef.current =
+                    arkMetadata.completedQuery.status.responses[0].a2a.contextId;
+            }
             // Update message progressively as chunks arrive
             setMessages((prev) => {
                 const newMessages = [...prev];
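For context, the callback's `arkMetadata` argument is expected to carry the completed query, so the contextId lookup above walks a structure roughly like this. The field names follow this release's `ArkMetadata` and `QueryStatus` types; the values are illustrative only:

```js
// Illustrative shape of the metadata the chat callback inspects.
const arkMetadata = {
    agent: 'my-agent',
    completedQuery: {
        status: {
            phase: 'done',
            responses: [
                { content: 'Hello!', a2a: { contextId: 'a2a-context-456' } },
            ],
        },
    },
};
// The TUI stores responses[0].a2a.contextId in a2aContextIdRef and replays it on
// the next sendMessage call via { ...chatConfig, a2aContextId: a2aContextIdRef.current }.
```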
package/dist/lib/chatClient.d.ts
CHANGED
@@ -1,8 +1,11 @@
 import { ArkApiClient, QueryTarget } from './arkApiClient.js';
+import type { Query } from './types.js';
 export { QueryTarget };
 export interface ChatConfig {
     streamingEnabled: boolean;
     currentTarget?: QueryTarget;
+    a2aContextId?: string;
+    sessionId?: string;
 }
 export interface ToolCall {
     id: string;
@@ -18,6 +21,7 @@ export interface ArkMetadata {
     model?: string;
     query?: string;
     target?: string;
+    completedQuery?: Query;
 }
 export declare class ChatClient {
     private arkApiClient;
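A short sketch of the extended `ChatConfig`; both new fields are optional, and the values below are placeholders:

```js
// Omitting sessionId and a2aContextId keeps the previous behaviour.
const chatConfig = {
    streamingEnabled: false,
    sessionId: 'test-session-123',   // sent as metadata.sessionId on the chat completion request
    a2aContextId: 'a2a-context-456', // serialized into metadata.queryAnnotations
};
await chatClient.sendMessage('agent/test-agent', [{ role: 'user', content: 'Hello' }], chatConfig);
```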
package/dist/lib/chatClient.js
CHANGED
@@ -1,3 +1,4 @@
+import { QUERY_ANNOTATIONS } from './constants.js';
 export class ChatClient {
     constructor(arkApiClient) {
         this.arkApiClient = arkApiClient;
@@ -15,6 +16,21 @@ export class ChatClient {
             messages: messages,
             signal: signal,
         };
+        // Build metadata object - only add if we have something to include
+        if (config.sessionId || config.a2aContextId) {
+            params.metadata = {};
+            // Add sessionId directly to metadata (goes to spec, not annotations)
+            if (config.sessionId) {
+                params.metadata.sessionId = config.sessionId;
+            }
+            // Add A2A context ID to queryAnnotations (goes to annotations)
+            if (config.a2aContextId) {
+                const queryAnnotations = {
+                    [QUERY_ANNOTATIONS.A2A_CONTEXT_ID]: config.a2aContextId,
+                };
+                params.metadata.queryAnnotations = JSON.stringify(queryAnnotations);
+            }
+        }
         if (shouldStream) {
             let fullResponse = '';
             const toolCallsById = new Map();
@@ -23,16 +39,15 @@ export class ChatClient {
                 if (signal?.aborted) {
                     break;
                 }
-                const delta = chunk.choices[0]?.delta;
+                const delta = chunk.choices?.[0]?.delta;
                 // Extract ARK metadata if present
                 const arkMetadata = chunk.ark;
-                // Handle regular content
                 const content = delta?.content || '';
                 if (content) {
                     fullResponse += content;
-
-
-
+                }
+                if (onChunk) {
+                    onChunk(content, undefined, arkMetadata);
                 }
                 // Handle tool calls
                 if (delta?.tool_calls) {
@@ -68,6 +83,7 @@ export class ChatClient {
         const response = await this.arkApiClient.createChatCompletion(params);
         const message = response.choices[0]?.message;
         const content = message?.content || '';
+        const arkMetadata = response.ark;
         // Handle tool calls in non-streaming mode
         if (message?.tool_calls && message.tool_calls.length > 0) {
             const toolCalls = message.tool_calls.map((tc) => ({
@@ -80,12 +96,12 @@ export class ChatClient {
             }));
             // Send tool calls first
             if (onChunk) {
-                onChunk('', toolCalls);
+                onChunk('', toolCalls, arkMetadata);
             }
         }
         // Send content after tool calls
         if (content && onChunk) {
-            onChunk(content);
+            onChunk(content, undefined, arkMetadata);
         }
         return content;
     }
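Based on the metadata-building block above and the unit tests added in this release, the request handed to `createChatCompletion` when both identifiers are set should look roughly like this (values illustrative):

```js
const params = {
    model: 'agent/test-agent',
    messages: [{ role: 'user', content: 'Hello' }],
    metadata: {
        sessionId: 'test-session-123',
        // queryAnnotations is a JSON string keyed by the ARK annotation constant:
        queryAnnotations: JSON.stringify({
            'ark.mckinsey.com/a2a-context-id': 'a2a-context-456',
        }),
    },
};
```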
package/dist/lib/chatClient.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/lib/chatClient.spec.js
ADDED
@@ -0,0 +1,108 @@
+import { jest } from '@jest/globals';
+import { QUERY_ANNOTATIONS } from './constants.js';
+const mockCreateChatCompletion = jest.fn();
+const mockArkApiClient = {
+    createChatCompletion: mockCreateChatCompletion,
+    createChatCompletionStream: jest.fn(),
+    getQueryTargets: jest.fn(),
+};
+const { ChatClient } = await import('./chatClient.js');
+describe('ChatClient', () => {
+    beforeEach(() => {
+        jest.clearAllMocks();
+    });
+    describe('sendMessage', () => {
+        it('should include sessionId directly in metadata when provided', async () => {
+            const client = new ChatClient(mockArkApiClient);
+            mockCreateChatCompletion.mockResolvedValue({
+                id: 'test-id',
+                object: 'chat.completion',
+                created: 1234567890,
+                model: 'test-model',
+                choices: [
+                    {
+                        index: 0,
+                        message: { role: 'assistant', content: 'Hello' },
+                        finish_reason: 'stop',
+                    },
+                ],
+                usage: {
+                    prompt_tokens: 10,
+                    completion_tokens: 5,
+                    total_tokens: 15,
+                },
+            });
+            await client.sendMessage('agent/test-agent', [{ role: 'user', content: 'Hello' }], { streamingEnabled: false, sessionId: 'test-session-123' });
+            expect(mockCreateChatCompletion).toHaveBeenCalledWith(expect.objectContaining({
+                model: 'agent/test-agent',
+                messages: [{ role: 'user', content: 'Hello' }],
+                metadata: {
+                    sessionId: 'test-session-123',
+                },
+            }));
+        });
+        it('should include both sessionId in metadata and a2aContextId in queryAnnotations when both provided', async () => {
+            const client = new ChatClient(mockArkApiClient);
+            mockCreateChatCompletion.mockResolvedValue({
+                id: 'test-id',
+                object: 'chat.completion',
+                created: 1234567890,
+                model: 'test-model',
+                choices: [
+                    {
+                        index: 0,
+                        message: { role: 'assistant', content: 'Hello' },
+                        finish_reason: 'stop',
+                    },
+                ],
+                usage: {
+                    prompt_tokens: 10,
+                    completion_tokens: 5,
+                    total_tokens: 15,
+                },
+            });
+            await client.sendMessage('agent/test-agent', [{ role: 'user', content: 'Hello' }], {
+                streamingEnabled: false,
+                sessionId: 'test-session-123',
+                a2aContextId: 'a2a-context-456',
+            });
+            expect(mockCreateChatCompletion).toHaveBeenCalled();
+            const callArgs = mockCreateChatCompletion.mock.calls[0][0];
+            expect(callArgs.model).toBe('agent/test-agent');
+            expect(callArgs.messages).toEqual([{ role: 'user', content: 'Hello' }]);
+            expect(callArgs.metadata).toBeDefined();
+            expect(callArgs.metadata.sessionId).toBe('test-session-123');
+            expect(callArgs.metadata.queryAnnotations).toBeDefined();
+            const queryAnnotations = JSON.parse(callArgs.metadata.queryAnnotations);
+            expect(queryAnnotations[QUERY_ANNOTATIONS.A2A_CONTEXT_ID]).toBe('a2a-context-456');
+        });
+        it('should not include metadata when neither sessionId nor a2aContextId is provided', async () => {
+            const client = new ChatClient(mockArkApiClient);
+            mockCreateChatCompletion.mockResolvedValue({
+                id: 'test-id',
+                object: 'chat.completion',
+                created: 1234567890,
+                model: 'test-model',
+                choices: [
+                    {
+                        index: 0,
+                        message: { role: 'assistant', content: 'Hello' },
+                        finish_reason: 'stop',
+                    },
+                ],
+                usage: {
+                    prompt_tokens: 10,
+                    completion_tokens: 5,
+                    total_tokens: 15,
+                },
+            });
+            await client.sendMessage('agent/test-agent', [{ role: 'user', content: 'Hello' }], { streamingEnabled: false });
+            expect(mockCreateChatCompletion).toHaveBeenCalledWith(expect.objectContaining({
+                model: 'agent/test-agent',
+                messages: [{ role: 'user', content: 'Hello' }],
+            }));
+            const callArgs = mockCreateChatCompletion.mock.calls[0];
+            expect(callArgs[0].metadata).toBeUndefined();
+        });
+    });
+});
package/dist/lib/constants.js
ADDED
@@ -0,0 +1,8 @@
+// ARK annotation prefix - mirrors ark/internal/annotations/annotations.go
+const ARK_PREFIX = 'ark.mckinsey.com/';
+// Query annotation constants for metadata.queryAnnotations
+// Note: sessionId is passed directly in metadata, not in queryAnnotations
+export const QUERY_ANNOTATIONS = {
+    // A2A context ID annotation (goes to K8s annotations)
+    A2A_CONTEXT_ID: `${ARK_PREFIX}a2a-context-id`,
+};
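The constant resolves to the fully qualified annotation key; a quick check:

```js
import { QUERY_ANNOTATIONS } from './constants.js';

// Prints: ark.mckinsey.com/a2a-context-id
console.log(QUERY_ANNOTATIONS.A2A_CONTEXT_ID);
```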
package/dist/lib/executeQuery.js
CHANGED
@@ -27,7 +27,9 @@ export async function executeQuery(options) {
     let lastAgentName;
     let headerShown = false;
     let firstOutput = true;
-
+    // Get sessionId from option or environment variable
+    const sessionId = options.sessionId || process.env.ARK_SESSION_ID;
+    await chatClient.sendMessage(targetId, messages, { streamingEnabled: true, sessionId }, (chunk, toolCalls, arkMetadata) => {
        if (firstOutput) {
            spinner.stop();
            firstOutput = false;
@@ -100,6 +102,9 @@ async function executeQueryWithFormat(options) {
        spec: {
            input: options.message,
            ...(options.timeout && { timeout: options.timeout }),
+           ...((options.sessionId || process.env.ARK_SESSION_ID) && {
+               sessionId: options.sessionId || process.env.ARK_SESSION_ID,
+           }),
            targets: [
                {
                    type: options.targetType,
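In the `--output` (kubectl) path, the session ID lands in the manifest's `spec`. A rough sketch of the applied object, assuming the resource kind is `Query`; the names are illustrative:

```js
// Resolution order mirrors the diff: the --session-id option wins, then ARK_SESSION_ID.
const sessionId = options.sessionId || process.env.ARK_SESSION_ID;
const queryManifest = {
    apiVersion: 'ark.mckinsey.com/v1alpha1',
    kind: 'Query', // assumed kind for the CRD
    spec: {
        input: options.message,
        ...(sessionId && { sessionId }),
        targets: [{ type: options.targetType, name: options.targetName }],
    },
};
```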
package/dist/lib/executeQuery.spec.js
CHANGED
@@ -94,6 +94,18 @@ describe('executeQuery', () => {
         expect(mockArkApiProxyInstance.start).toHaveBeenCalled();
         expect(mockChatClient).toHaveBeenCalled();
         expect(mockSendMessage).toHaveBeenCalledWith('model/default', [{ role: 'user', content: 'Hello' }], { streamingEnabled: true }, expect.any(Function));
+    });
+    it('should pass sessionId to sendMessage when provided', async () => {
+        mockSendMessage.mockImplementation(async (targetId, messages, options, callback) => {
+            callback('Hello', undefined, { agent: 'test-agent' });
+        });
+        await executeQuery({
+            targetType: 'model',
+            targetName: 'default',
+            message: 'Hello',
+            sessionId: 'test-session-123',
+        });
+        expect(mockSendMessage).toHaveBeenCalledWith('model/default', [{ role: 'user', content: 'Hello' }], { streamingEnabled: true, sessionId: 'test-session-123' }, expect.any(Function));
         expect(mockSpinner.stop).toHaveBeenCalled();
         expect(mockArkApiProxyInstance.stop).toHaveBeenCalled();
         expect(mockStdoutWrite).toHaveBeenCalled();
@@ -193,6 +205,36 @@ describe('executeQuery', () => {
         expect(mockExeca).toHaveBeenCalledWith('kubectl', expect.arrayContaining(['wait', '--for=condition=Completed']), expect.any(Object));
         expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringMatching(/cli-query-\d+/));
     });
+    it('should include sessionId in query manifest when outputFormat is specified', async () => {
+        let appliedManifest = '';
+        mockExeca.mockImplementation(async (command, args) => {
+            if (args.includes('apply') && args.includes('-f') && args.includes('-')) {
+                // Capture the stdin input
+                const stdinIndex = args.indexOf('-');
+                if (stdinIndex >= 0 && args[stdinIndex + 1]) {
+                    appliedManifest = args[stdinIndex + 1];
+                }
+                return { stdout: '', stderr: '', exitCode: 0 };
+            }
+            if (args.includes('wait')) {
+                return { stdout: '', stderr: '', exitCode: 0 };
+            }
+            return { stdout: '', stderr: '', exitCode: 0 };
+        });
+        await executeQuery({
+            targetType: 'model',
+            targetName: 'default',
+            message: 'Hello',
+            outputFormat: 'name',
+            sessionId: 'test-session-456',
+        });
+        // Check that the manifest includes sessionId in spec
+        const applyCall = mockExeca.mock.calls.find((call) => call[1]?.includes('apply'));
+        expect(applyCall).toBeDefined();
+        // The manifest should be passed via stdin, so we need to check the actual call
+        // Since execa handles stdin separately, we verify the call was made
+        expect(mockExeca).toHaveBeenCalledWith('kubectl', expect.arrayContaining(['apply', '-f', '-']), expect.any(Object));
+    });
     it('should output json format', async () => {
         const mockQuery = {
             apiVersion: 'ark.mckinsey.com/v1alpha1',
package/dist/lib/types.d.ts
CHANGED
@@ -94,6 +94,25 @@ export interface QueryTarget {
 }
 export interface QueryResponse {
     content?: string;
+    a2a?: {
+        contextId?: string;
+    };
+}
+export interface QueryStatus {
+    phase?: 'initializing' | 'running' | 'done' | 'error' | 'canceled';
+    conditions?: K8sCondition[];
+    responses?: QueryResponse[];
+    message?: string;
+    error?: string;
+    tokenUsage?: {
+        promptTokens?: number;
+        completionTokens?: number;
+        totalTokens?: number;
+    };
+    a2a?: {
+        contextId?: string;
+        taskId?: string;
+    };
 }
 export interface Query {
     apiVersion: string;
@@ -102,14 +121,10 @@ export interface Query {
     spec?: {
         input: string;
         targets: QueryTarget[];
+        sessionId?: string;
+        timeout?: string;
     };
-    status?: {
-        phase?: 'initializing' | 'running' | 'done' | 'error' | 'canceled';
-        conditions?: K8sCondition[];
-        responses?: QueryResponse[];
-        message?: string;
-        error?: string;
-    };
+    status?: QueryStatus;
 }
 export interface Tool {
     metadata: K8sMetadata;
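Put together, a populated `spec`/`status` pair under the updated interfaces would look roughly like this; the values are illustrative and the `QueryTarget` shape is assumed from the manifest built in executeQuery.js:

```js
// spec gains optional sessionId and timeout; status is now the shared QueryStatus shape.
const spec = {
    input: 'Hello',
    targets: [{ type: 'agent', name: 'my-agent' }], // assuming QueryTarget is { type, name }
    sessionId: 'test-session-123',
    timeout: '5m',
};
const status = {
    phase: 'done',
    responses: [{ content: 'Hi!', a2a: { contextId: 'a2a-context-456' } }],
    tokenUsage: { promptTokens: 10, completionTokens: 5, totalTokens: 15 },
    a2a: { contextId: 'a2a-context-456', taskId: 'task-1' },
};
```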
package/package.json
CHANGED
package/templates/models/azure.yaml
CHANGED
@@ -7,7 +7,7 @@ stringData:
   # IMPORTANT: Environment variables must be substituted before applying
   # Use one of these methods:
   # 1. make models-apply (recommended)
-  # 2. 
+  # 2. devspace dev (full deployment)
   # 3. make fix-models (if you have broken queries)
   # Manual: export AZURE_API_KEY="key" && envsubst < models/{{ .Values.modelName }}.yaml | kubectl apply -f -
   token: ${AZURE_API_KEY}
package/templates/project/Makefile
CHANGED
@@ -29,7 +29,7 @@ DOCKER_BUILD_DIR := $(OUT_DIR)/docker
 .PHONY: help
 help: ## Show this help message
 	@echo "🚀 Quick Commands:"
-	@echo "
+	@echo "  devspace dev          # Deploy ARK to cluster"
 	@echo "  make resources-apply  # Apply all custom resources in correct order"
 	@echo "  make fix-models       # Fix broken models (if you have query errors)"
 	@echo ""
package/templates/project/README.md
CHANGED
@@ -12,7 +12,7 @@ cd {{ .Values.projectName }}
 source .env  # Edit this file first with your API keys
 
 # 3. Deploy to your ARK cluster
-
+devspace dev
 
 # 4. Check your deployment
 kubectl get agents,teams,queries --namespace {{ .Values.namespace }}
package/templates/project/scripts/setup.sh
CHANGED
@@ -104,5 +104,5 @@ echo -e "${BLUE}Next steps:${NC}"
 echo "  make install          # Deploy to Kubernetes"
 echo "  make status           # Check deployment"
 echo ""
-echo "Or use 
-echo "
+echo "Or use ARK CLI for deployment:"
+echo "  devspace dev          # Local development"