@agentuity/core 2.0.0-beta.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/deprecation.d.ts +20 -0
- package/dist/deprecation.d.ts.map +1 -0
- package/dist/deprecation.js +102 -0
- package/dist/deprecation.js.map +1 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -0
- package/dist/index.js.map +1 -1
- package/dist/services/api.d.ts.map +1 -1
- package/dist/services/api.js +13 -5
- package/dist/services/api.js.map +1 -1
- package/dist/services/oauth/flow.d.ts +82 -0
- package/dist/services/oauth/flow.d.ts.map +1 -0
- package/dist/services/oauth/flow.js +308 -0
- package/dist/services/oauth/flow.js.map +1 -0
- package/dist/services/oauth/index.d.ts +2 -0
- package/dist/services/oauth/index.d.ts.map +1 -1
- package/dist/services/oauth/index.js +2 -0
- package/dist/services/oauth/index.js.map +1 -1
- package/dist/services/oauth/token-storage.d.ts +109 -0
- package/dist/services/oauth/token-storage.d.ts.map +1 -0
- package/dist/services/oauth/token-storage.js +140 -0
- package/dist/services/oauth/token-storage.js.map +1 -0
- package/dist/services/oauth/types.d.ts +63 -0
- package/dist/services/oauth/types.d.ts.map +1 -1
- package/dist/services/oauth/types.js +74 -0
- package/dist/services/oauth/types.js.map +1 -1
- package/dist/services/project/get.d.ts +12 -0
- package/dist/services/project/get.d.ts.map +1 -1
- package/dist/services/project/get.js +9 -0
- package/dist/services/project/get.js.map +1 -1
- package/dist/services/sandbox/client.d.ts +201 -1
- package/dist/services/sandbox/client.d.ts.map +1 -1
- package/dist/services/sandbox/client.js +276 -15
- package/dist/services/sandbox/client.js.map +1 -1
- package/dist/services/sandbox/create.d.ts +5 -0
- package/dist/services/sandbox/create.d.ts.map +1 -1
- package/dist/services/sandbox/create.js +11 -0
- package/dist/services/sandbox/create.js.map +1 -1
- package/dist/services/sandbox/execute.d.ts.map +1 -1
- package/dist/services/sandbox/execute.js +22 -11
- package/dist/services/sandbox/execute.js.map +1 -1
- package/dist/services/sandbox/execution.d.ts +1 -0
- package/dist/services/sandbox/execution.d.ts.map +1 -1
- package/dist/services/sandbox/execution.js +4 -2
- package/dist/services/sandbox/execution.js.map +1 -1
- package/dist/services/sandbox/files.js +1 -1
- package/dist/services/sandbox/files.js.map +1 -1
- package/dist/services/sandbox/index.d.ts +3 -1
- package/dist/services/sandbox/index.d.ts.map +1 -1
- package/dist/services/sandbox/index.js +1 -0
- package/dist/services/sandbox/index.js.map +1 -1
- package/dist/services/sandbox/job.d.ts +227 -0
- package/dist/services/sandbox/job.d.ts.map +1 -0
- package/dist/services/sandbox/job.js +109 -0
- package/dist/services/sandbox/job.js.map +1 -0
- package/dist/services/sandbox/run.d.ts +1 -0
- package/dist/services/sandbox/run.d.ts.map +1 -1
- package/dist/services/sandbox/run.js +83 -30
- package/dist/services/sandbox/run.js.map +1 -1
- package/dist/services/sandbox/types.d.ts +45 -0
- package/dist/services/sandbox/types.d.ts.map +1 -1
- package/dist/services/sandbox/types.js +42 -0
- package/dist/services/sandbox/types.js.map +1 -1
- package/dist/services/sandbox/util.d.ts +1 -0
- package/dist/services/sandbox/util.d.ts.map +1 -1
- package/dist/services/sandbox/util.js +1 -0
- package/dist/services/sandbox/util.js.map +1 -1
- package/dist/services/schedule/service.d.ts +5 -0
- package/dist/services/schedule/service.d.ts.map +1 -1
- package/dist/services/schedule/service.js +16 -0
- package/dist/services/schedule/service.js.map +1 -1
- package/dist/services/schedule/types.d.ts +1 -0
- package/dist/services/schedule/types.d.ts.map +1 -1
- package/package.json +2 -2
- package/src/deprecation.ts +120 -0
- package/src/index.ts +3 -0
- package/src/services/api.ts +15 -5
- package/src/services/oauth/flow.ts +356 -0
- package/src/services/oauth/index.ts +2 -0
- package/src/services/oauth/token-storage.ts +220 -0
- package/src/services/oauth/types.ts +95 -0
- package/src/services/project/get.ts +9 -0
- package/src/services/sandbox/client.ts +446 -16
- package/src/services/sandbox/create.ts +13 -0
- package/src/services/sandbox/execute.ts +26 -12
- package/src/services/sandbox/execution.ts +5 -2
- package/src/services/sandbox/files.ts +1 -1
- package/src/services/sandbox/index.ts +20 -0
- package/src/services/sandbox/job.ts +161 -0
- package/src/services/sandbox/run.ts +129 -34
- package/src/services/sandbox/types.ts +50 -0
- package/src/services/sandbox/util.ts +1 -0
- package/src/services/schedule/service.ts +20 -0
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
import { type APIClient, APIResponseSchema } from '../api.ts';
|
|
2
|
+
import { CreateJobOptionsSchema, JobSchema, type Job } from './types.ts';
|
|
3
|
+
import { throwSandboxError } from './util.ts';
|
|
4
|
+
import { z } from 'zod';
|
|
5
|
+
|
|
6
|
+
/** Request body schema for creating a job (same shape as the public create options). */
export const CreateJobRequestSchema = CreateJobOptionsSchema;

/** Payload of a successful create-job response: the created Job. */
export const CreateJobDataSchema = JobSchema;

/** Full API envelope for the create-job response. */
export const CreateJobResponseSchema = APIResponseSchema(CreateJobDataSchema);

/** Parameters accepted by the jobCreate service call. */
export const JobCreateParamsSchema = z.object({
	sandboxId: z.string().describe('Sandbox ID where the job should run'),
	options: CreateJobOptionsSchema.describe('Job creation options'),
	orgId: z.string().optional().describe('Optional org id for CLI auth context'),
	signal: z.custom<AbortSignal>().optional().describe('Optional abort signal for cancellation'),
});
export type JobCreateParams = z.infer<typeof JobCreateParamsSchema>;
|
|
19
|
+
|
|
20
|
+
export async function jobCreate(client: APIClient, params: JobCreateParams): Promise<Job> {
|
|
21
|
+
const { sandboxId, options, orgId, signal } = params;
|
|
22
|
+
const body: z.infer<typeof CreateJobRequestSchema> = {
|
|
23
|
+
command: options.command,
|
|
24
|
+
};
|
|
25
|
+
if (options.streams) {
|
|
26
|
+
body.streams = options.streams;
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
const queryParams = new URLSearchParams();
|
|
30
|
+
if (orgId) {
|
|
31
|
+
queryParams.set('orgId', orgId);
|
|
32
|
+
}
|
|
33
|
+
const queryString = queryParams.toString();
|
|
34
|
+
const url = `/sandbox/sandboxes/${sandboxId}/jobs${queryString ? `?${queryString}` : ''}`;
|
|
35
|
+
|
|
36
|
+
const resp = await client.post<z.infer<typeof CreateJobResponseSchema>>(
|
|
37
|
+
url,
|
|
38
|
+
body,
|
|
39
|
+
CreateJobResponseSchema,
|
|
40
|
+
CreateJobRequestSchema,
|
|
41
|
+
signal
|
|
42
|
+
);
|
|
43
|
+
|
|
44
|
+
if (resp.success) {
|
|
45
|
+
return resp.data;
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
throwSandboxError(resp, { sandboxId });
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
/** Payload of a successful get-job response: the requested Job. */
export const JobGetDataSchema = JobSchema;

/** Full API envelope for the get-job response. */
export const JobGetResponseSchema = APIResponseSchema(JobGetDataSchema);

/** Parameters accepted by the jobGet service call. */
export const JobGetParamsSchema = z.object({
	sandboxId: z.string().describe('Sandbox ID'),
	jobId: z.string().describe('Job ID'),
	orgId: z.string().optional().describe('Organization ID'),
	signal: z.custom<AbortSignal>().optional().describe('Abort signal for cancellation'),
});
export type JobGetParams = z.infer<typeof JobGetParamsSchema>;
|
|
62
|
+
|
|
63
|
+
export async function jobGet(client: APIClient, params: JobGetParams): Promise<Job> {
|
|
64
|
+
const { sandboxId, jobId, orgId, signal } = params;
|
|
65
|
+
const queryParams = new URLSearchParams();
|
|
66
|
+
if (orgId) {
|
|
67
|
+
queryParams.set('orgId', orgId);
|
|
68
|
+
}
|
|
69
|
+
const queryString = queryParams.toString();
|
|
70
|
+
const url = `/sandbox/sandboxes/${sandboxId}/jobs/${jobId}${queryString ? `?${queryString}` : ''}`;
|
|
71
|
+
|
|
72
|
+
const resp = await client.get<z.infer<typeof JobGetResponseSchema>>(
|
|
73
|
+
url,
|
|
74
|
+
JobGetResponseSchema,
|
|
75
|
+
signal
|
|
76
|
+
);
|
|
77
|
+
|
|
78
|
+
if (resp.success) {
|
|
79
|
+
return resp.data;
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
throwSandboxError(resp, { sandboxId, jobId: params.jobId });
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
/** Payload of a successful list-jobs response. */
export const JobListDataSchema = z.object({
	jobs: z.array(JobSchema).describe('List of jobs'),
});
export type JobListResponse = z.infer<typeof JobListDataSchema>;

/** Full API envelope for the list-jobs response. */
export const JobListResponseSchema = APIResponseSchema(JobListDataSchema);

/** Parameters accepted by the jobList service call. */
export const JobListParamsSchema = z.object({
	sandboxId: z.string().describe('Sandbox ID'),
	orgId: z.string().optional().describe('Organization ID'),
	limit: z.number().optional().describe('Maximum number of results'),
	signal: z.custom<AbortSignal>().optional().describe('Abort signal for cancellation'),
});
export type JobListParams = z.infer<typeof JobListParamsSchema>;
|
|
99
|
+
|
|
100
|
+
export async function jobList(client: APIClient, params: JobListParams): Promise<JobListResponse> {
|
|
101
|
+
const { sandboxId, orgId, limit, signal } = params;
|
|
102
|
+
const queryParams = new URLSearchParams();
|
|
103
|
+
if (orgId) {
|
|
104
|
+
queryParams.set('orgId', orgId);
|
|
105
|
+
}
|
|
106
|
+
if (limit !== undefined) {
|
|
107
|
+
queryParams.set('limit', String(limit));
|
|
108
|
+
}
|
|
109
|
+
const queryString = queryParams.toString();
|
|
110
|
+
const url = `/sandbox/sandboxes/${sandboxId}/jobs${queryString ? `?${queryString}` : ''}`;
|
|
111
|
+
|
|
112
|
+
const resp = await client.get<z.infer<typeof JobListResponseSchema>>(
|
|
113
|
+
url,
|
|
114
|
+
JobListResponseSchema,
|
|
115
|
+
signal
|
|
116
|
+
);
|
|
117
|
+
|
|
118
|
+
if (resp.success) {
|
|
119
|
+
return resp.data;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
throwSandboxError(resp, { sandboxId });
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
/** Payload of a successful stop-job response: the final Job state. */
export const JobStopDataSchema = JobSchema;

/** Full API envelope for the stop-job response. */
export const JobStopResponseSchema = APIResponseSchema(JobStopDataSchema);

/** Parameters accepted by the jobStop service call. */
export const JobStopParamsSchema = z.object({
	sandboxId: z.string().describe('Sandbox ID'),
	jobId: z.string().describe('Job ID'),
	force: z.boolean().optional().describe('Force termination (SIGKILL)'),
	orgId: z.string().optional().describe('Organization ID'),
	signal: z.custom<AbortSignal>().optional().describe('Abort signal for cancellation'),
});
export type JobStopParams = z.infer<typeof JobStopParamsSchema>;
|
|
137
|
+
|
|
138
|
+
export async function jobStop(client: APIClient, params: JobStopParams): Promise<Job> {
|
|
139
|
+
const { sandboxId, jobId, force, orgId, signal } = params;
|
|
140
|
+
const queryParams = new URLSearchParams();
|
|
141
|
+
if (orgId) {
|
|
142
|
+
queryParams.set('orgId', orgId);
|
|
143
|
+
}
|
|
144
|
+
if (force) {
|
|
145
|
+
queryParams.set('force', 'true');
|
|
146
|
+
}
|
|
147
|
+
const queryString = queryParams.toString();
|
|
148
|
+
const url = `/sandbox/sandboxes/${sandboxId}/jobs/${jobId}${queryString ? `?${queryString}` : ''}`;
|
|
149
|
+
|
|
150
|
+
const resp = await client.delete<z.infer<typeof JobStopResponseSchema>>(
|
|
151
|
+
url,
|
|
152
|
+
JobStopResponseSchema,
|
|
153
|
+
signal
|
|
154
|
+
);
|
|
155
|
+
|
|
156
|
+
if (resp.success) {
|
|
157
|
+
return resp.data;
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
throwSandboxError(resp, { sandboxId, jobId: params.jobId });
|
|
161
|
+
}
|
|
@@ -224,20 +224,103 @@ export async function sandboxRun(
|
|
|
224
224
|
logger?.debug('streams completed, fetching final status');
|
|
225
225
|
|
|
226
226
|
// Stream EOF means the sandbox is done — hadron only closes streams after the
|
|
227
|
-
// container exits.
|
|
228
|
-
//
|
|
227
|
+
// container exits. Poll for the exit code with retries because the lifecycle
|
|
228
|
+
// event (carrying the exit code) may still be in flight to Catalyst when the
|
|
229
|
+
// stream completes.
|
|
230
|
+
//
|
|
231
|
+
// Hadron drains container logs for up to 5s after exit, then closes the
|
|
232
|
+
// stream, then sends the lifecycle event in a goroutine. So the exit code
|
|
233
|
+
// typically arrives at Catalyst 5–7s after the container exits. We use a
|
|
234
|
+
// linear 1s polling interval (not exponential backoff) so we don't overshoot
|
|
235
|
+
// the window — 15 attempts × 1s = 15s total, which comfortably covers the
|
|
236
|
+
// drain + lifecycle propagation delay.
|
|
237
|
+
// Abort-aware sleep that rejects when the caller's signal fires.
|
|
238
|
+
const abortAwareSleep = (ms: number): Promise<void> =>
|
|
239
|
+
new Promise((resolve, reject) => {
|
|
240
|
+
if (signal?.aborted) {
|
|
241
|
+
reject(new DOMException('Aborted', 'AbortError'));
|
|
242
|
+
return;
|
|
243
|
+
}
|
|
244
|
+
const timer = setTimeout(resolve, ms);
|
|
245
|
+
signal?.addEventListener(
|
|
246
|
+
'abort',
|
|
247
|
+
() => {
|
|
248
|
+
clearTimeout(timer);
|
|
249
|
+
reject(new DOMException('Aborted', 'AbortError'));
|
|
250
|
+
},
|
|
251
|
+
{ once: true }
|
|
252
|
+
);
|
|
253
|
+
});
|
|
254
|
+
|
|
229
255
|
let exitCode = 0;
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
256
|
+
const maxStatusRetries = 15;
|
|
257
|
+
const statusPollInterval = 1000;
|
|
258
|
+
const statusPollStart = Date.now();
|
|
259
|
+
for (let attempt = 0; attempt < maxStatusRetries; attempt++) {
|
|
260
|
+
if (signal?.aborted) {
|
|
261
|
+
break;
|
|
262
|
+
}
|
|
263
|
+
try {
|
|
264
|
+
const sandboxStatus = await sandboxGetStatus(client, { sandboxId, orgId });
|
|
265
|
+
if (sandboxStatus.exitCode != null) {
|
|
266
|
+
exitCode = sandboxStatus.exitCode;
|
|
267
|
+
logger?.debug(
|
|
268
|
+
'[run] exit code %d found on attempt %d/%d (+%dms)',
|
|
269
|
+
exitCode,
|
|
270
|
+
attempt + 1,
|
|
271
|
+
maxStatusRetries,
|
|
272
|
+
Date.now() - statusPollStart
|
|
273
|
+
);
|
|
274
|
+
break;
|
|
275
|
+
} else if (sandboxStatus.status === 'failed') {
|
|
276
|
+
exitCode = 1;
|
|
277
|
+
logger?.debug(
|
|
278
|
+
'[run] sandbox failed on attempt %d/%d (+%dms)',
|
|
279
|
+
attempt + 1,
|
|
280
|
+
maxStatusRetries,
|
|
281
|
+
Date.now() - statusPollStart
|
|
282
|
+
);
|
|
283
|
+
break;
|
|
284
|
+
} else if (sandboxStatus.status === 'terminated') {
|
|
285
|
+
// Sandbox was destroyed. If exit code is missing, the
|
|
286
|
+
// terminated event may have overwritten it. Stop polling —
|
|
287
|
+
// no further updates will come.
|
|
288
|
+
logger?.debug(
|
|
289
|
+
'[run] sandbox terminated without exit code on attempt %d/%d (+%dms)',
|
|
290
|
+
attempt + 1,
|
|
291
|
+
maxStatusRetries,
|
|
292
|
+
Date.now() - statusPollStart
|
|
293
|
+
);
|
|
294
|
+
break;
|
|
295
|
+
}
|
|
296
|
+
// Exit code not yet propagated — wait before next poll.
|
|
297
|
+
if (attempt < maxStatusRetries - 1) {
|
|
298
|
+
await abortAwareSleep(statusPollInterval);
|
|
299
|
+
}
|
|
300
|
+
} catch (err) {
|
|
301
|
+
if (err instanceof DOMException && err.name === 'AbortError') {
|
|
302
|
+
break;
|
|
303
|
+
}
|
|
304
|
+
// Transient failure (sandbox briefly unavailable, network error).
|
|
305
|
+
// Retry instead of giving up — the lifecycle event may still arrive.
|
|
306
|
+
logger?.debug(
|
|
307
|
+
'[run] sandboxGetStatus attempt %d/%d failed (+%dms): %s',
|
|
308
|
+
attempt + 1,
|
|
309
|
+
maxStatusRetries,
|
|
310
|
+
Date.now() - statusPollStart,
|
|
311
|
+
err
|
|
312
|
+
);
|
|
313
|
+
if (attempt < maxStatusRetries - 1) {
|
|
314
|
+
await abortAwareSleep(statusPollInterval);
|
|
315
|
+
}
|
|
236
316
|
}
|
|
237
|
-
}
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
317
|
+
}
|
|
318
|
+
if (exitCode === 0) {
|
|
319
|
+
logger?.debug(
|
|
320
|
+
'[run] exit code polling finished with default 0 after %d attempts (+%dms)',
|
|
321
|
+
maxStatusRetries,
|
|
322
|
+
Date.now() - statusPollStart
|
|
323
|
+
);
|
|
241
324
|
}
|
|
242
325
|
|
|
243
326
|
if (timingLogsEnabled)
|
|
@@ -387,44 +470,56 @@ async function streamUrlToWritable(
|
|
|
387
470
|
writable: Writable,
|
|
388
471
|
signal: AbortSignal,
|
|
389
472
|
logger?: Logger,
|
|
390
|
-
|
|
473
|
+
_started?: number
|
|
391
474
|
): Promise<void> {
|
|
475
|
+
const streamStart = Date.now();
|
|
392
476
|
try {
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
477
|
+
// Signal to Pulse that this is a v2 stream so it waits for v2 metadata
|
|
478
|
+
// instead of falling back to the legacy download path on a short timeout.
|
|
479
|
+
const v2Url = new URL(url);
|
|
480
|
+
v2Url.searchParams.set('v', '2');
|
|
481
|
+
logger?.debug('[stream] fetching: %s', v2Url.href);
|
|
482
|
+
const response = await fetch(v2Url.href, { signal });
|
|
483
|
+
logger?.debug(
|
|
484
|
+
'[stream] response status=%d in %dms',
|
|
485
|
+
response.status,
|
|
486
|
+
Date.now() - streamStart
|
|
487
|
+
);
|
|
400
488
|
|
|
401
489
|
if (!response.ok || !response.body) {
|
|
402
|
-
logger?.debug('stream
|
|
490
|
+
logger?.debug('[stream] not ok or no body (status=%d) — returning empty', response.status);
|
|
403
491
|
return;
|
|
404
492
|
}
|
|
405
493
|
|
|
406
494
|
const reader = response.body.getReader();
|
|
407
|
-
let
|
|
495
|
+
let chunks = 0;
|
|
496
|
+
let totalBytes = 0;
|
|
408
497
|
|
|
409
498
|
// Read until EOF - Pulse will block until data is available
|
|
410
499
|
while (true) {
|
|
411
500
|
const { done, value } = await reader.read();
|
|
412
501
|
if (done) {
|
|
413
|
-
logger?.debug(
|
|
414
|
-
|
|
415
|
-
|
|
502
|
+
logger?.debug(
|
|
503
|
+
'[stream] EOF after %dms (%d chunks, %d bytes)',
|
|
504
|
+
Date.now() - streamStart,
|
|
505
|
+
chunks,
|
|
506
|
+
totalBytes
|
|
507
|
+
);
|
|
416
508
|
break;
|
|
417
509
|
}
|
|
418
510
|
|
|
419
511
|
if (value) {
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
)
|
|
425
|
-
|
|
512
|
+
chunks++;
|
|
513
|
+
totalBytes += value.length;
|
|
514
|
+
if (chunks <= 3 || chunks % 100 === 0) {
|
|
515
|
+
logger?.debug(
|
|
516
|
+
'[stream] chunk #%d: %d bytes (total: %d bytes, +%dms)',
|
|
517
|
+
chunks,
|
|
518
|
+
value.length,
|
|
519
|
+
totalBytes,
|
|
520
|
+
Date.now() - streamStart
|
|
521
|
+
);
|
|
426
522
|
}
|
|
427
|
-
logger?.debug('stream chunk: %d bytes', value.length);
|
|
428
523
|
await writeAndDrain(writable, value);
|
|
429
524
|
}
|
|
430
525
|
}
|
|
@@ -433,9 +528,9 @@ async function streamUrlToWritable(
|
|
|
433
528
|
writable.end();
|
|
434
529
|
} catch (err) {
|
|
435
530
|
if (err instanceof Error && err.name === 'AbortError') {
|
|
436
|
-
logger?.debug('stream aborted');
|
|
531
|
+
logger?.debug('[stream] aborted after %dms', Date.now() - streamStart);
|
|
437
532
|
return;
|
|
438
533
|
}
|
|
439
|
-
logger?.debug('stream error: %s', err);
|
|
534
|
+
logger?.debug('[stream] error after %dms: %s', Date.now() - streamStart, err);
|
|
440
535
|
}
|
|
441
536
|
}
|
|
@@ -167,6 +167,35 @@ export const ExecutionStatusSchema = z.enum([
|
|
|
167
167
|
]);
|
|
168
168
|
export type ExecutionStatus = z.infer<typeof ExecutionStatusSchema>;
|
|
169
169
|
|
|
170
|
+
/** Lifecycle states a sandbox job can report. */
export const JobStatusSchema = z.enum(['pending', 'running', 'completed', 'failed', 'cancelled']);
export type JobStatus = z.infer<typeof JobStatusSchema>;

/**
 * A job executing (or finished) inside a sandbox.
 *
 * Completion-related fields (`exitCode`, `completedAt`, `error`) are
 * nullish until the job reaches a terminal state, per their descriptions.
 */
export const JobSchema = z.object({
	jobId: z.string().describe('Unique identifier for the job'),
	sandboxId: z.string().describe('ID of the sandbox where the job is running'),
	command: z.array(z.string()).describe('Command and arguments being executed'),
	status: JobStatusSchema.describe('Current status of the job'),
	exitCode: z.number().nullish().describe('Exit code of the job (set when completed)'),
	startedAt: z.string().nullish().describe('ISO timestamp when the job started'),
	completedAt: z.string().nullish().describe('ISO timestamp when the job completed'),
	error: z.string().nullish().describe('Error message if the job failed'),
	stdoutStreamUrl: z.string().nullish().describe('URL to stream stdout output'),
	stderrStreamUrl: z.string().nullish().describe('URL to stream stderr output'),
});
export type Job = z.infer<typeof JobSchema>;
|
|
186
|
+
|
|
187
|
+
/**
 * Options for creating a job: the command to run and an optional
 * stream configuration for redirecting stdout/stderr.
 */
export const CreateJobOptionsSchema = z.object({
	command: z.array(z.string()).describe('Command and arguments to execute'),
	streams: z
		.object({
			stdout: z.string().optional().describe('Stream ID for stdout output'),
			stderr: z.string().optional().describe('Stream ID for stderr output'),
		})
		.optional()
		.describe('Stream configuration for output redirection'),
});
export type CreateJobOptions = z.infer<typeof CreateJobOptionsSchema>;
|
|
198
|
+
|
|
170
199
|
/** Read-only stream interface for consuming streams without write access */
|
|
171
200
|
export const StreamReaderSchema = z.object({
|
|
172
201
|
/** Unique stream identifier */
|
|
@@ -299,6 +328,13 @@ export const SandboxCreateOptionsSchema = z.object({
|
|
|
299
328
|
.record(z.string(), z.unknown())
|
|
300
329
|
.optional()
|
|
301
330
|
.describe('Optional user-defined metadata to associate with the sandbox.'),
|
|
331
|
+
/** Permission scopes for automatic service access (e.g., "services:read", "services:write"). */
|
|
332
|
+
scopes: z
|
|
333
|
+
.array(z.string())
|
|
334
|
+
.optional()
|
|
335
|
+
.describe(
|
|
336
|
+
'Permission scopes for automatic service access (e.g., "services:read", "services:write").'
|
|
337
|
+
),
|
|
302
338
|
});
|
|
303
339
|
export type SandboxCreateOptions = z.infer<typeof SandboxCreateOptionsSchema>;
|
|
304
340
|
|
|
@@ -369,6 +405,20 @@ export const SandboxSchema = z.object({
|
|
|
369
405
|
.describe('Resume the sandbox from a paused or evacuated state.'),
|
|
370
406
|
/** Destroy the sandbox */
|
|
371
407
|
destroy: z.custom<() => Promise<void>>().describe('Destroy the sandbox'),
|
|
408
|
+
/** Create a new job in the sandbox */
|
|
409
|
+
createJob: z
|
|
410
|
+
.custom<(options: CreateJobOptions) => Promise<Job>>()
|
|
411
|
+
.describe('Create a new job in the sandbox'),
|
|
412
|
+
/** Get a job by ID */
|
|
413
|
+
getJob: z.custom<(jobId: string) => Promise<Job>>().describe('Get a job by ID'),
|
|
414
|
+
/** List jobs in the sandbox */
|
|
415
|
+
listJobs: z
|
|
416
|
+
.custom<(limit?: number) => Promise<{ jobs: Job[] }>>()
|
|
417
|
+
.describe('List jobs in the sandbox'),
|
|
418
|
+
/** Stop a running job */
|
|
419
|
+
stopJob: z
|
|
420
|
+
.custom<(jobId: string, force?: boolean) => Promise<Job>>()
|
|
421
|
+
.describe('Stop a running job'),
|
|
372
422
|
});
|
|
373
423
|
export type Sandbox = z.infer<typeof SandboxSchema>;
|
|
374
424
|
|
|
@@ -176,6 +176,7 @@ export const SnapshotNotFoundError = StructuredError('SnapshotNotFoundError')<{
|
|
|
176
176
|
export const SandboxErrorContextSchema = z.object({
|
|
177
177
|
sandboxId: z.string().optional().describe('sandbox id'),
|
|
178
178
|
executionId: z.string().optional().describe('execution id'),
|
|
179
|
+
jobId: z.string().optional().describe('job id'),
|
|
179
180
|
sessionId: z.string().nullish().describe('session id'),
|
|
180
181
|
snapshotId: z.string().optional().describe('snapshot id'),
|
|
181
182
|
});
|
|
@@ -58,6 +58,18 @@ export const ScheduleSchema = z.object({
|
|
|
58
58
|
* the schedule fires or the expression is changed.
|
|
59
59
|
*/
|
|
60
60
|
due_date: z.string().describe('ISO 8601 timestamp of the next scheduled execution.'),
|
|
61
|
+
|
|
62
|
+
/**
|
|
63
|
+
* Whether this is a system-managed schedule.
|
|
64
|
+
*
|
|
65
|
+
* @remarks Internal schedules are created by the system and cannot be modified
|
|
66
|
+
* or deleted by users.
|
|
67
|
+
*/
|
|
68
|
+
internal: z
|
|
69
|
+
.boolean()
|
|
70
|
+
.describe(
|
|
71
|
+
'Whether this is a system-managed schedule. Internal schedules cannot be modified or deleted users.'
|
|
72
|
+
),
|
|
61
73
|
});
|
|
62
74
|
|
|
63
75
|
export type Schedule = z.infer<typeof ScheduleSchema>;
|
|
@@ -204,6 +216,14 @@ export const CreateScheduleParamsSchema = z.object({
|
|
|
204
216
|
*/
|
|
205
217
|
expression: z.string().describe('Cron expression defining when the schedule fires'),
|
|
206
218
|
|
|
219
|
+
/**
|
|
220
|
+
* Whether this is a system-managed schedule.
|
|
221
|
+
*
|
|
222
|
+
* @remarks Internal schedules are created by the system for workflows and cannot
|
|
223
|
+
* be modified or deleted directly by users.
|
|
224
|
+
*/
|
|
225
|
+
internal: z.boolean().optional().describe('Whether this is a system-managed schedule.'),
|
|
226
|
+
|
|
207
227
|
/**
|
|
208
228
|
* Optional array of destinations to create alongside the schedule.
|
|
209
229
|
*
|