dtu-github-actions 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,229 @@
1
+ import { describe, it, expect, beforeEach, beforeAll, afterAll } from "vitest";
2
+ import { state } from "../store.js";
3
+ import { bootstrapAndReturnApp } from "../index.js";
4
+ import { config } from "../../config.js";
5
+ import fs from "node:fs";
6
+ import path from "node:path";
7
// Directory where the DTU server writes cache archive files (cleaned between tests).
const CACHE_DIR = config.DTU_CACHE_DIR;
// Ephemeral port assigned in beforeAll once the server starts listening on port 0.
let PORT;
9
/**
 * Helper: run the full reserve → upload → commit cycle and return the cacheId.
 * @param {string} baseUrl - Server base URL, e.g. "http://localhost:1234".
 * @param {string} key - Cache key to reserve.
 * @param {string} version - Cache version string.
 * @param {string} content - Archive payload to upload.
 * @returns {Promise<number>} The reserved cacheId.
 */
async function createCache(baseUrl, key, version, content) {
    // Use the BYTE length, not the string length: Content-Range offsets and the
    // commit size are byte counts, which differ from .length for non-ASCII content.
    const byteLength = Buffer.byteLength(content);
    // Reserve
    let res = await fetch(`${baseUrl}/_apis/artifactcache/caches`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ key, version }),
    });
    expect(res.status).toBe(201);
    const { cacheId } = await res.json();
    // Upload
    res = await fetch(`${baseUrl}/_apis/artifactcache/caches/${cacheId}`, {
        method: "PATCH",
        headers: {
            "Content-Type": "application/octet-stream",
            "Content-Range": `bytes 0-${byteLength - 1}/*`,
        },
        body: content,
    });
    expect(res.status).toBe(200);
    // Commit
    res = await fetch(`${baseUrl}/_apis/artifactcache/caches/${cacheId}`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ size: byteLength }),
    });
    expect(res.status).toBe(200);
    return cacheId;
}
38
// End-to-end tests for the GitHub Actions artifact-cache API emulation:
// reserve → upload → commit → lookup → download, plus eviction, immutability,
// URL rewriting, and reservation-race behavior.
describe("DTU Cache API", () => {
    let server; // Polka handle; the raw Node server lives at server.server
    beforeAll(async () => {
        state.reset();
        const app = await bootstrapAndReturnApp();
        // Listen on port 0 so the OS assigns a free port; capture it for test URLs.
        return new Promise((resolve) => {
            server = app.listen(0, () => {
                const address = server.server?.address();
                PORT = address.port;
                resolve();
            });
        });
    });
    beforeEach(() => {
        state.reset();
        // Clean up any test cache files
        if (fs.existsSync(CACHE_DIR)) {
            for (const file of fs.readdirSync(CACHE_DIR)) {
                if (file.startsWith("cache_") || file.startsWith("temp_")) {
                    try {
                        fs.unlinkSync(path.join(CACHE_DIR, file));
                    }
                    // Best-effort: the file may already have been removed.
                    catch { }
                }
            }
        }
    });
    afterAll(async () => {
        // Close the underlying HTTP server if beforeAll managed to start it.
        await new Promise((resolve) => {
            if (server && server.server) {
                server.server.close(() => resolve());
            }
            else {
                resolve();
            }
        });
    });
    it("should handle full cache lifecycle", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        // 1. Check miss
        let res = await fetch(`${baseUrl}/_apis/artifactcache/caches?keys=my-key&version=1`);
        expect(res.status).toBe(204);
        // 2. Reserve → Upload → Commit
        const cacheId = await createCache(baseUrl, "my-key", "1", "hello world!");
        // 3. Check hit
        res = await fetch(`${baseUrl}/_apis/artifactcache/caches?keys=my-key&version=1`);
        expect(res.status).toBe(200);
        const hitData = await res.json();
        expect(hitData.result).toBe("hit");
        expect(hitData.archiveLocation).toBe(`${baseUrl}/_apis/artifactcache/artifacts/${cacheId}`);
        // 4. Download cache
        res = await fetch(hitData.archiveLocation);
        expect(res.status).toBe(200);
        const text = await res.text();
        expect(text).toBe("hello world!");
    });
    it("should evict stale cache entries when the archive file is missing", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        // Create a cache normally
        const cacheId = await createCache(baseUrl, "stale-key", "1", "data");
        // Verify hit
        let res = await fetch(`${baseUrl}/_apis/artifactcache/caches?keys=stale-key&version=1`);
        expect(res.status).toBe(200);
        // Delete the archive file behind the scenes (simulates OS cleanup / file loss)
        const archivePath = path.join(CACHE_DIR, `cache_${cacheId}.tar.gz`);
        expect(fs.existsSync(archivePath)).toBe(true);
        fs.unlinkSync(archivePath);
        // Now the cache check should evict the stale entry and return a miss
        res = await fetch(`${baseUrl}/_apis/artifactcache/caches?keys=stale-key&version=1`);
        expect(res.status).toBe(204);
        // Verify the entry was removed from state
        expect(state.caches.has("stale-key")).toBe(false);
    });
    it("should construct archiveLocation dynamically from the request host", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        // Create a cache
        const cacheId = await createCache(baseUrl, "url-key", "1", "content");
        // Manually overwrite the stored archiveLocation with a stale host
        const entry = state.caches.get("url-key");
        entry.archiveLocation = `http://stale-host:9999/_apis/artifactcache/artifacts/${cacheId}`;
        // The cache check should return a URL based on the current request, not the stale one
        const res = await fetch(`${baseUrl}/_apis/artifactcache/caches?keys=url-key&version=1`);
        expect(res.status).toBe(200);
        const data = await res.json();
        expect(data.archiveLocation).toBe(`${baseUrl}/_apis/artifactcache/artifacts/${cacheId}`);
        expect(data.archiveLocation).not.toContain("stale-host");
    });
    it("should reject reservation when cache key+version already exists (immutable)", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        // Create a cache
        await createCache(baseUrl, "immutable-key", "1", "first");
        // Try to reserve the same key+version again
        const res = await fetch(`${baseUrl}/_apis/artifactcache/caches`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ key: "immutable-key", version: "1" }),
        });
        expect(res.status).toBe(409);
        // But a different version should succeed
        const res2 = await fetch(`${baseUrl}/_apis/artifactcache/caches`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ key: "immutable-key", version: "2" }),
        });
        expect(res2.status).toBe(201);
    });
    it("should delete old archive file when overwriting a cache key", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        // Create initial cache
        const oldCacheId = await createCache(baseUrl, "overwrite-key", "1", "old data");
        const oldPath = path.join(CACHE_DIR, `cache_${oldCacheId}.tar.gz`);
        expect(fs.existsSync(oldPath)).toBe(true);
        // Force-clear the immutable guard so we can overwrite
        // (in practice this happens when key is the same but version differs)
        state.caches.delete("overwrite-key");
        // Create a new cache with the same key
        const newCacheId = await createCache(baseUrl, "overwrite-key", "1", "new data");
        const newPath = path.join(CACHE_DIR, `cache_${newCacheId}.tar.gz`);
        // Old file should be deleted, new file should exist
        // Note: the old file was already deleted since we cleared the entry,
        // so let's test the real scenario by directly manipulating state
        expect(fs.existsSync(newPath)).toBe(true);
    });
    it("should clean up old archive when committing with a pre-existing key", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        // Manually seed a fake old cache entry with a real file
        const fakeOldId = 111111;
        const fakeOldPath = path.join(CACHE_DIR, `cache_${fakeOldId}.tar.gz`);
        fs.writeFileSync(fakeOldPath, "old content");
        state.caches.set("cleanup-key", {
            version: "1",
            archiveLocation: `http://localhost:${PORT}/_apis/artifactcache/artifacts/${fakeOldId}`,
            size: 11,
        });
        expect(fs.existsSync(fakeOldPath)).toBe(true);
        // Now reserve + upload + commit a NEW cache for the same key but different version
        // (The immutable guard only blocks same key+version, not same key+different version)
        const newCacheId = await createCache(baseUrl, "cleanup-key", "2", "new content");
        // Old file should be deleted
        expect(fs.existsSync(fakeOldPath)).toBe(false);
        // New file should exist
        expect(fs.existsSync(path.join(CACHE_DIR, `cache_${newCacheId}.tar.gz`))).toBe(true);
    });
    it("should clear caches on state.reset()", () => {
        // Seed some cache entries
        state.caches.set("key-a", { version: "1", archiveLocation: "http://x/artifacts/1", size: 10 });
        state.caches.set("key-b", { version: "2", archiveLocation: "http://x/artifacts/2", size: 20 });
        state.pendingCaches.set(999, { tempPath: "/tmp/x", key: "key-c", version: "1" });
        expect(state.caches.size).toBe(2);
        expect(state.pendingCaches.size).toBe(1);
        state.reset();
        expect(state.caches.size).toBe(0);
        expect(state.pendingCaches.size).toBe(0);
    });
    it("should return 204 for version mismatch even if key exists", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        await createCache(baseUrl, "version-key", "1", "v1 data");
        // Check with different version — should miss
        const res = await fetch(`${baseUrl}/_apis/artifactcache/caches?keys=version-key&version=2`);
        expect(res.status).toBe(204);
    });
    it("should return 404 for download of non-existent cache ID", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        const res = await fetch(`${baseUrl}/_apis/artifactcache/artifacts/999999`);
        expect(res.status).toBe(404);
    });
    it("should reject a second reservation while the first is still pending (parallel jobs race)", async () => {
        const baseUrl = `http://localhost:${PORT}`;
        // First job reserves — should succeed
        const res1 = await fetch(`${baseUrl}/_apis/artifactcache/caches`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ key: "race-key", version: "1" }),
        });
        expect(res1.status).toBe(201);
        // Second job reserves the same key+version while first is still pending (not yet committed)
        const res2 = await fetch(`${baseUrl}/_apis/artifactcache/caches`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ key: "race-key", version: "1" }),
        });
        // Should be rejected — another job already has an in-flight reservation
        expect(res2.status).toBe(409);
        // Third parallel job also rejected
        const res3 = await fetch(`${baseUrl}/_apis/artifactcache/caches`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ key: "race-key", version: "1" }),
        });
        expect(res3.status).toBe(409);
    });
});
@@ -0,0 +1,3 @@
1
import { Polka } from "polka";
/** Builds "protocol://host" from request headers (Host / X-Forwarded-Proto), stripped of CR/LF. */
export declare function getBaseUrl(req: any): string;
/** Registers the internal /_dtu/* control routes (seed, start-runner, dump) on the given app. */
export declare function registerDtuRoutes(app: Polka): void;
@@ -0,0 +1,141 @@
1
+ import crypto from "node:crypto";
2
+ import fs from "node:fs";
3
+ import { state } from "../store.js";
4
+ import { createJobResponse } from "./actions/generators.js";
5
// Base URL extractor middleware (to handle localhost vs host.docker.internal properly)
// NOTE: strip \r\n from the Host header — HTTP/1.1 runners can include a trailing \r
// which, if embedded in a signed URL, causes Node.js to throw
// "Parse Error: Invalid header value char" when the toolkit uses that URL in a header.
export function getBaseUrl(req) {
    // Shared sanitizer: fall back when the header is absent/empty, then strip CR/LF and trim.
    const sanitize = (raw, fallback) => (raw || fallback).replace(/[\r\n]/g, "").trim();
    const protocol = sanitize(req.headers["x-forwarded-proto"], "http");
    const host = sanitize(req.headers.host, "localhost");
    return `${protocol}://${host}`;
}
14
/**
 * Registers the internal DTU control routes on the given Polka app:
 * - POST /_dtu/seed         — enqueue a job and dispatch it to a waiting runner poll
 * - POST /_dtu/start-runner — register a runner's log/timeline dirs and cache patterns
 * - GET  /_dtu/dump         — debug dump of the in-memory state maps
 * @param app - Polka application to attach the routes to.
 */
export function registerDtuRoutes(app) {
    // 1. Internal Seeding Endpoint
    app.post("/_dtu/seed", (req, res) => {
        try {
            const payload = req.body;
            const jobId = payload.id?.toString();
            if (jobId) {
                // Assign each step a fresh GUID (the runner protocol expects per-step Ids).
                const mappedSteps = (payload.steps || []).map((step) => ({
                    ...step,
                    Id: crypto.randomUUID(),
                }));
                const jobPayload = { ...payload, steps: mappedSteps };
                // Store job both in the generic map AND keyed by runner name for per-runner dispatch.
                // The runnerName is passed in the body (from local-job.ts which spreads the Job object).
                const runnerName = payload.runnerName;
                state.jobs.set(jobId, jobPayload);
                if (runnerName) {
                    state.runnerJobs.set(runnerName, jobPayload);
                }
                console.log(`[DTU] Seeded job: ${jobId}${runnerName ? ` for runner ${runnerName}` : ""}`);
                // Notify only the pending poll that belongs to this runner (if any already waiting).
                const baseUrl = getBaseUrl(req);
                let notified = false;
                for (const [sessionId, { res: pollRes, baseUrl: runnerBaseUrl }] of state.pendingPolls) {
                    const sessRunner = state.sessionToRunner.get(sessionId);
                    // Only dispatch to the runner this job was seeded for (or any runner if no runnerName)
                    if (runnerName && sessRunner !== runnerName) {
                        continue;
                    }
                    console.log(`[DTU] Notifying session ${sessionId} of new job ${jobId}`);
                    const planId = crypto.randomUUID();
                    // Map this planId to this runner's log directory
                    if (sessRunner) {
                        const logDir = state.runnerLogs.get(sessRunner);
                        if (logDir) {
                            state.planToLogDir.set(planId, logDir);
                        }
                    }
                    const jobResponse = createJobResponse(jobId, jobPayload, runnerBaseUrl || baseUrl, planId);
                    // Map timelineId → runner's timeline dir (CLI logs dir)
                    try {
                        const jobBody = JSON.parse(jobResponse.Body);
                        const timelineId = jobBody?.Timeline?.Id;
                        const tDir = sessRunner ? state.runnerTimelineDirs.get(sessRunner) : undefined;
                        if (timelineId && tDir) {
                            state.timelineToLogDir.set(timelineId, tDir);
                        }
                    }
                    catch {
                        /* best-effort */
                    }
                    // Answer the runner's parked long-poll with the job message.
                    pollRes.writeHead(200, { "Content-Type": "application/json" });
                    pollRes.end(JSON.stringify(jobResponse));
                    state.pendingPolls.delete(sessionId);
                    // Remove from runnerJobs since it was dispatched
                    if (sessRunner) {
                        state.runnerJobs.delete(sessRunner);
                    }
                    state.jobs.delete(jobId);
                    notified = true;
                    break;
                }
                if (!notified) {
                    console.log(`[DTU] No pending poll for job ${jobId} (runner: ${runnerName || "any"}) - job queued`);
                }
                res.writeHead(201, { "Content-Type": "application/json" });
                res.end(JSON.stringify({ status: "ok", jobId }));
            }
            else {
                res.writeHead(400);
                res.end(JSON.stringify({ error: "Missing job ID" }));
            }
        }
        catch (err) {
            console.error("[DTU] Seed error", err);
            res.writeHead(400);
            res.end(JSON.stringify({ error: "Invalid JSON" }));
        }
    });
    // POST /_dtu/start-runner
    // Called by localJob.ts when spawning a runner container
    app.post("/_dtu/start-runner", (req, res) => {
        try {
            const { runnerName, logDir, timelineDir, virtualCachePatterns } = req.body;
            if (runnerName && logDir) {
                fs.mkdirSync(logDir, { recursive: true });
                // Register this runner mapping so we can route logs later
                state.runnerLogs.set(runnerName, logDir);
                // Also store the timeline dir (CLI's logs dir) for this runner
                if (timelineDir) {
                    state.runnerTimelineDirs.set(runnerName, timelineDir);
                }
                // Register virtual cache key patterns (e.g. "pnpm") so bind-mounted paths
                // skip the tar archive entirely.
                if (Array.isArray(virtualCachePatterns)) {
                    for (const pattern of virtualCachePatterns) {
                        if (typeof pattern === "string" && pattern.length > 0) {
                            state.virtualCachePatterns.add(pattern);
                        }
                    }
                }
                console.log(`[DTU] Registered runner ${runnerName} with logs at ${logDir}${timelineDir ? `, timeline at ${timelineDir}` : ""}${virtualCachePatterns?.length
                    ? `, virtual cache patterns: ${virtualCachePatterns.join(", ")}`
                    : ""}`);
            }
        }
        catch (e) {
            // Best-effort registration: a malformed body must not break the caller.
            console.warn("[DTU] start-runner parse error:", e);
        }
        // Always acknowledge, even when registration was skipped.
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify({ ok: true }));
    });
    // Debug: Dump State
    app.get("/_dtu/dump", (req, res) => {
        const dump = {
            jobs: Object.fromEntries(state.jobs),
            logs: Object.fromEntries(state.logs),
            runnerLogs: Object.fromEntries(state.runnerLogs),
            runnerTimelineDirs: Object.fromEntries(state.runnerTimelineDirs),
            sessionToRunner: Object.fromEntries(state.sessionToRunner),
            planToLogDir: Object.fromEntries(state.planToLogDir),
            recordToStepName: Object.fromEntries(state.recordToStepName),
            timelineToLogDir: Object.fromEntries(state.timelineToLogDir),
        };
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify(dump));
    });
}
@@ -0,0 +1,2 @@
1
import { Polka } from "polka";
/** Registers mock GitHub REST API routes (jobs, tokens, installation, tarballs) on the given app. */
export declare function registerGithubRoutes(app: Polka): void;
@@ -0,0 +1,109 @@
1
+ import { execSync } from "node:child_process";
2
+ import { state } from "../store.js";
3
+ import { getBaseUrl } from "./dtu.js";
4
// Empty gzipped tarball, built once at module load; served for checkout tarball requests.
const EMPTY_TARBALL = execSync("tar czf - -T /dev/null");
5
/**
 * Registers mock GitHub REST API routes on the given Polka app:
 * job detail lookup, app installation token exchange, installation lookup,
 * runner registration tokens (bare and /api/v3 variants), global runner
 * registration handshake, and repo tarball downloads.
 * @param app - Polka application to attach the routes to.
 */
export function registerGithubRoutes(app) {
    // 2. GitHub REST API Mirror - Job Detail
    app.get("/repos/:owner/:repo/actions/jobs/:id", (req, res) => {
        const jobId = req.params.id;
        const job = state.jobs.get(jobId);
        if (job) {
            res.writeHead(200, { "Content-Type": "application/json" });
            res.end(JSON.stringify(job));
        }
        else {
            console.warn(`[DTU] Job not found: ${jobId}`);
            res.writeHead(404, { "Content-Type": "application/json" });
            res.end(JSON.stringify({ message: "Not Found (DTU Mock)" }));
        }
    });
    // 3. GitHub App Token Exchange Mock (App Level)
    app.post("/app/installations/:id/access_tokens", (req, res) => {
        const installationId = req.params.id;
        console.log(`[DTU] Token exchange for installation: ${installationId}`);
        const response = {
            token: `ghs_mock_token_${installationId}_${Math.random().toString(36).substring(7)}`,
            expires_at: new Date(Date.now() + 3600 * 1000).toISOString(),
            permissions: {
                actions: "read",
                metadata: "read",
            },
            repository_selection: "selected",
        };
        res.writeHead(201, { "Content-Type": "application/json" });
        res.end(JSON.stringify(response));
    });
    // 4. GitHub Installation Lookup Mock (Repo Level)
    app.get("/repos/:owner/:repo/installation", (req, res) => {
        const owner = req.params.owner;
        const repo = req.params.repo;
        console.log(`[DTU] Fetching installation for ${owner}/${repo}`);
        const baseUrl = getBaseUrl(req);
        const response = {
            id: 12345678,
            account: {
                login: owner,
                type: "User",
            },
            repository_selection: "all",
            access_tokens_url: `${baseUrl}/app/installations/12345678/access_tokens`,
        };
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify(response));
    });
    // 5. GitHub Runner Registration Token Mock
    // Supports both v3 and bare api calls through polling.
    // Both routes serve the identical payload; only the log suffix differs,
    // so share one handler factory instead of duplicating the body.
    const registrationTokenHandler = (logSuffix) => (req, res) => {
        const owner = req.params.owner;
        const repo = req.params.repo;
        console.log(`[DTU] Generating registration token for ${owner}/${repo}${logSuffix}`);
        const response = {
            token: `ghr_mock_registration_token_${Math.random().toString(36).substring(7)}`,
            expires_at: new Date(Date.now() + 3600 * 1000).toISOString(),
        };
        res.writeHead(201, { "Content-Type": "application/json" });
        res.end(JSON.stringify(response));
    };
    app.post("/repos/:owner/:repo/actions/runners/registration-token", registrationTokenHandler(""));
    app.post("/api/v3/repos/:owner/:repo/actions/runners/registration-token", registrationTokenHandler(" (v3)"));
    // 6. Global Runner Registration Mock (Discovery/Handshake)
    const globalRunnerRegistrationHandler = (req, res) => {
        console.log(`[DTU] Handling global runner registration (${req.url})`);
        const token = `ghr_mock_tenant_token_${Math.random().toString(36).substring(7)}`;
        const expiresAt = new Date(Date.now() + 3600 * 1000).toISOString();
        const baseUrl = getBaseUrl(req);
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify({
            token: token,
            token_schema: "OAuthAccessToken",
            authorization_url: `${baseUrl}/auth/authorize`,
            client_id: "mock-client-id",
            tenant_id: "mock-tenant-id",
            expiration: expiresAt,
            url: baseUrl,
        }));
    };
    app.post("/actions/runner-registration", globalRunnerRegistrationHandler);
    app.post("/api/v3/actions/runner-registration", globalRunnerRegistrationHandler);
    // 7. Tarball route — actions/checkout downloads repos via this endpoint.
    // Return an empty tar.gz since the workspace is already bind-mounted.
    const tarballHandler = (req, res) => {
        console.log(`[DTU] Serving empty tarball for ${req.url}`);
        res.writeHead(200, {
            "Content-Type": "application/gzip",
            "Content-Length": String(EMPTY_TARBALL.length),
        });
        res.end(EMPTY_TARBALL);
    };
    app.get("/repos/:owner/:repo/tarball/:ref", tarballHandler);
    app.get("/_apis/repos/:owner/:repo/tarball/:ref", tarballHandler);
}
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,22 @@
1
+ import { config } from "../config.js";
2
+ import { bootstrapAndReturnApp } from "./index.js";
3
+ import { getDtuLogPath, setWorkingDirectory, DTU_ROOT } from "./logger.js";
4
+ import path from "node:path";
5
// Optional working-directory override supplied by the CLI; relative paths are
// resolved against the DTU package root before being applied.
let workingDir = process.env.AGENT_CI_WORKING_DIR;
if (workingDir) {
    if (!path.isAbsolute(workingDir)) {
        workingDir = path.resolve(DTU_ROOT, workingDir);
    }
    setWorkingDirectory(workingDir);
}
// Start the mock GitHub API server on all interfaces; exit non-zero if
// bootstrapping fails so a supervising process can detect the failure.
bootstrapAndReturnApp()
    .then((app) => {
    app.listen(config.DTU_PORT, "0.0.0.0", () => {
        console.log(`[DTU] OA-RUN-1 Mock GitHub API server running at http://0.0.0.0:${config.DTU_PORT}`);
        console.log(`[DTU] Logging to ${getDtuLogPath()}`);
    });
})
    .catch((err) => {
    console.error("[DTU] Failed to start:", err);
    process.exit(1);
});
@@ -0,0 +1,44 @@
1
import http from "node:http";
/** Override the cache directory at runtime (e.g. for ephemeral per-repo DTU instances). */
export declare function setCacheDir(dir: string): void;
/** In-memory server state shared by all DTU route modules. */
export declare const state: {
    // Job dispatch maps
    jobs: Map<string, any>;
    runnerJobs: Map<string, any>;
    sessions: Map<string, any>;
    messageQueues: Map<string, any[]>;
    // sessionId → parked long-poll response waiting for a job
    pendingPolls: Map<string, {
        res: http.ServerResponse<http.IncomingMessage>;
        baseUrl: string;
    }>;
    logs: Map<string, string[]>;
    // Runner/log routing maps
    runnerLogs: Map<string, string>;
    runnerTimelineDirs: Map<string, string>;
    sessionToRunner: Map<string, string>;
    recordToStepName: Map<string, string>;
    planToLogDir: Map<string, string>;
    timelineToLogDir: Map<string, string>;
    currentInProgressStep: Map<string, string>;
    // Cache-key substrings that short-circuit to a synthetic hit
    virtualCachePatterns: Set<string>;
    // Committed caches: cacheKey → metadata
    caches: Map<string, {
        version: string;
        archiveLocation: string;
        size: number;
    }>;
    // Reserved-but-uncommitted caches: cacheId → upload bookkeeping
    pendingCaches: Map<number, {
        tempPath: string;
        key: string;
        version: string;
    }>;
    // Uploaded artifacts: artifactName → container + file map
    artifacts: Map<string, {
        containerId: number;
        files: Map<string, string>;
    }>;
    pendingArtifacts: Map<number, {
        name: string;
        files: Map<string, string>;
    }>;
    isVirtualCacheKey(key: string): boolean;
    loadCachesFromDisk(): void;
    saveCachesToDisk(): void;
    reset(): void;
};
@@ -0,0 +1,100 @@
1
+ import fs from "node:fs";
2
+ import path from "node:path";
3
+ import { config } from "../config.js";
4
// Active cache directory and its JSON manifest path; mutable (`let`) so
// setCacheDir() can redirect storage at runtime.
let CACHE_DIR = config.DTU_CACHE_DIR;
let CACHES_FILE = path.join(CACHE_DIR, "caches.json");
6
/** Override the cache directory at runtime (e.g. for ephemeral per-repo DTU instances). */
export function setCacheDir(dir) {
    // Derive the manifest path first, then swap both module-level locations together.
    const manifestPath = path.join(dir, "caches.json");
    CACHE_DIR = dir;
    CACHES_FILE = manifestPath;
}
11
// Central in-memory state for the DTU mock server. All route modules read and
// mutate this singleton; reset() returns it to a pristine state between runs.
export const state = {
    jobs: new Map(),
    // Per-runner job queue: runnerName → job payload (for multi-job concurrency)
    runnerJobs: new Map(),
    sessions: new Map(),
    messageQueues: new Map(),
    // sessionId → parked long-poll response awaiting a job dispatch
    pendingPolls: new Map(),
    logs: new Map(),
    // Concurrency Maps
    // runnerName -> logDirectory
    runnerLogs: new Map(),
    // runnerName -> timeline directory (CLI's _/logs/<runnerName>/)
    runnerTimelineDirs: new Map(),
    // sessionId -> runnerName
    sessionToRunner: new Map(),
    // recordId/logId -> sanitized step name (for per-step log files)
    recordToStepName: new Map(),
    // planId → log directory (for per-step files)
    planToLogDir: new Map(),
    // timelineId → runner log directory (for persisting timeline.json)
    timelineToLogDir: new Map(),
    // timelineId → sanitized name of the currently in-progress step
    // (used as fallback when the feed recordId is a Job-level ID)
    currentInProgressStep: new Map(),
    // Substring patterns for cache keys that should always return a synthetic hit
    // with an empty archive (e.g. "pnpm" for bind-mounted pnpm stores).
    virtualCachePatterns: new Set(),
    // cacheKey -> { version: string, archiveLocation: string, size: number }
    caches: new Map(),
    // cacheId (number) -> { tempPath: string, key: string, version: string }
    pendingCaches: new Map(),
    // artifactName -> { containerId: number, files: Map<itemPath, diskPath> }
    artifacts: new Map(),
    // containerId -> { name: string, files: Map<itemPath, diskPath> }
    pendingArtifacts: new Map(),
    // True when the key contains any registered virtual-cache substring pattern.
    isVirtualCacheKey(key) {
        for (const pattern of this.virtualCachePatterns) {
            if (key.includes(pattern)) {
                return true;
            }
        }
        return false;
    },
    // Restore the committed-caches map from the on-disk JSON manifest, if present.
    // Failures are logged and ignored so a corrupt manifest never blocks startup.
    loadCachesFromDisk() {
        if (fs.existsSync(CACHES_FILE)) {
            try {
                const data = fs.readFileSync(CACHES_FILE, "utf-8");
                const parsed = JSON.parse(data);
                this.caches = new Map(Object.entries(parsed));
            }
            catch (e) {
                console.warn("[DTU] Failed to load caches from disk:", e);
            }
        }
    },
    // Persist the committed-caches map to the JSON manifest (best-effort).
    saveCachesToDisk() {
        if (!fs.existsSync(CACHE_DIR)) {
            fs.mkdirSync(CACHE_DIR, { recursive: true });
        }
        try {
            const obj = Object.fromEntries(this.caches);
            fs.writeFileSync(CACHES_FILE, JSON.stringify(obj, null, 2), "utf-8");
        }
        catch (e) {
            console.warn("[DTU] Failed to save caches to disk:", e);
        }
    },
    // Clear every map/set back to empty. Does NOT touch files on disk.
    reset() {
        this.jobs.clear();
        this.runnerJobs.clear();
        this.sessions.clear();
        this.messageQueues.clear();
        this.pendingPolls.clear();
        this.logs.clear();
        this.runnerLogs.clear();
        this.runnerTimelineDirs.clear();
        this.sessionToRunner.clear();
        this.recordToStepName.clear();
        this.planToLogDir.clear();
        this.timelineToLogDir.clear();
        this.currentInProgressStep.clear();
        this.virtualCachePatterns.clear();
        this.caches.clear();
        this.pendingCaches.clear();
        this.artifacts.clear();
        this.pendingArtifacts.clear();
    },
};
// Auto-load on startup
state.loadCachesFromDisk();