@replayio-app-building/netlify-recorder 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +224 -0
- package/dist/index.d.ts +209 -0
- package/dist/index.js +614 -0
- package/package.json +34 -0
package/README.md
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
# @netlify-recorder/core
|
|
2
|
+
|
|
3
|
+
Capture and replay Netlify function executions as [Replay](https://replay.io) recordings. This package intercepts outbound network calls and environment variable reads during handler execution, stores the captured data as a blob, and can later reproduce the exact execution as a Replay recording for debugging and analysis.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @netlify-recorder/core
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
### 1. Wrap your Netlify function with startRequest / finishRequest
|
|
14
|
+
|
|
15
|
+
```typescript
|
|
16
|
+
import { startRequest, finishRequest } from "@netlify-recorder/core";
|
|
17
|
+
import type { Handler } from "@netlify/functions";
|
|
18
|
+
|
|
19
|
+
const handler: Handler = async (event) => {
|
|
20
|
+
const reqContext = startRequest({
|
|
21
|
+
method: event.httpMethod,
|
|
22
|
+
url: event.path,
|
|
23
|
+
headers: event.headers as Record<string, string>,
|
|
24
|
+
body: event.body ?? undefined,
|
|
25
|
+
});
|
|
26
|
+
|
|
27
|
+
try {
|
|
28
|
+
// Your handler logic — all fetch() calls and process.env reads
|
|
29
|
+
// are automatically captured while the context is active.
|
|
30
|
+
const result = await myBusinessLogic();
|
|
31
|
+
|
|
32
|
+
return await finishRequest(
|
|
33
|
+
reqContext,
|
|
34
|
+
{
|
|
35
|
+
uploadBlob: async (data) => {
|
|
36
|
+
// Upload the JSON string to your blob storage (S3, R2, etc.)
|
|
37
|
+
// and return the public URL.
|
|
38
|
+
const res = await fetch("https://storage.example.com/upload", {
|
|
39
|
+
method: "PUT",
|
|
40
|
+
body: data,
|
|
41
|
+
});
|
|
42
|
+
const { url } = await res.json();
|
|
43
|
+
return url;
|
|
44
|
+
},
|
|
45
|
+
storeRequestData: async ({ blobUrl, commitSha, branchName, handlerPath }) => {
|
|
46
|
+
// Insert a row into your requests table and return the request ID.
|
|
47
|
+
const [row] = await sql`
|
|
48
|
+
INSERT INTO requests (blob_url, commit_sha, branch_name, handler_path, status)
|
|
49
|
+
VALUES (${blobUrl}, ${commitSha}, ${branchName}, ${handlerPath}, 'captured')
|
|
50
|
+
RETURNING id
|
|
51
|
+
`;
|
|
52
|
+
return row.id;
|
|
53
|
+
},
|
|
54
|
+
},
|
|
55
|
+
{
|
|
56
|
+
statusCode: 200,
|
|
57
|
+
headers: { "Content-Type": "application/json" },
|
|
58
|
+
body: JSON.stringify(result),
|
|
59
|
+
}
|
|
60
|
+
);
|
|
61
|
+
} catch (err) {
|
|
62
|
+
reqContext.cleanup(); // Always restore globals on error
|
|
63
|
+
throw err;
|
|
64
|
+
}
|
|
65
|
+
};
|
|
66
|
+
|
|
67
|
+
export { handler };
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
The response returned by `finishRequest` includes the `X-Replay-Request-Id` header, which the frontend can read to display the request ID.
|
|
71
|
+
|
|
72
|
+
### 2. Create a requests database table
|
|
73
|
+
|
|
74
|
+
The consuming app must provide a PostgreSQL table to track captured requests:
|
|
75
|
+
|
|
76
|
+
```sql
|
|
77
|
+
CREATE TABLE IF NOT EXISTS requests (
|
|
78
|
+
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
79
|
+
blob_url TEXT,
|
|
80
|
+
commit_sha TEXT,
branch_name TEXT,
|
|
81
|
+
handler_path TEXT,
|
|
82
|
+
recording_id TEXT,
|
|
83
|
+
status TEXT NOT NULL DEFAULT 'captured'
|
|
84
|
+
CHECK (status IN ('captured', 'processing', 'recorded', 'failed')),
|
|
85
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
86
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
87
|
+
);
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
**Columns:**
|
|
91
|
+
|
|
92
|
+
| Column | Type | Description |
|
|
93
|
+
|---|---|---|
|
|
94
|
+
| `id` | UUID | Unique request ID |
|
|
95
|
+
| `blob_url` | TEXT | URL to the stored captured data blob |
|
|
96
|
+
| `commit_sha` | TEXT | Git commit hash when the request was captured |
|
|
97
|
+
| `branch_name` | TEXT | Git branch name for container cloning |
|
|
98
|
+
| `handler_path` | TEXT | Path to the handler file that was executed |
|
|
99
|
+
| `recording_id` | TEXT | Replay recording ID (null until recording is created) |
|
|
100
|
+
| `status` | TEXT | One of: `captured`, `processing`, `recorded`, `failed` |
|
|
101
|
+
| `created_at` | TIMESTAMPTZ | When the request was captured |
|
|
102
|
+
| `updated_at` | TIMESTAMPTZ | Last status update |
|
|
103
|
+
|
|
104
|
+
### 3. Create a background function to produce recordings
|
|
105
|
+
|
|
106
|
+
```typescript
|
|
107
|
+
import { ensureRequestRecording } from "@netlify-recorder/core";
|
|
108
|
+
import type { Handler } from "@netlify/functions";
|
|
109
|
+
|
|
110
|
+
const handler: Handler = async (event) => {
|
|
111
|
+
const { requestId } = JSON.parse(event.body ?? "{}");
|
|
112
|
+
|
|
113
|
+
const recordingId = await ensureRequestRecording(requestId, {
|
|
114
|
+
repositoryUrl: process.env.APP_REPOSITORY_URL!,
|
|
115
|
+
replayApiKey: process.env.RECORD_REPLAY_API_KEY!,
|
|
116
|
+
lookupRequest: async (id) => {
|
|
117
|
+
const [row] = await sql`
|
|
118
|
+
SELECT blob_url, commit_sha, branch_name, handler_path
|
|
119
|
+
FROM requests WHERE id = ${id}
|
|
120
|
+
`;
|
|
121
|
+
return {
|
|
122
|
+
blobUrl: row.blob_url,
|
|
123
|
+
commitSha: row.commit_sha,
|
|
124
|
+
branchName: row.branch_name ?? "main",
|
|
125
|
+
handlerPath: row.handler_path,
|
|
126
|
+
};
|
|
127
|
+
},
|
|
128
|
+
updateStatus: async (id, status, recordingId) => {
|
|
129
|
+
await sql`
|
|
130
|
+
UPDATE requests
|
|
131
|
+
SET status = ${status},
|
|
132
|
+
recording_id = ${recordingId ?? null},
|
|
133
|
+
updated_at = NOW()
|
|
134
|
+
WHERE id = ${id}
|
|
135
|
+
`;
|
|
136
|
+
},
|
|
137
|
+
});
|
|
138
|
+
|
|
139
|
+
return {
|
|
140
|
+
statusCode: 200,
|
|
141
|
+
body: JSON.stringify({ recordingId }),
|
|
142
|
+
};
|
|
143
|
+
};
|
|
144
|
+
|
|
145
|
+
export { handler };
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
### 4. Create a container script for createRequestRecording
|
|
149
|
+
|
|
150
|
+
This script runs inside the container spawned by `ensureRequestRecording`, under `replay-node`:
|
|
151
|
+
|
|
152
|
+
```typescript
|
|
153
|
+
// scripts/create-request-recording.ts
|
|
154
|
+
import { createRequestRecording } from "@netlify-recorder/core";
|
|
155
|
+
|
|
156
|
+
const args = process.argv.slice(2);
|
|
157
|
+
const blobUrl = args[args.indexOf("--blob-url") + 1]!;
|
|
158
|
+
const handlerPath = args[args.indexOf("--handler-path") + 1]!;
|
|
159
|
+
|
|
160
|
+
await createRequestRecording(blobUrl, handlerPath, {
|
|
161
|
+
method: "POST",
|
|
162
|
+
url: handlerPath,
|
|
163
|
+
headers: {},
|
|
164
|
+
});
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
## API Reference
|
|
168
|
+
|
|
169
|
+
### `startRequest(requestInfo): RequestContext`
|
|
170
|
+
|
|
171
|
+
Begins capturing a Netlify handler execution. Patches `globalThis.fetch` and `process.env` to record all outbound network calls and environment variable reads.
|
|
172
|
+
|
|
173
|
+
**Parameters:**
|
|
174
|
+
- `requestInfo.method` — HTTP method of the incoming request
|
|
175
|
+
- `requestInfo.url` — Request URL/path
|
|
176
|
+
- `requestInfo.headers` — Request headers
|
|
177
|
+
- `requestInfo.body` — Optional request body
|
|
178
|
+
|
|
179
|
+
**Returns:** A `RequestContext` object to pass to `finishRequest`.
|
|
180
|
+
|
|
181
|
+
### `finishRequest(requestContext, callbacks, response): Promise<HandlerResponse>`
|
|
182
|
+
|
|
183
|
+
Finalizes the request capture. Restores original `fetch` and `process.env`, serializes the captured data, uploads it via the `uploadBlob` callback, stores metadata via `storeRequestData`, and returns the response with `X-Replay-Request-Id` header set.
|
|
184
|
+
|
|
185
|
+
**Parameters:**
|
|
186
|
+
- `requestContext` — The context returned by `startRequest`
|
|
187
|
+
- `callbacks.uploadBlob(data)` — Receives serialized JSON, must return a URL
|
|
188
|
+
- `callbacks.storeRequestData({ blobUrl, commitSha, branchName, handlerPath })` — Stores metadata, returns request ID
|
|
189
|
+
- `response` — The handler's response object (`{ statusCode, headers?, body? }`)
|
|
190
|
+
|
|
191
|
+
### `ensureRequestRecording(requestId, options): Promise<string>`
|
|
192
|
+
|
|
193
|
+
Spawns a container via `@replayio/app-building` to create a Replay recording from captured request data. Returns the recording ID.
|
|
194
|
+
|
|
195
|
+
**Parameters:**
|
|
196
|
+
- `requestId` — The request to create a recording for
|
|
197
|
+
- `options.repositoryUrl` — Git repository URL for the container to clone
|
|
198
|
+
- `options.replayApiKey` — Replay API key for recording upload
|
|
199
|
+
- `options.lookupRequest(id)` — Fetches `{ blobUrl, commitSha, branchName, handlerPath }` from the database
|
|
200
|
+
- `options.updateStatus(id, status, recordingId?)` — Updates the request status in the database
|
|
201
|
+
|
|
202
|
+
### `createRequestRecording(blobUrl, handlerPath, requestInfo): Promise<void>`
|
|
203
|
+
|
|
204
|
+
Called inside a container running under `replay-node`. Downloads the captured data blob, installs replay-mode interceptors (which return pre-recorded responses instead of making real calls), and executes the original handler so replay-node can record the execution.
|
|
205
|
+
|
|
206
|
+
**Parameters:**
|
|
207
|
+
- `blobUrl` — URL to the captured data blob
|
|
208
|
+
- `handlerPath` — Path to the handler module to execute
|
|
209
|
+
- `requestInfo` — The original request info to replay
|
|
210
|
+
|
|
211
|
+
## Environment Variables
|
|
212
|
+
|
|
213
|
+
| Variable | Required | Description |
|
|
214
|
+
|---|---|---|
|
|
215
|
+
| `COMMIT_SHA` | No | Git commit hash (defaults to `"HEAD"`) |
|
|
216
|
+
| `BRANCH_NAME` | No | Git branch name for container cloning (defaults to `"main"`) |
|
|
217
|
+
| `RECORD_REPLAY_API_KEY` | For recording | Replay API key for uploading recordings |
|
|
218
|
+
| `APP_REPOSITORY_URL` | For recording | Git repository URL for container cloning |
|
|
219
|
+
|
|
220
|
+
## How It Works
|
|
221
|
+
|
|
222
|
+
1. **Capture phase** (`startRequest` / `finishRequest`): When a Netlify function handles a request, `startRequest` patches `globalThis.fetch` and `process.env` with Proxies that record every outbound network call and environment variable read. When the handler completes, `finishRequest` restores the originals, serializes the captured data to JSON, uploads it as a blob, and records the request in the database.
|
|
223
|
+
|
|
224
|
+
2. **Recording phase** (`ensureRequestRecording` / `createRequestRecording`): A background function calls `ensureRequestRecording`, which spawns a container with `@replayio/app-building`. Inside the container, `createRequestRecording` downloads the blob, installs replay-mode interceptors that return the pre-recorded responses, and re-executes the handler under `replay-node`. Since replay-node records all execution, this produces a Replay recording of the exact same handler execution.
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
/** Metadata describing the incoming HTTP request being captured or replayed. */
interface RequestInfo {
    method: string;
    url: string;
    headers: Record<string, string>;
    body?: string;
}
/** Live state for one in-flight capture; created by startRequest, consumed by finishRequest. */
interface RequestContext {
    requestInfo: RequestInfo;
    capturedData: CapturedData;
    /** Epoch millis when startRequest was called. */
    startTime: number;
    /** Restores original globals (fetch, process.env). Called automatically by finishRequest. */
    cleanup: () => void;
}
/** Everything observed while the handler executed. */
interface CapturedData {
    networkCalls: NetworkCall[];
    envReads: EnvRead[];
}
/** One outbound fetch() observed during capture. */
interface NetworkCall {
    url: string;
    method: string;
    requestHeaders: Record<string, string>;
    /** Only present when the request body was a plain string. */
    requestBody?: string;
    responseStatus: number;
    responseHeaders: Record<string, string>;
    responseBody?: string;
    /** Epoch millis when the call completed. */
    timestamp: number;
}
/** One process.env property read observed during capture. */
interface EnvRead {
    key: string;
    /** undefined when the variable was unset at read time. */
    value: string | undefined;
    /** Epoch millis when the read occurred. */
    timestamp: number;
}
|
|
33
|
+
interface BlobData {
|
|
34
|
+
requestInfo: RequestInfo;
|
|
35
|
+
capturedData: CapturedData;
|
|
36
|
+
/** Git commit SHA of the code that served this request. */
|
|
37
|
+
commitSha: string;
|
|
38
|
+
startTime: number;
|
|
39
|
+
endTime: number;
|
|
40
|
+
}
|
|
41
|
+
interface FinishRequestCallbacks {
|
|
42
|
+
/** Uploads serialized captured data and returns the blob URL. */
|
|
43
|
+
uploadBlob: (data: string) => Promise<string>;
|
|
44
|
+
/** Stores request metadata in the database and returns the request ID. */
|
|
45
|
+
storeRequestData: (data: {
|
|
46
|
+
blobUrl: string;
|
|
47
|
+
commitSha: string;
|
|
48
|
+
branchName: string;
|
|
49
|
+
handlerPath: string;
|
|
50
|
+
}) => Promise<string>;
|
|
51
|
+
}
|
|
52
|
+
/**
|
|
53
|
+
* Infrastructure credentials required to start a recording container.
|
|
54
|
+
*
|
|
55
|
+
* The container is started on Fly.io via the `@replayio/app-building` package.
|
|
56
|
+
* It requires Infisical credentials (for secrets management inside the
|
|
57
|
+
* container) and a Fly.io token + app name.
|
|
58
|
+
*
|
|
59
|
+
* These must be set as environment variables on the Netlify site:
|
|
60
|
+
* INFISICAL_CLIENT_ID, INFISICAL_CLIENT_SECRET,
|
|
61
|
+
* INFISICAL_PROJECT_ID, INFISICAL_ENVIRONMENT,
|
|
62
|
+
* FLY_API_TOKEN, FLY_APP_NAME
|
|
63
|
+
*/
|
|
64
|
+
interface ContainerInfraConfig {
|
|
65
|
+
infisicalClientId: string;
|
|
66
|
+
infisicalClientSecret: string;
|
|
67
|
+
infisicalProjectId: string;
|
|
68
|
+
infisicalEnvironment: string;
|
|
69
|
+
flyToken: string;
|
|
70
|
+
flyApp: string;
|
|
71
|
+
}
|
|
72
|
+
interface EnsureRecordingOptions {
|
|
73
|
+
repositoryUrl: string;
|
|
74
|
+
replayApiKey: string;
|
|
75
|
+
/** Infrastructure credentials for starting the recording container. */
|
|
76
|
+
infraConfig?: ContainerInfraConfig;
|
|
77
|
+
/** Webhook URL the container can POST log entries to (optional). */
|
|
78
|
+
webhookUrl?: string;
|
|
79
|
+
/** Looks up request metadata by ID. */
|
|
80
|
+
lookupRequest: (requestId: string) => Promise<{
|
|
81
|
+
blobUrl: string;
|
|
82
|
+
commitSha: string;
|
|
83
|
+
branchName: string;
|
|
84
|
+
handlerPath: string;
|
|
85
|
+
}>;
|
|
86
|
+
/** Updates the request status (and optionally recording ID) in the database. */
|
|
87
|
+
updateStatus: (requestId: string, status: string, recordingId?: string) => Promise<void>;
|
|
88
|
+
/** Optional callback for the caller to emit structured log entries. */
|
|
89
|
+
onLog?: (level: "info" | "warn" | "error", message: string) => Promise<void>;
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/**
|
|
93
|
+
* Called at the beginning of a Netlify handler execution.
|
|
94
|
+
* Installs interceptors on globalThis.fetch and process.env to capture
|
|
95
|
+
* outbound network calls and environment variable reads made by the handler.
|
|
96
|
+
* Returns a request context used by finishRequest.
|
|
97
|
+
*
|
|
98
|
+
* When running inside createRequestRecording (replay mode), the replay
|
|
99
|
+
* interceptors are already installed. In that case we skip installing
|
|
100
|
+
* capture interceptors to avoid overwriting the replay layer (which would
|
|
101
|
+
* also crash on Node v16 where Headers/Response don't exist).
|
|
102
|
+
*/
|
|
103
|
+
declare function startRequest(requestInfo: RequestInfo): RequestContext;
|
|
104
|
+
|
|
105
|
+
interface HandlerResponse {
|
|
106
|
+
statusCode: number;
|
|
107
|
+
headers?: Record<string, string>;
|
|
108
|
+
body?: string;
|
|
109
|
+
}
|
|
110
|
+
interface FinishRequestOptions {
|
|
111
|
+
handlerPath?: string;
|
|
112
|
+
}
|
|
113
|
+
/**
|
|
114
|
+
* Called at the end of the handler execution.
|
|
115
|
+
* Restores original globals, serializes all captured data,
|
|
116
|
+
* uploads it as a JSON blob via the provided callback,
|
|
117
|
+
* stores the request metadata, and sets the X-Replay-Request-Id header.
|
|
118
|
+
*/
|
|
119
|
+
declare function finishRequest(requestContext: RequestContext, callbacks: FinishRequestCallbacks, response: HandlerResponse, options?: FinishRequestOptions): Promise<HandlerResponse>;
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Redacts sensitive environment variable values from blob data.
|
|
123
|
+
*
|
|
124
|
+
* This function performs two passes:
|
|
125
|
+
*
|
|
126
|
+
* Pass 1 — Redact env reads:
|
|
127
|
+
* For each captured EnvRead whose value should be redacted,
|
|
128
|
+
* replace the value with a "*"-repeated string of the same length.
|
|
129
|
+
*
|
|
130
|
+
* Pass 2 — Scrub the rest of the blob:
|
|
131
|
+
* Any redacted value that appears elsewhere in the blob data
|
|
132
|
+
* (network call headers, bodies, request info headers/body) is
|
|
133
|
+
* also replaced with the same mask. This prevents secrets from
|
|
134
|
+
* leaking through, e.g., an Authorization header that embeds
|
|
135
|
+
* an API key captured via process.env.
|
|
136
|
+
*
|
|
137
|
+
* The function returns a new BlobData object; the input is not mutated.
|
|
138
|
+
*/
|
|
139
|
+
declare function redactBlobData(blobData: BlobData): BlobData;
|
|
140
|
+
|
|
141
|
+
/**
|
|
142
|
+
* Called by a background function to convert a request ID into a Replay recording ID.
|
|
143
|
+
*
|
|
144
|
+
* The function:
|
|
145
|
+
* 1. Looks up request metadata (blob URL, commit, handler path).
|
|
146
|
+
* 2. Delegates to `spawnRecordingContainer` which starts a detached Fly.io
|
|
147
|
+
* container, runs the recording script under replay-node, and uploads
|
|
148
|
+
* the resulting recording.
|
|
149
|
+
* 3. Updates the request status with the recording ID.
|
|
150
|
+
*
|
|
151
|
+
* **Required infrastructure:** Infisical credentials and a Fly.io token/app.
|
|
152
|
+
* See `ContainerInfraConfig` in types.ts for details. When these are not
|
|
153
|
+
* configured the function fails with an actionable error message listing
|
|
154
|
+
* the missing environment variables.
|
|
155
|
+
*/
|
|
156
|
+
declare function ensureRequestRecording(requestId: string, options: EnsureRecordingOptions): Promise<string>;
|
|
157
|
+
/**
|
|
158
|
+
* Reads infrastructure config from environment variables.
|
|
159
|
+
* Returns undefined if any required variable is missing.
|
|
160
|
+
*/
|
|
161
|
+
declare function readInfraConfigFromEnv(): ContainerInfraConfig | undefined;
|
|
162
|
+
|
|
163
|
+
/**
|
|
164
|
+
* Called from within a container (running under replay-node) to create a Replay recording.
|
|
165
|
+
* Installs replay-mode interceptors that return pre-recorded responses, then executes
|
|
166
|
+
* the original handler so that replay-node can record the execution.
|
|
167
|
+
*
|
|
168
|
+
* Accepts either a blob URL (fetched at runtime) or pre-parsed BlobData (avoids needing
|
|
169
|
+
* globalThis.fetch, which is missing in replay-node's Node v16 environment).
|
|
170
|
+
*/
|
|
171
|
+
declare function createRequestRecording(blobUrlOrData: string | BlobData, handlerPath: string, requestInfo: RequestInfo): Promise<void>;
|
|
172
|
+
|
|
173
|
+
/**
|
|
174
|
+
* Options for spawning a recording container from a blob URL.
|
|
175
|
+
* This is the core building block — it knows nothing about request IDs or databases.
|
|
176
|
+
*/
|
|
177
|
+
interface SpawnRecordingContainerOptions {
|
|
178
|
+
/** URL (or data: URI) of the captured request blob JSON. */
|
|
179
|
+
blobUrl: string;
|
|
180
|
+
/** Handler file path relative to the app root (e.g. "netlify/functions/generate-haiku"). */
|
|
181
|
+
handlerPath: string;
|
|
182
|
+
/** Git commit SHA to check out inside the container. */
|
|
183
|
+
commitSha: string;
|
|
184
|
+
/** Git branch to clone. */
|
|
185
|
+
branchName: string;
|
|
186
|
+
/** Git repository URL for the app. */
|
|
187
|
+
repositoryUrl: string;
|
|
188
|
+
/** Replay API key for uploading recordings. */
|
|
189
|
+
replayApiKey: string;
|
|
190
|
+
/** Infrastructure credentials for Fly.io + Infisical. */
|
|
191
|
+
infraConfig: ContainerInfraConfig;
|
|
192
|
+
/** Optional webhook URL the container can POST log events to. */
|
|
193
|
+
logWebhookUrl?: string;
|
|
194
|
+
/** Optional callback for structured log entries. */
|
|
195
|
+
onLog?: (level: "info" | "warn" | "error", message: string) => Promise<void>;
|
|
196
|
+
}
|
|
197
|
+
/**
|
|
198
|
+
* Spawns a detached Fly.io container that:
|
|
199
|
+
* 1. Clones the app repo at the correct branch
|
|
200
|
+
* 2. Checks out the exact commit
|
|
201
|
+
* 3. Runs `scripts/create-request-recording.ts` under replay-node
|
|
202
|
+
* 4. Uploads the resulting recording
|
|
203
|
+
* 5. Outputs the recording ID
|
|
204
|
+
*
|
|
205
|
+
* Returns the recording ID on success, or throws on failure.
|
|
206
|
+
*/
|
|
207
|
+
declare function spawnRecordingContainer(options: SpawnRecordingContainerOptions): Promise<string>;
|
|
208
|
+
|
|
209
|
+
export { type BlobData, type CapturedData, type ContainerInfraConfig, type EnsureRecordingOptions, type EnvRead, type FinishRequestCallbacks, type NetworkCall, type RequestContext, type RequestInfo, type SpawnRecordingContainerOptions, createRequestRecording, ensureRequestRecording, finishRequest, readInfraConfigFromEnv, redactBlobData, spawnRecordingContainer, startRequest };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,614 @@
|
|
|
1
|
+
// Bundler-generated (esbuild-style) shim: exposes a `__require` that
// delegates to the real CommonJS `require` when one exists, and otherwise
// (ESM output) throws for dynamic requires. The Proxy branch lets property
// access (e.g. __require.resolve) fall through to the real require once it
// becomes available. Generated code — do not edit by hand.
var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
  get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
}) : x)(function(x) {
  if (typeof require !== "undefined") return require.apply(this, arguments);
  throw Error('Dynamic require of "' + x + '" is not supported');
});
|
|
7
|
+
|
|
8
|
+
// src/interceptors/network.ts
/**
 * Installs an interceptor on globalThis.fetch.
 *
 * mode "capture": forwards every call to the real fetch and appends a
 * NetworkCall record (url, method, headers, bodies, status) to `calls`.
 *
 * mode "replay": replaces fetch with a stub that returns the recorded
 * calls strictly in order and never touches the network. The stub is a
 * hand-rolled Response-like object because replay-node runs on Node v16,
 * where the global Response/Headers classes are unavailable.
 *
 * Returns a handle whose restore() reinstates the original fetch.
 */
function installNetworkInterceptor(mode, calls) {
  const originalFetch = globalThis.fetch;
  if (mode === "capture") {
    const captureFetch = async (input, init) => {
      const url = typeof input === "string" ? input : input instanceof URL ? input.href : input.url;
      const method = init?.method ?? (input instanceof Request ? input.method : "GET");
      const requestHeaders = {};
      if (init?.headers) {
        new Headers(init.headers).forEach((v, k) => {
          requestHeaders[k] = v;
        });
      } else if (input instanceof Request) {
        // Bug fix: a Request object carries its own headers, which were
        // previously dropped (only init.headers was recorded, even though
        // the method was already taken from the Request). Per fetch
        // semantics init.headers, when present, replaces the Request's
        // headers entirely — hence the else-if.
        input.headers.forEach((v, k) => {
          requestHeaders[k] = v;
        });
      }
      const requestBody = typeof init?.body === "string" ? init.body : void 0;
      const response = await originalFetch(input, init);
      // clone() so reading the body for capture does not consume it for the caller.
      const responseBody = await response.clone().text();
      const responseHeaders = {};
      response.headers.forEach((v, k) => {
        responseHeaders[k] = v;
      });
      calls.push({
        url,
        method,
        requestHeaders,
        requestBody,
        responseStatus: response.status,
        responseHeaders,
        responseBody,
        timestamp: Date.now()
      });
      return response;
    };
    globalThis.fetch = captureFetch;
  } else {
    let callIndex = 0;
    const replayFetch = async () => {
      // Replay is positional: the Nth fetch during replay receives the Nth
      // recorded response, regardless of the requested URL.
      const call = calls[callIndex++];
      if (!call) {
        throw new Error(
          `No more recorded network calls to replay (exhausted ${calls.length} calls)`
        );
      }
      const body = call.responseBody ?? "";
      const status = call.responseStatus;
      return {
        ok: status >= 200 && status < 300,
        status,
        statusText: "",
        // Minimal Headers-like facade; captured header names come from
        // Headers.forEach and are therefore lower-cased.
        headers: {
          get: (name) => (call.responseHeaders ?? {})[name.toLowerCase()] ?? null,
          has: (name) => name.toLowerCase() in (call.responseHeaders ?? {}),
          forEach: (cb) => {
            for (const [k, v] of Object.entries(call.responseHeaders ?? {})) cb(v, k);
          }
        },
        text: async () => body,
        json: async () => JSON.parse(body),
        clone: () => ({ text: async () => body, json: async () => JSON.parse(body) }),
        body: null,
        bodyUsed: false,
        redirected: false,
        type: "basic",
        url: call.url,
        arrayBuffer: async () => new ArrayBuffer(0),
        blob: async () => {
          throw new Error("blob() not supported in replay");
        },
        formData: async () => {
          throw new Error("formData() not supported in replay");
        }
      };
    };
    globalThis.fetch = replayFetch;
  }
  return {
    restore() {
      globalThis.fetch = originalFetch;
    }
  };
}
|
|
88
|
+
|
|
89
|
+
// src/interceptors/environment.ts
/**
 * Swaps process.env for a Proxy.
 *
 * mode "capture": every string-keyed property read is appended to `reads`
 * (the "toJSON" key is skipped so serializing the env does not pollute the
 * log). mode "replay": reads of previously-recorded keys return the
 * recorded value; everything else falls through to the real environment.
 *
 * Returns a handle whose restore() puts the original process.env back.
 */
function installEnvironmentInterceptor(mode, reads) {
  const realEnv = process.env;
  if (mode === "capture") {
    const captureHandler = {
      get(env, key) {
        const current = env[key];
        const loggable = typeof key === "string" && key !== "toJSON";
        if (loggable) {
          reads.push({ key, value: current, timestamp: Date.now() });
        }
        return current;
      }
    };
    process.env = new Proxy(realEnv, captureHandler);
  } else {
    // Later reads of the same key win, matching insertion order.
    const recorded = new Map(reads.map((read) => [read.key, read.value]));
    const replayHandler = {
      get(env, key) {
        return typeof key === "string" && recorded.has(key) ? recorded.get(key) : env[key];
      }
    };
    process.env = new Proxy(realEnv, replayHandler);
  }
  return {
    restore() {
      process.env = realEnv;
    }
  };
}
|
|
122
|
+
|
|
123
|
+
// src/startRequest.ts
/**
 * Begins capturing a Netlify handler execution: installs capture
 * interceptors on globalThis.fetch and process.env and returns the
 * RequestContext that finishRequest consumes.
 *
 * When running under createRequestRecording (replay mode, signalled by
 * globalThis.__REPLAY_RECORDING_MODE__), the replay interceptors are
 * already in place, so no capture interceptors are installed and cleanup
 * is a no-op — installing them would clobber the replay layer.
 */
function startRequest(requestInfo) {
  const capturedData = { networkCalls: [], envReads: [] };
  if (globalThis.__REPLAY_RECORDING_MODE__ === true) {
    return {
      requestInfo,
      capturedData,
      startTime: Date.now(),
      cleanup: () => {}
    };
  }
  const network = installNetworkInterceptor("capture", capturedData.networkCalls);
  const environment = installEnvironmentInterceptor("capture", capturedData.envReads);
  return {
    requestInfo,
    capturedData,
    startTime: Date.now(),
    // Restores the original fetch and process.env.
    cleanup: () => {
      network.restore();
      environment.restore();
    }
  };
}
|
|
146
|
+
|
|
147
|
+
// src/redact.ts
/**
 * Env var names whose values are safe to keep verbatim in captured blobs:
 * runtime/OS basics plus non-secret Netlify/AWS deploy metadata.
 */
const ENV_ALLOW_LIST = new Set([
  // Node / runtime
  "NODE_ENV",
  "NODE_VERSION",
  "NODE_PATH",
  "NODE_OPTIONS",
  "NPM_CONFIG_PREFIX",
  // OS / shell
  "HOME",
  "PATH",
  "USER",
  "SHELL",
  "LANG",
  "LC_ALL",
  "PWD",
  "HOSTNAME",
  "TZ",
  "TERM",
  "EDITOR",
  "TMPDIR",
  "LOGNAME",
  // Deploy metadata (non-secret identifiers)
  "COMMIT_SHA",
  // Common non-secret Netlify build vars
  "NETLIFY",
  "NETLIFY_DEV",
  "CONTEXT",
  "DEPLOY_PRIME_URL",
  "URL",
  "SITE_NAME",
  "BUILD_ID",
  "DEPLOY_ID",
  "DEPLOY_URL",
  "REPOSITORY_URL",
  "APP_REPOSITORY_URL",
  "BRANCH",
  "HEAD",
  "COMMIT_REF",
  "PULL_REQUEST",
  "REVIEW_ID",
  "LAMBDA_TASK_ROOT",
  "AWS_REGION",
  "AWS_EXECUTION_ENV",
  "AWS_LAMBDA_FUNCTION_NAME",
  "AWS_LAMBDA_FUNCTION_VERSION",
  "AWS_LAMBDA_FUNCTION_MEMORY_SIZE"
]);
/**
 * Whether an env value must be masked: anything not allow-listed whose
 * value is longer than 8 characters (shorter values are unlikely to be
 * real secrets and too collision-prone to scrub safely).
 */
function shouldRedact(key, value) {
  if (value === void 0 || value.length <= 8) return false;
  return !ENV_ALLOW_LIST.has(key);
}
/** Replaces every occurrence of needle in haystack (no regex escaping needed). */
function replaceAll(haystack, needle, replacement) {
  return haystack.split(needle).join(replacement);
}
/** Same-length "*" mask so redacted output preserves value lengths. */
function buildMask(secret) {
  return "*".repeat(secret.length);
}
/**
 * Returns a copy of blobData with secret env values masked (pass 1) and
 * every masked value scrubbed out of request/network URLs, headers and
 * bodies (pass 2), so a secret embedded in e.g. an Authorization header
 * cannot leak. The input object is never mutated.
 */
function redactBlobData(blobData) {
  const masks = new Map();
  const envReads = blobData.capturedData.envReads.map((read) => {
    if (read.value !== void 0 && shouldRedact(read.key, read.value)) {
      const mask = buildMask(read.value);
      masks.set(read.value, mask);
      return { ...read, value: mask };
    }
    return { ...read };
  });
  // Nothing secret was read: skip the scrubbing pass entirely.
  if (masks.size === 0) {
    return {
      ...blobData,
      capturedData: { ...blobData.capturedData, envReads }
    };
  }
  // Longest secrets first, so a secret containing another is fully masked.
  const ordered = [...masks.entries()].sort((a, b) => b[0].length - a[0].length);
  const scrub = (text) => {
    if (text === void 0) return void 0;
    return ordered.reduce((acc, [secret, mask]) => replaceAll(acc, secret, mask), text);
  };
  const scrubHeaders = (headers) => {
    const out = {};
    for (const [name, value] of Object.entries(headers)) {
      out[name] = scrub(value) ?? value;
    }
    return out;
  };
  const requestInfo = {
    ...blobData.requestInfo,
    url: scrub(blobData.requestInfo.url) ?? blobData.requestInfo.url,
    headers: scrubHeaders(blobData.requestInfo.headers),
    body: scrub(blobData.requestInfo.body)
  };
  const networkCalls = blobData.capturedData.networkCalls.map((call) => ({
    ...call,
    url: scrub(call.url) ?? call.url,
    requestHeaders: scrubHeaders(call.requestHeaders),
    requestBody: scrub(call.requestBody),
    responseHeaders: scrubHeaders(call.responseHeaders),
    responseBody: scrub(call.responseBody)
  }));
  return {
    ...blobData,
    requestInfo,
    capturedData: { networkCalls, envReads }
  };
}
|
|
270
|
+
|
|
271
|
+
// src/finishRequest.ts
/**
 * Finalizes a captured handler execution: restores the patched globals,
 * redacts and serializes the captured data, uploads it via
 * callbacks.uploadBlob, records metadata via callbacks.storeRequestData,
 * and returns the handler response with X-Replay-Request-Id set.
 *
 * Under replay mode (globalThis.__REPLAY_RECORDING_MODE__) nothing is
 * persisted — the response is returned untouched.
 */
async function finishRequest(requestContext, callbacks, response, options) {
  // Restore fetch / process.env first so our own upload I/O below is not
  // itself captured.
  requestContext.cleanup();
  if (globalThis.__REPLAY_RECORDING_MODE__ === true) {
    return response;
  }
  const commitSha = process.env.COMMIT_SHA ?? "HEAD";
  const branchName = process.env.BRANCH_NAME ?? "main";
  const blobData = redactBlobData({
    requestInfo: requestContext.requestInfo,
    capturedData: requestContext.capturedData,
    commitSha,
    startTime: requestContext.startTime,
    endTime: Date.now()
  });
  const blobUrl = await callbacks.uploadBlob(JSON.stringify(blobData));
  const requestId = await callbacks.storeRequestData({
    blobUrl,
    commitSha,
    branchName,
    handlerPath: options?.handlerPath ?? "unknown"
  });
  // Surface the request ID so the frontend can link back to the capture.
  return {
    ...response,
    headers: {
      ...response.headers,
      "X-Replay-Request-Id": requestId
    }
  };
}
|
|
303
|
+
|
|
304
|
+
// src/spawnRecordingContainer.ts
/**
 * Spin up a detached container (on Fly.io, via `@replayio/app-building`) that
 * replays a captured request under replay-node and uploads the resulting
 * Replay recording. Resolves with the recording ID scraped from the
 * container's logs, or throws if none is found.
 *
 * NOTE(review): the shape of `options.infraConfig` (Infisical + Fly fields)
 * is assumed from the property reads below — confirm against callers.
 */
async function spawnRecordingContainer(options) {
  const {
    blobUrl,
    handlerPath,
    commitSha,
    branchName,
    repositoryUrl,
    replayApiKey,
    infraConfig,
    logWebhookUrl,
    onLog
  } = options;
  // Best-effort progress reporting: listener errors are deliberately swallowed
  // so logging can never break the recording pipeline.
  const emit = async (level, message) => {
    if (onLog) {
      try {
        await onLog(level, message);
      } catch {
      }
    }
  };
  await emit("info", "Logging in to Infisical");
  // Lazy dynamic import: keeps the heavy infra dependency out of the normal
  // request path (it is an optional peer dependency per package.json).
  const {
    infisicalLogin,
    startContainer,
    FileContainerRegistry,
    httpGet,
    httpOptsFor
  } = await import("@replayio/app-building");
  const infisicalToken = await infisicalLogin(
    infraConfig.infisicalClientId,
    infraConfig.infisicalClientSecret
  );
  const infisicalConfig = {
    token: infisicalToken,
    projectId: infraConfig.infisicalProjectId,
    environment: infraConfig.infisicalEnvironment
  };
  // Local file-backed registry of running containers (shared /tmp path).
  const registry = new FileContainerRegistry("/tmp/netlify-recorder-containers.json");
  // Verbatim step-by-step instructions executed inside the container —
  // presumably consumed by an agent that runs shell commands (confirm against
  // @replayio/app-building's startContainer contract). Interpolated values
  // (commitSha, handlerPath, blobUrl, replayApiKey) are baked into the text.
  const initialPrompt = [
    `IMPORTANT: Follow these steps EXACTLY. Run each command as shown. Print ALL output.`,
    `Do NOT explore the codebase, read AGENTS.md, or deviate from these steps.`,
    `Do NOT attempt to debug, fix, shim, or work around ANY errors. If a command fails,`,
    `print the full error output and move on to the next step. Errors during handler replay`,
    `(like "No more recorded network calls" or DB errors) are EXPECTED and harmless.`,
    ``,
    `=== Step 1: Install dependencies ===`,
    `cd /repo/apps/netlify-recorder && npm install 2>&1`,
    ``,
    `=== Step 2: Checkout the exact commit ===`,
    `git fetch origin ${commitSha} 2>&1 || git fetch --all 2>&1`,
    `git checkout ${commitSha} 2>&1`,
    ``,
    `=== Step 3: Verify recording script exists ===`,
    `ls -la /repo/apps/netlify-recorder/scripts/create-request-recording.ts`,
    `If the file does NOT exist, print "ERROR: create-request-recording.ts not found" and STOP.`,
    ``,
    `=== Step 4: Pre-compile for replay-node (Node v16) ===`,
    `replay-node is Node v16 \u2014 it cannot run TypeScript or use modern APIs directly.`,
    `You MUST compile everything with esbuild first. Run these commands exactly:`,
    ``,
    `# Install undici for web API polyfills (fetch, Headers, Response):`,
    `cd /repo/apps/netlify-recorder && npm install undici@5 2>&1`,
    ``,
    `# Create the polyfill loader:`,
    `cat > /tmp/web-polyfill.cjs << 'POLYFILL'`,
    `try {`,
    ` var u = require("undici");`,
    ` if (!globalThis.fetch) globalThis.fetch = u.fetch;`,
    ` if (!globalThis.Headers) globalThis.Headers = u.Headers;`,
    ` if (!globalThis.Response) globalThis.Response = u.Response;`,
    ` if (!globalThis.Request) globalThis.Request = u.Request;`,
    `} catch(e) { console.error("polyfill warning:", e.message); }`,
    `POLYFILL`,
    ``,
    `# Compile the recording script (bundles all local TS dependencies):`,
    `npx esbuild scripts/create-request-recording.ts \\`,
    ` --bundle --platform=node --target=node16 --format=cjs \\`,
    ` --outfile=/tmp/create-recording.cjs 2>&1`,
    ``,
    `# Compile the handler (bundles everything including node_modules):`,
    `npx esbuild ${handlerPath}.ts \\`,
    ` --bundle --platform=node --target=node16 --format=cjs \\`,
    ` --outfile=/tmp/handler.cjs 2>&1`,
    ``,
    `=== Step 5: Run under replay-node ===`,
    `This MUST use replay-node so the execution is recorded. Run exactly:`,
    ``,
    `cd /repo/apps/netlify-recorder && npx @replayio/node \\`,
    ` -r /tmp/web-polyfill.cjs /tmp/create-recording.cjs \\`,
    ` --blob-url '${blobUrl}' \\`,
    ` --handler-path '/tmp/handler.cjs' 2>&1`,
    ``,
    `The output will show captured data being replayed. Errors like "No more recorded`,
    `network calls" or "DATABASE_URL" errors are EXPECTED \u2014 they come from post-handler`,
    `DB operations that were not in the original blob. Do NOT try to fix these.`,
    ``,
    `=== Step 6: Upload the recording ===`,
    `RECORD_REPLAY_API_KEY=${replayApiKey} npx replayio upload --all 2>&1`,
    ``,
    `Find the recording ID (UUID) in the upload output and print:`,
    ` recording: <recording-id>`,
    ``,
    `Then output <DONE>.`
  ].join("\n");
  await emit("info", "Starting detached container on Fly.io");
  const state = await startContainer(
    {
      infisical: infisicalConfig,
      registry,
      flyToken: infraConfig.flyToken,
      flyApp: infraConfig.flyApp,
      detached: true,
      initialPrompt,
      webhookUrl: logWebhookUrl
    },
    {
      repoUrl: repositoryUrl,
      cloneBranch: branchName
    }
  );
  await emit("info", `Container started: ${state.containerName} at ${state.baseUrl}`);
  // Poll the container's /status endpoint every 10s for up to 10 minutes.
  const maxWaitMs = 10 * 60 * 1e3;
  const pollIntervalMs = 1e4;
  const deadline = Date.now() + maxWaitMs;
  let containerDone = false;
  while (Date.now() < deadline) {
    try {
      const status = await httpGet(`${state.baseUrl}/status`, httpOptsFor(state));
      if (status?.state === "stopped" || status?.state === "stopping") {
        containerDone = true;
        break;
      }
    } catch {
      // An unreachable /status endpoint is treated as "container has exited".
      containerDone = true;
      break;
    }
    await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
  }
  if (!containerDone) {
    await emit("warn", "Container did not finish within 10 minutes");
  }
  // Scrape the recording ID (a 36-char UUID after a "recording:" marker,
  // matching the prompt's Step 6 output format) from the container logs.
  let recordingId = null;
  try {
    const logs = await httpGet(`${state.baseUrl}/logs?offset=0`, httpOptsFor(state));
    if (typeof logs === "string") {
      const match = logs.match(/recording[:\s]+([a-f0-9-]{36})/i);
      if (match?.[1]) {
        recordingId = match[1];
      }
    }
  } catch {
    await emit("warn", "Could not read container logs after exit");
  }
  if (!recordingId) {
    await emit("error", "Container completed but no recording ID was found in output");
    throw new Error("Recording creation failed: no recording ID returned from container");
  }
  await emit("info", `Container completed \u2014 recording ID: ${recordingId}`);
  return recordingId;
}
|
|
465
|
+
|
|
466
|
+
// src/ensureRequestRecording.ts
/**
 * Drive the end-to-end recording pipeline for a previously stored request:
 * mark it "processing", look up its blob/commit metadata, spawn the recording
 * container, then mark it "recorded" (with the recording ID) on success or
 * "failed" on any error. Returns the recording ID; rethrows on failure.
 */
async function ensureRequestRecording(requestId, options) {
  const { repositoryUrl, replayApiKey, infraConfig, webhookUrl, lookupRequest, updateStatus, onLog } = options;
  // Best-effort log forwarding; listener failures are deliberately ignored.
  const emit = async (level, message) => {
    if (!onLog) return;
    try {
      await onLog(level, message);
    } catch {
      // swallow listener errors
    }
  };
  await updateStatus(requestId, "processing");
  try {
    // Without the container infra config there is nothing we can spawn —
    // fail fast with an actionable message listing the missing env vars.
    if (!infraConfig) {
      const missing = getMissingInfraVars();
      throw new Error(
        `Container infrastructure not configured. Missing environment variables: ${missing.join(", ")}. These must be set on the Netlify site for recording creation to work.`
      );
    }
    const requestData = await lookupRequest(requestId);
    await emit("info", `Request data retrieved \u2014 handler: ${requestData.handlerPath}, branch: ${requestData.branchName}, commit: ${requestData.commitSha}`);
    const recordingId = await spawnRecordingContainer({
      blobUrl: requestData.blobUrl,
      handlerPath: requestData.handlerPath,
      commitSha: requestData.commitSha,
      branchName: requestData.branchName,
      repositoryUrl,
      replayApiKey,
      infraConfig,
      logWebhookUrl: webhookUrl,
      onLog
    });
    await emit("info", `Recording created successfully: ${recordingId}`);
    await updateStatus(requestId, "recorded", recordingId);
    return recordingId;
  } catch (failure) {
    const reason = failure instanceof Error ? failure.message : String(failure);
    await emit("error", `Recording creation failed: ${reason}`);
    await updateStatus(requestId, "failed");
    throw failure;
  }
}
|
|
508
|
+
/**
 * Report which of the container-infrastructure environment variables are
 * absent (or empty) in `process.env`. Returns the missing names in their
 * declared order; an empty array means the infra config is complete.
 */
function getMissingInfraVars() {
  const requiredVars = [
    "INFISICAL_CLIENT_ID",
    "INFISICAL_CLIENT_SECRET",
    "INFISICAL_PROJECT_ID",
    "INFISICAL_ENVIRONMENT",
    "FLY_API_TOKEN",
    "FLY_APP_NAME"
  ];
  const missing = [];
  for (const varName of requiredVars) {
    // Falsy check: unset AND empty-string values both count as missing.
    if (!process.env[varName]) {
      missing.push(varName);
    }
  }
  return missing;
}
|
|
519
|
+
/**
 * Assemble the container-infrastructure config from environment variables.
 * All six variables must be present and non-empty; otherwise returns
 * `undefined` so callers can fail fast (see ensureRequestRecording).
 */
function readInfraConfigFromEnv() {
  const env = process.env;
  const config = {
    infisicalClientId: env.INFISICAL_CLIENT_ID,
    infisicalClientSecret: env.INFISICAL_CLIENT_SECRET,
    infisicalProjectId: env.INFISICAL_PROJECT_ID,
    infisicalEnvironment: env.INFISICAL_ENVIRONMENT,
    flyToken: env.FLY_API_TOKEN,
    flyApp: env.FLY_APP_NAME
  };
  // Any falsy value (unset or empty string) invalidates the whole config.
  for (const value of Object.values(config)) {
    if (!value) {
      return undefined;
    }
  }
  return config;
}
|
|
538
|
+
|
|
539
|
+
// src/createRequestRecording.ts
/**
 * Replay a previously captured request so the execution can be recorded.
 * Intended to run inside the recording container under replay-node (see
 * spawnRecordingContainer's prompt). Loads the blob (by URL or as an
 * already-parsed object), installs replay-mode interceptors for network
 * and env reads, then invokes the handler with the original request.
 *
 * NOTE(review): the blob's `capturedData` shape (networkCalls, envReads with
 * {key, value}) is assumed from the property accesses below — confirm against
 * the producer in finishRequest/redactBlobData.
 */
async function createRequestRecording(blobUrlOrData, handlerPath, requestInfo) {
  let blobData;
  if (typeof blobUrlOrData === "string") {
    // A string argument is treated as a URL to fetch the blob JSON from.
    const response = await fetch(blobUrlOrData);
    if (!response.ok) {
      throw new Error(
        `Failed to download blob from ${blobUrlOrData}: ${response.status}`
      );
    }
    blobData = await response.json();
  } else {
    blobData = blobUrlOrData;
  }
  // Redacted env values arrive as all-asterisk masks (see redaction step in
  // the producer); replace them with usable placeholders so the handler can
  // still run. DATABASE_URL-style keys get a syntactically valid postgres URL.
  for (const read of blobData.capturedData.envReads) {
    if (read.value && read.value.length > 8 && /^\*+$/.test(read.value)) {
      if (read.key === "DATABASE_URL" || read.key.endsWith("_DATABASE_URL")) {
        read.value = "postgresql://replay:replay@localhost:5432/replay";
      } else {
        read.value = `placeholder_${read.key.toLowerCase()}`;
      }
    }
  }
  // Install interceptors in "replay" mode: network calls and env reads are
  // served from the captured data instead of hitting the real world.
  const networkHandle = installNetworkInterceptor(
    "replay",
    blobData.capturedData.networkCalls
  );
  const envHandle = installEnvironmentInterceptor(
    "replay",
    blobData.capturedData.envReads
  );
  // Signals finishRequest (if the handler calls it) to skip persistence.
  globalThis.__REPLAY_RECORDING_MODE__ = true;
  const g = globalThis;
  // replay-node is an old Node runtime; backfill Blob from the buffer module
  // when the global is absent. Best-effort — failures are ignored.
  if (typeof g.Blob === "undefined") {
    try {
      const { Blob } = __require("buffer");
      g.Blob = Blob;
    } catch {
    }
  }
  // Minimal File stand-in when the global is absent. Note this is a shallow
  // shim: Object.assign copies only own enumerable properties of the Blob,
  // so Blob methods are not inherited — sufficient only for code that reads
  // name/lastModified and simple data fields.
  if (typeof g.File === "undefined") {
    const B = g.Blob ?? Object;
    const FileShim = class File {
      name;
      lastModified;
      constructor(parts, name, options) {
        Object.assign(this, new B(parts, options));
        this.name = name;
        this.lastModified = Date.now();
      }
    };
    g.File = FileShim;
  }
  try {
    // Dynamically load the (pre-compiled) handler module and invoke it with
    // the original request, reconstructed in Netlify event shape.
    const handlerModule = await import(handlerPath);
    await handlerModule.handler({
      httpMethod: requestInfo.method,
      path: requestInfo.url,
      headers: requestInfo.headers,
      body: requestInfo.body ?? null
    });
  } finally {
    // Always unwind: clear replay mode and restore the real network/env
    // behavior even if the handler throws.
    globalThis.__REPLAY_RECORDING_MODE__ = false;
    networkHandle.restore();
    envHandle.restore();
  }
}
|
|
606
|
+
// Public API of the package (capture, persistence, redaction, and replay).
export {
  createRequestRecording,
  ensureRequestRecording,
  finishRequest,
  readInfraConfigFromEnv,
  redactBlobData,
  spawnRecordingContainer,
  startRequest
};
|
package/package.json
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@replayio-app-building/netlify-recorder",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Capture and replay Netlify function executions as Replay recordings",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"import": "./dist/index.js",
|
|
9
|
+
"types": "./dist/index.d.ts"
|
|
10
|
+
}
|
|
11
|
+
},
|
|
12
|
+
"files": [
|
|
13
|
+
"dist"
|
|
14
|
+
],
|
|
15
|
+
"scripts": {
|
|
16
|
+
"build": "tsup",
|
|
17
|
+
"check": "tsc --noEmit",
|
|
18
|
+
"prepublishOnly": "tsup"
|
|
19
|
+
},
|
|
20
|
+
"peerDependencies": {
|
|
21
|
+
"@replayio/app-building": ">=1.0.0"
|
|
22
|
+
},
|
|
23
|
+
"peerDependenciesMeta": {
|
|
24
|
+
"@replayio/app-building": {
|
|
25
|
+
"optional": true
|
|
26
|
+
}
|
|
27
|
+
},
|
|
28
|
+
"devDependencies": {
|
|
29
|
+
"@replayio/app-building": "^1.28.0",
|
|
30
|
+
"@types/node": "^25.6.0",
|
|
31
|
+
"tsup": "^8.5.1",
|
|
32
|
+
"typescript": "^5.7.3"
|
|
33
|
+
}
|
|
34
|
+
}
|