@logtape/cloudwatch-logs 1.1.3 → 1.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@logtape/cloudwatch-logs",
-  "version": "1.1.3",
+  "version": "1.1.5",
   "description": "AWS CloudWatch Logs sink for LogTape",
   "keywords": [
     "logging",
@@ -45,8 +45,11 @@
     "./package.json": "./package.json"
   },
   "sideEffects": false,
+  "files": [
+    "dist/"
+  ],
   "peerDependencies": {
-    "@logtape/logtape": "^1.1.3"
+    "@logtape/logtape": "^1.1.5"
   },
   "dependencies": {
     "@aws-sdk/client-cloudwatch-logs": "^3.0.0"
package/deno.json DELETED
@@ -1,36 +0,0 @@
-{
-  "name": "@logtape/cloudwatch-logs",
-  "version": "1.1.3",
-  "license": "MIT",
-  "exports": "./src/mod.ts",
-  "exclude": [
-    "./dist/"
-  ],
-  "imports": {
-    "@aws-sdk/client-cloudwatch-logs": "npm:@aws-sdk/client-cloudwatch-logs@^3.0.0",
-    "@dotenvx/dotenvx": "npm:@dotenvx/dotenvx@^1.44.2"
-  },
-  "tasks": {
-    "build": "pnpm build",
-    "test": "deno test --allow-read --allow-net --allow-sys --allow-env",
-    "test:node": {
-      "dependencies": [
-        "build"
-      ],
-      "command": "node --experimental-transform-types --test"
-    },
-    "test:bun": {
-      "dependencies": [
-        "build"
-      ],
-      "command": "bun test --timeout=10000"
-    },
-    "test-all": {
-      "dependencies": [
-        "test",
-        "test:node",
-        "test:bun"
-      ]
-    }
-  }
-}
package/src/mod.ts DELETED
@@ -1,2 +0,0 @@
-export type { CloudWatchLogsSinkOptions } from "./types.ts";
-export { getCloudWatchLogsSink } from "./sink.ts";
@@ -1,451 +0,0 @@
-import { suite } from "@alinea/suite";
-import {
-  CloudWatchLogsClient,
-  CreateLogGroupCommand,
-  CreateLogStreamCommand,
-  DeleteLogGroupCommand,
-  GetLogEventsCommand,
-} from "@aws-sdk/client-cloudwatch-logs";
-import "@dotenvx/dotenvx/config";
-import type { LogRecord } from "@logtape/logtape";
-import { jsonLinesFormatter } from "@logtape/logtape";
-import { assertEquals, assertInstanceOf } from "@std/assert";
-import process from "node:process";
-import { getCloudWatchLogsSink } from "./sink.ts";
-
-type Describe = (name: string, run: () => void | Promise<void>) => void;
-
-let test: Describe & { skip?: Describe } = suite(import.meta);
-
-// Skip integration tests unless AWS credentials are provided
-// Also skip on Bun due to AWS SDK response parsing issues
-const skipIntegrationTests = !process.env.AWS_ACCESS_KEY_ID ||
-  !process.env.AWS_SECRET_ACCESS_KEY ||
-  !process.env.AWS_REGION ||
-  ("Bun" in globalThis);
-
-if (skipIntegrationTests) {
-  if ("Bun" in globalThis) {
-    console.warn(
-      "⚠️ Skipping CloudWatch Logs integration tests on Bun runtime due to AWS SDK response parsing issues.",
-    );
-  } else {
-    console.warn(
-      "⚠️ Skipping CloudWatch Logs integration tests. " +
-        "Set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_REGION " +
-        "environment variables to run integration tests.",
-    );
-  }
-  test = test.skip!;
-}
-
-const testLogGroupName = `/logtape/integration-test-${Date.now()}`;
-const testLogStreamName = `test-stream-${Date.now()}`;
-
-test("Integration: CloudWatch Logs sink with real AWS service", async () => {
-  const client = new CloudWatchLogsClient({
-    region: process.env.AWS_REGION,
-    credentials: {
-      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
-      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
-      sessionToken: process.env.AWS_SESSION_TOKEN,
-    },
-  });
-
-  try {
-    // Create log group and stream for testing
-    try {
-      await client.send(
-        new CreateLogGroupCommand({ logGroupName: testLogGroupName }),
-      );
-    } catch (error) {
-      // Log group might already exist, ignore ResourceAlreadyExistsException
-      if (
-        !(error instanceof Error) ||
-        !("name" in error) ||
-        error.name !== "ResourceAlreadyExistsException"
-      ) {
-        throw error;
-      }
-    }
-
-    await client.send(
-      new CreateLogStreamCommand({
-        logGroupName: testLogGroupName,
-        logStreamName: testLogStreamName,
-      }),
-    );
-
-    const sink = getCloudWatchLogsSink({
-      client,
-      logGroupName: testLogGroupName,
-      logStreamName: testLogStreamName,
-      batchSize: 1,
-      flushInterval: 0,
-    });
-
-    // Create a fixed log record to avoid timestamp flakiness
-    const fixedTimestamp = 1672531200000; // 2023-01-01T00:00:00.000Z
-    const testLogRecord: LogRecord = {
-      category: ["integration", "test"],
-      level: "info",
-      message: [
-        "Integration test message at ",
-        new Date(fixedTimestamp).toISOString(),
-      ],
-      rawMessage: "Integration test message at {timestamp}",
-      timestamp: fixedTimestamp,
-      properties: { testId: "integration-001" },
-    };
-
-    // Send log record
-    sink(testLogRecord);
-    await sink[Symbol.asyncDispose]();
-
-    // Wait longer for AWS to process the log event
-    await new Promise((resolve) => setTimeout(resolve, 5000));
-
-    // Verify the log event was received by CloudWatch Logs
-    const getEventsCommand = new GetLogEventsCommand({
-      logGroupName: testLogGroupName,
-      logStreamName: testLogStreamName,
-    });
-
-    const response = await client.send(getEventsCommand);
-    console.log(
-      `Found ${response.events?.length ?? 0} events in CloudWatch Logs`,
-    );
-    if (response.events?.length === 0) {
-      console.log(
-        "No events found. This might be due to CloudWatch Logs propagation delay.",
-      );
-      // Make this test more lenient - just verify the sink worked without errors
-      return;
-    }
-
-    assertEquals(response.events?.length, 1);
-    assertEquals(
-      response.events?.[0].message,
-      'Integration test message at "2023-01-01T00:00:00.000Z"',
-    );
-  } finally {
-    // Always cleanup - delete log group (this also deletes log streams)
-    try {
-      await client.send(
-        new DeleteLogGroupCommand({ logGroupName: testLogGroupName }),
-      );
-    } catch (error) {
-      console.warn("Failed to cleanup test log group:", error);
-    }
-  }
-});
-
-test("Integration: CloudWatch Logs sink with batch processing", async () => {
-  const client = new CloudWatchLogsClient({
-    region: process.env.AWS_REGION,
-    credentials: {
-      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
-      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
-      sessionToken: process.env.AWS_SESSION_TOKEN,
-    },
-  });
-
-  const batchTestLogGroupName = `/logtape/batch-test-${Date.now()}`;
-  const batchTestLogStreamName = `batch-test-stream-${Date.now()}`;
-
-  try {
-    // Create log group and stream for testing
-    await client.send(
-      new CreateLogGroupCommand({ logGroupName: batchTestLogGroupName }),
-    );
-
-    await client.send(
-      new CreateLogStreamCommand({
-        logGroupName: batchTestLogGroupName,
-        logStreamName: batchTestLogStreamName,
-      }),
-    );
-
-    const sink = getCloudWatchLogsSink({
-      client,
-      logGroupName: batchTestLogGroupName,
-      logStreamName: batchTestLogStreamName,
-      batchSize: 3,
-      flushInterval: 100,
-    });
-
-    // Send multiple log records with fixed timestamps
-    const baseTimestamp = 1672531200000; // 2023-01-01T00:00:00.000Z
-    const logRecords = Array.from({ length: 5 }, (_, i) => ({
-      category: ["batch", "test"],
-      level: "info" as const,
-      message: [
-        `Batch test message ${i + 1} at `,
-        new Date(baseTimestamp + i * 1000).toISOString(),
-      ],
-      rawMessage: `Batch test message ${i + 1} at {timestamp}`,
-      timestamp: baseTimestamp + i * 1000,
-      properties: { batchId: "batch-001", index: i },
-    }));
-
-    logRecords.forEach((record) => sink(record));
-    await sink[Symbol.asyncDispose]();
-
-    // Wait longer for AWS to process the log events
-    await new Promise((resolve) => setTimeout(resolve, 5000));
-
-    // Verify all log events were received by CloudWatch Logs
-    const getEventsCommand = new GetLogEventsCommand({
-      logGroupName: batchTestLogGroupName,
-      logStreamName: batchTestLogStreamName,
-    });
-
-    const response = await client.send(getEventsCommand);
-    console.log(
-      `Found ${response.events?.length ?? 0} batch events in CloudWatch Logs`,
-    );
-    if ((response.events?.length ?? 0) === 0) {
-      console.log(
-        "No batch events found. This might be due to CloudWatch Logs propagation delay.",
-      );
-      // Make this test more lenient - just verify the sink worked without errors
-      return;
-    }
-
-    assertEquals(response.events?.length, 5);
-
-    // Verify messages are in order and contain expected patterns
-    response.events?.forEach((event, i) => {
-      const expectedPattern = `Batch test message ${
-        i + 1
-      } at "2023-01-01T00:0${i}:0${i}.000Z"`;
-      assertEquals(event.message, expectedPattern);
-    });
-  } finally {
-    // Always cleanup - delete log group (this also deletes log streams)
-    try {
-      await client.send(
-        new DeleteLogGroupCommand({ logGroupName: batchTestLogGroupName }),
-      );
-    } catch (error) {
-      console.warn("Failed to cleanup batch test log group:", error);
-    }
-  }
-});
-
-test("Integration: CloudWatch Logs sink with credentials from options", async () => {
-  const credentialsTestLogGroupName = `/logtape/credentials-test-${Date.now()}`;
-  const credentialsTestLogStreamName = `credentials-test-stream-${Date.now()}`;
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: credentialsTestLogGroupName,
-    logStreamName: credentialsTestLogStreamName,
-    region: process.env.AWS_REGION,
-    credentials: {
-      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
-      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
-      sessionToken: process.env.AWS_SESSION_TOKEN,
-    },
-    batchSize: 1,
-    flushInterval: 0,
-  });
-
-  // Verify sink is created successfully
-  assertInstanceOf(sink, Function);
-  assertInstanceOf(sink[Symbol.asyncDispose], Function);
-
-  // Create a separate client for setup/cleanup
-  const client = new CloudWatchLogsClient({
-    region: process.env.AWS_REGION,
-    credentials: {
-      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
-      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
-      sessionToken: process.env.AWS_SESSION_TOKEN,
-    },
-  });
-
-  try {
-    // Create log group and stream for testing
-    await client.send(
-      new CreateLogGroupCommand({ logGroupName: credentialsTestLogGroupName }),
-    );
-
-    await client.send(
-      new CreateLogStreamCommand({
-        logGroupName: credentialsTestLogGroupName,
-        logStreamName: credentialsTestLogStreamName,
-      }),
-    );
-
-    // Send log record with fixed timestamp
-    const fixedTimestamp = 1672531200000; // 2023-01-01T00:00:00.000Z
-    const credentialsTestLogRecord: LogRecord = {
-      category: ["credentials", "test"],
-      level: "info",
-      message: [
-        "Credentials test message at ",
-        new Date(fixedTimestamp).toISOString(),
-      ],
-      rawMessage: "Credentials test message at {timestamp}",
-      timestamp: fixedTimestamp,
-      properties: { testId: "credentials-001" },
-    };
-
-    sink(credentialsTestLogRecord);
-    await sink[Symbol.asyncDispose]();
-
-    // Wait longer for AWS to process the log event
-    await new Promise((resolve) => setTimeout(resolve, 5000));
-
-    // Verify the log event was received by CloudWatch Logs
-    const getEventsCommand = new GetLogEventsCommand({
-      logGroupName: credentialsTestLogGroupName,
-      logStreamName: credentialsTestLogStreamName,
-    });
-
-    const response = await client.send(getEventsCommand);
-    console.log(
-      `Found ${
-        response.events?.length ?? 0
-      } credentials events in CloudWatch Logs`,
-    );
-    if (response.events?.length === 0) {
-      console.log(
-        "No credentials events found. This might be due to CloudWatch Logs propagation delay.",
-      );
-      // Make this test more lenient - just verify the sink worked without errors
-      return;
-    }
-
-    assertEquals(response.events?.length, 1);
-    assertEquals(
-      response.events?.[0].message,
-      'Credentials test message at "2023-01-01T00:00:00.000Z"',
-    );
-  } finally {
-    // Always cleanup - delete log group (this also deletes log streams)
-    try {
-      await client.send(
-        new DeleteLogGroupCommand({
-          logGroupName: credentialsTestLogGroupName,
-        }),
-      );
-    } catch (error) {
-      console.warn("Failed to cleanup credentials test log group:", error);
-    }
-  }
-});
-
-test("Integration: CloudWatch Logs sink with JSON Lines formatter", async () => {
-  const structuredTestLogGroupName = `/logtape/structured-test-${Date.now()}`;
-  const structuredTestLogStreamName = `structured-test-stream-${Date.now()}`;
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: structuredTestLogGroupName,
-    logStreamName: structuredTestLogStreamName,
-    region: process.env.AWS_REGION,
-    credentials: {
-      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
-      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
-      sessionToken: process.env.AWS_SESSION_TOKEN,
-    },
-    batchSize: 1,
-    flushInterval: 0,
-    formatter: jsonLinesFormatter,
-  });
-
-  // Create a separate client for setup/cleanup
-  const client = new CloudWatchLogsClient({
-    region: process.env.AWS_REGION,
-    credentials: {
-      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
-      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
-      sessionToken: process.env.AWS_SESSION_TOKEN,
-    },
-  });
-
-  try {
-    // Create log group and stream for testing
-    await client.send(
-      new CreateLogGroupCommand({ logGroupName: structuredTestLogGroupName }),
-    );
-
-    await client.send(
-      new CreateLogStreamCommand({
-        logGroupName: structuredTestLogGroupName,
-        logStreamName: structuredTestLogStreamName,
-      }),
-    );
-
-    // Send structured log record with fixed timestamp
-    const fixedTimestamp = 1672531200000; // 2023-01-01T00:00:00.000Z
-    const structuredLogRecord: LogRecord = {
-      category: ["api", "auth"],
-      level: "warning",
-      message: ["Failed login attempt for user ", {
-        email: "test@example.com",
-        id: 456,
-      }],
-      rawMessage: "Failed login attempt for user {user}",
-      timestamp: fixedTimestamp,
-      properties: {
-        ip: "192.168.1.1",
-        userAgent: "TestAgent/1.0",
-        attempts: 3,
-      },
-    };
-
-    sink(structuredLogRecord);
-    await sink[Symbol.asyncDispose]();
-
-    // Wait longer for AWS to process the log event
-    await new Promise((resolve) => setTimeout(resolve, 5000));
-
-    // Verify the structured log event was received by CloudWatch Logs
-    const getEventsCommand = new GetLogEventsCommand({
-      logGroupName: structuredTestLogGroupName,
-      logStreamName: structuredTestLogStreamName,
-    });
-
-    const response = await client.send(getEventsCommand);
-    console.log(
-      `Found ${
-        response.events?.length ?? 0
-      } structured events in CloudWatch Logs`,
-    );
-    if (response.events?.length === 0) {
-      console.log(
-        "No structured events found. This might be due to CloudWatch Logs propagation delay.",
-      );
-      // Make this test more lenient - just verify the sink worked without errors
-      return;
-    }
-
-    assertEquals(response.events?.length, 1);
-
-    // Parse the JSON log message
-    const logMessage = response.events?.[0].message;
-    const parsedLog = JSON.parse(logMessage!);
-
-    // Verify structured fields are present (jsonLinesFormatter format)
-    assertEquals(parsedLog.level, "WARN"); // jsonLinesFormatter uses uppercase
-    assertEquals(parsedLog.logger, "api.auth"); // category becomes logger
-    assertEquals(
-      parsedLog.message,
-      'Failed login attempt for user {"email":"test@example.com","id":456}',
-    ); // pre-formatted message
-    assertEquals(parsedLog.properties.ip, "192.168.1.1");
-    assertEquals(parsedLog.properties.userAgent, "TestAgent/1.0");
-    assertEquals(parsedLog.properties.attempts, 3);
-    assertEquals(parsedLog["@timestamp"], "2023-01-01T00:00:00.000Z"); // Fixed timestamp
-  } finally {
-    // Always cleanup - delete log group (this also deletes log streams)
-    try {
-      await client.send(
-        new DeleteLogGroupCommand({ logGroupName: structuredTestLogGroupName }),
-      );
-    } catch (error) {
-      console.warn("Failed to cleanup structured test log group:", error);
-    }
-  }
-});
package/src/sink.test.ts DELETED
@@ -1,331 +0,0 @@
-import { suite } from "@alinea/suite";
-import {
-  CloudWatchLogsClient,
-  PutLogEventsCommand,
-} from "@aws-sdk/client-cloudwatch-logs";
-import type { LogRecord } from "@logtape/logtape";
-import { jsonLinesFormatter } from "@logtape/logtape";
-import { assertEquals, assertInstanceOf } from "@std/assert";
-import { mockClient } from "aws-sdk-client-mock";
-import { getCloudWatchLogsSink } from "./sink.ts";
-
-const test = suite(import.meta);
-
-const mockLogRecord: LogRecord = {
-  category: ["test"],
-  level: "info",
-  message: ["Hello, ", "world", "!"],
-  rawMessage: "Hello, {name}!",
-  timestamp: Date.now(),
-  properties: {},
-};
-
-test("getCloudWatchLogsSink() creates a working sink", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    region: "us-east-1",
-    batchSize: 1,
-    flushInterval: 0,
-  });
-
-  sink(mockLogRecord);
-  await sink[Symbol.asyncDispose]();
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-  const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
-  assertEquals(call.args[0].input.logGroupName, "/test/log-group");
-  assertEquals(call.args[0].input.logStreamName, "test-stream");
-  assertEquals(call.args[0].input.logEvents?.length, 1);
-  assertEquals(call.args[0].input.logEvents?.[0].message, 'Hello, "world"!');
-});
-
-test("getCloudWatchLogsSink() batches multiple log events", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 3,
-    flushInterval: 0,
-  });
-
-  sink(mockLogRecord);
-  sink(mockLogRecord);
-  sink(mockLogRecord);
-
-  await sink[Symbol.asyncDispose]();
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-  const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
-  assertEquals(call.args[0].input.logEvents?.length, 3);
-});
-
-test("getCloudWatchLogsSink() flushes when batch size is reached", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 2,
-    flushInterval: 0,
-  });
-
-  sink(mockLogRecord);
-  sink(mockLogRecord); // Should flush here
-  sink(mockLogRecord); // Should be in next batch
-
-  await sink[Symbol.asyncDispose](); // Should flush remaining
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 2);
-  assertEquals(
-    cwlMock.commandCalls(PutLogEventsCommand)[0].args[0].input.logEvents
-      ?.length,
-    2,
-  );
-  assertEquals(
-    cwlMock.commandCalls(PutLogEventsCommand)[1].args[0].input.logEvents
-      ?.length,
-    1,
-  );
-});
-
-test("getCloudWatchLogsSink() with custom client", async () => {
-  const client = new CloudWatchLogsClient({ region: "us-west-2" });
-  const cwlMock = mockClient(client);
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    client,
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 1,
-    flushInterval: 0,
-  });
-
-  sink(mockLogRecord);
-  await sink[Symbol.asyncDispose]();
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-});
-
-test("getCloudWatchLogsSink() handles credentials", () => {
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    region: "eu-west-1",
-    credentials: {
-      accessKeyId: "test-key",
-      secretAccessKey: "test-secret",
-    },
-  });
-
-  assertInstanceOf(sink, Function);
-  assertInstanceOf(sink[Symbol.asyncDispose], Function);
-});
-
-test("getCloudWatchLogsSink() handles errors gracefully", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).rejects(new Error("Permanent failure"));
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 1,
-    flushInterval: 0,
-    maxRetries: 0, // No retries
-    retryDelay: 10,
-  });
-
-  sink(mockLogRecord);
-  await sink[Symbol.asyncDispose]();
-
-  // Should attempt once and fail gracefully
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-});
-
-test("getCloudWatchLogsSink() handles large message batches", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  // Create a message that will exceed 1MB when combined with overhead
-  const largeMessage = "x".repeat(600000); // ~600KB message
-  const largeLogRecord: LogRecord = {
-    category: ["test"],
-    level: "info",
-    message: [largeMessage],
-    rawMessage: largeMessage,
-    timestamp: Date.now(),
-    properties: {},
-  };
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 10,
-    flushInterval: 0,
-  });
-
-  // Add two large messages - should exceed 1MB limit
-  sink(largeLogRecord);
-  sink(largeLogRecord);
-
-  await sink[Symbol.asyncDispose]();
-
-  const calls = cwlMock.commandCalls(PutLogEventsCommand);
-  // Should either flush immediately due to size or flush remaining on dispose
-  assertEquals(calls.length >= 1, true);
-});
-
-test("getCloudWatchLogsSink() formats complex log messages", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const complexLogRecord: LogRecord = {
-    category: ["app", "module"],
-    level: "error",
-    message: ["User ", { id: 123, name: "John" }, " failed to login"],
-    rawMessage: "User {user} failed to login",
-    timestamp: Date.now(),
-    properties: { error: "Invalid password" },
-  };
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 1,
-    flushInterval: 0,
-  });
-
-  sink(complexLogRecord);
-  await sink[Symbol.asyncDispose]();
-
-  const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
-  assertEquals(
-    call.args[0].input.logEvents?.[0].message,
-    'User {"id":123,"name":"John"} failed to login',
-  );
-});
-
-test("getCloudWatchLogsSink() respects batch size limits", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 50000, // Should be clamped to 10000
-    flushInterval: 0,
-  });
-
-  // Verify the sink works (batch size should be internally limited)
-  sink(mockLogRecord);
-  await sink[Symbol.asyncDispose]();
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-});
-
-test("getCloudWatchLogsSink() flushes remaining events on disposal", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 10,
-    flushInterval: 0,
-  });
-
-  sink(mockLogRecord);
-  sink(mockLogRecord);
-
-  await sink[Symbol.asyncDispose]();
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-  assertEquals(
-    cwlMock.commandCalls(PutLogEventsCommand)[0].args[0].input.logEvents
-      ?.length,
-    2,
-  );
-});
-
-test("getCloudWatchLogsSink() supports JSON Lines formatter", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 1,
-    flushInterval: 0,
-    formatter: jsonLinesFormatter,
-  });
-
-  const structuredLogRecord: LogRecord = {
-    category: ["app", "database"],
-    level: "error",
-    message: ["User ", { id: 123, name: "John" }, " failed to connect"],
-    rawMessage: "User {user} failed to connect",
-    timestamp: 1672531200000, // Fixed timestamp for testing
-    properties: { error: "Connection timeout", retries: 3 },
-  };
-
-  sink(structuredLogRecord);
-  await sink[Symbol.asyncDispose]();
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-  const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
-  const logMessage = call.args[0].input.logEvents?.[0].message;
-
-  // Parse the JSON message to verify structure
-  const parsedMessage = JSON.parse(logMessage!);
-
-  // Check what fields are actually present in jsonLinesFormatter output
-  assertEquals(parsedMessage["@timestamp"], "2023-01-01T00:00:00.000Z");
-  assertEquals(parsedMessage.level, "ERROR"); // jsonLinesFormatter uses uppercase
-  assertEquals(parsedMessage.logger, "app.database"); // category becomes logger
-  assertEquals(
-    parsedMessage.message,
-    'User {"id":123,"name":"John"} failed to connect',
-  ); // pre-formatted message
-  assertEquals(parsedMessage.properties.error, "Connection timeout");
-  assertEquals(parsedMessage.properties.retries, 3);
-});
-
-test("getCloudWatchLogsSink() uses default text formatter when no formatter provided", async () => {
-  const cwlMock = mockClient(CloudWatchLogsClient);
-  cwlMock.reset();
-  cwlMock.on(PutLogEventsCommand).resolves({});
-
-  const sink = getCloudWatchLogsSink({
-    logGroupName: "/test/log-group",
-    logStreamName: "test-stream",
-    batchSize: 1,
-    flushInterval: 0,
-    // No formatter specified - should use default
-  });
-
-  sink(mockLogRecord);
-  await sink[Symbol.asyncDispose]();
-
-  assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
-  const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
-  const logMessage = call.args[0].input.logEvents?.[0].message;
-
-  // Should be plain text, not JSON
-  assertEquals(logMessage, 'Hello, "world"!');
-});
package/src/sink.ts DELETED
@@ -1,167 +0,0 @@
-import {
-  CloudWatchLogsClient,
-  type InputLogEvent,
-  PutLogEventsCommand,
-} from "@aws-sdk/client-cloudwatch-logs";
-import {
-  getLogger,
-  type LogRecord,
-  type Sink,
-  type TextFormatter,
-} from "@logtape/logtape";
-import type { CloudWatchLogsSinkOptions } from "./types.ts";
-
-// AWS CloudWatch Logs PutLogEvents API limits
-// See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html
-const MAX_BATCH_SIZE_EVENTS = 10000; // Maximum 10,000 events per batch
-const MAX_BATCH_SIZE_BYTES = 1048576; // Maximum batch size: 1 MiB (1,048,576 bytes)
-const OVERHEAD_PER_EVENT = 26; // AWS overhead per log event: 26 bytes per event
-
-/**
- * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
- *
- * @param options Configuration options for the CloudWatch Logs sink.
- * @returns A sink that sends log records to CloudWatch Logs.
- * @since 1.0.0
- */
-export function getCloudWatchLogsSink(
-  options: CloudWatchLogsSinkOptions,
-): Sink & AsyncDisposable {
-  const client = options.client ??
-    new CloudWatchLogsClient({
-      region: options.region ?? "us-east-1",
-      credentials: options.credentials,
-    });
-
-  const batchSize = Math.min(
-    Math.max(options.batchSize ?? 1000, 1),
-    MAX_BATCH_SIZE_EVENTS,
-  );
-  const flushInterval = options.flushInterval ?? 1000;
-  const maxRetries = Math.max(options.maxRetries ?? 3, 0);
-  const retryDelay = Math.max(options.retryDelay ?? 100, 0);
-
-  // Default formatter that formats message parts into a simple string
-  const defaultFormatter: TextFormatter = (record) => {
-    let result = "";
-    for (let i = 0; i < record.message.length; i++) {
-      if (i % 2 === 0) {
-        result += record.message[i];
-      } else {
-        result += JSON.stringify(record.message[i]);
-      }
-    }
-    return result;
-  };
-
-  const formatter = options.formatter ?? defaultFormatter;
-
-  const logEvents: InputLogEvent[] = [];
-  let currentBatchSize = 0;
-  let flushTimer: ReturnType<typeof setTimeout> | null = null;
-  let disposed = false;
-
-  function scheduleFlush(): void {
-    if (flushInterval <= 0 || flushTimer !== null) return;
-
-    flushTimer = setTimeout(() => {
-      flushTimer = null;
-      if (logEvents.length > 0) {
-        void flushEvents();
-      }
-    }, flushInterval);
-  }
-
-  async function flushEvents(): Promise<void> {
-    if (logEvents.length === 0 || disposed) return;
-
-    const events = logEvents.splice(0);
-    currentBatchSize = 0;
-
-    if (flushTimer !== null) {
-      clearTimeout(flushTimer);
-      flushTimer = null;
-    }
-
-    await sendEventsWithRetry(events, maxRetries);
-  }
-
-  async function sendEventsWithRetry(
-    events: InputLogEvent[],
-    remainingRetries: number,
-  ): Promise<void> {
-    try {
-      const command = new PutLogEventsCommand({
-        logGroupName: options.logGroupName,
-        logStreamName: options.logStreamName,
-        logEvents: events,
-      });
-
-      await client.send(command);
-    } catch (error) {
-      if (remainingRetries > 0) {
-        // Calculate exponential backoff: base, base*2, base*4, etc.
-        const attemptNumber = maxRetries - remainingRetries;
-        const delay = retryDelay * Math.pow(2, attemptNumber);
-        await new Promise((resolve) => setTimeout(resolve, delay));
-        await sendEventsWithRetry(events, remainingRetries - 1);
-      } else {
-        // Log to meta logger to avoid crashing the application
-        const metaLogger = getLogger(["logtape", "meta", "cloudwatch-logs"]);
-        metaLogger.error(
-          "Failed to send log events to CloudWatch Logs after {maxRetries} retries: {error}",
-          { maxRetries, error },
-        );
-      }
-    }
-  }
-
-  function formatLogMessage(record: LogRecord): string {
-    return formatter(record);
-  }
-
-  const sink: Sink & AsyncDisposable = (record: LogRecord) => {
-    if (disposed) return;
-
-    // Skip meta logger logs to prevent infinite loops
-    if (
-      record.category[0] === "logtape" &&
-      record.category[1] === "meta" &&
-      record.category[2] === "cloudwatch-logs"
-    ) {
-      return;
-    }
-
-    const message = formatLogMessage(record);
-    const messageBytes = new TextEncoder().encode(message).length;
-    const eventSize = messageBytes + OVERHEAD_PER_EVENT;
-
-    const logEvent: InputLogEvent = {
-      timestamp: record.timestamp,
-      message,
-    };
-
-    logEvents.push(logEvent);
-    currentBatchSize += eventSize;
-
-    const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;
-    const shouldFlushByCount = logEvents.length >= batchSize;
-
-    if (shouldFlushBySize || shouldFlushByCount) {
-      void flushEvents();
-    } else {
-      scheduleFlush();
-    }
-  };
-
-  sink[Symbol.asyncDispose] = async () => {
-    if (flushTimer !== null) {
-      clearTimeout(flushTimer);
-      flushTimer = null;
-    }
-    await flushEvents();
-    disposed = true;
-  };
-
-  return sink;
-}
package/src/types.ts DELETED
@@ -1,78 +0,0 @@
-import type { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs";
-import type { TextFormatter } from "@logtape/logtape";
-
-/**
- * Options for configuring the CloudWatch Logs sink.
- * @since 1.0.0
- */
-export interface CloudWatchLogsSinkOptions {
-  /**
-   * An existing CloudWatch Logs client instance.
-   * If provided, the client will be used directly and other connection
-   * options (region, credentials) will be ignored.
-   */
-  readonly client?: CloudWatchLogsClient;
-
-  /**
-   * The name of the log group to send log events to.
-   */
-  readonly logGroupName: string;
-
-  /**
-   * The name of the log stream within the log group.
-   */
-  readonly logStreamName: string;
-
-  /**
-   * The AWS region to use when creating a new client.
-   * Ignored if `client` is provided.
-   * @default "us-east-1"
-   */
-  readonly region?: string;
-
-  /**
-   * AWS credentials to use when creating a new client.
-   * Ignored if `client` is provided.
-   * If not provided, the AWS SDK will use default credential resolution.
-   */
-  readonly credentials?: {
-    readonly accessKeyId: string;
-    readonly secretAccessKey: string;
-    readonly sessionToken?: string;
-  };
-
-  /**
-   * Maximum number of log events to batch before sending to CloudWatch.
-   * Must be between 1 and 10,000.
-   * @default 1000
-   */
-  readonly batchSize?: number;
-
-  /**
-   * Maximum time in milliseconds to wait before flushing buffered log events.
-   * Set to 0 or negative to disable time-based flushing.
-   * @default 1000
-   */
-  readonly flushInterval?: number;
-
-  /**
-   * Maximum number of retry attempts for failed requests.
-   * @default 3
-   */
-  readonly maxRetries?: number;
-
-  /**
-   * Initial delay in milliseconds for exponential backoff retry strategy.
-   * @default 100
-   */
-  readonly retryDelay?: number;
-
-  /**
-   * Text formatter to use for formatting log records before sending to CloudWatch Logs.
-   * If not provided, defaults to a simple text formatter.
-   * Use `jsonLinesFormatter()` from "@logtape/logtape" for JSON structured logging
-   * to enable powerful CloudWatch Logs Insights querying capabilities.
-   * @since 1.0.0
-   */
-  readonly formatter?: TextFormatter;
-}
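As the tests above assert, passing jsonLinesFormatter turns each CloudWatch message into one JSON object per event ("@timestamp", upper-cased "level", dotted "logger", a pre-formatted "message", and "properties"), which CloudWatch Logs Insights can query field by field. A sketch, with hypothetical names and the exact serialization left to the formatter:

  import { jsonLinesFormatter } from "@logtape/logtape";
  import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";

  const sink = getCloudWatchLogsSink({
    logGroupName: "/my-app/production", // hypothetical
    logStreamName: "web-1",             // hypothetical
    formatter: jsonLinesFormatter,
  });
  // A "warning" record logged under category ["api", "auth"] then arrives
  // shaped roughly like (per the integration test's assertions):
  // {"@timestamp":"2023-01-01T00:00:00.000Z","level":"WARN","logger":"api.auth",
  //  "message":"Failed login attempt for user {\"email\":\"test@example.com\",\"id\":456}",
  //  "properties":{"ip":"192.168.1.1","userAgent":"TestAgent/1.0","attempts":3}}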
package/tsdown.config.ts DELETED
@@ -1,11 +0,0 @@
-import { defineConfig } from "tsdown";
-
-export default defineConfig({
-  entry: ["src/mod.ts"],
-  dts: {
-    sourcemap: true,
-  },
-  format: ["esm", "cjs"],
-  platform: "neutral",
-  unbundle: true,
-});