@logtape/cloudwatch-logs 1.0.0-dev.211

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/sink.test.ts ADDED
@@ -0,0 +1,331 @@
1
+ import { suite } from "@alinea/suite";
2
+ import {
3
+ CloudWatchLogsClient,
4
+ PutLogEventsCommand,
5
+ } from "@aws-sdk/client-cloudwatch-logs";
6
+ import type { LogRecord } from "@logtape/logtape";
7
+ import { jsonLinesFormatter } from "@logtape/logtape";
8
+ import { assertEquals, assertInstanceOf } from "@std/assert";
9
+ import { mockClient } from "aws-sdk-client-mock";
10
+ import { getCloudWatchLogsSink } from "./sink.ts";
11
+
12
+ const test = suite(import.meta);
13
+
14
+ const mockLogRecord: LogRecord = {
15
+ category: ["test"],
16
+ level: "info",
17
+ message: ["Hello, ", "world", "!"],
18
+ rawMessage: "Hello, {name}!",
19
+ timestamp: Date.now(),
20
+ properties: {},
21
+ };
22
+
23
+ test("getCloudWatchLogsSink() creates a working sink", async () => {
24
+ const cwlMock = mockClient(CloudWatchLogsClient);
25
+ cwlMock.reset();
26
+ cwlMock.on(PutLogEventsCommand).resolves({});
27
+
28
+ const sink = getCloudWatchLogsSink({
29
+ logGroupName: "/test/log-group",
30
+ logStreamName: "test-stream",
31
+ region: "us-east-1",
32
+ batchSize: 1,
33
+ flushInterval: 0,
34
+ });
35
+
36
+ sink(mockLogRecord);
37
+ await sink[Symbol.asyncDispose]();
38
+
39
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
40
+ const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
41
+ assertEquals(call.args[0].input.logGroupName, "/test/log-group");
42
+ assertEquals(call.args[0].input.logStreamName, "test-stream");
43
+ assertEquals(call.args[0].input.logEvents?.length, 1);
44
+ assertEquals(call.args[0].input.logEvents?.[0].message, 'Hello, "world"!');
45
+ });
46
+
47
+ test("getCloudWatchLogsSink() batches multiple log events", async () => {
48
+ const cwlMock = mockClient(CloudWatchLogsClient);
49
+ cwlMock.reset();
50
+ cwlMock.on(PutLogEventsCommand).resolves({});
51
+
52
+ const sink = getCloudWatchLogsSink({
53
+ logGroupName: "/test/log-group",
54
+ logStreamName: "test-stream",
55
+ batchSize: 3,
56
+ flushInterval: 0,
57
+ });
58
+
59
+ sink(mockLogRecord);
60
+ sink(mockLogRecord);
61
+ sink(mockLogRecord);
62
+
63
+ await sink[Symbol.asyncDispose]();
64
+
65
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
66
+ const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
67
+ assertEquals(call.args[0].input.logEvents?.length, 3);
68
+ });
69
+
70
+ test("getCloudWatchLogsSink() flushes when batch size is reached", async () => {
71
+ const cwlMock = mockClient(CloudWatchLogsClient);
72
+ cwlMock.reset();
73
+ cwlMock.on(PutLogEventsCommand).resolves({});
74
+
75
+ const sink = getCloudWatchLogsSink({
76
+ logGroupName: "/test/log-group",
77
+ logStreamName: "test-stream",
78
+ batchSize: 2,
79
+ flushInterval: 0,
80
+ });
81
+
82
+ sink(mockLogRecord);
83
+ sink(mockLogRecord); // Should flush here
84
+ sink(mockLogRecord); // Should be in next batch
85
+
86
+ await sink[Symbol.asyncDispose](); // Should flush remaining
87
+
88
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 2);
89
+ assertEquals(
90
+ cwlMock.commandCalls(PutLogEventsCommand)[0].args[0].input.logEvents
91
+ ?.length,
92
+ 2,
93
+ );
94
+ assertEquals(
95
+ cwlMock.commandCalls(PutLogEventsCommand)[1].args[0].input.logEvents
96
+ ?.length,
97
+ 1,
98
+ );
99
+ });
100
+
101
+ test("getCloudWatchLogsSink() with custom client", async () => {
102
+ const client = new CloudWatchLogsClient({ region: "us-west-2" });
103
+ const cwlMock = mockClient(client);
104
+ cwlMock.on(PutLogEventsCommand).resolves({});
105
+
106
+ const sink = getCloudWatchLogsSink({
107
+ client,
108
+ logGroupName: "/test/log-group",
109
+ logStreamName: "test-stream",
110
+ batchSize: 1,
111
+ flushInterval: 0,
112
+ });
113
+
114
+ sink(mockLogRecord);
115
+ await sink[Symbol.asyncDispose]();
116
+
117
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
118
+ });
119
+
120
+ test("getCloudWatchLogsSink() handles credentials", () => {
121
+ const sink = getCloudWatchLogsSink({
122
+ logGroupName: "/test/log-group",
123
+ logStreamName: "test-stream",
124
+ region: "eu-west-1",
125
+ credentials: {
126
+ accessKeyId: "test-key",
127
+ secretAccessKey: "test-secret",
128
+ },
129
+ });
130
+
131
+ assertInstanceOf(sink, Function);
132
+ assertInstanceOf(sink[Symbol.asyncDispose], Function);
133
+ });
134
+
135
+ test("getCloudWatchLogsSink() handles errors gracefully", async () => {
136
+ const cwlMock = mockClient(CloudWatchLogsClient);
137
+ cwlMock.reset();
138
+ cwlMock.on(PutLogEventsCommand).rejects(new Error("Permanent failure"));
139
+
140
+ const sink = getCloudWatchLogsSink({
141
+ logGroupName: "/test/log-group",
142
+ logStreamName: "test-stream",
143
+ batchSize: 1,
144
+ flushInterval: 0,
145
+ maxRetries: 0, // No retries
146
+ retryDelay: 10,
147
+ });
148
+
149
+ sink(mockLogRecord);
150
+ await sink[Symbol.asyncDispose]();
151
+
152
+ // Should attempt once and fail gracefully
153
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
154
+ });
155
+
156
+ test("getCloudWatchLogsSink() handles large message batches", async () => {
157
+ const cwlMock = mockClient(CloudWatchLogsClient);
158
+ cwlMock.reset();
159
+ cwlMock.on(PutLogEventsCommand).resolves({});
160
+
161
+ // Create a message that will exceed 1MB when combined with overhead
162
+ const largeMessage = "x".repeat(600000); // ~600KB message
163
+ const largeLogRecord: LogRecord = {
164
+ category: ["test"],
165
+ level: "info",
166
+ message: [largeMessage],
167
+ rawMessage: largeMessage,
168
+ timestamp: Date.now(),
169
+ properties: {},
170
+ };
171
+
172
+ const sink = getCloudWatchLogsSink({
173
+ logGroupName: "/test/log-group",
174
+ logStreamName: "test-stream",
175
+ batchSize: 10,
176
+ flushInterval: 0,
177
+ });
178
+
179
+ // Add two large messages - should exceed 1MB limit
180
+ sink(largeLogRecord);
181
+ sink(largeLogRecord);
182
+
183
+ await sink[Symbol.asyncDispose]();
184
+
185
+ const calls = cwlMock.commandCalls(PutLogEventsCommand);
186
+ // Should either flush immediately due to size or flush remaining on dispose
187
+ assertEquals(calls.length >= 1, true);
188
+ });
189
+
190
+ test("getCloudWatchLogsSink() formats complex log messages", async () => {
191
+ const cwlMock = mockClient(CloudWatchLogsClient);
192
+ cwlMock.reset();
193
+ cwlMock.on(PutLogEventsCommand).resolves({});
194
+
195
+ const complexLogRecord: LogRecord = {
196
+ category: ["app", "module"],
197
+ level: "error",
198
+ message: ["User ", { id: 123, name: "John" }, " failed to login"],
199
+ rawMessage: "User {user} failed to login",
200
+ timestamp: Date.now(),
201
+ properties: { error: "Invalid password" },
202
+ };
203
+
204
+ const sink = getCloudWatchLogsSink({
205
+ logGroupName: "/test/log-group",
206
+ logStreamName: "test-stream",
207
+ batchSize: 1,
208
+ flushInterval: 0,
209
+ });
210
+
211
+ sink(complexLogRecord);
212
+ await sink[Symbol.asyncDispose]();
213
+
214
+ const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
215
+ assertEquals(
216
+ call.args[0].input.logEvents?.[0].message,
217
+ 'User {"id":123,"name":"John"} failed to login',
218
+ );
219
+ });
220
+
221
+ test("getCloudWatchLogsSink() respects batch size limits", async () => {
222
+ const cwlMock = mockClient(CloudWatchLogsClient);
223
+ cwlMock.reset();
224
+ cwlMock.on(PutLogEventsCommand).resolves({});
225
+
226
+ const sink = getCloudWatchLogsSink({
227
+ logGroupName: "/test/log-group",
228
+ logStreamName: "test-stream",
229
+ batchSize: 50000, // Should be clamped to 10000
230
+ flushInterval: 0,
231
+ });
232
+
233
+ // Verify the sink works (batch size should be internally limited)
234
+ sink(mockLogRecord);
235
+ await sink[Symbol.asyncDispose]();
236
+
237
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
238
+ });
239
+
240
+ test("getCloudWatchLogsSink() flushes remaining events on disposal", async () => {
241
+ const cwlMock = mockClient(CloudWatchLogsClient);
242
+ cwlMock.reset();
243
+ cwlMock.on(PutLogEventsCommand).resolves({});
244
+
245
+ const sink = getCloudWatchLogsSink({
246
+ logGroupName: "/test/log-group",
247
+ logStreamName: "test-stream",
248
+ batchSize: 10,
249
+ flushInterval: 0,
250
+ });
251
+
252
+ sink(mockLogRecord);
253
+ sink(mockLogRecord);
254
+
255
+ await sink[Symbol.asyncDispose]();
256
+
257
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
258
+ assertEquals(
259
+ cwlMock.commandCalls(PutLogEventsCommand)[0].args[0].input.logEvents
260
+ ?.length,
261
+ 2,
262
+ );
263
+ });
264
+
265
+ test("getCloudWatchLogsSink() supports JSON Lines formatter", async () => {
266
+ const cwlMock = mockClient(CloudWatchLogsClient);
267
+ cwlMock.reset();
268
+ cwlMock.on(PutLogEventsCommand).resolves({});
269
+
270
+ const sink = getCloudWatchLogsSink({
271
+ logGroupName: "/test/log-group",
272
+ logStreamName: "test-stream",
273
+ batchSize: 1,
274
+ flushInterval: 0,
275
+ formatter: jsonLinesFormatter,
276
+ });
277
+
278
+ const structuredLogRecord: LogRecord = {
279
+ category: ["app", "database"],
280
+ level: "error",
281
+ message: ["User ", { id: 123, name: "John" }, " failed to connect"],
282
+ rawMessage: "User {user} failed to connect",
283
+ timestamp: 1672531200000, // Fixed timestamp for testing
284
+ properties: { error: "Connection timeout", retries: 3 },
285
+ };
286
+
287
+ sink(structuredLogRecord);
288
+ await sink[Symbol.asyncDispose]();
289
+
290
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
291
+ const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
292
+ const logMessage = call.args[0].input.logEvents?.[0].message;
293
+
294
+ // Parse the JSON message to verify structure
295
+ const parsedMessage = JSON.parse(logMessage!);
296
+
297
+ // Check what fields are actually present in jsonLinesFormatter output
298
+ assertEquals(parsedMessage["@timestamp"], "2023-01-01T00:00:00.000Z");
299
+ assertEquals(parsedMessage.level, "ERROR"); // jsonLinesFormatter uses uppercase
300
+ assertEquals(parsedMessage.logger, "app.database"); // category becomes logger
301
+ assertEquals(
302
+ parsedMessage.message,
303
+ 'User {"id":123,"name":"John"} failed to connect',
304
+ ); // pre-formatted message
305
+ assertEquals(parsedMessage.properties.error, "Connection timeout");
306
+ assertEquals(parsedMessage.properties.retries, 3);
307
+ });
308
+
309
+ test("getCloudWatchLogsSink() uses default text formatter when no formatter provided", async () => {
310
+ const cwlMock = mockClient(CloudWatchLogsClient);
311
+ cwlMock.reset();
312
+ cwlMock.on(PutLogEventsCommand).resolves({});
313
+
314
+ const sink = getCloudWatchLogsSink({
315
+ logGroupName: "/test/log-group",
316
+ logStreamName: "test-stream",
317
+ batchSize: 1,
318
+ flushInterval: 0,
319
+ // No formatter specified - should use default
320
+ });
321
+
322
+ sink(mockLogRecord);
323
+ await sink[Symbol.asyncDispose]();
324
+
325
+ assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
326
+ const call = cwlMock.commandCalls(PutLogEventsCommand)[0];
327
+ const logMessage = call.args[0].input.logEvents?.[0].message;
328
+
329
+ // Should be plain text, not JSON
330
+ assertEquals(logMessage, 'Hello, "world"!');
331
+ });
package/sink.ts ADDED
@@ -0,0 +1,140 @@
1
+ import {
2
+ CloudWatchLogsClient,
3
+ type InputLogEvent,
4
+ PutLogEventsCommand,
5
+ } from "@aws-sdk/client-cloudwatch-logs";
6
+ import type { LogRecord, Sink, TextFormatter } from "@logtape/logtape";
7
+ import type { CloudWatchLogsSinkOptions } from "./types.ts";
8
+
9
+ /**
10
+ * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
11
+ *
12
+ * @param options Configuration options for the CloudWatch Logs sink.
13
+ * @returns A sink that sends log records to CloudWatch Logs.
14
+ * @since 1.0.0
15
+ */
16
+ export function getCloudWatchLogsSink(
17
+ options: CloudWatchLogsSinkOptions,
18
+ ): Sink & AsyncDisposable {
19
+ const client = options.client ??
20
+ new CloudWatchLogsClient({
21
+ region: options.region ?? "us-east-1",
22
+ credentials: options.credentials,
23
+ });
24
+
25
+ const batchSize = Math.min(Math.max(options.batchSize ?? 1000, 1), 10000);
26
+ const flushInterval = options.flushInterval ?? 1000;
27
+ const maxRetries = Math.max(options.maxRetries ?? 3, 0);
28
+ const retryDelay = Math.max(options.retryDelay ?? 100, 0);
29
+
30
+ // Default formatter that formats message parts into a simple string
31
+ const defaultFormatter: TextFormatter = (record) => {
32
+ let result = "";
33
+ for (let i = 0; i < record.message.length; i++) {
34
+ if (i % 2 === 0) {
35
+ result += record.message[i];
36
+ } else {
37
+ result += JSON.stringify(record.message[i]);
38
+ }
39
+ }
40
+ return result;
41
+ };
42
+
43
+ const formatter = options.formatter ?? defaultFormatter;
44
+
45
+ const logEvents: InputLogEvent[] = [];
46
+ let currentBatchSize = 0;
47
+ let flushTimer: ReturnType<typeof setTimeout> | null = null;
48
+ let disposed = false;
49
+
50
+ const OVERHEAD_PER_EVENT = 26; // AWS overhead per log event
51
+ const MAX_BATCH_SIZE_BYTES = 1048576; // 1 MiB
52
+
53
+ function scheduleFlush(): void {
54
+ if (flushInterval <= 0 || flushTimer !== null) return;
55
+
56
+ flushTimer = setTimeout(() => {
57
+ flushTimer = null;
58
+ if (logEvents.length > 0) {
59
+ void flushEvents();
60
+ }
61
+ }, flushInterval);
62
+ }
63
+
64
+ async function flushEvents(): Promise<void> {
65
+ if (logEvents.length === 0 || disposed) return;
66
+
67
+ const events = logEvents.splice(0);
68
+ currentBatchSize = 0;
69
+
70
+ if (flushTimer !== null) {
71
+ clearTimeout(flushTimer);
72
+ flushTimer = null;
73
+ }
74
+
75
+ await sendEventsWithRetry(events, maxRetries);
76
+ }
77
+
78
+ async function sendEventsWithRetry(
79
+ events: InputLogEvent[],
80
+ remainingRetries: number,
81
+ ): Promise<void> {
82
+ try {
83
+ const command = new PutLogEventsCommand({
84
+ logGroupName: options.logGroupName,
85
+ logStreamName: options.logStreamName,
86
+ logEvents: events,
87
+ });
88
+
89
+ await client.send(command);
90
+ } catch (error) {
91
+ if (remainingRetries > 0) {
92
+ const delay = retryDelay * Math.pow(2, maxRetries - remainingRetries);
93
+ await new Promise((resolve) => setTimeout(resolve, delay));
94
+ await sendEventsWithRetry(events, remainingRetries - 1);
95
+ } else {
96
+ console.error("Failed to send log events to CloudWatch Logs:", error);
97
+ }
98
+ }
99
+ }
100
+
101
+ function formatLogMessage(record: LogRecord): string {
102
+ return formatter(record);
103
+ }
104
+
105
+ const sink: Sink & AsyncDisposable = (record: LogRecord) => {
106
+ if (disposed) return;
107
+
108
+ const message = formatLogMessage(record);
109
+ const messageBytes = new TextEncoder().encode(message).length;
110
+ const eventSize = messageBytes + OVERHEAD_PER_EVENT;
111
+
112
+ const logEvent: InputLogEvent = {
113
+ timestamp: record.timestamp,
114
+ message,
115
+ };
116
+
117
+ logEvents.push(logEvent);
118
+ currentBatchSize += eventSize;
119
+
120
+ const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;
121
+ const shouldFlushByCount = logEvents.length >= batchSize;
122
+
123
+ if (shouldFlushBySize || shouldFlushByCount) {
124
+ void flushEvents();
125
+ } else {
126
+ scheduleFlush();
127
+ }
128
+ };
129
+
130
+ sink[Symbol.asyncDispose] = async () => {
131
+ if (flushTimer !== null) {
132
+ clearTimeout(flushTimer);
133
+ flushTimer = null;
134
+ }
135
+ await flushEvents();
136
+ disposed = true;
137
+ };
138
+
139
+ return sink;
140
+ }
@@ -0,0 +1,11 @@
1
+ import { defineConfig } from "tsdown";
2
+
3
+ export default defineConfig({
4
+ entry: ["mod.ts"],
5
+ dts: {
6
+ sourcemap: true,
7
+ },
8
+ format: ["esm", "cjs"],
9
+ platform: "neutral",
10
+ unbundle: true,
11
+ });
package/types.ts ADDED
@@ -0,0 +1,78 @@
1
+ import type { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs";
2
+ import type { TextFormatter } from "@logtape/logtape";
3
+
4
/**
 * Options for configuring the CloudWatch Logs sink.
 * @since 1.0.0
 */
export interface CloudWatchLogsSinkOptions {
  /**
   * An existing CloudWatch Logs client instance.
   * If provided, the client will be used directly and other connection
   * options (region, credentials) will be ignored.
   */
  readonly client?: CloudWatchLogsClient;

  /**
   * The name of the log group to send log events to.
   */
  readonly logGroupName: string;

  /**
   * The name of the log stream within the log group.
   */
  readonly logStreamName: string;

  /**
   * The AWS region to use when creating a new client.
   * Ignored if `client` is provided.
   * @default "us-east-1"
   */
  readonly region?: string;

  /**
   * AWS credentials to use when creating a new client.
   * Ignored if `client` is provided.
   * If not provided, the AWS SDK will use default credential resolution.
   */
  readonly credentials?: {
    readonly accessKeyId: string;
    readonly secretAccessKey: string;
    readonly sessionToken?: string;
  };

  /**
   * Maximum number of log events to batch before sending to CloudWatch.
   * Must be between 1 and 10,000; values outside this range are clamped
   * by the sink.
   * @default 1000
   */
  readonly batchSize?: number;

  /**
   * Maximum time in milliseconds to wait before flushing buffered log events.
   * Set to 0 or negative to disable time-based flushing.
   * @default 1000
   */
  readonly flushInterval?: number;

  /**
   * Maximum number of retry attempts for failed requests.
   * Negative values are treated as 0 (no retries).
   * @default 3
   */
  readonly maxRetries?: number;

  /**
   * Initial delay in milliseconds for exponential backoff retry strategy.
   * @default 100
   */
  readonly retryDelay?: number;

  /**
   * Text formatter to use for formatting log records before sending to CloudWatch Logs.
   * If not provided, defaults to a simple text formatter.
   * Use `jsonLinesFormatter()` from "@logtape/logtape" for JSON structured logging
   * to enable powerful CloudWatch Logs Insights querying capabilities.
   * @since 1.0.0
   */
  readonly formatter?: TextFormatter;
}
+ }