@logtape/cloudwatch-logs 1.0.0-dev.211

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,20 @@
+ MIT License
+
+ Copyright 2024 Hong Minhee
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ the Software, and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,132 @@
+ @logtape/cloudwatch-logs: LogTape AWS CloudWatch Logs Sink
+ ==========================================================
+
+ [![JSR][JSR badge]][JSR]
+ [![npm][npm badge]][npm]
+ [![GitHub Actions][GitHub Actions badge]][GitHub Actions]
+
+ This package provides an [AWS CloudWatch Logs] sink for [LogTape]. It allows you to
+ send your LogTape logs directly to AWS CloudWatch Logs with intelligent batching
+ and error handling.
+
+ [JSR]: https://jsr.io/@logtape/cloudwatch-logs
+ [JSR badge]: https://jsr.io/badges/@logtape/cloudwatch-logs
+ [npm]: https://www.npmjs.com/package/@logtape/cloudwatch-logs
+ [npm badge]: https://img.shields.io/npm/v/@logtape/cloudwatch-logs?logo=npm
+ [GitHub Actions]: https://github.com/dahlia/logtape/actions/workflows/main.yaml
+ [GitHub Actions badge]: https://github.com/dahlia/logtape/actions/workflows/main.yaml/badge.svg
+ [AWS CloudWatch Logs]: https://aws.amazon.com/cloudwatch/
+ [LogTape]: https://logtape.org/
+
+
+ Installation
+ ------------
+
+ The package is available on [JSR] and [npm].
+
+ ~~~~ bash
+ deno add jsr:@logtape/cloudwatch-logs # for Deno
+ npm add @logtape/cloudwatch-logs # for npm
+ pnpm add @logtape/cloudwatch-logs # for pnpm
+ yarn add @logtape/cloudwatch-logs # for Yarn
+ bun add @logtape/cloudwatch-logs # for Bun
+ ~~~~
+
+
+ Usage
+ -----
+
+ The quickest way to get started is to use the `getCloudWatchLogsSink()` function
+ with your log group and stream names:
+
+ ~~~~ typescript
+ import { configure } from "@logtape/logtape";
+ import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";
+
+ await configure({
+   sinks: {
+     cloudwatch: getCloudWatchLogsSink({
+       logGroupName: "/aws/lambda/my-function",
+       logStreamName: "my-stream",
+       region: "us-east-1",
+     }),
+   },
+   loggers: [
+     { category: [], sinks: ["cloudwatch"], lowestLevel: "info" },
+   ],
+ });
+ ~~~~
+
+ You can also pass an existing CloudWatch Logs client:
+
+ ~~~~ typescript
+ import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs";
+ import { configure } from "@logtape/logtape";
+ import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";
+
+ const client = new CloudWatchLogsClient({ region: "us-east-1" });
+
+ await configure({
+   sinks: {
+     cloudwatch: getCloudWatchLogsSink({
+       client,
+       logGroupName: "/aws/lambda/my-function",
+       logStreamName: "my-stream",
+     }),
+   },
+   loggers: [
+     { category: [], sinks: ["cloudwatch"], lowestLevel: "info" },
+   ],
+ });
+ ~~~~
+
+
+ Testing
+ -------
+
+ The package includes both unit tests and integration tests. Unit tests use
+ mocked AWS SDK calls and can be run without AWS credentials:
+
+ ~~~~ bash
+ deno task test
+ ~~~~
+
+ Integration tests require real AWS credentials and will create temporary log
+ groups and streams in your AWS account:
+
+ ~~~~ bash
+ export AWS_ACCESS_KEY_ID="your-access-key"
+ export AWS_SECRET_ACCESS_KEY="your-secret-key"
+ export AWS_REGION="us-east-1"
+ deno task test
+ ~~~~
+
+ Integration tests will automatically skip if AWS credentials are not provided.
+
+ > [!WARNING]
+ > Integration tests may incur small AWS charges for CloudWatch Logs usage.
+ > Test resources are automatically cleaned up after each test.
+
+
+ Required IAM permissions
+ ------------------------
+
+ To use this sink, your AWS credentials need the following CloudWatch Logs
+ permissions:
+
+ ~~~~ json
+ {
+   "Version": "2012-10-17",
+   "Statement": [
+     {
+       "Effect": "Allow",
+       "Action": [
+         "logs:CreateLogGroup",
+         "logs:CreateLogStream",
+         "logs:PutLogEvents"
+       ],
+       "Resource": "arn:aws:logs:*:*:*"
+     }
+   ]
+ }
+ ~~~~
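The README covers the two construction paths above; the options interface shipped in `dist/types.d.ts` additionally documents batching and retry tuning (`batchSize`, `flushInterval`, `maxRetries`, `retryDelay`). A minimal sketch of a tuned configuration, not part of the published README, with illustrative values rather than recommendations:

~~~~ typescript
import { configure } from "@logtape/logtape";
import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";

await configure({
  sinks: {
    cloudwatch: getCloudWatchLogsSink({
      logGroupName: "/aws/lambda/my-function",
      logStreamName: "my-stream",
      region: "us-east-1",
      batchSize: 500,      // flush once 500 events are buffered (allowed range: 1-10,000)
      flushInterval: 2000, // ...or 2 s after the first buffered event, whichever comes first
      maxRetries: 5,       // retry failed PutLogEvents calls with exponential backoff
      retryDelay: 200,     // initial backoff delay in milliseconds, doubled per retry
    }),
  },
  loggers: [
    { category: [], sinks: ["cloudwatch"], lowestLevel: "info" },
  ],
});
~~~~

The `formatter` option (documented in `dist/types.d.ts` below) accepts any LogTape `TextFormatter`; its documentation suggests the JSON Lines formatter from `@logtape/logtape` when you want structured records for CloudWatch Logs Insights.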
package/deno.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "name": "@logtape/cloudwatch-logs",
+   "version": "1.0.0-dev.211+7d3e5fdb",
+   "license": "MIT",
+   "exports": "./mod.ts",
+   "exclude": [
+     "./dist/"
+   ],
+   "imports": {
+     "@aws-sdk/client-cloudwatch-logs": "npm:@aws-sdk/client-cloudwatch-logs@^3.0.0",
+     "@dotenvx/dotenvx": "npm:@dotenvx/dotenvx@^1.44.2"
+   },
+   "tasks": {
+     "build": "pnpm build",
+     "test": "deno test --allow-read --allow-net --allow-sys --allow-env",
+     "test:node": {
+       "dependencies": [
+         "build"
+       ],
+       "command": "node --experimental-transform-types --test"
+     },
+     "test:bun": {
+       "dependencies": [
+         "build"
+       ],
+       "command": "bun test --timeout=10000"
+     },
+     "test-all": {
+       "dependencies": [
+         "test",
+         "test:node",
+         "test:bun"
+       ]
+     }
+   }
+ }
package/dist/_virtual/rolldown_runtime.cjs ADDED
@@ -0,0 +1,30 @@
+ //#region rolldown:runtime
+ var __create = Object.create;
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
+     key = keys[i];
+     if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
+       get: ((k) => from[k]).bind(null, key),
+       enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
+     });
+   }
+   return to;
+ };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
+   value: mod,
+   enumerable: true
+ }) : target, mod));
+
+ //#endregion
+
+ Object.defineProperty(exports, '__toESM', {
+   enumerable: true,
+   get: function () {
+     return __toESM;
+   }
+ });
package/dist/mod.cjs ADDED
@@ -0,0 +1,3 @@
+ const require_sink = require('./sink.cjs');
+
+ exports.getCloudWatchLogsSink = require_sink.getCloudWatchLogsSink;
package/dist/mod.d.cts ADDED
@@ -0,0 +1,3 @@
+ import { CloudWatchLogsSinkOptions } from "./types.cjs";
+ import { getCloudWatchLogsSink } from "./sink.cjs";
+ export { CloudWatchLogsSinkOptions, getCloudWatchLogsSink };
package/dist/mod.d.ts ADDED
@@ -0,0 +1,3 @@
+ import { CloudWatchLogsSinkOptions } from "./types.js";
+ import { getCloudWatchLogsSink } from "./sink.js";
+ export { CloudWatchLogsSinkOptions, getCloudWatchLogsSink };
package/dist/mod.js ADDED
@@ -0,0 +1,3 @@
+ import { getCloudWatchLogsSink } from "./sink.js";
+
+ export { getCloudWatchLogsSink };
package/dist/sink.cjs ADDED
@@ -0,0 +1,98 @@
+ const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
+ const __aws_sdk_client_cloudwatch_logs = require_rolldown_runtime.__toESM(require("@aws-sdk/client-cloudwatch-logs"));
+
+ //#region sink.ts
+ /**
+  * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
+  *
+  * @param options Configuration options for the CloudWatch Logs sink.
+  * @returns A sink that sends log records to CloudWatch Logs.
+  * @since 1.0.0
+  */
+ function getCloudWatchLogsSink(options) {
+   const client = options.client ?? new __aws_sdk_client_cloudwatch_logs.CloudWatchLogsClient({
+     region: options.region ?? "us-east-1",
+     credentials: options.credentials
+   });
+   const batchSize = Math.min(Math.max(options.batchSize ?? 1e3, 1), 1e4);
+   const flushInterval = options.flushInterval ?? 1e3;
+   const maxRetries = Math.max(options.maxRetries ?? 3, 0);
+   const retryDelay = Math.max(options.retryDelay ?? 100, 0);
+   const defaultFormatter = (record) => {
+     let result = "";
+     for (let i = 0; i < record.message.length; i++) if (i % 2 === 0) result += record.message[i];
+     else result += JSON.stringify(record.message[i]);
+     return result;
+   };
+   const formatter = options.formatter ?? defaultFormatter;
+   const logEvents = [];
+   let currentBatchSize = 0;
+   let flushTimer = null;
+   let disposed = false;
+   const OVERHEAD_PER_EVENT = 26;
+   const MAX_BATCH_SIZE_BYTES = 1048576;
+   function scheduleFlush() {
+     if (flushInterval <= 0 || flushTimer !== null) return;
+     flushTimer = setTimeout(() => {
+       flushTimer = null;
+       if (logEvents.length > 0) flushEvents();
+     }, flushInterval);
+   }
+   async function flushEvents() {
+     if (logEvents.length === 0 || disposed) return;
+     const events = logEvents.splice(0);
+     currentBatchSize = 0;
+     if (flushTimer !== null) {
+       clearTimeout(flushTimer);
+       flushTimer = null;
+     }
+     await sendEventsWithRetry(events, maxRetries);
+   }
+   async function sendEventsWithRetry(events, remainingRetries) {
+     try {
+       const command = new __aws_sdk_client_cloudwatch_logs.PutLogEventsCommand({
+         logGroupName: options.logGroupName,
+         logStreamName: options.logStreamName,
+         logEvents: events
+       });
+       await client.send(command);
+     } catch (error) {
+       if (remainingRetries > 0) {
+         const delay = retryDelay * Math.pow(2, maxRetries - remainingRetries);
+         await new Promise((resolve) => setTimeout(resolve, delay));
+         await sendEventsWithRetry(events, remainingRetries - 1);
+       } else console.error("Failed to send log events to CloudWatch Logs:", error);
+     }
+   }
+   function formatLogMessage(record) {
+     return formatter(record);
+   }
+   const sink = (record) => {
+     if (disposed) return;
+     const message = formatLogMessage(record);
+     const messageBytes = new TextEncoder().encode(message).length;
+     const eventSize = messageBytes + OVERHEAD_PER_EVENT;
+     const logEvent = {
+       timestamp: record.timestamp,
+       message
+     };
+     logEvents.push(logEvent);
+     currentBatchSize += eventSize;
+     const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;
+     const shouldFlushByCount = logEvents.length >= batchSize;
+     if (shouldFlushBySize || shouldFlushByCount) flushEvents();
+     else scheduleFlush();
+   };
+   sink[Symbol.asyncDispose] = async () => {
+     if (flushTimer !== null) {
+       clearTimeout(flushTimer);
+       flushTimer = null;
+     }
+     await flushEvents();
+     disposed = true;
+   };
+   return sink;
+ }
+
+ //#endregion
+ exports.getCloudWatchLogsSink = getCloudWatchLogsSink;
package/dist/sink.d.cts ADDED
@@ -0,0 +1,18 @@
+ import { CloudWatchLogsSinkOptions } from "./types.cjs";
+ import { Sink } from "@logtape/logtape";
+
+ //#region sink.d.ts
+
+ /**
+  * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
+  *
+  * @param options Configuration options for the CloudWatch Logs sink.
+  * @returns A sink that sends log records to CloudWatch Logs.
+  * @since 1.0.0
+  */
+ declare function getCloudWatchLogsSink(options: CloudWatchLogsSinkOptions): Sink & AsyncDisposable;
+ //# sourceMappingURL=sink.d.ts.map
+
+ //#endregion
+ export { getCloudWatchLogsSink };
+ //# sourceMappingURL=sink.d.cts.map
package/dist/sink.d.cts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"sink.d.cts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;AAeA;;;;;AAEyB,iBAFT,qBAAA,CAES,OAAA,EADd,yBACc,CAAA,EAAtB,IAAsB,GAAf,eAAe"}
package/dist/sink.d.ts ADDED
@@ -0,0 +1,18 @@
+ import { CloudWatchLogsSinkOptions } from "./types.js";
+ import { Sink } from "@logtape/logtape";
+
+ //#region sink.d.ts
+
+ /**
+  * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
+  *
+  * @param options Configuration options for the CloudWatch Logs sink.
+  * @returns A sink that sends log records to CloudWatch Logs.
+  * @since 1.0.0
+  */
+ declare function getCloudWatchLogsSink(options: CloudWatchLogsSinkOptions): Sink & AsyncDisposable;
+ //# sourceMappingURL=sink.d.ts.map
+
+ //#endregion
+ export { getCloudWatchLogsSink };
+ //# sourceMappingURL=sink.d.ts.map
package/dist/sink.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"sink.d.ts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;AAeA;;;;;AAEyB,iBAFT,qBAAA,CAES,OAAA,EADd,yBACc,CAAA,EAAtB,IAAsB,GAAf,eAAe"}
package/dist/sink.js ADDED
@@ -0,0 +1,98 @@
+ import { CloudWatchLogsClient, PutLogEventsCommand } from "@aws-sdk/client-cloudwatch-logs";
+
+ //#region sink.ts
+ /**
+  * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
+  *
+  * @param options Configuration options for the CloudWatch Logs sink.
+  * @returns A sink that sends log records to CloudWatch Logs.
+  * @since 1.0.0
+  */
+ function getCloudWatchLogsSink(options) {
+   const client = options.client ?? new CloudWatchLogsClient({
+     region: options.region ?? "us-east-1",
+     credentials: options.credentials
+   });
+   const batchSize = Math.min(Math.max(options.batchSize ?? 1e3, 1), 1e4);
+   const flushInterval = options.flushInterval ?? 1e3;
+   const maxRetries = Math.max(options.maxRetries ?? 3, 0);
+   const retryDelay = Math.max(options.retryDelay ?? 100, 0);
+   const defaultFormatter = (record) => {
+     let result = "";
+     for (let i = 0; i < record.message.length; i++) if (i % 2 === 0) result += record.message[i];
+     else result += JSON.stringify(record.message[i]);
+     return result;
+   };
+   const formatter = options.formatter ?? defaultFormatter;
+   const logEvents = [];
+   let currentBatchSize = 0;
+   let flushTimer = null;
+   let disposed = false;
+   const OVERHEAD_PER_EVENT = 26;
+   const MAX_BATCH_SIZE_BYTES = 1048576;
+   function scheduleFlush() {
+     if (flushInterval <= 0 || flushTimer !== null) return;
+     flushTimer = setTimeout(() => {
+       flushTimer = null;
+       if (logEvents.length > 0) flushEvents();
+     }, flushInterval);
+   }
+   async function flushEvents() {
+     if (logEvents.length === 0 || disposed) return;
+     const events = logEvents.splice(0);
+     currentBatchSize = 0;
+     if (flushTimer !== null) {
+       clearTimeout(flushTimer);
+       flushTimer = null;
+     }
+     await sendEventsWithRetry(events, maxRetries);
+   }
+   async function sendEventsWithRetry(events, remainingRetries) {
+     try {
+       const command = new PutLogEventsCommand({
+         logGroupName: options.logGroupName,
+         logStreamName: options.logStreamName,
+         logEvents: events
+       });
+       await client.send(command);
+     } catch (error) {
+       if (remainingRetries > 0) {
+         const delay = retryDelay * Math.pow(2, maxRetries - remainingRetries);
+         await new Promise((resolve) => setTimeout(resolve, delay));
+         await sendEventsWithRetry(events, remainingRetries - 1);
+       } else console.error("Failed to send log events to CloudWatch Logs:", error);
+     }
+   }
+   function formatLogMessage(record) {
+     return formatter(record);
+   }
+   const sink = (record) => {
+     if (disposed) return;
+     const message = formatLogMessage(record);
+     const messageBytes = new TextEncoder().encode(message).length;
+     const eventSize = messageBytes + OVERHEAD_PER_EVENT;
+     const logEvent = {
+       timestamp: record.timestamp,
+       message
+     };
+     logEvents.push(logEvent);
+     currentBatchSize += eventSize;
+     const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;
+     const shouldFlushByCount = logEvents.length >= batchSize;
+     if (shouldFlushBySize || shouldFlushByCount) flushEvents();
+     else scheduleFlush();
+   };
+   sink[Symbol.asyncDispose] = async () => {
+     if (flushTimer !== null) {
+       clearTimeout(flushTimer);
+       flushTimer = null;
+     }
+     await flushEvents();
+     disposed = true;
+   };
+   return sink;
+ }
+
+ //#endregion
+ export { getCloudWatchLogsSink };
+ //# sourceMappingURL=sink.js.map
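As the bundle above shows, the sink counts each event as its UTF-8 message length plus a fixed 26-byte overhead and flushes once the running total exceeds 1 MiB (or once `batchSize` events are buffered), and its default formatter concatenates the literal parts of `record.message` (even indices) with `JSON.stringify` of the interpolated values (odd indices). A hedged sketch of swapping in a custom `TextFormatter` instead — the option name comes from `dist/types.d.ts`, and the record fields (`timestamp`, `level`, `message`) from LogTape's `LogRecord`:

~~~~ typescript
import { configure, type LogRecord } from "@logtape/logtape";
import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";

// Prefix each message with an ISO timestamp and the level, then interleave the
// message template text (even indices) with stringified values (odd indices),
// mirroring what the bundled default formatter does.
const plainFormatter = (record: LogRecord): string => {
  let text = "";
  for (let i = 0; i < record.message.length; i++) {
    text += i % 2 === 0
      ? String(record.message[i])
      : JSON.stringify(record.message[i]);
  }
  return `${new Date(record.timestamp).toISOString()} [${record.level}] ${text}`;
};

await configure({
  sinks: {
    cloudwatch: getCloudWatchLogsSink({
      logGroupName: "/aws/lambda/my-function",
      logStreamName: "my-stream",
      formatter: plainFormatter,
    }),
  },
  loggers: [{ category: [], sinks: ["cloudwatch"], lowestLevel: "info" }],
});
~~~~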
package/dist/sink.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"sink.js","names":["options: CloudWatchLogsSinkOptions","defaultFormatter: TextFormatter","logEvents: InputLogEvent[]","flushTimer: ReturnType<typeof setTimeout> | null","events: InputLogEvent[]","remainingRetries: number","record: LogRecord","sink: Sink & AsyncDisposable","logEvent: InputLogEvent"],"sources":["../sink.ts"],"sourcesContent":["import {\n CloudWatchLogsClient,\n type InputLogEvent,\n PutLogEventsCommand,\n} from \"@aws-sdk/client-cloudwatch-logs\";\nimport type { LogRecord, Sink, TextFormatter } from \"@logtape/logtape\";\nimport type { CloudWatchLogsSinkOptions } from \"./types.ts\";\n\n/**\n * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.\n *\n * @param options Configuration options for the CloudWatch Logs sink.\n * @returns A sink that sends log records to CloudWatch Logs.\n * @since 1.0.0\n */\nexport function getCloudWatchLogsSink(\n options: CloudWatchLogsSinkOptions,\n): Sink & AsyncDisposable {\n const client = options.client ??\n new CloudWatchLogsClient({\n region: options.region ?? \"us-east-1\",\n credentials: options.credentials,\n });\n\n const batchSize = Math.min(Math.max(options.batchSize ?? 1000, 1), 10000);\n const flushInterval = options.flushInterval ?? 1000;\n const maxRetries = Math.max(options.maxRetries ?? 3, 0);\n const retryDelay = Math.max(options.retryDelay ?? 100, 0);\n\n // Default formatter that formats message parts into a simple string\n const defaultFormatter: TextFormatter = (record) => {\n let result = \"\";\n for (let i = 0; i < record.message.length; i++) {\n if (i % 2 === 0) {\n result += record.message[i];\n } else {\n result += JSON.stringify(record.message[i]);\n }\n }\n return result;\n };\n\n const formatter = options.formatter ?? defaultFormatter;\n\n const logEvents: InputLogEvent[] = [];\n let currentBatchSize = 0;\n let flushTimer: ReturnType<typeof setTimeout> | null = null;\n let disposed = false;\n\n const OVERHEAD_PER_EVENT = 26; // AWS overhead per log event\n const MAX_BATCH_SIZE_BYTES = 1048576; // 1 MiB\n\n function scheduleFlush(): void {\n if (flushInterval <= 0 || flushTimer !== null) return;\n\n flushTimer = setTimeout(() => {\n flushTimer = null;\n if (logEvents.length > 0) {\n void flushEvents();\n }\n }, flushInterval);\n }\n\n async function flushEvents(): Promise<void> {\n if (logEvents.length === 0 || disposed) return;\n\n const events = logEvents.splice(0);\n currentBatchSize = 0;\n\n if (flushTimer !== null) {\n clearTimeout(flushTimer);\n flushTimer = null;\n }\n\n await sendEventsWithRetry(events, maxRetries);\n }\n\n async function sendEventsWithRetry(\n events: InputLogEvent[],\n remainingRetries: number,\n ): Promise<void> {\n try {\n const command = new PutLogEventsCommand({\n logGroupName: options.logGroupName,\n logStreamName: options.logStreamName,\n logEvents: events,\n });\n\n await client.send(command);\n } catch (error) {\n if (remainingRetries > 0) {\n const delay = retryDelay * Math.pow(2, maxRetries - remainingRetries);\n await new Promise((resolve) => setTimeout(resolve, delay));\n await sendEventsWithRetry(events, remainingRetries - 1);\n } else {\n console.error(\"Failed to send log events to CloudWatch Logs:\", error);\n }\n }\n }\n\n function formatLogMessage(record: LogRecord): string {\n return formatter(record);\n }\n\n const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n if (disposed) return;\n\n const message = formatLogMessage(record);\n const messageBytes = new TextEncoder().encode(message).length;\n const eventSize = 
messageBytes + OVERHEAD_PER_EVENT;\n\n const logEvent: InputLogEvent = {\n timestamp: record.timestamp,\n message,\n };\n\n logEvents.push(logEvent);\n currentBatchSize += eventSize;\n\n const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;\n const shouldFlushByCount = logEvents.length >= batchSize;\n\n if (shouldFlushBySize || shouldFlushByCount) {\n void flushEvents();\n } else {\n scheduleFlush();\n }\n };\n\n sink[Symbol.asyncDispose] = async () => {\n if (flushTimer !== null) {\n clearTimeout(flushTimer);\n flushTimer = null;\n }\n await flushEvents();\n disposed = true;\n };\n\n return sink;\n}\n"],"mappings":";;;;;;;;;;AAeA,SAAgB,sBACdA,SACwB;CACxB,MAAM,SAAS,QAAQ,UACrB,IAAI,qBAAqB;EACvB,QAAQ,QAAQ,UAAU;EAC1B,aAAa,QAAQ;CACtB;CAEH,MAAM,YAAY,KAAK,IAAI,KAAK,IAAI,QAAQ,aAAa,KAAM,EAAE,EAAE,IAAM;CACzE,MAAM,gBAAgB,QAAQ,iBAAiB;CAC/C,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,GAAG,EAAE;CACvD,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,KAAK,EAAE;CAGzD,MAAMC,mBAAkC,CAAC,WAAW;EAClD,IAAI,SAAS;AACb,OAAK,IAAI,IAAI,GAAG,IAAI,OAAO,QAAQ,QAAQ,IACzC,KAAI,IAAI,MAAM,EACZ,WAAU,OAAO,QAAQ;MAEzB,WAAU,KAAK,UAAU,OAAO,QAAQ,GAAG;AAG/C,SAAO;CACR;CAED,MAAM,YAAY,QAAQ,aAAa;CAEvC,MAAMC,YAA6B,CAAE;CACrC,IAAI,mBAAmB;CACvB,IAAIC,aAAmD;CACvD,IAAI,WAAW;CAEf,MAAM,qBAAqB;CAC3B,MAAM,uBAAuB;CAE7B,SAAS,gBAAsB;AAC7B,MAAI,iBAAiB,KAAK,eAAe,KAAM;AAE/C,eAAa,WAAW,MAAM;AAC5B,gBAAa;AACb,OAAI,UAAU,SAAS,EACrB,CAAK,aAAa;EAErB,GAAE,cAAc;CAClB;CAED,eAAe,cAA6B;AAC1C,MAAI,UAAU,WAAW,KAAK,SAAU;EAExC,MAAM,SAAS,UAAU,OAAO,EAAE;AAClC,qBAAmB;AAEnB,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AAED,QAAM,oBAAoB,QAAQ,WAAW;CAC9C;CAED,eAAe,oBACbC,QACAC,kBACe;AACf,MAAI;GACF,MAAM,UAAU,IAAI,oBAAoB;IACtC,cAAc,QAAQ;IACtB,eAAe,QAAQ;IACvB,WAAW;GACZ;AAED,SAAM,OAAO,KAAK,QAAQ;EAC3B,SAAQ,OAAO;AACd,OAAI,mBAAmB,GAAG;IACxB,MAAM,QAAQ,aAAa,KAAK,IAAI,GAAG,aAAa,iBAAiB;AACrE,UAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,MAAM;AACzD,UAAM,oBAAoB,QAAQ,mBAAmB,EAAE;GACxD,MACC,SAAQ,MAAM,iDAAiD,MAAM;EAExE;CACF;CAED,SAAS,iBAAiBC,QAA2B;AACnD,SAAO,UAAU,OAAO;CACzB;CAED,MAAMC,OAA+B,CAACD,WAAsB;AAC1D,MAAI,SAAU;EAEd,MAAM,UAAU,iBAAiB,OAAO;EACxC,MAAM,eAAe,IAAI,cAAc,OAAO,QAAQ,CAAC;EACvD,MAAM,YAAY,eAAe;EAEjC,MAAME,WAA0B;GAC9B,WAAW,OAAO;GAClB;EACD;AAED,YAAU,KAAK,SAAS;AACxB,sBAAoB;EAEpB,MAAM,oBAAoB,mBAAmB;EAC7C,MAAM,qBAAqB,UAAU,UAAU;AAE/C,MAAI,qBAAqB,mBACvB,CAAK,aAAa;MAElB,gBAAe;CAElB;AAED,MAAK,OAAO,gBAAgB,YAAY;AACtC,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AACD,QAAM,aAAa;AACnB,aAAW;CACZ;AAED,QAAO;AACR"}
package/dist/types.d.cts ADDED
@@ -0,0 +1,75 @@
+ import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs";
+ import { TextFormatter } from "@logtape/logtape";
+
+ //#region types.d.ts
+
+ /**
+  * Options for configuring the CloudWatch Logs sink.
+  * @since 1.0.0
+  */
+ interface CloudWatchLogsSinkOptions {
+   /**
+    * An existing CloudWatch Logs client instance.
+    * If provided, the client will be used directly and other connection
+    * options (region, credentials) will be ignored.
+    */
+   readonly client?: CloudWatchLogsClient;
+   /**
+    * The name of the log group to send log events to.
+    */
+   readonly logGroupName: string;
+   /**
+    * The name of the log stream within the log group.
+    */
+   readonly logStreamName: string;
+   /**
+    * The AWS region to use when creating a new client.
+    * Ignored if `client` is provided.
+    * @default "us-east-1"
+    */
+   readonly region?: string;
+   /**
+    * AWS credentials to use when creating a new client.
+    * Ignored if `client` is provided.
+    * If not provided, the AWS SDK will use default credential resolution.
+    */
+   readonly credentials?: {
+     readonly accessKeyId: string;
+     readonly secretAccessKey: string;
+     readonly sessionToken?: string;
+   };
+   /**
+    * Maximum number of log events to batch before sending to CloudWatch.
+    * Must be between 1 and 10,000.
+    * @default 1000
+    */
+   readonly batchSize?: number;
+   /**
+    * Maximum time in milliseconds to wait before flushing buffered log events.
+    * Set to 0 or negative to disable time-based flushing.
+    * @default 1000
+    */
+   readonly flushInterval?: number;
+   /**
+    * Maximum number of retry attempts for failed requests.
+    * @default 3
+    */
+   readonly maxRetries?: number;
+   /**
+    * Initial delay in milliseconds for exponential backoff retry strategy.
+    * @default 100
+    */
+   readonly retryDelay?: number;
+   /**
+    * Text formatter to use for formatting log records before sending to CloudWatch Logs.
+    * If not provided, defaults to a simple text formatter.
+    * Use `jsonLinesFormatter()` from "@logtape/logtape" for JSON structured logging
+    * to enable powerful CloudWatch Logs Insights querying capabilities.
+    * @since 1.0.0
+    */
+   readonly formatter?: TextFormatter;
+ }
+ //# sourceMappingURL=types.d.ts.map
+ //#endregion
+ export { CloudWatchLogsSinkOptions };
+ //# sourceMappingURL=types.d.cts.map
package/dist/types.d.cts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"types.d.cts","names":[],"sources":["../types.ts"],"sourcesContent":[],"mappings":";;;;;;;AAOA;;AAMoB,UANH,yBAAA,CAMG;EAAoB;AA+DJ;;;;oBA/DhB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;uBA+DG"}
package/dist/types.d.ts ADDED
@@ -0,0 +1,75 @@
+ import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs";
+ import { TextFormatter } from "@logtape/logtape";
+
+ //#region types.d.ts
+
+ /**
+  * Options for configuring the CloudWatch Logs sink.
+  * @since 1.0.0
+  */
+ interface CloudWatchLogsSinkOptions {
+   /**
+    * An existing CloudWatch Logs client instance.
+    * If provided, the client will be used directly and other connection
+    * options (region, credentials) will be ignored.
+    */
+   readonly client?: CloudWatchLogsClient;
+   /**
+    * The name of the log group to send log events to.
+    */
+   readonly logGroupName: string;
+   /**
+    * The name of the log stream within the log group.
+    */
+   readonly logStreamName: string;
+   /**
+    * The AWS region to use when creating a new client.
+    * Ignored if `client` is provided.
+    * @default "us-east-1"
+    */
+   readonly region?: string;
+   /**
+    * AWS credentials to use when creating a new client.
+    * Ignored if `client` is provided.
+    * If not provided, the AWS SDK will use default credential resolution.
+    */
+   readonly credentials?: {
+     readonly accessKeyId: string;
+     readonly secretAccessKey: string;
+     readonly sessionToken?: string;
+   };
+   /**
+    * Maximum number of log events to batch before sending to CloudWatch.
+    * Must be between 1 and 10,000.
+    * @default 1000
+    */
+   readonly batchSize?: number;
+   /**
+    * Maximum time in milliseconds to wait before flushing buffered log events.
+    * Set to 0 or negative to disable time-based flushing.
+    * @default 1000
+    */
+   readonly flushInterval?: number;
+   /**
+    * Maximum number of retry attempts for failed requests.
+    * @default 3
+    */
+   readonly maxRetries?: number;
+   /**
+    * Initial delay in milliseconds for exponential backoff retry strategy.
+    * @default 100
+    */
+   readonly retryDelay?: number;
+   /**
+    * Text formatter to use for formatting log records before sending to CloudWatch Logs.
+    * If not provided, defaults to a simple text formatter.
+    * Use `jsonLinesFormatter()` from "@logtape/logtape" for JSON structured logging
+    * to enable powerful CloudWatch Logs Insights querying capabilities.
+    * @since 1.0.0
+    */
+   readonly formatter?: TextFormatter;
+ }
+ //# sourceMappingURL=types.d.ts.map
+ //#endregion
+ export { CloudWatchLogsSinkOptions };
+ //# sourceMappingURL=types.d.ts.map
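These options give two ways to wire up AWS access: hand the sink an existing `CloudWatchLogsClient`, or let it construct one from `region` and `credentials`. A minimal sketch of the second path with placeholder credential strings (in practice the SDK's default credential resolution is usually preferable), plus explicit disposal, which `dist/sink.js` implements via `Symbol.asyncDispose` to flush buffered events:

~~~~ typescript
import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";

const sink = getCloudWatchLogsSink({
  logGroupName: "/aws/lambda/my-function",
  logStreamName: "my-stream",
  region: "us-east-1",
  credentials: {
    accessKeyId: "<your-access-key-id>",         // placeholders; omit `credentials`
    secretAccessKey: "<your-secret-access-key>", // to use the SDK's default chain
  },
});

// The sink is Sink & AsyncDisposable: disposing it clears the flush timer and
// sends any buffered events before marking the sink as disposed.
await sink[Symbol.asyncDispose]();
~~~~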
package/dist/types.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"types.d.ts","names":[],"sources":["../types.ts"],"sourcesContent":[],"mappings":";;;;;;;AAOA;;AAMoB,UANH,yBAAA,CAMG;EAAoB;AA+DJ;;;;oBA/DhB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;uBA+DG"}
package/mod.ts ADDED
@@ -0,0 +1,2 @@
+ export type { CloudWatchLogsSinkOptions } from "./types.ts";
+ export { getCloudWatchLogsSink } from "./sink.ts";