@logtape/cloudwatch-logs 1.0.0-dev.214 → 1.0.0-dev.218
- package/deno.json +1 -1
- package/dist/sink.cjs +19 -5
- package/dist/sink.d.cts.map +1 -1
- package/dist/sink.d.ts.map +1 -1
- package/dist/sink.js +19 -5
- package/dist/sink.js.map +1 -1
- package/package.json +3 -3
- package/sink.integration.test.ts +2 -1
- package/sink.ts +34 -7
package/deno.json
CHANGED
package/dist/sink.cjs
CHANGED
@@ -1,7 +1,11 @@
 const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
 const __aws_sdk_client_cloudwatch_logs = require_rolldown_runtime.__toESM(require("@aws-sdk/client-cloudwatch-logs"));
+const __logtape_logtape = require_rolldown_runtime.__toESM(require("@logtape/logtape"));
 
 //#region sink.ts
+const MAX_BATCH_SIZE_EVENTS = 1e4;
+const MAX_BATCH_SIZE_BYTES = 1048576;
+const OVERHEAD_PER_EVENT = 26;
 /**
  * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
  *
@@ -14,7 +18,7 @@ function getCloudWatchLogsSink(options) {
     region: options.region ?? "us-east-1",
     credentials: options.credentials
   });
-  const batchSize = Math.min(Math.max(options.batchSize ?? 1e3, 1),
+  const batchSize = Math.min(Math.max(options.batchSize ?? 1e3, 1), MAX_BATCH_SIZE_EVENTS);
   const flushInterval = options.flushInterval ?? 1e3;
   const maxRetries = Math.max(options.maxRetries ?? 3, 0);
   const retryDelay = Math.max(options.retryDelay ?? 100, 0);
@@ -29,8 +33,6 @@ function getCloudWatchLogsSink(options) {
   let currentBatchSize = 0;
   let flushTimer = null;
   let disposed = false;
-  const OVERHEAD_PER_EVENT = 26;
-  const MAX_BATCH_SIZE_BYTES = 1048576;
   function scheduleFlush() {
     if (flushInterval <= 0 || flushTimer !== null) return;
     flushTimer = setTimeout(() => {
@@ -58,10 +60,21 @@ function getCloudWatchLogsSink(options) {
       await client.send(command);
     } catch (error) {
       if (remainingRetries > 0) {
-        const
+        const attemptNumber = maxRetries - remainingRetries;
+        const delay = retryDelay * Math.pow(2, attemptNumber);
         await new Promise((resolve) => setTimeout(resolve, delay));
         await sendEventsWithRetry(events, remainingRetries - 1);
-      } else
+      } else {
+        const metaLogger = (0, __logtape_logtape.getLogger)([
+          "logtape",
+          "meta",
+          "cloudwatch-logs"
+        ]);
+        metaLogger.error("Failed to send log events to CloudWatch Logs after {maxRetries} retries: {error}", {
+          maxRetries,
+          error
+        });
+      }
     }
   }
   function formatLogMessage(record) {
@@ -69,6 +82,7 @@ function getCloudWatchLogsSink(options) {
   }
   const sink = (record) => {
     if (disposed) return;
+    if (record.category[0] === "logtape" && record.category[1] === "meta" && record.category[2] === "cloudwatch-logs") return;
     const message = formatLogMessage(record);
     const messageBytes = new TextEncoder().encode(message).length;
     const eventSize = messageBytes + OVERHEAD_PER_EVENT;
package/dist/sink.d.cts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"sink.d.cts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;
+{"version":3,"file":"sink.d.cts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;AA0BA;;;;;AAEyB,iBAFT,qBAAA,CAES,OAAA,EADd,yBACc,CAAA,EAAtB,IAAsB,GAAf,eAAe"}
package/dist/sink.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"sink.d.ts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;
+{"version":3,"file":"sink.d.ts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;AA0BA;;;;;AAEyB,iBAFT,qBAAA,CAES,OAAA,EADd,yBACc,CAAA,EAAtB,IAAsB,GAAf,eAAe"}
package/dist/sink.js
CHANGED
@@ -1,6 +1,10 @@
 import { CloudWatchLogsClient, PutLogEventsCommand } from "@aws-sdk/client-cloudwatch-logs";
+import { getLogger } from "@logtape/logtape";
 
 //#region sink.ts
+const MAX_BATCH_SIZE_EVENTS = 1e4;
+const MAX_BATCH_SIZE_BYTES = 1048576;
+const OVERHEAD_PER_EVENT = 26;
 /**
  * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
  *
@@ -13,7 +17,7 @@ function getCloudWatchLogsSink(options) {
     region: options.region ?? "us-east-1",
     credentials: options.credentials
   });
-  const batchSize = Math.min(Math.max(options.batchSize ?? 1e3, 1),
+  const batchSize = Math.min(Math.max(options.batchSize ?? 1e3, 1), MAX_BATCH_SIZE_EVENTS);
   const flushInterval = options.flushInterval ?? 1e3;
   const maxRetries = Math.max(options.maxRetries ?? 3, 0);
   const retryDelay = Math.max(options.retryDelay ?? 100, 0);
@@ -28,8 +32,6 @@ function getCloudWatchLogsSink(options) {
   let currentBatchSize = 0;
   let flushTimer = null;
   let disposed = false;
-  const OVERHEAD_PER_EVENT = 26;
-  const MAX_BATCH_SIZE_BYTES = 1048576;
   function scheduleFlush() {
     if (flushInterval <= 0 || flushTimer !== null) return;
     flushTimer = setTimeout(() => {
@@ -57,10 +59,21 @@ function getCloudWatchLogsSink(options) {
       await client.send(command);
     } catch (error) {
       if (remainingRetries > 0) {
-        const
+        const attemptNumber = maxRetries - remainingRetries;
+        const delay = retryDelay * Math.pow(2, attemptNumber);
         await new Promise((resolve) => setTimeout(resolve, delay));
         await sendEventsWithRetry(events, remainingRetries - 1);
-      } else
+      } else {
+        const metaLogger = getLogger([
+          "logtape",
+          "meta",
+          "cloudwatch-logs"
+        ]);
+        metaLogger.error("Failed to send log events to CloudWatch Logs after {maxRetries} retries: {error}", {
+          maxRetries,
+          error
+        });
+      }
     }
   }
   function formatLogMessage(record) {
@@ -68,6 +81,7 @@ function getCloudWatchLogsSink(options) {
   }
   const sink = (record) => {
     if (disposed) return;
+    if (record.category[0] === "logtape" && record.category[1] === "meta" && record.category[2] === "cloudwatch-logs") return;
     const message = formatLogMessage(record);
     const messageBytes = new TextEncoder().encode(message).length;
     const eventSize = messageBytes + OVERHEAD_PER_EVENT;
package/dist/sink.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"sink.js","names":["options: CloudWatchLogsSinkOptions","defaultFormatter: TextFormatter","logEvents: InputLogEvent[]","flushTimer: ReturnType<typeof setTimeout> | null","events: InputLogEvent[]","remainingRetries: number","record: LogRecord","sink: Sink & AsyncDisposable","logEvent: InputLogEvent"],"sources":["../sink.ts"],"sourcesContent":["import {\n  CloudWatchLogsClient,\n  type InputLogEvent,\n  PutLogEventsCommand,\n} from \"@aws-sdk/client-cloudwatch-logs\";\nimport type
+{"version":3,"file":"sink.js","names":["options: CloudWatchLogsSinkOptions","defaultFormatter: TextFormatter","logEvents: InputLogEvent[]","flushTimer: ReturnType<typeof setTimeout> | null","events: InputLogEvent[]","remainingRetries: number","record: LogRecord","sink: Sink & AsyncDisposable","logEvent: InputLogEvent"],"sources":["../sink.ts"],"sourcesContent":["import {\n  CloudWatchLogsClient,\n  type InputLogEvent,\n  PutLogEventsCommand,\n} from \"@aws-sdk/client-cloudwatch-logs\";\nimport {\n  getLogger,\n  type LogRecord,\n  type Sink,\n  type TextFormatter,\n} from \"@logtape/logtape\";\nimport type { CloudWatchLogsSinkOptions } from \"./types.ts\";\n\n// AWS CloudWatch Logs PutLogEvents API limits\n// See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html\nconst MAX_BATCH_SIZE_EVENTS = 10000; // Maximum 10,000 events per batch\nconst MAX_BATCH_SIZE_BYTES = 1048576; // Maximum batch size: 1 MiB (1,048,576 bytes)\nconst OVERHEAD_PER_EVENT = 26; // AWS overhead per log event: 26 bytes per event\n\n/**\n * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.\n *\n * @param options Configuration options for the CloudWatch Logs sink.\n * @returns A sink that sends log records to CloudWatch Logs.\n * @since 1.0.0\n */\nexport function getCloudWatchLogsSink(\n  options: CloudWatchLogsSinkOptions,\n): Sink & AsyncDisposable {\n  const client = options.client ??\n    new CloudWatchLogsClient({\n      region: options.region ?? \"us-east-1\",\n      credentials: options.credentials,\n    });\n\n  const batchSize = Math.min(\n    Math.max(options.batchSize ?? 1000, 1),\n    MAX_BATCH_SIZE_EVENTS,\n  );\n  const flushInterval = options.flushInterval ?? 1000;\n  const maxRetries = Math.max(options.maxRetries ?? 3, 0);\n  const retryDelay = Math.max(options.retryDelay ?? 100, 0);\n\n  // Default formatter that formats message parts into a simple string\n  const defaultFormatter: TextFormatter = (record) => {\n    let result = \"\";\n    for (let i = 0; i < record.message.length; i++) {\n      if (i % 2 === 0) {\n        result += record.message[i];\n      } else {\n        result += JSON.stringify(record.message[i]);\n      }\n    }\n    return result;\n  };\n\n  const formatter = options.formatter ?? defaultFormatter;\n\n  const logEvents: InputLogEvent[] = [];\n  let currentBatchSize = 0;\n  let flushTimer: ReturnType<typeof setTimeout> | null = null;\n  let disposed = false;\n\n  function scheduleFlush(): void {\n    if (flushInterval <= 0 || flushTimer !== null) return;\n\n    flushTimer = setTimeout(() => {\n      flushTimer = null;\n      if (logEvents.length > 0) {\n        void flushEvents();\n      }\n    }, flushInterval);\n  }\n\n  async function flushEvents(): Promise<void> {\n    if (logEvents.length === 0 || disposed) return;\n\n    const events = logEvents.splice(0);\n    currentBatchSize = 0;\n\n    if (flushTimer !== null) {\n      clearTimeout(flushTimer);\n      flushTimer = null;\n    }\n\n    await sendEventsWithRetry(events, maxRetries);\n  }\n\n  async function sendEventsWithRetry(\n    events: InputLogEvent[],\n    remainingRetries: number,\n  ): Promise<void> {\n    try {\n      const command = new PutLogEventsCommand({\n        logGroupName: options.logGroupName,\n        logStreamName: options.logStreamName,\n        logEvents: events,\n      });\n\n      await client.send(command);\n    } catch (error) {\n      if (remainingRetries > 0) {\n        // Calculate exponential backoff: base, base*2, base*4, etc.\n        const attemptNumber = maxRetries - remainingRetries;\n        const delay = retryDelay * Math.pow(2, attemptNumber);\n        await new Promise((resolve) => setTimeout(resolve, delay));\n        await sendEventsWithRetry(events, remainingRetries - 1);\n      } else {\n        // Log to meta logger to avoid crashing the application\n        const metaLogger = getLogger([\"logtape\", \"meta\", \"cloudwatch-logs\"]);\n        metaLogger.error(\n          \"Failed to send log events to CloudWatch Logs after {maxRetries} retries: {error}\",\n          { maxRetries, error },\n        );\n      }\n    }\n  }\n\n  function formatLogMessage(record: LogRecord): string {\n    return formatter(record);\n  }\n\n  const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n    if (disposed) return;\n\n    // Skip meta logger logs to prevent infinite loops\n    if (\n      record.category[0] === \"logtape\" &&\n      record.category[1] === \"meta\" &&\n      record.category[2] === \"cloudwatch-logs\"\n    ) {\n      return;\n    }\n\n    const message = formatLogMessage(record);\n    const messageBytes = new TextEncoder().encode(message).length;\n    const eventSize = messageBytes + OVERHEAD_PER_EVENT;\n\n    const logEvent: InputLogEvent = {\n      timestamp: record.timestamp,\n      message,\n    };\n\n    logEvents.push(logEvent);\n    currentBatchSize += eventSize;\n\n    const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;\n    const shouldFlushByCount = logEvents.length >= batchSize;\n\n    if (shouldFlushBySize || shouldFlushByCount) {\n      void flushEvents();\n    } else {\n      scheduleFlush();\n    }\n  };\n\n  sink[Symbol.asyncDispose] = async () => {\n    if (flushTimer !== null) {\n      clearTimeout(flushTimer);\n      flushTimer = null;\n    }\n    await flushEvents();\n    disposed = true;\n  };\n\n  return sink;\n}\n"],"mappings":";;;;AAeA,MAAM,wBAAwB;AAC9B,MAAM,uBAAuB;AAC7B,MAAM,qBAAqB;;;;;;;;AAS3B,SAAgB,sBACdA,SACwB;CACxB,MAAM,SAAS,QAAQ,UACrB,IAAI,qBAAqB;EACvB,QAAQ,QAAQ,UAAU;EAC1B,aAAa,QAAQ;CACtB;CAEH,MAAM,YAAY,KAAK,IACrB,KAAK,IAAI,QAAQ,aAAa,KAAM,EAAE,EACtC,sBACD;CACD,MAAM,gBAAgB,QAAQ,iBAAiB;CAC/C,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,GAAG,EAAE;CACvD,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,KAAK,EAAE;CAGzD,MAAMC,mBAAkC,CAAC,WAAW;EAClD,IAAI,SAAS;AACb,OAAK,IAAI,IAAI,GAAG,IAAI,OAAO,QAAQ,QAAQ,IACzC,KAAI,IAAI,MAAM,EACZ,WAAU,OAAO,QAAQ;MAEzB,WAAU,KAAK,UAAU,OAAO,QAAQ,GAAG;AAG/C,SAAO;CACR;CAED,MAAM,YAAY,QAAQ,aAAa;CAEvC,MAAMC,YAA6B,CAAE;CACrC,IAAI,mBAAmB;CACvB,IAAIC,aAAmD;CACvD,IAAI,WAAW;CAEf,SAAS,gBAAsB;AAC7B,MAAI,iBAAiB,KAAK,eAAe,KAAM;AAE/C,eAAa,WAAW,MAAM;AAC5B,gBAAa;AACb,OAAI,UAAU,SAAS,EACrB,CAAK,aAAa;EAErB,GAAE,cAAc;CAClB;CAED,eAAe,cAA6B;AAC1C,MAAI,UAAU,WAAW,KAAK,SAAU;EAExC,MAAM,SAAS,UAAU,OAAO,EAAE;AAClC,qBAAmB;AAEnB,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AAED,QAAM,oBAAoB,QAAQ,WAAW;CAC9C;CAED,eAAe,oBACbC,QACAC,kBACe;AACf,MAAI;GACF,MAAM,UAAU,IAAI,oBAAoB;IACtC,cAAc,QAAQ;IACtB,eAAe,QAAQ;IACvB,WAAW;GACZ;AAED,SAAM,OAAO,KAAK,QAAQ;EAC3B,SAAQ,OAAO;AACd,OAAI,mBAAmB,GAAG;IAExB,MAAM,gBAAgB,aAAa;IACnC,MAAM,QAAQ,aAAa,KAAK,IAAI,GAAG,cAAc;AACrD,UAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,MAAM;AACzD,UAAM,oBAAoB,QAAQ,mBAAmB,EAAE;GACxD,OAAM;IAEL,MAAM,aAAa,UAAU;KAAC;KAAW;KAAQ;IAAkB,EAAC;AACpE,eAAW,MACT,oFACA;KAAE;KAAY;IAAO,EACtB;GACF;EACF;CACF;CAED,SAAS,iBAAiBC,QAA2B;AACnD,SAAO,UAAU,OAAO;CACzB;CAED,MAAMC,OAA+B,CAACD,WAAsB;AAC1D,MAAI,SAAU;AAGd,MACE,OAAO,SAAS,OAAO,aACvB,OAAO,SAAS,OAAO,UACvB,OAAO,SAAS,OAAO,kBAEvB;EAGF,MAAM,UAAU,iBAAiB,OAAO;EACxC,MAAM,eAAe,IAAI,cAAc,OAAO,QAAQ,CAAC;EACvD,MAAM,YAAY,eAAe;EAEjC,MAAME,WAA0B;GAC9B,WAAW,OAAO;GAClB;EACD;AAED,YAAU,KAAK,SAAS;AACxB,sBAAoB;EAEpB,MAAM,oBAAoB,mBAAmB;EAC7C,MAAM,qBAAqB,UAAU,UAAU;AAE/C,MAAI,qBAAqB,mBACvB,CAAK,aAAa;MAElB,gBAAe;CAElB;AAED,MAAK,OAAO,gBAAgB,YAAY;AACtC,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AACD,QAAM,aAAa;AACnB,aAAW;CACZ;AAED,QAAO;AACR"}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@logtape/cloudwatch-logs",
-  "version": "1.0.0-dev.
+  "version": "1.0.0-dev.218+eebe1baa",
   "description": "AWS CloudWatch Logs sink for LogTape",
   "keywords": [
     "logging",
@@ -42,7 +42,7 @@
     "./package.json": "./package.json"
   },
   "peerDependencies": {
-    "@logtape/logtape": "1.0.0-dev.
+    "@logtape/logtape": "1.0.0-dev.218+eebe1baa"
   },
   "dependencies": {
     "@aws-sdk/client-cloudwatch-logs": "^3.0.0"
@@ -61,7 +61,7 @@
     "prepublish": "tsdown",
     "test": "tsdown && node --experimental-transform-types --test",
     "test:bun": "tsdown && bun test --timeout=10000",
-    "test:deno": "deno test --allow-read --allow-
+    "test:deno": "deno test --allow-read --allow-net --allow-sys --allow-env",
     "test-all": "tsdown && node --experimental-transform-types --test && bun test && deno test"
   }
 }
package/sink.integration.test.ts
CHANGED
@@ -62,7 +62,8 @@ test("Integration: CloudWatch Logs sink with real AWS service", async () => {
     // Log group might already exist, ignore ResourceAlreadyExistsException
     if (
       !(error instanceof Error) ||
-      !
+      !("name" in error) ||
+      error.name !== "ResourceAlreadyExistsException"
     ) {
       throw error;
     }
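The widened check above narrows the caught value by its name property rather than by exception class: AWS SDK v3 service exceptions are Error subclasses whose name carries the fault code. A small sketch of the same pattern (the isAwsError helper is hypothetical, not part of this package):

import {
  CloudWatchLogsClient,
  CreateLogGroupCommand,
} from "@aws-sdk/client-cloudwatch-logs";

// Hypothetical helper mirroring the test's duck-typed narrowing.
function isAwsError(error: unknown, name: string): boolean {
  return error instanceof Error && "name" in error && error.name === name;
}

const client = new CloudWatchLogsClient({ region: "us-east-1" });
try {
  await client.send(
    new CreateLogGroupCommand({ logGroupName: "/example/group" }),
  );
} catch (error) {
  // Re-throw anything other than the expected "already exists" fault.
  if (!isAwsError(error, "ResourceAlreadyExistsException")) throw error;
}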
package/sink.ts
CHANGED
@@ -3,9 +3,20 @@ import {
   type InputLogEvent,
   PutLogEventsCommand,
 } from "@aws-sdk/client-cloudwatch-logs";
-import
+import {
+  getLogger,
+  type LogRecord,
+  type Sink,
+  type TextFormatter,
+} from "@logtape/logtape";
 import type { CloudWatchLogsSinkOptions } from "./types.ts";
 
+// AWS CloudWatch Logs PutLogEvents API limits
+// See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html
+const MAX_BATCH_SIZE_EVENTS = 10000; // Maximum 10,000 events per batch
+const MAX_BATCH_SIZE_BYTES = 1048576; // Maximum batch size: 1 MiB (1,048,576 bytes)
+const OVERHEAD_PER_EVENT = 26; // AWS overhead per log event: 26 bytes per event
+
 /**
  * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
  *
@@ -22,7 +33,10 @@ export function getCloudWatchLogsSink(
     credentials: options.credentials,
   });
 
-  const batchSize = Math.min(
+  const batchSize = Math.min(
+    Math.max(options.batchSize ?? 1000, 1),
+    MAX_BATCH_SIZE_EVENTS,
+  );
   const flushInterval = options.flushInterval ?? 1000;
   const maxRetries = Math.max(options.maxRetries ?? 3, 0);
   const retryDelay = Math.max(options.retryDelay ?? 100, 0);
@@ -47,9 +61,6 @@ export function getCloudWatchLogsSink(
   let flushTimer: ReturnType<typeof setTimeout> | null = null;
   let disposed = false;
 
-  const OVERHEAD_PER_EVENT = 26; // AWS overhead per log event
-  const MAX_BATCH_SIZE_BYTES = 1048576; // 1 MiB
-
   function scheduleFlush(): void {
     if (flushInterval <= 0 || flushTimer !== null) return;
 
@@ -89,11 +100,18 @@ export function getCloudWatchLogsSink(
       await client.send(command);
     } catch (error) {
       if (remainingRetries > 0) {
-
+        // Calculate exponential backoff: base, base*2, base*4, etc.
+        const attemptNumber = maxRetries - remainingRetries;
+        const delay = retryDelay * Math.pow(2, attemptNumber);
         await new Promise((resolve) => setTimeout(resolve, delay));
         await sendEventsWithRetry(events, remainingRetries - 1);
       } else {
-
+        // Log to meta logger to avoid crashing the application
+        const metaLogger = getLogger(["logtape", "meta", "cloudwatch-logs"]);
+        metaLogger.error(
+          "Failed to send log events to CloudWatch Logs after {maxRetries} retries: {error}",
+          { maxRetries, error },
+        );
       }
     }
   }
@@ -105,6 +123,15 @@ export function getCloudWatchLogsSink(
   const sink: Sink & AsyncDisposable = (record: LogRecord) => {
     if (disposed) return;
 
+    // Skip meta logger logs to prevent infinite loops
+    if (
+      record.category[0] === "logtape" &&
+      record.category[1] === "meta" &&
+      record.category[2] === "cloudwatch-logs"
+    ) {
+      return;
+    }
+
     const message = formatLogMessage(record);
     const messageBytes = new TextEncoder().encode(message).length;
     const eventSize = messageBytes + OVERHEAD_PER_EVENT;
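Net effect of the sink.ts changes: the PutLogEvents limits move to module scope, batchSize is clamped to at most 10,000 events (the 1 MiB batch-size check is unchanged), retries now back off exponentially (retryDelay, then 2x, 4x, ...), and a delivery failure after the final retry is reported through the ["logtape", "meta", "cloudwatch-logs"] meta logger, whose records the sink itself now skips to avoid a feedback loop. A minimal usage sketch, assuming LogTape's standard configure() API; the group/stream names and option values below are illustrative, not defaults:

import { configure } from "@logtape/logtape";
import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";

await configure({
  sinks: {
    cloudwatch: getCloudWatchLogsSink({
      logGroupName: "/my-app/production", // illustrative; the group must already exist
      logStreamName: "instance-1", // illustrative
      region: "us-east-1",
      batchSize: 1000, // clamped by the sink to [1, 10000]
      flushInterval: 1000, // ms before a scheduled flush
      maxRetries: 3, // with retryDelay: 100, backoff waits are 100, 200, 400 ms
      retryDelay: 100,
    }),
  },
  loggers: [
    { category: ["my-app"], sinks: ["cloudwatch"], lowestLevel: "info" },
  ],
});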