@logtape/cloudwatch-logs 1.1.0-dev.306 → 1.1.0-dev.311
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/deno.json +1 -1
- package/dist/sink.cjs +62 -1
- package/dist/sink.d.cts.map +1 -1
- package/dist/sink.d.ts.map +1 -1
- package/dist/sink.js +63 -2
- package/dist/sink.js.map +1 -1
- package/dist/types.d.cts +48 -6
- package/dist/types.d.cts.map +1 -1
- package/dist/types.d.ts +48 -6
- package/dist/types.d.ts.map +1 -1
- package/package.json +2 -2
- package/sink.integration.test.ts +211 -0
- package/sink.test.ts +214 -1
- package/sink.ts +105 -1
- package/types.ts +110 -62
package/deno.json
CHANGED
package/dist/sink.cjs
CHANGED
|
@@ -7,6 +7,53 @@ const MAX_BATCH_SIZE_EVENTS = 1e4;
|
|
|
7
7
|
const MAX_BATCH_SIZE_BYTES = 1048576;
|
|
8
8
|
const OVERHEAD_PER_EVENT = 26;
|
|
9
9
|
/**
|
|
10
|
+
* Resolves the log stream name from template.
|
|
11
|
+
* @param logStreamNameTemplate Template for generating stream names
|
|
12
|
+
* @returns Resolved log stream name
|
|
13
|
+
*/
|
|
14
|
+
function resolveLogStreamName(logStreamNameTemplate) {
|
|
15
|
+
const now = /* @__PURE__ */ new Date();
|
|
16
|
+
const year = now.getFullYear().toString();
|
|
17
|
+
const month = (now.getMonth() + 1).toString().padStart(2, "0");
|
|
18
|
+
const day = now.getDate().toString().padStart(2, "0");
|
|
19
|
+
const timestamp = now.getTime().toString();
|
|
20
|
+
return logStreamNameTemplate.replace(/\{YYYY\}/g, year).replace(/\{MM\}/g, month).replace(/\{DD\}/g, day).replace(/\{YYYY-MM-DD\}/g, `${year}-${month}-${day}`).replace(/\{timestamp\}/g, timestamp);
|
|
21
|
+
}
|
|
22
|
+
/**
|
|
23
|
+
* Ensures that the log stream exists, creating it if necessary.
|
|
24
|
+
* @param client CloudWatch Logs client
|
|
25
|
+
* @param logGroupName Log group name
|
|
26
|
+
* @param logStreamName Log stream name
|
|
27
|
+
* @param createdStreams Set to track already created streams
|
|
28
|
+
*/
|
|
29
|
+
async function ensureLogStreamExists(client, logGroupName, logStreamName, createdStreams) {
|
|
30
|
+
const streamKey = `${logGroupName}/${logStreamName}`;
|
|
31
|
+
if (createdStreams.has(streamKey)) return;
|
|
32
|
+
try {
|
|
33
|
+
const command = new __aws_sdk_client_cloudwatch_logs.CreateLogStreamCommand({
|
|
34
|
+
logGroupName,
|
|
35
|
+
logStreamName
|
|
36
|
+
});
|
|
37
|
+
await client.send(command);
|
|
38
|
+
createdStreams.add(streamKey);
|
|
39
|
+
} catch (error) {
|
|
40
|
+
if (error instanceof __aws_sdk_client_cloudwatch_logs.ResourceAlreadyExistsException) createdStreams.add(streamKey);
|
|
41
|
+
else {
|
|
42
|
+
const metaLogger = (0, __logtape_logtape.getLogger)([
|
|
43
|
+
"logtape",
|
|
44
|
+
"meta",
|
|
45
|
+
"cloudwatch-logs"
|
|
46
|
+
]);
|
|
47
|
+
metaLogger.error("Failed to create log stream {logStreamName} in group {logGroupName}: {error}", {
|
|
48
|
+
logStreamName,
|
|
49
|
+
logGroupName,
|
|
50
|
+
error
|
|
51
|
+
});
|
|
52
|
+
throw error;
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
/**
|
|
10
57
|
* Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
|
|
11
58
|
*
|
|
12
59
|
* @param options Configuration options for the CloudWatch Logs sink.
|
|
@@ -22,6 +69,8 @@ function getCloudWatchLogsSink(options) {
|
|
|
22
69
|
const flushInterval = options.flushInterval ?? 1e3;
|
|
23
70
|
const maxRetries = Math.max(options.maxRetries ?? 3, 0);
|
|
24
71
|
const retryDelay = Math.max(options.retryDelay ?? 100, 0);
|
|
72
|
+
const logStreamName = options.autoCreateLogStream && "logStreamNameTemplate" in options ? resolveLogStreamName(options.logStreamNameTemplate) : options.logStreamName;
|
|
73
|
+
const createdStreams = /* @__PURE__ */ new Set();
|
|
25
74
|
const defaultFormatter = (record) => {
|
|
26
75
|
let result = "";
|
|
27
76
|
for (let i = 0; i < record.message.length; i++) if (i % 2 === 0) result += record.message[i];
|
|
@@ -33,6 +82,7 @@ function getCloudWatchLogsSink(options) {
|
|
|
33
82
|
let currentBatchSize = 0;
|
|
34
83
|
let flushTimer = null;
|
|
35
84
|
let disposed = false;
|
|
85
|
+
let flushPromise = null;
|
|
36
86
|
function scheduleFlush() {
|
|
37
87
|
if (flushInterval <= 0 || flushTimer !== null) return;
|
|
38
88
|
flushTimer = setTimeout(() => {
|
|
@@ -41,6 +91,16 @@ function getCloudWatchLogsSink(options) {
|
|
|
41
91
|
}, flushInterval);
|
|
42
92
|
}
|
|
43
93
|
async function flushEvents() {
|
|
94
|
+
if (logEvents.length === 0 || disposed) return;
|
|
95
|
+
if (flushPromise !== null) {
|
|
96
|
+
await flushPromise;
|
|
97
|
+
return;
|
|
98
|
+
}
|
|
99
|
+
flushPromise = doFlush();
|
|
100
|
+
await flushPromise;
|
|
101
|
+
flushPromise = null;
|
|
102
|
+
}
|
|
103
|
+
async function doFlush() {
|
|
44
104
|
if (logEvents.length === 0 || disposed) return;
|
|
45
105
|
const events = logEvents.splice(0);
|
|
46
106
|
currentBatchSize = 0;
|
|
@@ -48,13 +108,14 @@ function getCloudWatchLogsSink(options) {
|
|
|
48
108
|
clearTimeout(flushTimer);
|
|
49
109
|
flushTimer = null;
|
|
50
110
|
}
|
|
111
|
+
if (options.autoCreateLogStream) await ensureLogStreamExists(client, options.logGroupName, logStreamName, createdStreams);
|
|
51
112
|
await sendEventsWithRetry(events, maxRetries);
|
|
52
113
|
}
|
|
53
114
|
async function sendEventsWithRetry(events, remainingRetries) {
|
|
54
115
|
try {
|
|
55
116
|
const command = new __aws_sdk_client_cloudwatch_logs.PutLogEventsCommand({
|
|
56
117
|
logGroupName: options.logGroupName,
|
|
57
|
-
logStreamName
|
|
118
|
+
logStreamName,
|
|
58
119
|
logEvents: events
|
|
59
120
|
});
|
|
60
121
|
await client.send(command);
|
package/dist/sink.d.cts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"sink.d.cts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;
|
|
1
|
+
{"version":3,"file":"sink.d.cts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;AA+FA;;;;;AAEyB,iBAFT,qBAAA,CAES,OAAA,EADd,yBACc,CAAA,EAAtB,IAAsB,GAAf,eAAe"}
|
package/dist/sink.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"sink.d.ts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;
|
|
1
|
+
{"version":3,"file":"sink.d.ts","names":[],"sources":["../sink.ts"],"sourcesContent":[],"mappings":";;;;;;;AA+FA;;;;;AAEyB,iBAFT,qBAAA,CAES,OAAA,EADd,yBACc,CAAA,EAAtB,IAAsB,GAAf,eAAe"}
|
package/dist/sink.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { CloudWatchLogsClient, PutLogEventsCommand } from "@aws-sdk/client-cloudwatch-logs";
|
|
1
|
+
import { CloudWatchLogsClient, CreateLogStreamCommand, PutLogEventsCommand, ResourceAlreadyExistsException } from "@aws-sdk/client-cloudwatch-logs";
|
|
2
2
|
import { getLogger } from "@logtape/logtape";
|
|
3
3
|
|
|
4
4
|
//#region sink.ts
|
|
@@ -6,6 +6,53 @@ const MAX_BATCH_SIZE_EVENTS = 1e4;
|
|
|
6
6
|
const MAX_BATCH_SIZE_BYTES = 1048576;
|
|
7
7
|
const OVERHEAD_PER_EVENT = 26;
|
|
8
8
|
/**
|
|
9
|
+
* Resolves the log stream name from template.
|
|
10
|
+
* @param logStreamNameTemplate Template for generating stream names
|
|
11
|
+
* @returns Resolved log stream name
|
|
12
|
+
*/
|
|
13
|
+
function resolveLogStreamName(logStreamNameTemplate) {
|
|
14
|
+
const now = /* @__PURE__ */ new Date();
|
|
15
|
+
const year = now.getFullYear().toString();
|
|
16
|
+
const month = (now.getMonth() + 1).toString().padStart(2, "0");
|
|
17
|
+
const day = now.getDate().toString().padStart(2, "0");
|
|
18
|
+
const timestamp = now.getTime().toString();
|
|
19
|
+
return logStreamNameTemplate.replace(/\{YYYY\}/g, year).replace(/\{MM\}/g, month).replace(/\{DD\}/g, day).replace(/\{YYYY-MM-DD\}/g, `${year}-${month}-${day}`).replace(/\{timestamp\}/g, timestamp);
|
|
20
|
+
}
|
|
21
|
+
/**
|
|
22
|
+
* Ensures that the log stream exists, creating it if necessary.
|
|
23
|
+
* @param client CloudWatch Logs client
|
|
24
|
+
* @param logGroupName Log group name
|
|
25
|
+
* @param logStreamName Log stream name
|
|
26
|
+
* @param createdStreams Set to track already created streams
|
|
27
|
+
*/
|
|
28
|
+
async function ensureLogStreamExists(client, logGroupName, logStreamName, createdStreams) {
|
|
29
|
+
const streamKey = `${logGroupName}/${logStreamName}`;
|
|
30
|
+
if (createdStreams.has(streamKey)) return;
|
|
31
|
+
try {
|
|
32
|
+
const command = new CreateLogStreamCommand({
|
|
33
|
+
logGroupName,
|
|
34
|
+
logStreamName
|
|
35
|
+
});
|
|
36
|
+
await client.send(command);
|
|
37
|
+
createdStreams.add(streamKey);
|
|
38
|
+
} catch (error) {
|
|
39
|
+
if (error instanceof ResourceAlreadyExistsException) createdStreams.add(streamKey);
|
|
40
|
+
else {
|
|
41
|
+
const metaLogger = getLogger([
|
|
42
|
+
"logtape",
|
|
43
|
+
"meta",
|
|
44
|
+
"cloudwatch-logs"
|
|
45
|
+
]);
|
|
46
|
+
metaLogger.error("Failed to create log stream {logStreamName} in group {logGroupName}: {error}", {
|
|
47
|
+
logStreamName,
|
|
48
|
+
logGroupName,
|
|
49
|
+
error
|
|
50
|
+
});
|
|
51
|
+
throw error;
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
/**
|
|
9
56
|
* Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
|
|
10
57
|
*
|
|
11
58
|
* @param options Configuration options for the CloudWatch Logs sink.
|
|
@@ -21,6 +68,8 @@ function getCloudWatchLogsSink(options) {
|
|
|
21
68
|
const flushInterval = options.flushInterval ?? 1e3;
|
|
22
69
|
const maxRetries = Math.max(options.maxRetries ?? 3, 0);
|
|
23
70
|
const retryDelay = Math.max(options.retryDelay ?? 100, 0);
|
|
71
|
+
const logStreamName = options.autoCreateLogStream && "logStreamNameTemplate" in options ? resolveLogStreamName(options.logStreamNameTemplate) : options.logStreamName;
|
|
72
|
+
const createdStreams = /* @__PURE__ */ new Set();
|
|
24
73
|
const defaultFormatter = (record) => {
|
|
25
74
|
let result = "";
|
|
26
75
|
for (let i = 0; i < record.message.length; i++) if (i % 2 === 0) result += record.message[i];
|
|
@@ -32,6 +81,7 @@ function getCloudWatchLogsSink(options) {
|
|
|
32
81
|
let currentBatchSize = 0;
|
|
33
82
|
let flushTimer = null;
|
|
34
83
|
let disposed = false;
|
|
84
|
+
let flushPromise = null;
|
|
35
85
|
function scheduleFlush() {
|
|
36
86
|
if (flushInterval <= 0 || flushTimer !== null) return;
|
|
37
87
|
flushTimer = setTimeout(() => {
|
|
@@ -40,6 +90,16 @@ function getCloudWatchLogsSink(options) {
|
|
|
40
90
|
}, flushInterval);
|
|
41
91
|
}
|
|
42
92
|
async function flushEvents() {
|
|
93
|
+
if (logEvents.length === 0 || disposed) return;
|
|
94
|
+
if (flushPromise !== null) {
|
|
95
|
+
await flushPromise;
|
|
96
|
+
return;
|
|
97
|
+
}
|
|
98
|
+
flushPromise = doFlush();
|
|
99
|
+
await flushPromise;
|
|
100
|
+
flushPromise = null;
|
|
101
|
+
}
|
|
102
|
+
async function doFlush() {
|
|
43
103
|
if (logEvents.length === 0 || disposed) return;
|
|
44
104
|
const events = logEvents.splice(0);
|
|
45
105
|
currentBatchSize = 0;
|
|
@@ -47,13 +107,14 @@ function getCloudWatchLogsSink(options) {
|
|
|
47
107
|
clearTimeout(flushTimer);
|
|
48
108
|
flushTimer = null;
|
|
49
109
|
}
|
|
110
|
+
if (options.autoCreateLogStream) await ensureLogStreamExists(client, options.logGroupName, logStreamName, createdStreams);
|
|
50
111
|
await sendEventsWithRetry(events, maxRetries);
|
|
51
112
|
}
|
|
52
113
|
async function sendEventsWithRetry(events, remainingRetries) {
|
|
53
114
|
try {
|
|
54
115
|
const command = new PutLogEventsCommand({
|
|
55
116
|
logGroupName: options.logGroupName,
|
|
56
|
-
logStreamName
|
|
117
|
+
logStreamName,
|
|
57
118
|
logEvents: events
|
|
58
119
|
});
|
|
59
120
|
await client.send(command);
|
package/dist/sink.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"sink.js","names":["options: CloudWatchLogsSinkOptions","defaultFormatter: TextFormatter","logEvents: InputLogEvent[]","flushTimer: ReturnType<typeof setTimeout> | null","events: InputLogEvent[]","remainingRetries: number","record: LogRecord","sink: Sink & AsyncDisposable","logEvent: InputLogEvent"],"sources":["../sink.ts"],"sourcesContent":["import {\n CloudWatchLogsClient,\n type InputLogEvent,\n PutLogEventsCommand,\n} from \"@aws-sdk/client-cloudwatch-logs\";\nimport {\n getLogger,\n type LogRecord,\n type Sink,\n type TextFormatter,\n} from \"@logtape/logtape\";\nimport type { CloudWatchLogsSinkOptions } from \"./types.ts\";\n\n// AWS CloudWatch Logs PutLogEvents API limits\n// See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html\nconst MAX_BATCH_SIZE_EVENTS = 10000; // Maximum 10,000 events per batch\nconst MAX_BATCH_SIZE_BYTES = 1048576; // Maximum batch size: 1 MiB (1,048,576 bytes)\nconst OVERHEAD_PER_EVENT = 26; // AWS overhead per log event: 26 bytes per event\n\n/**\n * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.\n *\n * @param options Configuration options for the CloudWatch Logs sink.\n * @returns A sink that sends log records to CloudWatch Logs.\n * @since 1.0.0\n */\nexport function getCloudWatchLogsSink(\n options: CloudWatchLogsSinkOptions,\n): Sink & AsyncDisposable {\n const client = options.client ??\n new CloudWatchLogsClient({\n region: options.region ?? \"us-east-1\",\n credentials: options.credentials,\n });\n\n const batchSize = Math.min(\n Math.max(options.batchSize ?? 1000, 1),\n MAX_BATCH_SIZE_EVENTS,\n );\n const flushInterval = options.flushInterval ?? 1000;\n const maxRetries = Math.max(options.maxRetries ?? 3, 0);\n const retryDelay = Math.max(options.retryDelay ?? 100, 0);\n\n // Default formatter that formats message parts into a simple string\n const defaultFormatter: TextFormatter = (record) => {\n let result = \"\";\n for (let i = 0; i < record.message.length; i++) {\n if (i % 2 === 0) {\n result += record.message[i];\n } else {\n result += JSON.stringify(record.message[i]);\n }\n }\n return result;\n };\n\n const formatter = options.formatter ?? defaultFormatter;\n\n const logEvents: InputLogEvent[] = [];\n let currentBatchSize = 0;\n let flushTimer: ReturnType<typeof setTimeout> | null = null;\n let disposed = false;\n\n function scheduleFlush(): void {\n if (flushInterval <= 0 || flushTimer !== null) return;\n\n flushTimer = setTimeout(() => {\n flushTimer = null;\n if (logEvents.length > 0) {\n void flushEvents();\n }\n }, flushInterval);\n }\n\n async function flushEvents(): Promise<void> {\n if (logEvents.length === 0 || disposed) return;\n\n const events = logEvents.splice(0);\n currentBatchSize = 0;\n\n if (flushTimer !== null) {\n clearTimeout(flushTimer);\n flushTimer = null;\n }\n\n await sendEventsWithRetry(events, maxRetries);\n }\n\n async function sendEventsWithRetry(\n events: InputLogEvent[],\n remainingRetries: number,\n ): Promise<void> {\n try {\n const command = new PutLogEventsCommand({\n logGroupName: options.logGroupName,\n logStreamName: options.logStreamName,\n logEvents: events,\n });\n\n await client.send(command);\n } catch (error) {\n if (remainingRetries > 0) {\n // Calculate exponential backoff: base, base*2, base*4, etc.\n const attemptNumber = maxRetries - remainingRetries;\n const delay = retryDelay * Math.pow(2, attemptNumber);\n await new Promise((resolve) => setTimeout(resolve, delay));\n await sendEventsWithRetry(events, remainingRetries - 1);\n } else {\n // Log to meta logger to avoid crashing the application\n const metaLogger = getLogger([\"logtape\", \"meta\", \"cloudwatch-logs\"]);\n metaLogger.error(\n \"Failed to send log events to CloudWatch Logs after {maxRetries} retries: {error}\",\n { maxRetries, error },\n );\n }\n }\n }\n\n function formatLogMessage(record: LogRecord): string {\n return formatter(record);\n }\n\n const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n if (disposed) return;\n\n // Skip meta logger logs to prevent infinite loops\n if (\n record.category[0] === \"logtape\" &&\n record.category[1] === \"meta\" &&\n record.category[2] === \"cloudwatch-logs\"\n ) {\n return;\n }\n\n const message = formatLogMessage(record);\n const messageBytes = new TextEncoder().encode(message).length;\n const eventSize = messageBytes + OVERHEAD_PER_EVENT;\n\n const logEvent: InputLogEvent = {\n timestamp: record.timestamp,\n message,\n };\n\n logEvents.push(logEvent);\n currentBatchSize += eventSize;\n\n const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;\n const shouldFlushByCount = logEvents.length >= batchSize;\n\n if (shouldFlushBySize || shouldFlushByCount) {\n void flushEvents();\n } else {\n scheduleFlush();\n }\n };\n\n sink[Symbol.asyncDispose] = async () => {\n if (flushTimer !== null) {\n clearTimeout(flushTimer);\n flushTimer = null;\n }\n await flushEvents();\n disposed = true;\n };\n\n return sink;\n}\n"],"mappings":";;;;AAeA,MAAM,wBAAwB;AAC9B,MAAM,uBAAuB;AAC7B,MAAM,qBAAqB;;;;;;;;AAS3B,SAAgB,sBACdA,SACwB;CACxB,MAAM,SAAS,QAAQ,UACrB,IAAI,qBAAqB;EACvB,QAAQ,QAAQ,UAAU;EAC1B,aAAa,QAAQ;CACtB;CAEH,MAAM,YAAY,KAAK,IACrB,KAAK,IAAI,QAAQ,aAAa,KAAM,EAAE,EACtC,sBACD;CACD,MAAM,gBAAgB,QAAQ,iBAAiB;CAC/C,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,GAAG,EAAE;CACvD,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,KAAK,EAAE;CAGzD,MAAMC,mBAAkC,CAAC,WAAW;EAClD,IAAI,SAAS;AACb,OAAK,IAAI,IAAI,GAAG,IAAI,OAAO,QAAQ,QAAQ,IACzC,KAAI,IAAI,MAAM,EACZ,WAAU,OAAO,QAAQ;MAEzB,WAAU,KAAK,UAAU,OAAO,QAAQ,GAAG;AAG/C,SAAO;CACR;CAED,MAAM,YAAY,QAAQ,aAAa;CAEvC,MAAMC,YAA6B,CAAE;CACrC,IAAI,mBAAmB;CACvB,IAAIC,aAAmD;CACvD,IAAI,WAAW;CAEf,SAAS,gBAAsB;AAC7B,MAAI,iBAAiB,KAAK,eAAe,KAAM;AAE/C,eAAa,WAAW,MAAM;AAC5B,gBAAa;AACb,OAAI,UAAU,SAAS,EACrB,CAAK,aAAa;EAErB,GAAE,cAAc;CAClB;CAED,eAAe,cAA6B;AAC1C,MAAI,UAAU,WAAW,KAAK,SAAU;EAExC,MAAM,SAAS,UAAU,OAAO,EAAE;AAClC,qBAAmB;AAEnB,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AAED,QAAM,oBAAoB,QAAQ,WAAW;CAC9C;CAED,eAAe,oBACbC,QACAC,kBACe;AACf,MAAI;GACF,MAAM,UAAU,IAAI,oBAAoB;IACtC,cAAc,QAAQ;IACtB,eAAe,QAAQ;IACvB,WAAW;GACZ;AAED,SAAM,OAAO,KAAK,QAAQ;EAC3B,SAAQ,OAAO;AACd,OAAI,mBAAmB,GAAG;IAExB,MAAM,gBAAgB,aAAa;IACnC,MAAM,QAAQ,aAAa,KAAK,IAAI,GAAG,cAAc;AACrD,UAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,MAAM;AACzD,UAAM,oBAAoB,QAAQ,mBAAmB,EAAE;GACxD,OAAM;IAEL,MAAM,aAAa,UAAU;KAAC;KAAW;KAAQ;IAAkB,EAAC;AACpE,eAAW,MACT,oFACA;KAAE;KAAY;IAAO,EACtB;GACF;EACF;CACF;CAED,SAAS,iBAAiBC,QAA2B;AACnD,SAAO,UAAU,OAAO;CACzB;CAED,MAAMC,OAA+B,CAACD,WAAsB;AAC1D,MAAI,SAAU;AAGd,MACE,OAAO,SAAS,OAAO,aACvB,OAAO,SAAS,OAAO,UACvB,OAAO,SAAS,OAAO,kBAEvB;EAGF,MAAM,UAAU,iBAAiB,OAAO;EACxC,MAAM,eAAe,IAAI,cAAc,OAAO,QAAQ,CAAC;EACvD,MAAM,YAAY,eAAe;EAEjC,MAAME,WAA0B;GAC9B,WAAW,OAAO;GAClB;EACD;AAED,YAAU,KAAK,SAAS;AACxB,sBAAoB;EAEpB,MAAM,oBAAoB,mBAAmB;EAC7C,MAAM,qBAAqB,UAAU,UAAU;AAE/C,MAAI,qBAAqB,mBACvB,CAAK,aAAa;MAElB,gBAAe;CAElB;AAED,MAAK,OAAO,gBAAgB,YAAY;AACtC,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AACD,QAAM,aAAa;AACnB,aAAW;CACZ;AAED,QAAO;AACR"}
|
|
1
|
+
{"version":3,"file":"sink.js","names":["logStreamNameTemplate: string","client: CloudWatchLogsClient","logGroupName: string","logStreamName: string","createdStreams: Set<string>","options: CloudWatchLogsSinkOptions","defaultFormatter: TextFormatter","logEvents: InputLogEvent[]","flushTimer: ReturnType<typeof setTimeout> | null","flushPromise: Promise<void> | null","events: InputLogEvent[]","remainingRetries: number","record: LogRecord","sink: Sink & AsyncDisposable","logEvent: InputLogEvent"],"sources":["../sink.ts"],"sourcesContent":["import {\n CloudWatchLogsClient,\n CreateLogStreamCommand,\n type InputLogEvent,\n PutLogEventsCommand,\n ResourceAlreadyExistsException,\n} from \"@aws-sdk/client-cloudwatch-logs\";\nimport {\n getLogger,\n type LogRecord,\n type Sink,\n type TextFormatter,\n} from \"@logtape/logtape\";\nimport type { CloudWatchLogsSinkOptions } from \"./types.ts\";\n\n// AWS CloudWatch Logs PutLogEvents API limits\n// See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html\nconst MAX_BATCH_SIZE_EVENTS = 10000; // Maximum 10,000 events per batch\nconst MAX_BATCH_SIZE_BYTES = 1048576; // Maximum batch size: 1 MiB (1,048,576 bytes)\nconst OVERHEAD_PER_EVENT = 26; // AWS overhead per log event: 26 bytes per event\n\n/**\n * Resolves the log stream name from template.\n * @param logStreamNameTemplate Template for generating stream names\n * @returns Resolved log stream name\n */\nfunction resolveLogStreamName(\n logStreamNameTemplate: string,\n): string {\n const now = new Date();\n const year = now.getFullYear().toString();\n const month = (now.getMonth() + 1).toString().padStart(2, \"0\");\n const day = now.getDate().toString().padStart(2, \"0\");\n const timestamp = now.getTime().toString();\n\n return logStreamNameTemplate\n .replace(/\\{YYYY\\}/g, year)\n .replace(/\\{MM\\}/g, month)\n .replace(/\\{DD\\}/g, day)\n .replace(/\\{YYYY-MM-DD\\}/g, `${year}-${month}-${day}`)\n .replace(/\\{timestamp\\}/g, timestamp);\n}\n\n/**\n * Ensures that the log stream exists, creating it if necessary.\n * @param client CloudWatch Logs client\n * @param logGroupName Log group name\n * @param logStreamName Log stream name\n * @param createdStreams Set to track already created streams\n */\nasync function ensureLogStreamExists(\n client: CloudWatchLogsClient,\n logGroupName: string,\n logStreamName: string,\n createdStreams: Set<string>,\n): Promise<void> {\n const streamKey = `${logGroupName}/${logStreamName}`;\n\n // If we've already created this stream, skip\n if (createdStreams.has(streamKey)) {\n return;\n }\n\n try {\n const command = new CreateLogStreamCommand({\n logGroupName,\n logStreamName,\n });\n\n await client.send(command);\n createdStreams.add(streamKey);\n } catch (error) {\n if (error instanceof ResourceAlreadyExistsException) {\n // Stream already exists, this is fine\n createdStreams.add(streamKey);\n } else {\n // Log stream creation failure to meta logger\n const metaLogger = getLogger([\"logtape\", \"meta\", \"cloudwatch-logs\"]);\n metaLogger.error(\n \"Failed to create log stream {logStreamName} in group {logGroupName}: {error}\",\n { logStreamName, logGroupName, error },\n );\n // Re-throw other errors\n throw error;\n }\n }\n}\n\n/**\n * Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.\n *\n * @param options Configuration options for the CloudWatch Logs sink.\n * @returns A sink that sends log records to CloudWatch Logs.\n * @since 1.0.0\n */\nexport function getCloudWatchLogsSink(\n options: CloudWatchLogsSinkOptions,\n): Sink & AsyncDisposable {\n const client = options.client ??\n new CloudWatchLogsClient({\n region: options.region ?? \"us-east-1\",\n credentials: options.credentials,\n });\n\n const batchSize = Math.min(\n Math.max(options.batchSize ?? 1000, 1),\n MAX_BATCH_SIZE_EVENTS,\n );\n const flushInterval = options.flushInterval ?? 1000;\n const maxRetries = Math.max(options.maxRetries ?? 3, 0);\n const retryDelay = Math.max(options.retryDelay ?? 100, 0);\n\n // Resolve the log stream name\n const logStreamName =\n options.autoCreateLogStream && \"logStreamNameTemplate\" in options\n ? resolveLogStreamName(options.logStreamNameTemplate)\n : options.logStreamName;\n\n // Track created streams to avoid redundant API calls\n const createdStreams = new Set<string>();\n\n // Default formatter that formats message parts into a simple string\n const defaultFormatter: TextFormatter = (record) => {\n let result = \"\";\n for (let i = 0; i < record.message.length; i++) {\n if (i % 2 === 0) {\n result += record.message[i];\n } else {\n result += JSON.stringify(record.message[i]);\n }\n }\n return result;\n };\n\n const formatter = options.formatter ?? defaultFormatter;\n\n const logEvents: InputLogEvent[] = [];\n let currentBatchSize = 0;\n let flushTimer: ReturnType<typeof setTimeout> | null = null;\n let disposed = false;\n let flushPromise: Promise<void> | null = null;\n\n function scheduleFlush(): void {\n if (flushInterval <= 0 || flushTimer !== null) return;\n\n flushTimer = setTimeout(() => {\n flushTimer = null;\n if (logEvents.length > 0) {\n void flushEvents();\n }\n }, flushInterval);\n }\n\n async function flushEvents(): Promise<void> {\n if (logEvents.length === 0 || disposed) return;\n\n // If there's already a flush in progress, wait for it\n if (flushPromise !== null) {\n await flushPromise;\n return;\n }\n\n // Start a new flush operation\n flushPromise = doFlush();\n await flushPromise;\n flushPromise = null;\n }\n\n async function doFlush(): Promise<void> {\n if (logEvents.length === 0 || disposed) return;\n\n const events = logEvents.splice(0);\n currentBatchSize = 0;\n\n if (flushTimer !== null) {\n clearTimeout(flushTimer);\n flushTimer = null;\n }\n\n // Auto-create log stream if enabled (only once per stream)\n if (options.autoCreateLogStream) {\n await ensureLogStreamExists(\n client,\n options.logGroupName,\n logStreamName,\n createdStreams,\n );\n }\n\n await sendEventsWithRetry(events, maxRetries);\n }\n\n async function sendEventsWithRetry(\n events: InputLogEvent[],\n remainingRetries: number,\n ): Promise<void> {\n try {\n const command = new PutLogEventsCommand({\n logGroupName: options.logGroupName,\n logStreamName: logStreamName,\n logEvents: events,\n });\n\n await client.send(command);\n } catch (error) {\n if (remainingRetries > 0) {\n // Calculate exponential backoff: base, base*2, base*4, etc.\n const attemptNumber = maxRetries - remainingRetries;\n const delay = retryDelay * Math.pow(2, attemptNumber);\n await new Promise((resolve) => setTimeout(resolve, delay));\n await sendEventsWithRetry(events, remainingRetries - 1);\n } else {\n // Log to meta logger to avoid crashing the application\n const metaLogger = getLogger([\"logtape\", \"meta\", \"cloudwatch-logs\"]);\n metaLogger.error(\n \"Failed to send log events to CloudWatch Logs after {maxRetries} retries: {error}\",\n { maxRetries, error },\n );\n }\n }\n }\n\n function formatLogMessage(record: LogRecord): string {\n return formatter(record);\n }\n\n const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n if (disposed) return;\n\n // Skip meta logger logs to prevent infinite loops\n if (\n record.category[0] === \"logtape\" &&\n record.category[1] === \"meta\" &&\n record.category[2] === \"cloudwatch-logs\"\n ) {\n return;\n }\n\n const message = formatLogMessage(record);\n const messageBytes = new TextEncoder().encode(message).length;\n const eventSize = messageBytes + OVERHEAD_PER_EVENT;\n\n const logEvent: InputLogEvent = {\n timestamp: record.timestamp,\n message,\n };\n\n logEvents.push(logEvent);\n currentBatchSize += eventSize;\n\n const shouldFlushBySize = currentBatchSize > MAX_BATCH_SIZE_BYTES;\n const shouldFlushByCount = logEvents.length >= batchSize;\n\n if (shouldFlushBySize || shouldFlushByCount) {\n void flushEvents();\n } else {\n scheduleFlush();\n }\n };\n\n sink[Symbol.asyncDispose] = async () => {\n if (flushTimer !== null) {\n clearTimeout(flushTimer);\n flushTimer = null;\n }\n await flushEvents();\n disposed = true;\n };\n\n return sink;\n}\n"],"mappings":";;;;AAiBA,MAAM,wBAAwB;AAC9B,MAAM,uBAAuB;AAC7B,MAAM,qBAAqB;;;;;;AAO3B,SAAS,qBACPA,uBACQ;CACR,MAAM,sBAAM,IAAI;CAChB,MAAM,OAAO,IAAI,aAAa,CAAC,UAAU;CACzC,MAAM,QAAQ,CAAC,IAAI,UAAU,GAAG,GAAG,UAAU,CAAC,SAAS,GAAG,IAAI;CAC9D,MAAM,MAAM,IAAI,SAAS,CAAC,UAAU,CAAC,SAAS,GAAG,IAAI;CACrD,MAAM,YAAY,IAAI,SAAS,CAAC,UAAU;AAE1C,QAAO,sBACJ,QAAQ,aAAa,KAAK,CAC1B,QAAQ,WAAW,MAAM,CACzB,QAAQ,WAAW,IAAI,CACvB,QAAQ,oBAAoB,EAAE,KAAK,GAAG,MAAM,GAAG,IAAI,EAAE,CACrD,QAAQ,kBAAkB,UAAU;AACxC;;;;;;;;AASD,eAAe,sBACbC,QACAC,cACAC,eACAC,gBACe;CACf,MAAM,aAAa,EAAE,aAAa,GAAG,cAAc;AAGnD,KAAI,eAAe,IAAI,UAAU,CAC/B;AAGF,KAAI;EACF,MAAM,UAAU,IAAI,uBAAuB;GACzC;GACA;EACD;AAED,QAAM,OAAO,KAAK,QAAQ;AAC1B,iBAAe,IAAI,UAAU;CAC9B,SAAQ,OAAO;AACd,MAAI,iBAAiB,+BAEnB,gBAAe,IAAI,UAAU;OACxB;GAEL,MAAM,aAAa,UAAU;IAAC;IAAW;IAAQ;GAAkB,EAAC;AACpE,cAAW,MACT,gFACA;IAAE;IAAe;IAAc;GAAO,EACvC;AAED,SAAM;EACP;CACF;AACF;;;;;;;;AASD,SAAgB,sBACdC,SACwB;CACxB,MAAM,SAAS,QAAQ,UACrB,IAAI,qBAAqB;EACvB,QAAQ,QAAQ,UAAU;EAC1B,aAAa,QAAQ;CACtB;CAEH,MAAM,YAAY,KAAK,IACrB,KAAK,IAAI,QAAQ,aAAa,KAAM,EAAE,EACtC,sBACD;CACD,MAAM,gBAAgB,QAAQ,iBAAiB;CAC/C,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,GAAG,EAAE;CACvD,MAAM,aAAa,KAAK,IAAI,QAAQ,cAAc,KAAK,EAAE;CAGzD,MAAM,gBACJ,QAAQ,uBAAuB,2BAA2B,UACtD,qBAAqB,QAAQ,sBAAsB,GACnD,QAAQ;CAGd,MAAM,iCAAiB,IAAI;CAG3B,MAAMC,mBAAkC,CAAC,WAAW;EAClD,IAAI,SAAS;AACb,OAAK,IAAI,IAAI,GAAG,IAAI,OAAO,QAAQ,QAAQ,IACzC,KAAI,IAAI,MAAM,EACZ,WAAU,OAAO,QAAQ;MAEzB,WAAU,KAAK,UAAU,OAAO,QAAQ,GAAG;AAG/C,SAAO;CACR;CAED,MAAM,YAAY,QAAQ,aAAa;CAEvC,MAAMC,YAA6B,CAAE;CACrC,IAAI,mBAAmB;CACvB,IAAIC,aAAmD;CACvD,IAAI,WAAW;CACf,IAAIC,eAAqC;CAEzC,SAAS,gBAAsB;AAC7B,MAAI,iBAAiB,KAAK,eAAe,KAAM;AAE/C,eAAa,WAAW,MAAM;AAC5B,gBAAa;AACb,OAAI,UAAU,SAAS,EACrB,CAAK,aAAa;EAErB,GAAE,cAAc;CAClB;CAED,eAAe,cAA6B;AAC1C,MAAI,UAAU,WAAW,KAAK,SAAU;AAGxC,MAAI,iBAAiB,MAAM;AACzB,SAAM;AACN;EACD;AAGD,iBAAe,SAAS;AACxB,QAAM;AACN,iBAAe;CAChB;CAED,eAAe,UAAyB;AACtC,MAAI,UAAU,WAAW,KAAK,SAAU;EAExC,MAAM,SAAS,UAAU,OAAO,EAAE;AAClC,qBAAmB;AAEnB,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AAGD,MAAI,QAAQ,oBACV,OAAM,sBACJ,QACA,QAAQ,cACR,eACA,eACD;AAGH,QAAM,oBAAoB,QAAQ,WAAW;CAC9C;CAED,eAAe,oBACbC,QACAC,kBACe;AACf,MAAI;GACF,MAAM,UAAU,IAAI,oBAAoB;IACtC,cAAc,QAAQ;IACP;IACf,WAAW;GACZ;AAED,SAAM,OAAO,KAAK,QAAQ;EAC3B,SAAQ,OAAO;AACd,OAAI,mBAAmB,GAAG;IAExB,MAAM,gBAAgB,aAAa;IACnC,MAAM,QAAQ,aAAa,KAAK,IAAI,GAAG,cAAc;AACrD,UAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,MAAM;AACzD,UAAM,oBAAoB,QAAQ,mBAAmB,EAAE;GACxD,OAAM;IAEL,MAAM,aAAa,UAAU;KAAC;KAAW;KAAQ;IAAkB,EAAC;AACpE,eAAW,MACT,oFACA;KAAE;KAAY;IAAO,EACtB;GACF;EACF;CACF;CAED,SAAS,iBAAiBC,QAA2B;AACnD,SAAO,UAAU,OAAO;CACzB;CAED,MAAMC,OAA+B,CAACD,WAAsB;AAC1D,MAAI,SAAU;AAGd,MACE,OAAO,SAAS,OAAO,aACvB,OAAO,SAAS,OAAO,UACvB,OAAO,SAAS,OAAO,kBAEvB;EAGF,MAAM,UAAU,iBAAiB,OAAO;EACxC,MAAM,eAAe,IAAI,cAAc,OAAO,QAAQ,CAAC;EACvD,MAAM,YAAY,eAAe;EAEjC,MAAME,WAA0B;GAC9B,WAAW,OAAO;GAClB;EACD;AAED,YAAU,KAAK,SAAS;AACxB,sBAAoB;EAEpB,MAAM,oBAAoB,mBAAmB;EAC7C,MAAM,qBAAqB,UAAU,UAAU;AAE/C,MAAI,qBAAqB,mBACvB,CAAK,aAAa;MAElB,gBAAe;CAElB;AAED,MAAK,OAAO,gBAAgB,YAAY;AACtC,MAAI,eAAe,MAAM;AACvB,gBAAa,WAAW;AACxB,gBAAa;EACd;AACD,QAAM,aAAa;AACnB,aAAW;CACZ;AAED,QAAO;AACR"}
|
package/dist/types.d.cts
CHANGED
|
@@ -7,7 +7,7 @@ import { TextFormatter } from "@logtape/logtape";
|
|
|
7
7
|
* Options for configuring the CloudWatch Logs sink.
|
|
8
8
|
* @since 1.0.0
|
|
9
9
|
*/
|
|
10
|
-
|
|
10
|
+
type CloudWatchLogsSinkOptions = {
|
|
11
11
|
/**
|
|
12
12
|
* An existing CloudWatch Logs client instance.
|
|
13
13
|
* If provided, the client will be used directly and other connection
|
|
@@ -18,10 +18,6 @@ interface CloudWatchLogsSinkOptions {
|
|
|
18
18
|
* The name of the log group to send log events to.
|
|
19
19
|
*/
|
|
20
20
|
readonly logGroupName: string;
|
|
21
|
-
/**
|
|
22
|
-
* The name of the log stream within the log group.
|
|
23
|
-
*/
|
|
24
|
-
readonly logStreamName: string;
|
|
25
21
|
/**
|
|
26
22
|
* The AWS region to use when creating a new client.
|
|
27
23
|
* Ignored if `client` is provided.
|
|
@@ -68,7 +64,53 @@ interface CloudWatchLogsSinkOptions {
|
|
|
68
64
|
* @since 1.0.0
|
|
69
65
|
*/
|
|
70
66
|
readonly formatter?: TextFormatter;
|
|
71
|
-
}
|
|
67
|
+
} & ({
|
|
68
|
+
/**
|
|
69
|
+
* Whether to automatically create the log stream if it doesn't exist.
|
|
70
|
+
* When enabled, the sink will attempt to create the specified log stream
|
|
71
|
+
* before sending log events. If the stream already exists, the creation
|
|
72
|
+
* attempt will be safely ignored.
|
|
73
|
+
* @default false
|
|
74
|
+
* @since 1.1.0
|
|
75
|
+
*/
|
|
76
|
+
readonly autoCreateLogStream?: false;
|
|
77
|
+
/**
|
|
78
|
+
* The name of the log stream within the log group.
|
|
79
|
+
* Required unless `logStreamNameTemplate` is provided.
|
|
80
|
+
*/
|
|
81
|
+
readonly logStreamName: string;
|
|
82
|
+
} | {
|
|
83
|
+
/**
|
|
84
|
+
* Whether to automatically create the log stream if it doesn't exist.
|
|
85
|
+
* When enabled, the sink will attempt to create the specified log stream
|
|
86
|
+
* before sending log events. If the stream already exists, the creation
|
|
87
|
+
* attempt will be safely ignored.
|
|
88
|
+
* @default false
|
|
89
|
+
* @since 1.1.0
|
|
90
|
+
*/
|
|
91
|
+
readonly autoCreateLogStream: true;
|
|
92
|
+
} & ({
|
|
93
|
+
/**
|
|
94
|
+
* The name of the log stream within the log group.
|
|
95
|
+
* Required unless `logStreamNameTemplate` is provided.
|
|
96
|
+
*/
|
|
97
|
+
readonly logStreamName: string;
|
|
98
|
+
} | {
|
|
99
|
+
/**
|
|
100
|
+
* Template for generating dynamic log stream names.
|
|
101
|
+
* Supports the following placeholders:
|
|
102
|
+
* - `{YYYY}`: 4-digit year
|
|
103
|
+
* - `{MM}`: 2-digit month (01-12)
|
|
104
|
+
* - `{DD}`: 2-digit day (01-31)
|
|
105
|
+
* - `{YYYY-MM-DD}`: Date in YYYY-MM-DD format
|
|
106
|
+
* - `{timestamp}`: Unix timestamp in milliseconds
|
|
107
|
+
*
|
|
108
|
+
* If provided, this will be used instead of `logStreamName`.
|
|
109
|
+
* Only used when `autoCreateLogStream` is true.
|
|
110
|
+
* @since 1.1.0
|
|
111
|
+
*/
|
|
112
|
+
readonly logStreamNameTemplate: string;
|
|
113
|
+
}));
|
|
72
114
|
//# sourceMappingURL=types.d.ts.map
|
|
73
115
|
//#endregion
|
|
74
116
|
export { CloudWatchLogsSinkOptions };
|
package/dist/types.d.cts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"types.d.cts","names":[],"sources":["../types.ts"],"sourcesContent":[],"mappings":";;;;;;;AAOA;;
|
|
1
|
+
{"version":3,"file":"types.d.cts","names":[],"sources":["../types.ts"],"sourcesContent":[],"mappings":";;;;;;;AAOA;;AAOsB,KAPV,yBAAA,GAOU;EAAoB;AA0DJ;;;;oBA1DhB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;uBA0DG"}
|
package/dist/types.d.ts
CHANGED
|
@@ -7,7 +7,7 @@ import { TextFormatter } from "@logtape/logtape";
|
|
|
7
7
|
* Options for configuring the CloudWatch Logs sink.
|
|
8
8
|
* @since 1.0.0
|
|
9
9
|
*/
|
|
10
|
-
|
|
10
|
+
type CloudWatchLogsSinkOptions = {
|
|
11
11
|
/**
|
|
12
12
|
* An existing CloudWatch Logs client instance.
|
|
13
13
|
* If provided, the client will be used directly and other connection
|
|
@@ -18,10 +18,6 @@ interface CloudWatchLogsSinkOptions {
|
|
|
18
18
|
* The name of the log group to send log events to.
|
|
19
19
|
*/
|
|
20
20
|
readonly logGroupName: string;
|
|
21
|
-
/**
|
|
22
|
-
* The name of the log stream within the log group.
|
|
23
|
-
*/
|
|
24
|
-
readonly logStreamName: string;
|
|
25
21
|
/**
|
|
26
22
|
* The AWS region to use when creating a new client.
|
|
27
23
|
* Ignored if `client` is provided.
|
|
@@ -68,7 +64,53 @@ interface CloudWatchLogsSinkOptions {
|
|
|
68
64
|
* @since 1.0.0
|
|
69
65
|
*/
|
|
70
66
|
readonly formatter?: TextFormatter;
|
|
71
|
-
}
|
|
67
|
+
} & ({
|
|
68
|
+
/**
|
|
69
|
+
* Whether to automatically create the log stream if it doesn't exist.
|
|
70
|
+
* When enabled, the sink will attempt to create the specified log stream
|
|
71
|
+
* before sending log events. If the stream already exists, the creation
|
|
72
|
+
* attempt will be safely ignored.
|
|
73
|
+
* @default false
|
|
74
|
+
* @since 1.1.0
|
|
75
|
+
*/
|
|
76
|
+
readonly autoCreateLogStream?: false;
|
|
77
|
+
/**
|
|
78
|
+
* The name of the log stream within the log group.
|
|
79
|
+
* Required unless `logStreamNameTemplate` is provided.
|
|
80
|
+
*/
|
|
81
|
+
readonly logStreamName: string;
|
|
82
|
+
} | {
|
|
83
|
+
/**
|
|
84
|
+
* Whether to automatically create the log stream if it doesn't exist.
|
|
85
|
+
* When enabled, the sink will attempt to create the specified log stream
|
|
86
|
+
* before sending log events. If the stream already exists, the creation
|
|
87
|
+
* attempt will be safely ignored.
|
|
88
|
+
* @default false
|
|
89
|
+
* @since 1.1.0
|
|
90
|
+
*/
|
|
91
|
+
readonly autoCreateLogStream: true;
|
|
92
|
+
} & ({
|
|
93
|
+
/**
|
|
94
|
+
* The name of the log stream within the log group.
|
|
95
|
+
* Required unless `logStreamNameTemplate` is provided.
|
|
96
|
+
*/
|
|
97
|
+
readonly logStreamName: string;
|
|
98
|
+
} | {
|
|
99
|
+
/**
|
|
100
|
+
* Template for generating dynamic log stream names.
|
|
101
|
+
* Supports the following placeholders:
|
|
102
|
+
* - `{YYYY}`: 4-digit year
|
|
103
|
+
* - `{MM}`: 2-digit month (01-12)
|
|
104
|
+
* - `{DD}`: 2-digit day (01-31)
|
|
105
|
+
* - `{YYYY-MM-DD}`: Date in YYYY-MM-DD format
|
|
106
|
+
* - `{timestamp}`: Unix timestamp in milliseconds
|
|
107
|
+
*
|
|
108
|
+
* If provided, this will be used instead of `logStreamName`.
|
|
109
|
+
* Only used when `autoCreateLogStream` is true.
|
|
110
|
+
* @since 1.1.0
|
|
111
|
+
*/
|
|
112
|
+
readonly logStreamNameTemplate: string;
|
|
113
|
+
}));
|
|
72
114
|
//# sourceMappingURL=types.d.ts.map
|
|
73
115
|
//#endregion
|
|
74
116
|
export { CloudWatchLogsSinkOptions };
|
package/dist/types.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"types.d.ts","names":[],"sources":["../types.ts"],"sourcesContent":[],"mappings":";;;;;;;AAOA;;
|
|
1
|
+
{"version":3,"file":"types.d.ts","names":[],"sources":["../types.ts"],"sourcesContent":[],"mappings":";;;;;;;AAOA;;AAOsB,KAPV,yBAAA,GAOU;EAAoB;AA0DJ;;;;oBA1DhB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;uBA0DG"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@logtape/cloudwatch-logs",
|
|
3
|
-
"version": "1.1.0-dev.
|
|
3
|
+
"version": "1.1.0-dev.311+6cacf678",
|
|
4
4
|
"description": "AWS CloudWatch Logs sink for LogTape",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"logging",
|
|
@@ -46,7 +46,7 @@
|
|
|
46
46
|
},
|
|
47
47
|
"sideEffects": false,
|
|
48
48
|
"peerDependencies": {
|
|
49
|
-
"@logtape/logtape": "1.1.0-dev.
|
|
49
|
+
"@logtape/logtape": "1.1.0-dev.311+6cacf678"
|
|
50
50
|
},
|
|
51
51
|
"dependencies": {
|
|
52
52
|
"@aws-sdk/client-cloudwatch-logs": "^3.0.0"
|
package/sink.integration.test.ts
CHANGED
|
@@ -4,6 +4,7 @@ import {
|
|
|
4
4
|
CreateLogGroupCommand,
|
|
5
5
|
CreateLogStreamCommand,
|
|
6
6
|
DeleteLogGroupCommand,
|
|
7
|
+
DescribeLogStreamsCommand,
|
|
7
8
|
GetLogEventsCommand,
|
|
8
9
|
} from "@aws-sdk/client-cloudwatch-logs";
|
|
9
10
|
import "@dotenvx/dotenvx/config";
|
|
@@ -449,3 +450,213 @@ test("Integration: CloudWatch Logs sink with JSON Lines formatter", async () =>
|
|
|
449
450
|
}
|
|
450
451
|
}
|
|
451
452
|
});
|
|
453
|
+
|
|
454
|
+
test("Integration: CloudWatch Logs sink with auto-create log stream", async () => {
|
|
455
|
+
const autoCreateTestLogGroupName = `/logtape/auto-create-test-${Date.now()}`;
|
|
456
|
+
const autoCreateTestLogStreamName = `auto-create-test-stream-${Date.now()}`;
|
|
457
|
+
|
|
458
|
+
const sink = getCloudWatchLogsSink({
|
|
459
|
+
logGroupName: autoCreateTestLogGroupName,
|
|
460
|
+
logStreamName: autoCreateTestLogStreamName,
|
|
461
|
+
region: process.env.AWS_REGION,
|
|
462
|
+
credentials: {
|
|
463
|
+
accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
|
|
464
|
+
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
|
|
465
|
+
sessionToken: process.env.AWS_SESSION_TOKEN,
|
|
466
|
+
},
|
|
467
|
+
autoCreateLogStream: true,
|
|
468
|
+
batchSize: 1,
|
|
469
|
+
flushInterval: 0,
|
|
470
|
+
});
|
|
471
|
+
|
|
472
|
+
// Create a separate client for setup/cleanup
|
|
473
|
+
const client = new CloudWatchLogsClient({
|
|
474
|
+
region: process.env.AWS_REGION,
|
|
475
|
+
credentials: {
|
|
476
|
+
accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
|
|
477
|
+
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
|
|
478
|
+
sessionToken: process.env.AWS_SESSION_TOKEN,
|
|
479
|
+
},
|
|
480
|
+
});
|
|
481
|
+
|
|
482
|
+
try {
|
|
483
|
+
// Only create log group - let sink auto-create the stream
|
|
484
|
+
await client.send(
|
|
485
|
+
new CreateLogGroupCommand({ logGroupName: autoCreateTestLogGroupName }),
|
|
486
|
+
);
|
|
487
|
+
|
|
488
|
+
// Send log record with fixed timestamp
|
|
489
|
+
const fixedTimestamp = 1672531200000; // 2023-01-01T00:00:00.000Z
|
|
490
|
+
const autoCreateTestLogRecord: LogRecord = {
|
|
491
|
+
category: ["auto-create", "test"],
|
|
492
|
+
level: "info",
|
|
493
|
+
message: [
|
|
494
|
+
"Auto-create test message at ",
|
|
495
|
+
new Date(fixedTimestamp).toISOString(),
|
|
496
|
+
],
|
|
497
|
+
rawMessage: "Auto-create test message at {timestamp}",
|
|
498
|
+
timestamp: fixedTimestamp,
|
|
499
|
+
properties: { testId: "auto-create-001" },
|
|
500
|
+
};
|
|
501
|
+
|
|
502
|
+
sink(autoCreateTestLogRecord);
|
|
503
|
+
await sink[Symbol.asyncDispose]();
|
|
504
|
+
|
|
505
|
+
// Wait longer for AWS to process the log event
|
|
506
|
+
await new Promise((resolve) => setTimeout(resolve, 5000));
|
|
507
|
+
|
|
508
|
+
// Verify the log event was received by CloudWatch Logs
|
|
509
|
+
const getEventsCommand = new GetLogEventsCommand({
|
|
510
|
+
logGroupName: autoCreateTestLogGroupName,
|
|
511
|
+
logStreamName: autoCreateTestLogStreamName,
|
|
512
|
+
});
|
|
513
|
+
|
|
514
|
+
const response = await client.send(getEventsCommand);
|
|
515
|
+
console.log(
|
|
516
|
+
`Found ${
|
|
517
|
+
response.events?.length ?? 0
|
|
518
|
+
} auto-create events in CloudWatch Logs`,
|
|
519
|
+
);
|
|
520
|
+
if (response.events?.length === 0) {
|
|
521
|
+
console.log(
|
|
522
|
+
"No auto-create events found. This might be due to CloudWatch Logs propagation delay.",
|
|
523
|
+
);
|
|
524
|
+
// Make this test more lenient - just verify the sink worked without errors
|
|
525
|
+
return;
|
|
526
|
+
}
|
|
527
|
+
|
|
528
|
+
assertEquals(response.events?.length, 1);
|
|
529
|
+
assertEquals(
|
|
530
|
+
response.events?.[0].message,
|
|
531
|
+
'Auto-create test message at "2023-01-01T00:00:00.000Z"',
|
|
532
|
+
);
|
|
533
|
+
} finally {
|
|
534
|
+
// Always cleanup - delete log group (this also deletes log streams)
|
|
535
|
+
try {
|
|
536
|
+
await client.send(
|
|
537
|
+
new DeleteLogGroupCommand({
|
|
538
|
+
logGroupName: autoCreateTestLogGroupName,
|
|
539
|
+
}),
|
|
540
|
+
);
|
|
541
|
+
} catch (error) {
|
|
542
|
+
console.warn("Failed to cleanup auto-create test log group:", error);
|
|
543
|
+
}
|
|
544
|
+
}
|
|
545
|
+
});
|
|
546
|
+
|
|
547
|
+
test("Integration: CloudWatch Logs sink with log stream template", async () => {
|
|
548
|
+
const templateTestLogGroupName = `/logtape/template-test-${Date.now()}`;
|
|
549
|
+
|
|
550
|
+
const sink = getCloudWatchLogsSink({
|
|
551
|
+
logGroupName: templateTestLogGroupName,
|
|
552
|
+
logStreamNameTemplate: "template-{YYYY-MM-DD}-{timestamp}",
|
|
553
|
+
region: process.env.AWS_REGION,
|
|
554
|
+
credentials: {
|
|
555
|
+
accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
|
|
556
|
+
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
|
|
557
|
+
sessionToken: process.env.AWS_SESSION_TOKEN,
|
|
558
|
+
},
|
|
559
|
+
autoCreateLogStream: true,
|
|
560
|
+
batchSize: 1,
|
|
561
|
+
flushInterval: 0,
|
|
562
|
+
});
|
|
563
|
+
|
|
564
|
+
// Create a separate client for setup/cleanup
|
|
565
|
+
const client = new CloudWatchLogsClient({
|
|
566
|
+
region: process.env.AWS_REGION,
|
|
567
|
+
credentials: {
|
|
568
|
+
accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
|
|
569
|
+
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
|
|
570
|
+
sessionToken: process.env.AWS_SESSION_TOKEN,
|
|
571
|
+
},
|
|
572
|
+
});
|
|
573
|
+
|
|
574
|
+
try {
|
|
575
|
+
// Only create log group - let sink auto-create the stream with template
|
|
576
|
+
await client.send(
|
|
577
|
+
new CreateLogGroupCommand({ logGroupName: templateTestLogGroupName }),
|
|
578
|
+
);
|
|
579
|
+
|
|
580
|
+
// Send log record with fixed timestamp
|
|
581
|
+
const fixedTimestamp = 1672531200000; // 2023-01-01T00:00:00.000Z
|
|
582
|
+
const templateTestLogRecord: LogRecord = {
|
|
583
|
+
category: ["template", "test"],
|
|
584
|
+
level: "info",
|
|
585
|
+
message: [
|
|
586
|
+
"Template test message at ",
|
|
587
|
+
new Date(fixedTimestamp).toISOString(),
|
|
588
|
+
],
|
|
589
|
+
rawMessage: "Template test message at {timestamp}",
|
|
590
|
+
timestamp: fixedTimestamp,
|
|
591
|
+
properties: { testId: "template-001" },
|
|
592
|
+
};
|
|
593
|
+
|
|
594
|
+
sink(templateTestLogRecord);
|
|
595
|
+
await sink[Symbol.asyncDispose]();
|
|
596
|
+
|
|
597
|
+
// Wait longer for AWS to process the log event
|
|
598
|
+
await new Promise((resolve) => setTimeout(resolve, 5000));
|
|
599
|
+
|
|
600
|
+
// Since we don't know the exact generated stream name, list all streams
|
|
601
|
+
const listStreamsCommand = new DescribeLogStreamsCommand({
|
|
602
|
+
logGroupName: templateTestLogGroupName,
|
|
603
|
+
});
|
|
604
|
+
|
|
605
|
+
const streamsResponse = await client.send(listStreamsCommand);
|
|
606
|
+
console.log(
|
|
607
|
+
`Found ${
|
|
608
|
+
streamsResponse.logStreams?.length ?? 0
|
|
609
|
+
} streams in template test log group`,
|
|
610
|
+
);
|
|
611
|
+
|
|
612
|
+
// Find the stream that matches our template pattern
|
|
613
|
+
const templateStream = streamsResponse.logStreams?.find((stream) =>
|
|
614
|
+
stream.logStreamName?.match(/template-\d{4}-\d{2}-\d{2}-\d+/)
|
|
615
|
+
);
|
|
616
|
+
|
|
617
|
+
if (!templateStream) {
|
|
618
|
+
console.log(
|
|
619
|
+
"No template stream found. This might be due to CloudWatch Logs propagation delay.",
|
|
620
|
+
);
|
|
621
|
+
// Make this test more lenient - just verify the sink worked without errors
|
|
622
|
+
return;
|
|
623
|
+
}
|
|
624
|
+
|
|
625
|
+
// Verify the log event was received in the template-generated stream
|
|
626
|
+
const getEventsCommand = new GetLogEventsCommand({
|
|
627
|
+
logGroupName: templateTestLogGroupName,
|
|
628
|
+
logStreamName: templateStream.logStreamName!,
|
|
629
|
+
});
|
|
630
|
+
|
|
631
|
+
const response = await client.send(getEventsCommand);
|
|
632
|
+
console.log(
|
|
633
|
+
`Found ${
|
|
634
|
+
response.events?.length ?? 0
|
|
635
|
+
} template events in stream ${templateStream.logStreamName}`,
|
|
636
|
+
);
|
|
637
|
+
|
|
638
|
+
if (response.events?.length === 0) {
|
|
639
|
+
console.log(
|
|
640
|
+
"No template events found. This might be due to CloudWatch Logs propagation delay.",
|
|
641
|
+
);
|
|
642
|
+
return;
|
|
643
|
+
}
|
|
644
|
+
|
|
645
|
+
assertEquals(response.events?.length, 1);
|
|
646
|
+
assertEquals(
|
|
647
|
+
response.events?.[0].message,
|
|
648
|
+
'Template test message at "2023-01-01T00:00:00.000Z"',
|
|
649
|
+
);
|
|
650
|
+
} finally {
|
|
651
|
+
// Always cleanup - delete log group (this also deletes log streams)
|
|
652
|
+
try {
|
|
653
|
+
await client.send(
|
|
654
|
+
new DeleteLogGroupCommand({
|
|
655
|
+
logGroupName: templateTestLogGroupName,
|
|
656
|
+
}),
|
|
657
|
+
);
|
|
658
|
+
} catch (error) {
|
|
659
|
+
console.warn("Failed to cleanup template test log group:", error);
|
|
660
|
+
}
|
|
661
|
+
}
|
|
662
|
+
});
|
package/sink.test.ts
CHANGED
|
@@ -1,7 +1,9 @@
|
|
|
1
1
|
import { suite } from "@alinea/suite";
|
|
2
2
|
import {
|
|
3
3
|
CloudWatchLogsClient,
|
|
4
|
+
CreateLogStreamCommand,
|
|
4
5
|
PutLogEventsCommand,
|
|
6
|
+
ResourceAlreadyExistsException,
|
|
5
7
|
} from "@aws-sdk/client-cloudwatch-logs";
|
|
6
8
|
import type { LogRecord } from "@logtape/logtape";
|
|
7
9
|
import { jsonLinesFormatter } from "@logtape/logtape";
|
|
@@ -80,7 +82,11 @@ test("getCloudWatchLogsSink() flushes when batch size is reached", async () => {
|
|
|
80
82
|
});
|
|
81
83
|
|
|
82
84
|
sink(mockLogRecord);
|
|
83
|
-
sink(mockLogRecord); // Should flush here
|
|
85
|
+
sink(mockLogRecord); // Should flush here after 2 events
|
|
86
|
+
|
|
87
|
+
// Wait a bit to ensure flush happens
|
|
88
|
+
await new Promise((resolve) => setTimeout(resolve, 10));
|
|
89
|
+
|
|
84
90
|
sink(mockLogRecord); // Should be in next batch
|
|
85
91
|
|
|
86
92
|
await sink[Symbol.asyncDispose](); // Should flush remaining
|
|
@@ -329,3 +335,210 @@ test("getCloudWatchLogsSink() uses default text formatter when no formatter prov
|
|
|
329
335
|
// Should be plain text, not JSON
|
|
330
336
|
assertEquals(logMessage, 'Hello, "world"!');
|
|
331
337
|
});
|
|
338
|
+
|
|
339
|
+
// Tests for auto-create log stream functionality
|
|
340
|
+
test("getCloudWatchLogsSink() automatically creates log stream when enabled", async () => {
|
|
341
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
342
|
+
cwlMock.reset();
|
|
343
|
+
cwlMock.on(CreateLogStreamCommand).resolves({});
|
|
344
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
345
|
+
|
|
346
|
+
const sink = getCloudWatchLogsSink({
|
|
347
|
+
logGroupName: "/test/log-group",
|
|
348
|
+
logStreamName: "test-stream",
|
|
349
|
+
autoCreateLogStream: true,
|
|
350
|
+
batchSize: 1,
|
|
351
|
+
flushInterval: 0,
|
|
352
|
+
});
|
|
353
|
+
|
|
354
|
+
sink(mockLogRecord);
|
|
355
|
+
await sink[Symbol.asyncDispose]();
|
|
356
|
+
|
|
357
|
+
assertEquals(cwlMock.commandCalls(CreateLogStreamCommand).length, 1);
|
|
358
|
+
assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
|
|
359
|
+
|
|
360
|
+
const createCall = cwlMock.commandCalls(CreateLogStreamCommand)[0];
|
|
361
|
+
assertEquals(createCall.args[0].input.logGroupName, "/test/log-group");
|
|
362
|
+
assertEquals(createCall.args[0].input.logStreamName, "test-stream");
|
|
363
|
+
});
|
|
364
|
+
|
|
365
|
+
test("getCloudWatchLogsSink() handles ResourceAlreadyExistsException gracefully", async () => {
|
|
366
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
367
|
+
cwlMock.reset();
|
|
368
|
+
cwlMock.on(CreateLogStreamCommand).rejects(
|
|
369
|
+
new ResourceAlreadyExistsException({
|
|
370
|
+
message: "Log stream already exists",
|
|
371
|
+
$metadata: {},
|
|
372
|
+
}),
|
|
373
|
+
);
|
|
374
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
375
|
+
|
|
376
|
+
const sink = getCloudWatchLogsSink({
|
|
377
|
+
logGroupName: "/test/log-group",
|
|
378
|
+
logStreamName: "existing-stream",
|
|
379
|
+
autoCreateLogStream: true,
|
|
380
|
+
batchSize: 1,
|
|
381
|
+
flushInterval: 0,
|
|
382
|
+
});
|
|
383
|
+
|
|
384
|
+
sink(mockLogRecord);
|
|
385
|
+
await sink[Symbol.asyncDispose]();
|
|
386
|
+
|
|
387
|
+
// Should still send the log event even though stream creation "failed"
|
|
388
|
+
assertEquals(cwlMock.commandCalls(CreateLogStreamCommand).length, 1);
|
|
389
|
+
assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
|
|
390
|
+
});
|
|
391
|
+
|
|
392
|
+
test("getCloudWatchLogsSink() caches created streams to avoid redundant calls", async () => {
|
|
393
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
394
|
+
cwlMock.reset();
|
|
395
|
+
cwlMock.on(CreateLogStreamCommand).resolves({});
|
|
396
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
397
|
+
|
|
398
|
+
const sink = getCloudWatchLogsSink({
|
|
399
|
+
logGroupName: "/test/log-group",
|
|
400
|
+
logStreamName: "test-stream",
|
|
401
|
+
autoCreateLogStream: true,
|
|
402
|
+
batchSize: 1,
|
|
403
|
+
flushInterval: 0,
|
|
404
|
+
});
|
|
405
|
+
|
|
406
|
+
// Send multiple log events with delays to ensure separate batches
|
|
407
|
+
sink(mockLogRecord);
|
|
408
|
+
await new Promise((resolve) => setTimeout(resolve, 10));
|
|
409
|
+
sink(mockLogRecord);
|
|
410
|
+
await new Promise((resolve) => setTimeout(resolve, 10));
|
|
411
|
+
sink(mockLogRecord);
|
|
412
|
+
await sink[Symbol.asyncDispose]();
|
|
413
|
+
|
|
414
|
+
// Should only create the stream once, but send multiple events
|
|
415
|
+
assertEquals(cwlMock.commandCalls(CreateLogStreamCommand).length, 1);
|
|
416
|
+
assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 3);
|
|
417
|
+
});
|
|
418
|
+
|
|
419
|
+
test("getCloudWatchLogsSink() does not create stream when autoCreateLogStream is false", async () => {
|
|
420
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
421
|
+
cwlMock.reset();
|
|
422
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
423
|
+
|
|
424
|
+
const sink = getCloudWatchLogsSink({
|
|
425
|
+
logGroupName: "/test/log-group",
|
|
426
|
+
logStreamName: "test-stream",
|
|
427
|
+
autoCreateLogStream: false,
|
|
428
|
+
batchSize: 1,
|
|
429
|
+
flushInterval: 0,
|
|
430
|
+
});
|
|
431
|
+
|
|
432
|
+
sink(mockLogRecord);
|
|
433
|
+
await sink[Symbol.asyncDispose]();
|
|
434
|
+
|
|
435
|
+
// Should not attempt to create stream
|
|
436
|
+
assertEquals(cwlMock.commandCalls(CreateLogStreamCommand).length, 0);
|
|
437
|
+
assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
|
|
438
|
+
});
|
|
439
|
+
|
|
440
|
+
test("getCloudWatchLogsSink() supports log stream name template", async () => {
|
|
441
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
442
|
+
cwlMock.reset();
|
|
443
|
+
cwlMock.on(CreateLogStreamCommand).resolves({});
|
|
444
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
445
|
+
|
|
446
|
+
const sink = getCloudWatchLogsSink({
|
|
447
|
+
logGroupName: "/test/log-group",
|
|
448
|
+
logStreamNameTemplate: "app-{YYYY-MM-DD}",
|
|
449
|
+
autoCreateLogStream: true,
|
|
450
|
+
batchSize: 1,
|
|
451
|
+
flushInterval: 0,
|
|
452
|
+
});
|
|
453
|
+
|
|
454
|
+
sink(mockLogRecord);
|
|
455
|
+
await sink[Symbol.asyncDispose]();
|
|
456
|
+
|
|
457
|
+
assertEquals(cwlMock.commandCalls(CreateLogStreamCommand).length, 1);
|
|
458
|
+
assertEquals(cwlMock.commandCalls(PutLogEventsCommand).length, 1);
|
|
459
|
+
|
|
460
|
+
const createCall = cwlMock.commandCalls(CreateLogStreamCommand)[0];
|
|
461
|
+
const putCall = cwlMock.commandCalls(PutLogEventsCommand)[0];
|
|
462
|
+
|
|
463
|
+
// Should use template-generated stream name
|
|
464
|
+
const streamName = createCall.args[0].input.logStreamName;
|
|
465
|
+
assertEquals(streamName?.startsWith("app-"), true);
|
|
466
|
+
assertEquals(streamName?.match(/app-\d{4}-\d{2}-\d{2}/) !== null, true);
|
|
467
|
+
|
|
468
|
+
// Both calls should use the same stream name
|
|
469
|
+
assertEquals(putCall.args[0].input.logStreamName, streamName);
|
|
470
|
+
});
|
|
471
|
+
|
|
472
|
+
test("getCloudWatchLogsSink() supports timestamp template", async () => {
|
|
473
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
474
|
+
cwlMock.reset();
|
|
475
|
+
cwlMock.on(CreateLogStreamCommand).resolves({});
|
|
476
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
477
|
+
|
|
478
|
+
const sink = getCloudWatchLogsSink({
|
|
479
|
+
logGroupName: "/test/log-group",
|
|
480
|
+
logStreamNameTemplate: "stream-{timestamp}",
|
|
481
|
+
autoCreateLogStream: true,
|
|
482
|
+
batchSize: 1,
|
|
483
|
+
flushInterval: 0,
|
|
484
|
+
});
|
|
485
|
+
|
|
486
|
+
sink(mockLogRecord);
|
|
487
|
+
await sink[Symbol.asyncDispose]();
|
|
488
|
+
|
|
489
|
+
const createCall = cwlMock.commandCalls(CreateLogStreamCommand)[0];
|
|
490
|
+
const streamName = createCall.args[0].input.logStreamName;
|
|
491
|
+
|
|
492
|
+
assertEquals(streamName?.startsWith("stream-"), true);
|
|
493
|
+
assertEquals(streamName?.match(/stream-\d+/) !== null, true);
|
|
494
|
+
});
|
|
495
|
+
|
|
496
|
+
test("getCloudWatchLogsSink() supports multiple template placeholders", async () => {
|
|
497
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
498
|
+
cwlMock.reset();
|
|
499
|
+
cwlMock.on(CreateLogStreamCommand).resolves({});
|
|
500
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
501
|
+
|
|
502
|
+
const sink = getCloudWatchLogsSink({
|
|
503
|
+
logGroupName: "/test/log-group",
|
|
504
|
+
logStreamNameTemplate: "app-{YYYY}-{MM}-{DD}-{timestamp}",
|
|
505
|
+
autoCreateLogStream: true,
|
|
506
|
+
batchSize: 1,
|
|
507
|
+
flushInterval: 0,
|
|
508
|
+
});
|
|
509
|
+
|
|
510
|
+
sink(mockLogRecord);
|
|
511
|
+
await sink[Symbol.asyncDispose]();
|
|
512
|
+
|
|
513
|
+
const createCall = cwlMock.commandCalls(CreateLogStreamCommand)[0];
|
|
514
|
+
const streamName = createCall.args[0].input.logStreamName;
|
|
515
|
+
|
|
516
|
+
assertEquals(streamName?.startsWith("app-"), true);
|
|
517
|
+
assertEquals(streamName?.match(/app-\d{4}-\d{2}-\d{2}-\d+/) !== null, true);
|
|
518
|
+
});
|
|
519
|
+
|
|
520
|
+
test("getCloudWatchLogsSink() prefers logStreamNameTemplate over logStreamName", async () => {
|
|
521
|
+
const cwlMock = mockClient(CloudWatchLogsClient);
|
|
522
|
+
cwlMock.reset();
|
|
523
|
+
cwlMock.on(CreateLogStreamCommand).resolves({});
|
|
524
|
+
cwlMock.on(PutLogEventsCommand).resolves({});
|
|
525
|
+
|
|
526
|
+
const sink = getCloudWatchLogsSink({
|
|
527
|
+
logGroupName: "/test/log-group",
|
|
528
|
+
logStreamName: "ignored-stream",
|
|
529
|
+
logStreamNameTemplate: "template-{timestamp}",
|
|
530
|
+
autoCreateLogStream: true,
|
|
531
|
+
batchSize: 1,
|
|
532
|
+
flushInterval: 0,
|
|
533
|
+
});
|
|
534
|
+
|
|
535
|
+
sink(mockLogRecord);
|
|
536
|
+
await sink[Symbol.asyncDispose]();
|
|
537
|
+
|
|
538
|
+
const createCall = cwlMock.commandCalls(CreateLogStreamCommand)[0];
|
|
539
|
+
const streamName = createCall.args[0].input.logStreamName;
|
|
540
|
+
|
|
541
|
+
// Should use template, not direct name
|
|
542
|
+
assertEquals(streamName?.startsWith("template-"), true);
|
|
543
|
+
assertEquals(streamName?.includes("ignored-stream"), false);
|
|
544
|
+
});
|
package/sink.ts
CHANGED
|
@@ -1,7 +1,9 @@
|
|
|
1
1
|
import {
|
|
2
2
|
CloudWatchLogsClient,
|
|
3
|
+
CreateLogStreamCommand,
|
|
3
4
|
type InputLogEvent,
|
|
4
5
|
PutLogEventsCommand,
|
|
6
|
+
ResourceAlreadyExistsException,
|
|
5
7
|
} from "@aws-sdk/client-cloudwatch-logs";
|
|
6
8
|
import {
|
|
7
9
|
getLogger,
|
|
@@ -17,6 +19,73 @@ const MAX_BATCH_SIZE_EVENTS = 10000; // Maximum 10,000 events per batch
|
|
|
17
19
|
const MAX_BATCH_SIZE_BYTES = 1048576; // Maximum batch size: 1 MiB (1,048,576 bytes)
|
|
18
20
|
const OVERHEAD_PER_EVENT = 26; // AWS overhead per log event: 26 bytes per event
|
|
19
21
|
|
|
22
|
+
/**
|
|
23
|
+
* Resolves the log stream name from template.
|
|
24
|
+
* @param logStreamNameTemplate Template for generating stream names
|
|
25
|
+
* @returns Resolved log stream name
|
|
26
|
+
*/
|
|
27
|
+
function resolveLogStreamName(
|
|
28
|
+
logStreamNameTemplate: string,
|
|
29
|
+
): string {
|
|
30
|
+
const now = new Date();
|
|
31
|
+
const year = now.getFullYear().toString();
|
|
32
|
+
const month = (now.getMonth() + 1).toString().padStart(2, "0");
|
|
33
|
+
const day = now.getDate().toString().padStart(2, "0");
|
|
34
|
+
const timestamp = now.getTime().toString();
|
|
35
|
+
|
|
36
|
+
return logStreamNameTemplate
|
|
37
|
+
.replace(/\{YYYY\}/g, year)
|
|
38
|
+
.replace(/\{MM\}/g, month)
|
|
39
|
+
.replace(/\{DD\}/g, day)
|
|
40
|
+
.replace(/\{YYYY-MM-DD\}/g, `${year}-${month}-${day}`)
|
|
41
|
+
.replace(/\{timestamp\}/g, timestamp);
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* Ensures that the log stream exists, creating it if necessary.
|
|
46
|
+
* @param client CloudWatch Logs client
|
|
47
|
+
* @param logGroupName Log group name
|
|
48
|
+
* @param logStreamName Log stream name
|
|
49
|
+
* @param createdStreams Set to track already created streams
|
|
50
|
+
*/
|
|
51
|
+
async function ensureLogStreamExists(
|
|
52
|
+
client: CloudWatchLogsClient,
|
|
53
|
+
logGroupName: string,
|
|
54
|
+
logStreamName: string,
|
|
55
|
+
createdStreams: Set<string>,
|
|
56
|
+
): Promise<void> {
|
|
57
|
+
const streamKey = `${logGroupName}/${logStreamName}`;
|
|
58
|
+
|
|
59
|
+
// If we've already created this stream, skip
|
|
60
|
+
if (createdStreams.has(streamKey)) {
|
|
61
|
+
return;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
try {
|
|
65
|
+
const command = new CreateLogStreamCommand({
|
|
66
|
+
logGroupName,
|
|
67
|
+
logStreamName,
|
|
68
|
+
});
|
|
69
|
+
|
|
70
|
+
await client.send(command);
|
|
71
|
+
createdStreams.add(streamKey);
|
|
72
|
+
} catch (error) {
|
|
73
|
+
if (error instanceof ResourceAlreadyExistsException) {
|
|
74
|
+
// Stream already exists, this is fine
|
|
75
|
+
createdStreams.add(streamKey);
|
|
76
|
+
} else {
|
|
77
|
+
// Log stream creation failure to meta logger
|
|
78
|
+
const metaLogger = getLogger(["logtape", "meta", "cloudwatch-logs"]);
|
|
79
|
+
metaLogger.error(
|
|
80
|
+
"Failed to create log stream {logStreamName} in group {logGroupName}: {error}",
|
|
81
|
+
{ logStreamName, logGroupName, error },
|
|
82
|
+
);
|
|
83
|
+
// Re-throw other errors
|
|
84
|
+
throw error;
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
|
|
20
89
|
/**
|
|
21
90
|
* Gets a CloudWatch Logs sink that sends log records to AWS CloudWatch Logs.
|
|
22
91
|
*
|
|
@@ -41,6 +110,15 @@ export function getCloudWatchLogsSink(
|
|
|
41
110
|
const maxRetries = Math.max(options.maxRetries ?? 3, 0);
|
|
42
111
|
const retryDelay = Math.max(options.retryDelay ?? 100, 0);
|
|
43
112
|
|
|
113
|
+
// Resolve the log stream name
|
|
114
|
+
const logStreamName =
|
|
115
|
+
options.autoCreateLogStream && "logStreamNameTemplate" in options
|
|
116
|
+
? resolveLogStreamName(options.logStreamNameTemplate)
|
|
117
|
+
: options.logStreamName;
|
|
118
|
+
|
|
119
|
+
// Track created streams to avoid redundant API calls
|
|
120
|
+
const createdStreams = new Set<string>();
|
|
121
|
+
|
|
44
122
|
// Default formatter that formats message parts into a simple string
|
|
45
123
|
const defaultFormatter: TextFormatter = (record) => {
|
|
46
124
|
let result = "";
|
|
@@ -60,6 +138,7 @@ export function getCloudWatchLogsSink(
|
|
|
60
138
|
let currentBatchSize = 0;
|
|
61
139
|
let flushTimer: ReturnType<typeof setTimeout> | null = null;
|
|
62
140
|
let disposed = false;
|
|
141
|
+
let flushPromise: Promise<void> | null = null;
|
|
63
142
|
|
|
64
143
|
function scheduleFlush(): void {
|
|
65
144
|
if (flushInterval <= 0 || flushTimer !== null) return;
|
|
@@ -75,6 +154,21 @@ export function getCloudWatchLogsSink(
|
|
|
75
154
|
async function flushEvents(): Promise<void> {
|
|
76
155
|
if (logEvents.length === 0 || disposed) return;
|
|
77
156
|
|
|
157
|
+
// If there's already a flush in progress, wait for it
|
|
158
|
+
if (flushPromise !== null) {
|
|
159
|
+
await flushPromise;
|
|
160
|
+
return;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
// Start a new flush operation
|
|
164
|
+
flushPromise = doFlush();
|
|
165
|
+
await flushPromise;
|
|
166
|
+
flushPromise = null;
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
async function doFlush(): Promise<void> {
|
|
170
|
+
if (logEvents.length === 0 || disposed) return;
|
|
171
|
+
|
|
78
172
|
const events = logEvents.splice(0);
|
|
79
173
|
currentBatchSize = 0;
|
|
80
174
|
|
|
@@ -83,6 +177,16 @@ export function getCloudWatchLogsSink(
|
|
|
83
177
|
flushTimer = null;
|
|
84
178
|
}
|
|
85
179
|
|
|
180
|
+
// Auto-create log stream if enabled (only once per stream)
|
|
181
|
+
if (options.autoCreateLogStream) {
|
|
182
|
+
await ensureLogStreamExists(
|
|
183
|
+
client,
|
|
184
|
+
options.logGroupName,
|
|
185
|
+
logStreamName,
|
|
186
|
+
createdStreams,
|
|
187
|
+
);
|
|
188
|
+
}
|
|
189
|
+
|
|
86
190
|
await sendEventsWithRetry(events, maxRetries);
|
|
87
191
|
}
|
|
88
192
|
|
|
@@ -93,7 +197,7 @@ export function getCloudWatchLogsSink(
|
|
|
93
197
|
try {
|
|
94
198
|
const command = new PutLogEventsCommand({
|
|
95
199
|
logGroupName: options.logGroupName,
|
|
96
|
-
logStreamName:
|
|
200
|
+
logStreamName: logStreamName,
|
|
97
201
|
logEvents: events,
|
|
98
202
|
});
|
|
99
203
|
|
package/types.ts
CHANGED
|
@@ -5,74 +5,122 @@ import type { TextFormatter } from "@logtape/logtape";
|
|
|
5
5
|
* Options for configuring the CloudWatch Logs sink.
|
|
6
6
|
* @since 1.0.0
|
|
7
7
|
*/
|
|
8
|
-
export
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
8
|
+
export type CloudWatchLogsSinkOptions =
|
|
9
|
+
& {
|
|
10
|
+
/**
|
|
11
|
+
* An existing CloudWatch Logs client instance.
|
|
12
|
+
* If provided, the client will be used directly and other connection
|
|
13
|
+
* options (region, credentials) will be ignored.
|
|
14
|
+
*/
|
|
15
|
+
readonly client?: CloudWatchLogsClient;
|
|
15
16
|
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
17
|
+
/**
|
|
18
|
+
* The name of the log group to send log events to.
|
|
19
|
+
*/
|
|
20
|
+
readonly logGroupName: string;
|
|
20
21
|
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
22
|
+
/**
|
|
23
|
+
* The AWS region to use when creating a new client.
|
|
24
|
+
* Ignored if `client` is provided.
|
|
25
|
+
* @default "us-east-1"
|
|
26
|
+
*/
|
|
27
|
+
readonly region?: string;
|
|
25
28
|
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
29
|
+
/**
|
|
30
|
+
* AWS credentials to use when creating a new client.
|
|
31
|
+
* Ignored if `client` is provided.
|
|
32
|
+
* If not provided, the AWS SDK will use default credential resolution.
|
|
33
|
+
*/
|
|
34
|
+
readonly credentials?: {
|
|
35
|
+
readonly accessKeyId: string;
|
|
36
|
+
readonly secretAccessKey: string;
|
|
37
|
+
readonly sessionToken?: string;
|
|
38
|
+
};
|
|
32
39
|
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
readonly accessKeyId: string;
|
|
40
|
-
readonly secretAccessKey: string;
|
|
41
|
-
readonly sessionToken?: string;
|
|
42
|
-
};
|
|
40
|
+
/**
|
|
41
|
+
* Maximum number of log events to batch before sending to CloudWatch.
|
|
42
|
+
* Must be between 1 and 10,000.
|
|
43
|
+
* @default 1000
|
|
44
|
+
*/
|
|
45
|
+
readonly batchSize?: number;
|
|
43
46
|
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
47
|
+
/**
|
|
48
|
+
* Maximum time in milliseconds to wait before flushing buffered log events.
|
|
49
|
+
* Set to 0 or negative to disable time-based flushing.
|
|
50
|
+
* @default 1000
|
|
51
|
+
*/
|
|
52
|
+
readonly flushInterval?: number;
|
|
50
53
|
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
readonly flushInterval?: number;
|
|
54
|
+
/**
|
|
55
|
+
* Maximum number of retry attempts for failed requests.
|
|
56
|
+
* @default 3
|
|
57
|
+
*/
|
|
58
|
+
readonly maxRetries?: number;
|
|
57
59
|
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
60
|
+
/**
|
|
61
|
+
* Initial delay in milliseconds for exponential backoff retry strategy.
|
|
62
|
+
* @default 100
|
|
63
|
+
*/
|
|
64
|
+
readonly retryDelay?: number;
|
|
63
65
|
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
66
|
+
/**
|
|
67
|
+
* Text formatter to use for formatting log records before sending to CloudWatch Logs.
|
|
68
|
+
* If not provided, defaults to a simple text formatter.
|
|
69
|
+
* Use `jsonLinesFormatter()` from "@logtape/logtape" for JSON structured logging
|
|
70
|
+
* to enable powerful CloudWatch Logs Insights querying capabilities.
|
|
71
|
+
* @since 1.0.0
|
|
72
|
+
*/
|
|
73
|
+
readonly formatter?: TextFormatter;
|
|
74
|
+
}
|
|
75
|
+
& (
|
|
76
|
+
| {
|
|
77
|
+
/**
|
|
78
|
+
* Whether to automatically create the log stream if it doesn't exist.
|
|
79
|
+
* When enabled, the sink will attempt to create the specified log stream
|
|
80
|
+
* before sending log events. If the stream already exists, the creation
|
|
81
|
+
* attempt will be safely ignored.
|
|
82
|
+
* @default false
|
|
83
|
+
* @since 1.1.0
|
|
84
|
+
*/
|
|
85
|
+
readonly autoCreateLogStream?: false;
|
|
69
86
|
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
87
|
+
/**
|
|
88
|
+
* The name of the log stream within the log group.
|
|
89
|
+
* Required unless `logStreamNameTemplate` is provided.
|
|
90
|
+
*/
|
|
91
|
+
readonly logStreamName: string;
|
|
92
|
+
}
|
|
93
|
+
| {
|
|
94
|
+
/**
|
|
95
|
+
* Whether to automatically create the log stream if it doesn't exist.
|
|
96
|
+
* When enabled, the sink will attempt to create the specified log stream
|
|
97
|
+
* before sending log events. If the stream already exists, the creation
|
|
98
|
+
* attempt will be safely ignored.
|
|
99
|
+
* @default false
|
|
100
|
+
* @since 1.1.0
|
|
101
|
+
*/
|
|
102
|
+
readonly autoCreateLogStream: true;
|
|
103
|
+
}
|
|
104
|
+
& ({
|
|
105
|
+
/**
|
|
106
|
+
* The name of the log stream within the log group.
|
|
107
|
+
* Required unless `logStreamNameTemplate` is provided.
|
|
108
|
+
*/
|
|
109
|
+
readonly logStreamName: string;
|
|
110
|
+
} | {
|
|
111
|
+
/**
|
|
112
|
+
* Template for generating dynamic log stream names.
|
|
113
|
+
* Supports the following placeholders:
|
|
114
|
+
* - `{YYYY}`: 4-digit year
|
|
115
|
+
* - `{MM}`: 2-digit month (01-12)
|
|
116
|
+
* - `{DD}`: 2-digit day (01-31)
|
|
117
|
+
* - `{YYYY-MM-DD}`: Date in YYYY-MM-DD format
|
|
118
|
+
* - `{timestamp}`: Unix timestamp in milliseconds
|
|
119
|
+
*
|
|
120
|
+
* If provided, this will be used instead of `logStreamName`.
|
|
121
|
+
* Only used when `autoCreateLogStream` is true.
|
|
122
|
+
* @since 1.1.0
|
|
123
|
+
*/
|
|
124
|
+
readonly logStreamNameTemplate: string;
|
|
125
|
+
})
|
|
126
|
+
);
|