@logtape/cloudwatch-logs 1.3.4 → 1.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/sink.cjs CHANGED
@@ -14,6 +14,7 @@ const OVERHEAD_PER_EVENT = 26;
  * @since 1.0.0
  */
 function getCloudWatchLogsSink(options) {
+  const ownClient = options.client == null;
   const client = options.client ?? new __aws_sdk_client_cloudwatch_logs.CloudWatchLogsClient({
     region: options.region ?? "us-east-1",
     credentials: options.credentials
@@ -104,6 +105,7 @@ function getCloudWatchLogsSink(options) {
     }
     await flushEvents();
     disposed = true;
+    if (ownClient) client.destroy();
   };
   return sink;
 }
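
The new ownClient flag is computed with loose equality, so an options object that omits client entirely (undefined) and one that sets it to null are treated the same way: in both cases the sink constructs, and therefore owns, the CloudWatchLogsClient. In isolation:

const ownClient = options.client == null; // loose ==: true for undefined and null alike
// equivalent to: options.client === undefined || options.client === null
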
package/dist/sink.js CHANGED
@@ -13,6 +13,7 @@ const OVERHEAD_PER_EVENT = 26;
  * @since 1.0.0
  */
 function getCloudWatchLogsSink(options) {
+  const ownClient = options.client == null;
   const client = options.client ?? new CloudWatchLogsClient({
     region: options.region ?? "us-east-1",
     credentials: options.credentials
@@ -103,6 +104,7 @@ function getCloudWatchLogsSink(options) {
     }
     await flushEvents();
     disposed = true;
+    if (ownClient) client.destroy();
   };
   return sink;
 }
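
Both the CJS and ESM builds carry the same two-line change: the sink records whether it created the CloudWatchLogsClient itself, and the Symbol.asyncDispose handler now calls client.destroy() only in that case, so a caller-supplied client is never torn down behind the caller's back. Below is a minimal usage sketch of the two ownership modes; it assumes a runtime with explicit resource management (await using), and the log group and stream names are placeholders, not values from this package.

import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs";
import { getCloudWatchLogsSink } from "@logtape/cloudwatch-logs";

// Mode 1: no client passed, so the sink owns one (ownClient === true).
// Since 1.3.6, disposing the sink flushes pending events and then
// destroys the internally created client, closing its sockets.
{
  await using sink = getCloudWatchLogsSink({
    logGroupName: "/my-app/logs",  // placeholder
    logStreamName: "instance-1",   // placeholder
    region: "us-east-1",
  });
  // ... log records flow through sink(record) here ...
}

// Mode 2: caller-supplied client (ownClient === false).
// Disposing the sink still flushes, but the shared client stays alive
// for other sinks or SDK calls; the caller destroys it when done.
const shared = new CloudWatchLogsClient({ region: "us-east-1" });
{
  await using sink = getCloudWatchLogsSink({
    client: shared,
    logGroupName: "/my-app/logs",  // placeholder
    logStreamName: "instance-2",   // placeholder
  });
}
shared.destroy(); // caller-owned lifetime

Previously (1.3.4) an internally created client was never destroyed, which can leave its keep-alive sockets open after the sink is disposed; 1.3.6 releases them while leaving clients it did not create untouched.
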
package/dist/sink.js.map CHANGED
@@ -1 +1 @@
(regenerated single-line source map: the embedded sourcesContent for ../src/sink.ts gains the same two lines as the compiled output, namely "const ownClient = options.client == null;" after the function signature and "if (ownClient) client.destroy();" at the end of the Symbol.asyncDispose handler, and the mappings string shifts to match)
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@logtape/cloudwatch-logs",
-  "version": "1.3.4",
+  "version": "1.3.6",
   "description": "AWS CloudWatch Logs sink for LogTape",
   "keywords": [
     "logging",
@@ -49,7 +49,7 @@
     "dist/"
   ],
   "peerDependencies": {
-    "@logtape/logtape": "^1.3.4"
+    "@logtape/logtape": "^1.3.6"
   },
   "dependencies": {
     "@aws-sdk/client-cloudwatch-logs": "^3.0.0"