@replikanti/flowlint-core 0.9.0 → 0.9.1

This diff shows the contents of publicly released versions of this package as they appear in the supported public registries. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -678,8 +678,15 @@ function createHardcodedStringRule({
  return createNodeRule(ruleId, configKey, logic);
  }
 
- // src/rules/index.ts
- var r1Retry = createNodeRule("R1", "rate_limit_retry", (node, graph, ctx) => {
+ // src/rules/lib/r1-retry.ts
+ var metadata = {
+ id: "R1",
+ name: "rate_limit_retry",
+ severity: "must",
+ description: "Ensures that nodes making external API calls have a retry mechanism configured.",
+ details: "Critical for building resilient workflows that can handle transient network issues or temporary service unavailability."
+ };
+ var r1Retry = createNodeRule(metadata.id, metadata.name, (node, graph, ctx) => {
  if (!isApiNode(node.type)) return null;
  const params = node.params ?? {};
  const options = params.options ?? {};
@@ -699,8 +706,8 @@ var r1Retry = createNodeRule("R1", "rate_limit_retry", (node, graph, ctx) => {
  }
  }
  return {
- rule: "R1",
- severity: "must",
+ rule: metadata.id,
+ severity: metadata.severity,
  path: ctx.path,
  message: `Node ${node.name || node.id} is missing retry/backoff configuration`,
  raw_details: `In the node properties, enable "Retry on Fail" under Options.`,
@@ -708,11 +715,20 @@ var r1Retry = createNodeRule("R1", "rate_limit_retry", (node, graph, ctx) => {
  line: ctx.nodeLines?.[node.id]
  };
  });
- var r2ErrorHandling = createNodeRule("R2", "error_handling", (node, graph, ctx) => {
+
+ // src/rules/lib/r2-error-handling.ts
+ var metadata2 = {
+ id: "R2",
+ name: "error_handling",
+ severity: "must",
+ description: "Prevents the use of configurations that might hide errors.",
+ details: "Workflows should explicitly handle errors rather than ignoring them with continueOnFail: true."
+ };
+ var r2ErrorHandling = createNodeRule(metadata2.id, metadata2.name, (node, graph, ctx) => {
  if (ctx.cfg.rules.error_handling.forbid_continue_on_fail && node.flags?.continueOnFail) {
  return {
- rule: "R2",
- severity: "must",
+ rule: metadata2.id,
+ severity: metadata2.severity,
  path: ctx.path,
  message: `Node ${node.name || node.id} has continueOnFail enabled (disable it and route errors explicitly)`,
  nodeId: node.id,
@@ -722,152 +738,15 @@ var r2ErrorHandling = createNodeRule("R2", "error_handling", (node, graph, ctx)
  }
  return null;
  });
- var r4Secrets = createHardcodedStringRule({
- ruleId: "R4",
- severity: "must",
- configKey: "secrets",
- messageFn: (node) => `Node ${node.name || node.id} contains a hardcoded secret (move it to credentials/env vars)`,
- details: "Move API keys/tokens into Credentials or environment variables; the workflow should only reference {{$credentials.*}} expressions."
- });
- var r9ConfigLiterals = createHardcodedStringRule({
- ruleId: "R9",
+
+ // src/rules/lib/r3-idempotency.ts
+ var metadata3 = {
+ id: "R3",
+ name: "idempotency",
  severity: "should",
- configKey: "config_literals",
- messageFn: (node, value) => `Node ${node.name || node.id} contains env-specific literal "${value.substring(0, 40)}" (move to expression/credential)`,
- details: "Move environment-specific URLs/IDs into expressions or credentials (e.g., {{$env.API_BASE_URL}}) so the workflow is portable."
- });
- var r10NamingConvention = createNodeRule("R10", "naming_convention", (node, graph, ctx) => {
- const genericNames = new Set(ctx.cfg.rules.naming_convention.generic_names ?? []);
- if (!node.name || genericNames.has(node.name.toLowerCase())) {
- return {
- rule: "R10",
- severity: "nit",
- path: ctx.path,
- message: `Node ${node.id} uses a generic name "${node.name ?? ""}" (rename it to describe the action)`,
- nodeId: node.id,
- line: ctx.nodeLines?.[node.id],
- raw_details: 'Rename the node to describe its purpose (e.g., "Check subscription status" instead of "IF") for easier reviews and debugging.'
- };
- }
- return null;
- });
- var DEPRECATED_NODES = {
- "n8n-nodes-base.splitInBatches": "Use Loop over items instead",
- "n8n-nodes-base.executeWorkflow": "Use Execute Workflow (Sub-Workflow) instead"
+ description: "Guards against operations that are not idempotent with retries configured.",
+ details: "Detects patterns where a webhook trigger could lead to duplicate processing in databases or external services."
  };
- var r11DeprecatedNodes = createNodeRule("R11", "deprecated_nodes", (node, graph, ctx) => {
- if (DEPRECATED_NODES[node.type]) {
- return {
- rule: "R11",
- severity: "should",
- path: ctx.path,
- message: `Node ${node.name || node.id} uses deprecated type ${node.type} (replace with ${DEPRECATED_NODES[node.type]})`,
- nodeId: node.id,
- line: ctx.nodeLines?.[node.id],
- raw_details: `Replace this node with ${DEPRECATED_NODES[node.type]} so future n8n upgrades don\xE2\u20AC\u2122t break the workflow.`
- };
- }
- return null;
- });
- var r12UnhandledErrorPath = createNodeRule("R12", "unhandled_error_path", (node, graph, ctx) => {
- if (!isErrorProneNode(node.type)) return null;
- const hasErrorPath = graph.edges.some((edge) => {
- if (edge.from !== node.id) return false;
- if (edge.on === "error") return true;
- const targetNode = graph.nodes.find((candidate) => candidate.id === edge.to);
- return targetNode ? isErrorHandlerNode(targetNode.type, targetNode.name) : false;
- });
- if (!hasErrorPath) {
- return {
- rule: "R12",
- severity: "must",
- path: ctx.path,
- message: `Node ${node.name || node.id} has no error branch (add a red connector to handler)`,
- nodeId: node.id,
- line: ctx.nodeLines?.[node.id],
- raw_details: "Add an error (red) branch to a Stop and Error or logging/alert node so failures do not disappear silently."
- };
- }
- return null;
- });
- function r13WebhookAcknowledgment(graph, ctx) {
- const cfg = ctx.cfg.rules.webhook_acknowledgment;
- if (!cfg?.enabled) return [];
- const findings = [];
- const webhookNodes = graph.nodes.filter(
- (node) => node.type === "n8n-nodes-base.webhook" || node.type.includes("webhook") && !node.type.includes("respondToWebhook")
- );
- for (const webhookNode of webhookNodes) {
- const directDownstream = graph.edges.filter((edge) => edge.from === webhookNode.id).map((edge) => graph.nodes.find((n) => n.id === edge.to)).filter((n) => !!n);
- if (directDownstream.length === 0) continue;
- const hasImmediateResponse = directDownstream.some(
- (node) => node.type === "n8n-nodes-base.respondToWebhook" || /respond.*webhook/i.test(node.type) || /respond.*webhook/i.test(node.name || "")
- );
- if (hasImmediateResponse) continue;
- const heavyNodeTypes = cfg.heavy_node_types || [
- "n8n-nodes-base.httpRequest",
- "n8n-nodes-base.postgres",
- "n8n-nodes-base.mysql",
- "n8n-nodes-base.mongodb",
- "n8n-nodes-base.openAi",
- "n8n-nodes-base.anthropic"
- ];
- const hasHeavyProcessing = directDownstream.some(
- (node) => heavyNodeTypes.includes(node.type) || /loop|batch/i.test(node.type)
- );
- if (hasHeavyProcessing) {
- findings.push({
- rule: "R13",
- severity: "must",
- path: ctx.path,
- message: `Webhook "${webhookNode.name || webhookNode.id}" performs heavy processing before acknowledgment (risk of timeout/duplicates)`,
- nodeId: webhookNode.id,
- line: ctx.nodeLines?.[webhookNode.id],
- raw_details: `Add a "Respond to Webhook" node immediately after the webhook trigger (return 200/204), then perform heavy processing. This prevents webhook timeouts and duplicate events.`
- });
- }
- }
- return findings;
- }
- var r14RetryAfterCompliance = createNodeRule("R14", "retry_after_compliance", (node, graph, ctx) => {
- if (!isApiNode(node.type)) return null;
- const params = node.params ?? {};
- const options = params.options ?? {};
- const retryCandidates = [
- options.retryOnFail,
- params.retryOnFail,
- node.flags?.retryOnFail
- ];
- const retryOnFail = retryCandidates.find((value) => value !== void 0 && value !== null);
- if (!retryOnFail || retryOnFail === false) return null;
- if (typeof retryOnFail === "string") {
- const normalized = retryOnFail.trim().toLowerCase();
- if (retryOnFail.includes("{{") && normalized !== "true") {
- return null;
- }
- }
- const waitBetweenTries = node.flags?.waitBetweenTries;
- if (waitBetweenTries !== void 0 && waitBetweenTries !== null) {
- if (typeof waitBetweenTries === "number") return null;
- if (typeof waitBetweenTries === "string" && !isNaN(Number(waitBetweenTries)) && !waitBetweenTries.includes("{{")) {
- return null;
- }
- }
- const nodeStr = JSON.stringify(node);
- const hasRetryAfterLogic = /retry[-_]?after|retryafter/i.test(nodeStr);
- if (hasRetryAfterLogic) {
- return null;
- }
- return {
- rule: "R14",
- severity: "should",
- path: ctx.path,
- message: `Node ${node.name || node.id} has retry logic but ignores Retry-After headers (429/503 responses)`,
- raw_details: `Add expression to parse Retry-After header: const retryAfter = $json.headers['retry-after']; const delay = retryAfter ? (parseInt(retryAfter) || new Date(retryAfter) - Date.now()) : Math.min(1000 * Math.pow(2, $execution.retryCount), 60000); This prevents API bans and respects server rate limits.`,
- nodeId: node.id,
- line: ctx.nodeLines?.[node.id]
- };
- });
  function r3Idempotency(graph, ctx) {
  const cfg = ctx.cfg.rules.idempotency;
  if (!cfg?.enabled) return [];
@@ -884,8 +763,8 @@ function r3Idempotency(graph, ctx) {
  );
  if (!hasGuard) {
  findings.push({
- rule: "R3",
- severity: "must",
+ rule: metadata3.id,
+ severity: metadata3.severity,
  path: ctx.path,
  message: `The mutation path ending at "${mutationNode.name || mutationNode.id}" appears to be missing an idempotency guard.`,
  raw_details: `Ensure one of the upstream nodes or the mutation node itself uses an idempotency key, such as one of: ${(cfg.key_field_candidates ?? []).join(
@@ -898,6 +777,31 @@ function r3Idempotency(graph, ctx) {
  }
  return findings;
  }
+
+ // src/rules/lib/r4-secrets.ts
+ var metadata4 = {
+ id: "R4",
+ name: "secrets",
+ severity: "must",
+ description: "Detects hardcoded secrets, API keys, or credentials within node parameters.",
+ details: "All secrets should be stored securely using credential management systems."
+ };
+ var r4Secrets = createHardcodedStringRule({
+ ruleId: metadata4.id,
+ severity: metadata4.severity,
+ configKey: "secrets",
+ messageFn: (node) => `Node ${node.name || node.id} contains a hardcoded secret (move it to credentials/env vars)`,
+ details: "Move API keys/tokens into Credentials or environment variables; the workflow should only reference {{$credentials.*}} expressions."
+ });
+
+ // src/rules/lib/r5-dead-ends.ts
+ var metadata5 = {
+ id: "R5",
+ name: "dead_ends",
+ severity: "should",
+ description: "Finds nodes or workflow branches not connected to any other node.",
+ details: "Indicates incomplete or dead logic that should be reviewed or removed."
+ };
  function r5DeadEnds(graph, ctx) {
  const cfg = ctx.cfg.rules.dead_ends;
  if (!cfg?.enabled) return [];
@@ -909,8 +813,8 @@ function r5DeadEnds(graph, ctx) {
  for (const node of graph.nodes) {
  if ((outgoing.get(node.id) || 0) === 0 && !isTerminalNode(node.type, node.name)) {
  findings.push({
- rule: "R5",
- severity: "nit",
+ rule: metadata5.id,
+ severity: metadata5.severity,
  path: ctx.path,
  message: `Node ${node.name || node.id} has no outgoing connections (either wire it up or remove it)`,
  nodeId: node.id,
@@ -921,6 +825,15 @@ function r5DeadEnds(graph, ctx) {
  }
  return findings;
  }
+
+ // src/rules/lib/r6-long-running.ts
+ var metadata6 = {
+ id: "R6",
+ name: "long_running",
+ severity: "should",
+ description: "Flags workflows with potential for excessive runtime.",
+ details: "Detects loops with high iteration counts or long timeouts that could cause performance issues."
+ };
  function r6LongRunning(graph, ctx) {
  const cfg = ctx.cfg.rules.long_running;
  if (!cfg?.enabled) return [];
@@ -935,32 +848,41 @@ function r6LongRunning(graph, ctx) {
  ]);
  if (!iterations || cfg.max_iterations && iterations > cfg.max_iterations) {
  findings.push({
- rule: "R6",
- severity: "should",
+ rule: metadata6.id,
+ severity: metadata6.severity,
  path: ctx.path,
  message: `Node ${node.name || node.id} allows ${iterations ?? "unbounded"} iterations (limit ${cfg.max_iterations}; set a lower cap)`,
  nodeId: node.id,
  line: ctx.nodeLines?.[node.id],
- raw_details: `Set Options > Max iterations to \xE2\u2030\xA4 ${cfg.max_iterations} or split the processing into smaller batches.`
+ raw_details: `Set Options > Max iterations to \u2264 ${cfg.max_iterations} or split the processing into smaller batches.`
  });
  }
  if (cfg.timeout_ms) {
  const timeout = readNumber(node.params, ["timeout", "timeoutMs", "options.timeout"]);
  if (timeout && timeout > cfg.timeout_ms) {
  findings.push({
- rule: "R6",
- severity: "should",
+ rule: metadata6.id,
+ severity: metadata6.severity,
  path: ctx.path,
  message: `Node ${node.name || node.id} uses timeout ${timeout}ms (limit ${cfg.timeout_ms}ms; shorten the timeout or break work apart)`,
  nodeId: node.id,
  line: ctx.nodeLines?.[node.id],
- raw_details: `Lower the timeout to \xE2\u2030\xA4 ${cfg.timeout_ms}ms or split the workflow so no single step blocks for too long.`
+ raw_details: `Lower the timeout to \u2264 ${cfg.timeout_ms}ms or split the workflow so no single step blocks for too long.`
  });
  }
  }
  }
  return findings;
  }
+
+ // src/rules/lib/r7-alert-log-enforcement.ts
+ var metadata7 = {
+ id: "R7",
+ name: "alert_log_enforcement",
+ severity: "should",
+ description: "Ensures critical paths include logging or alerting steps.",
+ details: "For example, a failed payment processing branch should trigger an alert for monitoring."
+ };
  function r7AlertLogEnforcement(graph, ctx) {
  const cfg = ctx.cfg.rules.alert_log_enforcement;
  if (!cfg?.enabled) return [];
@@ -992,8 +914,8 @@ function r7AlertLogEnforcement(graph, ctx) {
  }
  if (!isHandled) {
  findings.push({
- rule: "R7",
- severity: "should",
+ rule: metadata7.id,
+ severity: metadata7.severity,
  path: ctx.path,
  message: `Error path from node ${fromNode.name || fromNode.id} has no log/alert before rejoining (add notification node)`,
  nodeId: fromNode.id,
@@ -1004,6 +926,15 @@ function r7AlertLogEnforcement(graph, ctx) {
  }
  return findings;
  }
+
+ // src/rules/lib/r8-unused-data.ts
+ var metadata8 = {
+ id: "R8",
+ name: "unused_data",
+ severity: "nit",
+ description: "Detects when node output data is not consumed by subsequent nodes.",
+ details: "Identifies unnecessary data processing that could be optimized or removed."
+ };
  function r8UnusedData(graph, ctx) {
  const cfg = ctx.cfg.rules.unused_data;
  if (!cfg?.enabled) return [];
@@ -1020,18 +951,213 @@ function r8UnusedData(graph, ctx) {
  });
  if (!leadsToConsumer) {
  findings.push({
- rule: "R8",
- severity: "nit",
+ rule: metadata8.id,
+ severity: metadata8.severity,
  path: ctx.path,
  message: `Node "${node.name || node.id}" produces data that never reaches any consumer`,
  nodeId: node.id,
  line: ctx.nodeLines?.[node.id],
- raw_details: "Wire this branch into a consumer (DB/API/response) or remove it\xE2\u20AC\u201Dotherwise the data produced here is never used."
+ raw_details: "Wire this branch into a consumer (DB/API/response) or remove it\u2014otherwise the data produced here is never used."
+ });
+ }
+ }
+ return findings;
+ }
+
+ // src/rules/lib/r9-config-literals.ts
+ var metadata9 = {
+ id: "R9",
+ name: "config_literals",
+ severity: "should",
+ description: "Flags hardcoded literals (URLs, environment tags, tenant IDs) that should come from configuration.",
+ details: "Promotes externalized configuration and prevents hardcoded environment-specific values."
+ };
+ var r9ConfigLiterals = createHardcodedStringRule({
+ ruleId: metadata9.id,
+ severity: metadata9.severity,
+ configKey: "config_literals",
+ messageFn: (node, value) => `Node ${node.name || node.id} contains env-specific literal "${value.substring(0, 40)}" (move to expression/credential)`,
+ details: "Move environment-specific URLs/IDs into expressions or credentials (e.g., {{$env.API_BASE_URL}}) so the workflow is portable."
+ });
+
+ // src/rules/lib/r10-naming-convention.ts
+ var metadata10 = {
+ id: "R10",
+ name: "naming_convention",
+ severity: "nit",
+ description: "Enforces consistent and descriptive naming for nodes.",
+ details: "Enforces consistent and descriptive naming for nodes. Improves workflow readability and maintainability (e.g., 'Fetch Customer Data from CRM' vs 'HTTP Request')."
+ };
+ var r10NamingConvention = createNodeRule(metadata10.id, metadata10.name, (node, graph, ctx) => {
+ const genericNames = new Set(ctx.cfg.rules.naming_convention.generic_names ?? []);
+ if (!node.name || genericNames.has(node.name.toLowerCase())) {
+ return {
+ rule: metadata10.id,
+ severity: metadata10.severity,
+ path: ctx.path,
+ message: `Node ${node.id} uses a generic name "${node.name ?? ""}" (rename it to describe the action)`,
+ nodeId: node.id,
+ line: ctx.nodeLines?.[node.id],
+ raw_details: 'Rename the node to describe its purpose (e.g., "Check subscription status" instead of "IF") for easier reviews and debugging.'
+ };
+ }
+ return null;
+ });
+
+ // src/rules/lib/r11-deprecated-nodes.ts
+ var metadata11 = {
+ id: "R11",
+ name: "deprecated_nodes",
+ severity: "should",
+ description: "Warns about deprecated node types and suggests alternatives.",
+ details: "Helps maintain workflows using current, supported node implementations."
+ };
+ var DEPRECATED_NODES = {
+ "n8n-nodes-base.splitInBatches": "Use Loop over items instead",
+ "n8n-nodes-base.executeWorkflow": "Use Execute Workflow (Sub-Workflow) instead"
+ };
+ var r11DeprecatedNodes = createNodeRule(metadata11.id, metadata11.name, (node, graph, ctx) => {
+ if (DEPRECATED_NODES[node.type]) {
+ return {
+ rule: metadata11.id,
+ severity: metadata11.severity,
+ path: ctx.path,
+ message: `Node ${node.name || node.id} uses deprecated type ${node.type} (replace with ${DEPRECATED_NODES[node.type]})`,
+ nodeId: node.id,
+ line: ctx.nodeLines?.[node.id],
+ raw_details: `Replace this node with ${DEPRECATED_NODES[node.type]} so future n8n upgrades don\u2019t break the workflow.`
+ };
+ }
+ return null;
+ });
+
+ // src/rules/lib/r12-unhandled-error-path.ts
+ var metadata12 = {
+ id: "R12",
+ name: "unhandled_error_path",
+ severity: "must",
+ description: "Ensures nodes with error outputs have connected error handling branches.",
+ details: "Prevents silent failures by requiring explicit error path handling."
+ };
+ var r12UnhandledErrorPath = createNodeRule(metadata12.id, metadata12.name, (node, graph, ctx) => {
+ if (!isErrorProneNode(node.type)) return null;
+ const hasErrorPath = graph.edges.some((edge) => {
+ if (edge.from !== node.id) return false;
+ if (edge.on === "error") return true;
+ const targetNode = graph.nodes.find((candidate) => candidate.id === edge.to);
+ return targetNode ? isErrorHandlerNode(targetNode.type, targetNode.name) : false;
+ });
+ if (!hasErrorPath) {
+ return {
+ rule: metadata12.id,
+ severity: metadata12.severity,
+ path: ctx.path,
+ message: `Node ${node.name || node.id} has no error branch (add a red connector to handler)`,
+ nodeId: node.id,
+ line: ctx.nodeLines?.[node.id],
+ raw_details: "Add an error (red) branch to a Stop and Error or logging/alert node so failures do not disappear silently."
+ };
+ }
+ return null;
+ });
+
+ // src/rules/lib/r13-webhook-acknowledgment.ts
+ var metadata13 = {
+ id: "R13",
+ name: "webhook_acknowledgment",
+ severity: "must",
+ description: "Detects webhooks performing heavy processing without immediate acknowledgment.",
+ details: "Prevents timeout and duplicate events by requiring 'Respond to Webhook' node before heavy operations (HTTP requests, database queries, AI/LLM calls)."
+ };
+ function r13WebhookAcknowledgment(graph, ctx) {
+ const cfg = ctx.cfg.rules.webhook_acknowledgment;
+ if (!cfg?.enabled) return [];
+ const findings = [];
+ const webhookNodes = graph.nodes.filter(
+ (node) => node.type === "n8n-nodes-base.webhook" || node.type.includes("webhook") && !node.type.includes("respondToWebhook")
+ );
+ for (const webhookNode of webhookNodes) {
+ const directDownstream = graph.edges.filter((edge) => edge.from === webhookNode.id).map((edge) => graph.nodes.find((n) => n.id === edge.to)).filter((n) => !!n);
+ if (directDownstream.length === 0) continue;
+ const hasImmediateResponse = directDownstream.some(
+ (node) => node.type === "n8n-nodes-base.respondToWebhook" || /respond.*webhook/i.test(node.type) || /respond.*webhook/i.test(node.name || "")
+ );
+ if (hasImmediateResponse) continue;
+ const heavyNodeTypes = cfg.heavy_node_types || [
+ "n8n-nodes-base.httpRequest",
+ "n8n-nodes-base.postgres",
+ "n8n-nodes-base.mysql",
+ "n8n-nodes-base.mongodb",
+ "n8n-nodes-base.openAi",
+ "n8n-nodes-base.anthropic"
+ ];
+ const hasHeavyProcessing = directDownstream.some(
+ (node) => heavyNodeTypes.includes(node.type) || /loop|batch/i.test(node.type)
+ );
+ if (hasHeavyProcessing) {
+ findings.push({
+ rule: metadata13.id,
+ severity: metadata13.severity,
+ path: ctx.path,
+ message: `Webhook "${webhookNode.name || webhookNode.id}" performs heavy processing before acknowledgment (risk of timeout/duplicates)`,
+ nodeId: webhookNode.id,
+ line: ctx.nodeLines?.[webhookNode.id],
+ raw_details: `Add a "Respond to Webhook" node immediately after the webhook trigger (return 200/204), then perform heavy processing. This prevents webhook timeouts and duplicate events.`
  });
  }
  }
  return findings;
  }
+
+ // src/rules/lib/r14-retry-after-compliance.ts
+ var metadata14 = {
+ id: "R14",
+ name: "retry_after_compliance",
+ severity: "should",
+ description: "Detects HTTP nodes with retry logic that ignore Retry-After headers from 429/503 responses.",
+ details: "APIs return Retry-After headers (seconds or HTTP date) to indicate when to retry. Ignoring these causes aggressive retry storms, wasted attempts, and potential API bans. Respecting server guidance prevents IP blocking and extended backoffs."
+ };
+ var r14RetryAfterCompliance = createNodeRule(metadata14.id, metadata14.name, (node, graph, ctx) => {
+ if (!isApiNode(node.type)) return null;
+ const params = node.params ?? {};
+ const options = params.options ?? {};
+ const retryCandidates = [
+ options.retryOnFail,
+ params.retryOnFail,
+ node.flags?.retryOnFail
+ ];
+ const retryOnFail = retryCandidates.find((value) => value !== void 0 && value !== null);
+ if (!retryOnFail || retryOnFail === false) return null;
+ if (typeof retryOnFail === "string") {
+ const normalized = retryOnFail.trim().toLowerCase();
+ if (retryOnFail.includes("{{") && normalized !== "true") {
+ return null;
+ }
+ }
+ const waitBetweenTries = node.flags?.waitBetweenTries;
+ if (waitBetweenTries !== void 0 && waitBetweenTries !== null) {
+ if (typeof waitBetweenTries === "number") return null;
+ if (typeof waitBetweenTries === "string" && !isNaN(Number(waitBetweenTries)) && !waitBetweenTries.includes("{{")) {
+ return null;
+ }
+ }
+ const nodeStr = JSON.stringify(node);
+ const hasRetryAfterLogic = /retry[-_]?after|retryafter/i.test(nodeStr);
+ if (hasRetryAfterLogic) {
+ return null;
+ }
+ return {
+ rule: metadata14.id,
+ severity: metadata14.severity,
+ path: ctx.path,
+ message: `Node ${node.name || node.id} has retry logic but ignores Retry-After headers (429/503 responses)`,
+ raw_details: `Add expression to parse Retry-After header: const retryAfter = $json.headers['retry-after']; const delay = retryAfter ? (parseInt(retryAfter) || new Date(retryAfter) - Date.now()) : Math.min(1000 * Math.pow(2, $execution.retryCount), 60000); This prevents API bans and respects server rate limits.`,
+ nodeId: node.id,
+ line: ctx.nodeLines?.[node.id]
+ };
+ });
+
+ // src/rules/index.ts
  var rules = [
  r1Retry,
  r2ErrorHandling,
@@ -1054,104 +1180,20 @@ function runAllRules(graph, ctx) {
 
  // src/rules/metadata.ts
  var RULES_METADATA = [
- {
- id: "R1",
- name: "rate_limit_retry",
- severity: "must",
- description: "Ensures that nodes making external API calls have a retry mechanism configured.",
- details: "Critical for building resilient workflows that can handle transient network issues or temporary service unavailability."
- },
- {
- id: "R2",
- name: "error_handling",
- severity: "must",
- description: "Prevents the use of configurations that might hide errors.",
- details: "Workflows should explicitly handle errors rather than ignoring them with continueOnFail: true."
- },
- {
- id: "R3",
- name: "idempotency",
- severity: "should",
- description: "Guards against operations that are not idempotent with retries configured.",
- details: "Detects patterns where a webhook trigger could lead to duplicate processing in databases or external services."
- },
- {
- id: "R4",
- name: "secrets",
- severity: "must",
- description: "Detects hardcoded secrets, API keys, or credentials within node parameters.",
- details: "All secrets should be stored securely using credential management systems."
- },
- {
- id: "R5",
- name: "dead_ends",
- severity: "should",
- description: "Finds nodes or workflow branches not connected to any other node.",
- details: "Indicates incomplete or dead logic that should be reviewed or removed."
- },
- {
- id: "R6",
- name: "long_running",
- severity: "should",
- description: "Flags workflows with potential for excessive runtime.",
- details: "Detects loops with high iteration counts or long timeouts that could cause performance issues."
- },
- {
- id: "R7",
- name: "alert_log_enforcement",
- severity: "should",
- description: "Ensures critical paths include logging or alerting steps.",
- details: "For example, a failed payment processing branch should trigger an alert for monitoring."
- },
- {
- id: "R8",
- name: "unused_data",
- severity: "nit",
- description: "Detects when node output data is not consumed by subsequent nodes.",
- details: "Identifies unnecessary data processing that could be optimized or removed."
- },
- {
- id: "R9",
- name: "config_literals",
- severity: "should",
- description: "Flags hardcoded literals (URLs, environment tags, tenant IDs) that should come from configuration.",
- details: "Promotes externalized configuration and prevents hardcoded environment-specific values."
- },
- {
- id: "R10",
- name: "naming_convention",
- severity: "nit",
- description: "Enforces consistent and descriptive naming for nodes.",
- details: "Improves workflow readability and maintainability (e.g., 'Fetch Customer Data from CRM' vs 'HTTP Request')."
- },
- {
- id: "R11",
- name: "deprecated_nodes",
- severity: "should",
- description: "Warns about deprecated node types and suggests alternatives.",
- details: "Helps maintain workflows using current, supported node implementations."
- },
- {
- id: "R12",
- name: "unhandled_error_path",
- severity: "must",
- description: "Ensures nodes with error outputs have connected error handling branches.",
- details: "Prevents silent failures by requiring explicit error path handling."
- },
- {
- id: "R13",
- name: "webhook_acknowledgment",
- severity: "must",
- description: "Detects webhooks performing heavy processing without immediate acknowledgment.",
- details: "Prevents timeout and duplicate events by requiring 'Respond to Webhook' node before heavy operations (HTTP requests, database queries, AI/LLM calls)."
- },
- {
- id: "R14",
- name: "retry_after_compliance",
- severity: "should",
- description: "Detects HTTP nodes with retry logic that ignore Retry-After headers from 429/503 responses.",
- details: "APIs return Retry-After headers (seconds or HTTP date) to indicate when to retry. Ignoring these causes aggressive retry storms, wasted attempts, and potential API bans. Respecting server guidance prevents IP blocking and extended backoffs."
- }
+ metadata,
+ metadata2,
+ metadata3,
+ metadata4,
+ metadata5,
+ metadata6,
+ metadata7,
+ metadata8,
+ metadata9,
+ metadata10,
+ metadata11,
+ metadata12,
+ metadata13,
+ metadata14
  ];
 
  // src/config/default-config.ts