@replikanti/flowlint-core 0.8.0 → 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +10 -1
- package/dist/index.d.ts +10 -1
- package/dist/index.js +311 -166
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +311 -166
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -630,8 +630,15 @@ function createHardcodedStringRule({
   return createNodeRule(ruleId, configKey, logic);
 }
 
-// src/rules/
-var
+// src/rules/lib/r1-retry.ts
+var metadata = {
+  id: "R1",
+  name: "rate_limit_retry",
+  severity: "must",
+  description: "Ensures that nodes making external API calls have a retry mechanism configured.",
+  details: "Critical for building resilient workflows that can handle transient network issues or temporary service unavailability."
+};
+var r1Retry = createNodeRule(metadata.id, metadata.name, (node, graph, ctx) => {
   if (!isApiNode(node.type)) return null;
   const params = node.params ?? {};
   const options = params.options ?? {};
@@ -651,8 +658,8 @@ var r1Retry = createNodeRule("R1", "rate_limit_retry", (node, graph, ctx) => {
     }
   }
   return {
-    rule:
-    severity:
+    rule: metadata.id,
+    severity: metadata.severity,
     path: ctx.path,
     message: `Node ${node.name || node.id} is missing retry/backoff configuration`,
     raw_details: `In the node properties, enable "Retry on Fail" under Options.`,
@@ -660,11 +667,20 @@ var r1Retry = createNodeRule("R1", "rate_limit_retry", (node, graph, ctx) =>
     line: ctx.nodeLines?.[node.id]
   };
 });
-
+
+// src/rules/lib/r2-error-handling.ts
+var metadata2 = {
+  id: "R2",
+  name: "error_handling",
+  severity: "must",
+  description: "Prevents the use of configurations that might hide errors.",
+  details: "Workflows should explicitly handle errors rather than ignoring them with continueOnFail: true."
+};
+var r2ErrorHandling = createNodeRule(metadata2.id, metadata2.name, (node, graph, ctx) => {
   if (ctx.cfg.rules.error_handling.forbid_continue_on_fail && node.flags?.continueOnFail) {
     return {
-      rule:
-      severity:
+      rule: metadata2.id,
+      severity: metadata2.severity,
       path: ctx.path,
       message: `Node ${node.name || node.id} has continueOnFail enabled (disable it and route errors explicitly)`,
       nodeId: node.id,
@@ -674,152 +690,15 @@ var r2ErrorHandling = createNodeRule("R2", "error_handling", (node, graph, ctx)
   }
   return null;
 });
-
-
-
-
-
-  details: "Move API keys/tokens into Credentials or environment variables; the workflow should only reference {{$credentials.*}} expressions."
-});
-var r9ConfigLiterals = createHardcodedStringRule({
-  ruleId: "R9",
+
+// src/rules/lib/r3-idempotency.ts
+var metadata3 = {
+  id: "R3",
+  name: "idempotency",
   severity: "should",
-
-
-  details: "Move environment-specific URLs/IDs into expressions or credentials (e.g., {{$env.API_BASE_URL}}) so the workflow is portable."
-});
-var r10NamingConvention = createNodeRule("R10", "naming_convention", (node, graph, ctx) => {
-  const genericNames = new Set(ctx.cfg.rules.naming_convention.generic_names ?? []);
-  if (!node.name || genericNames.has(node.name.toLowerCase())) {
-    return {
-      rule: "R10",
-      severity: "nit",
-      path: ctx.path,
-      message: `Node ${node.id} uses a generic name "${node.name ?? ""}" (rename it to describe the action)`,
-      nodeId: node.id,
-      line: ctx.nodeLines?.[node.id],
-      raw_details: 'Rename the node to describe its purpose (e.g., "Check subscription status" instead of "IF") for easier reviews and debugging.'
-    };
-  }
-  return null;
-});
-var DEPRECATED_NODES = {
-  "n8n-nodes-base.splitInBatches": "Use Loop over items instead",
-  "n8n-nodes-base.executeWorkflow": "Use Execute Workflow (Sub-Workflow) instead"
+  description: "Guards against operations that are not idempotent with retries configured.",
+  details: "Detects patterns where a webhook trigger could lead to duplicate processing in databases or external services."
 };
-var r11DeprecatedNodes = createNodeRule("R11", "deprecated_nodes", (node, graph, ctx) => {
-  if (DEPRECATED_NODES[node.type]) {
-    return {
-      rule: "R11",
-      severity: "should",
-      path: ctx.path,
-      message: `Node ${node.name || node.id} uses deprecated type ${node.type} (replace with ${DEPRECATED_NODES[node.type]})`,
-      nodeId: node.id,
-      line: ctx.nodeLines?.[node.id],
-      raw_details: `Replace this node with ${DEPRECATED_NODES[node.type]} so future n8n upgrades don\xE2\u20AC\u2122t break the workflow.`
-    };
-  }
-  return null;
-});
-var r12UnhandledErrorPath = createNodeRule("R12", "unhandled_error_path", (node, graph, ctx) => {
-  if (!isErrorProneNode(node.type)) return null;
-  const hasErrorPath = graph.edges.some((edge) => {
-    if (edge.from !== node.id) return false;
-    if (edge.on === "error") return true;
-    const targetNode = graph.nodes.find((candidate) => candidate.id === edge.to);
-    return targetNode ? isErrorHandlerNode(targetNode.type, targetNode.name) : false;
-  });
-  if (!hasErrorPath) {
-    return {
-      rule: "R12",
-      severity: "must",
-      path: ctx.path,
-      message: `Node ${node.name || node.id} has no error branch (add a red connector to handler)`,
-      nodeId: node.id,
-      line: ctx.nodeLines?.[node.id],
-      raw_details: "Add an error (red) branch to a Stop and Error or logging/alert node so failures do not disappear silently."
-    };
-  }
-  return null;
-});
-function r13WebhookAcknowledgment(graph, ctx) {
-  const cfg = ctx.cfg.rules.webhook_acknowledgment;
-  if (!cfg?.enabled) return [];
-  const findings = [];
-  const webhookNodes = graph.nodes.filter(
-    (node) => node.type === "n8n-nodes-base.webhook" || node.type.includes("webhook") && !node.type.includes("respondToWebhook")
-  );
-  for (const webhookNode of webhookNodes) {
-    const directDownstream = graph.edges.filter((edge) => edge.from === webhookNode.id).map((edge) => graph.nodes.find((n) => n.id === edge.to)).filter((n) => !!n);
-    if (directDownstream.length === 0) continue;
-    const hasImmediateResponse = directDownstream.some(
-      (node) => node.type === "n8n-nodes-base.respondToWebhook" || /respond.*webhook/i.test(node.type) || /respond.*webhook/i.test(node.name || "")
-    );
-    if (hasImmediateResponse) continue;
-    const heavyNodeTypes = cfg.heavy_node_types || [
-      "n8n-nodes-base.httpRequest",
-      "n8n-nodes-base.postgres",
-      "n8n-nodes-base.mysql",
-      "n8n-nodes-base.mongodb",
-      "n8n-nodes-base.openAi",
-      "n8n-nodes-base.anthropic"
-    ];
-    const hasHeavyProcessing = directDownstream.some(
-      (node) => heavyNodeTypes.includes(node.type) || /loop|batch/i.test(node.type)
-    );
-    if (hasHeavyProcessing) {
-      findings.push({
-        rule: "R13",
-        severity: "must",
-        path: ctx.path,
-        message: `Webhook "${webhookNode.name || webhookNode.id}" performs heavy processing before acknowledgment (risk of timeout/duplicates)`,
-        nodeId: webhookNode.id,
-        line: ctx.nodeLines?.[webhookNode.id],
-        raw_details: `Add a "Respond to Webhook" node immediately after the webhook trigger (return 200/204), then perform heavy processing. This prevents webhook timeouts and duplicate events.`
-      });
-    }
-  }
-  return findings;
-}
-var r14RetryAfterCompliance = createNodeRule("R14", "retry_after_compliance", (node, graph, ctx) => {
-  if (!isApiNode(node.type)) return null;
-  const params = node.params ?? {};
-  const options = params.options ?? {};
-  const retryCandidates = [
-    options.retryOnFail,
-    params.retryOnFail,
-    node.flags?.retryOnFail
-  ];
-  const retryOnFail = retryCandidates.find((value) => value !== void 0 && value !== null);
-  if (!retryOnFail || retryOnFail === false) return null;
-  if (typeof retryOnFail === "string") {
-    const normalized = retryOnFail.trim().toLowerCase();
-    if (retryOnFail.includes("{{") && normalized !== "true") {
-      return null;
-    }
-  }
-  const waitBetweenTries = node.flags?.waitBetweenTries;
-  if (waitBetweenTries !== void 0 && waitBetweenTries !== null) {
-    if (typeof waitBetweenTries === "number") return null;
-    if (typeof waitBetweenTries === "string" && !isNaN(Number(waitBetweenTries)) && !waitBetweenTries.includes("{{")) {
-      return null;
-    }
-  }
-  const nodeStr = JSON.stringify(node);
-  const hasRetryAfterLogic = /retry[-_]?after|retryafter/i.test(nodeStr);
-  if (hasRetryAfterLogic) {
-    return null;
-  }
-  return {
-    rule: "R14",
-    severity: "should",
-    path: ctx.path,
-    message: `Node ${node.name || node.id} has retry logic but ignores Retry-After headers (429/503 responses)`,
-    raw_details: `Add expression to parse Retry-After header: const retryAfter = $json.headers['retry-after']; const delay = retryAfter ? (parseInt(retryAfter) || new Date(retryAfter) - Date.now()) : Math.min(1000 * Math.pow(2, $execution.retryCount), 60000); This prevents API bans and respects server rate limits.`,
-    nodeId: node.id,
-    line: ctx.nodeLines?.[node.id]
-  };
-});
 function r3Idempotency(graph, ctx) {
   const cfg = ctx.cfg.rules.idempotency;
   if (!cfg?.enabled) return [];
@@ -836,8 +715,8 @@ function r3Idempotency(graph, ctx)
     );
     if (!hasGuard) {
       findings.push({
-        rule:
-        severity:
+        rule: metadata3.id,
+        severity: metadata3.severity,
         path: ctx.path,
         message: `The mutation path ending at "${mutationNode.name || mutationNode.id}" appears to be missing an idempotency guard.`,
         raw_details: `Ensure one of the upstream nodes or the mutation node itself uses an idempotency key, such as one of: ${(cfg.key_field_candidates ?? []).join(
@@ -850,6 +729,31 @@ function r3Idempotency(graph, ctx)
   }
   return findings;
 }
+
+// src/rules/lib/r4-secrets.ts
+var metadata4 = {
+  id: "R4",
+  name: "secrets",
+  severity: "must",
+  description: "Detects hardcoded secrets, API keys, or credentials within node parameters.",
+  details: "All secrets should be stored securely using credential management systems."
+};
+var r4Secrets = createHardcodedStringRule({
+  ruleId: metadata4.id,
+  severity: metadata4.severity,
+  configKey: "secrets",
+  messageFn: (node) => `Node ${node.name || node.id} contains a hardcoded secret (move it to credentials/env vars)`,
+  details: "Move API keys/tokens into Credentials or environment variables; the workflow should only reference {{$credentials.*}} expressions."
+});
+
+// src/rules/lib/r5-dead-ends.ts
+var metadata5 = {
+  id: "R5",
+  name: "dead_ends",
+  severity: "should",
+  description: "Finds nodes or workflow branches not connected to any other node.",
+  details: "Indicates incomplete or dead logic that should be reviewed or removed."
+};
 function r5DeadEnds(graph, ctx) {
   const cfg = ctx.cfg.rules.dead_ends;
   if (!cfg?.enabled) return [];
@@ -861,8 +765,8 @@ function r5DeadEnds(graph, ctx)
   for (const node of graph.nodes) {
     if ((outgoing.get(node.id) || 0) === 0 && !isTerminalNode(node.type, node.name)) {
       findings.push({
-        rule:
-        severity:
+        rule: metadata5.id,
+        severity: metadata5.severity,
         path: ctx.path,
         message: `Node ${node.name || node.id} has no outgoing connections (either wire it up or remove it)`,
         nodeId: node.id,
@@ -873,6 +777,15 @@ function r5DeadEnds(graph, ctx)
   }
   return findings;
 }
+
+// src/rules/lib/r6-long-running.ts
+var metadata6 = {
+  id: "R6",
+  name: "long_running",
+  severity: "should",
+  description: "Flags workflows with potential for excessive runtime.",
+  details: "Detects loops with high iteration counts or long timeouts that could cause performance issues."
+};
 function r6LongRunning(graph, ctx) {
   const cfg = ctx.cfg.rules.long_running;
   if (!cfg?.enabled) return [];
@@ -887,32 +800,41 @@ function r6LongRunning(graph, ctx)
     ]);
     if (!iterations || cfg.max_iterations && iterations > cfg.max_iterations) {
       findings.push({
-        rule:
-        severity:
+        rule: metadata6.id,
+        severity: metadata6.severity,
         path: ctx.path,
         message: `Node ${node.name || node.id} allows ${iterations ?? "unbounded"} iterations (limit ${cfg.max_iterations}; set a lower cap)`,
         nodeId: node.id,
         line: ctx.nodeLines?.[node.id],
-        raw_details: `Set Options > Max iterations to \
+        raw_details: `Set Options > Max iterations to \u2264 ${cfg.max_iterations} or split the processing into smaller batches.`
       });
     }
     if (cfg.timeout_ms) {
       const timeout = readNumber(node.params, ["timeout", "timeoutMs", "options.timeout"]);
       if (timeout && timeout > cfg.timeout_ms) {
         findings.push({
-          rule:
-          severity:
+          rule: metadata6.id,
+          severity: metadata6.severity,
          path: ctx.path,
           message: `Node ${node.name || node.id} uses timeout ${timeout}ms (limit ${cfg.timeout_ms}ms; shorten the timeout or break work apart)`,
           nodeId: node.id,
           line: ctx.nodeLines?.[node.id],
-          raw_details: `Lower the timeout to \
+          raw_details: `Lower the timeout to \u2264 ${cfg.timeout_ms}ms or split the workflow so no single step blocks for too long.`
         });
       }
     }
   }
   return findings;
 }
+
+// src/rules/lib/r7-alert-log-enforcement.ts
+var metadata7 = {
+  id: "R7",
+  name: "alert_log_enforcement",
+  severity: "should",
+  description: "Ensures critical paths include logging or alerting steps.",
+  details: "For example, a failed payment processing branch should trigger an alert for monitoring."
+};
 function r7AlertLogEnforcement(graph, ctx) {
   const cfg = ctx.cfg.rules.alert_log_enforcement;
   if (!cfg?.enabled) return [];
@@ -944,8 +866,8 @@ function r7AlertLogEnforcement(graph, ctx)
     }
     if (!isHandled) {
       findings.push({
-        rule:
-        severity:
+        rule: metadata7.id,
+        severity: metadata7.severity,
         path: ctx.path,
         message: `Error path from node ${fromNode.name || fromNode.id} has no log/alert before rejoining (add notification node)`,
         nodeId: fromNode.id,
@@ -956,6 +878,15 @@ function r7AlertLogEnforcement(graph, ctx)
   }
   return findings;
 }
+
+// src/rules/lib/r8-unused-data.ts
+var metadata8 = {
+  id: "R8",
+  name: "unused_data",
+  severity: "nit",
+  description: "Detects when node output data is not consumed by subsequent nodes.",
+  details: "Identifies unnecessary data processing that could be optimized or removed."
+};
 function r8UnusedData(graph, ctx) {
   const cfg = ctx.cfg.rules.unused_data;
   if (!cfg?.enabled) return [];
@@ -972,18 +903,213 @@ function r8UnusedData(graph, ctx)
     });
     if (!leadsToConsumer) {
       findings.push({
-        rule:
-        severity:
+        rule: metadata8.id,
+        severity: metadata8.severity,
         path: ctx.path,
         message: `Node "${node.name || node.id}" produces data that never reaches any consumer`,
         nodeId: node.id,
         line: ctx.nodeLines?.[node.id],
-        raw_details: "Wire this branch into a consumer (DB/API/response) or remove it\
+        raw_details: "Wire this branch into a consumer (DB/API/response) or remove it\u2014otherwise the data produced here is never used."
+      });
+    }
+  }
+  return findings;
+}
+
+// src/rules/lib/r9-config-literals.ts
+var metadata9 = {
+  id: "R9",
+  name: "config_literals",
+  severity: "should",
+  description: "Flags hardcoded literals (URLs, environment tags, tenant IDs) that should come from configuration.",
+  details: "Promotes externalized configuration and prevents hardcoded environment-specific values."
+};
+var r9ConfigLiterals = createHardcodedStringRule({
+  ruleId: metadata9.id,
+  severity: metadata9.severity,
+  configKey: "config_literals",
+  messageFn: (node, value) => `Node ${node.name || node.id} contains env-specific literal "${value.substring(0, 40)}" (move to expression/credential)`,
+  details: "Move environment-specific URLs/IDs into expressions or credentials (e.g., {{$env.API_BASE_URL}}) so the workflow is portable."
+});
+
+// src/rules/lib/r10-naming-convention.ts
+var metadata10 = {
+  id: "R10",
+  name: "naming_convention",
+  severity: "nit",
+  description: "Enforces consistent and descriptive naming for nodes.",
+  details: "Enforces consistent and descriptive naming for nodes. Improves workflow readability and maintainability (e.g., 'Fetch Customer Data from CRM' vs 'HTTP Request')."
+};
+var r10NamingConvention = createNodeRule(metadata10.id, metadata10.name, (node, graph, ctx) => {
+  const genericNames = new Set(ctx.cfg.rules.naming_convention.generic_names ?? []);
+  if (!node.name || genericNames.has(node.name.toLowerCase())) {
+    return {
+      rule: metadata10.id,
+      severity: metadata10.severity,
+      path: ctx.path,
+      message: `Node ${node.id} uses a generic name "${node.name ?? ""}" (rename it to describe the action)`,
+      nodeId: node.id,
+      line: ctx.nodeLines?.[node.id],
+      raw_details: 'Rename the node to describe its purpose (e.g., "Check subscription status" instead of "IF") for easier reviews and debugging.'
+    };
+  }
+  return null;
+});
+
+// src/rules/lib/r11-deprecated-nodes.ts
+var metadata11 = {
+  id: "R11",
+  name: "deprecated_nodes",
+  severity: "should",
+  description: "Warns about deprecated node types and suggests alternatives.",
+  details: "Helps maintain workflows using current, supported node implementations."
+};
+var DEPRECATED_NODES = {
+  "n8n-nodes-base.splitInBatches": "Use Loop over items instead",
+  "n8n-nodes-base.executeWorkflow": "Use Execute Workflow (Sub-Workflow) instead"
+};
+var r11DeprecatedNodes = createNodeRule(metadata11.id, metadata11.name, (node, graph, ctx) => {
+  if (DEPRECATED_NODES[node.type]) {
+    return {
+      rule: metadata11.id,
+      severity: metadata11.severity,
+      path: ctx.path,
+      message: `Node ${node.name || node.id} uses deprecated type ${node.type} (replace with ${DEPRECATED_NODES[node.type]})`,
+      nodeId: node.id,
+      line: ctx.nodeLines?.[node.id],
+      raw_details: `Replace this node with ${DEPRECATED_NODES[node.type]} so future n8n upgrades don\u2019t break the workflow.`
+    };
+  }
+  return null;
+});
+
+// src/rules/lib/r12-unhandled-error-path.ts
+var metadata12 = {
+  id: "R12",
+  name: "unhandled_error_path",
+  severity: "must",
+  description: "Ensures nodes with error outputs have connected error handling branches.",
+  details: "Prevents silent failures by requiring explicit error path handling."
+};
+var r12UnhandledErrorPath = createNodeRule(metadata12.id, metadata12.name, (node, graph, ctx) => {
+  if (!isErrorProneNode(node.type)) return null;
+  const hasErrorPath = graph.edges.some((edge) => {
+    if (edge.from !== node.id) return false;
+    if (edge.on === "error") return true;
+    const targetNode = graph.nodes.find((candidate) => candidate.id === edge.to);
+    return targetNode ? isErrorHandlerNode(targetNode.type, targetNode.name) : false;
+  });
+  if (!hasErrorPath) {
+    return {
+      rule: metadata12.id,
+      severity: metadata12.severity,
+      path: ctx.path,
+      message: `Node ${node.name || node.id} has no error branch (add a red connector to handler)`,
+      nodeId: node.id,
+      line: ctx.nodeLines?.[node.id],
+      raw_details: "Add an error (red) branch to a Stop and Error or logging/alert node so failures do not disappear silently."
+    };
+  }
+  return null;
+});
+
+// src/rules/lib/r13-webhook-acknowledgment.ts
+var metadata13 = {
+  id: "R13",
+  name: "webhook_acknowledgment",
+  severity: "must",
+  description: "Detects webhooks performing heavy processing without immediate acknowledgment.",
+  details: "Prevents timeout and duplicate events by requiring 'Respond to Webhook' node before heavy operations (HTTP requests, database queries, AI/LLM calls)."
+};
+function r13WebhookAcknowledgment(graph, ctx) {
+  const cfg = ctx.cfg.rules.webhook_acknowledgment;
+  if (!cfg?.enabled) return [];
+  const findings = [];
+  const webhookNodes = graph.nodes.filter(
+    (node) => node.type === "n8n-nodes-base.webhook" || node.type.includes("webhook") && !node.type.includes("respondToWebhook")
+  );
+  for (const webhookNode of webhookNodes) {
+    const directDownstream = graph.edges.filter((edge) => edge.from === webhookNode.id).map((edge) => graph.nodes.find((n) => n.id === edge.to)).filter((n) => !!n);
+    if (directDownstream.length === 0) continue;
+    const hasImmediateResponse = directDownstream.some(
+      (node) => node.type === "n8n-nodes-base.respondToWebhook" || /respond.*webhook/i.test(node.type) || /respond.*webhook/i.test(node.name || "")
+    );
+    if (hasImmediateResponse) continue;
+    const heavyNodeTypes = cfg.heavy_node_types || [
+      "n8n-nodes-base.httpRequest",
+      "n8n-nodes-base.postgres",
+      "n8n-nodes-base.mysql",
+      "n8n-nodes-base.mongodb",
+      "n8n-nodes-base.openAi",
+      "n8n-nodes-base.anthropic"
+    ];
+    const hasHeavyProcessing = directDownstream.some(
+      (node) => heavyNodeTypes.includes(node.type) || /loop|batch/i.test(node.type)
+    );
+    if (hasHeavyProcessing) {
+      findings.push({
+        rule: metadata13.id,
+        severity: metadata13.severity,
+        path: ctx.path,
+        message: `Webhook "${webhookNode.name || webhookNode.id}" performs heavy processing before acknowledgment (risk of timeout/duplicates)`,
+        nodeId: webhookNode.id,
+        line: ctx.nodeLines?.[webhookNode.id],
+        raw_details: `Add a "Respond to Webhook" node immediately after the webhook trigger (return 200/204), then perform heavy processing. This prevents webhook timeouts and duplicate events.`
       });
     }
   }
   return findings;
 }
+
+// src/rules/lib/r14-retry-after-compliance.ts
+var metadata14 = {
+  id: "R14",
+  name: "retry_after_compliance",
+  severity: "should",
+  description: "Detects HTTP nodes with retry logic that ignore Retry-After headers from 429/503 responses.",
+  details: "APIs return Retry-After headers (seconds or HTTP date) to indicate when to retry. Ignoring these causes aggressive retry storms, wasted attempts, and potential API bans. Respecting server guidance prevents IP blocking and extended backoffs."
+};
+var r14RetryAfterCompliance = createNodeRule(metadata14.id, metadata14.name, (node, graph, ctx) => {
+  if (!isApiNode(node.type)) return null;
+  const params = node.params ?? {};
+  const options = params.options ?? {};
+  const retryCandidates = [
+    options.retryOnFail,
+    params.retryOnFail,
+    node.flags?.retryOnFail
+  ];
+  const retryOnFail = retryCandidates.find((value) => value !== void 0 && value !== null);
+  if (!retryOnFail || retryOnFail === false) return null;
+  if (typeof retryOnFail === "string") {
+    const normalized = retryOnFail.trim().toLowerCase();
+    if (retryOnFail.includes("{{") && normalized !== "true") {
+      return null;
+    }
+  }
+  const waitBetweenTries = node.flags?.waitBetweenTries;
+  if (waitBetweenTries !== void 0 && waitBetweenTries !== null) {
+    if (typeof waitBetweenTries === "number") return null;
+    if (typeof waitBetweenTries === "string" && !isNaN(Number(waitBetweenTries)) && !waitBetweenTries.includes("{{")) {
+      return null;
+    }
+  }
+  const nodeStr = JSON.stringify(node);
+  const hasRetryAfterLogic = /retry[-_]?after|retryafter/i.test(nodeStr);
+  if (hasRetryAfterLogic) {
+    return null;
+  }
+  return {
+    rule: metadata14.id,
+    severity: metadata14.severity,
+    path: ctx.path,
+    message: `Node ${node.name || node.id} has retry logic but ignores Retry-After headers (429/503 responses)`,
+    raw_details: `Add expression to parse Retry-After header: const retryAfter = $json.headers['retry-after']; const delay = retryAfter ? (parseInt(retryAfter) || new Date(retryAfter) - Date.now()) : Math.min(1000 * Math.pow(2, $execution.retryCount), 60000); This prevents API bans and respects server rate limits.`,
+    nodeId: node.id,
+    line: ctx.nodeLines?.[node.id]
+  };
+});
+
+// src/rules/index.ts
 var rules = [
   r1Retry,
   r2ErrorHandling,
@@ -1004,6 +1130,24 @@ function runAllRules(graph, ctx)
   return rules.flatMap((rule) => rule(graph, ctx));
 }
 
+// src/rules/metadata.ts
+var RULES_METADATA = [
+  metadata,
+  metadata2,
+  metadata3,
+  metadata4,
+  metadata5,
+  metadata6,
+  metadata7,
+  metadata8,
+  metadata9,
+  metadata10,
+  metadata11,
+  metadata12,
+  metadata13,
+  metadata14
+];
+
 // src/config/default-config.ts
 var defaultConfig = {
   files: {
@@ -1219,6 +1363,7 @@ function mapSeverity(severity)
   return "notice";
 }
 export {
+  RULES_METADATA,
   ValidationError,
   buildAnnotations,
   buildCheckOutput,