autonomous-coding-toolkit 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +22 -0
- package/.claude-plugin/plugin.json +13 -0
- package/LICENSE +21 -0
- package/Makefile +21 -0
- package/README.md +140 -0
- package/SECURITY.md +28 -0
- package/agents/bash-expert.md +113 -0
- package/agents/dependency-auditor.md +138 -0
- package/agents/integration-tester.md +120 -0
- package/agents/lesson-scanner.md +149 -0
- package/agents/python-expert.md +179 -0
- package/agents/service-monitor.md +141 -0
- package/agents/shell-expert.md +147 -0
- package/benchmarks/runner.sh +147 -0
- package/benchmarks/tasks/01-rest-endpoint/rubric.sh +29 -0
- package/benchmarks/tasks/01-rest-endpoint/task.md +17 -0
- package/benchmarks/tasks/02-refactor-module/task.md +8 -0
- package/benchmarks/tasks/03-fix-integration-bug/task.md +8 -0
- package/benchmarks/tasks/04-add-test-coverage/task.md +8 -0
- package/benchmarks/tasks/05-multi-file-feature/task.md +8 -0
- package/bin/act.js +238 -0
- package/commands/autocode.md +6 -0
- package/commands/cancel-ralph.md +18 -0
- package/commands/code-factory.md +53 -0
- package/commands/create-prd.md +55 -0
- package/commands/ralph-loop.md +18 -0
- package/commands/run-plan.md +117 -0
- package/commands/submit-lesson.md +122 -0
- package/docs/ARCHITECTURE.md +630 -0
- package/docs/CONTRIBUTING.md +125 -0
- package/docs/lessons/0001-bare-exception-swallowing.md +34 -0
- package/docs/lessons/0002-async-def-without-await.md +28 -0
- package/docs/lessons/0003-create-task-without-callback.md +28 -0
- package/docs/lessons/0004-hardcoded-test-counts.md +28 -0
- package/docs/lessons/0005-sqlite-without-closing.md +33 -0
- package/docs/lessons/0006-venv-pip-path.md +27 -0
- package/docs/lessons/0007-runner-state-self-rejection.md +35 -0
- package/docs/lessons/0008-quality-gate-blind-spot.md +33 -0
- package/docs/lessons/0009-parser-overcount-empty-batches.md +36 -0
- package/docs/lessons/0010-local-outside-function-bash.md +33 -0
- package/docs/lessons/0011-batch-tests-for-unimplemented-code.md +36 -0
- package/docs/lessons/0012-api-markdown-unescaped-chars.md +33 -0
- package/docs/lessons/0013-export-prefix-env-parsing.md +33 -0
- package/docs/lessons/0014-decorator-registry-import-side-effect.md +43 -0
- package/docs/lessons/0015-frontend-backend-schema-drift.md +43 -0
- package/docs/lessons/0016-event-driven-cold-start-seeding.md +44 -0
- package/docs/lessons/0017-copy-paste-logic-diverges.md +43 -0
- package/docs/lessons/0018-layer-passes-pipeline-broken.md +45 -0
- package/docs/lessons/0019-systemd-envfile-ignores-export.md +41 -0
- package/docs/lessons/0020-persist-state-incrementally.md +44 -0
- package/docs/lessons/0021-dual-axis-testing.md +48 -0
- package/docs/lessons/0022-jsx-factory-shadowing.md +43 -0
- package/docs/lessons/0023-static-analysis-spiral.md +51 -0
- package/docs/lessons/0024-shared-pipeline-implementation.md +55 -0
- package/docs/lessons/0025-defense-in-depth-all-entry-points.md +65 -0
- package/docs/lessons/0026-linter-no-rules-false-enforcement.md +54 -0
- package/docs/lessons/0027-jsx-silent-prop-drop.md +64 -0
- package/docs/lessons/0028-no-infrastructure-in-client-code.md +49 -0
- package/docs/lessons/0029-never-write-secrets-to-files.md +61 -0
- package/docs/lessons/0030-cache-merge-not-replace.md +62 -0
- package/docs/lessons/0031-verify-units-at-boundaries.md +66 -0
- package/docs/lessons/0032-module-lifecycle-subscribe-unsubscribe.md +89 -0
- package/docs/lessons/0033-async-iteration-mutable-snapshot.md +72 -0
- package/docs/lessons/0034-caller-missing-await-silent-discard.md +65 -0
- package/docs/lessons/0035-duplicate-registration-silent-overwrite.md +85 -0
- package/docs/lessons/0036-websocket-dirty-disconnect.md +33 -0
- package/docs/lessons/0037-parallel-agents-worktree-corruption.md +31 -0
- package/docs/lessons/0038-subscribe-no-stored-ref.md +36 -0
- package/docs/lessons/0039-fallback-or-default-hides-bugs.md +34 -0
- package/docs/lessons/0040-event-firehose-filter-first.md +36 -0
- package/docs/lessons/0041-ambiguous-base-dir-path-nesting.md +32 -0
- package/docs/lessons/0042-spec-compliance-insufficient.md +36 -0
- package/docs/lessons/0043-exact-count-extensible-collections.md +32 -0
- package/docs/lessons/0044-relative-file-deps-worktree.md +39 -0
- package/docs/lessons/0045-iterative-design-improvement.md +33 -0
- package/docs/lessons/0046-plan-assertion-math-bugs.md +38 -0
- package/docs/lessons/0047-pytest-single-threaded-default.md +37 -0
- package/docs/lessons/0048-integration-wiring-batch.md +40 -0
- package/docs/lessons/0049-ab-verification.md +41 -0
- package/docs/lessons/0050-editing-sourced-files-during-execution.md +33 -0
- package/docs/lessons/0051-infrastructure-fixes-cant-self-heal.md +30 -0
- package/docs/lessons/0052-uncommitted-changes-poison-quality-gates.md +31 -0
- package/docs/lessons/0053-jq-compact-flag-inconsistency.md +31 -0
- package/docs/lessons/0054-parser-matches-inside-code-blocks.md +30 -0
- package/docs/lessons/0055-agents-compensate-for-garbled-prompts.md +31 -0
- package/docs/lessons/0056-grep-count-exit-code-on-zero.md +42 -0
- package/docs/lessons/0057-new-artifacts-break-git-clean-gates.md +42 -0
- package/docs/lessons/0058-dead-config-keys-never-consumed.md +49 -0
- package/docs/lessons/0059-contract-test-shared-structures.md +53 -0
- package/docs/lessons/0060-set-e-silent-death-in-runners.md +53 -0
- package/docs/lessons/0061-context-injection-dirty-state.md +50 -0
- package/docs/lessons/0062-sibling-bug-neighborhood-scan.md +29 -0
- package/docs/lessons/0063-one-flag-two-lifetimes.md +31 -0
- package/docs/lessons/0064-test-passes-wrong-reason.md +31 -0
- package/docs/lessons/0065-pipefail-grep-count-double-output.md +39 -0
- package/docs/lessons/0066-local-keyword-outside-function.md +37 -0
- package/docs/lessons/0067-stdin-hang-non-interactive-shell.md +36 -0
- package/docs/lessons/0068-agent-builds-wrong-thing-correctly.md +31 -0
- package/docs/lessons/0069-plan-quality-dominates-execution.md +30 -0
- package/docs/lessons/0070-spec-echo-back-prevents-drift.md +31 -0
- package/docs/lessons/0071-positive-instructions-outperform-negative.md +30 -0
- package/docs/lessons/0072-lost-in-the-middle-context-placement.md +30 -0
- package/docs/lessons/0073-unscoped-lessons-cause-false-positives.md +30 -0
- package/docs/lessons/0074-stale-context-injection-wrong-batch.md +32 -0
- package/docs/lessons/0075-research-artifacts-must-persist.md +32 -0
- package/docs/lessons/0076-wrong-decomposition-contaminates-downstream.md +30 -0
- package/docs/lessons/0077-cherry-pick-merges-need-manual-resolution.md +30 -0
- package/docs/lessons/0078-static-review-without-live-test.md +30 -0
- package/docs/lessons/0079-integration-wiring-batch-required.md +32 -0
- package/docs/lessons/FRAMEWORK.md +161 -0
- package/docs/lessons/SUMMARY.md +201 -0
- package/docs/lessons/TEMPLATE.md +85 -0
- package/docs/plans/2026-02-21-code-factory-v2-design.md +204 -0
- package/docs/plans/2026-02-21-code-factory-v2-implementation-plan.md +2189 -0
- package/docs/plans/2026-02-21-code-factory-v2-phase4-design.md +537 -0
- package/docs/plans/2026-02-21-code-factory-v2-phase4-implementation-plan.md +2012 -0
- package/docs/plans/2026-02-21-hardening-pass-design.md +108 -0
- package/docs/plans/2026-02-21-hardening-pass-plan.md +1378 -0
- package/docs/plans/2026-02-21-mab-research-report.md +406 -0
- package/docs/plans/2026-02-21-marketplace-restructure-design.md +240 -0
- package/docs/plans/2026-02-21-marketplace-restructure-plan.md +832 -0
- package/docs/plans/2026-02-21-phase4-completion-plan.md +697 -0
- package/docs/plans/2026-02-21-validator-suite-design.md +148 -0
- package/docs/plans/2026-02-21-validator-suite-plan.md +540 -0
- package/docs/plans/2026-02-22-mab-research-round2.md +556 -0
- package/docs/plans/2026-02-22-mab-run-design.md +462 -0
- package/docs/plans/2026-02-22-mab-run-plan.md +2046 -0
- package/docs/plans/2026-02-22-operations-design-methodology-research.md +681 -0
- package/docs/plans/2026-02-22-research-agent-failure-taxonomy.md +532 -0
- package/docs/plans/2026-02-22-research-code-guideline-policies.md +886 -0
- package/docs/plans/2026-02-22-research-codebase-audit-refactoring.md +908 -0
- package/docs/plans/2026-02-22-research-coding-standards-documentation.md +541 -0
- package/docs/plans/2026-02-22-research-competitive-landscape.md +687 -0
- package/docs/plans/2026-02-22-research-comprehensive-testing.md +1076 -0
- package/docs/plans/2026-02-22-research-context-utilization.md +459 -0
- package/docs/plans/2026-02-22-research-cost-quality-tradeoff.md +548 -0
- package/docs/plans/2026-02-22-research-lesson-transferability.md +508 -0
- package/docs/plans/2026-02-22-research-multi-agent-coordination.md +312 -0
- package/docs/plans/2026-02-22-research-phase-integration.md +602 -0
- package/docs/plans/2026-02-22-research-plan-quality.md +428 -0
- package/docs/plans/2026-02-22-research-prompt-engineering.md +558 -0
- package/docs/plans/2026-02-22-research-unconventional-perspectives.md +528 -0
- package/docs/plans/2026-02-22-research-user-adoption.md +638 -0
- package/docs/plans/2026-02-22-research-verification-effectiveness.md +433 -0
- package/docs/plans/2026-02-23-agent-suite-design.md +299 -0
- package/docs/plans/2026-02-23-agent-suite-plan.md +578 -0
- package/docs/plans/2026-02-23-phase3-cost-infrastructure-design.md +148 -0
- package/docs/plans/2026-02-23-phase3-cost-infrastructure-plan.md +1062 -0
- package/docs/plans/2026-02-23-research-bash-expert-agent.md +543 -0
- package/docs/plans/2026-02-23-research-dependency-auditor-agent.md +564 -0
- package/docs/plans/2026-02-23-research-improving-existing-agents.md +503 -0
- package/docs/plans/2026-02-23-research-integration-tester-agent.md +454 -0
- package/docs/plans/2026-02-23-research-python-expert-agent.md +429 -0
- package/docs/plans/2026-02-23-research-service-monitor-agent.md +425 -0
- package/docs/plans/2026-02-23-research-shell-expert-agent.md +533 -0
- package/docs/plans/2026-02-23-roadmap-to-completion.md +530 -0
- package/docs/plans/2026-02-24-headless-module-split-design.md +98 -0
- package/docs/plans/2026-02-24-headless-module-split.md +443 -0
- package/docs/plans/2026-02-24-lesson-scope-metadata-design.md +228 -0
- package/docs/plans/2026-02-24-lesson-scope-metadata-plan.md +968 -0
- package/docs/plans/2026-02-24-npm-packaging-design.md +841 -0
- package/docs/plans/2026-02-24-npm-packaging-plan.md +1965 -0
- package/docs/plans/audit-findings.md +186 -0
- package/docs/telegram-notification-format.md +98 -0
- package/examples/example-plan.md +51 -0
- package/examples/example-prd.json +72 -0
- package/examples/example-roadmap.md +33 -0
- package/examples/quickstart-plan.md +63 -0
- package/hooks/hooks.json +26 -0
- package/hooks/setup-symlinks.sh +48 -0
- package/hooks/stop-hook.sh +135 -0
- package/package.json +47 -0
- package/policies/bash.md +71 -0
- package/policies/python.md +71 -0
- package/policies/testing.md +61 -0
- package/policies/universal.md +60 -0
- package/scripts/analyze-report.sh +97 -0
- package/scripts/architecture-map.sh +145 -0
- package/scripts/auto-compound.sh +273 -0
- package/scripts/batch-audit.sh +42 -0
- package/scripts/batch-test.sh +101 -0
- package/scripts/entropy-audit.sh +221 -0
- package/scripts/failure-digest.sh +51 -0
- package/scripts/generate-ast-rules.sh +96 -0
- package/scripts/init.sh +112 -0
- package/scripts/lesson-check.sh +428 -0
- package/scripts/lib/common.sh +61 -0
- package/scripts/lib/cost-tracking.sh +153 -0
- package/scripts/lib/ollama.sh +60 -0
- package/scripts/lib/progress-writer.sh +128 -0
- package/scripts/lib/run-plan-context.sh +215 -0
- package/scripts/lib/run-plan-echo-back.sh +231 -0
- package/scripts/lib/run-plan-headless.sh +396 -0
- package/scripts/lib/run-plan-notify.sh +57 -0
- package/scripts/lib/run-plan-parser.sh +81 -0
- package/scripts/lib/run-plan-prompt.sh +215 -0
- package/scripts/lib/run-plan-quality-gate.sh +132 -0
- package/scripts/lib/run-plan-routing.sh +315 -0
- package/scripts/lib/run-plan-sampling.sh +170 -0
- package/scripts/lib/run-plan-scoring.sh +146 -0
- package/scripts/lib/run-plan-state.sh +142 -0
- package/scripts/lib/run-plan-team.sh +199 -0
- package/scripts/lib/telegram.sh +54 -0
- package/scripts/lib/thompson-sampling.sh +176 -0
- package/scripts/license-check.sh +74 -0
- package/scripts/mab-run.sh +575 -0
- package/scripts/module-size-check.sh +146 -0
- package/scripts/patterns/async-no-await.yml +5 -0
- package/scripts/patterns/bare-except.yml +6 -0
- package/scripts/patterns/empty-catch.yml +6 -0
- package/scripts/patterns/hardcoded-localhost.yml +9 -0
- package/scripts/patterns/retry-loop-no-backoff.yml +12 -0
- package/scripts/pipeline-status.sh +197 -0
- package/scripts/policy-check.sh +226 -0
- package/scripts/prior-art-search.sh +133 -0
- package/scripts/promote-mab-lessons.sh +126 -0
- package/scripts/prompts/agent-a-superpowers.md +29 -0
- package/scripts/prompts/agent-b-ralph.md +29 -0
- package/scripts/prompts/judge-agent.md +61 -0
- package/scripts/prompts/planner-agent.md +44 -0
- package/scripts/pull-community-lessons.sh +90 -0
- package/scripts/quality-gate.sh +266 -0
- package/scripts/research-gate.sh +90 -0
- package/scripts/run-plan.sh +329 -0
- package/scripts/scope-infer.sh +159 -0
- package/scripts/setup-ralph-loop.sh +155 -0
- package/scripts/telemetry.sh +230 -0
- package/scripts/tests/run-all-tests.sh +52 -0
- package/scripts/tests/test-act-cli.sh +46 -0
- package/scripts/tests/test-agents-md.sh +87 -0
- package/scripts/tests/test-analyze-report.sh +114 -0
- package/scripts/tests/test-architecture-map.sh +89 -0
- package/scripts/tests/test-auto-compound.sh +169 -0
- package/scripts/tests/test-batch-test.sh +65 -0
- package/scripts/tests/test-benchmark-runner.sh +25 -0
- package/scripts/tests/test-common.sh +168 -0
- package/scripts/tests/test-cost-tracking.sh +158 -0
- package/scripts/tests/test-echo-back.sh +180 -0
- package/scripts/tests/test-entropy-audit.sh +146 -0
- package/scripts/tests/test-failure-digest.sh +66 -0
- package/scripts/tests/test-generate-ast-rules.sh +145 -0
- package/scripts/tests/test-helpers.sh +82 -0
- package/scripts/tests/test-init.sh +47 -0
- package/scripts/tests/test-lesson-check.sh +278 -0
- package/scripts/tests/test-lesson-local.sh +55 -0
- package/scripts/tests/test-license-check.sh +109 -0
- package/scripts/tests/test-mab-run.sh +182 -0
- package/scripts/tests/test-ollama-lib.sh +49 -0
- package/scripts/tests/test-ollama.sh +60 -0
- package/scripts/tests/test-pipeline-status.sh +198 -0
- package/scripts/tests/test-policy-check.sh +124 -0
- package/scripts/tests/test-prior-art-search.sh +96 -0
- package/scripts/tests/test-progress-writer.sh +140 -0
- package/scripts/tests/test-promote-mab-lessons.sh +110 -0
- package/scripts/tests/test-pull-community-lessons.sh +149 -0
- package/scripts/tests/test-quality-gate.sh +241 -0
- package/scripts/tests/test-research-gate.sh +132 -0
- package/scripts/tests/test-run-plan-cli.sh +86 -0
- package/scripts/tests/test-run-plan-context.sh +305 -0
- package/scripts/tests/test-run-plan-e2e.sh +153 -0
- package/scripts/tests/test-run-plan-headless.sh +424 -0
- package/scripts/tests/test-run-plan-notify.sh +124 -0
- package/scripts/tests/test-run-plan-parser.sh +217 -0
- package/scripts/tests/test-run-plan-prompt.sh +254 -0
- package/scripts/tests/test-run-plan-quality-gate.sh +222 -0
- package/scripts/tests/test-run-plan-routing.sh +178 -0
- package/scripts/tests/test-run-plan-scoring.sh +148 -0
- package/scripts/tests/test-run-plan-state.sh +261 -0
- package/scripts/tests/test-run-plan-team.sh +157 -0
- package/scripts/tests/test-scope-infer.sh +150 -0
- package/scripts/tests/test-setup-ralph-loop.sh +63 -0
- package/scripts/tests/test-telegram-env.sh +38 -0
- package/scripts/tests/test-telegram.sh +121 -0
- package/scripts/tests/test-telemetry.sh +46 -0
- package/scripts/tests/test-thompson-sampling.sh +139 -0
- package/scripts/tests/test-validate-all.sh +60 -0
- package/scripts/tests/test-validate-commands.sh +89 -0
- package/scripts/tests/test-validate-hooks.sh +98 -0
- package/scripts/tests/test-validate-lessons.sh +150 -0
- package/scripts/tests/test-validate-plan-quality.sh +235 -0
- package/scripts/tests/test-validate-plans.sh +187 -0
- package/scripts/tests/test-validate-plugin.sh +106 -0
- package/scripts/tests/test-validate-prd.sh +184 -0
- package/scripts/tests/test-validate-skills.sh +134 -0
- package/scripts/validate-all.sh +57 -0
- package/scripts/validate-commands.sh +67 -0
- package/scripts/validate-hooks.sh +89 -0
- package/scripts/validate-lessons.sh +98 -0
- package/scripts/validate-plan-quality.sh +369 -0
- package/scripts/validate-plans.sh +120 -0
- package/scripts/validate-plugin.sh +86 -0
- package/scripts/validate-policies.sh +42 -0
- package/scripts/validate-prd.sh +118 -0
- package/scripts/validate-skills.sh +96 -0
- package/skills/autocode/SKILL.md +285 -0
- package/skills/autocode/ab-verification.md +51 -0
- package/skills/autocode/code-quality-standards.md +37 -0
- package/skills/autocode/competitive-mode.md +364 -0
- package/skills/brainstorming/SKILL.md +97 -0
- package/skills/capture-lesson/SKILL.md +187 -0
- package/skills/check-lessons/SKILL.md +116 -0
- package/skills/dispatching-parallel-agents/SKILL.md +110 -0
- package/skills/executing-plans/SKILL.md +85 -0
- package/skills/finishing-a-development-branch/SKILL.md +201 -0
- package/skills/receiving-code-review/SKILL.md +72 -0
- package/skills/requesting-code-review/SKILL.md +59 -0
- package/skills/requesting-code-review/code-reviewer.md +82 -0
- package/skills/research/SKILL.md +145 -0
- package/skills/roadmap/SKILL.md +115 -0
- package/skills/subagent-driven-development/SKILL.md +98 -0
- package/skills/subagent-driven-development/code-quality-reviewer-prompt.md +18 -0
- package/skills/subagent-driven-development/implementer-prompt.md +73 -0
- package/skills/subagent-driven-development/spec-reviewer-prompt.md +57 -0
- package/skills/systematic-debugging/SKILL.md +134 -0
- package/skills/systematic-debugging/condition-based-waiting.md +64 -0
- package/skills/systematic-debugging/defense-in-depth.md +32 -0
- package/skills/systematic-debugging/root-cause-tracing.md +55 -0
- package/skills/test-driven-development/SKILL.md +167 -0
- package/skills/using-git-worktrees/SKILL.md +219 -0
- package/skills/using-superpowers/SKILL.md +54 -0
- package/skills/verification-before-completion/SKILL.md +140 -0
- package/skills/verify/SKILL.md +82 -0
- package/skills/writing-plans/SKILL.md +128 -0
- package/skills/writing-skills/SKILL.md +93 -0
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 16
|
|
3
|
+
title: "Event-driven systems must seed current state on startup"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [python, javascript, all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: integration-boundaries
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Event-driven system produces empty/wrong output on first boot before any events arrive"
|
|
11
|
+
fix: "On startup, seed current state by fetching a snapshot via REST/query before subscribing to events"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# WebSocket event-driven system
|
|
15
|
+
class Dashboard:
|
|
16
|
+
def __init__(self):
|
|
17
|
+
self.users = [] # Empty on startup!
|
|
18
|
+
|
|
19
|
+
def on_event(self, event):
|
|
20
|
+
if event.type == "user_join":
|
|
21
|
+
self.users.append(event.user)
|
|
22
|
+
|
|
23
|
+
# On first boot, dashboard is empty until first user joins
|
|
24
|
+
good: |
|
|
25
|
+
# Seed current state on startup
|
|
26
|
+
class Dashboard:
|
|
27
|
+
def __init__(self):
|
|
28
|
+
self.users = []
|
|
29
|
+
# Fetch current state before subscribing to events
|
|
30
|
+
self.users = api.get_all_users() # Seed!
|
|
31
|
+
|
|
32
|
+
def on_event(self, event):
|
|
33
|
+
if event.type == "user_join":
|
|
34
|
+
self.users.append(event.user)
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## Observation
|
|
38
|
+
An event-driven system (e.g., WebSocket, MQTT, event stream) maintains state by processing events. On startup, before any events arrive, the system has no state. It produces an empty result, a wrong result, or stalls until the first event triggers. Users see an empty dashboard or broken state until that first event arrives.
|
|
39
|
+
|
|
40
|
+
## Insight
|
|
41
|
+
The root cause is treating events as the only state source. Events represent *changes*, not current state. In steady state, events keep the system up-to-date. But on first boot, there's no baseline — the system must fetch current state separately before subscribing to changes.
|
|
42
|
+
|
|
43
|
+
## Lesson
|
|
44
|
+
Event-driven systems must seed current state on startup. Before subscribing to events, fetch a snapshot (via REST API, database query, or cache) to populate initial state. Then subscribe to events to handle incremental changes. This ensures the system has correct state from the first moment it's needed, not after the first event.
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 17
|
|
3
|
+
title: "Copy-pasted logic between modules diverges silently"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: integration-boundaries
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Two modules independently implement the same logic and diverge when only one is updated"
|
|
11
|
+
fix: "Extract shared logic to a single module imported by both consumers"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# module_a.py
|
|
15
|
+
def parse_date(s):
|
|
16
|
+
return datetime.strptime(s, "%Y-%m-%d").date()
|
|
17
|
+
|
|
18
|
+
# module_b.py (copy-paste)
|
|
19
|
+
def parse_date(s):
|
|
20
|
+
return datetime.strptime(s, "%Y-%m-%d").date()
|
|
21
|
+
|
|
22
|
+
# Later, module_a is updated to handle ISO format
|
|
23
|
+
# module_b is forgotten and still only handles %Y-%m-%d
|
|
24
|
+
good: |
|
|
25
|
+
# utils.py (shared)
|
|
26
|
+
def parse_date(s):
|
|
27
|
+
return datetime.strptime(s, "%Y-%m-%d").date()
|
|
28
|
+
|
|
29
|
+
# module_a.py
|
|
30
|
+
from utils import parse_date
|
|
31
|
+
|
|
32
|
+
# module_b.py
|
|
33
|
+
from utils import parse_date
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## Observation
|
|
37
|
+
Two modules that compute the same thing independently will diverge silently over time. One module gets updated to handle a new case or bug fix, the other doesn't. Now they behave differently for the same input, and there's no error — both modules are "working" within their own scope.
|
|
38
|
+
|
|
39
|
+
## Insight
|
|
40
|
+
The root cause is code duplication at creation time. Copy-pasting logic is faster initially but creates a maintenance burden: every fix must be applied twice. If the person fixing module_a doesn't know module_b exists, the fix isn't applied there. The divergence is invisible until the different behaviors cause a bug.
|
|
41
|
+
|
|
42
|
+
## Lesson
|
|
43
|
+
Never copy-paste logic between modules. Extract it to a shared utility that both import. This ensures changes are made once and benefit both consumers. If you find the same logic in two places, refactor immediately — treat it as a red flag that future divergence is likely.
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 18
|
|
3
|
+
title: "Every layer passes its test while full pipeline is broken"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: integration-boundaries
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Each pipeline layer passes unit tests independently while the full pipeline is broken at integration seams"
|
|
11
|
+
fix: "Add at least one end-to-end test tracing a single input through every layer"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# Layer 1: Data fetch (passes)
|
|
15
|
+
test_fetch: reads from mock DB, returns [User, User, User] ✓
|
|
16
|
+
|
|
17
|
+
# Layer 2: Transform (passes)
|
|
18
|
+
test_transform: receives list, returns transformed list ✓
|
|
19
|
+
|
|
20
|
+
# Layer 3: Store (passes)
|
|
21
|
+
test_store: receives list, writes to mock storage ✓
|
|
22
|
+
|
|
23
|
+
# Integration: Broken!
|
|
24
|
+
# Layer 2 returns dict, Layer 3 expects list → crash
|
|
25
|
+
good: |
|
|
26
|
+
# Unit tests for each layer (all pass)
|
|
27
|
+
test_fetch, test_transform, test_store (as above)
|
|
28
|
+
|
|
29
|
+
# Plus: E2E test tracing one record through all layers
|
|
30
|
+
test_full_pipeline:
|
|
31
|
+
input = create_test_user()
|
|
32
|
+
result = fetch(input) # Layer 1
|
|
33
|
+
result = transform(result) # Layer 2
|
|
34
|
+
store(result) # Layer 3
|
|
35
|
+
assert result_in_storage(result) # Verify end-to-end
|
|
36
|
+
---
|
|
37
|
+
|
|
38
|
+
## Observation
|
|
39
|
+
A multi-layer pipeline (data fetch → transform → store → API → UI) has each layer passing its unit tests independently. The full pipeline is broken at the integration seams: layer 1 returns a list, layer 2 expects a dict; layer 3 stores to file, layer 4 queries a database; fields have different names at each boundary.
|
|
40
|
+
|
|
41
|
+
## Insight
|
|
42
|
+
The root cause is testing each layer in isolation with mocked inputs/outputs. Each layer is correct for its inputs, but the outputs of one layer don't match the inputs of the next. The seams are never tested because each test stops at layer boundaries.
|
|
43
|
+
|
|
44
|
+
## Lesson
|
|
45
|
+
Always add at least one end-to-end test that traces a single input through every layer of the pipeline. Don't mock layer outputs — let real data flow through the entire system. This catches integration mismatches immediately. E2E tests are not a replacement for unit tests, they're a mandatory complement.
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 19
|
|
3
|
+
title: "systemd EnvironmentFile ignores `export` keyword"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [shell]
|
|
6
|
+
scope: [framework:systemd]
|
|
7
|
+
category: silent-failures
|
|
8
|
+
pattern:
|
|
9
|
+
type: syntactic
|
|
10
|
+
regex: "EnvironmentFile="
|
|
11
|
+
description: "systemd EnvironmentFile silently ignores lines with export prefix"
|
|
12
|
+
fix: "Use a bash wrapper (ExecStart=/bin/bash -c '. ~/.env && exec binary') or strip export from the file"
|
|
13
|
+
example:
|
|
14
|
+
bad: |
|
|
15
|
+
# ~/.env file
|
|
16
|
+
export API_KEY=secret123
|
|
17
|
+
export DEBUG=true
|
|
18
|
+
|
|
19
|
+
# systemd service
|
|
20
|
+
[Service]
|
|
21
|
+
EnvironmentFile=~/.env
|
|
22
|
+
# systemd ignores export prefix, loads nothing
|
|
23
|
+
good: |
|
|
24
|
+
# Either: strip export from the file
|
|
25
|
+
# ~/.env
|
|
26
|
+
API_KEY=secret123
|
|
27
|
+
DEBUG=true
|
|
28
|
+
|
|
29
|
+
# Or: use bash wrapper
|
|
30
|
+
[Service]
|
|
31
|
+
ExecStart=/bin/bash -c '. ~/.env && exec /usr/bin/myapp'
|
|
32
|
+
---
|
|
33
|
+
|
|
34
|
+
## Observation
|
|
35
|
+
A systemd service uses `EnvironmentFile=~/.env` with a `.env` file that contains `export VAR=value` syntax. The service starts without error but has empty environment variables. The `export` prefix is silently ignored by systemd's EnvironmentFile parser.
|
|
36
|
+
|
|
37
|
+
## Insight
|
|
38
|
+
systemd `EnvironmentFile` expects the format `KEY=value` only. The `export` keyword is shell syntax, not systemd syntax. When systemd sees `export KEY=value`, it either ignores the entire line or only parses the part after the `=`, leaving the variable unset. No error is logged — the service just runs with an incomplete environment.
|
|
39
|
+
|
|
40
|
+
## Lesson
|
|
41
|
+
systemd `EnvironmentFile` requires `KEY=value` format without `export`. Either: (1) maintain two files — one for shell-sourcing (with `export`) and one for systemd (without `export`), or (2) use a bash wrapper (`ExecStart=/bin/bash -c '. ~/.env && exec myapp'`) that sources the shell-format file. Never mix systemd EnvironmentFile with shell export syntax.
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 20
|
|
3
|
+
title: "Persist state incrementally before expensive work"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: silent-failures
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Long-running process saves state only at the end, losing all progress on crash"
|
|
11
|
+
fix: "Checkpoint state after each logical unit of work"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
def process_large_dataset(items):
|
|
15
|
+
results = []
|
|
16
|
+
for item in items:
|
|
17
|
+
result = expensive_operation(item)
|
|
18
|
+
results.append(result)
|
|
19
|
+
save_results(results) # All progress lost if crash occurs here
|
|
20
|
+
good: |
|
|
21
|
+
def process_large_dataset(items):
|
|
22
|
+
for i, item in enumerate(items):
|
|
23
|
+
result = expensive_operation(item)
|
|
24
|
+
save_checkpoint(result, i) # State saved after each unit
|
|
25
|
+
---
|
|
26
|
+
|
|
27
|
+
## Observation
|
|
28
|
+
|
|
29
|
+
Long-running processes that accumulate work and save state only at the end are vulnerable to catastrophic data loss. A 2-hour batch job that crashes during the final save step restarts from zero, repeating 2 hours of work.
|
|
30
|
+
|
|
31
|
+
## Insight
|
|
32
|
+
|
|
33
|
+
State persistence is a trade-off between granularity and overhead. The instinct is to minimize I/O by batching writes, but this violates the fundamental reliability principle: *work that has been completed should not be lost*. Progress checkpoints have minimal overhead (typically <5% in database-backed systems) and prevent the infinite-restart failure mode.
|
|
34
|
+
|
|
35
|
+
## Lesson
|
|
36
|
+
|
|
37
|
+
Checkpoint state after each logical unit of work, not just at the end. Use one of these patterns:
|
|
38
|
+
|
|
39
|
+
- **Database transactions**: Commit after each logical unit, not at the end
|
|
40
|
+
- **State files**: Write incremental snapshots (e.g., `batch_001_complete.json`)
|
|
41
|
+
- **Message queue acknowledgment**: Ack each message after processing, not at the end of the batch
|
|
42
|
+
- **Progress markers**: Track completed work in a separate file, read on startup to resume
|
|
43
|
+
|
|
44
|
+
Always verify the checkpoint write succeeds before proceeding. If a process crashes, it should resume from the last checkpoint, never restart from zero.
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 21
|
|
3
|
+
title: "Dual-axis testing: horizontal sweep + vertical trace"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: integration-boundaries
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Testing only endpoints (horizontal) or only data flow (vertical) misses entire bug classes"
|
|
11
|
+
fix: "Run both horizontal sweep (every endpoint) and vertical trace (one input through all layers)"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# Only test endpoints exist
|
|
15
|
+
def test_api_responses():
|
|
16
|
+
assert client.get('/users').status_code == 200
|
|
17
|
+
assert client.get('/users/1').status_code == 200
|
|
18
|
+
# Missing: verify data actually flows and transforms correctly
|
|
19
|
+
good: |
|
|
20
|
+
# Horizontal: every endpoint responds
|
|
21
|
+
assert client.get('/users').status_code == 200
|
|
22
|
+
# Vertical: one user through all layers
|
|
23
|
+
response = client.post('/users', data={'name': 'Alice'})
|
|
24
|
+
user_id = response.json()['id']
|
|
25
|
+
assert client.get(f'/users/{user_id}').json()['name'] == 'Alice'
|
|
26
|
+
---
|
|
27
|
+
|
|
28
|
+
## Observation
|
|
29
|
+
|
|
30
|
+
Many test suites validate that endpoints exist and return 2xx status codes, but never verify that data flows end-to-end. A bug where data enters the pipeline but never reaches the database passes horizontal testing but fails in production.
|
|
31
|
+
|
|
32
|
+
## Insight
|
|
33
|
+
|
|
34
|
+
Integration bugs exist at layer boundaries: serialization, deserialization, state transitions, and persistence. Horizontal testing (every endpoint exists) confirms the surface. Vertical testing (one input through all layers) confirms the pipeline. Both are required because they catch different bug classes:
|
|
35
|
+
|
|
36
|
+
- **Horizontal** → missing endpoints, wrong status codes
|
|
37
|
+
- **Vertical** → data transformation bugs, missing persistence, state inconsistency
|
|
38
|
+
|
|
39
|
+
Testing only one axis leaves the other's entire class of integration bugs untested.
|
|
40
|
+
|
|
41
|
+
## Lesson
|
|
42
|
+
|
|
43
|
+
After implementing a multi-layer system (API → logic → database, or UI → service → cache), always run dual-axis testing:
|
|
44
|
+
|
|
45
|
+
1. **Horizontal sweep**: Hit every endpoint/CLI command/UI action. Confirm each responds correctly.
|
|
46
|
+
2. **Vertical trace**: Submit one real input and trace it through every layer to the final output. Confirm data flows end-to-end and state accumulates correctly.
|
|
47
|
+
|
|
48
|
+
Execute vertical first (catches more bugs per minute), then horizontal (completeness check). Both must pass before claiming readiness. Document the vertical trace as a test case you can re-run.
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 22
|
|
3
|
+
title: "Build tool JSX factory shadowed by arrow params"
|
|
4
|
+
severity: blocker
|
|
5
|
+
languages: [javascript, typescript]
|
|
6
|
+
scope: [framework:preact]
|
|
7
|
+
category: silent-failures
|
|
8
|
+
pattern:
|
|
9
|
+
type: syntactic
|
|
10
|
+
regex: "\\.map\\(h\\s*=>"
|
|
11
|
+
description: "Arrow function parameter shadows build tool JSX factory injection"
|
|
12
|
+
fix: "Never use single-letter variable names that match build tool injections (h, React)"
|
|
13
|
+
example:
|
|
14
|
+
bad: |
|
|
15
|
+
import { h } from 'preact';
|
|
16
|
+
const items = users.map(h => (
|
|
17
|
+
<div key={h.id}>{h.name}</div> // h refers to user, not JSX factory
|
|
18
|
+
)); // JSX transform fails silently
|
|
19
|
+
good: |
|
|
20
|
+
import { h } from 'preact';
|
|
21
|
+
const items = users.map(user => (
|
|
22
|
+
<div key={user.id}>{user.name}</div>
|
|
23
|
+
)); // Clear intent, no shadowing
|
|
24
|
+
---
|
|
25
|
+
|
|
26
|
+
## Observation
|
|
27
|
+
|
|
28
|
+
Build tools like esbuild (when configured to auto-inject a JSX factory, e.g. via `jsxInject`) insert `h` (or `React` in some configs) at the top of each file. When code uses `h` as an arrow function parameter (`.map(h => ...)`), the parameter shadows the injected factory. JSX elements become malformed, rendering silently fails with no error.
|
|
29
|
+
|
|
30
|
+
## Insight
|
|
31
|
+
|
|
32
|
+
This is a tooling footgun: the injection is invisible but syntactically valid. A user object parameter named `h` is reasonable in isolation, but creates a silent failure when combined with JSX. The build tool cannot detect this because it happens after transformation.
|
|
33
|
+
|
|
34
|
+
## Lesson
|
|
35
|
+
|
|
36
|
+
Never use `h`, `React`, or other build-tool-injected names as local variable or parameter names:
|
|
37
|
+
|
|
38
|
+
- Avoid: `.map(h => ...)`, `.forEach(React => ...)`
|
|
39
|
+
- Use: `.map(user => ...)`, `.forEach(Component => ...)`
|
|
40
|
+
|
|
41
|
+
Apply this consistently across your codebase. In code review, flag any single-letter parameter names that match build tool injections. The cost of renaming is zero; the cost of debug time is unbounded.
|
|
42
|
+
|
|
43
|
+
For teams using JSX, add linter rules (ESLint's `no-shadow`, plus `id-denylist` for injected names like `h` and `React`) to catch this pattern automatically.
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 23
|
|
3
|
+
title: "Static analysis spiral -- chasing lint fixes creates more bugs"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: test-anti-patterns
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Implementing lint fixes triggers new warnings in a cascading spiral"
|
|
11
|
+
fix: "Set a lint baseline, only fix violations in code you're actively changing"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# Run linter, find 150 issues
|
|
15
|
+
pylint mymodule.py # 150 violations
|
|
16
|
+
# Start fixing: add type hints, remove unused imports, refactor
|
|
17
|
+
# After 1 hour: 140 violations, but 3 bugs introduced in refactoring
|
|
18
|
+
# Keep fixing: now 120 violations, but more subtle bugs
|
|
19
|
+
good: |
|
|
20
|
+
# Establish baseline
|
|
21
|
+
pylint mymodule.py > baseline.txt # 150 violations recorded
|
|
22
|
+
# Only fix violations in code you touch during feature work
|
|
23
|
+
# When implementing a function, clean that function's lints
|
|
24
|
+
# New commits don't expand scope beyond the feature
|
|
25
|
+
---
|
|
26
|
+
|
|
27
|
+
## Observation
|
|
28
|
+
|
|
29
|
+
Linting systems are designed to improve code quality incrementally, but aggressive lint-chasing creates a secondary spiral: fixing style violations in unrelated code introduces logic bugs, which are harder to catch than style violations.
|
|
30
|
+
|
|
31
|
+
## Insight
|
|
32
|
+
|
|
33
|
+
Linting has two modes:
|
|
34
|
+
|
|
35
|
+
1. **Prophylactic** (new code): enforce rules as you write
|
|
36
|
+
2. **Curative** (old code): bulk-fix accumulated violations
|
|
37
|
+
|
|
38
|
+
Curative mode is expensive when applied to a large codebase. Each refactor is a chance to introduce bugs, and scope expands unbounded. The instinct is to "make the codebase better while I'm at it," but that trades quality for coverage and usually loses the trade.
|
|
39
|
+
|
|
40
|
+
## Lesson
|
|
41
|
+
|
|
42
|
+
Set a lint baseline and fix violations only in code you're actively changing:
|
|
43
|
+
|
|
44
|
+
1. Run the linter and record the baseline (e.g., `pylint mymodule.py > baseline.txt`)
|
|
45
|
+
2. During feature work, when you touch a function, also fix that function's lints
|
|
46
|
+
3. Never expand scope to fix unrelated violations
|
|
47
|
+
4. New commits should show code cleanup *in the changed regions only*
|
|
48
|
+
|
|
49
|
+
If you want to tackle accumulated tech debt, do it in a separate PR with a clear scope: "Refactor payment module for clarity — no logic changes." Run tests before and after to verify behavior is identical. Otherwise, lint fixes stay scoped to feature work.
|
|
50
|
+
|
|
51
|
+
Avoid automated "fix all lints" commits — they're high-risk, low-review, and a merge-conflict nightmare. Humans fix code they understand; linters fix code they can parse.
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 24
|
|
3
|
+
title: "Shared pipeline features must share implementation"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: integration-boundaries
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Two pipeline stages independently implement the same feature logic and produce different results"
|
|
11
|
+
fix: "Both stages import from one module; if different languages, add contract tests"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# Python: batch pipeline
|
|
15
|
+
def process_batch(items):
|
|
16
|
+
results = [transform(item) for item in items]
|
|
17
|
+
return results
|
|
18
|
+
|
|
19
|
+
# JavaScript: real-time pipeline (independently implemented)
|
|
20
|
+
function processStream(item) {
|
|
21
|
+
return transform(item); // Slightly different logic
|
|
22
|
+
}
|
|
23
|
+
good: |
|
|
24
|
+
# Python shared logic
|
|
25
|
+
# pipeline/transform.py
|
|
26
|
+
def transform(item):
|
|
27
|
+
return item.value * 2
|
|
28
|
+
|
|
29
|
+
# batch.py
|
|
30
|
+
from pipeline.transform import transform
|
|
31
|
+
results = [transform(item) for item in items]
|
|
32
|
+
|
|
33
|
+
# For JavaScript, if needed separately:
|
|
34
|
+
# Add contract test: both versions produce identical output on test set
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## Observation
|
|
38
|
+
|
|
39
|
+
Pipelines often have multiple paths: batch processing, streaming, scheduled jobs. When each path independently implements logic like filtering, validation, or transformation, they diverge. One handles edge cases the other misses, producing inconsistent results.
|
|
40
|
+
|
|
41
|
+
## Insight
|
|
42
|
+
|
|
43
|
+
Feature logic encoded in multiple places creates a maintenance burden and a correctness risk. Each implementation is an opportunity for a bug; each update requires changes in N places. The root cause is treating pipeline stages as independent systems when they should share a common contract.
|
|
44
|
+
|
|
45
|
+
## Lesson
|
|
46
|
+
|
|
47
|
+
When multiple pipeline stages implement the same feature:
|
|
48
|
+
|
|
49
|
+
1. **Same language**: Extract logic to a shared module; all stages import from it.
|
|
50
|
+
2. **Different languages**: Implement once in the language closest to the data source (usually Python for batch), then wrap in contract tests that verify the other language's implementation produces identical output on a test dataset.
|
|
51
|
+
3. **External service**: Deploy once, both stages call the API.
|
|
52
|
+
|
|
53
|
+
Document the contract: "transform() must handle null, empty string, and values >1000." Then verify both implementations satisfy it. When bugs are found, fix once, verify all paths, deploy once.
|
|
54
|
+
|
|
55
|
+
This is the DRY principle applied to distributed systems: don't repeat business logic across process boundaries.
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 25
|
|
3
|
+
title: "Defense-in-depth: validate at all entry points"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: integration-boundaries
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Input validation exists at one entry point but not others (API, CLI, WebSocket, cron)"
|
|
11
|
+
fix: "Centralize validation in a shared function called by all entry points"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# REST API: validates user_id
|
|
15
|
+
@app.post('/users/<user_id>')
|
|
16
|
+
def update_user(user_id):
|
|
17
|
+
validate_user_id(user_id) # Validation here
|
|
18
|
+
return process_update(user_id)
|
|
19
|
+
|
|
20
|
+
# But CLI skips validation
|
|
21
|
+
def cli_update(user_id):
|
|
22
|
+
return process_update(user_id) # No validation!
|
|
23
|
+
good: |
|
|
24
|
+
# Shared validation
|
|
25
|
+
def process_update(user_id):
|
|
26
|
+
validate_user_id(user_id) # Always validated
|
|
27
|
+
# ... actual logic
|
|
28
|
+
|
|
29
|
+
# REST API
|
|
30
|
+
@app.post('/users/<user_id>')
|
|
31
|
+
def update_user(user_id):
|
|
32
|
+
return process_update(user_id)
|
|
33
|
+
|
|
34
|
+
# CLI
|
|
35
|
+
def cli_update(user_id):
|
|
36
|
+
return process_update(user_id) # Validation inherited
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## Observation
|
|
40
|
+
|
|
41
|
+
Services often have multiple entry points: REST API, CLI, WebSocket, scheduled jobs. Validation logic gets implemented at one entry point (usually REST, where frameworks make it easy) but bypassed at others. Invalid data flows to the core logic, causing unexpected behavior.
|
|
42
|
+
|
|
43
|
+
## Insight
|
|
44
|
+
|
|
45
|
+
Entry point diversity is a feature (flexibility), but it creates a validation surface. Each entry point is a potential bypass. Without centralized validation, the defense is only as strong as the most permissive entry point.
|
|
46
|
+
|
|
47
|
+
## Lesson
|
|
48
|
+
|
|
49
|
+
Apply defense-in-depth to input validation:
|
|
50
|
+
|
|
51
|
+
1. **Centralize**: Move validation into the core logic function, not the entry point handler.
|
|
52
|
+
2. **All entry points**: Every path to the core logic must pass through validation — API, CLI, WebSocket, cron, admin UI.
|
|
53
|
+
3. **Explicit validation**: Don't rely on type hints or schema inference; call a validation function explicitly.
|
|
54
|
+
|
|
55
|
+
Pattern:
|
|
56
|
+
|
|
57
|
+
```
|
|
58
|
+
REST API → validate() → process()
|
|
59
|
+
CLI → validate() → process()
|
|
60
|
+
WebSocket → validate() → process()
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
If validation is expensive (e.g., database lookup), cache the result. If validation differs per entry point, that's a smell — it means the entry points have different semantics. Either merge them or document the difference explicitly.
|
|
64
|
+
|
|
65
|
+
Test all entry points with the same invalid input set to verify they all reject it consistently.
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 26
|
|
3
|
+
title: "Linter with no rules enabled = false enforcement"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [all]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: silent-failures
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "Linter installed with zero rules configured reports 0 issues regardless of code quality"
|
|
11
|
+
fix: "Always configure rules explicitly; test that the linter catches a known-bad sample"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
# pylint run with all checks disabled (see config below)
|
|
15
|
+
$ pylint mymodule.py
|
|
16
|
+
Your code has been rated at 10.00/10 (previous run: 10.00/10)
|
|
17
|
+
# No issues reported, but code is full of undefined variables and bad practices
|
|
18
|
+
|
|
19
|
+
# .pylintrc exists but all rules disabled
|
|
20
|
+
[MESSAGES CONTROL]
|
|
21
|
+
disable=all
|
|
22
|
+
good: |
|
|
23
|
+
# .pylintrc with explicit rules
|
|
24
|
+
[MESSAGES CONTROL]
|
|
25
|
+
disable=fixme,too-many-arguments,line-too-long
|
|
26
|
+
|
|
27
|
+
# Test the linter catches known issues
|
|
28
|
+
$ cat > test_bad.py << 'EOF'
|
|
29
|
+
x = undefined_variable
|
|
30
|
+
EOF
|
|
31
|
+
$ pylint test_bad.py
|
|
32
|
+
E0602: Undefined variable 'undefined_variable'
|
|
33
|
+
---
|
|
34
|
+
|
|
35
|
+
## Observation
|
|
36
|
+
|
|
37
|
+
Linters installed but misconfigured (e.g., a config that disables all rules, or a tool that silently skips files it can't find a config for) report perfect scores on any code. Teams believe they have linting enforcement when they have none.
|
|
38
|
+
|
|
39
|
+
## Insight
|
|
40
|
+
|
|
41
|
+
Linting is a visibility layer — it surfaces code quality issues. An unconfigured or disabled linter creates false visibility: problems exist but the linter doesn't report them. This is worse than no linting, because teams act on the false signal.
|
|
42
|
+
|
|
43
|
+
## Lesson
|
|
44
|
+
|
|
45
|
+
When setting up a linter:
|
|
46
|
+
|
|
47
|
+
1. **Explicit configuration**: Create a config file (`.pylintrc`, `.eslintrc.json`, etc.) with at least one rule enabled.
|
|
48
|
+
2. **Baseline test**: Create a file with a known-bad pattern (undefined variable, unused import, etc.), run the linter, verify it catches it.
|
|
49
|
+
3. **Disable intentionally**: Start with defaults, then disable rules that conflict with your style (not all rules).
|
|
50
|
+
4. **CI integration**: Run the linter in CI/pre-commit hooks; fail builds on lint errors.
|
|
51
|
+
|
|
52
|
+
Don't disable large categories of rules unless you have a specific reason. "We don't care about line length" is a reason; "linting is too strict" is not. If the default rules feel too strict, discuss as a team and pick the ones that matter.
|
|
53
|
+
|
|
54
|
+
Verify linting is working by occasionally committing code that violates an enabled rule and confirming CI catches it.
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 27
|
|
3
|
+
title: "JSX silently drops wrong prop names"
|
|
4
|
+
severity: should-fix
|
|
5
|
+
languages: [javascript, typescript]
|
|
6
|
+
scope: [framework:preact]
|
|
7
|
+
category: silent-failures
|
|
8
|
+
pattern:
|
|
9
|
+
type: semantic
|
|
10
|
+
description: "JSX component receives prop with wrong name and silently ignores it"
|
|
11
|
+
fix: "Use TypeScript with strict component prop types; without TS, verify prop names against component signature"
|
|
12
|
+
example:
|
|
13
|
+
bad: |
|
|
14
|
+
// Component definition
|
|
15
|
+
function UserCard({ name, email }) {
|
|
16
|
+
return <div>{name} - {email}</div>;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
// Caller: typo in prop name
|
|
20
|
+
<UserCard name="Alice" emial="alice@example.com" />
|
|
21
|
+
// Renders as "Alice - undefined" with no error
|
|
22
|
+
good: |
|
|
23
|
+
// TypeScript: catches typo at build time
|
|
24
|
+
interface Props {
|
|
25
|
+
name: string;
|
|
26
|
+
email: string;
|
|
27
|
+
}
|
|
28
|
+
function UserCard({ name, email }: Props) {
|
|
29
|
+
return <div>{name} - {email}</div>;
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
// TypeScript error: "emial" is not assignable to type 'Props'
|
|
33
|
+
---
|
|
34
|
+
|
|
35
|
+
## Observation
|
|
36
|
+
|
|
37
|
+
JSX silently ignores props that don't match the component's destructuring. A typo in a prop name (e.g., `emial` instead of `email`) renders as an empty or undefined value with no warning. The component silently degrades instead of surfacing the error.
|
|
38
|
+
|
|
39
|
+
## Insight
|
|
40
|
+
|
|
41
|
+
JSX is syntactic sugar over function calls. Passing an unknown prop is like passing an unused argument — JavaScript doesn't care. The component receives `{ name, email }` destructured from the props object; any other keys are ignored. Without a type system, there's no way to know a prop was missed.
|
|
42
|
+
|
|
43
|
+
## Lesson
|
|
44
|
+
|
|
45
|
+
Guard against silent prop drops:
|
|
46
|
+
|
|
47
|
+
1. **Use TypeScript**: Define component props as interfaces and use strict mode. TypeScript will catch unknown props at build time.
|
|
48
|
+
2. **Code review**: Without TypeScript, manually verify prop names. List them in a comment or doc.
|
|
49
|
+
3. **PropTypes** (React): Use PropTypes in development to catch missing/wrong props at runtime.
|
|
50
|
+
|
|
51
|
+
Example with PropTypes:
|
|
52
|
+
|
|
53
|
+
```javascript
|
|
54
|
+
function UserCard({ name, email }) {
|
|
55
|
+
return <div>{name} - {email}</div>;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
UserCard.propTypes = {
|
|
59
|
+
name: PropTypes.string.isRequired,
|
|
60
|
+
email: PropTypes.string.isRequired,
|
|
61
|
+
};
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
TypeScript is better (caught at build time), but PropTypes is better than nothing. Test with a known-bad sample (e.g., `<UserCard name="Alice" />` missing email) and verify the error is caught.
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
---
|
|
2
|
+
id: 28
|
|
3
|
+
title: "Never embed infrastructure details in client-side code"
|
|
4
|
+
severity: blocker
|
|
5
|
+
languages: [javascript, typescript]
|
|
6
|
+
scope: [universal]
|
|
7
|
+
category: silent-failures
|
|
8
|
+
pattern:
|
|
9
|
+
type: syntactic
|
|
10
|
+
regex: "['\"]https?://\\d+\\.\\d+\\.\\d+\\.\\d+"
|
|
11
|
+
description: "Hardcoded IP addresses or localhost URLs in client-side code"
|
|
12
|
+
fix: "Use relative URLs, environment variables, or a config endpoint"
|
|
13
|
+
example:
|
|
14
|
+
bad: |
|
|
15
|
+
// hardcoded IP in client code
|
|
16
|
+
const API_URL = 'http://192.168.1.100:8080';
|
|
17
|
+
|
|
18
|
+
fetch(`${API_URL}/users`)
|
|
19
|
+
.then(r => r.json())
|
|
20
|
+
.then(data => console.log(data));
|
|
21
|
+
good: |
|
|
22
|
+
// Use relative URL or environment variable
|
|
23
|
+
const API_URL = process.env.REACT_APP_API_URL || '/api';
|
|
24
|
+
|
|
25
|
+
fetch(`${API_URL}/users`)
|
|
26
|
+
.then(r => r.json())
|
|
27
|
+
.then(data => console.log(data));
|
|
28
|
+
---
|
|
29
|
+
|
|
30
|
+
## Observation
|
|
31
|
+
|
|
32
|
+
Client-side code containing hardcoded IP addresses, `localhost:port` URLs, or internal hostnames breaks when deployed to different environments. These details change between development, staging, and production, but hardcoding them means shipping different code for each environment.
|
|
33
|
+
|
|
34
|
+
## Insight
|
|
35
|
+
|
|
36
|
+
Client-side code is delivered to users' browsers and cannot be changed post-deployment. Infrastructure details (which IP, which port) are deployment decisions, not code decisions. Embedding them couples code to infrastructure and breaks portability.
|
|
37
|
+
|
|
38
|
+
## Lesson
|
|
39
|
+
|
|
40
|
+
Never hardcode infrastructure details in client code:
|
|
41
|
+
|
|
42
|
+
1. **Relative URLs**: Use `fetch('/api/users')` instead of `fetch('http://192.168.1.100:8080/api/users')`. The browser sends requests to the same origin.
|
|
43
|
+
2. **Environment variables**: Use `process.env.REACT_APP_API_URL` (React) or similar, set at build time per environment.
|
|
44
|
+
3. **Config endpoint**: On app startup, fetch config from a well-known endpoint, then use returned URLs.
|
|
45
|
+
4. **DNS names**: Use domain names (`api.example.com`), not IP addresses. IPs change; domains don't.
|
|
46
|
+
|
|
47
|
+
Verification: Deploy the same compiled artifact to three environments (dev, staging, prod) and verify it connects to the correct backend in each. (If you chose build-time environment variables, verify instead that each environment's build differs only in the injected config value.) If you must rebuild per environment for any other reason, you've embedded infrastructure.
|
|
48
|
+
|
|
49
|
+
This applies equally to API keys — never hardcode them. Use environment variables or secure token exchange.
|