ado-workflows 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. ado_workflows-0.1.0/.envrc +51 -0
  2. ado_workflows-0.1.0/.github/ISSUE_TEMPLATE/bug_report.md +37 -0
  3. ado_workflows-0.1.0/.github/ISSUE_TEMPLATE/feature_request.md +25 -0
  4. ado_workflows-0.1.0/.github/copilot-instructions.md +1 -0
  5. ado_workflows-0.1.0/.github/pull_request_template.md +22 -0
  6. ado_workflows-0.1.0/.github/secret_scanning.yml +2 -0
  7. ado_workflows-0.1.0/.github/skills/bdd-feedback-loop/SKILL.md +259 -0
  8. ado_workflows-0.1.0/.github/skills/bdd-testing/SKILL.md +438 -0
  9. ado_workflows-0.1.0/.github/skills/bdd-testing/references/test-patterns.md +548 -0
  10. ado_workflows-0.1.0/.github/skills/conventional-commits/SKILL.md +157 -0
  11. ado_workflows-0.1.0/.github/skills/feature-workflow/SKILL.md +183 -0
  12. ado_workflows-0.1.0/.github/skills/plan-updates/SKILL.md +79 -0
  13. ado_workflows-0.1.0/.github/skills/skill-compliance/SKILL.md +67 -0
  14. ado_workflows-0.1.0/.github/skills/tool-usage/SKILL.md +94 -0
  15. ado_workflows-0.1.0/.github/workflows/ci.yml +61 -0
  16. ado_workflows-0.1.0/.github/workflows/publish.yml +58 -0
  17. ado_workflows-0.1.0/.github/workflows/release.yml +53 -0
  18. ado_workflows-0.1.0/.gitignore +36 -0
  19. ado_workflows-0.1.0/.pre-commit-config.yaml +15 -0
  20. ado_workflows-0.1.0/CODE_OF_CONDUCT.md +40 -0
  21. ado_workflows-0.1.0/CONTRIBUTING.md +135 -0
  22. ado_workflows-0.1.0/LICENSE +21 -0
  23. ado_workflows-0.1.0/PKG-INFO +176 -0
  24. ado_workflows-0.1.0/README.md +146 -0
  25. ado_workflows-0.1.0/SECURITY.md +31 -0
  26. ado_workflows-0.1.0/docs/ARCHITECTURE.md +124 -0
  27. ado_workflows-0.1.0/docs/PUBLISHING.md +71 -0
  28. ado_workflows-0.1.0/pyproject.toml +113 -0
  29. ado_workflows-0.1.0/src/ado_workflows/__init__.py +99 -0
  30. ado_workflows-0.1.0/src/ado_workflows/auth.py +83 -0
  31. ado_workflows-0.1.0/src/ado_workflows/client.py +76 -0
  32. ado_workflows-0.1.0/src/ado_workflows/comments.py +346 -0
  33. ado_workflows-0.1.0/src/ado_workflows/context.py +258 -0
  34. ado_workflows-0.1.0/src/ado_workflows/discovery.py +156 -0
  35. ado_workflows-0.1.0/src/ado_workflows/lifecycle.py +92 -0
  36. ado_workflows-0.1.0/src/ado_workflows/models.py +217 -0
  37. ado_workflows-0.1.0/src/ado_workflows/parsing.py +135 -0
  38. ado_workflows-0.1.0/src/ado_workflows/pr.py +201 -0
  39. ado_workflows-0.1.0/src/ado_workflows/py.typed +0 -0
  40. ado_workflows-0.1.0/src/ado_workflows/review.py +577 -0
  41. ado_workflows-0.1.0/src/ado_workflows/votes.py +112 -0
  42. ado_workflows-0.1.0/tests/__init__.py +1 -0
  43. ado_workflows-0.1.0/tests/test_auth.py +368 -0
  44. ado_workflows-0.1.0/tests/test_client.py +247 -0
  45. ado_workflows-0.1.0/tests/test_comments.py +1089 -0
  46. ado_workflows-0.1.0/tests/test_context.py +1028 -0
  47. ado_workflows-0.1.0/tests/test_discovery.py +554 -0
  48. ado_workflows-0.1.0/tests/test_lifecycle.py +245 -0
  49. ado_workflows-0.1.0/tests/test_models.py +765 -0
  50. ado_workflows-0.1.0/tests/test_parsing.py +413 -0
  51. ado_workflows-0.1.0/tests/test_pending_review.py +845 -0
  52. ado_workflows-0.1.0/tests/test_pr.py +575 -0
  53. ado_workflows-0.1.0/tests/test_review.py +1025 -0
  54. ado_workflows-0.1.0/tests/test_votes.py +621 -0
  55. ado_workflows-0.1.0/uv.lock +840 -0
@@ -0,0 +1,51 @@
1
+ #!/usr/bin/env bash
2
+
3
+ # Auto-activate uv virtual environment
4
+ if ! has uv; then
5
+ echo "❌ uv is not installed. Please install it first:"
6
+ echo " curl -LsSf https://astral.sh/uv/install.sh | sh"
7
+ exit 1
8
+ fi
9
+
10
+ # Create virtual environment if it doesn't exist
11
+ if [[ ! -d ".venv" ]]; then
12
+ echo "📦 Creating virtual environment with uv..."
13
+ uv venv
14
+ fi
15
+
16
+ # Activate the virtual environment
17
+ source .venv/bin/activate
18
+ # Source .envrc.local if present (for user/machine-specific settings)
19
+ if [[ -f ".envrc.local" ]]; then
20
+ echo "🔒 Sourcing .envrc.local..."
21
+ source .envrc.local
22
+ fi
23
+
24
+ # Sync dependencies (including dev dependencies)
25
+ echo "🔄 Syncing dependencies..."
26
+ uv sync --all-extras
27
+
28
+ # Install pre-commit hooks if they don't exist
29
+ if [[ -f ".pre-commit-config.yaml" ]] && ! uv run pre-commit --version >/dev/null 2>&1; then
30
+ echo "🪝 Installing pre-commit hooks..."
31
+ uv run pre-commit install
32
+ fi
33
+
34
+ # Export environment variables
35
+ export PYTHONPATH="${PWD}/src:${PYTHONPATH}"
36
+ export UV_PROJECT_ENVIRONMENT="${PWD}/.venv"
37
+
38
+ echo "✅ Environment activated!"
39
+ echo "📁 Virtual environment: ${VIRTUAL_ENV}"
40
+ echo "🐍 Python: $(python --version)"
41
+ echo "📦 uv: $(uv --version)"
42
+
43
+ # Show available commands
44
+ echo ""
45
+ echo "🚀 Available commands:"
46
+ echo " uv run task lint # Lint (with auto-fix)"
47
+ echo " uv run task format # Format code"
48
+ echo " uv run task type # Type check"
49
+ echo " uv run task test # Run tests"
50
+ echo " uv run task cov # Run tests with coverage"
51
+ echo " uv run task check # lint + type + test"
@@ -0,0 +1,37 @@
1
+ ---
2
+ name: Bug Report
3
+ about: Something isn't working as expected
4
+ title: ""
5
+ labels: bug
6
+ assignees: ""
7
+ ---
8
+
9
+ ## Who / What / Why
10
+
11
+ - **WHO:** Who is affected? (e.g., developer using the library, CI pipeline consumer)
12
+ - **WHAT:** What behavior is broken or unexpected?
13
+ - **WHY:** Why does it matter? What's the impact?
14
+
15
+ ## Steps to Reproduce
16
+
17
+ 1. ...
18
+ 2. ...
19
+ 3. ...
20
+
21
+ ## Expected Behavior
22
+
23
+ What you expected to happen.
24
+
25
+ ## Actual Behavior
26
+
27
+ What actually happened. Include the full error message if applicable.
28
+
29
+ ## Environment
30
+
31
+ - OS: [e.g., Ubuntu 24.04]
32
+ - Python version: [e.g., 3.12.x]
33
+ - Package version: [e.g., 0.1.0]
34
+
35
+ ## Additional Context
36
+
37
+ Any other context — logs, error tracebacks, or minimal reproduction scripts.
@@ -0,0 +1,25 @@
1
+ ---
2
+ name: Feature Request
3
+ about: Suggest a new feature or improvement
4
+ title: ""
5
+ labels: enhancement
6
+ assignees: ""
7
+ ---
8
+
9
+ ## Who / What / Why
10
+
11
+ - **WHO:** Who needs this? (e.g., developer, MCP server consumer, CI pipeline)
12
+ - **WHAT:** What capability or behavior is needed?
13
+ - **WHY:** Why does it matter? What problem does it solve?
14
+
15
+ ## Proposed Solution
16
+
17
+ How do you think it should work?
18
+
19
+ ## Alternatives Considered
20
+
21
+ Any other approaches you thought about and why they're less ideal.
22
+
23
+ ## Additional Context
24
+
25
+ Anything else — related issues, API design sketches, or workflow examples.
@@ -0,0 +1 @@
1
+ Before starting any task, read and follow the skill-compliance skill.
@@ -0,0 +1,22 @@
1
+ ## What
2
+
3
+ Brief description of the change.
4
+
5
+ ## Why
6
+
7
+ What problem does this solve or what behavior does it add?
8
+
9
+ ## How
10
+
11
+ Key implementation details — what approach was taken and why.
12
+
13
+ ## Testing
14
+
15
+ - [ ] All checks pass (`task check`)
16
+ - [ ] New BDD tests added, fully specifying the new behavior
17
+ - [ ] Existing tests unmodified (or explain why changes were needed)
18
+ - [ ] Coverage maintained at target level
19
+
20
+ ## Notes
21
+
22
+ Anything reviewers should pay attention to.
@@ -0,0 +1,2 @@
1
+ paths-ignore:
2
+ - "tests/**"
@@ -0,0 +1,259 @@
1
+ ---
2
+ name: bdd-feedback-loop
3
+ description: "Feedback loop procedure for implementing BDD test modules. Use when implementing a spec doc — covering one test module from spec to Pyright-clean, self-audited output."
4
+ ---
5
+
6
+ # BDD Feedback Loop — Test Implementation Procedure
7
+
8
+ ## When This Skill Applies
9
+
10
+ Whenever implementing tests from a BDD spec document. Each iteration of this loop
11
+ covers one test module: read the spec, implement, verify, audit, log, and hand off.
12
+
13
+ Do not proceed to the next module if unresolved failures remain from Steps 4, 5, or 7.
14
+
15
+ ---
16
+
17
+ ## The Loop (Per Module)
18
+
19
+ ### Step 1 — Read the Spec Doc
20
+
21
+ Read the module's spec doc in full before writing any code.
22
+
23
+ The spec is the authoritative source of truth. It defines:
24
+ - Which test classes to write
25
+ - The REQUIREMENT / WHO / WHAT / WHY for each class
26
+ - The MOCK BOUNDARY contract for each class
27
+ - The scenario signatures (Given / When / Then) for each test method
28
+
29
+ Do not invent test classes or scenarios not present in the spec. Do not silently
30
+ correct what appears to be a spec error — flag it in the deviation log (Step 6)
31
+ and implement what the spec says.
32
+
33
+ ---
34
+
35
+ ### Step 2 — Discover the Public API
36
+
37
+ Read the relevant `src/` files for the module under test. Extract:
38
+ - Public method signatures (name, parameters, return type)
39
+ - Public class constructors (required and optional parameters)
40
+ - Public names (no `_` prefix)
41
+ - Return types — specifically whether methods return dataclasses, primitives,
42
+ or raise exceptions
43
+
44
+ **This is the only permitted reason to read `src/` during test implementation.**
45
+
46
+ Do not use `src/` to find internal functions to mock. If a failure condition
47
+ cannot be induced through public API inputs alone, note it in the deviation log
48
+ rather than patching around it.
49
+
50
+ Record the discovered API surface as a brief comment block at the top of the
51
+ test file, for traceability:
52
+
53
+ ```python
54
+ # Public API surface (from src/myapp/services/processor.py):
55
+ # Processor(client: Client, store: DataStore, config: Settings)
56
+ # processor.process(item: Item) -> Result
57
+ # processor.compute_score(value: float | None, baseline: float) -> float
58
+ ```
59
+
60
+ ---
61
+
62
+ ### Step 3 — Implement the Tests
63
+
64
+ Implement each test class and method exactly as specified in the spec doc.
65
+
66
+ **File creation is always incremental. Do not attempt to write the entire test
67
+ file in a single tool call — doing so will cause a timeout and produce no output.**
68
+
69
+ The required sequence for every module:
70
+ 1. Create the file with the header (module docstring, API surface comment,
71
+ imports, and any shared helpers) using `create_file`. No test classes yet.
72
+ 2. Add one test class at a time using `replace_string_in_file`.
73
+ A single tool call must contain exactly one test class — no more.
74
+ 3. Repeat step 2 until all test classes from the spec are in the file.
75
+
76
+ For each class:
77
+ 1. Copy the REQUIREMENT / WHO / WHAT / WHY docstring from the spec
78
+ 2. Copy the MOCK BOUNDARY contract from the spec
79
+ 3. Implement each scenario from the spec's Given / When / Then signatures
80
+
81
+ For each method:
82
+ 1. Use the scenario signature from the spec as the docstring
83
+ 2. Write Given / When / Then body comments
84
+ 3. Use real instances per the MOCK BOUNDARY (Step 2 revealed the constructors)
85
+ 4. Assert on SUT output, not on objects you constructed yourself
86
+ 5. Include a diagnostic message on every assertion
87
+
88
+ Refer to `.github/skills/bdd-testing/SKILL.md` and
89
+ `.github/skills/bdd-testing/references/test-patterns.md` for all conventions.
90
+ Refer to the tool-usage skill for how to use the tools to validate the tests fail
91
+ as expected.
92
+
93
+ ---
94
+
95
+ ### Step 4 — Run Type Checks
96
+
97
+ After implementing each test file:
98
+
99
+ 1. Run the `get_errors` tool on the test file to surface Pylance diagnostics.
100
+ 2. Then run `pyright` in the terminal on the test file — Pylance does not surface
101
+ all Pyright diagnostics through `get_errors`.
102
+
103
+ For each reported error from either step, attempt to resolve it. If an error
104
+ cannot be resolved after three attempts, log it as a deviation (Step 6) and
105
+ continue to the next error. Once all errors have been iterated, if any remain
106
+ unresolved, do not proceed to the next module — the module is blocked pending
107
+ human review.
108
+
109
+ Common issues to fix:
110
+ - Missing imports (module not imported at top of file)
111
+ - Wrong argument types passed to constructors or methods
112
+ - Incompatible return type assignments
113
+ - Undefined names (typos in fixture names, method names)
114
+ - `AsyncMock` vs `MagicMock` mismatches on async methods
115
+
116
+ ---
117
+
118
+ ### Step 5 — Self-Audit Against BDD Principles
119
+
120
+ Read the completed test file and work through each checklist item below. For each
121
+ violation found, attempt to resolve it. If a violation cannot be resolved after
122
+ three attempts, log it as a deviation (Step 6) and continue to the next item.
123
+ Once all items have been iterated, if any violations remain unresolved, do not
124
+ proceed to the next module — the module is blocked pending human review.
125
+
126
+ **Tautology check — the most important:**
127
+ For every test method, ask: *if I deleted the module under test entirely, would
128
+ this test still pass?* If yes, it is a tautology. The When step must invoke
129
+ production code. The Then step must assert on what that production code returned.
130
+
131
+ Checklist — work through every item:
132
+
133
+ - [ ] Every test method's When step calls production code (no tautologies)
134
+ - [ ] No test constructs the expected output and asserts on the constructed object
135
+ - [ ] No test accesses `_`-prefixed attributes or methods on the SUT (`._store`, `._registry`, etc.)
136
+ - [ ] No test imports `_`-prefixed names from production modules
137
+ - [ ] Mock boundaries match the class-level MOCK BOUNDARY contract exactly
138
+ - [ ] Every assertion includes a diagnostic message
139
+ - [ ] All Given / When / Then body comments are present
140
+ - [ ] No local imports inside test methods or helper functions
141
+ - [ ] `pytest.approx` used for all float comparisons
142
+ - [ ] Error tests verify message content, not just exception type
143
+ - [ ] No `assert exc_info.value is not None` — this always passes inside `pytest.raises`
144
+
145
+ ---
146
+
147
+ ### Step 6 — Log Deviations
148
+
149
+ After Steps 4 and 5, record every item that could not be resolved. A deviation is
150
+ anything that prevented full compliance with the spec or with the BDD principles.
151
+
152
+ Append to the module's deviation log section in the orchestration doc:
153
+
154
+ ```
155
+ ## Deviations — test_processor.py
156
+
157
+ ### [DEVIATION] TestScoringBehavior.test_negative_signals_penalize_score
158
+ Could not induce negative_score > 0.3 through public API alone. The production
159
+ code path requires at least 3 documents in the negative_signals collection, but
160
+ the store fixture only seeds 1. The test currently seeds manually via
161
+ store.add() — this bypasses the indexer but is the only path available.
162
+ Recommendation: add a multi-document fixture to conftest, or expose a
163
+ batch-seed method on DataStore.
164
+
165
+ ### [DEVIATION] TestScoreComputation — entire class
166
+ compute_score() is not exposed as a public method on Processor. It appears
167
+ to be internal. All computation tests currently call the private method
168
+ _compute_score() directly, violating the public API rule.
169
+ Recommendation: either promote to public API or test exclusively through
170
+ processor.process() with appropriate input data.
171
+ ```
172
+
173
+ A deviation log entry must include:
174
+ - The specific test or class affected
175
+ - What the spec requires
176
+ - Why full compliance was not achievable
177
+ - A concrete recommendation for resolution
178
+
179
+ Vague entries ("couldn't make it work") are not acceptable. The log is the
180
+ handoff artifact — it must give the next person enough context to act without
181
+ re-investigation.
182
+
183
+ ---
184
+
185
+ ### Step 7 — Perform a Coverage Check
186
+
187
+ After logging deviations, perform a coverage check on the test file. For each
188
+ uncovered line, determine whether it is:
189
+ - A real requirement that should be added to the spec (write the new scenario in the spec
190
+ and log the gap as a deviation)
191
+ - Dead code that should be removed (remove it and log the change as a deviation)
192
+ - Over-engineering that should be removed (remove it and log the change as a deviation)
193
+
194
+ Whether a line existed before your changes is irrelevant — if it is uncovered after your work, it is uncovered. The only valid dispositions are: real requirement (write the spec), dead code (remove it), or over-engineering (remove it). "It was already there" is not a disposition.
195
+
196
+ **Explicit steps to document uncovered lines:**
197
+ 1. Triage all uncovered lines — assign each a disposition
198
+ 2. For every "real requirement" disposition: update the BDD spec doc with the new scenario — do not write any tests yet
199
+ 3. Present the spec additions to the human for review and wait for explicit approval
200
+ 4. Only after approval: write the tests to match the new scenarios
201
+
202
+ ---
203
+
204
+ ### Step 8 — Proceed to Next Module
205
+
206
+ If Steps 4, 5, and 7 are all clean (or all remaining issues are logged in Step 6),
207
+ the module is complete. Proceed to the next module in the orchestration doc.
208
+
209
+ If any unresolved failures exist that were not logged, stop and complete Step 6
210
+ before proceeding.
211
+
212
+ **Logged deviations do not authorize proceeding.** A deviation that cannot be
213
+ resolved after three attempts — including coverage gaps that cannot be closed,
214
+ spec errors, or mock boundary conflicts — requires a human decision before the
215
+ module is considered complete. Do not advance to the next module. Present the
216
+ unresolved deviations and wait.
217
+
218
+ The orchestration doc defines the module order. Do not reorder modules without
219
+ updating the orchestration doc.
220
+
221
+ ---
222
+
223
+ ## Spec Immutability
224
+
225
+ The spec doc is an input to this loop, not an output. If the spec appears to be
226
+ wrong:
227
+
228
+ - **Minor wording issues** — correct silently
229
+ - **A scenario that seems incomplete** — implement what is written, note the gap
230
+ in the deviation log
231
+ - **A scenario that is impossible to implement** — implement the closest compliant
232
+ approximation, log the deviation with full explanation
233
+ - **A genuine error in a REQUIREMENT or MOCK BOUNDARY** — do not silently correct
234
+ it; log the deviation and stop work on that class until the spec is updated
235
+
236
+ The spec encodes domain knowledge and behavioral contracts that were authored with
237
+ full system understanding. A test that contradicts the spec is more likely wrong
238
+ than the spec is.
239
+
240
+ ---
241
+
242
+ ## Deviation Log Format
243
+
244
+ The orchestration doc contains a `## Deviation Log` section. Each module gets its
245
+ own subsection. Use this format:
246
+
247
+ ```markdown
248
+ ## Deviation Log
249
+
250
+ ### test_scorer.py
251
+ - [DEVIATION] TestSemanticScoring.test_culture_score_... — <one-line summary>
252
+ <explanation and recommendation>
253
+
254
+ ### test_config.py
255
+ - [CLEAN] No deviations.
256
+ ```
257
+
258
+ Mark clean modules explicitly. A missing entry is ambiguous — it could mean clean
259
+ or could mean the loop was not completed.