707-command-center-mcp 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. package/dist/server.js +16 -19
  2. package/package.json +1 -1
  3. package/skills/.claude-plugin/plugin.json +8 -0
  4. package/skills/.github/workflows/trigger-mcp-rebuild.yml +16 -0
  5. package/skills/.gitignore +29 -0
  6. package/skills/commands/assign-deals.md +187 -0
  7. package/skills/commands/deal-intake.md +67 -0
  8. package/skills/commands/nm-pooling.md +158 -0
  9. package/skills/commands/review.md +131 -0
  10. package/skills/commands/session-recovery.md +38 -0
  11. package/skills/commands/think.md +49 -0
  12. package/skills/commands/title.md +56 -0
  13. package/skills/skills/deal-intake/SKILL.md +139 -0
  14. package/skills/skills/deal-intake/references/allocation-table-mapping.md +57 -0
  15. package/skills/skills/hubspot-upload/SKILL.md +172 -0
  16. package/skills/skills/hubspot-upload/references/idi-csv-format.md +69 -0
  17. package/skills/skills/nm-pooling/SKILL.md +86 -0
  18. package/skills/skills/ptolemy/SKILL.md +220 -0
  19. package/skills/skills/ptolemy/references/html-template.md +222 -0
  20. package/skills/skills/ptolemy/references/traverse-formulas.md +265 -0
  21. package/skills/skills/ptolemy/skill.md +220 -0
  22. package/skills/skills/seller-portal/SKILL.md +164 -0
  23. package/skills/skills/session-recovery/SKILL.md +53 -0
  24. package/skills/skills/session-recovery/references/001-orphaned-tool-result.md +153 -0
  25. package/skills/skills/session-recovery/references/jsonl-format.md +73 -0
  26. package/skills/skills/title/SKILL.md +108 -0
  27. package/skills/skills/title/references/cursory-title-format.md +70 -0
  28. package/skills/skills/title/references/document-extraction.md +219 -0
  29. package/skills/skills/title/references/nri-from-title.md +138 -0
  30. package/skills/skills/title/references/review-workflow.md +168 -0
  31. package/skills/skills/title/references/title-red-flags.md +45 -0
  32. package/skills/skills/valuation/SKILL.md +225 -0
  33. package/skills/skills/valuation/references/allocation-table-columns.md +211 -0
  34. package/skills/skills/valuation/references/core-formulas.md +81 -0
  35. package/skills/skills/valuation/references/deal-summary-template.md +117 -0
  36. package/skills/skills/valuation/references/eur-workflow.md +141 -0
  37. package/skills/skills/valuation/references/owner-resolution.md +90 -0
  38. package/skills/skills/valuation/references/pdp-workflow.md +119 -0
  39. package/skills/skills/valuation/references/red-flags.md +51 -0
package/dist/server.js CHANGED
@@ -21831,33 +21831,30 @@ function createMcpServer(api) {
21831
21831
  }
21832
21832
 
21833
21833
  // ../src/mcp/server.ts
21834
- import { existsSync, mkdirSync, readdirSync, readFileSync, writeFileSync, statSync } from "fs";
21834
+ import { existsSync, cpSync } from "fs";
21835
21835
  import { join, dirname } from "path";
21836
21836
  import { homedir } from "os";
21837
21837
  import { fileURLToPath } from "url";
21838
21838
  function syncSkills() {
21839
21839
  const __filename = fileURLToPath(import.meta.url);
21840
21840
  const __dirname = dirname(__filename);
21841
- const bundledSkillsDir = join(__dirname, "..", "skills");
21842
- if (!existsSync(bundledSkillsDir)) {
21843
- return;
21844
- }
21845
- const targetDir = join(homedir(), ".claude", "skills");
21846
- mkdirSync(targetDir, { recursive: true });
21847
- function copyDir(src, dest) {
21848
- mkdirSync(dest, { recursive: true });
21849
- for (const entry of readdirSync(src)) {
21850
- const srcPath = join(src, entry);
21851
- const destPath = join(dest, entry);
21852
- if (statSync(srcPath).isDirectory()) {
21853
- copyDir(srcPath, destPath);
21854
- } else {
21855
- writeFileSync(destPath, readFileSync(srcPath));
21856
- }
21841
+ const bundledDir = join(__dirname, "..", "skills");
21842
+ if (!existsSync(bundledDir)) return;
21843
+ const claudeDir = join(homedir(), ".claude");
21844
+ try {
21845
+ const bundledSkills = join(bundledDir, "skills");
21846
+ if (existsSync(bundledSkills)) {
21847
+ cpSync(bundledSkills, join(claudeDir, "skills"), { recursive: true, force: true });
21848
+ console.error(`Skills synced to ${join(claudeDir, "skills")}`);
21849
+ }
21850
+ const bundledCommands = join(bundledDir, "commands");
21851
+ if (existsSync(bundledCommands)) {
21852
+ cpSync(bundledCommands, join(claudeDir, "commands"), { recursive: true, force: true });
21853
+ console.error(`Commands synced to ${join(claudeDir, "commands")}`);
21857
21854
  }
21855
+ } catch (err) {
21856
+ console.error(`Failed to sync skills: ${err.message}`);
21858
21857
  }
21859
- copyDir(bundledSkillsDir, targetDir);
21860
- console.error(`Skills synced to ${targetDir}`);
21861
21858
  }
21862
21859
  async function main() {
21863
21860
  const apiKey = process.env.CC_API_KEY;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "707-command-center-mcp",
3
- "version": "1.0.0",
3
+ "version": "1.0.2",
4
4
  "description": "MCP server for 707 Command Center — deal management, valuation, and mineral data tools",
5
5
  "type": "module",
6
6
  "bin": {
@@ -0,0 +1,8 @@
1
+ {
2
+ "name": "show-goat-skills",
3
+ "description": "Deal valuation skills and commands for Show Goat Capital",
4
+ "version": "1.0.0",
5
+ "author": {
6
+ "name": "Show Goat Capital"
7
+ }
8
+ }
@@ -0,0 +1,16 @@
1
+ name: Trigger MCP Package Rebuild
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+
7
+ jobs:
8
+ dispatch:
9
+ runs-on: ubuntu-latest
10
+ steps:
11
+ - name: Trigger MCP rebuild in command center
12
+ uses: peter-evans/repository-dispatch@v3
13
+ with:
14
+ token: ${{ secrets.DISPATCH_TOKEN }}
15
+ repository: landman-coder/707-command-center
16
+ event-type: skills-updated
@@ -0,0 +1,29 @@
1
+ # Only track commands and skills — everything else is local state
2
+ *
3
+ !.gitignore
4
+ !.claude-plugin/
5
+ !.claude-plugin/**
6
+ !commands/
7
+ !commands/**
8
+ !skills/
9
+ !skills/**
10
+ !.github/
11
+ !.github/**
12
+
13
+ # Claude Code internal state (never track)
14
+ cache/
15
+ debug/
16
+ file-history/
17
+ paste-cache/
18
+ plans/
19
+ plugins/
20
+ projects/
21
+ shell-snapshots/
22
+ statsig/
23
+ tasks/
24
+ telemetry/
25
+ todos/
26
+ history.jsonl
27
+ settings.json
28
+ settings.local.json
29
+ stats-cache.json
@@ -0,0 +1,187 @@
1
+ # Assign Deals to Caller
2
+
3
+ Bulk assign HubSpot deals to a caller by county, date, and amount filters.
4
+
5
+ ## Usage
6
+ ```
7
+ /assign-deals
8
+ ```
9
+
10
+ No arguments — the command asks questions interactively.
11
+
12
+ ## What This Does
13
+ 1. Asks who to assign to, which counties, how many, and date range
14
+ 2. Searches HubSpot for matching deals owned by Reid
15
+ 3. Filters out trusts/estates client-side
16
+ 4. Shows summary + sample deals for confirmation
17
+ 5. Batch updates deal ownership
18
+
19
+ ## Instructions for Claude
20
+
21
+ When the user runs this command:
22
+
23
+ ### 1. Ask Questions (Single Message)
24
+
25
+ Ask all four questions at once using `AskUserQuestion`:
26
+
27
+ - **Caller**: Who should these deals be assigned to?
28
+ - **Counties**: Which TX counties? (comma-separated, e.g., "Madison, Leon, Grimes")
29
+ - **Count**: How many deals to assign? (default: all matching)
30
+ - **Date range**: How far back? (default: last 90 days)
31
+
32
+ ### 2. Resolve Caller to Owner ID
33
+
34
+ Use `mcp__claude_ai_HubSpot__search_owners` to find the caller's HubSpot owner ID.
35
+
36
+ **Known callers (quick reference — verify against HubSpot):**
37
+
38
+ | Name | Likely ID | Role |
39
+ |------|-----------|------|
40
+ | Reid Valentine | 193538161 | Owner (source for deals) |
41
+
42
+ If the caller name doesn't match any owner, show the full owner list and ask the user to pick.
43
+
44
+ ### 3. Search HubSpot for Deals
45
+
46
+ Use `mcp__claude_ai_HubSpot__search_crm_objects` with these filters:
47
+
48
+ ```
49
+ objectType: deals
50
+ searchRequest:
51
+ filterGroups:
52
+ - filters:
53
+ - propertyName: hubspot_owner_id
54
+ operator: EQ
55
+ value: "193538161" # Reid = source owner
56
+ - propertyName: county
57
+ operator: IN
58
+ values: [{user_counties}] # From question
59
+ - propertyName: amount
60
+ operator: GT
61
+ value: "50000"
62
+ - propertyName: amount
63
+ operator: LT
64
+ value: "5000000"
65
+ - propertyName: createdate
66
+ operator: GTE
67
+ value: "{epoch_ms}" # Based on date range
68
+ sorts:
69
+ - propertyName: createdate
70
+ direction: DESCENDING
71
+ properties:
72
+ - dealname
73
+ - amount
74
+ - county
75
+ - createdate
76
+ - hubspot_owner_id
77
+ - dealstage
78
+ limit: 200
79
+ ```
80
+
81
+ **Pagination**: If `total` > 200, paginate using the `after` cursor until all results are collected.
82
+
83
+ ### 4. Filter Names Client-Side
84
+
85
+ Exclude deals where `dealname` matches ANY of these patterns (case-insensitive):
86
+
87
+ - `Trust`
88
+ - `Living`
89
+ - `Life Estate`
90
+ - `Revocable`
91
+ - `Irrev` (catches Irrevocable)
92
+ - `Estate`
93
+
94
+ Keep a count of excluded deals for reporting.
95
+
96
+ ### 5. Apply Count Limit
97
+
98
+ If the user requested a specific count and eligible deals exceed it, take only that many (already sorted by createdate DESC = freshest first).
99
+
100
+ If eligible deals are fewer than requested, stop and report:
101
+ ```
102
+ Found only {eligible} eligible deals (requested {count}). Proceed with all {eligible}?
103
+ ```
104
+
105
+ ### 6. Confirm Before Executing
106
+
107
+ Present a summary table:
108
+
109
+ ```
110
+ ## Deal Assignment Summary
111
+
112
+ **Caller:** {caller_name}
113
+ **Counties:** {county_list}
114
+ **Date range:** {start_date} to today
115
+
116
+ | Metric | Count |
117
+ |--------|-------|
118
+ | Total matches | {total_found} |
119
+ | Excluded (trusts/estates) | {excluded_count} |
120
+ | Eligible | {eligible_count} |
121
+ | **Assigning** | **{assigning_count}** |
122
+
123
+ ### Sample Deals (first 5)
124
+ | Deal Name | County | Amount | Created |
125
+ |-----------|--------|--------|---------|
126
+ | {name} | {county} | ${amount} | {date} |
127
+ | ... | ... | ... | ... |
128
+
129
+ Proceed with assigning {assigning_count} deals to {caller_name}?
130
+ ```
131
+
132
+ **Wait for explicit user confirmation before proceeding.**
133
+
134
+ ### 7. Batch Update Ownership
135
+
136
+ Use `mcp__hubspot__hubspot-batch-update-objects` to update deals in batches of 100:
137
+
138
+ ```
139
+ objectType: deals
140
+ inputs: [
141
+ { id: "{deal_id}", properties: { hubspot_owner_id: "{caller_owner_id}" } },
142
+ ...
143
+ ]
144
+ ```
145
+
146
+ Report progress for large batches:
147
+ ```
148
+ Batch 1/3: Updated 100 deals...
149
+ Batch 2/3: Updated 100 deals...
150
+ Batch 3/3: Updated 47 deals...
151
+ ```
152
+
153
+ ### 8. Final Summary
154
+
155
+ ```
156
+ ## Assignment Complete
157
+
158
+ Assigned {count} deals to {caller_name}.
159
+
160
+ | Detail | Value |
161
+ |--------|-------|
162
+ | Caller | {caller_name} |
163
+ | Counties | {county_list} |
164
+ | Deals assigned | {count} |
165
+ | Trusts/estates skipped | {excluded_count} |
166
+ ```
167
+
168
+ ## Edge Cases
169
+
170
+ | Situation | Action |
171
+ |-----------|--------|
172
+ | 0 matches found | Report "No deals match these filters." Suggest adjusting county list or date range. |
173
+ | Fewer than requested | Stop and ask user if they want to proceed with all available. |
174
+ | Caller not found in HubSpot | Show owner list from `search_owners` and ask user to pick. |
175
+ | Large batch (>1000 deals) | Warn: "This will update {count} deals. Are you sure?" before proceeding. |
176
+ | API rate limit / error | Stop, report the error, and show how many were successfully updated. |
177
+
178
+ ## Fixed Parameters (Not Asked)
179
+
180
+ These don't change between runs:
181
+
182
+ | Parameter | Value | Reason |
183
+ |-----------|-------|--------|
184
+ | Source owner | Reid Valentine (193538161) | All unassigned deals start with Reid |
185
+ | Min amount | $50,000 | Below this = not worth calling |
186
+ | Max amount | $5,000,000 | Above this = Reid handles directly |
187
+ | Name exclusions | Trust, Living, Life Estate, Revocable, Irrev, Estate | Trusts/estates require specialized handling |
@@ -0,0 +1,67 @@
1
+ # Deal Intake
2
+
3
+ Process a closed deal into the inventory database.
4
+
5
+ ## Usage
6
+ ```
7
+ /deal-intake [deal_name_or_path]
8
+ ```
9
+
10
+ ## Arguments
11
+ - `deal_name_or_path` — Deal name (searches HubSpot/Command Center) or path to the deal folder
12
+
13
+ ## Instructions for Claude
14
+
15
+ When the user runs this command:
16
+
17
+ ### 1. Load the deal-intake skill
18
+ Load the `deal-intake` skill for field specs and workflow details.
19
+
20
+ ### 2. Locate the deal folder
21
+ If a name was provided, use `/deal-context` to find it. If a path was provided, use that directly.
22
+
23
+ Expected location: `1 - Qualified/{Deal Name}/`
24
+
25
+ ### 3. Inventory the folder contents
26
+ List what's in the deal folder. Look for:
27
+ - `_Claude_Analysis.md` — read if it exists (has prior analysis)
28
+ - `*.xlsx` or `*.csv` — conveyance exhibit / property schedule
29
+ - `Stubs/` folder — revenue stubs (NRI, operators, lease IDs)
30
+ - `Title/` folder — title opinions (legal descriptions, depth clauses)
31
+ - Division orders, bank statements, etc.
32
+
33
+ Not all folders will have the same documents. Work with what's available.
34
+
35
+ ### 4. Extract what you can
36
+ Use Python openpyxl for Excel files. Read PDFs with the Read tool. Extract:
37
+ - Legal descriptions, counties, interest amounts
38
+ - Operator names, lease IDs, NRI from stubs
39
+ - Well names and API numbers
40
+
41
+ ### 5. Ask the user to fill gaps
42
+ After extracting available data, tell the user:
43
+ - What fields you were able to fill
44
+ - What fields are missing and where they typically come from
45
+ - Ask the user to provide missing required fields (entity_short_name, NRI, etc.)
46
+
47
+ **Track what was missing and where it came from** — record this in a note at the end so we can improve the skill for future deals.
48
+
49
+ ### 5b. Include Ptolemy geometry (if available)
50
+ If `{deal}_tracts.geojson` exists in the deal folder, read it and include it as the `tract_geojson` field in the payload. This populates `tract_geometries` with geometry immediately during intake instead of requiring a separate backfill step.
51
+
52
+ ### 6. Build and POST payload
53
+ Once all required fields are confirmed, POST to the inventory API using the MCP `inventory` → `intake_deal` action.
54
+ Include `tract_geojson` in the payload if a Ptolemy GeoJSON file was found in step 5b.
55
+
56
+ ### 7. Report results
57
+ - Show counts: legals, leases, wells, recordings created
58
+ - Show any master lease diffs and ask user to confirm/reject
59
+
60
+ ### 8. Record learnings
61
+ After completing the intake, append to `Dev/notes/deal-intake-learnings.md`:
62
+ - Deal name and date
63
+ - What documents were available
64
+ - What data was missing and how it was obtained
65
+ - Any new patterns discovered (file naming, folder structure, data locations)
66
+
67
+ This log helps us make the skill more robust over time.
@@ -0,0 +1,158 @@
1
+ # NM Compulsory Pooling Contact Extractor
2
+
3
+ Extract owner contacts from NM OCD compulsory pooling case files for mail merge.
4
+
5
+ ## Usage
6
+ ```
7
+ /nm-pooling [input_file] [--workers N]
8
+ ```
9
+
10
+ ## Arguments
11
+ - `input_file` — Path to Excel file with case URLs (optional — will prompt if not provided)
12
+ - `--workers N` — Number of parallel workers (default: 4)
13
+
14
+ ## What This Does
15
+ 1. Reads the input Excel file containing NM OCD case URLs
16
+ 2. Downloads the largest PDF from each case (contains mailing lists)
17
+ 3. OCRs mailing list pages and extracts contacts
18
+ 4. Handles multiple formats (PS Form 3877, Postal Delivery Reports, etc.)
19
+ 5. Deduplicates contacts and flags oil companies/government entities
20
+ 6. Outputs formatted Excel ready for CRM upload and mail merge
21
+
22
+ ## Output Location
23
+ ```
24
+ C:\Users\ReidValentine\OneDrive - 707 Advisors, LLC\Show Goat Capital, LP - Show Goat Capital, LP\01 Land\99 Drilling Permit & Compulsory Pooling Query\NM - Mail Merge Lists\2026\{YYYYMMDD} - NMCompPooling - All\
25
+ ```
26
+
27
+ ## Instructions for Claude
28
+
29
+ When the user runs this command:
30
+
31
+ 1. If no input file specified, ask for the path to the Excel file containing the cases.
32
+
33
+ 2. Run the extraction script:
34
+ ```bash
35
+ cd C:\Users\ReidValentine\Dev\automation\nm-pooling
36
+ python nm_pooling_extractor.py "<input_file>" --workers 4
37
+ ```
38
+
39
+ 3. The script will:
40
+ - Process cases in parallel (4 workers by default)
41
+ - Show progress as it downloads and processes each case
42
+ - Create output in the 2026 folder with today's date
43
+
44
+ 4. After completion, report:
45
+ - Total cases processed
46
+ - Total unique contacts extracted
47
+ - Number flagged for review (oil companies/government)
48
+ - Any cases with errors that need manual review
49
+ - Path to output file
50
+
51
+ 5. If there are errors, offer to help troubleshoot specific cases by examining their PDFs.
52
+
53
+ ## Known Format Variations
54
+
55
+ | Operator/Law Firm | Format | Pages | Status |
56
+ |-------------------|--------|-------|--------|
57
+ | Modrall Sperling | PS Form 3877 | Last 50-100 pages | ✅ Supported |
58
+ | MRC/Matador | Postal Delivery Report | Pages 190-220 | ✅ Supported |
59
+ | Hardy McLean LLC | USPS Electronic Return Receipt | Exhibit C-3 (varies) | ⚠️ Needs parser |
60
+ | Spencer Fane LLP | PS Form 3811 | Pages 60-120 | ⚠️ Needs parser |
61
+ | Spencer Fane LLP | Tabular Certified Mail List | Last 70 pages | ⚠️ Needs parser |
62
+
63
+ ### Format Details
64
+
65
+ #### 1. PS Form 3877 (Modrall Sperling)
66
+ - **Detection**: Look for "PS Form 3877" header, rotated 90 degrees
67
+ - **Location**: Last 50-100 pages of PDF
68
+ - **Pattern**: Name and address in columnar format
69
+ - **OCR Required**: Yes (Type3 embedded fonts)
70
+
71
+ #### 2. Postal Delivery Report (MRC/Matador)
72
+ - **Detection**: "Postal Delivery Report" header with table columns
73
+ - **Location**: Pages 180-220
74
+ - **Pattern**: Table with Name, Address, City, State, Zip columns
75
+ - **OCR Required**: Yes
76
+
77
+ #### 3. USPS Electronic Return Receipt (Hardy McLean LLC)
78
+ - **Operators**: Permian Resources Operating, LLC
79
+ - **Detection**: "UNITED STATES POSTAL SERVICE" + "Simple Certified" or "proof of delivery"
80
+ - **Location**: Exhibit C-3 (pages 40-60 typically)
81
+ - **Pattern**: Footer block on each page:
82
+ ```
83
+ [Company Name]
84
+ [Street Address]
85
+ [City, State ZIP]
86
+ Reference #: PR [Well Name]
87
+ ```
88
+ - **OCR Required**: No (text extractable)
89
+ - **Notes**: One recipient per page, ~10-20 recipients per case
90
+
91
+ #### 4. PS Form 3811 (Spencer Fane LLP)
92
+ - **Operators**: Longfellow Energy, various
93
+ - **Detection**: "Article Addressed to:" header
94
+ - **Location**: Pages 60-120 (Exhibit C-3)
95
+ - **Pattern**:
96
+ ```
97
+ 1. Article Addressed to:
98
+
99
+ [Company Name]
100
+ [Street Address]
101
+ [City, State ZIP]
102
+ ```
103
+ - **OCR Required**: Yes
104
+ - **Notes**: Individual tracking pages per recipient, ~40 pages = ~40 recipients
105
+
106
+ #### 5. Spencer Fane Tabular Format
107
+ - **Operators**: Tumbler Operating Partners, various
108
+ - **Detection**: Column headers "Entity / Individual | Date Notice Letter Mailed | Certified Mail Number"
109
+ - **Location**: Last 70 pages (Exhibit E-2)
110
+ - **Pattern**:
111
+ ```
112
+ [Entity Name] June 20, 2025 9314 7699 0430... Delivered
113
+ [Street Address]
114
+ [City, State ZIP]
115
+ ```
116
+ - **Sections**: WORKING INTERESTS, ROYALTY INTEREST OWNERS, ORRI, FEDERAL/STATE AGENCIES
117
+ - **OCR Required**: Yes
118
+ - **Notes**: ~40 contacts per case, handles "Texas" and "TX" state formats
119
+
120
+ ## Troubleshooting
121
+
122
+ If a case fails with "No contacts found":
123
+
124
+ 1. **Identify the law firm** from the PDF cover page or application
125
+ 2. **Check the format table above** to see if it's a known format
126
+ 3. **Find the mailing list pages**:
127
+ - Look for Exhibit C (Proof of Service) or Exhibit E (Certified Mail)
128
+ - Check last 50-100 pages first
129
+ - Look for USPS tracking numbers (9314..., 9590...)
130
+ 4. **If it's a new format**, document:
131
+ - Law firm name
132
+ - Page numbers where contacts appear
133
+ - Text pattern/structure
134
+ - Whether OCR is needed
135
+ 5. **Add to this table** for future reference
136
+
137
+ ### Known Edge Cases
138
+
139
+ **Upside-down pages (Case 25803 pattern)**: Some Modrall Sperling PDFs have pages rotated 180°. If a case shows 0 contacts but you know mailing lists exist:
140
+ 1. Download the PDF manually
141
+ 2. Check if pages are upside-down
142
+ 3. Rotate in PDF editor or use: `img.rotate(180, expand=True)` before OCR
143
+
144
+ ### Manual Extraction Fallback
145
+
146
+ If automated extraction fails, use Claude to manually OCR specific pages:
147
+ ```python
148
+ import fitz
149
+ import pytesseract
150
+ from PIL import Image
151
+
152
+ pdf = fitz.open("path/to/pdf.pdf")
153
+ page = pdf[PAGE_NUMBER - 1] # 0-indexed
154
+ pix = page.get_pixmap(dpi=300)
155
+ img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
156
+ text = pytesseract.image_to_string(img)
157
+ print(text)
158
+ ```
@@ -0,0 +1,131 @@
1
+ # Session Review
2
+
3
+ Review all work done during this session and surface messy/weak spots.
4
+
5
+ ## Usage
6
+ ```
7
+ /review
8
+ ```
9
+
10
+ ## Arguments
11
+ None. Reviews everything touched in the current session.
12
+
13
+ ## What This Does
14
+ 1. Identifies all files modified during the session (git-tracked and untracked)
15
+ 2. Collects diffs and reads modified files in full
16
+ 3. Launches 3 parallel review agents (Shape, Mechanics, Cleanup)
17
+ 4. Aggregates findings, deduplicates, applies confidence scoring
18
+ 5. Outputs a structured review report
19
+ 6. Interactive issue resolution with the user
20
+
21
+ ## Instructions for Claude
22
+
23
+ When the user runs this command:
24
+
25
+ ### 1. Gather Session Context
26
+
27
+ First, identify which repos under `C:\Users\ReidValentine\Dev` have changes:
28
+
29
+ ```bash
30
+ # Check each known repo for changes
31
+ for repo in 707-command-center seller-portal automation config; do
32
+ echo "=== $repo ==="
33
+ git -C /c/Users/ReidValentine/Dev/$repo status --short 2>/dev/null
34
+ git -C /c/Users/ReidValentine/Dev/$repo diff --stat 2>/dev/null
35
+ done
36
+ ```
37
+
38
+ Also check for changes in non-git locations:
39
+ - `.claude/commands/` — new or modified commands
40
+ - `.claude/skills/` — new or modified skills
41
+ - `Dev/notes/` — modified notes
42
+
43
+ Use conversation history to identify files touched during this session that may already be committed.
44
+
45
+ ### 2. Get Recent Commit Context
46
+
47
+ For each repo with changes or recent commits:
48
+
49
+ ```bash
50
+ git -C /c/Users/ReidValentine/Dev/$repo log --oneline -10
51
+ ```
52
+
53
+ This shows what was already committed during the session.
54
+
55
+ ### 3. Collect Full Diffs
56
+
57
+ For uncommitted changes:
58
+ ```bash
59
+ git -C /c/Users/ReidValentine/Dev/$repo diff
60
+ git -C /c/Users/ReidValentine/Dev/$repo diff --cached
61
+ ```
62
+
63
+ ### 4. Launch 3 Parallel Review Agents
64
+
65
+ Launch all three agents in a **single message** using the Agent tool. Pass each agent the full diff and the list of changed files.
66
+
67
+ #### Agent 1: Shape Review (Architecture + Naming + Consistency)
68
+ Prompt the agent with:
69
+ - The full diff
70
+ - Instructions to read each changed file in full (diffs alone lack context for naming/architecture judgment)
71
+ - Check: Does new code follow established codebase patterns? Are there abstraction leaks, wrong-layer logic, or CLAUDE.md convention violations?
72
+ - Check: Are names clear, consistent, and non-misleading? Flag generic names, inconsistent casing, abbreviation soup.
73
+ - Check: Does new code match surrounding style? Mixed conventions? New patterns where existing ones already work?
74
+ - Apply confidence scoring (only report findings > 0.7)
75
+ - Reference: `.claude/skills/session-review/references/review-checklist.md` for Architecture, Naming anti-patterns
76
+
77
+ #### Agent 2: Mechanics Review (Reuse + Efficiency)
78
+ Prompt the agent with:
79
+ - The full diff
80
+ - Instructions to **Grep the codebase** for existing utilities and helpers that match newly written code. Check utility directories, shared modules, adjacent files.
81
+ - Flag: new functions that duplicate existing functionality (name the existing function), inline logic reimplementing an existing utility, copy-paste with slight variation, stringly-typed code where constants/enums exist
82
+ - Flag: N+1 queries, sequential-but-independent awaits, redundant computation, duplicate I/O, TOCTOU existence checks, startup bloat, unbounded accumulation, overly broad reads
83
+ - Apply confidence scoring (only report findings > 0.7)
84
+ - Reference: `.claude/skills/session-review/references/review-checklist.md` for Reuse, Efficiency anti-patterns
85
+
86
+ #### Agent 3: Cleanup Review (Simplicity + Completeness + Cruft)
87
+ Prompt the agent with:
88
+ - The full diff
89
+ - Instructions to read each changed file in full
90
+ - Check: YAGNI violations, premature abstractions, unnecessary indirection, config where a constant would do
91
+ - Check: Unhandled cases at system boundaries, missing validation on external input, silent error swallowing, partial implementations without TODO markers
92
+ - Check: Dead comments, contextless TODOs, debug artifacts, unused imports/variables, hardcoded values that should be constants
93
+ - Apply confidence scoring (only report findings > 0.7)
94
+ - Reference: `.claude/skills/session-review/references/review-checklist.md` for Over-Engineering, Completeness, Cruft anti-patterns
95
+
96
+ ### 5. Aggregate & Report
97
+
98
+ After all 3 agents return:
99
+ 1. Deduplicate findings (agents may flag the same issue from different angles)
100
+ 2. Assign category tags: `ARCHITECTURE`, `REUSE`, `NAMING`, `EFFICIENCY`, `SIMPLICITY`, `COMPLETENESS`, `CONSISTENCY`, `CRUFT`
101
+ 3. Output using the structured format in SKILL.md, grouped by file, ordered by severity
102
+
103
+ ### 6. Interactive Issue Resolution
104
+
105
+ After presenting the review report, ask the user: **"Ready to go through the issues, or any pushback/context first?"**
106
+
107
+ Wait for the user to respond. They may:
108
+ - Push back on findings ("Issue #2 isn't actually a problem because...")
109
+ - Add context that changes the assessment
110
+ - Remove or re-prioritize issues
111
+ - Say they're ready to proceed
112
+
113
+ Once the user confirms they're ready, use `AskUserQuestion` to let them act on each finding. Group related issues where possible (max 4 questions per call).
114
+
115
+ For each issue, present options like:
116
+ - **Fix now** — Claude makes the change immediately
117
+ - **Verify first** — Claude checks assumptions before acting (e.g., confirm an endpoint exists)
118
+ - **Skip** — Not worth addressing
119
+
120
+ If there are more than 4 issues, batch them across multiple AskUserQuestion calls. Process the first batch, apply fixes, then ask about remaining issues.
121
+
122
+ After all issues are resolved or skipped, output a one-line summary: "Review complete — X fixed, Y skipped."
123
+
124
+ ## Auto-Suggest Behavior
125
+
126
+ Claude should proactively suggest running `/review` in these situations:
127
+ - Before the user makes a commit (if substantial work was done)
128
+ - At the natural end of a session when work is wrapping up
129
+ - After completing a multi-file change
130
+
131
+ This is a **suggestion only** — do not run automatically without the user invoking `/review`.