appydave-tools 0.36.0 → 0.37.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: c9081147c515c93a4c3a45f8ca2f8f94dfc4c69b8025d78212a121dde8b70927
4
- data.tar.gz: 88e44bc2c634968665123e1dcedda8bf57c415b524c14c78752f456439df712e
3
+ metadata.gz: 4446eafccf18838c5cdb317afd41e907bf87fcdbd7714920184f665f9f30cb1c
4
+ data.tar.gz: fd10e3dac35034888853c30a90b65b1b545d5e2b43e69d53003b53b181cf8ee1
5
5
  SHA512:
6
- metadata.gz: 418d0e01e3773faeb31c966e7875fa6d985d9fc4af9a697f48b15658a10bb55efdfc8d5f8c8c88df62854d9b8189b0d6a4df0761421db90fb4f5fda671b13086
7
- data.tar.gz: 7bef2c885d0d89df18b6c9ebd91fe2c9821f91973ddb7421a6184db13e1db7f8ec4289ff4a755b3b6930be28549f7bd4786bc018f08f6b096dfc140cb167c4ab
6
+ metadata.gz: 4ac1ebdaf9beae7c6f121d2479ac14dd21dfd2a3ed1fcb32ae1f79917f96649a78c4c519ec7ea179ccfe21c9e94031de8ba057c3e6c49c7f708c941e121cd54e
7
+ data.tar.gz: c9bcf5b1f6462dd9842311f5ef8338d1ac7664eeec0e7b028bc97f43e9027d4afc013d0c255f695f5230c4d6721db5e09c164a69418d67cb3399f95b35d8018d
data/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
1
+ # [0.36.0](https://github.com/appydave/appydave-tools/compare/v0.35.0...v0.36.0) (2025-11-21)
2
+
3
+
4
+ ### Features
5
+
6
+ * add debug logging for config loading with DAM_DEBUG env var for remote debugging ([8844aa9](https://github.com/appydave/appydave-tools/commit/8844aa9ae891ff22b36e824feff64f103565cec8))
7
+
1
8
  # [0.35.0](https://github.com/appydave/appydave-tools/compare/v0.34.1...v0.35.0) (2025-11-21)
2
9
 
3
10
 
data/bin/dam CHANGED
@@ -399,6 +399,7 @@ class VatCLI
399
399
  end
400
400
 
401
401
  # Parse S3 command arguments
402
+ # rubocop:disable Metrics/AbcSize, Metrics/MethodLength
402
403
  def parse_s3_args(args, command)
403
404
  dry_run = args.include?('--dry-run')
404
405
  force = args.include?('--force')
@@ -417,6 +418,22 @@ class VatCLI
417
418
  end
418
419
  brand_key = brand # Already detected, use as-is
419
420
  else
421
+ # Validate brand exists before trying to resolve project
422
+ unless valid_brand?(brand_arg)
423
+ puts "❌ Invalid brand: '#{brand_arg}'"
424
+ puts ''
425
+ puts 'Valid brands:'
426
+ puts ' appydave → v-appydave (AppyDave brand)'
427
+ puts ' voz → v-voz (VOZ client)'
428
+ puts ' aitldr → v-aitldr (AITLDR brand)'
429
+ puts ' kiros → v-kiros (Kiros client)'
430
+ puts ' joy → v-beauty-and-joy (Beauty & Joy)'
431
+ puts ' ss → v-supportsignal (SupportSignal)'
432
+ puts ''
433
+ puts "Usage: dam #{command} <brand> <project> [--dry-run]"
434
+ exit 1
435
+ end
436
+
420
437
  brand_key = brand_arg # Use the shortcut/key (e.g., 'appydave')
421
438
  brand = Appydave::Tools::Dam::Config.expand_brand(brand_arg) # Expand for path resolution
422
439
  project_id = Appydave::Tools::Dam::ProjectResolver.resolve(brand_arg, project_arg)
@@ -427,6 +444,70 @@ class VatCLI
427
444
 
428
445
  { brand: brand_key, project: project_id, dry_run: dry_run, force: force }
429
446
  end
447
+ # rubocop:enable Metrics/AbcSize, Metrics/MethodLength
448
+
449
+ def valid_brand?(brand_key)
450
+ Appydave::Tools::Configuration::Config.configure
451
+ brands = Appydave::Tools::Configuration::Config.brands
452
+ brands.key?(brand_key) || brands.shortcut?(brand_key)
453
+ end
454
+
455
+ # Add local sync status to matched projects data
456
+ # Mutates the matched_projects hash to add :local_status key
457
+ # @param matched_projects [Hash] Map of project_id => S3 data
458
+ # @param brand_key [String] Brand key (e.g., 'appydave', 'ss')
459
+ # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
460
+ def add_local_sync_status!(matched_projects, brand_key)
461
+ matched_projects.each do |project_id, data|
462
+ project_path = Appydave::Tools::Dam::Config.project_path(brand_key, project_id)
463
+ s3_staging_path = File.join(project_path, 's3-staging')
464
+
465
+ if !Dir.exist?(project_path)
466
+ data[:local_status] = :no_project # Project directory doesn't exist
467
+ elsif !Dir.exist?(s3_staging_path)
468
+ data[:local_status] = :no_files # Project exists but no downloads yet
469
+ else
470
+ # Count local files in s3-staging
471
+ local_files = Dir.glob(File.join(s3_staging_path, '**', '*'))
472
+ .select { |f| File.file?(f) }
473
+ .reject { |f| File.basename(f).include?('Zone.Identifier') } # Exclude Windows metadata
474
+
475
+ s3_file_count = data[:file_count]
476
+ local_file_count = local_files.size
477
+
478
+ data[:local_status] = if local_file_count.zero?
479
+ :no_files
480
+ elsif local_file_count == s3_file_count
481
+ :synced # Fully synced
482
+ else
483
+ :partial # Some files downloaded
484
+ end
485
+
486
+ data[:local_file_count] = local_file_count
487
+ end
488
+ end
489
+ end
490
+ # rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
491
+
492
+ # Format local sync status for display
493
+ # @param status [Symbol] :synced, :no_files, :partial, :no_project
494
+ # @param local_count [Integer, nil] Number of local files
495
+ # @param s3_count [Integer] Number of S3 files
496
+ # @return [String] Formatted status string
497
+ def format_local_status(status, local_count, s3_count)
498
+ case status
499
+ when :synced
500
+ '✓ Synced'
501
+ when :no_files
502
+ '⚠ None'
503
+ when :partial
504
+ "⚠ #{local_count}/#{s3_count}"
505
+ when :no_project
506
+ '✗ Missing'
507
+ else
508
+ 'Unknown'
509
+ end
510
+ end
430
511
 
431
512
  def parse_share_args(args)
432
513
  # Extract --expires flag
@@ -1244,6 +1325,9 @@ class VatCLI
1244
1325
  # Write updated manifest
1245
1326
  File.write(manifest_path, JSON.pretty_generate(manifest))
1246
1327
 
1328
+ # Add local sync status to matched projects
1329
+ add_local_sync_status!(matched_projects, brand_key)
1330
+
1247
1331
  # Display table
1248
1332
  display_s3_scan_table(matched_projects, orphaned_projects, bucket, prefix, region)
1249
1333
 
@@ -1265,30 +1349,30 @@ class VatCLI
1265
1349
  def display_s3_scan_table(matched_projects, orphaned_projects, bucket, prefix, region)
1266
1350
  puts '✅ S3 Projects Report'
1267
1351
  puts ''
1268
- puts 'PROJECT FILES SIZE STATUS LAST MODIFIED'
1269
- puts '-' * 80
1352
+ puts 'PROJECT FILES SIZE LOCAL S3 MODIFIED'
1353
+ puts '-' * 92
1270
1354
 
1271
1355
  # Display matched projects first (sorted alphabetically)
1272
1356
  matched_projects.sort.each do |project_id, data|
1273
1357
  files = data[:file_count].to_s.rjust(5)
1274
1358
  size = format_bytes(data[:total_bytes]).rjust(10)
1275
- status = '✓'
1359
+ local_status = format_local_status(data[:local_status], data[:local_file_count], data[:file_count])
1276
1360
  modified = data[:last_modified] ? Time.parse(data[:last_modified]).strftime('%Y-%m-%d %H:%M') : 'N/A'
1277
1361
 
1278
- puts format('%-26s %5s %10s %s %s', project_id, files, size, status, modified)
1362
+ puts format('%-36s %5s %10s %-9s %s', project_id, files, size, local_status, modified)
1279
1363
  end
1280
1364
 
1281
1365
  # Display orphaned projects (sorted alphabetically)
1282
1366
  return if orphaned_projects.empty?
1283
1367
 
1284
- puts '-' * 80
1368
+ puts '-' * 92
1285
1369
  orphaned_projects.sort.each do |project_id, data|
1286
1370
  files = data[:file_count].to_s.rjust(5)
1287
1371
  size = format_bytes(data[:total_bytes]).rjust(10)
1288
- status = ''
1372
+ local_status = 'N/A'
1289
1373
  modified = data[:last_modified] ? Time.parse(data[:last_modified]).strftime('%Y-%m-%d %H:%M') : 'N/A'
1290
1374
 
1291
- puts format('%-26s %5s %10s %s %s', project_id, files, size, status, modified)
1375
+ puts format('%-36s %5s %10s %-9s %s', project_id, files, size, local_status, modified)
1292
1376
  end
1293
1377
 
1294
1378
  puts ''
@@ -0,0 +1,402 @@
1
+ # Code Quality Retrospective Analysis
2
+
3
+ ## Objective
4
+ Analyze recent development history to identify architectural issues, code duplication, pattern inconsistencies, and other AI-generated code problems.
5
+
6
+ ## Context
7
+ This is a Ruby gem project (`appydave-tools`) that provides CLI tools for YouTube content creation workflows. The codebase is maintained by humans with AI assistance. AI-generated code excels at writing individual implementations but often fails to:
8
+ - Look around the system to reuse existing patterns
9
+ - Follow established testing conventions
10
+ - Avoid duplicating functionality that already exists elsewhere
11
+ - Maintain architectural consistency across features
12
+
13
+ ## What You Need From the User
14
+ 1. **Time range:** How many days back to analyze (default: last 3-7 days)
15
+ 2. **Focus areas:** Specific parts of codebase (e.g., "DAM/configuration changes" or "all of lib/")
16
+
17
+ ## Analysis Process
18
+
19
+ ### Phase 1: Discovery (Understand What Changed)
20
+ ```bash
21
+ # Get git log with stats for the specified period
22
+ git log --since="3 days ago" --stat --oneline
23
+
24
+ # Identify most-changed files
25
+ git log --since="3 days ago" --name-only --pretty=format: | sort | uniq -c | sort -rn | head -20
26
+
27
+ # Get commit messages to understand intent
28
+ git log --since="3 days ago" --pretty=format:"%h - %s (%an, %ar)"
29
+ ```
30
+
31
+ **Output:**
32
+ - List of changed files grouped by frequency
33
+ - Summary of feature areas touched
34
+ - Commit themes (new features, refactorings, bug fixes)
35
+
36
+ ### Phase 2: Code Duplication Analysis
37
+
38
+ **What to search for:**
39
+ 1. **Similar method names** across different files:
40
+ - `brand.*path`, `resolve.*brand`, `get.*config`
41
+ - `load.*`, `find.*`, `lookup.*`
42
+ - `validate.*`, `check.*`, `ensure.*`
43
+
44
+ 2. **Repeated logic patterns:**
45
+ - Brand/project path resolution
46
+ - Configuration loading/parsing
47
+ - File path manipulation (joining, expanding, validating)
48
+ - Error handling patterns
49
+ - Nil checking patterns
50
+
51
+ 3. **Copy-paste indicators:**
52
+ - Similar code blocks with minor variable name changes
53
+ - Identical error messages in multiple places
54
+ - Repeated conditional logic
55
+ - Similar method signatures with slight variations
56
+
57
+ **Tools to use:**
58
+ ```bash
59
+ # Search for method definitions with similar names
60
+ grep -rn "def.*brand.*path" lib/
61
+
62
+ # Find repeated string patterns (error messages, etc.)
63
+ grep -rn "Configuration.*not found" lib/
64
+
65
+ # Look for similar class structures
66
+ find lib/ -name "*.rb" -exec grep -l "class.*Resolver" {} \;
67
+ ```
68
+
69
+ **What to report:**
70
+ - File pairs with duplicated logic
71
+ - Line numbers of similar implementations
72
+ - Severity: Critical (exact duplication) vs Moderate (similar patterns)
73
+ - Suggested consolidation approach
74
+
75
+ ### Phase 3: Pattern Consistency Analysis
76
+
77
+ #### Testing Patterns
78
+ Compare test files to identify inconsistencies:
79
+
80
+ 1. **Spec structure:**
81
+ - Are all specs using `describe`/`context`/`it` consistently?
82
+ - Are test descriptions following the same format?
83
+ - Are expectations using consistent matchers (`expect().to eq()` vs `expect().to be()`)?
84
+
85
+ 2. **Test setup:**
86
+ - Are mocks/stubs handled consistently?
87
+ - Is test data creation following DRY principles?
88
+ - Are `let` blocks used consistently vs instance variables?
89
+ - Are `before` blocks structured similarly?
90
+
91
+ 3. **Test coverage patterns:**
92
+ - Are edge cases tested consistently (nil values, empty strings, missing files)?
93
+ - Are error conditions tested?
94
+ - Are success and failure paths both covered?
95
+
96
+ **Example patterns to check:**
97
+ ```ruby
98
+ # Pattern A (preferred?)
99
+ let(:config) { described_class.new }
100
+ subject { config.load }
101
+
102
+ # Pattern B (alternative?)
103
+ before do
104
+ @config = described_class.new
105
+ end
106
+
107
+ # Are both patterns used? Should we standardize?
108
+ ```
109
+
110
+ #### Code Patterns
111
+ 1. **Error handling:**
112
+ - Consistent use of `raise` vs `return nil` vs `puts` for errors
113
+ - Error message formatting
114
+ - Use of custom exceptions vs standard library
115
+
116
+ 2. **Logging:**
117
+ - Consistent logging levels (debug, info, warn, error)
118
+ - Consistent log message formatting
119
+ - Protected debugging infrastructure (DO NOT flag as bloat)
120
+
121
+ 3. **Class initialization:**
122
+ - Consistent parameter handling
123
+ - Consistent use of keyword arguments vs positional
124
+ - Consistent defaults handling
125
+
126
+ 4. **Method naming:**
127
+ - `get_x` vs `fetch_x` vs `find_x` vs `x` (getter)
128
+ - `set_x` vs `update_x` vs `x=` (setter)
129
+ - Boolean methods ending in `?`
130
+ - Dangerous methods ending in `!`
131
+
132
+ ### Phase 4: Architectural Concerns
133
+
134
+ **What to flag:**
135
+
136
+ 1. **Methods that are too large:**
137
+ - \>20 lines of actual logic (not counting comments/whitespace)
138
+ - Complex nested conditionals
139
+ - Multiple concerns mixed together
140
+
141
+ 2. **Classes with too many responsibilities:**
142
+ - Classes with \>10 public methods
143
+ - Classes that do multiple unrelated things
144
+ - God objects that know too much about the system
145
+
146
+ 3. **Tight coupling:**
147
+ - Classes that directly instantiate other classes (should use dependency injection)
148
+ - Hard-coded paths or configuration
149
+ - Direct file system access instead of using abstractions
150
+
151
+ 4. **Missing abstractions:**
152
+ - Repeated conditional logic that should be polymorphic
153
+ - Switch statements on type that should use inheritance/composition
154
+ - Duplicated algorithms that should be extracted
155
+
156
+ 5. **Inconsistent use of existing utilities:**
157
+ - Code that reimplements functionality available in the standard library
158
+ - Code that doesn't use existing helper methods in the project
159
+
160
+ **Example checks:**
161
+ ```ruby
162
+ # Bad: Repeated conditional
163
+ if brand == "appydave"
164
+ "v-appydave"
165
+ elsif brand == "voz"
166
+ "v-voz"
167
+ # ... repeated in 3 different files
168
+
169
+ # Better: Use existing BrandResolver or similar
170
+ BrandResolver.resolve(brand)
171
+ ```
172
+
173
+ ### Phase 5: Protected Code Patterns ⚠️
174
+
175
+ **DO NOT flag these as problems:**
176
+ - Debug logging statements (even if verbose)
177
+ - Nil-check logging before operations
178
+ - Configuration loading trace logs
179
+ - Error context enrichment
180
+ - Defensive programming patterns (explicit nil checks, argument validation)
181
+
182
+ **Why:** These patterns exist for remote debugging and production issue diagnosis. They may look like "code bloat" but they're intentional safety nets.
183
+
184
+ ## Report Format
185
+
186
+ ```markdown
187
+ # Code Quality Analysis - [Date Range]
188
+
189
+ ## Summary
190
+ - **Commits analyzed:** X commits
191
+ - **Files changed:** Y files in Z directories
192
+ - **Key areas:** [list main feature areas]
193
+ - **Analysis date:** [current date]
194
+
195
+ ## Critical Issues 🔴
196
+ ### 1. Code Duplication: [Short Description]
197
+ - **Found in:**
198
+ - `lib/path/to/file1.rb:123-145`
199
+ - `lib/path/to/file2.rb:456-478`
200
+ - **Description:** [What functionality is duplicated]
201
+ - **Impact:**
202
+ - Maintenance burden (changes must be made in 2+ places)
203
+ - Bug risk (fixes might miss one location)
204
+ - Code bloat (X lines duplicated)
205
+ - **Recommendation:**
206
+ - Extract to `lib/appydave/tools/utils/[name].rb`
207
+ - Or add to existing `[ExistingClass]` if related
208
+ - **Estimated effort:** [Small/Medium/Large]
209
+
210
+ ### 2. Architectural Issue: [Description]
211
+ [Similar format]
212
+
213
+ ## Moderate Issues 🟡
214
+ ### 1. Pattern Inconsistency: [Description]
215
+ - **Found in:** [file locations]
216
+ - **Pattern A:** [description with code example]
217
+ - **Pattern B:** [description with code example]
218
+ - **Recommendation:** Standardize on Pattern [A/B] because [reason]
219
+ - **Files to update:** [list]
220
+
221
+ ### 2. Missing Test Coverage: [Description]
222
+ [Similar format]
223
+
224
+ ## Minor Observations 🔵
225
+ ### 1. [Observation]
226
+ - **Description:** [what was noticed]
227
+ - **Impact:** Low priority but worth noting
228
+ - **Recommendation:** [optional improvement]
229
+
230
+ ## Positive Patterns ✅
231
+ ### 1. [Good Pattern]
232
+ - **Found in:** [file locations]
233
+ - **Why it's good:** [explanation]
234
+ - **Recommend:** Continue using this approach
235
+
236
+ ### 2. [Another Good Pattern]
237
+ [Similar format]
238
+
239
+ ## Prioritized Action Items
240
+
241
+ ### High Priority (Do First)
242
+ 1. [ ] [Action item from Critical Issues]
243
+ 2. [ ] [Action item from Critical Issues]
244
+
245
+ ### Medium Priority (Do Soon)
246
+ 1. [ ] [Action item from Moderate Issues]
247
+ 2. [ ] [Action item from Moderate Issues]
248
+
249
+ ### Low Priority (Future Improvement)
250
+ 1. [ ] [Action item from Minor Observations]
251
+
252
+ ## Statistics
253
+ - **Total duplicated code:** ~X lines across Y locations
254
+ - **Test coverage gaps:** Z files missing tests
255
+ - **Pattern inconsistencies:** N different patterns found for [thing]
256
+ - **Large methods:** M methods over 20 lines
257
+ - **Large classes:** K classes over 10 public methods
258
+ ```
259
+
260
+ ## Example Usage Instructions
261
+
262
+ ### Example 1: After Feature Development
263
+ ```
264
+ Analyze the last 3 days of git history. Focus on the DAM migration work and
265
+ configuration changes. Look for code duplication (especially around brand/path
266
+ resolution), inconsistent testing patterns, and places where we're not reusing
267
+ existing utilities. Give me a prioritized list of refactoring opportunities.
268
+ ```
269
+
270
+ ### Example 2: Specific Concern
271
+ ```
272
+ Review commits from Nov 18-21. The main work was DAM CLI and configuration.
273
+ I'm particularly concerned we might have written brand resolution logic multiple
274
+ times in different places. Also check if the new specs are following the same
275
+ patterns as existing ones in spec/appydave/tools/.
276
+ ```
277
+
278
+ ### Example 3: General Health Check
279
+ ```
280
+ Analyze the last week of development. Look for the usual AI code problems:
281
+ duplication, pattern drift, missing abstractions. Focus on lib/appydave/tools/
282
+ and corresponding specs.
283
+ ```
284
+
285
+ ### Phase 6: Testing Anti-Patterns ⚠️
286
+
287
+ **CRITICAL: Don't Hide Bugs With Mocks**
288
+
289
+ The most dangerous testing anti-pattern is **using mocks to hide real bugs instead of fixing them**.
290
+
291
+ #### Red Flags in Tests:
292
+
293
+ 1. **Over-mocking that masks bugs:**
294
+ - Mocking away the exact behavior that contains the bug
295
+ - Tests pass but production code still fails
296
+ - "Fix" involves adding mocks instead of fixing logic
297
+
298
+ **Example (BAD):**
299
+ ```ruby
300
+ # Bug: Regexp.last_match gets reset by .sub() call
301
+ # "Fix": Mock the entire method instead of fixing the bug
302
+ allow(resolver).to receive(:detect_from_pwd).and_return(['brand', 'project'])
303
+ ```
304
+
305
+ **Better approach:**
306
+ ```ruby
307
+ # Fix the actual bug: capture regex match BEFORE .sub() call
308
+ project = Regexp.last_match(2) # Capture BEFORE
309
+ brand_key = brand_with_prefix.sub(/^v-/, '') # Then modify
310
+ ```
311
+
312
+ 2. **Mixed mock/test data systems:**
313
+ - Real file system + mocked configuration
314
+ - Real objects + stubbed methods on same objects
315
+ - Partial mocking that creates impossible states
316
+ - Tests use different data sources than production
317
+
318
+ **Example (BAD):**
319
+ ```ruby
320
+ # Mix of real filesystem and stubbed config
321
+ let(:real_path) { File.expand_path('../fixtures', __dir__) }
322
+ before do
323
+ allow(config).to receive(:video_projects_root).and_return(real_path)
324
+ allow(File).to receive(:exist?).and_return(true) # But checking different paths!
325
+ end
326
+ ```
327
+
328
+ **Better approach:**
329
+ ```ruby
330
+ # Consistent test data system: Either all real or all mocked
331
+ let(:temp_dir) { Dir.mktmpdir }
332
+ before do
333
+ FileUtils.mkdir_p("#{temp_dir}/v-appydave/b65")
334
+ config.video_projects_root = temp_dir
335
+ end
336
+ after { FileUtils.rm_rf(temp_dir) }
337
+ ```
338
+
339
+ 3. **Complex mock setups that don't reflect reality:**
340
+ - Mocks that configure behavior that never happens in production
341
+ - Chained stubs that create impossible scenarios
342
+ - Mock expectations that don't match actual method signatures
343
+
344
+ 4. **Tests that mock what they should be testing:**
345
+ - Mocking the primary behavior under test
346
+ - Stubbing return values instead of testing logic
347
+ - Mocking away all collaborators (unit test tunnel vision)
348
+
349
+ #### When Mocking Is Appropriate:
350
+
351
+ - **External services** (API calls, network requests)
352
+ - **Slow operations** (database queries in unit tests)
353
+ - **Non-deterministic behavior** (timestamps, random values)
354
+ - **Expensive resources** (file I/O in focused unit tests)
355
+
356
+ #### When to Use Real Objects:
357
+
358
+ - **Testing integration** between components
359
+ - **Debugging failures** - Reproduce with real objects first
360
+ - **Configuration resolution** - Real config objects, real paths
361
+ - **Business logic** - Real domain objects, real calculations
362
+
363
+ #### How to Spot This Problem:
364
+
365
+ ```bash
366
+ # Search for excessive mocking in specs
367
+ grep -rn "allow.*to receive" spec/ | wc -l
368
+ grep -rn "double\|instance_double" spec/ | wc -l
369
+
370
+ # Find specs with high mock-to-assertion ratios
371
+ # Red flag: 5+ mocks, 1-2 expectations
372
+ ```
373
+
374
+ **Remember:** If you're adding mocks to make a test pass, stop and ask:
375
+ 1. Is there a real bug I'm hiding?
376
+ 2. Am I testing integration but using unit test mocks?
377
+ 3. Would real objects expose the actual problem?
378
+
379
+ ## Tips for Effective Analysis
380
+
381
+ 1. **Start with git stats** - Let the data guide you to hot spots
382
+ 2. **Read commit messages** - Understand intent before judging implementation
383
+ 3. **Compare similar files** - Look at files changed in the same commit
384
+ 4. **Check for existing utilities** - Before flagging duplication, verify there isn't already a helper
385
+ 5. **Consider context** - Some "duplication" is intentional (tests, CLI commands)
386
+ 6. **Be specific** - Don't just say "improve X", show exactly what and where
387
+ 7. **Suggest solutions** - Include specific refactoring recommendations with file/method names
388
+ 8. **Watch for mock overuse** - Tests with more mocks than assertions are a red flag
389
+
390
+ ## Notes for AI Assistants
391
+
392
+ - **Be thorough but pragmatic** - Not every small similarity is worth flagging
393
+ - **Show your work** - Include the git commands and grep searches you ran
394
+ - **Provide evidence** - Show actual code snippets, not just descriptions
395
+ - **Prioritize impact** - Focus on issues that affect maintainability, not style preferences
396
+ - **Respect protected patterns** - Don't flag defensive logging/debugging code
397
+ - **Consider effort** - Note whether fixes are quick wins or major refactors
398
+ - **Be constructive** - Frame as learning opportunities, not criticism
399
+
400
+ ---
401
+
402
+ **Last updated:** 2025-11-21