ai-flow-dev 2.8.0 → 2.8.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -117,6 +117,7 @@ echo ""
117
117
  ### Step 1: Smart Validation
118
118
 
119
119
  **Only execute `/flow-check` if:**
120
+
120
121
  - `TESTS_EXECUTED == false` (never executed), **OR**
121
122
  - `NEEDS_REVALIDATION == true` (commits after last validation)
122
123
 
@@ -137,11 +138,11 @@ if [ "$SHOULD_RUN_CHECK" = "true" ]; then
137
138
  # INVOKE /flow-check HERE
138
139
  # Execute the complete /flow-check workflow
139
140
  # This will update status.json with results
140
-
141
+
141
142
  # After execution, re-read validation results
142
143
  TESTS_PASSED=$(jq -r '.validation.tests.passed' "$TASK_PATH/status.json" 2>/dev/null || echo "0")
143
144
  TESTS_FAILED=$(jq -r '.validation.tests.failed' "$TASK_PATH/status.json" 2>/dev/null || echo "0")
144
-
145
+
145
146
  # If tests FAIL → STOP EVERYTHING
146
147
  if [ "$TESTS_FAILED" -gt 0 ]; then
147
148
  echo ""
@@ -161,12 +162,13 @@ fi
161
162
  ### Step 2: Smart Commit
162
163
 
163
164
  **Only execute `/flow-commit` if:**
165
+
164
166
  - `HAS_UNCOMMITTED_CHANGES == true`
165
167
 
166
168
  ```bash
167
169
  if [ "$HAS_UNCOMMITTED_CHANGES" = "true" ]; then
168
170
  echo "📝 Cambios sin commitear detectados. Ejecutando /flow-commit..."
169
-
171
+
170
172
  # INVOKE /flow-commit HERE
171
173
  # Execute the complete /flow-commit workflow
172
174
  # This will update status.json with new commits
@@ -204,12 +206,12 @@ if [ -f "$TASK_PATH/status.json" ]; then
204
206
  TASK_SOURCE=$(jq -r '.source' "$TASK_PATH/status.json")
205
207
  CREATED_AT=$(jq -r '.timestamps.created' "$TASK_PATH/status.json")
206
208
  COMPLETED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
207
-
209
+
208
210
  # Calculate duration in minutes
209
211
  CREATED_TS=$(date -d "$CREATED_AT" +%s 2>/dev/null || date -j -f "%Y-%m-%dT%H:%M:%SZ" "$CREATED_AT" +%s 2>/dev/null || echo "0")
210
212
  COMPLETED_TS=$(date +%s)
211
213
  DURATION_MIN=$(( ($COMPLETED_TS - $CREATED_TS) / 60 ))
212
-
214
+
213
215
  TOTAL_TASKS=$(jq -r '.progress.totalTasks' "$TASK_PATH/status.json")
214
216
  COMMIT_COUNT=$(jq -r '.git.commits | length' "$TASK_PATH/status.json")
215
217
  VALIDATION_PASSED=$( [ "$TESTS_FAILED" -eq 0 ] && echo "true" || echo "false" )
@@ -224,24 +226,24 @@ else
224
226
  elif echo "$TASK_FOLDER" | grep -qiE '^refactor'; then
225
227
  TASK_TYPE="refactor"
226
228
  fi
227
-
229
+
228
230
  TASK_SOURCE="manual"
229
-
231
+
230
232
  # First commit timestamp
231
233
  FIRST_COMMIT=$(git log --reverse --format=%ct --all -- "$TASK_PATH/work.md" 2>/dev/null | head -n 1)
232
234
  CREATED_AT=$(date -u -d "@$FIRST_COMMIT" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || date -u +"%Y-%m-%dT%H:%M:%SZ")
233
235
  COMPLETED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
234
-
236
+
235
237
  CREATED_TS=$FIRST_COMMIT
236
238
  COMPLETED_TS=$(date +%s)
237
239
  DURATION_MIN=$(( ($COMPLETED_TS - $CREATED_TS) / 60 ))
238
-
240
+
239
241
  # Count checkboxes in work.md
240
242
  TOTAL_TASKS=$(grep -c '^\- \[ \]' "$TASK_PATH/work.md" 2>/dev/null || echo "0")
241
-
243
+
242
244
  # Count commits in branch
243
245
  COMMIT_COUNT=$(git log --oneline "$CURRENT_BRANCH" ^main 2>/dev/null | wc -l | tr -d ' ')
244
-
246
+
245
247
  VALIDATION_PASSED="true"
246
248
  fi
247
249
 
@@ -314,7 +316,7 @@ function extract_objective_from_work_md() {
314
316
  git log "$CURRENT_BRANCH" --format="%B" -1 | head -n 3 | tr '\n' ' ' | sed 's/ */ /g'
315
317
  return
316
318
  fi
317
-
319
+
318
320
  # Extract Objective section
319
321
  awk '/^## Objective$/,/^## [^O]/' ".ai-flow/work/$TASK_FOLDER/work.md" 2>/dev/null | \
320
322
  grep -v '^##' | sed '/^$/d' | head -n 3 | tr '\n' ' ' | sed 's/ */ /g' | sed 's/^ *//;s/ *$//'
@@ -326,7 +328,7 @@ function extract_completed_tasks() {
326
328
  echo "Tareas completadas (ver commits)"
327
329
  return
328
330
  fi
329
-
331
+
330
332
  awk '/^## Tasks$/,/^## [^T]/' ".ai-flow/work/$TASK_FOLDER/work.md" 2>/dev/null | \
331
333
  grep '^\- \[x\]' | sed 's/^\- \[x\] /✅ /' | head -n 8
332
334
  }
@@ -334,14 +336,14 @@ function extract_completed_tasks() {
334
336
  # Categorize changed files
335
337
  function categorize_changed_files() {
336
338
  local all_files=$(git diff --name-only main..HEAD 2>/dev/null || git diff --name-only --staged)
337
-
339
+
338
340
  local backend_count=$(echo "$all_files" | grep -icE '(controller|service|repository|handler|route|api)' 2>/dev/null || echo 0)
339
341
  local frontend_count=$(echo "$all_files" | grep -icE '(component|view|page|screen|widget)' 2>/dev/null || echo 0)
340
342
  local db_count=$(echo "$all_files" | grep -icE '(migration|entity|model|schema|\.sql)' 2>/dev/null || echo 0)
341
343
  local test_count=$(echo "$all_files" | grep -icE '(test|spec|e2e)' 2>/dev/null || echo 0)
342
344
  local doc_count=$(echo "$all_files" | grep -icE '\.md$' 2>/dev/null || echo 0)
343
345
  local config_count=$(echo "$all_files" | grep -icE '(\.json|\.yaml|\.yml|\.env|docker|k8s)' 2>/dev/null || echo 0)
344
-
346
+
345
347
  cat <<EOF
346
348
  - Backend: $backend_count files
347
349
  - Frontend: $frontend_count files
@@ -355,7 +357,7 @@ EOF
355
357
  # Detect file purpose
356
358
  function detect_file_purpose() {
357
359
  local file=$1
358
-
360
+
359
361
  case "$file" in
360
362
  *controller*|*route*|*handler*) echo "API endpoint" ;;
361
363
  *service*|*repository*) echo "Business logic" ;;
@@ -370,7 +372,7 @@ function detect_file_purpose() {
370
372
  # Show top 3 files by impact
371
373
  function show_top_3_files_summary() {
372
374
  local top_files=$(git diff --stat main..HEAD 2>/dev/null | sort -rn -k3 | head -n 3 | awk '{print $1}')
373
-
375
+
374
376
  echo "### Most Impacted Files"
375
377
  for file in $top_files; do
376
378
  local lines_changed=$(git diff --stat main..HEAD -- "$file" 2>/dev/null | tail -n 1 | awk '{print $4}')
@@ -382,7 +384,7 @@ function show_top_3_files_summary() {
382
384
  # Detect deployment requirements
383
385
  function detect_deployment_requirements() {
384
386
  local changed_files=$(git diff --name-only main..HEAD 2>/dev/null || echo "")
385
-
387
+
386
388
  # Migrations (universal)
387
389
  HAS_MIGRATIONS=false
388
390
  MIGRATION_FILES=""
@@ -390,18 +392,18 @@ function detect_deployment_requirements() {
390
392
  HAS_MIGRATIONS=true
391
393
  MIGRATION_FILES=$(echo "$changed_files" | grep -iE '(migration|migrate)' | wc -l | tr -d ' ')
392
394
  fi
393
-
395
+
394
396
  # Environment variables (universal)
395
397
  NEW_ENV_VARS=""
396
398
  ENV_FILES=$(echo "$changed_files" | grep -iE '(\.env\.example|\.env\.template|\.env\.sample|env\.example|env\.sample)')
397
399
  if [ -n "$ENV_FILES" ]; then
398
400
  NEW_ENV_VARS=$(git diff main..HEAD -- $ENV_FILES 2>/dev/null | grep -E '^\+[A-Z_0-9]+=' | sed 's/^+//' | cut -d'=' -f1 | sort -u)
399
401
  fi
400
-
402
+
401
403
  # Dependencies (language-agnostic)
402
404
  HAS_NEW_DEPS=false
403
405
  INSTALL_CMD=""
404
-
406
+
405
407
  if echo "$changed_files" | grep -qE 'package\.json'; then
406
408
  HAS_NEW_DEPS=true
407
409
  INSTALL_CMD="npm install"
@@ -430,13 +432,13 @@ function detect_deployment_requirements() {
430
432
  HAS_NEW_DEPS=true
431
433
  INSTALL_CMD="gradle build"
432
434
  fi
433
-
435
+
434
436
  # Determine if showing deployment section
435
437
  SHOW_DEPLOYMENT_NOTES=false
436
438
  if [ "$HAS_MIGRATIONS" = "true" ] || [ -n "$NEW_ENV_VARS" ] || [ "$HAS_NEW_DEPS" = "true" ]; then
437
439
  SHOW_DEPLOYMENT_NOTES=true
438
440
  fi
439
-
441
+
440
442
  # Export variables
441
443
  export HAS_MIGRATIONS
442
444
  export MIGRATION_FILES
@@ -451,11 +453,11 @@ function detect_impact_area() {
451
453
  local changed_files=$(git diff --name-only main..HEAD 2>/dev/null || echo "")
452
454
  local area="General"
453
455
  local module=""
454
-
456
+
455
457
  # Backend API (framework-agnostic)
456
458
  if echo "$changed_files" | grep -qiE '(controller|service|repository|handler|route|api|endpoint)'; then
457
459
  area="Backend API"
458
-
460
+
459
461
  # Module by subdirectory or filename
460
462
  if echo "$changed_files" | grep -qiE '(auth|login|jwt|user|session)'; then
461
463
  module="Authentication"
@@ -466,11 +468,11 @@ function detect_impact_area() {
466
468
  elif echo "$changed_files" | grep -qiE '(report|analytics|dashboard)'; then
467
469
  module="Analytics"
468
470
  fi
469
-
471
+
470
472
  # Frontend (framework-agnostic)
471
473
  elif echo "$changed_files" | grep -qiE '(component|view|page|screen|widget|template)'; then
472
474
  area="Frontend"
473
-
475
+
474
476
  if echo "$changed_files" | grep -qiE '(auth|login)'; then
475
477
  module="Authentication UI"
476
478
  elif echo "$changed_files" | grep -qiE '(dashboard|home)'; then
@@ -478,30 +480,30 @@ function detect_impact_area() {
478
480
  elif echo "$changed_files" | grep -qiE '(profile|account|settings)'; then
479
481
  module="User Profile"
480
482
  fi
481
-
483
+
482
484
  # Mobile (agnostic: React Native, Flutter, Native)
483
485
  elif echo "$changed_files" | grep -qiE '(ios/|android/|mobile/|\.swift|\.kt|\.dart)'; then
484
486
  area="Mobile"
485
-
487
+
486
488
  # Database (agnostic)
487
489
  elif echo "$changed_files" | grep -qiE '(migration|schema|seed|model|entity|\.sql)'; then
488
490
  area="Database"
489
491
  module="Schema"
490
-
492
+
491
493
  # Infrastructure (agnostic)
492
494
  elif echo "$changed_files" | grep -qiE '(docker|k8s|kubernetes|terraform|ansible|\.yaml|\.yml|ci|cd|\.github|\.gitlab)'; then
493
495
  area="Infrastructure"
494
496
  module="DevOps"
495
-
497
+
496
498
  # Testing (agnostic)
497
499
  elif echo "$changed_files" | grep -qiE '(test|spec|\.test\.|\.spec\.|e2e|integration)'; then
498
500
  area="Testing"
499
-
501
+
500
502
  # Documentation
501
503
  elif echo "$changed_files" | grep -qiE '(\.md$|docs?/|README)'; then
502
504
  area="Documentation"
503
505
  fi
504
-
506
+
505
507
  # Final format
506
508
  if [ -n "$module" ]; then
507
509
  echo "$area - $module"
@@ -513,60 +515,60 @@ function detect_impact_area() {
513
515
  # Detect Git platform and generate commit URLs
514
516
  function get_commit_urls() {
515
517
  local remote_url=$(git config --get remote.origin.url 2>/dev/null)
516
-
518
+
517
519
  if [ -z "$remote_url" ]; then
518
520
  echo "⚠️ No se detectó remote origin, commits sin links"
519
521
  COMMIT_URL_PATTERN=""
520
522
  PLATFORM="Unknown"
521
523
  return 1
522
524
  fi
523
-
525
+
524
526
  # Normalize URL (SSH -> HTTPS)
525
527
  local base_url=""
526
-
528
+
527
529
  # GitHub
528
530
  if echo "$remote_url" | grep -qE 'github\.com'; then
529
531
  base_url=$(echo "$remote_url" | sed -E 's|git@github.com:(.*)|https://github.com/\1|' | sed 's|\.git$||')
530
532
  COMMIT_URL_PATTERN="${base_url}/commit/"
531
533
  PLATFORM="GitHub"
532
-
534
+
533
535
  # GitLab
534
536
  elif echo "$remote_url" | grep -qE 'gitlab\.com'; then
535
537
  base_url=$(echo "$remote_url" | sed -E 's|git@gitlab.com:(.*)|https://gitlab.com/\1|' | sed 's|\.git$||')
536
538
  COMMIT_URL_PATTERN="${base_url}/-/commit/"
537
539
  PLATFORM="GitLab"
538
-
540
+
539
541
  # Bitbucket
540
542
  elif echo "$remote_url" | grep -qE 'bitbucket\.org'; then
541
543
  base_url=$(echo "$remote_url" | sed -E 's|git@bitbucket.org:(.*)|https://bitbucket.org/\1|' | sed 's|\.git$||')
542
544
  COMMIT_URL_PATTERN="${base_url}/commits/"
543
545
  PLATFORM="Bitbucket"
544
-
546
+
545
547
  # Azure DevOps
546
548
  elif echo "$remote_url" | grep -qE 'dev\.azure\.com'; then
547
549
  base_url=$(echo "$remote_url" | sed -E 's|git@ssh\.dev\.azure\.com:v3/(.*)|https://dev.azure.com/\1|' | sed 's|\.git$||')
548
550
  COMMIT_URL_PATTERN="${base_url}/commit/"
549
551
  PLATFORM="Azure DevOps"
550
-
552
+
551
553
  # GitLab Self-Hosted
552
554
  elif echo "$remote_url" | grep -qE 'gitlab'; then
553
555
  base_url=$(echo "$remote_url" | sed -E 's|git@([^:]+):(.*)|https://\1/\2|' | sed 's|\.git$||')
554
556
  COMMIT_URL_PATTERN="${base_url}/-/commit/"
555
557
  PLATFORM="GitLab (Self-Hosted)"
556
-
558
+
557
559
  # GitHub Enterprise
558
560
  elif echo "$remote_url" | grep -qE 'github'; then
559
561
  base_url=$(echo "$remote_url" | sed -E 's|git@([^:]+):(.*)|https://\1/\2|' | sed 's|\.git$||')
560
562
  COMMIT_URL_PATTERN="${base_url}/commit/"
561
563
  PLATFORM="GitHub Enterprise"
562
-
564
+
563
565
  else
564
566
  echo "⚠️ Plataforma Git no reconocida, commits sin links"
565
567
  COMMIT_URL_PATTERN=""
566
568
  PLATFORM="Unknown"
567
569
  return 1
568
570
  fi
569
-
571
+
570
572
  echo "✅ Detectado: $PLATFORM"
571
573
  export COMMIT_URL_PATTERN
572
574
  export PLATFORM
@@ -577,26 +579,32 @@ function generate_commit_links() {
577
579
  local max_commits=${1:-5}
578
580
  local commits=$(git log main..HEAD --format="%h" -${max_commits} 2>/dev/null)
579
581
  local total_commits=$(git log main..HEAD --format="%h" 2>/dev/null | wc -l | tr -d ' ')
580
-
582
+
581
583
  # For summary line (first 5 hashes)
582
584
  COMMIT_HASHES_SUMMARY=""
583
585
  local count=0
584
-
586
+
585
587
  for hash in $commits; do
586
588
  if [ $count -lt 5 ]; then
587
589
  if [ -n "$COMMIT_HASHES_SUMMARY" ]; then
588
- COMMIT_HASHES_SUMMARY="${COMMIT_HASHES_SUMMARY}, "
590
+ COMMIT_HASHES_SUMMARY+=", "
589
591
  fi
590
-
592
+
591
593
  if [ -n "$COMMIT_URL_PATTERN" ]; then
592
- COMMIT_HASHES_SUMMARY="${COMMIT_HASHES_SUMMARY}[${hash}](${COMMIT_URL_PATTERN}${hash})"
594
+ # Build markdown link in parts to avoid VSCode link detection
595
+ COMMIT_HASHES_SUMMARY+="["
596
+ COMMIT_HASHES_SUMMARY+="$hash"
597
+ COMMIT_HASHES_SUMMARY+="]("
598
+ COMMIT_HASHES_SUMMARY+="$COMMIT_URL_PATTERN"
599
+ COMMIT_HASHES_SUMMARY+="$hash"
600
+ COMMIT_HASHES_SUMMARY+=")"
593
601
  else
594
- COMMIT_HASHES_SUMMARY="${COMMIT_HASHES_SUMMARY}\`${hash}\`"
602
+ COMMIT_HASHES_SUMMARY+='`'"${hash}"'`'
595
603
  fi
596
604
  fi
597
605
  count=$((count + 1))
598
606
  done
599
-
607
+
600
608
  # Add indicator if more commits
601
609
  if [ $total_commits -gt 5 ]; then
602
610
  COMMIT_HASHES_SUMMARY="${COMMIT_HASHES_SUMMARY}, ... (${total_commits} total)"
@@ -605,7 +613,7 @@ function generate_commit_links() {
605
613
  else
606
614
  COMMIT_HASHES_SUMMARY="No commits"
607
615
  fi
608
-
616
+
609
617
  export COMMIT_HASHES_SUMMARY
610
618
  export TOTAL_COMMITS=$total_commits
611
619
  }
@@ -656,13 +664,13 @@ cat > /tmp/ai-context-summary.md <<EOF
656
664
 
657
665
  ## Work Overview
658
666
  Objective: $WORK_OBJECTIVE
659
- Completed Tasks:
667
+ Completed Tasks:
660
668
  $WORK_TASKS
661
669
  Type: $TASK_TYPE
662
670
  Story Points: $STORY_POINTS
663
671
 
664
672
  ## Changes Made
665
- Commits (subjects only):
673
+ Commits (subjects only):
666
674
  $COMMIT_SUBJECTS
667
675
 
668
676
  Breaking Changes: $([ "$HAS_BREAKING_CHANGES" = true ] && echo "YES" || echo "NO")
@@ -700,7 +708,7 @@ Read the structured summary from `/tmp/ai-context-summary.md` (400-600 words) an
700
708
 
701
709
  **AI Prompt:**
702
710
 
703
- ```markdown
711
+ ```````markdown
704
712
  Genera dos descripciones profesionales (PR y Jira) basándote en este resumen estructurado:
705
713
 
706
714
  <context-summary>
@@ -745,6 +753,7 @@ Commit Hashes Summary: $COMMIT_HASHES_SUMMARY
745
753
  - Referencias con commits
746
754
 
747
755
  **Reglas Importantes:**
756
+
748
757
  - Usa lenguaje profesional pero claro
749
758
  - Sé específico con cambios técnicos
750
759
  - Usa los commit links ya formateados en $COMMIT_HASHES_SUMMARY
@@ -753,49 +762,50 @@ Commit Hashes Summary: $COMMIT_HASHES_SUMMARY
753
762
  - Si breaking changes, resáltalos con ⚠️ en sección Métricas
754
763
  - Si deployment notes, sé específico con cada requirement
755
764
 
756
- **Output en formato (CRÍTICO - respetar delimitadores):**
765
+ **Output Format:**
757
766
 
758
- \`\`\`markdown
759
- <!-- PR_DESCRIPTION_START -->
760
- [contenido completo de PR description aquí]
761
- <!-- PR_DESCRIPTION_END -->
767
+ IMPORTANTE: Responde directamente con este formato EXACTO usando 5 BACKTICKS (máxima robustez):
762
768
 
763
- <!-- JIRA_DESCRIPTION_START -->
764
- [contenido completo de Jira description aquí]
765
- <!-- JIRA_DESCRIPTION_END -->
766
- \`\`\`
769
+ ---
767
770
 
768
- Analiza el contexto y genera las descripciones óptimas ahora.
769
- ```
771
+ ## 📋 PULL REQUEST DESCRIPTION
770
772
 
771
- **After AI generates the descriptions, extract and save them:**
773
+ \`\`\`\`\`markdown
774
+ [Aquí va el contenido completo de la PR description, empezando con ## Refactor: ...]
775
+ \`\`\`\`\`
772
776
 
773
- ```bash
774
- # Extract PR description
775
- sed -n '/<!-- PR_DESCRIPTION_START -->/,/<!-- PR_DESCRIPTION_END -->/p' /tmp/ai-output.md | \
776
- sed '1d;$d' > /tmp/pr-description.md
777
+ ---
777
778
 
778
- # Extract Jira description
779
- sed -n '/<!-- JIRA_DESCRIPTION_START -->/,/<!-- JIRA_DESCRIPTION_END -->/p' /tmp/ai-output.md | \
780
- sed '1d;$d' > /tmp/jira-description.md
779
+ ## 🎫 JIRA DESCRIPTION
781
780
 
782
- # Display descriptions
783
- echo ""
784
- echo "---"
785
- echo "📋 DESCRIPCIÓN PARA PULL REQUEST (GitHub/GitLab/Bitbucket)"
786
- echo "---"
787
- echo ""
788
- cat /tmp/pr-description.md
789
- echo ""
790
- echo ""
791
- echo "---"
792
- echo "🎫 DESCRIPCIÓN PARA JIRA/CLICKUP/LINEAR (Markdown)"
793
- echo "---"
794
- echo ""
795
- cat /tmp/jira-description.md
796
- echo ""
781
+ \`\`\`\`\`markdown
782
+ [Aquí va el contenido completo de la Jira description, empezando con ## Refactor: ...]
783
+ \`\`\`\`\`
784
+
785
+ ---
786
+
787
+ **Listo para copiar y pegar** 📋 Solo copia el contenido dentro de los bloques de código markdown.
788
+
789
+ **CRÍTICO PARA EVITAR CONFLICTOS:**
790
+
791
+ 1. Los encabezados "## 📋 PULL REQUEST DESCRIPTION" y "## 🎫 JIRA DESCRIPTION" deben estar FUERA de los bloques de código
792
+ 2. USA EXACTAMENTE 5 BACKTICKS (\`\`\`\`\`) para abrir/cerrar cada bloque
793
+ 3. ¿Por qué 5? Cubre hasta 4 backticks internos (bloques anidados) sin conflictos
794
+ 4. Código normal usa 3 (```), nested usa 4 (````), entonces 5 (`````) es prácticamente imposible de conflictuar
795
+ 5. Más de 5 es innecesario (over-engineering)
796
+
797
+ Analiza el contexto y genera las descripciones óptimas ahora.
798
+ ```````
799
+
800
+ **After AI generates the descriptions:**
801
+
802
+ ```bash
803
+ # La IA responde directamente en el chat con las descripciones
804
+ # en formato markdown listo para copiar y pegar.
805
+ # No requiere post-procesamiento ni archivos temporales.
797
806
  echo ""
798
- echo "💡 Copia las descripciones de arriba para tus tickets"
807
+ echo "💡 Las descripciones han sido generadas arriba en el chat."
808
+ echo "📋 Copia directamente el contenido de los bloques markdown."
799
809
  echo ""
800
810
  ```
801
811
 
@@ -819,9 +829,9 @@ if [[ "$CONFIRM_PUSH" =~ ^[Yy]$ ]]; then
819
829
  echo ""
820
830
  echo "⬆️ Subiendo cambios a origin/$CURRENT_BRANCH..."
821
831
  echo ""
822
-
832
+
823
833
  git push origin "$CURRENT_BRANCH"
824
-
834
+
825
835
  if [ $? -eq 0 ]; then
826
836
  echo ""
827
837
  echo "---"
@@ -43,6 +43,15 @@ Parse OpenAPI backend specification and return structured analysis data that `fl
43
43
  2. Execute CRUD implementation with mobile-specific patterns (React Native)
44
44
  3. Ensure type-safety between mobile app and backend
45
45
 
46
+ **⚠️ CRITICAL: Do NOT create ANY files in this sub-prompt**
47
+
48
+ - This sub-prompt ONLY returns a JSON object (`OpenAPIAnalysisResult`)
49
+ - ❌ NO files like `.ai-flow/analysis/*.md` should be created
50
+ - ❌ NO files like `.ai-flow/work/[task]/analysis.md` should be created
51
+ - ❌ NO files like `.ai-flow/work/[task]/work.md` should be created here
52
+ - ✅ The parent prompt (`flow-work`) will create `.ai-flow/work/[task]/work.md` and `status.json` in Phase 2
53
+ - All analysis data is returned as JSON and will be embedded into `work.md` by the parent prompt
54
+
46
55
  ---
47
56
 
48
57
  ## Input Parameters
@@ -491,6 +491,15 @@ await saveToCache(cacheFile, {
491
491
  lastVerified: new Date().toISOString(),
492
492
  projectType: 'mobile',
493
493
  });
494
+
495
+ // ⚠️ IMPORTANT: Do NOT create SEPARATE ANALYSIS FILES
496
+ // The analysis result is stored in memory (workflow_context.analysis)
497
+ // and will be merged into work.md during Phase 2.
498
+ //
499
+ // ❌ DO NOT CREATE: .ai-flow/analysis/*.md or .ai-flow/work/[task]/analysis.md
500
+ // ✅ MUST CREATE (in Phase 2): .ai-flow/work/[task]/work.md and status.json
501
+ //
502
+ // The analysis data is included INSIDE work.md, not as a separate file.
494
503
  ```
495
504
 
496
505
  **IF `analysisResult.success === false`:**
@@ -875,7 +884,22 @@ Find similar features/patterns in codebase:
875
884
 
876
885
  **3. Generate work.md (Conditional)**
877
886
 
878
- **IF complexity == "MEDIUM":**
887
+ **⚠️ IMPORTANT: work.md is the ONLY planning file**
888
+
889
+ - All analysis results, API specs, field specifications, and implementation plan go into `work.md`
890
+ - Do NOT create separate files like `analysis.md`, `api-analysis.md`, or `spec.md`
891
+ - The `work.md` file should be comprehensive and self-contained
892
+ - For COMPLEX tasks with API analysis, include all OpenAPI metadata directly in `work.md`
893
+
894
+ **📦 For API_MODULE mode specifically:**
895
+
896
+ - The analysis from `flow-work-api.md` sub-prompt is stored in `workflow_context.analysis`
897
+ - ✅ YOU MUST CREATE `.ai-flow/work/api-[module-name]/work.md` with all analysis data embedded
898
+ - ✅ YOU MUST CREATE `.ai-flow/work/api-[module-name]/status.json` (API modules are COMPLEX)
899
+ - Include OpenAPI endpoints, schemas, field specs, validation rules, and relationships in `work.md`
900
+ - Do NOT skip file creation just because you received pre-analyzed data
901
+
902
+ **IF complexity == "MEDIUM":**
879
903
 
880
904
  - Create simplified `.ai-flow/work/[task-name]/work.md` (~15-20 lines)
881
905
  - Skip status.json
@@ -1,33 +1,46 @@
1
+ <!--
2
+ ⚠️ IMPORTANT: This is the ONLY planning/analysis file for this task
3
+ Do NOT create separate analysis.md, api-analysis.md, or spec.md files.
4
+ All analysis results, specifications, and implementation details belong here.
5
+ -->
6
+
1
7
  # [Type]: [Feature/Fix/Refactor Name]
2
8
 
3
9
  ## Context
10
+
4
11
  **Source**: HU-XXX-XXX | Roadmap X.X | Manual
5
12
  **SP**: X | **Branch**: [type]/[slug] | **Deps**: None | TXXX
6
13
 
7
14
  ## Objective
15
+
8
16
  [1-2 clear paragraphs describing WHAT will be implemented/fixed/refactored]
9
17
 
10
18
  ## Documentation Constraints
19
+
11
20
  **Read**: ai-instructions.md, architecture.md, code-standards.md
12
21
 
13
22
  **Key Rules**:
23
+
14
24
  - ✅ ALWAYS: [List specific rules that apply]
15
25
  - ❌ NEVER: [List specific prohibitions]
16
26
  - 📐 Pattern: [Architectural pattern from docs]
17
27
  - 📁 Location: [File structure from architecture.md]
18
28
 
19
29
  ## Approach
30
+
20
31
  **Layer**: [Data | Business Logic | API | UI]
21
32
  **Files**: [List files to create/modify]
22
33
  **Reference**: [Existing file to follow as pattern]
23
34
 
24
35
  **Phases**:
36
+
25
37
  1. [Phase 1 description]
26
38
  2. [Phase 2 description]
27
39
  3. [Phase 3 description]
28
40
  4. [Phase 4 description]
29
41
 
30
42
  ## Tasks
43
+
31
44
  **Source**: [Manual | Roadmap X.X (expanded) | HU-XXX-XXX (expanded) | HU-XXX-XXX (reference)]
32
45
 
33
46
  - [ ] T001 [D] [Description] → [file path] • X SP
@@ -40,6 +53,7 @@
40
53
  - [Pattern/constraint details] (deps: T002)
41
54
 
42
55
  ## Validation
56
+
43
57
  - [ ] All NEVER/ALWAYS rules followed
44
58
  - [ ] Tests pass (coverage per docs/testing.md)
45
59
  - [ ] No hardcoded secrets