aline-ai 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (40)
  1. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/METADATA +1 -1
  2. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/RECORD +38 -37
  3. realign/__init__.py +1 -1
  4. realign/adapters/__init__.py +0 -3
  5. realign/adapters/codex.py +14 -9
  6. realign/cli.py +42 -236
  7. realign/codex_detector.py +72 -32
  8. realign/codex_home.py +85 -0
  9. realign/codex_terminal_linker.py +172 -0
  10. realign/commands/__init__.py +2 -2
  11. realign/commands/add.py +89 -9
  12. realign/commands/doctor.py +495 -0
  13. realign/commands/export_shares.py +154 -226
  14. realign/commands/init.py +66 -4
  15. realign/commands/watcher.py +30 -80
  16. realign/config.py +9 -46
  17. realign/dashboard/app.py +7 -11
  18. realign/dashboard/screens/event_detail.py +0 -3
  19. realign/dashboard/screens/session_detail.py +0 -1
  20. realign/dashboard/tmux_manager.py +129 -4
  21. realign/dashboard/widgets/config_panel.py +175 -241
  22. realign/dashboard/widgets/events_table.py +71 -128
  23. realign/dashboard/widgets/sessions_table.py +77 -136
  24. realign/dashboard/widgets/terminal_panel.py +349 -27
  25. realign/dashboard/widgets/watcher_panel.py +0 -2
  26. realign/db/sqlite_db.py +77 -2
  27. realign/events/event_summarizer.py +76 -35
  28. realign/events/session_summarizer.py +73 -32
  29. realign/hooks.py +334 -647
  30. realign/llm_client.py +201 -520
  31. realign/triggers/__init__.py +0 -2
  32. realign/triggers/next_turn_trigger.py +4 -5
  33. realign/triggers/registry.py +1 -4
  34. realign/watcher_core.py +53 -35
  35. realign/adapters/antigravity.py +0 -159
  36. realign/triggers/antigravity_trigger.py +0 -140
  37. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/WHEEL +0 -0
  38. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/entry_points.txt +0 -0
  39. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/licenses/LICENSE +0 -0
  40. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/top_level.txt +0 -0
realign/hooks.py CHANGED
@@ -21,7 +21,7 @@ from .config import ReAlignConfig
21
21
  from .adapters import get_adapter_registry
22
22
  from .claude_detector import find_claude_sessions_dir
23
23
  from .logging_config import setup_logger
24
- from .llm_client import call_llm, call_llm_json, extract_json
24
+ from .llm_client import extract_json, call_llm_cloud
25
25
 
26
26
  try:
27
27
  from .redactor import check_and_redact_session, save_original_session
@@ -73,59 +73,6 @@ def _emit_llm_debug(
73
73
  logger.debug("LLM debug callback failed for payload=%s", payload, exc_info=True)
74
74
 
75
75
 
76
- def _invoke_llm(
77
- *,
78
- provider: str,
79
- system_prompt: str,
80
- user_prompt: str,
81
- debug_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
82
- purpose: str = "summary",
83
- silent: bool = False,
84
- ) -> Tuple[Optional[str], Optional[str]]:
85
- """
86
- [Deprecated] Wrapper around the new unified call_llm() function.
87
- Kept for backwards compatibility.
88
- """
89
- return call_llm(
90
- system_prompt=system_prompt,
91
- user_prompt=user_prompt,
92
- provider=provider,
93
- debug_callback=debug_callback,
94
- purpose=purpose,
95
- json_mode=True, # Original function always used JSON mode for OpenAI
96
- silent=silent,
97
- )
98
-
99
-
100
- def _extract_json_object(response_text: str) -> Dict[str, Any]:
101
- """
102
- [Deprecated] Wrapper around the new unified extract_json() function.
103
- Kept for backwards compatibility.
104
- """
105
- return extract_json(response_text)
106
-
107
-
108
- def invoke_llm_json_object(
109
- *,
110
- provider: str,
111
- system_prompt: str,
112
- user_prompt: str,
113
- purpose: str = "generic",
114
- debug_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
115
- ) -> Tuple[Optional[str], Optional[Dict[str, Any]]]:
116
- """
117
- [Deprecated] Wrapper around the new unified call_llm_json() function.
118
- Kept for backwards compatibility.
119
- """
120
- return call_llm_json(
121
- system_prompt=system_prompt,
122
- user_prompt=user_prompt,
123
- provider=provider,
124
- debug_callback=debug_callback,
125
- purpose=purpose,
126
- )
127
-
128
-
129
76
  def _normalize_if_last_task(raw_value: Any) -> str:
130
77
  """Normalize if_last_task values from LLM output."""
131
78
  if isinstance(raw_value, bool):
@@ -210,102 +157,93 @@ def _classify_task_metadata(
210
157
  ) -> Tuple[str, str]:
211
158
  """
212
159
  Run a dedicated LLM classification pass for if_last_task and satisfaction tags.
213
- """
214
- defaults = ("no", "fine")
215
-
216
- if not (user_messages or assistant_replies or code_changes):
217
- return defaults
218
-
219
- def _get_metadata_prompt() -> str:
220
- global _METADATA_PROMPT_CACHE
221
- if system_prompt is not None:
222
- return system_prompt
223
- if _METADATA_PROMPT_CACHE is not None:
224
- return _METADATA_PROMPT_CACHE
225
-
226
- # Try user-customized prompt first (~/.aline/prompts/metadata.md)
227
- user_prompt_path = Path.home() / ".aline" / "prompts" / "metadata.md"
228
- try:
229
- if user_prompt_path.exists():
230
- text = user_prompt_path.read_text(encoding="utf-8").strip()
231
- if text:
232
- _METADATA_PROMPT_CACHE = text
233
- logger.debug(f"Loaded user-customized metadata prompt from {user_prompt_path}")
234
- return text
235
- except Exception:
236
- logger.debug(
237
- "Failed to load user-customized metadata prompt, falling back", exc_info=True
238
- )
239
-
240
- # Fall back to built-in prompt (tools/commit_message_prompts/metadata_default.md)
241
- candidate = (
242
- Path(__file__).resolve().parents[2]
243
- / "tools"
244
- / "commit_message_prompts"
245
- / "metadata_default.md"
246
- )
247
- try:
248
- text = candidate.read_text(encoding="utf-8").strip()
249
- if text:
250
- _METADATA_PROMPT_CACHE = text
251
- return text
252
- except Exception:
253
- logger.debug("Falling back to built-in metadata prompt", exc_info=True)
254
- _METADATA_PROMPT_CACHE = DEFAULT_METADATA_PROMPT_TEXT
255
- return _METADATA_PROMPT_CACHE
256
-
257
- classification_system_prompt = _get_metadata_prompt()
258
160
 
259
- def _clip_text(text: str, limit: int) -> str:
260
- text = (text or "").strip()
261
- if not text:
262
- return ""
263
- if len(text) <= limit:
264
- return text
265
- return text[: max(0, limit - 3)].rstrip() + "..."
266
-
267
- clipped_user = _clip_text(user_messages, 2000) or "(missing)"
268
- current_title = (summary_title or "").strip() or "(missing)"
269
- previous_title = (previous_commit_title or "").strip() or "(none)"
270
-
271
- prompt_parts: List[str] = [
272
- f"Previous commit title: {previous_title}",
273
- "User request:\n" + clipped_user,
274
- f"Current commit title: {current_title}",
275
- 'Return strict JSON with exactly these fields:\n{"if_last_task": "yes|no", "satisfaction": "good|fine|bad"}',
276
- ]
277
- user_prompt = "\n\n".join(prompt_parts)
278
-
279
- model_name, response_text = _invoke_llm(
280
- provider=provider,
281
- system_prompt=classification_system_prompt,
282
- user_prompt=user_prompt,
283
- debug_callback=debug_callback,
284
- purpose="metadata",
285
- )
286
- if not response_text:
287
- return defaults
288
-
289
- try:
290
- metadata = _extract_json_object(response_text)
291
- except json.JSONDecodeError as exc:
292
- logger.warning("Failed to parse metadata JSON: %s", exc)
293
- logger.debug("Raw metadata response: %s", response_text)
294
- return defaults
295
-
296
- if_last_task = _normalize_if_last_task(metadata.get("if_last_task"))
297
- satisfaction = _normalize_satisfaction(metadata.get("satisfaction"))
298
- logger.info("LLM metadata response: %s", json.dumps(metadata, ensure_ascii=False))
299
- print(
300
- f" 🔍 Metadata classification: if_last_task={metadata.get('if_last_task')}→{if_last_task}, "
301
- f"satisfaction={metadata.get('satisfaction')}→{satisfaction}",
302
- file=sys.stderr,
303
- )
304
- if model_name:
305
- print(f" ✅ LLM metadata classification successful ({model_name})", file=sys.stderr)
306
- else:
307
- print(" ✅ LLM metadata classification successful", file=sys.stderr)
308
- return if_last_task, satisfaction
161
+ NOTE: LLM-based metadata classification is disabled. Always returns defaults.
162
+ """
163
+ # Metadata LLM classification disabled - always return defaults
164
+ return ("no", "fine")
165
+
166
+ # =========================================================================
167
+ # LOCAL LLM FALLBACK DISABLED - Code kept for reference
168
+ # =========================================================================
169
+ # def _get_metadata_prompt() -> str:
170
+ # global _METADATA_PROMPT_CACHE
171
+ # if system_prompt is not None:
172
+ # return system_prompt
173
+ # if _METADATA_PROMPT_CACHE is not None:
174
+ # return _METADATA_PROMPT_CACHE
175
+ #
176
+ # # Try user-customized prompt first (~/.aline/prompts/metadata.md)
177
+ # user_prompt_path = Path.home() / ".aline" / "prompts" / "metadata.md"
178
+ # try:
179
+ # if user_prompt_path.exists():
180
+ # text = user_prompt_path.read_text(encoding="utf-8").strip()
181
+ # if text:
182
+ # _METADATA_PROMPT_CACHE = text
183
+ # logger.debug(f"Loaded user-customized metadata prompt from {user_prompt_path}")
184
+ # return text
185
+ # except Exception:
186
+ # logger.debug(
187
+ # "Failed to load user-customized metadata prompt, falling back", exc_info=True
188
+ # )
189
+ #
190
+ # # Fall back to built-in prompt (tools/commit_message_prompts/metadata_default.md)
191
+ # candidate = (
192
+ # Path(__file__).resolve().parents[2]
193
+ # / "tools"
194
+ # / "commit_message_prompts"
195
+ # / "metadata_default.md"
196
+ # )
197
+ # try:
198
+ # text = candidate.read_text(encoding="utf-8").strip()
199
+ # if text:
200
+ # _METADATA_PROMPT_CACHE = text
201
+ # return text
202
+ # except Exception:
203
+ # logger.debug("Falling back to built-in metadata prompt", exc_info=True)
204
+ # _METADATA_PROMPT_CACHE = DEFAULT_METADATA_PROMPT_TEXT
205
+ # return _METADATA_PROMPT_CACHE
206
+ #
207
+ # classification_system_prompt = _get_metadata_prompt()
208
+ #
209
+ # prompt_parts: List[str] = [
210
+ # f"Previous commit title: {previous_title}",
211
+ # "User request:\n" + clipped_user,
212
+ # f"Current commit title: {current_title}",
213
+ # 'Return strict JSON with exactly these fields:\n{"if_last_task": "yes|no", "satisfaction": "good|fine|bad"}',
214
+ # ]
215
+ # user_prompt = "\n\n".join(prompt_parts)
216
+ #
217
+ # model_name, response_text = _invoke_llm(
218
+ # provider=provider,
219
+ # system_prompt=classification_system_prompt,
220
+ # user_prompt=user_prompt,
221
+ # debug_callback=debug_callback,
222
+ # purpose="metadata",
223
+ # )
224
+ # if not response_text:
225
+ # return defaults
226
+ #
227
+ # try:
228
+ # metadata = _extract_json_object(response_text)
229
+ # except json.JSONDecodeError as exc:
230
+ # logger.warning("Failed to parse metadata JSON: %s", exc)
231
+ # logger.debug("Raw metadata response: %s", response_text)
232
+ # return defaults
233
+ #
234
+ # if_last_task = _normalize_if_last_task(metadata.get("if_last_task"))
235
+ # satisfaction = _normalize_satisfaction(metadata.get("satisfaction"))
236
+ # logger.info("LLM metadata response: %s", json.dumps(metadata, ensure_ascii=False))
237
+ # print(
238
+ # f" 🔍 Metadata classification: if_last_task={metadata.get('if_last_task')}→{if_last_task}, "
239
+ # f"satisfaction={metadata.get('satisfaction')}→{satisfaction}",
240
+ # file=sys.stderr,
241
+ # )
242
+ # if model_name:
243
+ # print(f" ✅ LLM metadata classification successful ({model_name})", file=sys.stderr)
244
+ # else:
245
+ # print(" ✅ LLM metadata classification successful", file=sys.stderr)
246
+ # return if_last_task, satisfaction
309
247
 
310
248
 
311
249
  # ============================================================================
@@ -443,63 +381,10 @@ def find_codex_latest_session(project_path: Path, days_back: int = 7) -> Optiona
443
381
  Returns:
444
382
  Path to the most recent session file, or None if not found
445
383
  """
446
- from datetime import datetime, timedelta
384
+ from .codex_detector import get_latest_codex_session
447
385
 
448
386
  logger.debug(f"Searching for Codex sessions for project: {project_path}")
449
-
450
- codex_sessions_base = Path.home() / ".codex" / "sessions"
451
-
452
- if not codex_sessions_base.exists():
453
- logger.debug(f"Codex sessions directory not found: {codex_sessions_base}")
454
- return None
455
-
456
- # Normalize project path for comparison
457
- abs_project_path = str(project_path.resolve())
458
-
459
- matching_sessions = []
460
-
461
- # Search through recent days
462
- for days_ago in range(days_back + 1):
463
- target_date = datetime.now() - timedelta(days=days_ago)
464
- date_path = (
465
- codex_sessions_base
466
- / str(target_date.year)
467
- / f"{target_date.month:02d}"
468
- / f"{target_date.day:02d}"
469
- )
470
-
471
- if not date_path.exists():
472
- continue
473
-
474
- # Check all session files in this date directory
475
- for session_file in date_path.glob("rollout-*.jsonl"):
476
- try:
477
- # Read first line to get session metadata
478
- with open(session_file, "r", encoding="utf-8") as f:
479
- first_line = f.readline()
480
- if first_line:
481
- data = json.loads(first_line)
482
- if data.get("type") == "session_meta":
483
- session_cwd = data.get("payload", {}).get("cwd", "")
484
- # Match the project path
485
- if session_cwd == abs_project_path:
486
- matching_sessions.append(session_file)
487
- logger.debug(f"Found matching Codex session: {session_file}")
488
- except (json.JSONDecodeError, IOError) as e:
489
- logger.debug(f"Skipping malformed session file {session_file}: {e}")
490
- continue
491
-
492
- # Sort by modification time, newest first
493
- matching_sessions.sort(key=lambda p: p.stat().st_mtime, reverse=True)
494
-
495
- if matching_sessions:
496
- logger.info(
497
- f"Found {len(matching_sessions)} Codex session(s), using latest: {matching_sessions[0]}"
498
- )
499
- else:
500
- logger.debug("No matching Codex sessions found")
501
-
502
- return matching_sessions[0] if matching_sessions else None
387
+ return get_latest_codex_session(project_path, days_back=days_back)
503
388
 
504
389
 
505
390
  def find_all_claude_sessions() -> List[Path]:
@@ -527,18 +412,6 @@ def find_all_codex_sessions(days_back: int = 1) -> List[Path]:
527
412
  return []
528
413
 
529
414
 
530
- def find_all_antigravity_sessions() -> List[Path]:
531
- """
532
- Find all active Antigravity IDE sessions.
533
-
534
- (Legacy wrapper for AntigravityAdapter)
535
- """
536
- adapter = get_adapter_registry().get_adapter("antigravity")
537
- if adapter:
538
- return adapter.discover_sessions()
539
- return []
540
-
541
-
542
415
  def find_all_gemini_cli_sessions() -> List[Path]:
543
416
  """
544
417
  Find all active Gemini CLI sessions.
@@ -578,8 +451,6 @@ def find_all_active_sessions(
578
451
  enabled_adapters.append("codex")
579
452
  if config.auto_detect_gemini:
580
453
  enabled_adapters.append("gemini")
581
- if config.auto_detect_antigravity:
582
- enabled_adapters.append("antigravity")
583
454
 
584
455
  for name in enabled_adapters:
585
456
  adapter = registry.get_adapter(name)
@@ -653,242 +524,6 @@ def find_latest_session(history_path: Path, explicit_path: Optional[str] = None)
653
524
  return max(session_files, key=lambda p: p.stat().st_mtime)
654
525
 
655
526
 
656
- def _is_antigravity_content(content: str) -> bool:
657
- """
658
- Detect if content is from Antigravity IDE (Markdown artifacts format).
659
-
660
- Antigravity content is identified by the presence of section markers
661
- like "--- task.md ---", "--- walkthrough.md ---", or "--- implementation_plan.md ---".
662
-
663
- Args:
664
- content: The content string to check
665
-
666
- Returns:
667
- True if this is Antigravity Markdown content, False otherwise
668
- """
669
- if not content:
670
- return False
671
-
672
- antigravity_markers = [
673
- "--- task.md ---",
674
- "--- walkthrough.md ---",
675
- "--- implementation_plan.md ---",
676
- ]
677
-
678
- for marker in antigravity_markers:
679
- if marker in content:
680
- return True
681
-
682
- return False
683
-
684
-
685
- def _parse_antigravity_sections(content: str) -> Dict[str, str]:
686
- """
687
- Parse Antigravity content into separate sections.
688
-
689
- Args:
690
- content: Combined content with section markers like "--- task.md ---"
691
-
692
- Returns:
693
- Dictionary mapping filename (without .md) to content
694
- """
695
- sections = {}
696
-
697
- # Split by section markers
698
- import re
699
-
700
- pattern = r"--- (task\.md|walkthrough\.md|implementation_plan\.md) ---\n?"
701
- parts = re.split(pattern, content)
702
-
703
- # parts will be: ['', 'task.md', '<content>', 'walkthrough.md', '<content>', ...]
704
- i = 1
705
- while i < len(parts) - 1:
706
- filename = parts[i].replace(".md", "")
707
- section_content = parts[i + 1].strip()
708
- if section_content:
709
- sections[filename] = section_content
710
- i += 2
711
-
712
- return sections
713
-
714
-
715
- def _generate_antigravity_summary(
716
- full_content: str,
717
- turn_status: str,
718
- provider: str = "auto",
719
- system_prompt: Optional[str] = None,
720
- debug_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
721
- metadata_system_prompt: Optional[str] = None,
722
- previous_commit_title: Optional[str] = None,
723
- previous_records: Optional[list] = None,
724
- ) -> Tuple[Optional[str], Optional[str], Optional[str], str, str]:
725
- """
726
- Generate summary for Antigravity IDE sessions using Markdown artifacts directly.
727
-
728
- Unlike JSONL-based sessions, Antigravity uses task.md, walkthrough.md, and
729
- implementation_plan.md files. This function builds a structured prompt from
730
- these Markdown files.
731
-
732
- Args:
733
- full_content: Combined MD content with section markers
734
- turn_status: Status of the turn (completed, etc.)
735
- provider: LLM provider to use
736
- system_prompt: Optional custom system prompt
737
- debug_callback: Optional debug callback
738
- metadata_system_prompt: Optional metadata system prompt
739
- previous_commit_title: Previous commit title for context
740
- previous_records: Previous commit records for context
741
-
742
- Returns:
743
- Tuple of (title, model_name, description, if_last_task, satisfaction)
744
- """
745
- # Parse sections from combined content
746
- sections = _parse_antigravity_sections(full_content)
747
-
748
- # Build structured user prompt with labeled sections
749
- prompt_parts = ["Summarize this Antigravity IDE session:\n"]
750
-
751
- # Task description (from task.md)
752
- if "task" in sections:
753
- prompt_parts.append(f"任务描述 (Task description):\n{sections['task']}\n")
754
-
755
- # Implementation plan (from implementation_plan.md)
756
- if "implementation_plan" in sections:
757
- prompt_parts.append(f"实现计划 (Implementation plan):\n{sections['implementation_plan']}\n")
758
-
759
- # Work completed (from walkthrough.md)
760
- if "walkthrough" in sections:
761
- prompt_parts.append(f"完成的工作 (Work completed):\n{sections['walkthrough']}\n")
762
-
763
- # Add turn status
764
- prompt_parts.append(f"\nTurn status: {turn_status or 'completed'}")
765
-
766
- # Add previous records context
767
- if previous_records is not None:
768
- if len(previous_records) > 0:
769
- records_text = "\n".join(f"- {rec}" for rec in previous_records[-5:])
770
- prompt_parts.append(f"\nLast {len(previous_records[-5:])} records:\n{records_text}")
771
- else:
772
- prompt_parts.append("\nNo previous record")
773
-
774
- user_prompt = "\n\n".join(prompt_parts)
775
-
776
- # Load system prompt if not provided
777
- system_prompt_to_use = system_prompt
778
- if system_prompt_to_use is None:
779
- global _COMMIT_MESSAGE_PROMPT_CACHE
780
- if _COMMIT_MESSAGE_PROMPT_CACHE is not None:
781
- system_prompt_to_use = _COMMIT_MESSAGE_PROMPT_CACHE
782
- else:
783
- candidate = (
784
- Path(__file__).resolve().parents[2]
785
- / "tools"
786
- / "commit_message_prompts"
787
- / "default.md"
788
- )
789
- try:
790
- text = candidate.read_text(encoding="utf-8").strip()
791
- if text:
792
- _COMMIT_MESSAGE_PROMPT_CACHE = text
793
- system_prompt_to_use = text
794
- except Exception:
795
- pass
796
-
797
- if system_prompt_to_use is None:
798
- # Fallback to built-in prompt
799
- system_prompt_to_use = """You are a progress record generator.
800
-
801
- Your job is to use all previous records and turn this turn's agent work into a follow-up progress record that continues from what has already been written, instead of starting a new, standalone record.
802
-
803
- Each record should read like one more sentence or paragraph added to an ongoing note, not a fresh entry. Write as if the reader has already read all previous records. Do not repeat wording, structure, or phrasing used before. Focus only on what is new, different, or newly discovered in this turn
804
-
805
- Follow this order:
806
- 1. Start with the agent's understanding of the user's request based on the previous records.
807
- 2. Add only the new information introduced this turn
808
- 3. End with what the agent did this time
809
- 4. Do not write generic outcome claims such as "now it's properly implemented" or "it fits user expectations." Ending with factual details of what the agent did is enough.
810
-
811
- Critical rule (this is the core)
812
- - Reject all fluff. Describe only concrete facts. The shorter the better. Maximize signal-to-noise. Keep the language sharp and focused.
813
- - Based on the given previous records, write the follow-up record as if you are adding one more line to the existing records
814
-
815
- Generate ONLY the structured summary (metadata is handled separately). Respond with JSON:
816
- {
817
- "record": "A follow-up progress record. Typically 50-100 words, but fewer words are acceptable if sufficient."
818
- }
819
-
820
- Return JSON only, no other text."""
821
- _COMMIT_MESSAGE_PROMPT_CACHE = system_prompt_to_use
822
-
823
- # Emit debug info
824
- _emit_llm_debug(
825
- debug_callback,
826
- {
827
- "event": "turn_context",
828
- "mode": "antigravity_markdown",
829
- "turn_status": turn_status or "unknown",
830
- "sections_found": list(sections.keys()),
831
- "total_content_length": len(full_content),
832
- },
833
- )
834
-
835
- # Call LLM
836
- model_name, response_text = _invoke_llm(
837
- provider=provider,
838
- system_prompt=system_prompt_to_use,
839
- user_prompt=user_prompt,
840
- purpose="summary",
841
- debug_callback=debug_callback,
842
- )
843
-
844
- if not response_text:
845
- return None, model_name, None, "no", "fine"
846
-
847
- # Parse response
848
- try:
849
- summary_data = _extract_json_object(response_text)
850
-
851
- # Try "record" field first (new format)
852
- record = (summary_data.get("record") or "").strip()
853
-
854
- # Fallback to title + description (old format)
855
- if not record:
856
- title = (summary_data.get("title") or "").strip()
857
- description = summary_data.get("description") or ""
858
- if title:
859
- record = title
860
- else:
861
- title = record
862
- description = summary_data.get("description") or ""
863
-
864
- if not record or len(record) < 2:
865
- raise json.JSONDecodeError("Record validation failed", response_text, 0)
866
-
867
- except json.JSONDecodeError:
868
- # Fallback: use first line
869
- first_line = response_text.split("\n")[0][:150].strip()
870
- return first_line, model_name, "", "no", "fine"
871
-
872
- # Classify metadata (if_last_task, satisfaction)
873
- # For Antigravity, extract a summary from sections for classification
874
- user_summary = sections.get("task", "")[:500]
875
- assistant_summary = sections.get("walkthrough", "")[:500]
876
-
877
- if_last_task, satisfaction = _classify_task_metadata(
878
- provider=provider,
879
- user_messages=user_summary,
880
- assistant_replies=assistant_summary,
881
- code_changes="",
882
- summary_title=title,
883
- summary_description=description,
884
- previous_commit_title=previous_commit_title,
885
- debug_callback=debug_callback,
886
- system_prompt=metadata_system_prompt,
887
- )
888
-
889
- return title or record, model_name, description, if_last_task, satisfaction
890
-
891
-
892
527
  def filter_session_content(content: str) -> Tuple[str, str, str]:
893
528
  """
894
529
  Filter session content to extract meaningful information for LLM summarization.
@@ -1167,189 +802,254 @@ def generate_summary_with_llm(
1167
802
  "fine",
1168
803
  )
1169
804
 
1170
- # Try to load system prompt from default.md file
1171
- def _get_commit_message_prompt() -> str:
1172
- global _COMMIT_MESSAGE_PROMPT_CACHE
1173
- if system_prompt is not None:
1174
- return system_prompt
1175
- if _COMMIT_MESSAGE_PROMPT_CACHE is not None:
1176
- return _COMMIT_MESSAGE_PROMPT_CACHE
1177
-
1178
- # Try user-customized prompt first (~/.aline/prompts/commit_message.md)
1179
- user_prompt_path = Path.home() / ".aline" / "prompts" / "commit_message.md"
805
+ # Try cloud provider first if provider is "auto" or "cloud" and user is logged in
806
+ if provider in ("auto", "cloud"):
1180
807
  try:
1181
- if user_prompt_path.exists():
1182
- text = user_prompt_path.read_text(encoding="utf-8").strip()
1183
- if text:
1184
- _COMMIT_MESSAGE_PROMPT_CACHE = text
1185
- logger.debug(
1186
- f"Loaded user-customized commit message prompt from {user_prompt_path}"
1187
- )
1188
- return text
1189
- except Exception:
1190
- logger.debug(
1191
- "Failed to load user-customized commit message prompt, falling back", exc_info=True
1192
- )
1193
-
1194
- # Fall back to built-in prompt (tools/commit_message_prompts/default.md)
1195
- candidate = (
1196
- Path(__file__).resolve().parents[2] / "tools" / "commit_message_prompts" / "default.md"
1197
- )
1198
- try:
1199
- text = candidate.read_text(encoding="utf-8").strip()
1200
- if text:
1201
- _COMMIT_MESSAGE_PROMPT_CACHE = text
1202
- logger.debug(f"Loaded commit message prompt from {candidate}")
1203
- return text
1204
- except Exception:
1205
- logger.debug("Falling back to built-in commit message prompt", exc_info=True)
1206
-
1207
- # Fallback to built-in prompt
1208
- default_system_prompt = """You are a git commit message generator for AI chat sessions.
1209
- You will receive content for ONE dialogue turn (user request, assistant recap, optional recent commit context).
1210
-
1211
- Guidelines:
1212
- - Prefer the assistant recap for factual details.
1213
- - If the assistant recap includes "Turn status: ...", mirror it exactly in the description's first line.
1214
- - Keep continuity with any provided recent commit context, but avoid repeating unchanged background.
1215
-
1216
- Return JSON with EXACTLY two fields:
1217
- {
1218
- "title": "One-line summary (imperative mood, 25-60 chars preferred, max 80).",
1219
- "description": "Status line + 3-7 concise bullets describing what changed in THIS turn."
1220
- }
1221
-
1222
- Rules for title:
1223
- - Imperative, concrete, no vague fillers like "Update session".
1224
- - Mention if the turn continues work (e.g., "Continue fixing ...") or is blocked.
1225
-
1226
- Rules for description:
1227
- - First line MUST be "Status: <completed|user_interrupted|rate_limited|compacted|unknown>".
1228
- - Follow with short "- " bullets explaining WHAT changed and WHY it matters.
1229
- - Include concrete technical anchors (files, functions) when available.
1230
- - If continuing prior work, dedicate one bullet to explain the relationship.
1231
-
1232
- Respond with JSON only."""
1233
- _COMMIT_MESSAGE_PROMPT_CACHE = default_system_prompt
1234
- return _COMMIT_MESSAGE_PROMPT_CACHE
1235
-
1236
- system_prompt_to_use = _get_commit_message_prompt()
1237
-
1238
- user_prompt_parts = ["Summarize this AI chat session:\n"]
1239
- if user_messages:
1240
- user_prompt_parts.append(f"User requests:\n{user_messages[:4000]}\n")
1241
- if assistant_replies:
1242
- user_prompt_parts.append(f"Assistant recap / responses:\n{assistant_replies[:8000]}\n")
1243
- # Note: code_changes are excluded from LLM input per user preference
1244
- # if code_changes:
1245
- # user_prompt_parts.append(f"Code changes:\n{code_changes[:4000]}\n")
1246
- # The output format instruction is now in the system prompt (default.md)
1247
- # user_prompt_parts.append("\nReturn JSON with exactly two fields: title and description. No other text.")
1248
- user_prompt = "\n".join(user_prompt_parts)
1249
-
1250
- model_name, response_text = _invoke_llm(
1251
- provider=provider,
1252
- system_prompt=system_prompt_to_use,
1253
- user_prompt=user_prompt,
1254
- debug_callback=debug_callback,
1255
- purpose="summary",
1256
- )
1257
- if not response_text:
1258
- return None, None, None, None, None
1259
-
1260
- try:
1261
- summary_data = _extract_json_object(response_text)
1262
- # New format: single "record" field instead of title+description
1263
- record = (summary_data.get("record") or "").strip()
1264
-
1265
- # Fallback to old format for backwards compatibility
1266
- if not record:
1267
- title = (summary_data.get("title") or "").strip()
1268
- description = summary_data.get("description") or ""
1269
- if title:
1270
- record = title # Use title as record if present
1271
- else:
1272
- title = record # Use record as title
1273
- description = ""
1274
-
1275
- if not record or len(record) < 2:
1276
- raise json.JSONDecodeError("Record validation failed", response_text, 0)
1277
- except json.JSONDecodeError as exc:
1278
- # Construct detailed error information for debugging
1279
- error_type = type(exc).__name__
1280
- error_msg = str(exc)
1281
-
1282
- logger.warning("Failed to parse JSON from LLM summary: %s", exc)
1283
- logger.debug("Raw summary response: %s", response_text)
1284
-
1285
- # Try to extract partial information from the broken JSON
1286
- import re
1287
-
1288
- record_match = re.search(r'"(?:record|title)"\s*:\s*"([^"]{10,})"', response_text)
1289
- extracted_content = record_match.group(1)[:80] if record_match else None
1290
-
1291
- # Construct informative error title and description
1292
- if "control character" in error_msg.lower():
1293
- error_title = "⚠ JSON Parse Error: Invalid control character"
1294
- error_detail = f"LLM response contained unescaped control characters. Error at {error_msg.split('at:')[-1].strip() if 'at:' in error_msg else 'unknown position'}"
1295
- elif "expecting" in error_msg.lower():
1296
- error_title = "⚠ JSON Parse Error: Malformed JSON"
1297
- error_detail = f"LLM response had invalid JSON syntax: {error_msg[:100]}"
1298
- elif "Record validation failed" in error_msg:
1299
- error_title = "⚠ LLM Error: Empty or invalid record"
1300
- error_detail = "LLM returned JSON but record/title field was empty or too short"
1301
- else:
1302
- error_title = f"⚠ JSON Parse Error: {error_type}"
1303
- error_detail = f"Failed to parse LLM response: {error_msg[:200]}"
1304
-
1305
- # Add extracted content if available
1306
- if extracted_content:
1307
- error_detail += f"\n\nPartial content extracted: {extracted_content}..."
1308
-
1309
- # Add response preview for debugging
1310
- response_preview = (
1311
- response_text[:200].replace("\n", "\\n")
1312
- if len(response_text) > 200
1313
- else response_text.replace("\n", "\\n")
1314
- )
1315
- error_detail += f"\n\nResponse preview: {response_preview}"
1316
- if len(response_text) > 200:
1317
- error_detail += "..."
1318
-
1319
- # Print to stderr for immediate visibility
1320
- print(f" ⚠️ {error_title}", file=sys.stderr)
1321
- print(f" ⚠️ {error_detail.split(chr(10))[0]}", file=sys.stderr)
1322
-
1323
- # Try simple fallback: use first non-JSON line as title
1324
- first_line = response_text.split("\n")[0][:150].strip()
1325
- if first_line and len(first_line) >= 2 and not first_line.startswith("{"):
1326
- print(" ⚠️ Using first line as fallback title", file=sys.stderr)
1327
- return first_line, model_name, error_detail, "no", "fine"
1328
-
1329
- # Return structured error information instead of None
1330
- logger.error("JSON parse error with no fallback: %s", error_title)
1331
- return error_title, model_name, error_detail, "no", "fine"
1332
-
1333
- logger.info("LLM summary response: %s", json.dumps(summary_data, ensure_ascii=False))
1334
- if model_name:
1335
- print(f" ✅ LLM summary successful ({model_name})", file=sys.stderr)
1336
- else:
1337
- print(" ✅ LLM summary successful", file=sys.stderr)
808
+ from .auth import is_logged_in
809
+
810
+ if is_logged_in():
811
+ logger.debug("Attempting cloud LLM for summary generation")
812
+ # Load user custom prompt if available
813
+ custom_prompt = None
814
+ if system_prompt is not None:
815
+ custom_prompt = system_prompt
816
+ else:
817
+ user_prompt_path = Path.home() / ".aline" / "prompts" / "commit_message.md"
818
+ try:
819
+ if user_prompt_path.exists():
820
+ custom_prompt = user_prompt_path.read_text(encoding="utf-8").strip()
821
+ except Exception:
822
+ pass
823
+
824
+ model_name, result = call_llm_cloud(
825
+ task="summary",
826
+ payload={
827
+ "user_messages": user_messages[:4000],
828
+ "assistant_replies": assistant_replies[:8000],
829
+ },
830
+ custom_prompt=custom_prompt,
831
+ silent=False,
832
+ )
1338
833
 
1339
- if_last_task, satisfaction = _classify_task_metadata(
1340
- provider=provider,
1341
- user_messages=user_messages,
1342
- assistant_replies=assistant_replies,
1343
- code_changes=code_changes,
1344
- summary_title=title,
1345
- summary_description=description,
1346
- debug_callback=debug_callback,
1347
- system_prompt=metadata_system_prompt,
1348
- previous_commit_title=previous_commit_title,
1349
- )
834
+ if result:
835
+ title = result.get("title", "")
836
+ description = result.get("description", "")
837
+ logger.info("Cloud LLM summary success: title=%s", title[:50] if title else "")
838
+
839
+ # Now classify metadata using cloud
840
+ if_last_task, satisfaction = _classify_task_metadata(
841
+ provider=provider,
842
+ user_messages=user_messages,
843
+ assistant_replies=assistant_replies,
844
+ code_changes=code_changes,
845
+ summary_title=title,
846
+ summary_description=description,
847
+ debug_callback=debug_callback,
848
+ system_prompt=metadata_system_prompt,
849
+ previous_commit_title=previous_commit_title,
850
+ )
1350
851
 
1351
- # Return record as title, keep description for backwards compatibility
1352
- return title or record, model_name, description, if_last_task, satisfaction
852
+ return title, model_name, description, if_last_task, satisfaction
853
+ else:
854
+ # Cloud LLM failed, return None (local fallback disabled)
855
+ logger.warning("Cloud LLM summary failed, returning None")
856
+ print(" ⚠️ Cloud LLM summary failed", file=sys.stderr)
857
+ return None, None, None, None, None
858
+ except ImportError:
859
+ logger.debug("Auth module not available, skipping cloud LLM")
860
+
861
+ # User not logged in, return None (local fallback disabled)
862
+ logger.warning("Not logged in, cannot use cloud LLM for summary")
863
+ print(" ⚠️ Please login with 'aline login' to use LLM features", file=sys.stderr)
864
+ return None, None, None, None, None
865
+
866
+ # =========================================================================
867
+ # LOCAL LLM FALLBACK DISABLED - Code kept for reference
868
+ # =========================================================================
869
+ # # Fall back to local LLM call
870
+ # # Try to load system prompt from default.md file
871
+ # def _get_commit_message_prompt() -> str:
872
+ # global _COMMIT_MESSAGE_PROMPT_CACHE
873
+ # if system_prompt is not None:
874
+ # return system_prompt
875
+ # if _COMMIT_MESSAGE_PROMPT_CACHE is not None:
876
+ # return _COMMIT_MESSAGE_PROMPT_CACHE
877
+ #
878
+ # # Try user-customized prompt first (~/.aline/prompts/commit_message.md)
879
+ # user_prompt_path = Path.home() / ".aline" / "prompts" / "commit_message.md"
880
+ # try:
881
+ # if user_prompt_path.exists():
882
+ # text = user_prompt_path.read_text(encoding="utf-8").strip()
883
+ # if text:
884
+ # _COMMIT_MESSAGE_PROMPT_CACHE = text
885
+ # logger.debug(
886
+ # f"Loaded user-customized commit message prompt from {user_prompt_path}"
887
+ # )
888
+ # return text
889
+ # except Exception:
890
+ # logger.debug(
891
+ # "Failed to load user-customized commit message prompt, falling back", exc_info=True
892
+ # )
893
+ #
894
+ # # Fall back to built-in prompt (tools/commit_message_prompts/default.md)
895
+ # candidate = (
896
+ # Path(__file__).resolve().parents[2] / "tools" / "commit_message_prompts" / "default.md"
897
+ # )
898
+ # try:
899
+ # text = candidate.read_text(encoding="utf-8").strip()
900
+ # if text:
901
+ # _COMMIT_MESSAGE_PROMPT_CACHE = text
902
+ # logger.debug(f"Loaded commit message prompt from {candidate}")
903
+ # return text
904
+ # except Exception:
905
+ # logger.debug("Falling back to built-in commit message prompt", exc_info=True)
906
+ #
907
+ # # Fallback to built-in prompt
908
+ # default_system_prompt = """You are a git commit message generator for AI chat sessions.
909
+ # You will receive content for ONE dialogue turn (user request, assistant recap, optional recent commit context).
910
+ #
911
+ # Guidelines:
912
+ # - Prefer the assistant recap for factual details.
913
+ # - If the assistant recap includes "Turn status: ...", mirror it exactly in the description's first line.
914
+ # - Keep continuity with any provided recent commit context, but avoid repeating unchanged background.
915
+ #
916
+ # Return JSON with EXACTLY two fields:
917
+ # {
918
+ # "title": "One-line summary (imperative mood, 25-60 chars preferred, max 80).",
919
+ # "description": "Status line + 3-7 concise bullets describing what changed in THIS turn."
920
+ # }
921
+ #
922
+ # Rules for title:
923
+ # - Imperative, concrete, no vague fillers like "Update session".
924
+ # - Mention if the turn continues work (e.g., "Continue fixing ...") or is blocked.
925
+ #
926
+ # Rules for description:
927
+ # - First line MUST be "Status: <completed|user_interrupted|rate_limited|compacted|unknown>".
928
+ # - Follow with short "- " bullets explaining WHAT changed and WHY it matters.
929
+ # - Include concrete technical anchors (files, functions) when available.
930
+ # - If continuing prior work, dedicate one bullet to explain the relationship.
931
+ #
932
+ # Respond with JSON only."""
933
+ # _COMMIT_MESSAGE_PROMPT_CACHE = default_system_prompt
934
+ # return _COMMIT_MESSAGE_PROMPT_CACHE
935
+ #
936
+ # system_prompt_to_use = _get_commit_message_prompt()
937
+ #
938
+ # user_prompt_parts = ["Summarize this AI chat session:\n"]
939
+ # if user_messages:
940
+ # user_prompt_parts.append(f"User requests:\n{user_messages[:4000]}\n")
941
+ # if assistant_replies:
942
+ # user_prompt_parts.append(f"Assistant recap / responses:\n{assistant_replies[:8000]}\n")
943
+ # # Note: code_changes are excluded from LLM input per user preference
944
+ # # if code_changes:
945
+ # # user_prompt_parts.append(f"Code changes:\n{code_changes[:4000]}\n")
946
+ # # The output format instruction is now in the system prompt (default.md)
947
+ # # user_prompt_parts.append("\nReturn JSON with exactly two fields: title and description. No other text.")
948
+ # user_prompt = "\n".join(user_prompt_parts)
949
+ #
950
+ # model_name, response_text = _invoke_llm(
951
+ # provider=provider,
952
+ # system_prompt=system_prompt_to_use,
953
+ # user_prompt=user_prompt,
954
+ # debug_callback=debug_callback,
955
+ # purpose="summary",
956
+ # )
957
+ # if not response_text:
958
+ # return None, None, None, None, None
959
+ #
960
+ # try:
961
+ # summary_data = _extract_json_object(response_text)
962
+ # # New format: single "record" field instead of title+description
963
+ # record = (summary_data.get("record") or "").strip()
964
+ #
965
+ # # Fallback to old format for backwards compatibility
966
+ # if not record:
967
+ # title = (summary_data.get("title") or "").strip()
968
+ # description = summary_data.get("description") or ""
969
+ # if title:
970
+ # record = title # Use title as record if present
971
+ # else:
972
+ # title = record # Use record as title
973
+ # description = ""
974
+ #
975
+ # if not record or len(record) < 2:
976
+ # raise json.JSONDecodeError("Record validation failed", response_text, 0)
977
+ # except json.JSONDecodeError as exc:
978
+ # # Construct detailed error information for debugging
979
+ # error_type = type(exc).__name__
980
+ # error_msg = str(exc)
981
+ #
982
+ # logger.warning("Failed to parse JSON from LLM summary: %s", exc)
983
+ # logger.debug("Raw summary response: %s", response_text)
984
+ #
985
+ # # Try to extract partial information from the broken JSON
986
+ # import re
987
+ #
988
+ # record_match = re.search(r'"(?:record|title)"\s*:\s*"([^"]{10,})"', response_text)
989
+ # extracted_content = record_match.group(1)[:80] if record_match else None
990
+ #
991
+ # # Construct informative error title and description
992
+ # if "control character" in error_msg.lower():
993
+ # error_title = "⚠ JSON Parse Error: Invalid control character"
994
+ # error_detail = f"LLM response contained unescaped control characters. Error at {error_msg.split('at:')[-1].strip() if 'at:' in error_msg else 'unknown position'}"
995
+ # elif "expecting" in error_msg.lower():
996
+ # error_title = "⚠ JSON Parse Error: Malformed JSON"
997
+ # error_detail = f"LLM response had invalid JSON syntax: {error_msg[:100]}"
998
+ # elif "Record validation failed" in error_msg:
999
+ # error_title = "⚠ LLM Error: Empty or invalid record"
1000
+ # error_detail = "LLM returned JSON but record/title field was empty or too short"
1001
+ # else:
1002
+ # error_title = f"⚠ JSON Parse Error: {error_type}"
1003
+ # error_detail = f"Failed to parse LLM response: {error_msg[:200]}"
1004
+ #
1005
+ # # Add extracted content if available
1006
+ # if extracted_content:
1007
+ # error_detail += f"\n\nPartial content extracted: {extracted_content}..."
1008
+ #
1009
+ # # Add response preview for debugging
1010
+ # response_preview = (
1011
+ # response_text[:200].replace("\n", "\\n")
1012
+ # if len(response_text) > 200
1013
+ # else response_text.replace("\n", "\\n")
1014
+ # )
1015
+ # error_detail += f"\n\nResponse preview: {response_preview}"
1016
+ # if len(response_text) > 200:
1017
+ # error_detail += "..."
1018
+ #
1019
+ # # Print to stderr for immediate visibility
1020
+ # print(f" ⚠️ {error_title}", file=sys.stderr)
1021
+ # print(f" ⚠️ {error_detail.split(chr(10))[0]}", file=sys.stderr)
1022
+ #
1023
+ # # Try simple fallback: use first non-JSON line as title
1024
+ # first_line = response_text.split("\n")[0][:150].strip()
1025
+ # if first_line and len(first_line) >= 2 and not first_line.startswith("{"):
1026
+ # print(" ⚠️ Using first line as fallback title", file=sys.stderr)
1027
+ # return first_line, model_name, error_detail, "no", "fine"
1028
+ #
1029
+ # # Return structured error information instead of None
1030
+ # logger.error("JSON parse error with no fallback: %s", error_title)
1031
+ # return error_title, model_name, error_detail, "no", "fine"
1032
+ #
1033
+ # logger.info("LLM summary response: %s", json.dumps(summary_data, ensure_ascii=False))
1034
+ # if model_name:
1035
+ # print(f" ✅ LLM summary successful ({model_name})", file=sys.stderr)
1036
+ # else:
1037
+ # print(" ✅ LLM summary successful", file=sys.stderr)
1038
+ #
1039
+ # if_last_task, satisfaction = _classify_task_metadata(
1040
+ # provider=provider,
1041
+ # user_messages=user_messages,
1042
+ # assistant_replies=assistant_replies,
1043
+ # code_changes=code_changes,
1044
+ # summary_title=title,
1045
+ # summary_description=description,
1046
+ # debug_callback=debug_callback,
1047
+ # system_prompt=metadata_system_prompt,
1048
+ # previous_commit_title=previous_commit_title,
1049
+ # )
1050
+ #
1051
+ # # Return record as title, keep description for backwards compatibility
1052
+ # return title or record, model_name, description, if_last_task, satisfaction
1353
1053
 
1354
1054
 
1355
1055
  def generate_session_filename(user: str, agent: str = "claude") -> str:
@@ -1753,19 +1453,6 @@ def generate_summary_with_llm_from_turn_context(
1753
1453
  Returns:
1754
1454
  Tuple of (title, model_name, description, if_last_task, satisfaction)
1755
1455
  """
1756
- # Check for Antigravity Markdown format first (before JSONL processing)
1757
- if full_turn_content and _is_antigravity_content(full_turn_content):
1758
- return _generate_antigravity_summary(
1759
- full_content=full_turn_content,
1760
- turn_status=turn_status,
1761
- provider=provider,
1762
- system_prompt=system_prompt,
1763
- debug_callback=debug_callback,
1764
- metadata_system_prompt=metadata_system_prompt,
1765
- previous_commit_title=previous_commit_title,
1766
- previous_records=previous_records,
1767
- )
1768
-
1769
1456
  # If full turn content is provided, extract ALL assistant messages
1770
1457
  # (thinking + text responses) and combine with the final conclusion
1771
1458
  if full_turn_content:
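
Taken together, the hooks.py hunks above replace the local call_llm / call_llm_json path with a cloud-only flow through call_llm_cloud, disable the local prompt-loading fallback, and make _classify_task_metadata always return ("no", "fine") without issuing an LLM call. Below is a minimal sketch of how the new summary path might be exercised, assuming the call_llm_cloud signature shown in the diff; the summarize_turn wrapper and the absolute realign.* import paths are illustrative assumptions, not part of the package.

    # Illustrative sketch only: mirrors the cloud-first call sequence visible in the
    # 0.6.4 diff of realign/hooks.py. The wrapper function and absolute import paths
    # are assumptions; the real call sites use relative imports inside the package.
    from realign.auth import is_logged_in
    from realign.llm_client import call_llm_cloud

    def summarize_turn(user_messages: str, assistant_replies: str):
        """Return (title, model_name, description) via the cloud-only summary path."""
        if not is_logged_in():
            # Local fallback is disabled in 0.6.4: without login, no summary is produced.
            return None, None, None
        model_name, result = call_llm_cloud(
            task="summary",
            payload={
                "user_messages": user_messages[:4000],
                "assistant_replies": assistant_replies[:8000],
            },
            custom_prompt=None,  # e.g. contents of ~/.aline/prompts/commit_message.md
            silent=False,
        )
        if not result:
            return None, model_name, None
        return result.get("title", ""), model_name, result.get("description", "")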