aline-ai 0.6.2__py3-none-any.whl → 0.6.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {aline_ai-0.6.2.dist-info → aline_ai-0.6.3.dist-info}/METADATA +1 -1
- {aline_ai-0.6.2.dist-info → aline_ai-0.6.3.dist-info}/RECORD +28 -30
- realign/__init__.py +1 -1
- realign/adapters/__init__.py +0 -3
- realign/cli.py +0 -1
- realign/commands/export_shares.py +154 -226
- realign/commands/watcher.py +28 -79
- realign/config.py +1 -47
- realign/dashboard/app.py +2 -8
- realign/dashboard/screens/event_detail.py +0 -3
- realign/dashboard/screens/session_detail.py +0 -1
- realign/dashboard/widgets/config_panel.py +109 -249
- realign/dashboard/widgets/events_table.py +71 -128
- realign/dashboard/widgets/sessions_table.py +76 -135
- realign/dashboard/widgets/watcher_panel.py +0 -2
- realign/db/sqlite_db.py +1 -2
- realign/events/event_summarizer.py +76 -35
- realign/events/session_summarizer.py +73 -32
- realign/hooks.py +383 -574
- realign/llm_client.py +201 -520
- realign/triggers/__init__.py +0 -2
- realign/triggers/next_turn_trigger.py +4 -5
- realign/triggers/registry.py +1 -4
- realign/watcher_core.py +3 -35
- realign/adapters/antigravity.py +0 -159
- realign/triggers/antigravity_trigger.py +0 -140
- {aline_ai-0.6.2.dist-info → aline_ai-0.6.3.dist-info}/WHEEL +0 -0
- {aline_ai-0.6.2.dist-info → aline_ai-0.6.3.dist-info}/entry_points.txt +0 -0
- {aline_ai-0.6.2.dist-info → aline_ai-0.6.3.dist-info}/licenses/LICENSE +0 -0
- {aline_ai-0.6.2.dist-info → aline_ai-0.6.3.dist-info}/top_level.txt +0 -0
realign/hooks.py
CHANGED
@@ -21,7 +21,7 @@ from .config import ReAlignConfig
 from .adapters import get_adapter_registry
 from .claude_detector import find_claude_sessions_dir
 from .logging_config import setup_logger
-from .llm_client import
+from .llm_client import extract_json, call_llm_cloud
 
 try:
     from .redactor import check_and_redact_session, save_original_session
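The only import change swaps the old local-LLM helpers for `extract_json` and `call_llm_cloud`. For orientation, a minimal sketch (not part of the diff) of how these two helpers are used later in this file; the `call_llm_cloud` signature is inferred from its call sites in the hunks below, not from `llm_client.py` itself:

# Sketch only: signature and return shape inferred from the call sites in this diff.
from realign.llm_client import extract_json, call_llm_cloud

model_name, result = call_llm_cloud(
    task="summary",                  # the hunks below also use task="metadata"
    payload={"user_messages": "...", "assistant_replies": "..."},
    custom_prompt=None,              # optional override loaded from ~/.aline/prompts/
    silent=False,
)
if result:                           # result is a parsed JSON dict, falsy on failure
    title = result.get("title", "")

data = extract_json('{"title": "x"}')  # still used for parsing raw LLM JSON responses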
@@ -73,59 +73,6 @@ def _emit_llm_debug(
         logger.debug("LLM debug callback failed for payload=%s", payload, exc_info=True)
 
 
-def _invoke_llm(
-    *,
-    provider: str,
-    system_prompt: str,
-    user_prompt: str,
-    debug_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
-    purpose: str = "summary",
-    silent: bool = False,
-) -> Tuple[Optional[str], Optional[str]]:
-    """
-    [Deprecated] Wrapper around the new unified call_llm() function.
-    Kept for backwards compatibility.
-    """
-    return call_llm(
-        system_prompt=system_prompt,
-        user_prompt=user_prompt,
-        provider=provider,
-        debug_callback=debug_callback,
-        purpose=purpose,
-        json_mode=True,  # Original function always used JSON mode for OpenAI
-        silent=silent,
-    )
-
-
-def _extract_json_object(response_text: str) -> Dict[str, Any]:
-    """
-    [Deprecated] Wrapper around the new unified extract_json() function.
-    Kept for backwards compatibility.
-    """
-    return extract_json(response_text)
-
-
-def invoke_llm_json_object(
-    *,
-    provider: str,
-    system_prompt: str,
-    user_prompt: str,
-    purpose: str = "generic",
-    debug_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
-) -> Tuple[Optional[str], Optional[Dict[str, Any]]]:
-    """
-    [Deprecated] Wrapper around the new unified call_llm_json() function.
-    Kept for backwards compatibility.
-    """
-    return call_llm_json(
-        system_prompt=system_prompt,
-        user_prompt=user_prompt,
-        provider=provider,
-        debug_callback=debug_callback,
-        purpose=purpose,
-    )
-
-
 def _normalize_if_last_task(raw_value: Any) -> str:
     """Normalize if_last_task values from LLM output."""
     if isinstance(raw_value, bool):
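The removed `_invoke_llm`, `_extract_json_object`, and `invoke_llm_json_object` were deprecated wrappers that only forwarded to `call_llm`, `extract_json`, and `call_llm_json`. A small sketch of what dropping one wrapper means for a caller, assuming `extract_json` keeps the behaviour the removed body delegated to; the other two wrappers have no direct replacement in this file, since the new code below routes through `call_llm_cloud` instead:

# Before (removed wrapper):
#     metadata = _extract_json_object(response_text)
# After (direct call to the unified helper; identical per the removed wrapper body):
from realign.llm_client import extract_json

metadata = extract_json(response_text)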
@@ -216,46 +163,6 @@ def _classify_task_metadata(
     if not (user_messages or assistant_replies or code_changes):
         return defaults
 
-    def _get_metadata_prompt() -> str:
-        global _METADATA_PROMPT_CACHE
-        if system_prompt is not None:
-            return system_prompt
-        if _METADATA_PROMPT_CACHE is not None:
-            return _METADATA_PROMPT_CACHE
-
-        # Try user-customized prompt first (~/.aline/prompts/metadata.md)
-        user_prompt_path = Path.home() / ".aline" / "prompts" / "metadata.md"
-        try:
-            if user_prompt_path.exists():
-                text = user_prompt_path.read_text(encoding="utf-8").strip()
-                if text:
-                    _METADATA_PROMPT_CACHE = text
-                    logger.debug(f"Loaded user-customized metadata prompt from {user_prompt_path}")
-                    return text
-        except Exception:
-            logger.debug(
-                "Failed to load user-customized metadata prompt, falling back", exc_info=True
-            )
-
-        # Fall back to built-in prompt (tools/commit_message_prompts/metadata_default.md)
-        candidate = (
-            Path(__file__).resolve().parents[2]
-            / "tools"
-            / "commit_message_prompts"
-            / "metadata_default.md"
-        )
-        try:
-            text = candidate.read_text(encoding="utf-8").strip()
-            if text:
-                _METADATA_PROMPT_CACHE = text
-                return text
-        except Exception:
-            logger.debug("Falling back to built-in metadata prompt", exc_info=True)
-        _METADATA_PROMPT_CACHE = DEFAULT_METADATA_PROMPT_TEXT
-        return _METADATA_PROMPT_CACHE
-
-    classification_system_prompt = _get_metadata_prompt()
-
     def _clip_text(text: str, limit: int) -> str:
         text = (text or "").strip()
         if not text:
@@ -268,44 +175,144 @@ def _classify_task_metadata(
     current_title = (summary_title or "").strip() or "(missing)"
     previous_title = (previous_commit_title or "").strip() or "(none)"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Try cloud provider first if provider is "auto" or "cloud" and user is logged in
+    if provider in ("auto", "cloud"):
+        try:
+            from .auth import is_logged_in
+
+            if is_logged_in():
+                logger.debug("Attempting cloud LLM for metadata classification")
+                # Load user custom prompt if available
+                custom_prompt = None
+                if system_prompt is not None:
+                    custom_prompt = system_prompt
+                else:
+                    user_prompt_path = Path.home() / ".aline" / "prompts" / "metadata.md"
+                    try:
+                        if user_prompt_path.exists():
+                            custom_prompt = user_prompt_path.read_text(encoding="utf-8").strip()
+                    except Exception:
+                        pass
+
+                model_name, result = call_llm_cloud(
+                    task="metadata",
+                    payload={
+                        "previous_title": previous_title,
+                        "user_messages": clipped_user,
+                        "current_title": current_title,
+                    },
+                    custom_prompt=custom_prompt,
+                    silent=False,
+                )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if result:
+                    if_last_task = result.get("if_last_task", "no")
+                    satisfaction = result.get("satisfaction", "fine")
+                    logger.info(
+                        "Cloud LLM metadata response: if_last_task=%s, satisfaction=%s",
+                        if_last_task,
+                        satisfaction,
+                    )
+                    print(
+                        f" 🔍 Metadata classification: if_last_task={if_last_task}, "
+                        f"satisfaction={satisfaction}",
+                        file=sys.stderr,
+                    )
+                    return if_last_task, satisfaction
+                else:
+                    # Cloud LLM failed, return defaults (local fallback disabled)
+                    logger.warning("Cloud LLM metadata failed, returning defaults")
+                    print(" ⚠️ Cloud LLM metadata failed, using defaults", file=sys.stderr)
+                    return defaults
+        except ImportError:
+            logger.debug("Auth module not available, skipping cloud LLM")
+
+        # User not logged in, return defaults (local fallback disabled)
+        logger.warning("Not logged in, cannot use cloud LLM for metadata")
+        print(" ⚠️ Please login with 'aline login' to use LLM features", file=sys.stderr)
+        return defaults
+
+    # =========================================================================
+    # LOCAL LLM FALLBACK DISABLED - Code kept for reference
+    # =========================================================================
+    # def _get_metadata_prompt() -> str:
+    #     global _METADATA_PROMPT_CACHE
+    #     if system_prompt is not None:
+    #         return system_prompt
+    #     if _METADATA_PROMPT_CACHE is not None:
+    #         return _METADATA_PROMPT_CACHE
+    #
+    #     # Try user-customized prompt first (~/.aline/prompts/metadata.md)
+    #     user_prompt_path = Path.home() / ".aline" / "prompts" / "metadata.md"
+    #     try:
+    #         if user_prompt_path.exists():
+    #             text = user_prompt_path.read_text(encoding="utf-8").strip()
+    #             if text:
+    #                 _METADATA_PROMPT_CACHE = text
+    #                 logger.debug(f"Loaded user-customized metadata prompt from {user_prompt_path}")
+    #                 return text
+    #     except Exception:
+    #         logger.debug(
+    #             "Failed to load user-customized metadata prompt, falling back", exc_info=True
+    #         )
+    #
+    #     # Fall back to built-in prompt (tools/commit_message_prompts/metadata_default.md)
+    #     candidate = (
+    #         Path(__file__).resolve().parents[2]
+    #         / "tools"
+    #         / "commit_message_prompts"
+    #         / "metadata_default.md"
+    #     )
+    #     try:
+    #         text = candidate.read_text(encoding="utf-8").strip()
+    #         if text:
+    #             _METADATA_PROMPT_CACHE = text
+    #             return text
+    #     except Exception:
+    #         logger.debug("Falling back to built-in metadata prompt", exc_info=True)
+    #     _METADATA_PROMPT_CACHE = DEFAULT_METADATA_PROMPT_TEXT
+    #     return _METADATA_PROMPT_CACHE
+    #
+    # classification_system_prompt = _get_metadata_prompt()
+    #
+    # prompt_parts: List[str] = [
+    #     f"Previous commit title: {previous_title}",
+    #     "User request:\n" + clipped_user,
+    #     f"Current commit title: {current_title}",
+    #     'Return strict JSON with exactly these fields:\n{"if_last_task": "yes|no", "satisfaction": "good|fine|bad"}',
+    # ]
+    # user_prompt = "\n\n".join(prompt_parts)
+    #
+    # model_name, response_text = _invoke_llm(
+    #     provider=provider,
+    #     system_prompt=classification_system_prompt,
+    #     user_prompt=user_prompt,
+    #     debug_callback=debug_callback,
+    #     purpose="metadata",
+    # )
+    # if not response_text:
+    #     return defaults
+    #
+    # try:
+    #     metadata = _extract_json_object(response_text)
+    # except json.JSONDecodeError as exc:
+    #     logger.warning("Failed to parse metadata JSON: %s", exc)
+    #     logger.debug("Raw metadata response: %s", response_text)
+    #     return defaults
+    #
+    # if_last_task = _normalize_if_last_task(metadata.get("if_last_task"))
+    # satisfaction = _normalize_satisfaction(metadata.get("satisfaction"))
+    # logger.info("LLM metadata response: %s", json.dumps(metadata, ensure_ascii=False))
+    # print(
+    #     f" 🔍 Metadata classification: if_last_task={metadata.get('if_last_task')}→{if_last_task}, "
+    #     f"satisfaction={metadata.get('satisfaction')}→{satisfaction}",
+    #     file=sys.stderr,
+    # )
+    # if model_name:
+    #     print(f" ✅ LLM metadata classification successful ({model_name})", file=sys.stderr)
+    # else:
+    #     print(" ✅ LLM metadata classification successful", file=sys.stderr)
+    # return if_last_task, satisfaction
 
 
 # ============================================================================
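Condensed, the cloud-first `_classify_task_metadata` path added above reduces to the flow sketched below. This is a simplified sketch only (prompt loading, logging, and stderr output trimmed); `is_logged_in` and `call_llm_cloud` are the functions the diff imports, and their exact behaviour is assumed from the call sites:

from realign.auth import is_logged_in
from realign.llm_client import call_llm_cloud

def classify_metadata_cloud(previous_title, clipped_user, current_title,
                            defaults=("no", "fine")):
    # Local LLM fallback is disabled in 0.6.3: without login, return defaults.
    if not is_logged_in():
        return defaults
    _model_name, result = call_llm_cloud(
        task="metadata",
        payload={
            "previous_title": previous_title,
            "user_messages": clipped_user,
            "current_title": current_title,
        },
        custom_prompt=None,
        silent=False,
    )
    if not result:
        return defaults
    return result.get("if_last_task", "no"), result.get("satisfaction", "fine")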
@@ -527,18 +534,6 @@ def find_all_codex_sessions(days_back: int = 1) -> List[Path]:
     return []
 
 
-def find_all_antigravity_sessions() -> List[Path]:
-    """
-    Find all active Antigravity IDE sessions.
-
-    (Legacy wrapper for AntigravityAdapter)
-    """
-    adapter = get_adapter_registry().get_adapter("antigravity")
-    if adapter:
-        return adapter.discover_sessions()
-    return []
-
-
 def find_all_gemini_cli_sessions() -> List[Path]:
     """
     Find all active Gemini CLI sessions.
@@ -578,8 +573,6 @@ def find_all_active_sessions(
         enabled_adapters.append("codex")
     if config.auto_detect_gemini:
         enabled_adapters.append("gemini")
-    if config.auto_detect_antigravity:
-        enabled_adapters.append("antigravity")
 
     for name in enabled_adapters:
         adapter = registry.get_adapter(name)
@@ -653,242 +646,6 @@ def find_latest_session(history_path: Path, explicit_path: Optional[str] = None)
     return max(session_files, key=lambda p: p.stat().st_mtime)
 
 
-def _is_antigravity_content(content: str) -> bool:
-    """
-    Detect if content is from Antigravity IDE (Markdown artifacts format).
-
-    Antigravity content is identified by the presence of section markers
-    like "--- task.md ---", "--- walkthrough.md ---", or "--- implementation_plan.md ---".
-
-    Args:
-        content: The content string to check
-
-    Returns:
-        True if this is Antigravity Markdown content, False otherwise
-    """
-    if not content:
-        return False
-
-    antigravity_markers = [
-        "--- task.md ---",
-        "--- walkthrough.md ---",
-        "--- implementation_plan.md ---",
-    ]
-
-    for marker in antigravity_markers:
-        if marker in content:
-            return True
-
-    return False
-
-
-def _parse_antigravity_sections(content: str) -> Dict[str, str]:
-    """
-    Parse Antigravity content into separate sections.
-
-    Args:
-        content: Combined content with section markers like "--- task.md ---"
-
-    Returns:
-        Dictionary mapping filename (without .md) to content
-    """
-    sections = {}
-
-    # Split by section markers
-    import re
-
-    pattern = r"--- (task\.md|walkthrough\.md|implementation_plan\.md) ---\n?"
-    parts = re.split(pattern, content)
-
-    # parts will be: ['', 'task.md', '<content>', 'walkthrough.md', '<content>', ...]
-    i = 1
-    while i < len(parts) - 1:
-        filename = parts[i].replace(".md", "")
-        section_content = parts[i + 1].strip()
-        if section_content:
-            sections[filename] = section_content
-        i += 2
-
-    return sections
-
-
-def _generate_antigravity_summary(
-    full_content: str,
-    turn_status: str,
-    provider: str = "auto",
-    system_prompt: Optional[str] = None,
-    debug_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
-    metadata_system_prompt: Optional[str] = None,
-    previous_commit_title: Optional[str] = None,
-    previous_records: Optional[list] = None,
-) -> Tuple[Optional[str], Optional[str], Optional[str], str, str]:
-    """
-    Generate summary for Antigravity IDE sessions using Markdown artifacts directly.
-
-    Unlike JSONL-based sessions, Antigravity uses task.md, walkthrough.md, and
-    implementation_plan.md files. This function builds a structured prompt from
-    these Markdown files.
-
-    Args:
-        full_content: Combined MD content with section markers
-        turn_status: Status of the turn (completed, etc.)
-        provider: LLM provider to use
-        system_prompt: Optional custom system prompt
-        debug_callback: Optional debug callback
-        metadata_system_prompt: Optional metadata system prompt
-        previous_commit_title: Previous commit title for context
-        previous_records: Previous commit records for context
-
-    Returns:
-        Tuple of (title, model_name, description, if_last_task, satisfaction)
-    """
-    # Parse sections from combined content
-    sections = _parse_antigravity_sections(full_content)
-
-    # Build structured user prompt with labeled sections
-    prompt_parts = ["Summarize this Antigravity IDE session:\n"]
-
-    # Task description (from task.md)
-    if "task" in sections:
-        prompt_parts.append(f"任务描述 (Task description):\n{sections['task']}\n")
-
-    # Implementation plan (from implementation_plan.md)
-    if "implementation_plan" in sections:
-        prompt_parts.append(f"实现计划 (Implementation plan):\n{sections['implementation_plan']}\n")
-
-    # Work completed (from walkthrough.md)
-    if "walkthrough" in sections:
-        prompt_parts.append(f"完成的工作 (Work completed):\n{sections['walkthrough']}\n")
-
-    # Add turn status
-    prompt_parts.append(f"\nTurn status: {turn_status or 'completed'}")
-
-    # Add previous records context
-    if previous_records is not None:
-        if len(previous_records) > 0:
-            records_text = "\n".join(f"- {rec}" for rec in previous_records[-5:])
-            prompt_parts.append(f"\nLast {len(previous_records[-5:])} records:\n{records_text}")
-        else:
-            prompt_parts.append("\nNo previous record")
-
-    user_prompt = "\n\n".join(prompt_parts)
-
-    # Load system prompt if not provided
-    system_prompt_to_use = system_prompt
-    if system_prompt_to_use is None:
-        global _COMMIT_MESSAGE_PROMPT_CACHE
-        if _COMMIT_MESSAGE_PROMPT_CACHE is not None:
-            system_prompt_to_use = _COMMIT_MESSAGE_PROMPT_CACHE
-        else:
-            candidate = (
-                Path(__file__).resolve().parents[2]
-                / "tools"
-                / "commit_message_prompts"
-                / "default.md"
-            )
-            try:
-                text = candidate.read_text(encoding="utf-8").strip()
-                if text:
-                    _COMMIT_MESSAGE_PROMPT_CACHE = text
-                    system_prompt_to_use = text
-            except Exception:
-                pass
-
-    if system_prompt_to_use is None:
-        # Fallback to built-in prompt
-        system_prompt_to_use = """You are a progress record generator.
-
-Your job is to use all previous records and turn this turn's agent work into a follow-up progress record that continues from what has already been written, instead of starting a new, standalone record.
-
-Each record should read like one more sentence or paragraph added to an ongoing note, not a fresh entry. Write as if the reader has already read all previous records. Do not repeat wording, structure, or phrasing used before. Focus only on what is new, different, or newly discovered in this turn
-
-Follow this order:
-1. Start with the agent's understanding of the user's request based on the previous records.
-2. Add only the new information introduced this turn
-3. End with what the agent did this time
-4. Do not write generic outcome claims such as "now it's properly implemented" or "it fits user expectations." Ending with factual details of what the agent did is enough.
-
-Critical rule (this is the core)
-- Reject all fluff. Describe only concrete facts. The shorter the better. Maximize signal-to-noise. Keep the language sharp and focused.
-- Based on the given previous records, write the follow-up record as if you are adding one more line to the existing records
-
-Generate ONLY the structured summary (metadata is handled separately). Respond with JSON:
-{
-  "record": "A follow-up progress record. Typically 50-100 words, but fewer words are acceptable if sufficient."
-}
-
-Return JSON only, no other text."""
-        _COMMIT_MESSAGE_PROMPT_CACHE = system_prompt_to_use
-
-    # Emit debug info
-    _emit_llm_debug(
-        debug_callback,
-        {
-            "event": "turn_context",
-            "mode": "antigravity_markdown",
-            "turn_status": turn_status or "unknown",
-            "sections_found": list(sections.keys()),
-            "total_content_length": len(full_content),
-        },
-    )
-
-    # Call LLM
-    model_name, response_text = _invoke_llm(
-        provider=provider,
-        system_prompt=system_prompt_to_use,
-        user_prompt=user_prompt,
-        purpose="summary",
-        debug_callback=debug_callback,
-    )
-
-    if not response_text:
-        return None, model_name, None, "no", "fine"
-
-    # Parse response
-    try:
-        summary_data = _extract_json_object(response_text)
-
-        # Try "record" field first (new format)
-        record = (summary_data.get("record") or "").strip()
-
-        # Fallback to title + description (old format)
-        if not record:
-            title = (summary_data.get("title") or "").strip()
-            description = summary_data.get("description") or ""
-            if title:
-                record = title
-        else:
-            title = record
-            description = summary_data.get("description") or ""
-
-        if not record or len(record) < 2:
-            raise json.JSONDecodeError("Record validation failed", response_text, 0)
-
-    except json.JSONDecodeError:
-        # Fallback: use first line
-        first_line = response_text.split("\n")[0][:150].strip()
-        return first_line, model_name, "", "no", "fine"
-
-    # Classify metadata (if_last_task, satisfaction)
-    # For Antigravity, extract a summary from sections for classification
-    user_summary = sections.get("task", "")[:500]
-    assistant_summary = sections.get("walkthrough", "")[:500]
-
-    if_last_task, satisfaction = _classify_task_metadata(
-        provider=provider,
-        user_messages=user_summary,
-        assistant_replies=assistant_summary,
-        code_changes="",
-        summary_title=title,
-        summary_description=description,
-        previous_commit_title=previous_commit_title,
-        debug_callback=debug_callback,
-        system_prompt=metadata_system_prompt,
-    )
-
-    return title or record, model_name, description, if_last_task, satisfaction
-
-
 def filter_session_content(content: str) -> Tuple[str, str, str]:
     """
     Filter session content to extract meaningful information for LLM summarization.
@@ -1167,189 +924,254 @@ def generate_summary_with_llm(
|
|
|
1167
924
|
"fine",
|
|
1168
925
|
)
|
|
1169
926
|
|
|
1170
|
-
# Try
|
|
1171
|
-
|
|
1172
|
-
global _COMMIT_MESSAGE_PROMPT_CACHE
|
|
1173
|
-
if system_prompt is not None:
|
|
1174
|
-
return system_prompt
|
|
1175
|
-
if _COMMIT_MESSAGE_PROMPT_CACHE is not None:
|
|
1176
|
-
return _COMMIT_MESSAGE_PROMPT_CACHE
|
|
1177
|
-
|
|
1178
|
-
# Try user-customized prompt first (~/.aline/prompts/commit_message.md)
|
|
1179
|
-
user_prompt_path = Path.home() / ".aline" / "prompts" / "commit_message.md"
|
|
1180
|
-
try:
|
|
1181
|
-
if user_prompt_path.exists():
|
|
1182
|
-
text = user_prompt_path.read_text(encoding="utf-8").strip()
|
|
1183
|
-
if text:
|
|
1184
|
-
_COMMIT_MESSAGE_PROMPT_CACHE = text
|
|
1185
|
-
logger.debug(
|
|
1186
|
-
f"Loaded user-customized commit message prompt from {user_prompt_path}"
|
|
1187
|
-
)
|
|
1188
|
-
return text
|
|
1189
|
-
except Exception:
|
|
1190
|
-
logger.debug(
|
|
1191
|
-
"Failed to load user-customized commit message prompt, falling back", exc_info=True
|
|
1192
|
-
)
|
|
1193
|
-
|
|
1194
|
-
# Fall back to built-in prompt (tools/commit_message_prompts/default.md)
|
|
1195
|
-
candidate = (
|
|
1196
|
-
Path(__file__).resolve().parents[2] / "tools" / "commit_message_prompts" / "default.md"
|
|
1197
|
-
)
|
|
927
|
+
# Try cloud provider first if provider is "auto" or "cloud" and user is logged in
|
|
928
|
+
if provider in ("auto", "cloud"):
|
|
1198
929
|
try:
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
logger.debug(
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
{
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
}
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
- Mention if the turn continues work (e.g., "Continue fixing ...") or is blocked.
|
|
1225
|
-
|
|
1226
|
-
Rules for description:
|
|
1227
|
-
- First line MUST be "Status: <completed|user_interrupted|rate_limited|compacted|unknown>".
|
|
1228
|
-
- Follow with short "- " bullets explaining WHAT changed and WHY it matters.
|
|
1229
|
-
- Include concrete technical anchors (files, functions) when available.
|
|
1230
|
-
- If continuing prior work, dedicate one bullet to explain the relationship.
|
|
1231
|
-
|
|
1232
|
-
Respond with JSON only."""
|
|
1233
|
-
_COMMIT_MESSAGE_PROMPT_CACHE = default_system_prompt
|
|
1234
|
-
return _COMMIT_MESSAGE_PROMPT_CACHE
|
|
1235
|
-
|
|
1236
|
-
system_prompt_to_use = _get_commit_message_prompt()
|
|
1237
|
-
|
|
1238
|
-
user_prompt_parts = ["Summarize this AI chat session:\n"]
|
|
1239
|
-
if user_messages:
|
|
1240
|
-
user_prompt_parts.append(f"User requests:\n{user_messages[:4000]}\n")
|
|
1241
|
-
if assistant_replies:
|
|
1242
|
-
user_prompt_parts.append(f"Assistant recap / responses:\n{assistant_replies[:8000]}\n")
|
|
1243
|
-
# Note: code_changes are excluded from LLM input per user preference
|
|
1244
|
-
# if code_changes:
|
|
1245
|
-
# user_prompt_parts.append(f"Code changes:\n{code_changes[:4000]}\n")
|
|
1246
|
-
# The output format instruction is now in the system prompt (default.md)
|
|
1247
|
-
# user_prompt_parts.append("\nReturn JSON with exactly two fields: title and description. No other text.")
|
|
1248
|
-
user_prompt = "\n".join(user_prompt_parts)
|
|
1249
|
-
|
|
1250
|
-
model_name, response_text = _invoke_llm(
|
|
1251
|
-
provider=provider,
|
|
1252
|
-
system_prompt=system_prompt_to_use,
|
|
1253
|
-
user_prompt=user_prompt,
|
|
1254
|
-
debug_callback=debug_callback,
|
|
1255
|
-
purpose="summary",
|
|
1256
|
-
)
|
|
1257
|
-
if not response_text:
|
|
1258
|
-
return None, None, None, None, None
|
|
1259
|
-
|
|
1260
|
-
try:
|
|
1261
|
-
summary_data = _extract_json_object(response_text)
|
|
1262
|
-
# New format: single "record" field instead of title+description
|
|
1263
|
-
record = (summary_data.get("record") or "").strip()
|
|
1264
|
-
|
|
1265
|
-
# Fallback to old format for backwards compatibility
|
|
1266
|
-
if not record:
|
|
1267
|
-
title = (summary_data.get("title") or "").strip()
|
|
1268
|
-
description = summary_data.get("description") or ""
|
|
1269
|
-
if title:
|
|
1270
|
-
record = title # Use title as record if present
|
|
1271
|
-
else:
|
|
1272
|
-
title = record # Use record as title
|
|
1273
|
-
description = ""
|
|
1274
|
-
|
|
1275
|
-
if not record or len(record) < 2:
|
|
1276
|
-
raise json.JSONDecodeError("Record validation failed", response_text, 0)
|
|
1277
|
-
except json.JSONDecodeError as exc:
|
|
1278
|
-
# Construct detailed error information for debugging
|
|
1279
|
-
error_type = type(exc).__name__
|
|
1280
|
-
error_msg = str(exc)
|
|
1281
|
-
|
|
1282
|
-
logger.warning("Failed to parse JSON from LLM summary: %s", exc)
|
|
1283
|
-
logger.debug("Raw summary response: %s", response_text)
|
|
1284
|
-
|
|
1285
|
-
# Try to extract partial information from the broken JSON
|
|
1286
|
-
import re
|
|
1287
|
-
|
|
1288
|
-
record_match = re.search(r'"(?:record|title)"\s*:\s*"([^"]{10,})"', response_text)
|
|
1289
|
-
extracted_content = record_match.group(1)[:80] if record_match else None
|
|
1290
|
-
|
|
1291
|
-
# Construct informative error title and description
|
|
1292
|
-
if "control character" in error_msg.lower():
|
|
1293
|
-
error_title = "⚠ JSON Parse Error: Invalid control character"
|
|
1294
|
-
error_detail = f"LLM response contained unescaped control characters. Error at {error_msg.split('at:')[-1].strip() if 'at:' in error_msg else 'unknown position'}"
|
|
1295
|
-
elif "expecting" in error_msg.lower():
|
|
1296
|
-
error_title = "⚠ JSON Parse Error: Malformed JSON"
|
|
1297
|
-
error_detail = f"LLM response had invalid JSON syntax: {error_msg[:100]}"
|
|
1298
|
-
elif "Record validation failed" in error_msg:
|
|
1299
|
-
error_title = "⚠ LLM Error: Empty or invalid record"
|
|
1300
|
-
error_detail = "LLM returned JSON but record/title field was empty or too short"
|
|
1301
|
-
else:
|
|
1302
|
-
error_title = f"⚠ JSON Parse Error: {error_type}"
|
|
1303
|
-
error_detail = f"Failed to parse LLM response: {error_msg[:200]}"
|
|
1304
|
-
|
|
1305
|
-
# Add extracted content if available
|
|
1306
|
-
if extracted_content:
|
|
1307
|
-
error_detail += f"\n\nPartial content extracted: {extracted_content}..."
|
|
1308
|
-
|
|
1309
|
-
# Add response preview for debugging
|
|
1310
|
-
response_preview = (
|
|
1311
|
-
response_text[:200].replace("\n", "\\n")
|
|
1312
|
-
if len(response_text) > 200
|
|
1313
|
-
else response_text.replace("\n", "\\n")
|
|
1314
|
-
)
|
|
1315
|
-
error_detail += f"\n\nResponse preview: {response_preview}"
|
|
1316
|
-
if len(response_text) > 200:
|
|
1317
|
-
error_detail += "..."
|
|
1318
|
-
|
|
1319
|
-
# Print to stderr for immediate visibility
|
|
1320
|
-
print(f" ⚠️ {error_title}", file=sys.stderr)
|
|
1321
|
-
print(f" ⚠️ {error_detail.split(chr(10))[0]}", file=sys.stderr)
|
|
1322
|
-
|
|
1323
|
-
# Try simple fallback: use first non-JSON line as title
|
|
1324
|
-
first_line = response_text.split("\n")[0][:150].strip()
|
|
1325
|
-
if first_line and len(first_line) >= 2 and not first_line.startswith("{"):
|
|
1326
|
-
print(" ⚠️ Using first line as fallback title", file=sys.stderr)
|
|
1327
|
-
return first_line, model_name, error_detail, "no", "fine"
|
|
1328
|
-
|
|
1329
|
-
# Return structured error information instead of None
|
|
1330
|
-
logger.error("JSON parse error with no fallback: %s", error_title)
|
|
1331
|
-
return error_title, model_name, error_detail, "no", "fine"
|
|
1332
|
-
|
|
1333
|
-
logger.info("LLM summary response: %s", json.dumps(summary_data, ensure_ascii=False))
|
|
1334
|
-
if model_name:
|
|
1335
|
-
print(f" ✅ LLM summary successful ({model_name})", file=sys.stderr)
|
|
1336
|
-
else:
|
|
1337
|
-
print(" ✅ LLM summary successful", file=sys.stderr)
|
|
930
|
+
from .auth import is_logged_in
|
|
931
|
+
|
|
932
|
+
if is_logged_in():
|
|
933
|
+
logger.debug("Attempting cloud LLM for summary generation")
|
|
934
|
+
# Load user custom prompt if available
|
|
935
|
+
custom_prompt = None
|
|
936
|
+
if system_prompt is not None:
|
|
937
|
+
custom_prompt = system_prompt
|
|
938
|
+
else:
|
|
939
|
+
user_prompt_path = Path.home() / ".aline" / "prompts" / "commit_message.md"
|
|
940
|
+
try:
|
|
941
|
+
if user_prompt_path.exists():
|
|
942
|
+
custom_prompt = user_prompt_path.read_text(encoding="utf-8").strip()
|
|
943
|
+
except Exception:
|
|
944
|
+
pass
|
|
945
|
+
|
|
946
|
+
model_name, result = call_llm_cloud(
|
|
947
|
+
task="summary",
|
|
948
|
+
payload={
|
|
949
|
+
"user_messages": user_messages[:4000],
|
|
950
|
+
"assistant_replies": assistant_replies[:8000],
|
|
951
|
+
},
|
|
952
|
+
custom_prompt=custom_prompt,
|
|
953
|
+
silent=False,
|
|
954
|
+
)
|
|
1338
955
|
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
956
|
+
if result:
|
|
957
|
+
title = result.get("title", "")
|
|
958
|
+
description = result.get("description", "")
|
|
959
|
+
logger.info("Cloud LLM summary success: title=%s", title[:50] if title else "")
|
|
960
|
+
|
|
961
|
+
# Now classify metadata using cloud
|
|
962
|
+
if_last_task, satisfaction = _classify_task_metadata(
|
|
963
|
+
provider=provider,
|
|
964
|
+
user_messages=user_messages,
|
|
965
|
+
assistant_replies=assistant_replies,
|
|
966
|
+
code_changes=code_changes,
|
|
967
|
+
summary_title=title,
|
|
968
|
+
summary_description=description,
|
|
969
|
+
debug_callback=debug_callback,
|
|
970
|
+
system_prompt=metadata_system_prompt,
|
|
971
|
+
previous_commit_title=previous_commit_title,
|
|
972
|
+
)
|
|
1350
973
|
|
|
1351
|
-
|
|
1352
|
-
|
|
974
|
+
return title, model_name, description, if_last_task, satisfaction
|
|
975
|
+
else:
|
|
976
|
+
# Cloud LLM failed, return None (local fallback disabled)
|
|
977
|
+
logger.warning("Cloud LLM summary failed, returning None")
|
|
978
|
+
print(" ⚠️ Cloud LLM summary failed", file=sys.stderr)
|
|
979
|
+
return None, None, None, None, None
|
|
980
|
+
except ImportError:
|
|
981
|
+
logger.debug("Auth module not available, skipping cloud LLM")
|
|
982
|
+
|
|
983
|
+
# User not logged in, return None (local fallback disabled)
|
|
984
|
+
logger.warning("Not logged in, cannot use cloud LLM for summary")
|
|
985
|
+
print(" ⚠️ Please login with 'aline login' to use LLM features", file=sys.stderr)
|
|
986
|
+
return None, None, None, None, None
|
|
987
|
+
|
|
988
|
+
# =========================================================================
|
|
989
|
+
# LOCAL LLM FALLBACK DISABLED - Code kept for reference
|
|
990
|
+
# =========================================================================
|
|
991
|
+
# # Fall back to local LLM call
|
|
992
|
+
# # Try to load system prompt from default.md file
|
|
993
|
+
# def _get_commit_message_prompt() -> str:
|
|
994
|
+
# global _COMMIT_MESSAGE_PROMPT_CACHE
|
|
995
|
+
# if system_prompt is not None:
|
|
996
|
+
# return system_prompt
|
|
997
|
+
# if _COMMIT_MESSAGE_PROMPT_CACHE is not None:
|
|
998
|
+
# return _COMMIT_MESSAGE_PROMPT_CACHE
|
|
999
|
+
#
|
|
1000
|
+
# # Try user-customized prompt first (~/.aline/prompts/commit_message.md)
|
|
1001
|
+
# user_prompt_path = Path.home() / ".aline" / "prompts" / "commit_message.md"
|
|
1002
|
+
# try:
|
|
1003
|
+
# if user_prompt_path.exists():
|
|
1004
|
+
# text = user_prompt_path.read_text(encoding="utf-8").strip()
|
|
1005
|
+
# if text:
|
|
1006
|
+
# _COMMIT_MESSAGE_PROMPT_CACHE = text
|
|
1007
|
+
# logger.debug(
|
|
1008
|
+
# f"Loaded user-customized commit message prompt from {user_prompt_path}"
|
|
1009
|
+
# )
|
|
1010
|
+
# return text
|
|
1011
|
+
# except Exception:
|
|
1012
|
+
# logger.debug(
|
|
1013
|
+
# "Failed to load user-customized commit message prompt, falling back", exc_info=True
|
|
1014
|
+
# )
|
|
1015
|
+
#
|
|
1016
|
+
# # Fall back to built-in prompt (tools/commit_message_prompts/default.md)
|
|
1017
|
+
# candidate = (
|
|
1018
|
+
# Path(__file__).resolve().parents[2] / "tools" / "commit_message_prompts" / "default.md"
|
|
1019
|
+
# )
|
|
1020
|
+
# try:
|
|
1021
|
+
# text = candidate.read_text(encoding="utf-8").strip()
|
|
1022
|
+
# if text:
|
|
1023
|
+
# _COMMIT_MESSAGE_PROMPT_CACHE = text
|
|
1024
|
+
# logger.debug(f"Loaded commit message prompt from {candidate}")
|
|
1025
|
+
# return text
|
|
1026
|
+
# except Exception:
|
|
1027
|
+
# logger.debug("Falling back to built-in commit message prompt", exc_info=True)
|
|
1028
|
+
#
|
|
1029
|
+
# # Fallback to built-in prompt
|
|
1030
|
+
# default_system_prompt = """You are a git commit message generator for AI chat sessions.
|
|
1031
|
+
# You will receive content for ONE dialogue turn (user request, assistant recap, optional recent commit context).
|
|
1032
|
+
#
|
|
1033
|
+
# Guidelines:
|
|
1034
|
+
# - Prefer the assistant recap for factual details.
|
|
1035
|
+
# - If the assistant recap includes "Turn status: ...", mirror it exactly in the description's first line.
|
|
1036
|
+
# - Keep continuity with any provided recent commit context, but avoid repeating unchanged background.
|
|
1037
|
+
#
|
|
1038
|
+
# Return JSON with EXACTLY two fields:
|
|
1039
|
+
# {
|
|
1040
|
+
# "title": "One-line summary (imperative mood, 25-60 chars preferred, max 80).",
|
|
1041
|
+
# "description": "Status line + 3-7 concise bullets describing what changed in THIS turn."
|
|
1042
|
+
# }
|
|
1043
|
+
#
|
|
1044
|
+
# Rules for title:
|
|
1045
|
+
# - Imperative, concrete, no vague fillers like "Update session".
|
|
1046
|
+
# - Mention if the turn continues work (e.g., "Continue fixing ...") or is blocked.
|
|
1047
|
+
#
|
|
1048
|
+
# Rules for description:
|
|
1049
|
+
# - First line MUST be "Status: <completed|user_interrupted|rate_limited|compacted|unknown>".
|
|
1050
|
+
# - Follow with short "- " bullets explaining WHAT changed and WHY it matters.
|
|
1051
|
+
# - Include concrete technical anchors (files, functions) when available.
|
|
1052
|
+
# - If continuing prior work, dedicate one bullet to explain the relationship.
|
|
1053
|
+
#
|
|
1054
|
+
# Respond with JSON only."""
|
|
1055
|
+
# _COMMIT_MESSAGE_PROMPT_CACHE = default_system_prompt
|
|
1056
|
+
# return _COMMIT_MESSAGE_PROMPT_CACHE
|
|
1057
|
+
#
|
|
1058
|
+
# system_prompt_to_use = _get_commit_message_prompt()
|
|
1059
|
+
#
|
|
1060
|
+
# user_prompt_parts = ["Summarize this AI chat session:\n"]
|
|
1061
|
+
# if user_messages:
|
|
1062
|
+
# user_prompt_parts.append(f"User requests:\n{user_messages[:4000]}\n")
|
|
1063
|
+
# if assistant_replies:
|
|
1064
|
+
# user_prompt_parts.append(f"Assistant recap / responses:\n{assistant_replies[:8000]}\n")
|
|
1065
|
+
# # Note: code_changes are excluded from LLM input per user preference
|
|
1066
|
+
# # if code_changes:
|
|
1067
|
+
# # user_prompt_parts.append(f"Code changes:\n{code_changes[:4000]}\n")
|
|
1068
|
+
# # The output format instruction is now in the system prompt (default.md)
|
|
1069
|
+
# # user_prompt_parts.append("\nReturn JSON with exactly two fields: title and description. No other text.")
|
|
1070
|
+
# user_prompt = "\n".join(user_prompt_parts)
|
|
1071
|
+
#
|
|
1072
|
+
# model_name, response_text = _invoke_llm(
|
|
1073
|
+
# provider=provider,
|
|
1074
|
+
# system_prompt=system_prompt_to_use,
|
|
1075
|
+
# user_prompt=user_prompt,
|
|
1076
|
+
# debug_callback=debug_callback,
|
|
1077
|
+
# purpose="summary",
|
|
1078
|
+
# )
|
|
1079
|
+
# if not response_text:
|
|
1080
|
+
# return None, None, None, None, None
|
|
1081
|
+
#
|
|
1082
|
+
# try:
|
|
1083
|
+
# summary_data = _extract_json_object(response_text)
|
|
1084
|
+
# # New format: single "record" field instead of title+description
|
|
1085
|
+
# record = (summary_data.get("record") or "").strip()
|
|
1086
|
+
#
|
|
1087
|
+
# # Fallback to old format for backwards compatibility
|
|
1088
|
+
# if not record:
|
|
1089
|
+
# title = (summary_data.get("title") or "").strip()
|
|
1090
|
+
# description = summary_data.get("description") or ""
|
|
1091
|
+
# if title:
|
|
1092
|
+
# record = title # Use title as record if present
|
|
1093
|
+
# else:
|
|
1094
|
+
# title = record # Use record as title
|
|
1095
|
+
# description = ""
|
|
1096
|
+
#
|
|
1097
|
+
# if not record or len(record) < 2:
|
|
1098
|
+
# raise json.JSONDecodeError("Record validation failed", response_text, 0)
|
|
1099
|
+
# except json.JSONDecodeError as exc:
|
|
1100
|
+
# # Construct detailed error information for debugging
|
|
1101
|
+
# error_type = type(exc).__name__
|
|
1102
|
+
# error_msg = str(exc)
|
|
1103
|
+
#
|
|
1104
|
+
# logger.warning("Failed to parse JSON from LLM summary: %s", exc)
|
|
1105
|
+
# logger.debug("Raw summary response: %s", response_text)
|
|
1106
|
+
#
|
|
1107
|
+
# # Try to extract partial information from the broken JSON
|
|
1108
|
+
# import re
|
|
1109
|
+
#
|
|
1110
|
+
# record_match = re.search(r'"(?:record|title)"\s*:\s*"([^"]{10,})"', response_text)
|
|
1111
|
+
# extracted_content = record_match.group(1)[:80] if record_match else None
|
|
1112
|
+
#
|
|
1113
|
+
# # Construct informative error title and description
|
|
1114
|
+
# if "control character" in error_msg.lower():
|
|
1115
|
+
# error_title = "⚠ JSON Parse Error: Invalid control character"
|
|
1116
|
+
# error_detail = f"LLM response contained unescaped control characters. Error at {error_msg.split('at:')[-1].strip() if 'at:' in error_msg else 'unknown position'}"
|
|
1117
|
+
# elif "expecting" in error_msg.lower():
|
|
1118
|
+
# error_title = "⚠ JSON Parse Error: Malformed JSON"
|
|
1119
|
+
# error_detail = f"LLM response had invalid JSON syntax: {error_msg[:100]}"
|
|
1120
|
+
# elif "Record validation failed" in error_msg:
|
|
1121
|
+
# error_title = "⚠ LLM Error: Empty or invalid record"
|
|
1122
|
+
# error_detail = "LLM returned JSON but record/title field was empty or too short"
|
|
1123
|
+
# else:
|
|
1124
|
+
# error_title = f"⚠ JSON Parse Error: {error_type}"
|
|
1125
|
+
# error_detail = f"Failed to parse LLM response: {error_msg[:200]}"
|
|
1126
|
+
#
|
|
1127
|
+
# # Add extracted content if available
|
|
1128
|
+
# if extracted_content:
|
|
1129
|
+
# error_detail += f"\n\nPartial content extracted: {extracted_content}..."
|
|
1130
|
+
#
|
|
1131
|
+
# # Add response preview for debugging
|
|
1132
|
+
# response_preview = (
|
|
1133
|
+
# response_text[:200].replace("\n", "\\n")
|
|
1134
|
+
# if len(response_text) > 200
|
|
1135
|
+
# else response_text.replace("\n", "\\n")
|
|
1136
|
+
# )
|
|
1137
|
+
# error_detail += f"\n\nResponse preview: {response_preview}"
|
|
1138
|
+
# if len(response_text) > 200:
|
|
1139
|
+
# error_detail += "..."
|
|
1140
|
+
#
|
|
1141
|
+
# # Print to stderr for immediate visibility
|
|
1142
|
+
# print(f" ⚠️ {error_title}", file=sys.stderr)
|
|
1143
|
+
# print(f" ⚠️ {error_detail.split(chr(10))[0]}", file=sys.stderr)
|
|
1144
|
+
#
|
|
1145
|
+
# # Try simple fallback: use first non-JSON line as title
|
|
1146
|
+
# first_line = response_text.split("\n")[0][:150].strip()
|
|
1147
|
+
# if first_line and len(first_line) >= 2 and not first_line.startswith("{"):
|
|
1148
|
+
# print(" ⚠️ Using first line as fallback title", file=sys.stderr)
|
|
1149
|
+
# return first_line, model_name, error_detail, "no", "fine"
|
|
1150
|
+
#
|
|
1151
|
+
# # Return structured error information instead of None
|
|
1152
|
+
# logger.error("JSON parse error with no fallback: %s", error_title)
|
|
1153
|
+
# return error_title, model_name, error_detail, "no", "fine"
|
|
1154
|
+
#
|
|
1155
|
+
# logger.info("LLM summary response: %s", json.dumps(summary_data, ensure_ascii=False))
|
|
1156
|
+
# if model_name:
|
|
1157
|
+
# print(f" ✅ LLM summary successful ({model_name})", file=sys.stderr)
|
|
1158
|
+
# else:
|
|
1159
|
+
# print(" ✅ LLM summary successful", file=sys.stderr)
|
|
1160
|
+
#
|
|
1161
|
+
# if_last_task, satisfaction = _classify_task_metadata(
|
|
1162
|
+
# provider=provider,
|
|
1163
|
+
# user_messages=user_messages,
|
|
1164
|
+
# assistant_replies=assistant_replies,
|
|
1165
|
+
# code_changes=code_changes,
|
|
1166
|
+
# summary_title=title,
|
|
1167
|
+
# summary_description=description,
|
|
1168
|
+
# debug_callback=debug_callback,
|
|
1169
|
+
# system_prompt=metadata_system_prompt,
|
|
1170
|
+
# previous_commit_title=previous_commit_title,
|
|
1171
|
+
# )
|
|
1172
|
+
#
|
|
1173
|
+
# # Return record as title, keep description for backwards compatibility
|
|
1174
|
+
# return title or record, model_name, description, if_last_task, satisfaction
|
|
1353
1175
|
|
|
1354
1176
|
|
|
1355
1177
|
def generate_session_filename(user: str, agent: str = "claude") -> str:
|
|
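The summary path mirrors the metadata path: one `call_llm_cloud(task="summary", ...)` call produces `title` and `description`, then `_classify_task_metadata` fills in the last two fields of the function's five-tuple return value `(title, model_name, description, if_last_task, satisfaction)`. A rough sketch of the resulting caller-side contract; the failure values are taken from the added code, the sample values are hypothetical:

# Sketch of the contract implied by the added code above.
def handle_summary(result_tuple):
    title, model_name, description, if_last_task, satisfaction = result_tuple
    if title is None:
        # Not logged in ('aline login') or the cloud call failed; with the
        # local fallback disabled, every field comes back as None.
        return "no summary"
    # if_last_task is "yes"/"no"; satisfaction is "good"/"fine"/"bad".
    return f"{title} [{satisfaction}] ({model_name})"

print(handle_summary((None, None, None, None, None)))
print(handle_summary(("Fix login gate", "cloud-model", "Status: completed", "no", "good")))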
@@ -1753,19 +1575,6 @@ def generate_summary_with_llm_from_turn_context(
     Returns:
         Tuple of (title, model_name, description, if_last_task, satisfaction)
     """
-    # Check for Antigravity Markdown format first (before JSONL processing)
-    if full_turn_content and _is_antigravity_content(full_turn_content):
-        return _generate_antigravity_summary(
-            full_content=full_turn_content,
-            turn_status=turn_status,
-            provider=provider,
-            system_prompt=system_prompt,
-            debug_callback=debug_callback,
-            metadata_system_prompt=metadata_system_prompt,
-            previous_commit_title=previous_commit_title,
-            previous_records=previous_records,
-        )
-
     # If full turn content is provided, extract ALL assistant messages
     # (thinking + text responses) and combine with the final conclusion
     if full_turn_content: