aline-ai 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/METADATA +1 -1
  2. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/RECORD +38 -37
  3. realign/__init__.py +1 -1
  4. realign/adapters/__init__.py +0 -3
  5. realign/adapters/codex.py +14 -9
  6. realign/cli.py +42 -236
  7. realign/codex_detector.py +72 -32
  8. realign/codex_home.py +85 -0
  9. realign/codex_terminal_linker.py +172 -0
  10. realign/commands/__init__.py +2 -2
  11. realign/commands/add.py +89 -9
  12. realign/commands/doctor.py +495 -0
  13. realign/commands/export_shares.py +154 -226
  14. realign/commands/init.py +66 -4
  15. realign/commands/watcher.py +30 -80
  16. realign/config.py +9 -46
  17. realign/dashboard/app.py +7 -11
  18. realign/dashboard/screens/event_detail.py +0 -3
  19. realign/dashboard/screens/session_detail.py +0 -1
  20. realign/dashboard/tmux_manager.py +129 -4
  21. realign/dashboard/widgets/config_panel.py +175 -241
  22. realign/dashboard/widgets/events_table.py +71 -128
  23. realign/dashboard/widgets/sessions_table.py +77 -136
  24. realign/dashboard/widgets/terminal_panel.py +349 -27
  25. realign/dashboard/widgets/watcher_panel.py +0 -2
  26. realign/db/sqlite_db.py +77 -2
  27. realign/events/event_summarizer.py +76 -35
  28. realign/events/session_summarizer.py +73 -32
  29. realign/hooks.py +334 -647
  30. realign/llm_client.py +201 -520
  31. realign/triggers/__init__.py +0 -2
  32. realign/triggers/next_turn_trigger.py +4 -5
  33. realign/triggers/registry.py +1 -4
  34. realign/watcher_core.py +53 -35
  35. realign/adapters/antigravity.py +0 -159
  36. realign/triggers/antigravity_trigger.py +0 -140
  37. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/WHEEL +0 -0
  38. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/entry_points.txt +0 -0
  39. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/licenses/LICENSE +0 -0
  40. {aline_ai-0.6.2.dist-info → aline_ai-0.6.4.dist-info}/top_level.txt +0 -0
@@ -24,7 +24,7 @@ from typing import Any, List, Dict, Optional, Tuple, Set, Callable
24
24
 
25
25
  from ..logging_config import setup_logger
26
26
  from ..db.base import SessionRecord, TurnRecord
27
- from ..llm_client import call_llm, extract_json
27
+ from ..llm_client import extract_json, call_llm_cloud
28
28
  from ..auth import get_auth_headers, is_logged_in
29
29
 
30
30
  logger = setup_logger("realign.commands.export_shares", "export_shares.log")
@@ -2160,10 +2160,10 @@ def generate_ui_metadata_with_llm(
2160
2160
  user_requests.append(cleaned_request)
2161
2161
 
2162
2162
  # 如果没有 commit summary,回退到提取消息样本
2163
+ user_messages = []
2164
+ assistant_messages = []
2163
2165
  if not commit_summaries:
2164
2166
  logger.warning("No commit summaries found, falling back to message samples")
2165
- user_messages = []
2166
- assistant_messages = []
2167
2167
 
2168
2168
  for session in sessions[:5]: # 只看前5个session
2169
2169
  for msg in session.get("messages", [])[:10]: # 每个session前10条消息
@@ -2176,238 +2176,166 @@ def generate_ui_metadata_with_llm(
2176
2176
  elif msg.get("role") == "assistant":
2177
2177
  assistant_messages.append(cleaned_content)
2178
2178
 
2179
- # 根据 preset_id 定制 system_prompt
2180
- preset_configs = {
2181
- "default": {
2182
- "role_description": "a general-purpose conversation assistant",
2183
- "title_style": "a neutral, descriptive summary of the topic",
2184
- "welcome_tone": "friendly and informative, with a brief overview of the conversation",
2185
- "description_focus": "what information can be found and how the assistant can help",
2186
- "question_angles": [
2187
- "high-level summary",
2188
- "technical or implementation details",
2189
- "decision-making or reasoning",
2190
- "results, impact, or follow-up",
2191
- ],
2192
- },
2193
- "work-report": {
2194
- "role_description": "a professional work report agent representing the user to colleagues/managers",
2195
- "title_style": "a professional, achievement-oriented summary (e.g., 'Progress on Project X', 'Completed Tasks for Week Y')",
2196
- "welcome_tone": "professional and confident, highlighting accomplishments and progress",
2197
- "description_focus": "what work was done, what value was created, and how the assistant represents the user's contributions",
2198
- "question_angles": [
2199
- "overall progress and achievements",
2200
- "technical solutions implemented",
2201
- "challenges overcome and decisions made",
2202
- "next steps and impact on project goals",
2203
- ],
2204
- },
2205
- "knowledge-agent": {
2206
- "role_description": "a knowledge-sharing agent representing the user's deep thinking as founder/architect/author",
2207
- "title_style": "a thought-provoking, conceptual title (e.g., 'Design Philosophy of Feature X', 'Architectural Decisions for System Y')",
2208
- "welcome_tone": "insightful and educational, emphasizing the thinking process and context behind decisions",
2209
- "description_focus": "the knowledge and insights shared, the reasoning behind decisions, and how the assistant helps others understand the user's thought process",
2210
- "question_angles": [
2211
- "core concepts and philosophy",
2212
- "design rationale and trade-offs",
2213
- "key insights and learning",
2214
- "practical implications and applications",
2215
- ],
2216
- },
2217
- "personality-analyzer": {
2218
- "role_description": "a personality analysis assistant that understands the user's characteristics based on conversation",
2219
- "title_style": "an analytical, personality-focused title (e.g., 'Minhao's Working Style Analysis', 'Communication Pattern Insights')",
2220
- "welcome_tone": "analytical yet friendly, introducing what aspects of personality can be explored",
2221
- "description_focus": "what personality traits, working styles, and communication patterns can be discovered from the conversation",
2222
- "question_angles": [
2223
- "overall personality traits and characteristics",
2224
- "working style and approach to problem-solving",
2225
- "communication patterns and preferences",
2226
- "strengths, growth areas, and unique qualities",
2227
- ],
2228
- },
2229
- }
2230
-
2231
- # Check for user-customized prompt first
2232
- custom_prompt = _get_share_ui_metadata_prompt_template()
2233
- if custom_prompt:
2234
- # Use custom prompt directly
2235
- system_prompt = custom_prompt
2236
- logger.info("Using user-customized share UI metadata prompt")
2237
- else:
2238
- # 获取 preset 配置,如果没有则使用默认
2239
- preset_config = preset_configs.get(preset_id, preset_configs["default"])
2240
-
2241
- # 构建 LLM prompt
2242
- system_prompt = f"""You are a conversation interface copy generator for {preset_config["role_description"]}.
2243
-
2244
- Your task is to analyze a given conversation history (with title and description provided) and generate
2245
- personalized content for sharing and exploring that conversation.
2246
-
2247
- The conversation title and description are already provided. You only need to generate:
2248
- 1. Four preset questions
2249
- 2. A Slack share message
2250
-
2251
- Return the result strictly in JSON format:
2252
-
2253
- {{
2254
- "preset_questions": [
2255
- "Question 1: About {preset_config["question_angles"][0]} (15–30 characters)",
2256
- "Question 2: About {preset_config["question_angles"][1]} (15–30 characters)",
2257
- "Question 3: About {preset_config["question_angles"][2]} (15–30 characters)",
2258
- "Question 4: About {preset_config["question_angles"][3]} (15–30 characters)"
2259
- ],
2260
-
2261
- "slack_message": "A friendly, casual Slack message sharing your work update with the team.
2262
-
2263
- Style Guidelines:
2264
- - Tone: Casual, friendly, conversational (like chatting with teammates)
2265
- - Length: 3-8 sentences depending on conversation complexity
2266
- - Emoji: Use 1-3 relevant emojis to add personality (don't overdo it)
2267
- - Technical depth: Mention key technologies but avoid unnecessary jargon
2268
- - Clarity: Team members should understand the impact without deep technical knowledge
2269
-
2270
- Content Structure:
2271
- 1. Start with context - Give an overview of what you've been working on
2272
- 2. Highlight accomplishments - What did you achieve? (use casual language)
2273
- 3. Mention key details - Important technical decisions or challenges overcome
2274
- 4. End with status/next steps - What's the current state or what's coming next?
2275
-
2276
- Example Good Messages:
2277
- - '🎉 Just wrapped up the session summary feature! Built a new command that automatically generates event summaries from recent commits - both short emotional messages and detailed reports. Had to fix a bunch of commit parsing quirks along the way, but it\\'s solid now. Next up: testing the UI expand/collapse to make sure everything feels smooth.'
2278
- - '🚀 Big update, team! Been cranking on several fronts: First, got the vertical split terminal feature live - you can now split, resize, and close terminal panes as needed. Also revamped the onboarding flow and share dialog to be more compact and user-friendly. Fixed a bunch of UI quirks along the way. Next up: making sure multi-event sharing works seamlessly! 💪'
2279
-
2280
- Bad Examples to Avoid:
2281
- - Too formal: 'I have successfully completed the implementation...'
2282
- - Too technical: 'Implemented event summarization via LLM-driven clustering...'"
2283
- }}
2284
-
2285
- Requirements:
2286
- 1. Preset questions must be based on the actual conversation content, concrete, and useful from the specified angles.
2287
- 2. The Slack message should be casual, friendly, and share your work progress like you're updating teammates.
2288
- 3. All text must be in English or Chinese, depending on the conversation language.
2289
- 4. Output JSON only. Do not include any additional explanation or text."""
2290
-
2291
- # 构建 user prompt - 优先使用 commit summaries
2292
- if commit_summaries:
2293
- user_prompt = f"""Analyze the following conversation and generate content (preset_questions, slack_message).
2294
-
2295
- Event Title (already provided, do not regenerate):
2296
- "{event_title}"
2297
-
2298
- Event Description (already provided, do not regenerate):
2299
- "{event_description}"
2300
-
2301
- Conversation Details:
2302
- - Number of sessions: {len(sessions)}
2303
- - Total messages: {total_messages}
2304
- - Number of commits included: {len(commit_summaries)}
2305
-
2306
- LLM summaries from each commit:
2307
- {chr(10).join(f"{i + 1}. {summary}" for i, summary in enumerate(commit_summaries))}
2308
-
2309
- User's main requests:
2310
- {chr(10).join(f"- {req}" for req in user_requests[:10]) if user_requests else "None"}
2311
-
2312
- Based on the event title, description, and conversation content above, generate:
2313
- 1. Four preset questions users might ask
2314
- 2. A Slack share message
2315
-
2316
- Return the content in JSON format."""
2317
- else:
2318
- # 回退到使用消息样本
2319
- user_prompt = f"""Analyze the following conversation and generate content (preset_questions, slack_message).
2320
-
2321
- Event Title (already provided, do not regenerate):
2322
- "{event_title}"
2323
-
2324
- Event Description (already provided, do not regenerate):
2325
- "{event_description}"
2326
-
2327
- Conversation Details:
2328
- - Number of sessions: {len(sessions)}
2329
- - Total messages: {total_messages}
2179
+ # Try cloud provider first if logged in
2180
+ if provider in ("auto", "cloud") and is_logged_in():
2181
+ logger.debug("Attempting cloud LLM for UI metadata generation")
2182
+ # Load user custom prompt if available
2183
+ custom_prompt = _get_share_ui_metadata_prompt_template()
2330
2184
 
2331
- User message samples:
2332
- {chr(10).join(user_messages[:10])}
2333
-
2334
- Assistant reply samples:
2335
- {chr(10).join(assistant_messages[:10])}
2336
-
2337
- Based on the event title, description, and conversation content above, generate:
2338
- 1. Four preset questions users might ask
2339
- 2. A Slack share message
2185
+ # Build payload
2186
+ cloud_payload = {
2187
+ "event_title": event_title,
2188
+ "event_description": event_description,
2189
+ "sessions_count": len(sessions),
2190
+ "total_messages": total_messages,
2191
+ }
2340
2192
 
2341
- Return the content in JSON format."""
2193
+ if commit_summaries:
2194
+ cloud_payload["commit_summaries"] = commit_summaries
2195
+ cloud_payload["user_requests"] = user_requests
2196
+ else:
2197
+ cloud_payload["user_messages"] = user_messages
2198
+ cloud_payload["assistant_messages"] = assistant_messages
2342
2199
 
2343
- # Use unified LLM client
2344
- try:
2345
- model_name, response_text = call_llm(
2346
- system_prompt=system_prompt,
2347
- user_prompt=user_prompt,
2348
- provider=provider,
2349
- max_tokens=1000,
2350
- temperature=0.7, # Higher temperature for creative UI text
2351
- purpose="ui_metadata",
2200
+ model_name, result = call_llm_cloud(
2201
+ task="ui_metadata",
2202
+ payload=cloud_payload,
2203
+ custom_prompt=custom_prompt,
2204
+ preset_id=preset_id,
2352
2205
  silent=silent,
2353
2206
  )
2354
2207
 
2355
- if not response_text:
2208
+ if result:
2209
+ # Build ui_metadata from result
2210
+ ui_metadata = {
2211
+ "title": event_title,
2212
+ "description": event_description,
2213
+ "welcome": "", # No longer generated by LLM
2214
+ "preset_questions": result.get("preset_questions", []),
2215
+ "slack_message": result.get("slack_message", ""),
2216
+ }
2217
+ debug_info = {
2218
+ "system_prompt": "(cloud)",
2219
+ "user_prompt": "(cloud)",
2220
+ "response_text": str(result),
2221
+ "provider": model_name or "cloud",
2222
+ }
2223
+ logger.info(f"Cloud LLM UI metadata generation success ({model_name})")
2224
+ return ui_metadata, debug_info
2225
+ else:
2226
+ # Cloud LLM failed, return None (local fallback disabled)
2227
+ logger.warning("Cloud LLM UI metadata failed")
2356
2228
  if not silent:
2357
- logger.warning("LLM returned empty response for UI metadata generation")
2358
- print(
2359
- " ⚠️ LLM returned empty response, using default UI text",
2360
- file=sys.stderr,
2361
- )
2229
+ print(" ⚠️ Cloud LLM UI metadata failed", file=sys.stderr)
2362
2230
  return None, None
2363
2231
 
2364
- # Parse JSON response
2365
- ui_metadata = extract_json(response_text)
2366
-
2367
- # Add event title, description, and empty welcome
2368
- ui_metadata["title"] = event_title
2369
- ui_metadata["description"] = event_description
2370
- ui_metadata["welcome"] = "" # No longer generated by LLM
2371
-
2372
- # Return with debug info
2373
- debug_info = {
2374
- "system_prompt": system_prompt,
2375
- "user_prompt": user_prompt,
2376
- "response_text": response_text,
2377
- "provider": model_name or provider,
2378
- }
2379
- return ui_metadata, debug_info
2380
-
2381
- except Exception as e:
2382
- logger.error(f"LLM UI metadata generation failed: {e}", exc_info=True)
2383
- if not silent:
2384
- print(
2385
- f" ⚠️ LLM generation failed: {e}, using default UI text",
2386
- file=sys.stderr,
2387
- )
2388
- return None, None
2389
-
2390
- # Parse JSON response
2391
- ui_metadata = extract_json(response_text)
2392
-
2393
- # Add event title, description, and empty welcome
2394
- ui_metadata["title"] = event_title
2395
- ui_metadata["description"] = event_description
2396
- ui_metadata["welcome"] = "" # No longer generated by LLM
2397
-
2398
- # Return with debug info
2399
- debug_info = {
2400
- "system_prompt": system_prompt,
2401
- "user_prompt": user_prompt,
2402
- "response_text": response_text,
2403
- "provider": model_name or provider,
2404
- }
2405
- return ui_metadata, debug_info
2406
-
2407
- except Exception as e:
2408
- logger.error(f"LLM UI metadata generation failed: {e}", exc_info=True)
2409
- print(f" ⚠️ LLM generation failed: {e}, using default UI text", file=sys.stderr)
2410
- return None, None
2232
+ # User not logged in, return None (local fallback disabled)
2233
+ logger.warning("Not logged in, cannot use cloud LLM for UI metadata")
2234
+ if not silent:
2235
+ print(" ⚠️ Please login with 'aline login' to use LLM features", file=sys.stderr)
2236
+ return None, None
2237
+
2238
+ # =========================================================================
2239
+ # LOCAL LLM FALLBACK DISABLED - Code kept for reference
2240
+ # =========================================================================
2241
+ # # 根据 preset_id 定制 system_prompt
2242
+ # preset_configs = {
2243
+ # "default": {
2244
+ # "role_description": "a general-purpose conversation assistant",
2245
+ # "title_style": "a neutral, descriptive summary of the topic",
2246
+ # "welcome_tone": "friendly and informative, with a brief overview of the conversation",
2247
+ # "description_focus": "what information can be found and how the assistant can help",
2248
+ # "question_angles": [
2249
+ # "high-level summary",
2250
+ # "technical or implementation details",
2251
+ # "decision-making or reasoning",
2252
+ # "results, impact, or follow-up",
2253
+ # ],
2254
+ # },
2255
+ # "work-report": {
2256
+ # "role_description": "a professional work report agent representing the user to colleagues/managers",
2257
+ # "title_style": "a professional, achievement-oriented summary",
2258
+ # "welcome_tone": "professional and confident, highlighting accomplishments and progress",
2259
+ # "description_focus": "what work was done, what value was created",
2260
+ # "question_angles": [
2261
+ # "overall progress and achievements",
2262
+ # "technical solutions implemented",
2263
+ # "challenges overcome and decisions made",
2264
+ # "next steps and impact on project goals",
2265
+ # ],
2266
+ # },
2267
+ # "knowledge-agent": {
2268
+ # "role_description": "a knowledge-sharing agent representing the user's deep thinking",
2269
+ # "title_style": "a thought-provoking, conceptual title",
2270
+ # "welcome_tone": "insightful and educational",
2271
+ # "description_focus": "the knowledge and insights shared",
2272
+ # "question_angles": [
2273
+ # "core concepts and philosophy",
2274
+ # "design rationale and trade-offs",
2275
+ # "key insights and learning",
2276
+ # "practical implications and applications",
2277
+ # ],
2278
+ # },
2279
+ # "personality-analyzer": {
2280
+ # "role_description": "a personality analysis assistant",
2281
+ # "title_style": "an analytical, personality-focused title",
2282
+ # "welcome_tone": "analytical yet friendly",
2283
+ # "description_focus": "personality traits, working styles, and communication patterns",
2284
+ # "question_angles": [
2285
+ # "overall personality traits and characteristics",
2286
+ # "working style and approach to problem-solving",
2287
+ # "communication patterns and preferences",
2288
+ # "strengths, growth areas, and unique qualities",
2289
+ # ],
2290
+ # },
2291
+ # }
2292
+ #
2293
+ # # Check for user-customized prompt first
2294
+ # custom_prompt = _get_share_ui_metadata_prompt_template()
2295
+ # if custom_prompt:
2296
+ # system_prompt = custom_prompt
2297
+ # logger.info("Using user-customized share UI metadata prompt")
2298
+ # else:
2299
+ # preset_config = preset_configs.get(preset_id, preset_configs["default"])
2300
+ # system_prompt = f"""You are a conversation interface copy generator for {preset_config["role_description"]}..."""
2301
+ #
2302
+ # # 构建 user prompt - 优先使用 commit summaries
2303
+ # if commit_summaries:
2304
+ # user_prompt = f"""Analyze the following conversation..."""
2305
+ # else:
2306
+ # user_prompt = f"""Analyze the following conversation..."""
2307
+ #
2308
+ # # Use unified LLM client
2309
+ # try:
2310
+ # model_name, response_text = call_llm(
2311
+ # system_prompt=system_prompt,
2312
+ # user_prompt=user_prompt,
2313
+ # provider=provider,
2314
+ # max_tokens=1000,
2315
+ # temperature=0.7,
2316
+ # purpose="ui_metadata",
2317
+ # silent=silent,
2318
+ # )
2319
+ #
2320
+ # if not response_text:
2321
+ # return None, None
2322
+ #
2323
+ # ui_metadata = extract_json(response_text)
2324
+ # ui_metadata["title"] = event_title
2325
+ # ui_metadata["description"] = event_description
2326
+ # ui_metadata["welcome"] = ""
2327
+ #
2328
+ # debug_info = {
2329
+ # "system_prompt": system_prompt,
2330
+ # "user_prompt": user_prompt,
2331
+ # "response_text": response_text,
2332
+ # "provider": model_name or provider,
2333
+ # }
2334
+ # return ui_metadata, debug_info
2335
+ #
2336
+ # except Exception as e:
2337
+ # logger.error(f"LLM UI metadata generation failed: {e}", exc_info=True)
2338
+ # return None, None
2411
2339
 
2412
2340
 
2413
2341
  def display_selection_statistics(
realign/commands/init.py CHANGED
@@ -1,6 +1,8 @@
1
1
  """ReAlign init command - Initialize ReAlign tracking system."""
2
2
 
3
- from typing import Dict, Any, Optional, Tuple
3
+ import shutil
4
+ import sys
5
+ from typing import Annotated, Any, Dict, Optional, Tuple
4
6
  from pathlib import Path
5
7
  import re
6
8
  import typer
@@ -706,9 +708,21 @@ def init_global(
706
708
 
707
709
 
708
710
  def init_command(
709
- force: bool = typer.Option(
710
- False, "--force", "-f", help="Overwrite global config with defaults"
711
- ),
711
+ force: bool = typer.Option(False, "--force", "-f", help="Overwrite global config with defaults"),
712
+ doctor: Annotated[
713
+ bool,
714
+ typer.Option(
715
+ "--doctor/--no-doctor",
716
+ help="Run 'aline doctor' after init (best for upgrades)",
717
+ ),
718
+ ] = False,
719
+ install_tmux: Annotated[
720
+ bool,
721
+ typer.Option(
722
+ "--install-tmux/--no-install-tmux",
723
+ help="Auto-install tmux via Homebrew if missing (macOS only)",
724
+ ),
725
+ ] = True,
712
726
  start_watcher: Optional[bool] = typer.Option(
713
727
  None,
714
728
  "--start-watcher/--no-start-watcher",
@@ -726,6 +740,54 @@ def init_command(
726
740
  force=force,
727
741
  )
728
742
 
743
+ # First-time UX: tmux is required for the default dashboard experience (tmux mode).
744
+ # Only attempt on macOS; on other platforms, leave it to user.
745
+ if (
746
+ result.get("success")
747
+ and install_tmux
748
+ and result.get("tmux_conf")
749
+ and sys.platform == "darwin"
750
+ and shutil.which("tmux") is None
751
+ ):
752
+ console.print("\n[bold]tmux not found. Installing via Homebrew...[/bold]")
753
+ try:
754
+ from . import add as add_cmd
755
+
756
+ rc = add_cmd.add_tmux_command(install_brew=True)
757
+ if rc != 0:
758
+ result["errors"] = (result.get("errors") or []) + [
759
+ "tmux install failed (required for the default tmux dashboard)",
760
+ "Tip: set ALINE_TERMINAL_MODE=native to run without tmux",
761
+ ]
762
+ except Exception as e:
763
+ result["errors"] = (result.get("errors") or []) + [
764
+ f"tmux install failed: {e}",
765
+ "Tip: set ALINE_TERMINAL_MODE=native to run without tmux",
766
+ ]
767
+
768
+ if doctor and result.get("success"):
769
+ # Run doctor in "safe" mode: restart only if already running, and keep init fast.
770
+ try:
771
+ from . import doctor as doctor_cmd
772
+
773
+ restart_daemons = start_watcher is not False
774
+ doctor_exit = doctor_cmd.run_doctor(
775
+ restart_daemons=restart_daemons,
776
+ start_if_not_running=False,
777
+ verbose=False,
778
+ clear_cache=False,
779
+ )
780
+ if doctor_exit != 0:
781
+ result["success"] = False
782
+ result["errors"] = (result.get("errors") or []) + [
783
+ "aline doctor failed (see output above)"
784
+ ]
785
+ result["message"] = f"{result.get('message', '').strip()} (doctor failed)".strip()
786
+ except Exception as e:
787
+ result["success"] = False
788
+ result["errors"] = (result.get("errors") or []) + [f"aline doctor failed: {e}"]
789
+ result["message"] = f"{result.get('message', '').strip()} (doctor failed)".strip()
790
+
729
791
  watcher_started: Optional[bool] = None
730
792
  watcher_start_exit: Optional[int] = None
731
793
  worker_started: Optional[bool] = None