aline-ai 0.6.2__py3-none-any.whl → 0.6.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -524,7 +524,6 @@ def watcher_status_command(verbose: bool = False) -> int:
         "claude": "Claude Code",
         "codex": "Codex",
         "gemini": "Gemini",
-        "antigravity": "Antigravity",
     }
     source = source_map.get(session_type, session_type)
 
@@ -583,7 +582,6 @@ def watcher_status_command(verbose: bool = False) -> int:
         "claude": "Claude Code",
         "codex": "Codex",
         "gemini": "Gemini",
-        "antigravity": "Antigravity",
     }
     source = source_map.get(session_type, session_type)
 
@@ -1069,7 +1067,6 @@ def _get_session_tracking_status_batch(
        "claude": registry.get_adapter("claude"),
        "codex": registry.get_adapter("codex"),
        "gemini": registry.get_adapter("gemini"),
-       "antigravity": registry.get_adapter("antigravity"),
    }
 
    def _infer_adapter_name(session_file: Path) -> str:
@@ -1081,9 +1078,6 @@ def _get_session_tracking_status_batch(
            return "codex"
        if ".gemini" in parts:
            return "gemini"
-       # Antigravity "sessions" may be directories with markdown artifacts.
-       if session_file.is_dir():
-           return "antigravity"
        return "unknown"
 
    session_infos = []
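
For reference, a runnable sketch of `_infer_adapter_name` as it behaves after this hunk. The `.claude` and `.codex` conditions are not visible in the hunk's context lines and are assumed from the sibling branches; directory-based Antigravity sessions now fall through to `"unknown"`:

```python
from pathlib import Path

def infer_adapter_name(session_file: Path) -> str:
    """Post-change, purely path-based inference (reconstructed sketch)."""
    parts = session_file.parts
    if ".claude" in parts:  # assumed: above the hunk's visible context
        return "claude"
    if ".codex" in parts:   # assumed: above the hunk's visible context
        return "codex"
    if ".gemini" in parts:
        return "gemini"
    return "unknown"

# e.g. ~/.gemini/sessions/abc.json -> "gemini"; a bare directory -> "unknown"
print(infer_adapter_name(Path.home() / ".gemini" / "sessions" / "abc.json"))
```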
@@ -1258,7 +1252,7 @@ def _get_session_tracking_status(session_file: Path, config: ReAlignConfig, db=N
         - committed_turns: int
         - total_turns: int
         - session_id: str
-        - source: str (claude/codex/gemini/antigravity)
+        - source: str (claude/codex/gemini)
         - project_name: str | None
         - last_activity: datetime
         - session_file: Path
@@ -1653,16 +1647,9 @@ def watcher_session_list_command(
         ]
 
         if detect_turns:
-            # Handle Antigravity sessions using timestamps as turn counts
             total_turns = info.get("total_turns") or 0
             committed_turns = info.get("committed_turns") or 0
-            if info["source"] == "antigravity" or total_turns > 1000000000:
-                # Normalize display for timestamp-based tracking
-                norm_total = 1
-                norm_committed = 1 if committed_turns > 0 else 0
-                turns_str = f"{norm_committed}/{norm_total}"
-            else:
-                turns_str = f"{committed_turns}/{total_turns}"
+            turns_str = f"{committed_turns}/{total_turns}"
             row_data.append(turns_str)
 
         row_data.extend(
@@ -2212,7 +2199,6 @@ def watcher_event_list_command(
 def watcher_event_revise_slack_command(
     input_json: dict,
     instruction: str,
-    provider: str = "auto",
     json_output: bool = False,
 ) -> int:
     """
@@ -2227,7 +2213,6 @@ def watcher_event_revise_slack_command(
             - slack_message: Current Slack message
             - password: Optional password
         instruction: User's revision instructions
-        provider: LLM provider (auto, claude, openai)
         json_output: If True, output JSON (same format as input)
 
     Returns:
@@ -2237,7 +2222,7 @@ def watcher_event_revise_slack_command(
 
     try:
         from ..db import get_database
-        from ..hooks import _invoke_llm, _extract_json_object
+        from ..llm_client import call_llm_cloud
 
         # Extract data from input JSON
         event_id = input_json.get("event_id")
@@ -2258,24 +2243,14 @@ def watcher_event_revise_slack_command(
            console.print("[red]Error: Missing 'slack_message' in input JSON[/red]")
            return 1
 
-       # Load prompt template
+       # Load optional custom prompt
+       custom_prompt = None
        prompt_path = Path.home() / ".aline" / "prompts" / "slack_share_revise.md"
-       if not prompt_path.exists():
-           # Fallback to example file
-           prompt_path = Path.home() / ".aline" / "prompts" / "slack_share_revise.md.example"
-
-       if not prompt_path.exists():
-           if not json_output:
-               console.print(f"[red]Prompt file not found: {prompt_path}[/red]")
-               console.print("[dim]Run 'aline init' to initialize prompts[/dim]")
-           return 1
-
        try:
-           system_prompt = prompt_path.read_text(encoding="utf-8")
-       except Exception as e:
-           if not json_output:
-               console.print(f"[red]Failed to read prompt file: {e}[/red]")
-           return 1
+           if prompt_path.exists():
+               custom_prompt = prompt_path.read_text(encoding="utf-8").strip()
+       except Exception:
+           pass
 
        # Build event context from input JSON
        context_parts = []
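
Note that the prompt file is now optional rather than required: if `~/.aline/prompts/slack_share_revise.md` exists, its contents are forwarded as `custom_prompt`; otherwise the command silently falls back to the backend's built-in prompt instead of erroring out. A sketch of how a user might install a custom prompt (the path comes from the hunk above; the instruction text is purely illustrative):

```python
from pathlib import Path

# Path observed in the diff above; whatever this file contains is sent
# along as custom_prompt. The example instruction below is made up.
prompt_path = Path.home() / ".aline" / "prompts" / "slack_share_revise.md"
prompt_path.parent.mkdir(parents=True, exist_ok=True)
prompt_path.write_text(
    "Keep revised Slack messages under 300 characters and preserve links.\n",
    encoding="utf-8",
)
```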
@@ -2286,35 +2261,21 @@ def watcher_event_revise_slack_command(
 
        event_context = "\n".join(context_parts) if context_parts else "No additional context"
 
-       # Construct user prompt
-       user_prompt = f"""**Original Event Context:**
-```
-{event_context}
-```
-
-**Previous Message:**
-```
-{slack_message}
-```
-
-**Revision Request:**
-```
-{instruction}
-```
-"""
-
-       # Call LLM to revise the message
+       # Call cloud LLM to revise the message
        if not json_output:
            console.print(f"→ Revising Slack message for event: [cyan]{event_title}[/cyan]")
-       logger.info(f"Calling LLM to revise Slack message for event {event_id}")
+       logger.info(f"Calling cloud LLM to revise Slack message for event {event_id}")
 
        try:
-           model_name, response_text = _invoke_llm(
-               provider=provider,
-               system_prompt=system_prompt,
-               user_prompt=user_prompt,
-               purpose="revise_slack_message",
-               silent=True,  # Always silent - suppress LLM provider messages
+           model_name, result = call_llm_cloud(
+               task="revise_slack_message",
+               payload={
+                   "event_context": event_context,
+                   "current_message": slack_message,
+                   "revision_instruction": instruction,
+               },
+               custom_prompt=custom_prompt,
+               silent=True,
            )
        except Exception as e:
            if not json_output:
@@ -2322,27 +2283,18 @@ def watcher_event_revise_slack_command(
            logger.error(f"LLM invocation failed: {e}", exc_info=True)
            return 1
 
-       if not response_text:
+       if not result:
            if not json_output:
                console.print("[red]LLM did not return a response[/red]")
            logger.error("LLM returned empty response")
            return 1
 
-       # Parse JSON response to extract the revised message
-       try:
-           parsed = _extract_json_object(response_text)
-           if isinstance(parsed, dict) and "message" in parsed:
-               revised_message = parsed["message"]
-           else:
-               # Fallback: use the raw response if JSON parsing fails or no message field
-               revised_message = response_text
-               logger.warning(
-                   "Response did not contain expected 'message' field, using raw response"
-               )
-       except Exception as e:
-           # Fallback: use the raw response if JSON parsing fails
-           revised_message = response_text
-           logger.warning(f"Failed to parse JSON response: {e}, using raw response")
+       revised_message = result.get("message", "")
+       if not revised_message:
+           if not json_output:
+               console.print("[red]LLM response missing 'message' field[/red]")
+           logger.error("LLM response missing 'message' field")
+           return 1
 
        # Update the event in the database
        db = get_database()
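
Taken together, the hunks above replace the old flow (build a markdown `user_prompt`, route it through a local provider via `_invoke_llm`, then scrape JSON out of the raw text with `_extract_json_object`) with a single `call_llm_cloud` call that returns an already-parsed dict. A sketch of the contract as inferred from this call site; the real signature lives in `realign/llm_client.py`, which is not shown in this diff, so anything beyond these parameter names is an assumption:

```python
from typing import Optional, Tuple

def call_llm_cloud(
    task: str,                            # e.g. "revise_slack_message"
    payload: dict,                        # structured fields, not a prose prompt
    custom_prompt: Optional[str] = None,  # optional user override prompt
    silent: bool = True,
) -> Tuple[str, dict]:
    """Inferred contract: returns (model_name, result) where result is a
    parsed dict. For this task the caller requires a 'message' key and now
    fails hard when it is missing, instead of falling back to raw text."""
    raise NotImplementedError  # sketch only; see realign/llm_client.py
```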
@@ -3201,8 +3153,7 @@ def watcher_session_show_command(
         "claude": "Claude Code",
         "codex": "Codex",
         "gemini": "Gemini",
-        "antigravity": "Antigravity",
-    }
+    }
     source = source_map.get(info["source"], info["source"])
 
     # Build turns data
@@ -3301,7 +3252,6 @@ def watcher_session_show_command(
         "claude": "Claude Code",
         "codex": "Codex",
         "gemini": "Gemini",
-        "antigravity": "Antigravity",
     }
     source = source_map.get(info["source"], info["source"])
 
@@ -3662,7 +3612,6 @@ def watcher_session_delete_command(
         "claude": "Claude Code",
         "codex": "Codex",
         "gemini": "Gemini",
-        "antigravity": "Antigravity",
     }
     source = source_map.get(info.get("source", ""), info.get("source", "unknown"))
 
realign/config.py CHANGED
@@ -20,7 +20,6 @@ class ReAlignConfig:
     auto_detect_claude: bool = True  # Enable Claude Code session auto-detection
     auto_detect_codex: bool = True  # Enable Codex session auto-detection
     auto_detect_gemini: bool = True  # Enable Gemini CLI session auto-detection
-    auto_detect_antigravity: bool = False  # Enable Antigravity IDE brain artifact monitoring
     mcp_auto_commit: bool = True  # Enable watcher auto-commit after each user request completes
     enable_temp_turn_titles: bool = True  # Generate temporary turn titles on user prompt submit
     share_backend_url: str = (
@@ -34,17 +33,6 @@ class ReAlignConfig:
     # Session catch-up settings
     max_catchup_sessions: int = 3  # Max sessions to auto-import on watcher startup
 
-    # LLM API Keys
-    anthropic_api_key: Optional[str] = None  # Anthropic API key (set in config, not environment)
-    openai_api_key: Optional[str] = None  # OpenAI API key (set in config, not environment)
-
-    # LLM Model Configuration
-    llm_anthropic_model: str = "claude-3-5-haiku-20241022"  # Claude model to use
-    llm_openai_model: str = "gpt-4o-mini"  # OpenAI model to use
-    llm_openai_use_responses: bool = False  # Use OpenAI Responses API for reasoning models
-    llm_max_tokens: int = 1000  # Default max tokens
-    llm_temperature: float = 0.0  # Default temperature (0.0 = deterministic)
-
     @classmethod
     def load(cls, config_path: Optional[Path] = None) -> "ReAlignConfig":
         """Load configuration from file with environment variable overrides."""
@@ -87,38 +75,26 @@
            "auto_detect_claude": os.getenv("REALIGN_AUTO_DETECT_CLAUDE"),
            "auto_detect_codex": os.getenv("REALIGN_AUTO_DETECT_CODEX"),
            "auto_detect_gemini": os.getenv("REALIGN_AUTO_DETECT_GEMINI"),
-           "auto_detect_antigravity": os.getenv("REALIGN_AUTO_DETECT_ANTIGRAVITY"),
            "mcp_auto_commit": os.getenv("REALIGN_MCP_AUTO_COMMIT"),
            "enable_temp_turn_titles": os.getenv("REALIGN_ENABLE_TEMP_TURN_TITLES"),
            "share_backend_url": os.getenv("REALIGN_SHARE_BACKEND_URL"),
            "user_name": os.getenv("REALIGN_USER_NAME"),
            "uid": os.getenv("REALIGN_UID"),
            "max_catchup_sessions": os.getenv("REALIGN_MAX_CATCHUP_SESSIONS"),
-           "anthropic_api_key": os.getenv("REALIGN_ANTHROPIC_API_KEY"),
-           "openai_api_key": os.getenv("REALIGN_OPENAI_API_KEY"),
-           "llm_anthropic_model": os.getenv("REALIGN_ANTHROPIC_MODEL"),
-           "llm_openai_model": os.getenv("REALIGN_OPENAI_MODEL"),
-           "llm_openai_use_responses": os.getenv("REALIGN_OPENAI_USE_RESPONSES"),
-           "llm_max_tokens": os.getenv("REALIGN_LLM_MAX_TOKENS"),
-           "llm_temperature": os.getenv("REALIGN_LLM_TEMPERATURE"),
        }
 
        for key, value in env_overrides.items():
            if value is not None:
-               if key in ["summary_max_chars", "max_catchup_sessions", "llm_max_tokens"]:
+               if key in ["summary_max_chars", "max_catchup_sessions"]:
                    config_dict[key] = int(value)
-               elif key in ["llm_temperature"]:
-                   config_dict[key] = float(value)
                elif key in [
                    "redact_on_match",
                    "use_LLM",
                    "auto_detect_claude",
                    "auto_detect_codex",
                    "auto_detect_gemini",
-                   "auto_detect_antigravity",
                    "mcp_auto_commit",
                    "enable_temp_turn_titles",
-                   "llm_openai_use_responses",
                ]:
                    config_dict[key] = value.lower() in ("true", "1", "yes")
                else:
@@ -150,20 +126,12 @@
            "auto_detect_claude": self.auto_detect_claude,
            "auto_detect_codex": self.auto_detect_codex,
            "auto_detect_gemini": self.auto_detect_gemini,
-           "auto_detect_antigravity": self.auto_detect_antigravity,
            "mcp_auto_commit": self.mcp_auto_commit,
            "enable_temp_turn_titles": self.enable_temp_turn_titles,
            "share_backend_url": self.share_backend_url,
            "user_name": self.user_name,
            "uid": self.uid,
            "max_catchup_sessions": self.max_catchup_sessions,
-           "anthropic_api_key": self.anthropic_api_key,
-           "openai_api_key": self.openai_api_key,
-           "llm_anthropic_model": self.llm_anthropic_model,
-           "llm_openai_model": self.llm_openai_model,
-           "llm_openai_use_responses": self.llm_openai_use_responses,
-           "llm_max_tokens": self.llm_max_tokens,
-           "llm_temperature": self.llm_temperature,
        }
 
        with open(config_path, "w", encoding="utf-8") as f:
@@ -200,7 +168,6 @@ use_LLM: true  # Whether to use a cloud LLM to generate summar
 llm_provider: "auto"  # LLM provider: "auto" (try Claude then OpenAI), "claude", or "openai"
 auto_detect_claude: true  # Automatically detect Claude Code session directory (~/.claude/projects/)
 auto_detect_codex: true  # Automatically detect Codex session files (~/.codex/sessions/)
-auto_detect_antigravity: false  # Automatically detect Antigravity IDE brain artifacts (~/.gemini/antigravity/brain/)
 mcp_auto_commit: true  # Enable watcher to auto-commit after each user request completes
 enable_temp_turn_titles: true  # Generate temporary turn titles on user prompt submit
 share_backend_url: "https://realign-server.vercel.app"  # Backend URL for interactive share export
@@ -211,19 +178,6 @@ max_catchup_sessions: 3  # Max sessions to auto-import on watcher
 # Use 'aline watcher session list' to see all sessions
 # Use 'aline watcher session import <id>' to import specific sessions
 
-# LLM API Keys (configured in this file only, NOT from environment variables):
-# anthropic_api_key: "your-anthropic-api-key"  # For Claude (Anthropic)
-# openai_api_key: "your-openai-api-key"  # For OpenAI (GPT)
-# Note: API keys are read ONLY from this config file, not from system environment variables
-# Alternative: Use REALIGN_ANTHROPIC_API_KEY or REALIGN_OPENAI_API_KEY env vars to override
-
-# LLM Model Configuration:
-llm_anthropic_model: "claude-3-5-haiku-20241022"  # Claude model to use
-llm_openai_model: "gpt-4o-mini"  # OpenAI model to use
-llm_openai_use_responses: false  # Use OpenAI Responses API for reasoning models (GPT-5+)
-llm_max_tokens: 1000  # Default max tokens for LLM responses
-llm_temperature: 0.0  # Default temperature (0.0 = deterministic, 1.0 = creative)
-
 # Secret Detection & Redaction:
 # ReAlign can use detect-secrets to automatically scan for and redact:
 # - API keys, tokens, passwords
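
With the LLM keys and model settings gone from `ReAlignConfig`, the environment-override coercion narrows to two integer keys plus the boolean set. A standalone sketch of the surviving coercion rule, with key names taken from the hunks above (the helper name `coerce_override` is illustrative, not from the codebase):

```python
# Mirrors the post-change override coercion in ReAlignConfig.load().
INT_KEYS = {"summary_max_chars", "max_catchup_sessions"}
BOOL_KEYS = {
    "redact_on_match", "use_LLM", "auto_detect_claude",
    "auto_detect_codex", "auto_detect_gemini",
    "mcp_auto_commit", "enable_temp_turn_titles",
}

def coerce_override(key: str, value: str):
    if key in INT_KEYS:
        return int(value)
    if key in BOOL_KEYS:
        return value.lower() in ("true", "1", "yes")
    return value  # everything else stays a raw string

# e.g. REALIGN_MAX_CATCHUP_SESSIONS=5 -> 5, REALIGN_AUTO_DETECT_GEMINI=yes -> True
assert coerce_override("max_catchup_sessions", "5") == 5
assert coerce_override("auto_detect_gemini", "yes") is True
```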
realign/dashboard/app.py CHANGED
@@ -242,10 +242,7 @@ class AlineDashboard(App):
            self.query_one(WatcherPanel).action_next_page()
        elif active_tab_id == "worker":
            self.query_one(WorkerPanel).action_next_page()
-       elif active_tab_id == "sessions":
-           self.query_one(SessionsTable).action_next_page()
-       elif active_tab_id == "events":
-           self.query_one(EventsTable).action_next_page()
+       # sessions and events tabs use scrolling instead of pagination
 
    def action_page_prev(self) -> None:
        """Go to previous page in current panel."""
@@ -256,10 +253,7 @@
            self.query_one(WatcherPanel).action_prev_page()
        elif active_tab_id == "worker":
            self.query_one(WorkerPanel).action_prev_page()
-       elif active_tab_id == "sessions":
-           self.query_one(SessionsTable).action_prev_page()
-       elif active_tab_id == "events":
-           self.query_one(EventsTable).action_prev_page()
+       # sessions and events tabs use scrolling instead of pagination
 
    def action_switch_view(self) -> None:
        """Switch view in current panel (if supported)."""
@@ -364,7 +364,6 @@ class EventDetailScreen(ModalScreen):
            "claude": "Claude",
            "codex": "Codex",
            "gemini": "Gemini",
-           "antigravity": "Antigravity",
        }
        source = source_map.get(session_type, session_type)
        project = workspace.split("/")[-1] if workspace else "-"
@@ -398,7 +397,6 @@
            "claude": "Claude",
            "codex": "Codex",
            "gemini": "Gemini",
-           "antigravity": "Antigravity",
        }
        source = source_map.get(session_type, session_type)
        project = str(workspace).split("/")[-1] if workspace else "-"
@@ -533,7 +531,6 @@
            "claude": "Claude",
            "codex": "Codex",
            "gemini": "Gemini",
-           "antigravity": "Antigravity",
        }
        source = source_map.get(
            record_type or "", record_type or session.get("source") or "unknown"
@@ -171,7 +171,6 @@ class SessionDetailScreen(ModalScreen):
            "claude": "Claude",
            "codex": "Codex",
            "gemini": "Gemini",
-           "antigravity": "Antigravity",
        }
        source = source_map.get(session_type or "", session_type or "unknown")