lean-explore 0.2.2-py3-none-any.whl → 1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lean_explore/__init__.py +14 -1
- lean_explore/api/__init__.py +12 -1
- lean_explore/api/client.py +60 -80
- lean_explore/cli/__init__.py +10 -1
- lean_explore/cli/data_commands.py +157 -479
- lean_explore/cli/display.py +171 -0
- lean_explore/cli/main.py +51 -608
- lean_explore/config.py +244 -0
- lean_explore/extract/__init__.py +5 -0
- lean_explore/extract/__main__.py +368 -0
- lean_explore/extract/doc_gen4.py +200 -0
- lean_explore/extract/doc_parser.py +499 -0
- lean_explore/extract/embeddings.py +371 -0
- lean_explore/extract/github.py +110 -0
- lean_explore/extract/index.py +317 -0
- lean_explore/extract/informalize.py +653 -0
- lean_explore/extract/package_config.py +59 -0
- lean_explore/extract/package_registry.py +45 -0
- lean_explore/extract/package_utils.py +105 -0
- lean_explore/extract/types.py +25 -0
- lean_explore/mcp/__init__.py +11 -1
- lean_explore/mcp/app.py +14 -46
- lean_explore/mcp/server.py +20 -35
- lean_explore/mcp/tools.py +70 -177
- lean_explore/models/__init__.py +9 -0
- lean_explore/models/search_db.py +76 -0
- lean_explore/models/search_types.py +53 -0
- lean_explore/search/__init__.py +32 -0
- lean_explore/search/engine.py +655 -0
- lean_explore/search/scoring.py +156 -0
- lean_explore/search/service.py +68 -0
- lean_explore/search/tokenization.py +71 -0
- lean_explore/util/__init__.py +28 -0
- lean_explore/util/embedding_client.py +92 -0
- lean_explore/util/logging.py +22 -0
- lean_explore/util/openrouter_client.py +63 -0
- lean_explore/util/reranker_client.py +189 -0
- {lean_explore-0.2.2.dist-info → lean_explore-1.0.0.dist-info}/METADATA +55 -10
- lean_explore-1.0.0.dist-info/RECORD +43 -0
- {lean_explore-0.2.2.dist-info → lean_explore-1.0.0.dist-info}/WHEEL +1 -1
- lean_explore-1.0.0.dist-info/entry_points.txt +2 -0
- lean_explore/cli/agent.py +0 -781
- lean_explore/cli/config_utils.py +0 -481
- lean_explore/defaults.py +0 -114
- lean_explore/local/__init__.py +0 -1
- lean_explore/local/search.py +0 -1050
- lean_explore/local/service.py +0 -392
- lean_explore/shared/__init__.py +0 -1
- lean_explore/shared/models/__init__.py +0 -1
- lean_explore/shared/models/api.py +0 -117
- lean_explore/shared/models/db.py +0 -396
- lean_explore-0.2.2.dist-info/RECORD +0 -26
- lean_explore-0.2.2.dist-info/entry_points.txt +0 -2
- {lean_explore-0.2.2.dist-info → lean_explore-1.0.0.dist-info}/licenses/LICENSE +0 -0
- {lean_explore-0.2.2.dist-info → lean_explore-1.0.0.dist-info}/top_level.txt +0 -0
lean_explore/cli/agent.py
DELETED
@@ -1,781 +0,0 @@
-# src/lean_explore/cli/agent.py
-
-"""Command-line interface logic for interacting with an AI agent.
-
-This module provides the agent_chat_command and supporting functions for the chat
-interaction, intended to be registered with a main Typer application.
-"""
-
-import asyncio
-import functools
-import logging
-import os
-import pathlib
-import shutil
-import sys
-import textwrap
-from typing import Optional
-
-import typer
-
-# Ensure 'openai-agents' is installed
-try:
-    from agents import Agent, Runner
-    from agents.exceptions import AgentsException, UserError
-    from agents.mcp import MCPServerStdio
-except ImportError:
-    print(
-        "Fatal Error: The 'openai-agents' library or its expected exceptions "
-        "are not installed/found. Please install 'openai-agents' correctly "
-        "(e.g., 'pip install openai-agents')",
-        file=sys.stderr,
-    )
-    raise typer.Exit(code=1)
-
-try:
-    from rich.console import Console
-    from rich.panel import Panel
-    from rich.text import Text
-except ImportError:
-    print(
-        "Fatal Error: The 'rich' library is not installed/found. "
-        "Please install 'rich' (e.g., 'pip install rich')",
-        file=sys.stderr,
-    )
-    raise typer.Exit(code=1)
-
-
-# Attempt to import your project's config_utils for API key loading
-config_utils_imported = False
-try:
-    from lean_explore.cli import config_utils
-
-    config_utils_imported = True
-except ImportError:
-    # BasicConfig for this warning, actual command configures logger later
-    logging.basicConfig(level=logging.WARNING)
-    logging.warning(
-        "Could not import 'lean_explore.cli.config_utils'. "
-        "Automatic loading/saving of stored API keys will be disabled. "
-        "Ensure 'lean_explore' package is installed correctly and accessible "
-        "in PYTHONPATH (e.g., by running 'pip install -e .' from the project root)."
-    )
-
-    class _MockConfigUtils:
-        """A mock for config_utils if it cannot be imported."""
-
-        def load_api_key(self) -> Optional[str]:
-            """Loads Lean Explore API key."""
-            return None
-
-        def load_openai_api_key(self) -> Optional[str]:
-            """Loads OpenAI API key."""
-            return None
-
-        def save_api_key(self, api_key: str) -> bool:
-            """Saves Lean Explore API key."""
-            return False
-
-        def save_openai_api_key(self, api_key: str) -> bool:
-            """Saves OpenAI API key."""
-            return False
-
-    config_utils = _MockConfigUtils()
-
-
-# --- Async Wrapper for Typer Commands ---
-def typer_async(f):
-    """A decorator to allow Typer commands to be async functions.
-
-    It wraps the async function in `asyncio.run()`.
-
-    Args:
-        f: The asynchronous function to wrap.
-
-    Returns:
-        The wrapped function that can be called synchronously.
-    """
-
-    @functools.wraps(f)
-    def wrapper(*args, **kwargs):
-        return asyncio.run(f(*args, **kwargs))
-
-    return wrapper
-
-
-# --- ANSI Color Codes ---
-class _Colors:
-    """ANSI color codes for terminal output for enhanced readability."""
-
-    BLUE = "\033[94m"
-    GREEN = "\033[92m"
-    YELLOW = "\033[93m"
-    RED = "\033[91m"
-    BOLD = "\033[1m"
-    ENDC = "\033[0m"
-
-
-agent_cli_app = typer.Typer(
-    name="agent_cli_utils",
-    help="Utilities related to AI agent interactions.",
-    no_args_is_help=True,
-)
-
-logger = logging.getLogger(__name__)
-console = Console()
-CHAT_CONTENT_WIDTH = 76  # Consistent with main.py's PANEL_CONTENT_WIDTH
-
-
-def _format_chat_text_for_panel(text_content: str, width: int) -> str:
-    """Wraps text for chat display, padding lines to fill panel width.
-
-    This function processes text line by line, wraps content that exceeds the
-    specified width, and pads each resulting line with spaces to ensure a
-    uniform block appearance within a Rich Panel. Empty lines in the input
-    are preserved as padded blank lines.
-
-    Args:
-        text_content: The text content to wrap.
-        width: The target width for text wrapping and padding.
-
-    Returns:
-        A string with wrapped and padded text.
-    """
-    if not text_content.strip():
-        # For empty or whitespace-only input, provide a single padded blank line
-        return " " * width
-
-    input_lines = text_content.splitlines()
-    output_panel_lines = []
-
-    if not input_lines:
-        return " " * width  # Should be caught by strip(), but safeguard
-
-    for line_text in input_lines:
-        if not line_text.strip():  # An intentionally blank line in the input
-            output_panel_lines.append(" " * width)
-        else:
-            wrapped_segments = textwrap.wrap(
-                line_text,
-                width=width,
-                replace_whitespace=True,  # Collapse multiple spaces within line
-                drop_whitespace=True,  # Remove leading/trailing space from segments
-                break_long_words=True,  # Break words that exceed width
-                break_on_hyphens=True,  # Allow breaking on hyphens
-            )
-            if not wrapped_segments:
-                # If wrapping a non-blank line results in nothing (e.g. only whitespace)
-                output_panel_lines.append(" " * width)
-            else:
-                for segment in wrapped_segments:
-                    output_panel_lines.append(segment.ljust(width))
-
-    if not output_panel_lines:
-        # Fallback if all processing led to no lines (e.g. input was "\n \n")
-        return " " * width
-
-    return "\n".join(output_panel_lines)
-
-
-def _handle_server_connection_error(
-    error: Exception,
-    lean_backend_type: str,
-    debug_mode: bool,
-    context: str = "server startup",
-):
-    """Handles MCP server connection errors by logging and user-friendly messages."""
-    logger.error(
-        f"CRITICAL: Error during MCP {context}: {type(error).__name__}: {error}",
-        exc_info=debug_mode,
-    )
-
-    error_str = str(error).lower()
-    is_timeout_error = "timed out" in error_str or "timeout" in error_str
-
-    if is_timeout_error:
-        console.print(
-            Text.from_markup(
-                "[bold red]Error: The Lean Explore server failed to start "
-                "or respond promptly.[/bold red]"
-            ),
-            stderr=True,
-        )
-        if lean_backend_type == "local":
-            console.print(
-                Text.from_markup(
-                    "[yellow]This often occurs with the 'local' backend due to missing "
-                    "or corrupted data files.[/yellow]"
-                ),
-                stderr=True,
-            )
-            console.print(
-                Text.from_markup("[yellow]Please try the following steps:[/yellow]"),
-                stderr=True,
-            )
-            console.print(
-                Text.from_markup(
-                    "[yellow] 1. Run 'leanexplore data fetch' to download or update "
-                    "the required data.[/yellow]"
-                ),
-                stderr=True,
-            )
-            console.print(
-                Text.from_markup("[yellow] 2. Try this chat command again.[/yellow]"),
-                stderr=True,
-            )
-            console.print(
-                Text.from_markup(
-                    "[yellow] 3. If the problem persists, run 'leanexplore mcp serve "
-                    "--backend local --log-level DEBUG' directly in another terminal "
-                    "to see detailed server startup logs.[/yellow]"
-                ),
-                stderr=True,
-            )
-        else:  # api backend or other cases
-            console.print(
-                Text.from_markup(
-                    "[yellow]Please check your network connection and ensure the API "
-                    "server is accessible.[/yellow]"
-                ),
-                stderr=True,
-            )
-    elif isinstance(error, UserError):
-        console.print(
-            Text.from_markup(
-                f"[bold red]Error: SDK usage problem during {context}: "
-                f"{error}[/bold red]"
-            ),
-            stderr=True,
-        )
-    elif isinstance(error, AgentsException):
-        console.print(
-            Text.from_markup(
-                f"[bold red]Error: An SDK error occurred during {context}: "
-                f"{error}[/bold red]"
-            ),
-            stderr=True,
-        )
-    else:
-        console.print(
-            Text.from_markup(
-                f"[bold red]An unexpected error occurred during {context}: "
-                f"{error}[/bold red]"
-            ),
-            stderr=True,
-        )
-
-    if debug_mode:
-        console.print(
-            Text.from_markup(
-                f"[magenta]Error Details ({type(error).__name__}): {error}[/magenta]"
-            ),
-            stderr=True,
-        )
-    raise typer.Exit(code=1)
-
-
-# --- Core Agent Logic ---
-async def _run_agent_session(
-    lean_backend_type: str,
-    lean_explore_api_key_arg: Optional[str] = None,
-    debug_mode: bool = False,
-    log_level_for_mcp_server: str = "WARNING",
-):
-    """Internal function to set up and run the OpenAI Agent session.
-
-    Args:
-        lean_backend_type: The backend ('api' or 'local') for the Lean Explore server.
-        lean_explore_api_key_arg: API key for Lean Explore (if 'api' backend),
-            already resolved from CLI arg or ENV.
-        debug_mode: If True, enables more verbose logging for this client and
-            the MCP server.
-        log_level_for_mcp_server: The log level to pass to the MCP server.
-    """
-    internal_server_script_path = (
-        pathlib.Path(__file__).parent.parent / "mcp" / "server.py"
-    ).resolve()
-
-    # --- OpenAI API Key Acquisition ---
-    openai_api_key = None
-    if config_utils_imported:
-        logger.debug("Attempting to load OpenAI API key from CLI configuration...")
-        try:
-            openai_api_key = config_utils.load_openai_api_key()
-            if openai_api_key:
-                logger.info("Loaded OpenAI API key from CLI configuration.")
-            else:
-                logger.debug("No OpenAI API key found in CLI configuration.")
-        except Exception as e:  # pylint: disable=broad-except
-            logger.error(
-                f"Error loading OpenAI API key from CLI configuration: {e}",
-                exc_info=debug_mode,
-            )
-
-    if not openai_api_key:
-        console.print(
-            Text.from_markup(
-                "[yellow]OpenAI API key not found in configuration.[/yellow]"
-            )
-        )
-        openai_api_key = typer.prompt(
-            "Please enter your OpenAI API key", hide_input=True
-        )
-        if not openai_api_key:
-            console.print(
-                Text.from_markup(
-                    "[bold red]OpenAI API key cannot be empty. Exiting.[/bold red]"
-                ),
-                stderr=True,
-            )
-            raise typer.Exit(code=1)
-        logger.info("Using OpenAI API key provided via prompt.")
-        if config_utils_imported:
-            if typer.confirm(
-                "Would you like to save this OpenAI API key for future use?"
-            ):
-                if config_utils.save_openai_api_key(openai_api_key):
-                    console.print(
-                        Text.from_markup(
-                            "[green]OpenAI API key saved successfully.[/green]"
-                        )
-                    )
-                else:
-                    console.print(
-                        Text.from_markup("[red]Failed to save OpenAI API key.[/red]"),
-                        stderr=True,
-                    )
-            else:
-                console.print("OpenAI API key will be used for this session only.")
-        else:
-            console.print(
-                Text.from_markup(
-                    "[yellow]Note: config_utils not available, "
-                    "OpenAI API key cannot be saved.[/yellow]"
-                )
-            )
-
-    os.environ["OPENAI_API_KEY"] = openai_api_key
-
-    # --- Lean Explore Server Script and Executable Validation ---
-    if not internal_server_script_path.exists():
-        error_msg = (
-            "Lean Explore MCP server script not found at calculated path: "
-            f"{internal_server_script_path}"
-        )
-        logger.error(error_msg)
-        console.print(
-            Text.from_markup(f"[bold red]Error: {error_msg}[/bold red]"), stderr=True
-        )
-        raise typer.Exit(code=1)
-
-    python_executable = sys.executable
-    if not python_executable or not shutil.which(python_executable):
-        error_msg = (
-            f"Python executable '{python_executable}' not found or not executable. "
-            "Ensure Python is correctly installed and in your PATH."
-        )
-        logger.error(error_msg)
-        console.print(
-            Text.from_markup(f"[bold red]Error: {error_msg}[/bold red]"), stderr=True
-        )
-        raise typer.Exit(code=1)
-
-    # --- Lean Explore API Key Acquisition (if API backend) ---
-    effective_lean_api_key = lean_explore_api_key_arg
-    if lean_backend_type == "api":
-        if not effective_lean_api_key and config_utils_imported:
-            logger.debug(
-                "Lean Explore API key not provided via CLI option or ENV. "
-                "Attempting to load from CLI configuration..."
-            )
-            try:
-                stored_key = config_utils.load_api_key()
-                if stored_key:
-                    effective_lean_api_key = stored_key
-                    logger.debug(
-                        "Successfully loaded Lean Explore API key from "
-                        "CLI configuration."
-                    )
-                else:
-                    logger.debug("No Lean Explore API key found in CLI configuration.")
-            except Exception as e:  # pylint: disable=broad-except
-                logger.error(
-                    f"Error loading Lean Explore API key from CLI configuration: {e}",
-                    exc_info=debug_mode,
-                )
-
-        if not effective_lean_api_key:
-            console.print(
-                Text.from_markup(
-                    "[yellow]Lean Explore API key is required for the 'api' backend "
-                    "and was not found through CLI option, environment variable, "
-                    "or configuration.[/yellow]"
-                )
-            )
-            effective_lean_api_key = typer.prompt(
-                "Please enter your Lean Explore API key", hide_input=True
-            )
-            if not effective_lean_api_key:
-                console.print(
-                    Text.from_markup(
-                        "[bold red]Lean Explore API key cannot be empty for 'api' "
-                        "backend. Exiting.[/bold red]"
-                    ),
-                    stderr=True,
-                )
-                raise typer.Exit(code=1)
-            logger.info("Using Lean Explore API key provided via prompt.")
-            if config_utils_imported:
-                if typer.confirm(
-                    "Would you like to save this Lean Explore API key for future use?"
-                ):
-                    if config_utils.save_api_key(effective_lean_api_key):
-                        console.print(
-                            Text.from_markup(
-                                "[green]Lean Explore API key saved successfully."
-                                "[/green]"
-                            )
-                        )
-                    else:
-                        console.print(
-                            Text.from_markup(
-                                "[red]Failed to save Lean Explore API key.[/red]"
-                            ),
-                            stderr=True,
-                        )
-                else:
-                    console.print(
-                        "Lean Explore API key will be used for this session only."
-                    )
-            else:
-                console.print(
-                    Text.from_markup(
-                        "[yellow]Note: config_utils not available, "
-                        "Lean Explore API key "
-                        "cannot be saved.[/yellow]"
-                    )
-                )
-
-    # --- MCP Server Setup ---
-    mcp_server_args = [
-        str(internal_server_script_path),
-        "--backend",
-        lean_backend_type,
-        "--log-level",
-        log_level_for_mcp_server,
-    ]
-    if lean_backend_type == "api" and effective_lean_api_key:
-        mcp_server_args.extend(["--api-key", effective_lean_api_key])
-
-    lean_explore_mcp_server = MCPServerStdio(
-        name="LeanExploreSearchServer",
-        params={
-            "command": python_executable,
-            "args": mcp_server_args,
-            "cwd": str(internal_server_script_path.parent),
-        },
-        cache_tools_list=True,
-        client_session_timeout_seconds=10.0,
-    )
-
-    # --- Agent Interaction Loop ---
-    try:
-        async with lean_explore_mcp_server as server_instance:
-            logger.debug(
-                f"MCP server '{server_instance.name}' connection initiated. "
-                "Listing tools..."
-            )
-            tools = []
-            try:
-                tools = await server_instance.list_tools()
-                if not tools or not any(tools):
-                    logger.warning(
-                        "MCP Server connected but reported no tools. "
-                        "Agent may lack expected capabilities."
-                    )
-                else:
-                    logger.debug(
-                        f"Available tools from {server_instance.name}: "
-                        f"{[tool.name for tool in tools]}"
-                    )
-            except (UserError, AgentsException, Exception) as e_list_tools:
-                _handle_server_connection_error(
-                    e_list_tools, lean_backend_type, debug_mode, context="tool listing"
-                )
-
-            agent_model = "gpt-4.1"
-            agent_object_name = "Assistant"
-            agent_display_name = (
-                f"{_Colors.BOLD}{_Colors.GREEN}{agent_object_name}{_Colors.ENDC}"
-            )
-
-            agent = Agent(
-                name=agent_object_name,
-                model=agent_model,
-                instructions=(
-                    "You are a CLI assistant for searching a Lean 4 mathematical "
-                    "library.\n"
-                    "**Goal:** Find relevant Lean statements, understand them "
-                    "(including dependencies), and explain them conversationally "
-                    "to the user.\n"
-                    "**Output:** CLI-friendly (plain text, simple lists). "
-                    "NO complex Markdown/LaTeX.\n\n"
-                    "**Packages:** Use exact top-level names for filters (Batteries, "
-                    "Init, Lean, Mathlib, PhysLean, Std). Map subpackage mentions "
-                    "to top-level (e.g., 'Mathlib.Analysis' -> 'Mathlib').\n\n"
-                    "**Core Workflow:**\n"
-                    "1. **Search & Analyze:**\n"
-                    " * Execute multiple distinct `search` queries for each user "
-                    "request (e.g., using full statements, rephrasing). Set `limit` "
-                    ">= 10 for each search.\n"
-                    " * From all search results, select the statement(s) most "
-                    "helpful to the user.\n"
-                    " * For each selected statement, **MUST** use "
-                    "`get_dependencies` to understand its context before "
-                    "explaining.\n\n"
-                    "2. **Explain Results (Conversational & CLI-Friendly):**\n"
-                    " * Briefly state your search approach (e.g., 'I looked into X "
-                    "in Mathlib...').\n"
-                    " * For each selected statement:\n"
-                    " * Introduce it (e.g., Lean name: "
-                    "`primary_declaration.lean_name`).\n"
-                    " * Explain its meaning (use `docstring`, "
-                    "`informal_description`, `statement_text`).\n"
-                    " * Provide the full Lean code (`statement_text`).\n"
-                    " * Explain key dependencies (what they are, their role, "
-                    "using `statement_text` or `display_statement_text` from "
-                    "`get_dependencies` output).\n"
-                    "3. **Specific User Follow-ups (If Asked):**\n"
-                    " * **`get_by_id`:** For a specific ID, provide: ID, Lean name, "
-                    "statement text, source/line, docstring, informal description "
-                    "(structured CLI format).\n"
-                    " * **`get_dependencies` (Direct Request):** For all "
-                    "dependencies of an ID, list: ID, Lean name, statement "
-                    "text/summary. State total count.\n\n"
-                    "Always be concise, helpful, and clear."
-                ),
-                mcp_servers=[server_instance],
-            )
-            console.print(
-                Text.from_markup(
-                    "[bold]Lean Search Assistant[/bold] (powered by [green]"
-                    f"{agent_model}[/green] and [green]{server_instance.name}[/green]) "
-                    "is ready."
-                )
-            )
-            console.print(
-                "Ask me to search for Lean statements (e.g., 'find definitions "
-                "of a scheme')."
-            )
-            if not debug_mode and lean_backend_type == "local":
-                console.print(
-                    Text.from_markup(
-                        "[yellow]Note: The local search server might print startup "
-                        "logs. "
-                        "For a quieter experience, use --debug to see detailed logs or "
-                        "ensure the server's default log level is WARNING.[/yellow]"
-                    )
-                )
-            console.print("Type 'exit' or 'quit' to end the session.")
-            console.print()
-
-            while True:
-                try:
-                    user_styled_name = typer.style(
-                        "You", fg=typer.colors.BLUE, bold=True
-                    )
-                    user_input = typer.prompt(
-                        user_styled_name, default="", prompt_suffix=": "
-                    ).strip()
-
-                    if user_input.lower() in ["exit", "quit"]:
-                        logger.debug("Exiting chat loop.")
-                        break
-                    if not user_input:
-                        continue
-
-                    formatted_user_input = _format_chat_text_for_panel(
-                        user_input, CHAT_CONTENT_WIDTH
-                    )
-                    console.print(
-                        Panel(
-                            formatted_user_input,
-                            title="You",
-                            border_style="blue",
-                            title_align="left",
-                            expand=False,
-                        )
-                    )
-                    console.print()
-
-                    thinking_line_str_ansi = (
-                        f"{agent_display_name}: "
-                        f"{_Colors.YELLOW}Thinking...{_Colors.ENDC}"
-                    )
-                    sys.stdout.write(thinking_line_str_ansi)
-                    sys.stdout.flush()
-
-                    result = await Runner.run(starting_agent=agent, input=user_input)
-
-                    thinking_len_to_clear = Text.from_ansi(
-                        thinking_line_str_ansi
-                    ).cell_len
-                    sys.stdout.write("\r" + " " * thinking_len_to_clear + "\r")
-                    sys.stdout.flush()
-
-                    assistant_output = (
-                        "No specific textual output from the agent for this turn."
-                    )
-                    if result.final_output is not None:
-                        assistant_output = result.final_output
-                    else:
-                        logger.warning(
-                            "Agent run completed without error, but final_output "
-                            "is None."
-                        )
-                        assistant_output = (
-                            "(Agent action completed; no specific text message "
-                            "for this turn.)"
-                        )
-
-                    formatted_assistant_output = _format_chat_text_for_panel(
-                        assistant_output, CHAT_CONTENT_WIDTH
-                    )
-                    console.print(
-                        Panel(
-                            formatted_assistant_output,
-                            title=agent_object_name,
-                            border_style="green",
-                            title_align="left",
-                            expand=False,
-                        )
-                    )
-                    console.print()
-
-                except typer.Abort:
-                    console.print(
-                        Text.from_markup(
-                            "\n[yellow]Chat interrupted by user. Exiting.[/yellow]"
-                        )
-                    )
-                    logger.debug("Chat interrupted by user (typer.Abort). Exiting.")
-                    break
-                except KeyboardInterrupt:
-                    console.print(
-                        Text.from_markup(
-                            "\n[yellow]Chat interrupted by user. Exiting.[/yellow]"
-                        )
-                    )
-                    logger.debug(
-                        "Chat interrupted by user (KeyboardInterrupt). Exiting."
-                    )
-                    break
-                except Exception as e:  # pylint: disable=broad-except
-                    logger.error(
-                        f"An error occurred in the chat loop: {e}", exc_info=debug_mode
-                    )
-                    console.print(
-                        Text.from_markup(
-                            f"[bold red]An unexpected error occurred: {e}[/bold red]"
-                        )
-                    )
-                    break
-    except (UserError, AgentsException, Exception) as e_startup:
-        _handle_server_connection_error(
-            e_startup,
-            lean_backend_type,
-            debug_mode,
-            context="server startup or connection",
-        )
-
-    console.print(
-        Text.from_markup("[bold]Lean Search Assistant session has ended.[/bold]")
-    )
-
-
-@typer_async
-async def agent_chat_command(
-    ctx: typer.Context,
-    lean_backend: str = typer.Option(
-        "api",
-        "--backend",
-        "-lb",
-        help="Backend for the Lean Explore MCP server ('api' or 'local'). "
-        "Default: api.",
-        case_sensitive=False,
-    ),
-    lean_api_key: Optional[str] = typer.Option(
-        None,
-        "--lean-api-key",
-        help="API key for Lean Explore (if 'api' backend). Overrides env var/config.",
-        show_default=False,
-    ),
-    debug: bool = typer.Option(
-        False,
-        "--debug",
-        help="Enable detailed debug logging for this script and the MCP server.",
-    ),
-):
-    """Start an interactive chat session with the Lean Search Assistant.
-
-    The assistant uses the Lean Explore MCP server to search for Lean statements.
-    An OpenAI API key must be available (prompts if not found). If using `--backend api`
-    (default), a Lean Explore API key is also needed (prompts if not found).
-    """
-    client_log_level = logging.DEBUG if debug else logging.INFO
-    logging.basicConfig(
-        level=client_log_level,
-        format="%(asctime)s - %(levelname)s [%(name)s:%(lineno)s] %(message)s",
-        datefmt="%Y-%m-%d %H:%M:%S",
-        force=True,
-    )
-    logger.setLevel(client_log_level)
-
-    library_log_level_for_client = logging.DEBUG if debug else logging.WARNING
-    logging.getLogger("httpx").setLevel(library_log_level_for_client)
-    logging.getLogger("httpcore").setLevel(library_log_level_for_client)
-    logging.getLogger("openai").setLevel(library_log_level_for_client)
-    logging.getLogger("agents").setLevel(library_log_level_for_client)
-
-    mcp_server_log_level_str = "DEBUG" if debug else "WARNING"
-
-    if not config_utils_imported and not debug:
-        if not os.getenv("OPENAI_API_KEY"):
-            console.print(
-                Text.from_markup(
-                    "[yellow]Warning: Automatic loading of stored OpenAI API key "
-                    "is disabled (config module not found). OPENAI_API_KEY env "
-                    "var is not set. You will be prompted if no key is found "
-                    "in config.[/yellow]"
-                ),
-                stderr=True,
-            )
-        if lean_backend == "api" and not (
-            lean_api_key or os.getenv("LEAN_EXPLORE_API_KEY")
-        ):
-            console.print(
-                Text.from_markup(
-                    "[yellow]Warning: Automatic loading of stored Lean Explore "
-                    "API key is disabled (config module not found). If using "
-                    "--backend api, and key is not in env or via option, you "
-                    "will be prompted.[/yellow]"
-                ),
-                stderr=True,
-            )

-    resolved_lean_api_key = lean_api_key
-    if resolved_lean_api_key is None and lean_backend == "api":
-        env_key = os.getenv("LEAN_EXPLORE_API_KEY")
-        if env_key:
-            logger.debug(
-                "Using Lean Explore API key from LEAN_EXPLORE_API_KEY environment "
-                "variable for agent session."
-            )
-            resolved_lean_api_key = env_key
-
-    await _run_agent_session(
-        lean_backend_type=lean_backend,
-        lean_explore_api_key_arg=resolved_lean_api_key,
-        debug_mode=debug,
-        log_level_for_mcp_server=mcp_server_log_level_str,
-    )