sqlshell-0.4.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. sqlshell/__init__.py +84 -0
  2. sqlshell/__main__.py +4926 -0
  3. sqlshell/ai_autocomplete.py +392 -0
  4. sqlshell/ai_settings_dialog.py +337 -0
  5. sqlshell/context_suggester.py +768 -0
  6. sqlshell/create_test_data.py +152 -0
  7. sqlshell/data/create_test_data.py +137 -0
  8. sqlshell/db/__init__.py +6 -0
  9. sqlshell/db/database_manager.py +1318 -0
  10. sqlshell/db/export_manager.py +188 -0
  11. sqlshell/editor.py +1166 -0
  12. sqlshell/editor_integration.py +127 -0
  13. sqlshell/execution_handler.py +421 -0
  14. sqlshell/menus.py +262 -0
  15. sqlshell/notification_manager.py +370 -0
  16. sqlshell/query_tab.py +904 -0
  17. sqlshell/resources/__init__.py +1 -0
  18. sqlshell/resources/icon.png +0 -0
  19. sqlshell/resources/logo_large.png +0 -0
  20. sqlshell/resources/logo_medium.png +0 -0
  21. sqlshell/resources/logo_small.png +0 -0
  22. sqlshell/resources/splash_screen.gif +0 -0
  23. sqlshell/space_invaders.py +501 -0
  24. sqlshell/splash_screen.py +405 -0
  25. sqlshell/sqlshell/__init__.py +5 -0
  26. sqlshell/sqlshell/create_test_data.py +118 -0
  27. sqlshell/sqlshell/create_test_databases.py +96 -0
  28. sqlshell/sqlshell_demo.png +0 -0
  29. sqlshell/styles.py +257 -0
  30. sqlshell/suggester_integration.py +330 -0
  31. sqlshell/syntax_highlighter.py +124 -0
  32. sqlshell/table_list.py +996 -0
  33. sqlshell/ui/__init__.py +6 -0
  34. sqlshell/ui/bar_chart_delegate.py +49 -0
  35. sqlshell/ui/filter_header.py +469 -0
  36. sqlshell/utils/__init__.py +16 -0
  37. sqlshell/utils/profile_cn2.py +1661 -0
  38. sqlshell/utils/profile_column.py +2635 -0
  39. sqlshell/utils/profile_distributions.py +616 -0
  40. sqlshell/utils/profile_entropy.py +347 -0
  41. sqlshell/utils/profile_foreign_keys.py +779 -0
  42. sqlshell/utils/profile_keys.py +2834 -0
  43. sqlshell/utils/profile_ohe.py +934 -0
  44. sqlshell/utils/profile_ohe_advanced.py +754 -0
  45. sqlshell/utils/profile_ohe_comparison.py +237 -0
  46. sqlshell/utils/profile_prediction.py +926 -0
  47. sqlshell/utils/profile_similarity.py +876 -0
  48. sqlshell/utils/search_in_df.py +90 -0
  49. sqlshell/widgets.py +400 -0
  50. sqlshell-0.4.4.dist-info/METADATA +441 -0
  51. sqlshell-0.4.4.dist-info/RECORD +54 -0
  52. sqlshell-0.4.4.dist-info/WHEEL +5 -0
  53. sqlshell-0.4.4.dist-info/entry_points.txt +2 -0
  54. sqlshell-0.4.4.dist-info/top_level.txt +1 -0
sqlshell/ai_autocomplete.py
@@ -0,0 +1,392 @@
+"""
+AI-powered SQL autocomplete using OpenAI API.
+
+This module provides intelligent SQL suggestions using OpenAI's GPT models
+when an API key is configured.
+"""
+
+import os
+import json
+import threading
+from typing import Optional, Callable, List, Dict, Any
+from PyQt6.QtCore import QObject, pyqtSignal, QTimer
+
+
+class AIAutocompleteManager(QObject):
+    """
+    Manages AI-powered SQL autocomplete suggestions using OpenAI.
+
+    This class handles:
+    - API key storage and validation
+    - Async requests to OpenAI
+    - Caching of suggestions for performance
+    - Integration with the editor's ghost text system
+    """
+
+    # Signal emitted when AI suggestion is ready
+    suggestion_ready = pyqtSignal(str, int)  # suggestion text, cursor position
+
+    # Signal emitted when there's an error
+    error_occurred = pyqtSignal(str)  # error message
+
+    def __init__(self):
+        super().__init__()
+        self._api_key: Optional[str] = None
+        self._client = None
+        self._enabled = True
+        self._model = "gpt-4o-mini"  # Default to cost-effective model
+        self._pending_request: Optional[threading.Thread] = None
+        self._request_timer: Optional[QTimer] = None
+        self._last_context = ""
+        self._cache: Dict[str, str] = {}
+        self._max_cache_size = 100
+        self._schema_context = ""  # Store table/column info for context
+
+        # Debounce settings
+        self._debounce_ms = 500  # Wait 500ms after last keystroke
+
+        # Load settings
+        self._load_settings()
+
+    def _get_settings_file(self) -> str:
+        """Get the path to the settings file."""
+        return os.path.join(os.path.expanduser('~'), '.sqlshell_settings.json')
+
+    def _load_settings(self) -> None:
+        """Load AI settings from the settings file."""
+        try:
+            settings_file = self._get_settings_file()
+            if os.path.exists(settings_file):
+                with open(settings_file, 'r') as f:
+                    settings = json.load(f)
+                ai_settings = settings.get('ai_autocomplete', {})
+                self._api_key = ai_settings.get('api_key')
+                self._enabled = ai_settings.get('enabled', True)
+                self._model = ai_settings.get('model', 'gpt-4o-mini')
+
+                # Initialize OpenAI client if API key is available
+                if self._api_key:
+                    self._init_client()
+        except Exception as e:
+            print(f"Error loading AI settings: {e}")
+
+    def _save_settings(self) -> None:
+        """Save AI settings to the settings file."""
+        try:
+            settings_file = self._get_settings_file()
+            settings = {}
+
+            # Load existing settings
+            if os.path.exists(settings_file):
+                with open(settings_file, 'r') as f:
+                    settings = json.load(f)
+
+            # Update AI settings
+            settings['ai_autocomplete'] = {
+                'api_key': self._api_key,
+                'enabled': self._enabled,
+                'model': self._model
+            }
+
+            # Save settings
+            with open(settings_file, 'w') as f:
+                json.dump(settings, f, indent=4)
+        except Exception as e:
+            print(f"Error saving AI settings: {e}")
+
+    def _init_client(self) -> bool:
+        """Initialize the OpenAI client."""
+        if not self._api_key:
+            return False
+
+        try:
+            from openai import OpenAI
+            self._client = OpenAI(api_key=self._api_key)
+            return True
+        except ImportError:
+            print("OpenAI library not installed. Run: pip install openai")
+            return False
+        except Exception as e:
+            print(f"Error initializing OpenAI client: {e}")
+            return False
+
+    @property
+    def is_available(self) -> bool:
+        """Check if AI autocomplete is available and enabled."""
+        return self._enabled and self._api_key is not None and self._client is not None
+
+    @property
+    def is_configured(self) -> bool:
+        """Check if an API key is configured (even if not valid)."""
+        return self._api_key is not None and len(self._api_key) > 0
+
+    def set_api_key(self, api_key: str) -> bool:
+        """
+        Set the OpenAI API key.
+
+        Args:
+            api_key: The OpenAI API key
+
+        Returns:
+            True if the key was set successfully
+        """
+        self._api_key = api_key if api_key and api_key.strip() else None
+        success = self._init_client() if self._api_key else True
+        self._save_settings()
+        return success
+
+    def get_api_key(self) -> Optional[str]:
+        """Get the current API key (masked for display)."""
+        if not self._api_key:
+            return None
+        # Return masked version for security
+        if len(self._api_key) > 8:
+            return self._api_key[:4] + "*" * (len(self._api_key) - 8) + self._api_key[-4:]
+        return "*" * len(self._api_key)
+
+    def get_raw_api_key(self) -> Optional[str]:
+        """Get the raw API key (for internal use only)."""
+        return self._api_key
+
+    def set_enabled(self, enabled: bool) -> None:
+        """Enable or disable AI autocomplete."""
+        self._enabled = enabled
+        self._save_settings()
+
+    def is_enabled(self) -> bool:
+        """Check if AI autocomplete is enabled."""
+        return self._enabled
+
+    def set_model(self, model: str) -> None:
+        """Set the OpenAI model to use."""
+        self._model = model
+        self._save_settings()
+
+    def get_model(self) -> str:
+        """Get the current OpenAI model."""
+        return self._model
+
+    def update_schema_context(self, tables: List[str], table_columns: Dict[str, List[str]]) -> None:
+        """
+        Update the schema context for better suggestions.
+
+        Args:
+            tables: List of table names
+            table_columns: Dictionary mapping table names to column lists
+        """
+        if not tables:
+            self._schema_context = ""
+            return
+
+        # Build a compact schema description
+        schema_parts = []
+        for table in tables:
+            columns = table_columns.get(table, [])
+            if columns:
+                cols_str = ", ".join(columns[:20])  # Limit columns to avoid token explosion
+                if len(columns) > 20:
+                    cols_str += f", ... ({len(columns) - 20} more)"
+                schema_parts.append(f"{table}({cols_str})")
+            else:
+                schema_parts.append(table)
+
+        self._schema_context = "Available tables: " + "; ".join(schema_parts[:15])  # Limit tables too
+        if len(tables) > 15:
+            self._schema_context += f" ... and {len(tables) - 15} more tables"
+
+    def request_suggestion(self, text_before_cursor: str, current_word: str,
+                           cursor_position: int, callback: Optional[Callable] = None) -> None:
+        """
+        Request an AI suggestion for the current context.
+
+        This method debounces requests to avoid excessive API calls.
+
+        Args:
+            text_before_cursor: The SQL text before the cursor
+            current_word: The current word being typed
+            cursor_position: The current cursor position
+            callback: Optional callback function for the result
+        """
+        if not self.is_available:
+            return
+
+        # Create cache key
+        cache_key = f"{text_before_cursor}|{current_word}"
+
+        # Check cache first
+        if cache_key in self._cache:
+            suggestion = self._cache[cache_key]
+            if suggestion:
+                self.suggestion_ready.emit(suggestion, cursor_position)
+            return
+
+        # Cancel any pending request
+        if self._request_timer:
+            self._request_timer.stop()
+
+        # Store context for debounced request
+        self._last_context = (text_before_cursor, current_word, cursor_position, callback)
+
+        # Create debounce timer
+        self._request_timer = QTimer()
+        self._request_timer.setSingleShot(True)
+        self._request_timer.timeout.connect(self._execute_request)
+        self._request_timer.start(self._debounce_ms)
+
+    def _execute_request(self) -> None:
+        """Execute the actual API request in a background thread."""
+        if not self._last_context:
+            return
+
+        text_before_cursor, current_word, cursor_position, callback = self._last_context
+
+        print(f"[AI] Executing request, context length: {len(text_before_cursor)}")
+
+        # Run API request in background thread
+        thread = threading.Thread(
+            target=self._fetch_suggestion,
+            args=(text_before_cursor, current_word, cursor_position, callback),
+            daemon=True
+        )
+        thread.start()
+        self._pending_request = thread
+
+    def _fetch_suggestion(self, text_before_cursor: str, current_word: str,
+                          cursor_position: int, callback: Optional[Callable]) -> None:
+        """Fetch suggestion from OpenAI API (runs in background thread)."""
+        try:
+            if not self._client:
+                return
+
+            # Build the prompt
+            prompt = self._build_prompt(text_before_cursor, current_word)
+
+            # Make the API call
+            response = self._client.chat.completions.create(
+                model=self._model,
+                messages=[
+                    {
+                        "role": "system",
+                        "content": (
+                            "You are a SQL autocomplete assistant. Complete the SQL query "
+                            "based on the context. Return ONLY the completion text that should "
+                            "be inserted after the cursor - no explanation, no markdown, no "
+                            "code blocks. If the user is mid-word, complete that word. "
+                            "Keep completions concise and contextually appropriate. "
+                            "If unsure, provide a common SQL pattern that fits the context."
+                        )
+                    },
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
+                ],
+                max_tokens=100,
+                temperature=0.3,  # Lower temperature for more predictable completions
+                stop=[";", "\n\n"]  # Stop at statement end or double newline
+            )
+
+            suggestion = response.choices[0].message.content.strip()
+
+            # Clean up suggestion
+            suggestion = self._clean_suggestion(suggestion, current_word)
+
+            if suggestion:
+                # Cache the result
+                cache_key = f"{text_before_cursor}|{current_word}"
+                self._add_to_cache(cache_key, suggestion)
+
+                print(f"[AI] Got suggestion: '{suggestion[:50]}...' at position {cursor_position}")
+
+                # Emit signal (Qt will handle thread safety)
+                self.suggestion_ready.emit(suggestion, cursor_position)
+
+                # Call callback if provided
+                if callback:
+                    callback(suggestion, cursor_position)
+
+        except Exception as e:
+            error_msg = str(e)
+            # Don't spam errors for rate limits or network issues
+            if "rate" not in error_msg.lower() and "connection" not in error_msg.lower():
+                print(f"AI autocomplete error: {e}")
+            self.error_occurred.emit(error_msg)
+
+    def _build_prompt(self, text_before_cursor: str, current_word: str) -> str:
+        """Build the prompt for the AI model."""
+        parts = []
+
+        # Add schema context if available
+        if self._schema_context:
+            parts.append(self._schema_context)
+
+        # Add the SQL context
+        parts.append(f"SQL query so far:\n{text_before_cursor}")
+
+        if current_word:
+            parts.append(f"Currently typing: {current_word}")
+
+        parts.append("Complete the SQL (return only the completion text):")
+
+        return "\n\n".join(parts)
+
+    def _clean_suggestion(self, suggestion: str, current_word: str) -> str:
+        """Clean up the AI suggestion."""
+        if not suggestion:
+            return ""
+
+        # Remove markdown code blocks if present
+        if suggestion.startswith("```"):
+            lines = suggestion.split("\n")
+            # Find content between ``` markers
+            content_lines = []
+            in_code = False
+            for line in lines:
+                if line.startswith("```"):
+                    in_code = not in_code
+                    continue
+                if in_code or not suggestion.count("```"):
+                    content_lines.append(line)
+            suggestion = "\n".join(content_lines).strip()
+
+        # Remove leading/trailing quotes
+        suggestion = suggestion.strip('"\'`')
+
+        # If suggestion starts with the current word, remove it to avoid duplication
+        if current_word and suggestion.lower().startswith(current_word.lower()):
+            suggestion = suggestion[len(current_word):]
+
+        return suggestion.strip()
+
+    def _add_to_cache(self, key: str, value: str) -> None:
+        """Add a suggestion to the cache, managing cache size."""
+        if len(self._cache) >= self._max_cache_size:
+            # Remove oldest entries (simple FIFO)
+            keys_to_remove = list(self._cache.keys())[:self._max_cache_size // 2]
+            for k in keys_to_remove:
+                del self._cache[k]
+
+        self._cache[key] = value
+
+    def clear_cache(self) -> None:
+        """Clear the suggestion cache."""
+        self._cache.clear()
+
+    def cancel_pending_requests(self) -> None:
+        """Cancel any pending AI requests."""
+        if self._request_timer:
+            self._request_timer.stop()
+        self._last_context = None
+
+
+# Singleton instance
+_ai_manager: Optional[AIAutocompleteManager] = None
+
+
+def get_ai_autocomplete_manager() -> AIAutocompleteManager:
+    """Get the global AI autocomplete manager instance."""
+    global _ai_manager
+    if _ai_manager is None:
+        _ai_manager = AIAutocompleteManager()
+    return _ai_manager
+
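
For orientation, below is a minimal sketch of how a caller might wire this manager into a PyQt6 editor. The package's real integration lives in sqlshell/editor_integration.py and sqlshell/suggester_integration.py (not shown in this hunk), so the widget choice, the sample table and column names, and the naive current-word extraction are illustrative assumptions; only get_ai_autocomplete_manager, update_schema_context, request_suggestion, and the suggestion_ready / error_occurred signals come from the module above. A valid OpenAI key must already be configured via set_api_key for is_available to return True.

from PyQt6.QtWidgets import QApplication, QPlainTextEdit
from sqlshell.ai_autocomplete import get_ai_autocomplete_manager

app = QApplication([])
editor = QPlainTextEdit()  # stand-in for the real SQLShell editor widget
manager = get_ai_autocomplete_manager()

# Hypothetical schema; the real app would feed table/column metadata
# from its database layer.
manager.update_schema_context(
    ["orders", "customers"],
    {"orders": ["id", "customer_id", "total"], "customers": ["id", "name"]},
)

# suggestion_ready carries (suggestion_text, cursor_position); a real editor
# would render this as ghost text instead of printing it.
manager.suggestion_ready.connect(lambda text, pos: print(f"ghost text at {pos}: {text!r}"))
manager.error_occurred.connect(lambda msg: print(f"AI autocomplete error: {msg}"))

def on_text_changed():
    cursor = editor.textCursor()
    text_before = editor.toPlainText()[:cursor.position()]
    words = text_before.split()
    current_word = words[-1] if words else ""  # simplistic word extraction
    # Debounced internally: the API call fires ~500 ms after the last
    # keystroke, and previously seen contexts are answered from the cache.
    manager.request_suggestion(text_before, current_word, cursor.position())

editor.textChanged.connect(on_text_changed)
editor.show()
app.exec()

As _save_settings shows, configuration persists to ~/.sqlshell_settings.json under an 'ai_autocomplete' key holding api_key, enabled, and model, so the key is entered once per machine; note it is stored there in plain text, with masking applied only for display via get_api_key.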