sqlshell-0.1.9-py3-none-any.whl → sqlshell-0.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of sqlshell has been flagged as potentially problematic.
- sqlshell/LICENSE +21 -0
- sqlshell/MANIFEST.in +6 -0
- sqlshell/README.md +59 -0
- sqlshell/__init__.py +3 -1
- sqlshell/context_suggester.py +765 -0
- sqlshell/create_test_data.py +106 -30
- sqlshell/db/database_manager.py +152 -6
- sqlshell/editor.py +68 -11
- sqlshell/main.py +1566 -656
- sqlshell/menus.py +171 -0
- sqlshell/query_tab.py +32 -3
- sqlshell/styles.py +257 -0
- sqlshell/suggester_integration.py +275 -0
- sqlshell/table_list.py +907 -0
- sqlshell/utils/__init__.py +8 -0
- sqlshell/utils/profile_entropy.py +347 -0
- sqlshell/utils/profile_keys.py +356 -0
- sqlshell-0.2.1.dist-info/METADATA +198 -0
- {sqlshell-0.1.9.dist-info → sqlshell-0.2.1.dist-info}/RECORD +22 -12
- {sqlshell-0.1.9.dist-info → sqlshell-0.2.1.dist-info}/WHEEL +1 -1
- sqlshell/setup.py +0 -42
- sqlshell-0.1.9.dist-info/METADATA +0 -122
- {sqlshell-0.1.9.dist-info → sqlshell-0.2.1.dist-info}/entry_points.txt +0 -0
- {sqlshell-0.1.9.dist-info → sqlshell-0.2.1.dist-info}/top_level.txt +0 -0
sqlshell/create_test_data.py
CHANGED
@@ -1,50 +1,126 @@
 import pandas as pd
 import numpy as np
 from datetime import datetime, timedelta
+import os
+
+# Set random seed for reproducibility
+np.random.seed(42)
+
+# Define output directory
+OUTPUT_DIR = 'test_data'
+os.makedirs(OUTPUT_DIR, exist_ok=True)
 
 def create_sales_data(num_records=1000):
-
-    # Generate random dates within the last year
+    # Generate dates for the last 365 days
     end_date = datetime.now()
     start_date = end_date - timedelta(days=365)
-    dates =
-
+    dates = [start_date + timedelta(days=x) for x in range(366)]
+    random_dates = np.random.choice(dates, num_records)
+
+    # Create product data
+    products = ['Laptop', 'Smartphone', 'Tablet', 'Monitor', 'Keyboard', 'Mouse', 'Headphones', 'Printer']
+    product_prices = {
+        'Laptop': (800, 2000),
+        'Smartphone': (400, 1200),
+        'Tablet': (200, 800),
+        'Monitor': (150, 500),
+        'Keyboard': (20, 150),
+        'Mouse': (10, 80),
+        'Headphones': (30, 300),
+        'Printer': (100, 400)
+    }
+
     # Generate random data
     data = {
-        '
-        '
-        '
-        '
-        '
-        '
+        'OrderID': range(1, num_records + 1),
+        'Date': random_dates,
+        'ProductID': np.random.randint(1, len(products) + 1, num_records),  # Changed to ProductID for joining
+        'Quantity': np.random.randint(1, 11, num_records),
+        'CustomerID': np.random.randint(1, 201, num_records),
+        'Region': np.random.choice(['North', 'South', 'East', 'West'], num_records)
     }
-
-    return pd.DataFrame(data)
 
-
-
-
+    # Calculate prices based on product
+    product_list = [products[pid-1] for pid in data['ProductID']]
+    data['Price'] = [np.random.uniform(product_prices[p][0], product_prices[p][1])
+                     for p in product_list]
+    data['TotalAmount'] = [price * qty for price, qty in zip(data['Price'], data['Quantity'])]
+
+    # Create DataFrame
+    df = pd.DataFrame(data)
+
+    # Round numerical columns
+    df['Price'] = df['Price'].round(2)
+    df['TotalAmount'] = df['TotalAmount'].round(2)
+
+    # Sort by Date
+    return df.sort_values('Date')
+
+def create_customer_data(num_customers=200):
+    # Generate customer data
     data = {
-        '
-        '
-        '
-        '
-        '
+        'CustomerID': range(1, num_customers + 1),
+        'FirstName': [f'Customer{i}' for i in range(1, num_customers + 1)],
+        'LastName': [f'Lastname{i}' for i in range(1, num_customers + 1)],
+        'Email': [f'customer{i}@example.com' for i in range(1, num_customers + 1)],
+        'JoinDate': [datetime.now() - timedelta(days=np.random.randint(1, 1000))
+                     for _ in range(num_customers)],
+        'CustomerType': np.random.choice(['Regular', 'Premium', 'VIP'], num_customers),
+        'CreditScore': np.random.randint(300, 851, num_customers)
     }
 
     return pd.DataFrame(data)
 
-def create_product_data(
-
-
+def create_product_data():
+    # Create detailed product information
+    products = {
+        'ProductID': range(1, 9),
+        'ProductName': ['Laptop', 'Smartphone', 'Tablet', 'Monitor', 'Keyboard', 'Mouse', 'Headphones', 'Printer'],
+        'Category': ['Computers', 'Mobile', 'Mobile', 'Accessories', 'Accessories', 'Accessories', 'Audio', 'Peripherals'],
+        'Brand': ['TechPro', 'MobileX', 'TabletCo', 'ViewMax', 'TypeMaster', 'ClickPro', 'SoundMax', 'PrintPro'],
+        'StockQuantity': np.random.randint(50, 500, 8),
+        'MinPrice': [800, 400, 200, 150, 20, 10, 30, 100],
+        'MaxPrice': [2000, 1200, 800, 500, 150, 80, 300, 400],
+        'Weight_kg': [2.5, 0.2, 0.5, 3.0, 0.8, 0.1, 0.3, 5.0],
+        'WarrantyMonths': [24, 12, 12, 36, 12, 12, 24, 12]
+    }
+
+    return pd.DataFrame(products)
+
+def create_large_numbers_data(num_records=100):
+    """Create a dataset with very large numbers for testing and visualization."""
 
-    # Generate random
+    # Generate random IDs
+    ids = range(1, num_records + 1)
+
+    # Create different columns with large numbers
     data = {
-        '
-        '
-        '
-        '
-        '
+        'ID': ids,
+        'Date': pd.date_range(start='2023-01-01', periods=num_records),
+        'SmallValue': np.random.randint(1, 1000, num_records),
+        'MediumValue': np.random.randint(10000, 9999999, num_records),
+        'LargeValue': [int(str(np.random.randint(1, 999)) + str(np.random.randint(0, 9999999)).zfill(7) +
+                       str(np.random.randint(0, 9999)).zfill(4)) for _ in range(num_records)],
+        'VeryLargeValue': [int(str(np.random.randint(100, 999)) + str(np.random.randint(1000000, 9999999)) +
+                           str(np.random.randint(1000000, 9999999))) for _ in range(num_records)],
+        'MassiveValue': [int('1' + ''.join([str(np.random.randint(0, 10)) for _ in range(15)])) for _ in range(num_records)],
+        'Category': np.random.choice(['A', 'B', 'C', 'D', 'E'], num_records),
+        'IsActive': np.random.choice([True, False], num_records, p=[0.8, 0.2])
    }
 
-
+    # Create exponential values for scientific notation
+    data['ExponentialValue'] = [float(f"{np.random.randint(1, 10)}.{np.random.randint(1, 100):02d}e{np.random.randint(10, 20)}")
+                                for _ in range(num_records)]
+
+    # Create monetary values (with decimals)
+    data['Revenue'] = [np.random.randint(1000000, 9999999999) + np.random.random() for _ in range(num_records)]
+    data['Budget'] = [np.random.randint(10000000, 999999999) + np.random.random() for _ in range(num_records)]
+
+    # Create DataFrame
+    df = pd.DataFrame(data)
+
+    # Round monetary values to 2 decimal places
+    df['Revenue'] = df['Revenue'].round(2)
+    df['Budget'] = df['Budget'].round(2)
+
+    return df
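Note: the generators above are built to be joined — create_sales_data now emits a ProductID that lines up with create_product_data's ProductID column (per the "Changed to ProductID for joining" comment). A minimal sketch exercising that join through DuckDB, the engine sqlshell uses for DataFrame queries; the import path and record count are illustrative assumptions:

import duckdb
from sqlshell.create_test_data import create_sales_data, create_product_data

sales = create_sales_data(num_records=500)
products = create_product_data()

con = duckdb.connect()            # in-memory database
con.register('sales', sales)      # expose the DataFrames as SQL views
con.register('products', products)
print(con.execute("""
    SELECT p.ProductName, ROUND(SUM(s.TotalAmount), 2) AS revenue
    FROM sales s
    JOIN products p ON s.ProductID = p.ProductID
    GROUP BY p.ProductName
    ORDER BY revenue DESC
""").fetchdf())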
sqlshell/db/database_manager.py
CHANGED
@@ -2,6 +2,7 @@ import os
 import sqlite3
 import pandas as pd
 import duckdb
+from pathlib import Path
 
 class DatabaseManager:
     """
@@ -47,13 +48,14 @@
         self.connection_type = None
         self.database_path = None  # Clear the database path
 
-    def open_database(self, filename):
+    def open_database(self, filename, load_all_tables=True):
         """
         Open a database connection to the specified file.
         Detects whether it's a SQLite or DuckDB database.
 
         Args:
             filename: Path to the database file
+            load_all_tables: Whether to automatically load all tables from the database
 
         Returns:
             True if successful, False otherwise
@@ -64,6 +66,10 @@
         # Close any existing connection
         self.close_connection()
 
+        # Clear any existing loaded tables
+        self.loaded_tables = {}
+        self.table_columns = {}
+
         try:
             if self.is_sqlite_db(filename):
                 self.conn = sqlite3.connect(filename)
@@ -75,8 +81,9 @@
             # Store the database path
             self.database_path = os.path.abspath(filename)
 
-            # Load tables from the database
-
+            # Load tables from the database if requested
+            if load_all_tables:
+                self.load_database_tables()
             return True
         except (sqlite3.Error, duckdb.Error) as e:
             self.conn = None
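Note: combined with load_specific_table (added at the end of this file's diff), the new load_all_tables flag enables a lazy-loading flow — open the connection without registering every table, then pull in only the tables that are needed. A hypothetical usage sketch; the bare DatabaseManager() construction and the database path are assumptions, while the method names come from this diff:

from sqlshell.db.database_manager import DatabaseManager

mgr = DatabaseManager()  # assumed no-arg constructor
if mgr.open_database('analytics.duckdb', load_all_tables=False):  # placeholder path
    # Register metadata for just the tables we care about
    for name in ('orders', 'customers'):
        if not mgr.load_specific_table(name):
            print(f"Table {name!r} not found in the database")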
@@ -212,7 +219,7 @@
         Load data from a file into the database.
 
         Args:
-            file_path: Path to the data file (Excel, CSV, Parquet)
+            file_path: Path to the data file (Excel, CSV, Parquet, Delta)
 
         Returns:
             Tuple of (table_name, DataFrame) for the loaded data
@@ -221,8 +228,23 @@
             ValueError: If the file format is unsupported or there's an error
         """
         try:
+            # Check if this is a Delta table (folder with _delta_log)
+            delta_path = Path(file_path)
+            is_delta_table = (delta_path.is_dir() and
+                              (delta_path / '_delta_log').exists()) or file_path.endswith('.delta')
+
             # Read the file into a DataFrame, using optimized loading strategies
-            if
+            if is_delta_table:
+                # Read as Delta table using deltalake library
+                try:
+                    # Load the Delta table
+                    import deltalake
+                    delta_table = deltalake.DeltaTable(file_path)
+                    # Convert to pandas DataFrame
+                    df = delta_table.to_pandas()
+                except Exception as e:
+                    raise ValueError(f"Error loading Delta table: {str(e)}")
+            elif file_path.endswith(('.xlsx', '.xls')):
                 # Try to use a streaming approach for Excel files
                 try:
                     # For Excel files, we first check if it's a large file
@@ -279,6 +301,11 @@
 
             # Generate table name from file name
             base_name = os.path.splitext(os.path.basename(file_path))[0]
+
+            # For directories like Delta tables, use the directory name
+            if os.path.isdir(file_path):
+                base_name = os.path.basename(file_path)
+
             table_name = self.sanitize_table_name(base_name)
 
             # Ensure unique table name
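Note: the Delta-table heuristic above (a directory containing _delta_log, or any path ending in .delta) is duplicated verbatim in reload_table below. Distilled into a standalone sketch; the helper name and example path are hypothetical, and reading requires the optional deltalake package:

from pathlib import Path

def looks_like_delta_table(file_path: str) -> bool:
    # A Delta table is a directory containing a _delta_log folder,
    # or (by this diff's convention) any path ending in '.delta'
    p = Path(file_path)
    return (p.is_dir() and (p / '_delta_log').exists()) or file_path.endswith('.delta')

if looks_like_delta_table('test_data/sales.delta'):  # placeholder path
    import deltalake
    df = deltalake.DeltaTable('test_data/sales.delta').to_pandas()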
@@ -374,6 +401,68 @@
         except Exception as e:
             raise Exception(f"Error previewing table: {str(e)}")
 
+    def reload_table(self, table_name):
+        """
+        Reload a table's data from its source file.
+
+        Args:
+            table_name: Name of the table to reload
+
+        Returns:
+            Tuple of (bool, message) indicating success/failure and a message
+
+        Raises:
+            ValueError: If the table cannot be reloaded
+        """
+        if not table_name in self.loaded_tables:
+            return False, f"Table '{table_name}' not found"
+
+        file_path = self.loaded_tables[table_name]
+
+        # Check if this is a file-based table
+        if file_path in ['database', 'query_result']:
+            return False, f"Cannot reload '{table_name}' because it's not a file-based table"
+
+        try:
+            # Check if the file still exists
+            if not os.path.exists(file_path):
+                return False, f"Source file '{file_path}' no longer exists"
+
+            # Store the original table name
+            original_name = table_name
+
+            # Remove the existing table
+            self.remove_table(table_name)
+
+            # Check if this is a Delta table
+            delta_path = Path(file_path)
+            is_delta_table = (delta_path.is_dir() and
+                              (delta_path / '_delta_log').exists()) or file_path.endswith('.delta')
+
+            # Load the file with the original table name
+            df = None
+            if is_delta_table:
+                # Read as Delta table
+                import deltalake
+                delta_table = deltalake.DeltaTable(file_path)
+                df = delta_table.to_pandas()
+            elif file_path.endswith(('.xlsx', '.xls')):
+                df = pd.read_excel(file_path)
+            elif file_path.endswith('.csv'):
+                df = pd.read_csv(file_path)
+            elif file_path.endswith('.parquet'):
+                df = pd.read_parquet(file_path)
+            else:
+                return False, "Unsupported file format"
+
+            # Register the dataframe with the original name
+            self.register_dataframe(df, original_name, file_path)
+
+            return True, f"Table '{table_name}' reloaded successfully"
+
+        except Exception as e:
+            return False, f"Error reloading table: {str(e)}"
+
     def rename_table(self, old_name, new_name):
         """
         Rename a table in the database.
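Note: reload_table reports failure through its (bool, message) return value rather than raising, so callers can surface the message directly in the UI. Continuing the manager sketch from earlier; the CSV path is a placeholder and load_file's return shape comes from its docstring above:

table_name, df = mgr.load_file('test_data/sales.csv')  # registers a file-backed table
# ... the CSV is later edited on disk ...
ok, message = mgr.reload_table(table_name)
print(message)  # e.g. "Table 'sales' reloaded successfully", or an error string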
@@ -688,4 +777,61 @@
                     column_data_types[col_name] = data_type
             except Exception:
                 # Ignore errors in type detection - this is just for enhancement
-                pass
+                pass
+
+    def load_specific_table(self, table_name):
+        """
+        Load metadata for a specific table from the database.
+        This is used when we know which tables we want to load rather than loading all tables.
+
+        Args:
+            table_name: Name of the table to load
+
+        Returns:
+            Boolean indicating if the table was found and loaded
+        """
+        if not self.is_connected():
+            return False
+
+        try:
+            if self.connection_type == 'sqlite':
+                # Check if the table exists in SQLite
+                cursor = self.conn.cursor()
+                cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
+                result = cursor.fetchone()
+
+                if result:
+                    # Get column names for the table
+                    try:
+                        column_query = f"PRAGMA table_info({table_name})"
+                        columns = cursor.execute(column_query).fetchall()
+                        self.table_columns[table_name] = [col[1] for col in columns]  # Column name is at index 1
+                    except Exception:
+                        self.table_columns[table_name] = []
+
+                    # Register the table
+                    self.loaded_tables[table_name] = 'database'
+                    return True
+
+            else:  # duckdb
+                # Check if the table exists in DuckDB
+                query = f"SELECT table_name FROM information_schema.tables WHERE table_name='{table_name}' AND table_schema='main'"
+                result = self.conn.execute(query).fetchdf()
+
+                if not result.empty:
+                    # Get column names for the table
+                    try:
+                        column_query = f"SELECT column_name FROM information_schema.columns WHERE table_name='{table_name}' AND table_schema='main'"
+                        columns = self.conn.execute(column_query).fetchdf()
+                        self.table_columns[table_name] = columns['column_name'].tolist()
+                    except Exception:
+                        self.table_columns[table_name] = []
+
+                    # Register the table
+                    self.loaded_tables[table_name] = 'database'
+                    return True
+
+            return False
+
+        except Exception:
+            return False
sqlshell/editor.py
CHANGED
@@ -496,15 +496,68 @@ class SQLEditor(QPlainTextEdit):
         popup = self.completer.popup()
         popup.setCurrentIndex(self.completer.completionModel().index(0, 0))
 
-
-
-
-
-
-
-
+        try:
+            # Calculate position for the popup
+            cr = self.cursorRect()
+
+            # Ensure cursorRect is valid
+            if not cr.isValid() or cr.x() < 0 or cr.y() < 0:
+                # Try to recompute using the text cursor
+                cursor = self.textCursor()
+                cr = self.cursorRect(cursor)
+
+                # If still invalid, use a default position
+                if not cr.isValid() or cr.x() < 0 or cr.y() < 0:
+                    pos = self.mapToGlobal(self.pos())
+                    cr = QRect(pos.x() + 10, pos.y() + 10, 10, self.fontMetrics().height())
+
+            # Calculate width for the popup that fits the content
+            suggested_width = popup.sizeHintForColumn(0) + popup.verticalScrollBar().sizeHint().width()
+            # Ensure minimum width
+            popup_width = max(suggested_width, 200)
+            cr.setWidth(popup_width)
+
+            # Show the popup at the correct position
+            self.completer.complete(cr)
+        except Exception as e:
+            # In case of any error, try a more direct approach
+            print(f"Error positioning completion popup: {e}")
+            try:
+                cursor_pos = self.mapToGlobal(self.cursorRect().bottomLeft())
+                popup.move(cursor_pos)
+                popup.show()
+            except:
+                # Last resort - if all else fails, hide the popup to avoid showing it in the wrong place
+                popup.hide()
 
     def keyPressEvent(self, event):
+        # Check for Ctrl+Enter first, which should take precedence over other behaviors
+        if event.key() == Qt.Key.Key_Return and (event.modifiers() & Qt.KeyboardModifier.ControlModifier):
+            # If autocomplete popup is showing, hide it
+            if self.completer and self.completer.popup().isVisible():
+                self.completer.popup().hide()
+
+            # Cancel any pending autocomplete timers
+            if hasattr(self, '_completion_timer') and self._completion_timer.isActive():
+                self._completion_timer.stop()
+
+            # Let the main window handle query execution
+            # Important: We need to emit event to parent to trigger execution
+            # and prevent it from being treated as an autocomplete selection
+            event.accept()  # Mark the event as handled
+
+            # Find the parent SQLShell instance and call its execute_query method
+            parent = self
+            while parent is not None:
+                if hasattr(parent, 'execute_query'):
+                    parent.execute_query()
+                    return
+                parent = parent.parent()
+
+            # If we couldn't find the execute_query method, pass the event up
+            super().keyPressEvent(event)
+            return
+
         # Handle completer popup navigation
         if self.completer and self.completer.popup().isVisible():
             # Handle Tab key to complete the current selection
@@ -816,11 +869,15 @@
         # Get table name from dropped text
         text = event.mimeData().text()
 
-        #
-        if
-            table_name =
+        # Try to extract table name from custom mime data if available
+        if event.mimeData().hasFormat('application/x-sqlshell-tablename'):
+            table_name = bytes(event.mimeData().data('application/x-sqlshell-tablename')).decode()
         else:
-
+            # Extract actual table name (if it includes parentheses)
+            if " (" in text:
+                table_name = text.split(" (")[0]
+            else:
+                table_name = text
 
         # Get current cursor position and surrounding text
         cursor = self.textCursor()