chdb-3.6.0-cp38-abi3-macosx_10_15_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chdb might be problematic.

@@ -0,0 +1,505 @@
1
+ from typing import Optional, Any
2
+ from chdb import _chdb
3
+
4
+ # Try to import pyarrow; if that fails, raise ImportError with an installation suggestion
5
+ try:
6
+ import pyarrow as pa # noqa
7
+ except ImportError as e:
8
+ print(f"ImportError: {e}")
9
+ print('Please install pyarrow via "pip install pyarrow"')
10
+ raise ImportError("Failed to import pyarrow") from None
11
+
12
+
13
+ _arrow_format = {"dataframe", "arrowtable"}
14
+ _process_result_format_funs = {
15
+ "dataframe": lambda x: to_df(x),
16
+ "arrowtable": lambda x: to_arrowTable(x),
17
+ }
18
+
19
+
20
+ # return pyarrow table
21
+ def to_arrowTable(res):
22
+ """convert res to arrow table"""
23
+ # Try to import pyarrow and pandas; if that fails, raise ImportError with an installation suggestion
24
+ try:
25
+ import pyarrow as pa # noqa
26
+ import pandas as pd # noqa
27
+ except ImportError as e:
28
+ print(f"ImportError: {e}")
29
+ print('Please install pyarrow and pandas via "pip install pyarrow pandas"')
30
+ raise ImportError("Failed to import pyarrow or pandas") from None
31
+ if len(res) == 0:
32
+ return pa.Table.from_batches([], schema=pa.schema([]))
33
+ return pa.RecordBatchFileReader(res.bytes()).read_all()
34
+
35
+
36
+ # return pandas dataframe
37
+ def to_df(r):
38
+ """convert arrow table to Dataframe"""
39
+ t = to_arrowTable(r)
40
+ return t.to_pandas(use_threads=True)
41
+
42
+
43
+ class StreamingResult:
44
+ def __init__(self, c_result, conn, result_func, supports_record_batch):
45
+ self._result = c_result
46
+ self._result_func = result_func
47
+ self._conn = conn
48
+ self._exhausted = False
49
+ self._supports_record_batch = supports_record_batch
50
+
51
+ def fetch(self):
52
+ """Fetch next chunk of streaming results"""
53
+ if self._exhausted:
54
+ return None
55
+
56
+ try:
57
+ result = self._conn.streaming_fetch_result(self._result)
58
+ if result is None or result.rows_read() == 0:
59
+ self._exhausted = True
60
+ return None
61
+ return self._result_func(result)
62
+ except Exception as e:
63
+ self._exhausted = True
64
+ raise RuntimeError(f"Streaming query failed: {str(e)}") from e
65
+
66
+ def __iter__(self):
67
+ return self
68
+
69
+ def __next__(self):
70
+ if self._exhausted:
71
+ raise StopIteration
72
+
73
+ chunk = self.fetch()
74
+ if chunk is None:
75
+ self._exhausted = True
76
+ raise StopIteration
77
+
78
+ return chunk
79
+
80
+ def __enter__(self):
81
+ return self
82
+
83
+ def __exit__(self, exc_type, exc_val, exc_tb):
84
+ self.cancel()
85
+
86
+ def close(self):
87
+ self.cancel()
88
+
89
+ def cancel(self):
90
+ if not self._exhausted:
91
+ self._exhausted = True
92
+ try:
93
+ self._conn.streaming_cancel_query(self._result)
94
+ except Exception as e:
95
+ raise RuntimeError(f"Failed to cancel streaming query: {str(e)}") from e
96
+
97
+ def record_batch(self, rows_per_batch: int = 1000000) -> pa.RecordBatchReader:
98
+ """
99
+ Create a PyArrow RecordBatchReader from this StreamingResult.
100
+
101
+ This method requires that the StreamingResult was created with arrow format.
102
+ It wraps the streaming result with ChdbRecordBatchReader to provide efficient
103
+ batching with configurable batch sizes.
104
+
105
+ Args:
106
+ rows_per_batch (int): Number of rows per batch. Defaults to 1000000.
107
+
108
+ Returns:
109
+ pa.RecordBatchReader: PyArrow RecordBatchReader for efficient streaming
110
+
111
+ Raises:
112
+ ValueError: If the StreamingResult was not created with arrow format
113
+ """
114
+ if not self._supports_record_batch:
115
+ raise ValueError(
116
+ "record_batch() can only be used with arrow format. "
117
+ "Please use format='Arrow' when calling send_query."
118
+ )
119
+
120
+ chdb_reader = ChdbRecordBatchReader(self, rows_per_batch)
121
+ return pa.RecordBatchReader.from_batches(chdb_reader.schema(), chdb_reader)
122
+
123
+
124
+ class ChdbRecordBatchReader:
125
+ """
126
+ A PyArrow RecordBatchReader wrapper for chdb StreamingResult.
127
+
128
+ This class provides an efficient way to read large result sets as PyArrow RecordBatches
129
+ with configurable batch sizes to optimize memory usage and performance.
130
+ """
131
+
132
+ def __init__(self, chdb_stream_result, batch_size_rows):
133
+ self._stream_result = chdb_stream_result
134
+ self._schema = None
135
+ self._closed = False
136
+ self._pending_batches = []
137
+ self._accumulator = []
138
+ self._batch_size_rows = batch_size_rows
139
+ self._current_rows = 0
140
+ self._first_batch = None
141
+ self._first_batch_consumed = True
142
+ self._schema = self.schema()
143
+
144
+ def schema(self):
145
+ if self._schema is None:
146
+ # Get the first chunk to determine schema
147
+ chunk = self._stream_result.fetch()
148
+ if chunk is not None:
149
+ arrow_bytes = chunk.bytes()
150
+ reader = pa.RecordBatchFileReader(arrow_bytes)
151
+ self._schema = reader.schema
152
+
153
+ table = reader.read_all()
154
+ if table.num_rows > 0:
155
+ batches = table.to_batches()
156
+ self._first_batch = batches[0]
157
+ if len(batches) > 1:
158
+ self._pending_batches = batches[1:]
159
+ self._first_batch_consumed = False
160
+ else:
161
+ self._first_batch = None
162
+ self._first_batch_consumed = True
163
+ else:
164
+ self._schema = pa.schema([])
165
+ self._first_batch = None
166
+ self._first_batch_consumed = True
167
+ self._closed = True
168
+ return self._schema
169
+
170
+ def read_next_batch(self):
171
+ if self._accumulator:
172
+ result = self._accumulator.pop(0)
173
+ return result
174
+
175
+ if self._closed:
176
+ raise StopIteration
177
+
178
+ while True:
179
+ batch = None
180
+
181
+ # 1. Return the first batch if not consumed yet
182
+ if not self._first_batch_consumed:
183
+ self._first_batch_consumed = True
184
+ batch = self._first_batch
185
+
186
+ # 2. Check pending batches from current chunk
187
+ elif self._pending_batches:
188
+ batch = self._pending_batches.pop(0)
189
+
190
+ # 3. Fetch new chunk from chdb stream
191
+ else:
192
+ chunk = self._stream_result.fetch()
193
+ if chunk is None:
194
+ # No more data - return accumulated batches if any
195
+ break
196
+
197
+ arrow_bytes = chunk.bytes()
198
+ if not arrow_bytes:
199
+ continue
200
+
201
+ reader = pa.RecordBatchFileReader(arrow_bytes)
202
+ table = reader.read_all()
203
+
204
+ if table.num_rows > 0:
205
+ batches = table.to_batches()
206
+ batch = batches[0]
207
+ if len(batches) > 1:
208
+ self._pending_batches = batches[1:]
209
+ else:
210
+ continue
211
+
212
+ # Process the batch if we got one
213
+ if batch is not None:
214
+ self._accumulator.append(batch)
215
+ self._current_rows += batch.num_rows
216
+
217
+ # If accumulated enough rows, return combined batch
218
+ if self._current_rows >= self._batch_size_rows:
219
+ if len(self._accumulator) == 1:
220
+ result = self._accumulator.pop(0)
221
+ else:
222
+ if hasattr(pa, 'concat_batches'):
223
+ result = pa.concat_batches(self._accumulator)
224
+ self._accumulator = []
225
+ else:
226
+ result = self._accumulator.pop(0)
227
+
228
+ self._current_rows = 0
229
+ return result
230
+
231
+ # End of stream - return any accumulated batches
232
+ if self._accumulator:
233
+ if len(self._accumulator) == 1:
234
+ result = self._accumulator.pop(0)
235
+ else:
236
+ if hasattr(pa, 'concat_batches'):
237
+ result = pa.concat_batches(self._accumulator)
238
+ self._accumulator = []
239
+ else:
240
+ result = self._accumulator.pop(0)
241
+
242
+ self._current_rows = 0
243
+ self._closed = True
244
+ return result
245
+
246
+ # No more data
247
+ self._closed = True
248
+ raise StopIteration
249
+
250
+ def close(self):
251
+ if not self._closed:
252
+ self._stream_result.close()
253
+ self._closed = True
254
+
255
+ def __iter__(self):
256
+ return self
257
+
258
+ def __next__(self):
259
+ return self.read_next_batch()
260
+
261
+
262
+ class Connection:
263
+ def __init__(self, connection_string: str):
264
+ # print("Connection", connection_string)
265
+ self._cursor: Optional[Cursor] = None
266
+ self._conn = _chdb.connect(connection_string)
267
+
268
+ def cursor(self) -> "Cursor":
269
+ self._cursor = Cursor(self._conn)
270
+ return self._cursor
271
+
272
+ def query(self, query: str, format: str = "CSV") -> Any:
273
+ lower_output_format = format.lower()
274
+ result_func = _process_result_format_funs.get(lower_output_format, lambda x: x)
275
+ if lower_output_format in _arrow_format:
276
+ format = "Arrow"
277
+
278
+ result = self._conn.query(query, format)
279
+ return result_func(result)
280
+
281
+ def send_query(self, query: str, format: str = "CSV") -> StreamingResult:
282
+ lower_output_format = format.lower()
283
+ supports_record_batch = lower_output_format == "arrow"
284
+ result_func = _process_result_format_funs.get(lower_output_format, lambda x: x)
285
+ if lower_output_format in _arrow_format:
286
+ format = "Arrow"
287
+
288
+ c_stream_result = self._conn.send_query(query, format)
289
+ return StreamingResult(c_stream_result, self._conn, result_func, supports_record_batch)
290
+
291
+ def close(self) -> None:
292
+ # print("close")
293
+ if self._cursor:
294
+ self._cursor.close()
295
+ self._conn.close()
296
+
297
+
298
+ class Cursor:
299
+ def __init__(self, connection):
300
+ self._conn = connection
301
+ self._cursor = self._conn.cursor()
302
+ self._current_table: Optional[pa.Table] = None
303
+ self._current_row: int = 0
304
+
305
+ def execute(self, query: str) -> None:
306
+ self._cursor.execute(query)
307
+ result_mv = self._cursor.get_memview()
308
+ if self._cursor.has_error():
309
+ raise Exception(self._cursor.error_message())
310
+ if self._cursor.data_size() == 0:
311
+ self._current_table = None
312
+ self._current_row = 0
313
+ self._column_names = []
314
+ self._column_types = []
315
+ return
316
+
317
+ # Parse JSON data
318
+ json_data = result_mv.tobytes().decode("utf-8")
319
+ import json
320
+
321
+ try:
322
+ # First line contains column names
323
+ # Second line contains column types
324
+ # Following lines contain data
325
+ lines = json_data.strip().split("\n")
326
+ if len(lines) < 2:
327
+ self._current_table = None
328
+ self._current_row = 0
329
+ self._column_names = []
330
+ self._column_types = []
331
+ return
332
+
333
+ self._column_names = json.loads(lines[0])
334
+ self._column_types = json.loads(lines[1])
335
+
336
+ # Convert data rows
337
+ rows = []
338
+ for line in lines[2:]:
339
+ if not line.strip():
340
+ continue
341
+ row_data = json.loads(line)
342
+ converted_row = []
343
+ for val, type_info in zip(row_data, self._column_types):
344
+ # Handle NULL values first
345
+ if val is None:
346
+ converted_row.append(None)
347
+ continue
348
+
349
+ # Basic type conversion
350
+ try:
351
+ if type_info.startswith("Int") or type_info.startswith("UInt"):
352
+ converted_row.append(int(val))
353
+ elif type_info.startswith("Float"):
354
+ converted_row.append(float(val))
355
+ elif type_info == "Bool":
356
+ converted_row.append(bool(val))
357
+ elif type_info == "String" or type_info == "FixedString":
358
+ converted_row.append(str(val))
359
+ elif type_info.startswith("DateTime"):
360
+ from datetime import datetime
361
+
362
+ # Check if the value is numeric (timestamp)
363
+ val_str = str(val)
364
+ if val_str.replace(".", "").isdigit():
365
+ converted_row.append(datetime.fromtimestamp(float(val)))
366
+ else:
367
+ # Handle datetime string formats
368
+ if "." in val_str: # Has microseconds
369
+ converted_row.append(
370
+ datetime.strptime(
371
+ val_str, "%Y-%m-%d %H:%M:%S.%f"
372
+ )
373
+ )
374
+ else: # No microseconds
375
+ converted_row.append(
376
+ datetime.strptime(val_str, "%Y-%m-%d %H:%M:%S")
377
+ )
378
+ elif type_info.startswith("Date"):
379
+ from datetime import date, datetime
380
+
381
+ # Check if the value is numeric (days since epoch)
382
+ val_str = str(val)
383
+ if val_str.isdigit():
384
+ converted_row.append(
385
+ date.fromtimestamp(float(val) * 86400)
386
+ )
387
+ else:
388
+ # Handle date string format
389
+ converted_row.append(
390
+ datetime.strptime(val_str, "%Y-%m-%d").date()
391
+ )
392
+ else:
393
+ # For unsupported types, keep as string
394
+ converted_row.append(str(val))
395
+ except (ValueError, TypeError):
396
+ # If conversion fails, keep original value as string
397
+ converted_row.append(str(val))
398
+ rows.append(tuple(converted_row))
399
+
400
+ self._current_table = rows
401
+ self._current_row = 0
402
+
403
+ except json.JSONDecodeError as e:
404
+ raise Exception(f"Failed to parse JSON data: {e}")
405
+
406
+ def commit(self) -> None:
407
+ self._cursor.commit()
408
+
409
+ def fetchone(self) -> Optional[tuple]:
410
+ if not self._current_table or self._current_row >= len(self._current_table):
411
+ return None
412
+
413
+ # Now self._current_table is a list of row tuples
414
+ row = self._current_table[self._current_row]
415
+ self._current_row += 1
416
+ return row
417
+
418
+ def fetchmany(self, size: int = 1) -> tuple:
419
+ if not self._current_table:
420
+ return tuple()
421
+
422
+ rows = []
423
+ for _ in range(size):
424
+ if (row := self.fetchone()) is None:
425
+ break
426
+ rows.append(row)
427
+ return tuple(rows)
428
+
429
+ def fetchall(self) -> tuple:
430
+ if not self._current_table:
431
+ return tuple()
432
+
433
+ remaining_rows = []
434
+ while (row := self.fetchone()) is not None:
435
+ remaining_rows.append(row)
436
+ return tuple(remaining_rows)
437
+
438
+ def close(self) -> None:
439
+ self._cursor.close()
440
+
441
+ def __iter__(self):
442
+ return self
443
+
444
+ def __next__(self) -> tuple:
445
+ row = self.fetchone()
446
+ if row is None:
447
+ raise StopIteration
448
+ return row
449
+
450
+ def column_names(self) -> list:
451
+ """Return a list of column names from the last executed query"""
452
+ return self._column_names if hasattr(self, "_column_names") else []
453
+
454
+ def column_types(self) -> list:
455
+ """Return a list of column types from the last executed query"""
456
+ return self._column_types if hasattr(self, "_column_types") else []
457
+
458
+ @property
459
+ def description(self) -> list:
460
+ """
461
+ Return a description of the columns as per DB-API 2.0
462
+ Returns a list of 7-item tuples, each containing:
463
+ (name, type_code, display_size, internal_size, precision, scale, null_ok)
464
+ where only name and type_code are provided
465
+ """
466
+ if not hasattr(self, "_column_names") or not self._column_names:
467
+ return []
468
+
469
+ return [
470
+ (name, type_info, None, None, None, None, None)
471
+ for name, type_info in zip(self._column_names, self._column_types)
472
+ ]
473
+
474
+
475
+ def connect(connection_string: str = ":memory:") -> Connection:
476
+ """
477
+ Create a connection to the chDB background server.
478
+ Only one open connection is allowed per process. Use `close` to close the connection.
479
+ If called with the same connection string, the same connection object will be returned.
480
+ Use the connection object's `cursor` method to create a cursor object for executing queries.
481
+
482
+ Args:
483
+ connection_string (str, optional): Connection string. Defaults to ":memory:".
484
+ File paths are also supported, for example:
485
+ - ":memory:" (for in-memory database)
486
+ - "test.db" (for relative path)
487
+ - "file:test.db" (same as above)
488
+ - "/path/to/test.db" (for absolute path)
489
+ - "file:/path/to/test.db" (same as above)
490
+ - "file:test.db?param1=value1&param2=value2" (for relative path with query params)
491
+ - "file::memory:?verbose&log-level=test" (for in-memory database with query params)
492
+ - "///path/to/test.db?param1=value1&param2=value2" (for absolute path)
493
+
494
+ Connection string args handling:
495
+ The connection string can contain query params like "file:test.db?param1=value1&param2=value2".
496
+ "param1=value1" will be passed to the ClickHouse engine as a startup argument.
497
+
498
+ For more details, see `clickhouse local --help --verbose`
499
+ Some special args handling:
500
+ - "mode=ro" would be "--readonly=1" for clickhouse (read-only mode)
501
+
502
+ Returns:
503
+ Connection: Connection object
504
+ """
505
+ return Connection(connection_string)
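As a quick reference for the connection API defined in this file, here is a minimal usage sketch. It assumes the package exposes this `connect` function as `chdb.connect`; `system.numbers` is a standard ClickHouse system table, and the column aliases are illustrative only.

```python
import chdb

# One open connection per process; ":memory:" keeps the database in memory.
conn = chdb.connect(":memory:")

# "dataframe" / "arrowtable" formats are converted through Arrow.
df = conn.query("SELECT number AS n FROM system.numbers LIMIT 5", "dataframe")
print(df)

# Streaming query; record_batch() requires the Arrow format.
stream = conn.send_query("SELECT number FROM system.numbers LIMIT 1000000", "Arrow")
with stream:
    reader = stream.record_batch(rows_per_batch=100000)
    for batch in reader:
        print(batch.num_rows)

# DB-API-style cursor.
cur = conn.cursor()
cur.execute("SELECT 1 AS one, 'x' AS letter")
print(cur.column_names(), cur.fetchall())

conn.close()
```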
chdb/udf/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .udf import chdb_udf, generate_udf
2
+
3
+ __all__ = ["chdb_udf", "generate_udf"]
chdb/udf/udf.py ADDED
@@ -0,0 +1,106 @@
1
+ import functools
2
+ import inspect
3
+ import os
4
+ import sys
5
+ import tempfile
6
+ import atexit
7
+ import shutil
8
+ import textwrap
9
+ from xml.etree import ElementTree as ET
10
+ import chdb
11
+
12
+
13
+ def generate_udf(func_name, args, return_type, udf_body):
14
+ # generate python script
15
+ with open(f"{chdb.g_udf_path}/{func_name}.py", "w") as f:
16
+ f.write(f"#!{sys.executable}\n")
17
+ f.write("import sys\n")
18
+ f.write("\n")
19
+ for line in udf_body.split("\n"):
20
+ f.write(f"{line}\n")
21
+ f.write("\n")
22
+ f.write("if __name__ == '__main__':\n")
23
+ f.write(" for line in sys.stdin:\n")
24
+ f.write(" args = line.strip().split('\t')\n")
25
+ for i, arg in enumerate(args):
26
+ f.write(f" {arg} = args[{i}]\n")
27
+ f.write(f" print({func_name}({', '.join(args)}))\n")
28
+ f.write(" sys.stdout.flush()\n")
29
+ os.chmod(f"{chdb.g_udf_path}/{func_name}.py", 0o755)
30
+ # generate xml file
31
+ xml_file = f"{chdb.g_udf_path}/udf_config.xml"
32
+ root = ET.Element("functions")
33
+ if os.path.exists(xml_file):
34
+ tree = ET.parse(xml_file)
35
+ root = tree.getroot()
36
+ function = ET.SubElement(root, "function")
37
+ ET.SubElement(function, "type").text = "executable"
38
+ ET.SubElement(function, "name").text = func_name
39
+ ET.SubElement(function, "return_type").text = return_type
40
+ ET.SubElement(function, "format").text = "TabSeparated"
41
+ ET.SubElement(function, "command").text = f"{func_name}.py"
42
+ for arg in args:
43
+ argument = ET.SubElement(function, "argument")
44
+ # We use TabSeparated format, so assume all arguments are strings
45
+ ET.SubElement(argument, "type").text = "String"
46
+ ET.SubElement(argument, "name").text = arg
47
+ tree = ET.ElementTree(root)
48
+ tree.write(xml_file)
49
+
50
+
51
+ def chdb_udf(return_type="String"):
52
+ """
53
+ Decorator for chDB Python UDFs (User Defined Functions).
54
+ 1. The function should be stateless. Only UDFs are supported, not UDAFs (User Defined Aggregate Functions).
55
+ 2. The default return type is String. To change it, pass the desired return type as an argument.
56
+ The return type should be one of the ClickHouse data types: https://clickhouse.com/docs/en/sql-reference/data-types
57
+ 3. The function takes arguments of type String. Since the input is TabSeparated, all arguments are strings.
58
+ 4. The function will be called for each line of input. Something like this:
59
+ ```
60
+ def sum_udf(lhs, rhs):
61
+ return int(lhs) + int(rhs)
62
+
63
+ for line in sys.stdin:
64
+ args = line.strip().split('\t')
65
+ lhs = args[0]
66
+ rhs = args[1]
67
+ print(sum_udf(lhs, rhs))
68
+ sys.stdout.flush()
69
+ ```
70
+ 5. The function should be a pure Python function. You SHOULD import all Python modules used INSIDE THE FUNCTION.
71
+ ```
72
+ def func_use_json(arg):
73
+ import json
74
+ ...
75
+ ```
76
+ 6. The Python interpreter used is the same one running the current script, obtained from `sys.executable`.
77
+ """
78
+
79
+ def decorator(func):
80
+ func_name = func.__name__
81
+ sig = inspect.signature(func)
82
+ args = list(sig.parameters.keys())
83
+ src = inspect.getsource(func)
84
+ src = textwrap.dedent(src)
85
+ udf_body = src.split("\n", 1)[1] # remove the first line "@chdb_udf()"
86
+ # create tmp dir and make sure the dir is deleted when the process exits
87
+ if chdb.g_udf_path == "":
88
+ chdb.g_udf_path = tempfile.mkdtemp()
89
+
90
+ # clean up the tmp dir on exit
91
+ @atexit.register
92
+ def _cleanup():
93
+ try:
94
+ shutil.rmtree(chdb.g_udf_path)
95
+ except: # noqa
96
+ pass
97
+
98
+ generate_udf(func_name, args, return_type, udf_body)
99
+
100
+ @functools.wraps(func)
101
+ def wrapper(*args, **kwargs):
102
+ return func(*args, **kwargs)
103
+
104
+ return wrapper
105
+
106
+ return decorator
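For context, a minimal sketch of how the decorator above is meant to be used. The `sum_udf` body mirrors the docstring example; that `chdb.query` picks up the UDF configuration written into `chdb.g_udf_path` is an assumption here, not something shown in this file.

```python
import chdb
from chdb.udf import chdb_udf


@chdb_udf(return_type="Int32")
def sum_udf(lhs, rhs):
    # Arguments arrive as TabSeparated strings, so convert explicitly.
    return int(lhs) + int(rhs)


# Assumes the query entry point loads the XML config generated under chdb.g_udf_path.
print(chdb.query("SELECT sum_udf(12, 22)"))
```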
chdb/utils/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ from .types import * # noqa: F403
2
+
3
+ __all__ = [ # noqa: F405
4
+ "flatten_dict",
5
+ "convert_to_columnar",
6
+ "infer_data_type",
7
+ "infer_data_types",
8
+ "trace",
9
+ ]
chdb/utils/trace.py ADDED
@@ -0,0 +1,74 @@
1
+ import functools
2
+ import inspect
3
+ import sys
4
+ import linecache
5
+ from datetime import datetime
6
+
7
+ enable_print = False
8
+
9
+
10
+ def print_lines(func):
11
+ if not enable_print:
12
+ return func
13
+
14
+ @functools.wraps(func)
15
+ def wrapper(*args, **kwargs):
16
+ # Get function name and determine if it's a method
17
+ is_method = inspect.ismethod(func) or (
18
+ len(args) > 0 and hasattr(args[0].__class__, func.__name__)
19
+ )
20
+ class_name = args[0].__class__.__name__ if is_method else None # type: ignore
21
+
22
+ # Get the source code of the function
23
+ try:
24
+ source_lines, start_line = inspect.getsourcelines(func)
25
+ except OSError:
26
+ # Handle cases where source might not be available
27
+ print(f"Warning: Could not get source for {func.__name__}")
28
+ return func(*args, **kwargs)
29
+
30
+ def trace(frame, event, arg):
31
+ if event == "line":
32
+ # Get the current line number and code
33
+ line_no = frame.f_lineno
34
+ line = linecache.getline(frame.f_code.co_filename, line_no).strip()
35
+
36
+ # Don't print decorator lines or empty lines
37
+ if line and not line.startswith("@"):
38
+ # Get local variables
39
+ local_vars = frame.f_locals.copy()
40
+ if is_method:
41
+ # Remove 'self' from local variables for clarity
42
+ local_vars.pop("self", None)
43
+
44
+ # Format timestamp
45
+ timestamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
46
+
47
+ # Create context string (class.method or function)
48
+ context = (
49
+ f"{class_name}.{func.__name__}" if class_name else func.__name__
50
+ )
51
+
52
+ # Print execution information
53
+ print(f"[{timestamp}] {context} line {line_no}: {line}")
54
+
55
+ # Print local variables if they exist and have changed
56
+ if local_vars:
57
+ vars_str = ", ".join(
58
+ f"{k}={repr(v)}" for k, v in local_vars.items()
59
+ )
60
+ print(f" Variables: {vars_str}")
61
+ return trace
62
+
63
+ # Set the trace function
64
+ sys.settrace(trace)
65
+
66
+ # Call the original function
67
+ result = func(*args, **kwargs)
68
+
69
+ # Disable tracing
70
+ sys.settrace(None)
71
+
72
+ return result
73
+
74
+ return wrapper
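A small sketch of the tracing helper defined above. Because `print_lines` checks `enable_print` at decoration time, the flag has to be set before the decorated function is defined; importing the submodule directly like this is an assumption about the package layout.

```python
import chdb.utils.trace as trace

# Must be enabled before decorating; otherwise print_lines returns the function unchanged.
trace.enable_print = True


@trace.print_lines
def add(a, b):
    total = a + b
    return total


add(1, 2)  # prints each executed line with a timestamp and the local variables
```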