duckdb 1.5.0.dev37__cp314-cp314-win_amd64.whl → 1.5.0.dev94__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of duckdb has been flagged as potentially problematic; consult the registry's advisory report for more details.

Files changed (56):
  1. _duckdb-stubs/__init__.pyi +1443 -0
  2. _duckdb-stubs/_func.pyi +46 -0
  3. _duckdb-stubs/_sqltypes.pyi +75 -0
  4. _duckdb.cp314-win_amd64.pyd +0 -0
  5. adbc_driver_duckdb/__init__.py +49 -0
  6. adbc_driver_duckdb/dbapi.py +115 -0
  7. duckdb/__init__.py +341 -435
  8. duckdb/_dbapi_type_object.py +231 -0
  9. duckdb/_version.py +22 -0
  10. duckdb/bytes_io_wrapper.py +12 -9
  11. duckdb/experimental/__init__.py +2 -1
  12. duckdb/experimental/spark/__init__.py +3 -4
  13. duckdb/experimental/spark/_globals.py +8 -8
  14. duckdb/experimental/spark/_typing.py +7 -9
  15. duckdb/experimental/spark/conf.py +16 -15
  16. duckdb/experimental/spark/context.py +60 -44
  17. duckdb/experimental/spark/errors/__init__.py +33 -35
  18. duckdb/experimental/spark/errors/error_classes.py +1 -1
  19. duckdb/experimental/spark/errors/exceptions/__init__.py +1 -1
  20. duckdb/experimental/spark/errors/exceptions/base.py +39 -88
  21. duckdb/experimental/spark/errors/utils.py +11 -16
  22. duckdb/experimental/spark/exception.py +9 -6
  23. duckdb/experimental/spark/sql/__init__.py +5 -5
  24. duckdb/experimental/spark/sql/_typing.py +8 -15
  25. duckdb/experimental/spark/sql/catalog.py +21 -20
  26. duckdb/experimental/spark/sql/column.py +48 -55
  27. duckdb/experimental/spark/sql/conf.py +9 -8
  28. duckdb/experimental/spark/sql/dataframe.py +185 -233
  29. duckdb/experimental/spark/sql/functions.py +1222 -1248
  30. duckdb/experimental/spark/sql/group.py +56 -52
  31. duckdb/experimental/spark/sql/readwriter.py +80 -94
  32. duckdb/experimental/spark/sql/session.py +64 -59
  33. duckdb/experimental/spark/sql/streaming.py +9 -10
  34. duckdb/experimental/spark/sql/type_utils.py +67 -65
  35. duckdb/experimental/spark/sql/types.py +309 -345
  36. duckdb/experimental/spark/sql/udf.py +6 -6
  37. duckdb/filesystem.py +26 -16
  38. duckdb/func/__init__.py +3 -0
  39. duckdb/functional/__init__.py +12 -16
  40. duckdb/polars_io.py +130 -83
  41. duckdb/query_graph/__main__.py +91 -96
  42. duckdb/sqltypes/__init__.py +63 -0
  43. duckdb/typing/__init__.py +18 -8
  44. duckdb/udf.py +10 -5
  45. duckdb/value/__init__.py +1 -0
  46. duckdb/value/constant/__init__.py +62 -60
  47. {duckdb-1.5.0.dev37.dist-info → duckdb-1.5.0.dev94.dist-info}/METADATA +12 -4
  48. duckdb-1.5.0.dev94.dist-info/RECORD +52 -0
  49. duckdb/__init__.pyi +0 -713
  50. duckdb/functional/__init__.pyi +0 -31
  51. duckdb/typing/__init__.pyi +0 -36
  52. duckdb/value/constant/__init__.pyi +0 -115
  53. duckdb-1.5.0.dev37.dist-info/RECORD +0 -47
  54. /duckdb/{value/__init__.pyi → py.typed} +0 -0
  55. {duckdb-1.5.0.dev37.dist-info → duckdb-1.5.0.dev94.dist-info}/WHEEL +0 -0
  56. {duckdb-1.5.0.dev37.dist-info → duckdb-1.5.0.dev94.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1443 @@
1
+ import os
2
+ import pathlib
3
+ import typing as pytyping
4
+ from typing_extensions import Self
5
+
6
+ if pytyping.TYPE_CHECKING:
7
+ import fsspec
8
+ import numpy as np
9
+ import polars
10
+ import pandas
11
+ import pyarrow.lib
12
+ import torch as pytorch
13
+ import tensorflow
14
+ from collections.abc import Callable, Sequence, Mapping
15
+ from duckdb import sqltypes, func
16
+
17
# Recursive alias for the `field_ids` argument of to_parquet/write_parquet:
# each column name maps either to an explicit field id (int) or, for nested
# columns, to another mapping of the same shape.
ParquetFieldIdsType = Mapping[str, pytyping.Union[int, "ParquetFieldIdsType"]]
19
+
20
# Public re-export surface of the compiled _duckdb module (kept sorted).
__all__: list[str] = [
    # --- exception types, enums, and core classes ---
    "BinderException",
    "CSVLineTerminator",
    "CaseExpression",
    "CatalogException",
    "CoalesceOperator",
    "ColumnExpression",
    "ConnectionException",
    "ConstantExpression",
    "ConstraintException",
    "ConversionException",
    "DataError",
    "DatabaseError",
    "DefaultExpression",
    "DependencyException",
    "DuckDBPyConnection",
    "DuckDBPyRelation",
    "Error",
    "ExpectedResultType",
    "ExplainType",
    "Expression",
    "FatalException",
    "FunctionExpression",
    "HTTPException",
    "IOException",
    "IntegrityError",
    "InternalError",
    "InternalException",
    "InterruptException",
    "InvalidInputException",
    "InvalidTypeException",
    "LambdaExpression",
    "NotImplementedException",
    "NotSupportedError",
    "OperationalError",
    "OutOfMemoryException",
    "OutOfRangeException",
    "ParserException",
    "PermissionException",
    "ProgrammingError",
    "PythonExceptionHandling",
    "RenderMode",
    "SQLExpression",
    "SequenceException",
    "SerializationException",
    "StarExpression",
    "Statement",
    "StatementType",
    "SyntaxException",
    "TransactionException",
    "TypeMismatchException",
    "Warning",
    # --- module-level functions and DB-API attributes ---
    "aggregate",
    "alias",
    "apilevel",
    "append",
    "array_type",
    "arrow",
    "begin",
    "checkpoint",
    "close",
    "commit",
    "connect",
    "create_function",
    "cursor",
    "decimal_type",
    "default_connection",
    "description",
    "df",
    "distinct",
    "dtype",
    "duplicate",
    "enum_type",
    "execute",
    "executemany",
    "extract_statements",
    "fetch_arrow_table",
    "fetch_df",
    "fetch_df_chunk",
    "fetch_record_batch",
    "fetchall",
    "fetchdf",
    "fetchmany",
    "fetchnumpy",
    "fetchone",
    "filesystem_is_registered",
    "filter",
    "from_arrow",
    "from_csv_auto",
    "from_df",
    "from_parquet",
    "from_query",
    "get_table_names",
    "install_extension",
    "interrupt",
    "limit",
    "list_filesystems",
    "list_type",
    "load_extension",
    "map_type",
    "order",
    "paramstyle",
    "pl",
    "project",
    "query",
    "query_df",
    "query_progress",
    "read_csv",
    "read_json",
    "read_parquet",
    "register",
    "register_filesystem",
    "remove_function",
    "rollback",
    "row_type",
    "rowcount",
    "set_default_connection",
    "sql",
    "sqltype",
    "string_type",
    "struct_type",
    "table",
    "table_function",
    "tf",
    "threadsafety",
    "token_type",
    "tokenize",
    "torch",
    "type",
    "union_type",
    "unregister",
    "unregister_filesystem",
    "values",
    "view",
    "write_csv",
]
156
+
157
# Binder-stage failure; subclass of the DB-API ProgrammingError declared
# elsewhere in this stub.
class BinderException(ProgrammingError): ...
158
+
159
class CSVLineTerminator:
    """Enum-like stub for the CSV line-terminator options exposed by the extension.

    Mirrors a pybind11 enum: integer-valued members, a ``__members__`` table,
    and int/pickle protocol support.
    """

    # Enum members (values taken from the extension: LINE_FEED = 0,
    # CARRIAGE_RETURN_LINE_FEED = 1).
    LINE_FEED: pytyping.ClassVar[CSVLineTerminator]  # value = <CSVLineTerminator.LINE_FEED: 0>
    CARRIAGE_RETURN_LINE_FEED: pytyping.ClassVar[
        CSVLineTerminator
    ]  # value = <CSVLineTerminator.CARRIAGE_RETURN_LINE_FEED: 1>
    # Name -> member lookup table, as provided by pybind11 enums.
    __members__: pytyping.ClassVar[
        dict[str, CSVLineTerminator]
    ]  # value = {'LINE_FEED': <CSVLineTerminator.LINE_FEED: 0>, 'CARRIAGE_RETURN_LINE_FEED': <CSVLineTerminator.CARRIAGE_RETURN_LINE_FEED: 1>} # noqa: E501

    def __init__(self, value: pytyping.SupportsInt) -> None: ...
    # Comparison / hashing.
    def __eq__(self, other: object) -> bool: ...
    def __ne__(self, other: object) -> bool: ...
    def __hash__(self) -> int: ...
    # Integer protocol.
    def __index__(self) -> int: ...
    def __int__(self) -> int: ...
    # Pickle support.
    def __getstate__(self) -> int: ...
    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
    @property
    def name(self) -> str: ...
    @property
    def value(self) -> int: ...
179
+
180
# DuckDB exception hierarchy (DB-API style).  Bases Error, ProgrammingError,
# OperationalError and IntegrityError are declared elsewhere in this stub;
# locally-declared bases are ordered before their subclasses.
class DatabaseError(Error): ...
class DataError(DatabaseError): ...
class DependencyException(DatabaseError): ...
class ConversionException(DataError): ...
class CatalogException(ProgrammingError): ...
class ConnectionException(OperationalError): ...
class ConstraintException(IntegrityError): ...
187
+
188
class DuckDBPyConnection:
    """Stub for duckdb's native database connection.

    Covers the DB-API-style execution surface (``execute``/``executemany``
    and the ``fetch*`` family), the relational readers (``read_csv``,
    ``read_json``, ``read_parquet``, ``from_*``), SQL type constructors
    (``array_type``, ``map_type``, ...), and extension / filesystem
    management.  Per the signatures below, state-mutating calls return the
    connection itself so they can be chained.
    """

    def __del__(self) -> None: ...
    def __enter__(self) -> Self: ...
    def __exit__(self, exc_type: object, exc: object, traceback: object) -> None: ...
    def append(self, table_name: str, df: pandas.DataFrame, *, by_name: bool = False) -> DuckDBPyConnection: ...
    def array_type(self, type: sqltypes.DuckDBPyType, size: pytyping.SupportsInt) -> sqltypes.DuckDBPyType: ...
    def arrow(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
    def begin(self) -> DuckDBPyConnection: ...
    def checkpoint(self) -> DuckDBPyConnection: ...
    def close(self) -> None: ...
    def commit(self) -> DuckDBPyConnection: ...
    def create_function(
        self,
        name: str,
        function: Callable[..., pytyping.Any],
        parameters: list[sqltypes.DuckDBPyType] | None = None,
        return_type: sqltypes.DuckDBPyType | None = None,
        *,
        type: func.PythonUDFType = ...,
        null_handling: func.FunctionNullHandling = ...,
        exception_handling: PythonExceptionHandling = ...,
        side_effects: bool = False,
    ) -> DuckDBPyConnection: ...
    def cursor(self) -> DuckDBPyConnection: ...
    def decimal_type(self, width: pytyping.SupportsInt, scale: pytyping.SupportsInt) -> sqltypes.DuckDBPyType: ...
    def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
    def dtype(self, type_str: str) -> sqltypes.DuckDBPyType: ...
    def duplicate(self) -> DuckDBPyConnection: ...
    def enum_type(
        self, name: str, type: sqltypes.DuckDBPyType, values: list[pytyping.Any]
    ) -> sqltypes.DuckDBPyType: ...
    def execute(self, query: Statement | str, parameters: object = None) -> DuckDBPyConnection: ...
    def executemany(self, query: Statement | str, parameters: object = None) -> DuckDBPyConnection: ...
    def extract_statements(self, query: str) -> list[Statement]: ...
    def fetch_arrow_table(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
    def fetch_df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
    def fetch_df_chunk(
        self, vectors_per_chunk: pytyping.SupportsInt = 1, *, date_as_object: bool = False
    ) -> pandas.DataFrame: ...
    def fetch_record_batch(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
    def fetchall(self) -> list[tuple[pytyping.Any, ...]]: ...
    def fetchdf(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
    def fetchmany(self, size: pytyping.SupportsInt = 1) -> list[tuple[pytyping.Any, ...]]: ...
    def fetchnumpy(self) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
    def fetchone(self) -> tuple[pytyping.Any, ...] | None: ...
    def filesystem_is_registered(self, name: str) -> bool: ...
    def from_arrow(self, arrow_object: object) -> DuckDBPyRelation: ...
    def from_csv_auto(
        self,
        path_or_buffer: str | bytes | os.PathLike[str] | os.PathLike[bytes],
        header: bool | int | None = None,
        compression: str | None = None,
        sep: str | None = None,
        delimiter: str | None = None,
        files_to_sniff: int | None = None,
        comment: str | None = None,
        thousands: str | None = None,
        dtype: dict[str, str] | list[str] | None = None,
        na_values: str | list[str] | None = None,
        skiprows: int | None = None,
        quotechar: str | None = None,
        escapechar: str | None = None,
        encoding: str | None = None,
        parallel: bool | None = None,
        date_format: str | None = None,
        timestamp_format: str | None = None,
        sample_size: int | None = None,
        auto_detect: bool | int | None = None,
        all_varchar: bool | None = None,
        normalize_names: bool | None = None,
        null_padding: bool | None = None,
        names: list[str] | None = None,
        lineterminator: str | None = None,
        columns: dict[str, str] | None = None,
        auto_type_candidates: list[str] | None = None,
        max_line_size: int | None = None,
        ignore_errors: bool | None = None,
        store_rejects: bool | None = None,
        rejects_table: str | None = None,
        rejects_scan: str | None = None,
        rejects_limit: int | None = None,
        force_not_null: list[str] | None = None,
        buffer_size: int | None = None,
        decimal: str | None = None,
        allow_quoted_nulls: bool | None = None,
        filename: bool | str | None = None,
        hive_partitioning: bool | None = None,
        union_by_name: bool | None = None,
        hive_types: dict[str, str] | None = None,
        hive_types_autocast: bool | None = None,
        strict_mode: bool | None = None,
    ) -> DuckDBPyRelation: ...
    def from_df(self, df: pandas.DataFrame) -> DuckDBPyRelation: ...
    @pytyping.overload
    def from_parquet(
        self,
        file_glob: str,
        binary_as_string: bool = False,
        *,
        file_row_number: bool = False,
        filename: bool = False,
        hive_partitioning: bool = False,
        union_by_name: bool = False,
        compression: str | None = None,
    ) -> DuckDBPyRelation: ...
    @pytyping.overload
    def from_parquet(
        self,
        file_globs: Sequence[str],
        binary_as_string: bool = False,
        *,
        file_row_number: bool = False,
        filename: bool = False,
        hive_partitioning: bool = False,
        union_by_name: bool = False,
        compression: str | None = None,
    ) -> DuckDBPyRelation: ...
    def from_query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
    def get_table_names(self, query: str, *, qualified: bool = False) -> set[str]: ...
    def install_extension(
        self,
        extension: str,
        *,
        force_install: bool = False,
        repository: str | None = None,
        repository_url: str | None = None,
        version: str | None = None,
    ) -> None: ...
    def interrupt(self) -> None: ...
    def list_filesystems(self) -> list[str]: ...
    def list_type(self, type: sqltypes.DuckDBPyType) -> sqltypes.DuckDBPyType: ...
    def load_extension(self, extension: str) -> None: ...
    def map_type(self, key: sqltypes.DuckDBPyType, value: sqltypes.DuckDBPyType) -> sqltypes.DuckDBPyType: ...
    def pl(self, rows_per_batch: pytyping.SupportsInt = 1000000, *, lazy: bool = False) -> polars.DataFrame: ...
    def query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
    def query_progress(self) -> float: ...
    # NOTE(review): `path_or_buffer` widened with os.PathLike[bytes] for
    # consistency with from_csv_auto above, which accepts it.
    def read_csv(
        self,
        path_or_buffer: str | bytes | os.PathLike[str] | os.PathLike[bytes],
        header: bool | int | None = None,
        compression: str | None = None,
        sep: str | None = None,
        delimiter: str | None = None,
        files_to_sniff: int | None = None,
        comment: str | None = None,
        thousands: str | None = None,
        dtype: dict[str, str] | list[str] | None = None,
        na_values: str | list[str] | None = None,
        skiprows: int | None = None,
        quotechar: str | None = None,
        escapechar: str | None = None,
        encoding: str | None = None,
        parallel: bool | None = None,
        date_format: str | None = None,
        timestamp_format: str | None = None,
        sample_size: int | None = None,
        auto_detect: bool | int | None = None,
        all_varchar: bool | None = None,
        normalize_names: bool | None = None,
        null_padding: bool | None = None,
        names: list[str] | None = None,
        lineterminator: str | None = None,
        columns: dict[str, str] | None = None,
        auto_type_candidates: list[str] | None = None,
        max_line_size: int | None = None,
        ignore_errors: bool | None = None,
        store_rejects: bool | None = None,
        rejects_table: str | None = None,
        rejects_scan: str | None = None,
        rejects_limit: int | None = None,
        force_not_null: list[str] | None = None,
        buffer_size: int | None = None,
        decimal: str | None = None,
        allow_quoted_nulls: bool | None = None,
        filename: bool | str | None = None,
        hive_partitioning: bool | None = None,
        union_by_name: bool | None = None,
        hive_types: dict[str, str] | None = None,
        hive_types_autocast: bool | None = None,
        strict_mode: bool | None = None,
    ) -> DuckDBPyRelation: ...
    def read_json(
        self,
        path_or_buffer: str | bytes | os.PathLike[str],
        *,
        columns: dict[str, str] | None = None,
        sample_size: int | None = None,
        maximum_depth: int | None = None,
        records: str | None = None,
        format: str | None = None,
        date_format: str | None = None,
        timestamp_format: str | None = None,
        compression: str | None = None,
        maximum_object_size: int | None = None,
        ignore_errors: bool | None = None,
        convert_strings_to_integers: bool | None = None,
        field_appearance_threshold: float | None = None,
        map_inference_threshold: int | None = None,
        maximum_sample_files: int | None = None,
        filename: bool | str | None = None,
        hive_partitioning: bool | None = None,
        union_by_name: bool | None = None,
        hive_types: dict[str, str] | None = None,
        hive_types_autocast: bool | None = None,
    ) -> DuckDBPyRelation: ...
    @pytyping.overload
    def read_parquet(
        self,
        file_glob: str,
        binary_as_string: bool = False,
        *,
        file_row_number: bool = False,
        filename: bool = False,
        hive_partitioning: bool = False,
        union_by_name: bool = False,
        compression: str | None = None,
    ) -> DuckDBPyRelation: ...
    @pytyping.overload
    def read_parquet(
        self,
        file_globs: Sequence[str],
        binary_as_string: bool = False,
        *,
        file_row_number: bool = False,
        filename: bool = False,
        hive_partitioning: bool = False,
        union_by_name: bool = False,
        # NOTE(review): was `pytyping.Any`; unified to `str | None` to match
        # the single-glob overload and both from_parquet overloads.
        compression: str | None = None,
    ) -> DuckDBPyRelation: ...
    def register(self, view_name: str, python_object: object) -> DuckDBPyConnection: ...
    def register_filesystem(self, filesystem: fsspec.AbstractFileSystem) -> None: ...
    def remove_function(self, name: str) -> DuckDBPyConnection: ...
    def rollback(self) -> DuckDBPyConnection: ...
    def row_type(
        self, fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType]
    ) -> sqltypes.DuckDBPyType: ...
    def sql(self, query: Statement | str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
    def sqltype(self, type_str: str) -> sqltypes.DuckDBPyType: ...
    def string_type(self, collation: str = "") -> sqltypes.DuckDBPyType: ...
    def struct_type(
        self, fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType]
    ) -> sqltypes.DuckDBPyType: ...
    def table(self, table_name: str) -> DuckDBPyRelation: ...
    def table_function(self, name: str, parameters: object = None) -> DuckDBPyRelation: ...
    def tf(self) -> dict[str, tensorflow.Tensor]: ...
    def torch(self) -> dict[str, pytorch.Tensor]: ...
    def type(self, type_str: str) -> sqltypes.DuckDBPyType: ...
    def union_type(
        self, members: list[sqltypes.DuckDBPyType] | dict[str, sqltypes.DuckDBPyType]
    ) -> sqltypes.DuckDBPyType: ...
    def unregister(self, view_name: str) -> DuckDBPyConnection: ...
    def unregister_filesystem(self, name: str) -> None: ...
    def values(self, *args: list[pytyping.Any] | tuple[Expression, ...] | Expression) -> DuckDBPyRelation: ...
    def view(self, view_name: str) -> DuckDBPyRelation: ...
    # DB-API cursor attributes.
    @property
    def description(self) -> list[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]]: ...
    @property
    def rowcount(self) -> int: ...
446
+
447
+ class DuckDBPyRelation:
448
+ def __arrow_c_stream__(self, requested_schema: object | None = None) -> pytyping.Any: ...
449
+ def __contains__(self, name: str) -> bool: ...
450
+ def __getattr__(self, name: str) -> DuckDBPyRelation: ...
451
+ def __getitem__(self, name: str) -> DuckDBPyRelation: ...
452
+ def __len__(self) -> int: ...
453
+ def aggregate(self, aggr_expr: Expression | str, group_expr: Expression | str = "") -> DuckDBPyRelation: ...
454
+ def any_value(
455
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
456
+ ) -> DuckDBPyRelation: ...
457
+ def apply(
458
+ self,
459
+ function_name: str,
460
+ function_aggr: str,
461
+ group_expr: str = "",
462
+ function_parameter: str = "",
463
+ projected_columns: str = "",
464
+ ) -> DuckDBPyRelation: ...
465
+ def arg_max(
466
+ self, arg_column: str, value_column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
467
+ ) -> DuckDBPyRelation: ...
468
+ def arg_min(
469
+ self, arg_column: str, value_column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
470
+ ) -> DuckDBPyRelation: ...
471
+ def arrow(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
472
+ def avg(
473
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
474
+ ) -> DuckDBPyRelation: ...
475
+ def bit_and(
476
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
477
+ ) -> DuckDBPyRelation: ...
478
+ def bit_or(
479
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
480
+ ) -> DuckDBPyRelation: ...
481
+ def bit_xor(
482
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
483
+ ) -> DuckDBPyRelation: ...
484
+ def bitstring_agg(
485
+ self,
486
+ column: str,
487
+ min: int | None = None,
488
+ max: int | None = None,
489
+ groups: str = "",
490
+ window_spec: str = "",
491
+ projected_columns: str = "",
492
+ ) -> DuckDBPyRelation: ...
493
+ def bool_and(
494
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
495
+ ) -> DuckDBPyRelation: ...
496
+ def bool_or(
497
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
498
+ ) -> DuckDBPyRelation: ...
499
+ def close(self) -> None: ...
500
+ def count(
501
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
502
+ ) -> DuckDBPyRelation: ...
503
+ def create(self, table_name: str) -> None: ...
504
+ def create_view(self, view_name: str, replace: bool = True) -> DuckDBPyRelation: ...
505
+ def cross(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
506
+ def cume_dist(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
507
+ def dense_rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
508
+ def describe(self) -> DuckDBPyRelation: ...
509
+ def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
510
+ def distinct(self) -> DuckDBPyRelation: ...
511
+ def except_(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
512
+ def execute(self) -> DuckDBPyRelation: ...
513
+ def explain(self, type: ExplainType = ExplainType.STANDARD) -> str: ...
514
+ def favg(
515
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
516
+ ) -> DuckDBPyRelation: ...
517
+ def fetch_arrow_reader(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
518
+ def fetch_arrow_table(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
519
+ def fetch_df_chunk(
520
+ self, vectors_per_chunk: pytyping.SupportsInt = 1, *, date_as_object: bool = False
521
+ ) -> pandas.DataFrame: ...
522
+ def fetch_record_batch(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
523
+ def fetchall(self) -> list[tuple[pytyping.Any, ...]]: ...
524
+ def fetchdf(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
525
+ def fetchmany(self, size: pytyping.SupportsInt = 1) -> list[tuple[pytyping.Any, ...]]: ...
526
+ def fetchnumpy(self) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
527
+ def fetchone(self) -> tuple[pytyping.Any, ...] | None: ...
528
+ def filter(self, filter_expr: Expression | str) -> DuckDBPyRelation: ...
529
+ def first(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
530
+ def first_value(self, column: str, window_spec: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
531
+ def fsum(
532
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
533
+ ) -> DuckDBPyRelation: ...
534
+ def geomean(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
535
+ def histogram(
536
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
537
+ ) -> DuckDBPyRelation: ...
538
+ def insert(self, values: pytyping.List[object]) -> None: ...
539
+ def insert_into(self, table_name: str) -> None: ...
540
+ def intersect(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
541
+ def join(
542
+ self, other_rel: DuckDBPyRelation, condition: Expression | str, how: str = "inner"
543
+ ) -> DuckDBPyRelation: ...
544
+ def lag(
545
+ self,
546
+ column: str,
547
+ window_spec: str,
548
+ offset: pytyping.SupportsInt = 1,
549
+ default_value: str = "NULL",
550
+ ignore_nulls: bool = False,
551
+ projected_columns: str = "",
552
+ ) -> DuckDBPyRelation: ...
553
+ def last(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
554
+ def last_value(self, column: str, window_spec: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
555
+ def lead(
556
+ self,
557
+ column: str,
558
+ window_spec: str,
559
+ offset: pytyping.SupportsInt = 1,
560
+ default_value: str = "NULL",
561
+ ignore_nulls: bool = False,
562
+ projected_columns: str = "",
563
+ ) -> DuckDBPyRelation: ...
564
+ def limit(self, n: pytyping.SupportsInt, offset: pytyping.SupportsInt = 0) -> DuckDBPyRelation: ...
565
+ def list(
566
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
567
+ ) -> DuckDBPyRelation: ...
568
+ def map(
569
+ self, map_function: Callable[..., pytyping.Any], *, schema: dict[str, sqltypes.DuckDBPyType] | None = None
570
+ ) -> DuckDBPyRelation: ...
571
+ def max(
572
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
573
+ ) -> DuckDBPyRelation: ...
574
+ def mean(
575
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
576
+ ) -> DuckDBPyRelation: ...
577
+ def median(
578
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
579
+ ) -> DuckDBPyRelation: ...
580
+ def min(
581
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
582
+ ) -> DuckDBPyRelation: ...
583
+ def mode(
584
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
585
+ ) -> DuckDBPyRelation: ...
586
+ def n_tile(
587
+ self, window_spec: str, num_buckets: pytyping.SupportsInt, projected_columns: str = ""
588
+ ) -> DuckDBPyRelation: ...
589
+ def nth_value(
590
+ self,
591
+ column: str,
592
+ window_spec: str,
593
+ offset: pytyping.SupportsInt,
594
+ ignore_nulls: bool = False,
595
+ projected_columns: str = "",
596
+ ) -> DuckDBPyRelation: ...
597
+ def order(self, order_expr: str) -> DuckDBPyRelation: ...
598
+ def percent_rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
599
+ def pl(self, batch_size: pytyping.SupportsInt = 1000000, *, lazy: bool = False) -> polars.DataFrame: ...
600
+ def product(
601
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
602
+ ) -> DuckDBPyRelation: ...
603
+ def project(self, *args: str | Expression, groups: str = "") -> DuckDBPyRelation: ...
604
+ def quantile(
605
+ self,
606
+ column: str,
607
+ q: float | pytyping.List[float] = 0.5,
608
+ groups: str = "",
609
+ window_spec: str = "",
610
+ projected_columns: str = "",
611
+ ) -> DuckDBPyRelation: ...
612
+ def quantile_cont(
613
+ self,
614
+ column: str,
615
+ q: float | pytyping.List[float] = 0.5,
616
+ groups: str = "",
617
+ window_spec: str = "",
618
+ projected_columns: str = "",
619
+ ) -> DuckDBPyRelation: ...
620
+ def quantile_disc(
621
+ self,
622
+ column: str,
623
+ q: float | pytyping.List[float] = 0.5,
624
+ groups: str = "",
625
+ window_spec: str = "",
626
+ projected_columns: str = "",
627
+ ) -> DuckDBPyRelation: ...
628
+ def query(self, virtual_table_name: str, sql_query: str) -> DuckDBPyRelation: ...
629
+ def rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
630
+ def rank_dense(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
631
+ def record_batch(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.RecordBatchReader: ...
632
+ def row_number(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
633
+ def select(self, *args: str | Expression, groups: str = "") -> DuckDBPyRelation: ...
634
+ def select_dtypes(self, types: pytyping.List[sqltypes.DuckDBPyType | str]) -> DuckDBPyRelation: ...
635
+ def select_types(self, types: pytyping.List[sqltypes.DuckDBPyType | str]) -> DuckDBPyRelation: ...
636
+ def set_alias(self, alias: str) -> DuckDBPyRelation: ...
637
+ def show(
638
+ self,
639
+ *,
640
+ max_width: pytyping.SupportsInt | None = None,
641
+ max_rows: pytyping.SupportsInt | None = None,
642
+ max_col_width: pytyping.SupportsInt | None = None,
643
+ null_value: str | None = None,
644
+ render_mode: RenderMode | None = None,
645
+ ) -> None: ...
646
+ def sort(self, *args: Expression) -> DuckDBPyRelation: ...
647
+ def sql_query(self) -> str: ...
648
+ def std(
649
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
650
+ ) -> DuckDBPyRelation: ...
651
+ def stddev(
652
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
653
+ ) -> DuckDBPyRelation: ...
654
+ def stddev_pop(
655
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
656
+ ) -> DuckDBPyRelation: ...
657
+ def stddev_samp(
658
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
659
+ ) -> DuckDBPyRelation: ...
660
+ def string_agg(
661
+ self, column: str, sep: str = ",", groups: str = "", window_spec: str = "", projected_columns: str = ""
662
+ ) -> DuckDBPyRelation: ...
663
+ def sum(
664
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
665
+ ) -> DuckDBPyRelation: ...
666
+ def tf(self) -> dict[str, tensorflow.Tensor]: ...
667
+ def to_arrow_table(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
668
+ def to_csv(
669
+ self,
670
+ file_name: str,
671
+ *,
672
+ sep: str | None = None,
673
+ na_rep: str | None = None,
674
+ header: bool | None = None,
675
+ quotechar: str | None = None,
676
+ escapechar: str | None = None,
677
+ date_format: str | None = None,
678
+ timestamp_format: str | None = None,
679
+ quoting: str | int | None = None,
680
+ encoding: str | None = None,
681
+ compression: str | None = None,
682
+ overwrite: bool | None = None,
683
+ per_thread_output: bool | None = None,
684
+ use_tmp_file: bool | None = None,
685
+ partition_by: pytyping.List[str] | None = None,
686
+ write_partition_columns: bool | None = None,
687
+ ) -> None: ...
688
+ def to_df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
689
+ def to_parquet(
690
+ self,
691
+ file_name: str,
692
+ *,
693
+ compression: str | None = None,
694
+ field_ids: ParquetFieldIdsType | pytyping.Literal["auto"] | None = None,
695
+ row_group_size_bytes: int | str | None = None,
696
+ row_group_size: int | None = None,
697
+ overwrite: bool | None = None,
698
+ per_thread_output: bool | None = None,
699
+ use_tmp_file: bool | None = None,
700
+ partition_by: pytyping.List[str] | None = None,
701
+ write_partition_columns: bool | None = None,
702
+ append: bool | None = None,
703
+ ) -> None: ...
704
+ def to_table(self, table_name: str) -> None: ...
705
+ def to_view(self, view_name: str, replace: bool = True) -> DuckDBPyRelation: ...
706
+ def torch(self) -> dict[str, pytorch.Tensor]: ...
707
+ def union(self, union_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
708
+ def unique(self, unique_aggr: str) -> DuckDBPyRelation: ...
709
+ def update(self, set: Expression | str, *, condition: Expression | str | None = None) -> None: ...
710
+ def value_counts(self, column: str, groups: str = "") -> DuckDBPyRelation: ...
711
+ def var(
712
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
713
+ ) -> DuckDBPyRelation: ...
714
+ def var_pop(
715
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
716
+ ) -> DuckDBPyRelation: ...
717
+ def var_samp(
718
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
719
+ ) -> DuckDBPyRelation: ...
720
+ def variance(
721
+ self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
722
+ ) -> DuckDBPyRelation: ...
723
+ def write_csv(
724
+ self,
725
+ file_name: str,
726
+ sep: str | None = None,
727
+ na_rep: str | None = None,
728
+ header: bool | None = None,
729
+ quotechar: str | None = None,
730
+ escapechar: str | None = None,
731
+ date_format: str | None = None,
732
+ timestamp_format: str | None = None,
733
+ quoting: str | int | None = None,
734
+ encoding: str | None = None,
735
+ compression: str | None = None,
736
+ overwrite: bool | None = None,
737
+ per_thread_output: bool | None = None,
738
+ use_tmp_file: bool | None = None,
739
+ partition_by: pytyping.List[str] | None = None,
740
+ write_partition_columns: bool | None = None,
741
+ ) -> None: ...
742
+ def write_parquet(
743
+ self,
744
+ file_name: str,
745
+ compression: str | None = None,
746
+ field_ids: ParquetFieldIdsType | pytyping.Literal["auto"] | None = None,
747
+ row_group_size_bytes: str | int | None = None,
748
+ row_group_size: int | None = None,
749
+ overwrite: bool | None = None,
750
+ per_thread_output: bool | None = None,
751
+ use_tmp_file: bool | None = None,
752
+ partition_by: pytyping.List[str] | None = None,
753
+ write_partition_columns: bool | None = None,
754
+ append: bool | None = None,
755
+ ) -> None: ...
756
+ @property
757
+ def alias(self) -> str: ...
758
+ @property
759
+ def columns(self) -> pytyping.List[str]: ...
760
+ @property
761
+ def description(self) -> pytyping.List[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]]: ...
762
+ @property
763
+ def dtypes(self) -> pytyping.List[str]: ...
764
+ @property
765
+ def shape(self) -> tuple[int, int]: ...
766
+ @property
767
+ def type(self) -> str: ...
768
+ @property
769
+ def types(self) -> pytyping.List[sqltypes.DuckDBPyType]: ...
770
+
771
+ class Error(Exception): ...
772
+
773
+ class ExpectedResultType:
774
+ CHANGED_ROWS: pytyping.ClassVar[ExpectedResultType] # value = <ExpectedResultType.CHANGED_ROWS: 1>
775
+ NOTHING: pytyping.ClassVar[ExpectedResultType] # value = <ExpectedResultType.NOTHING: 2>
776
+ QUERY_RESULT: pytyping.ClassVar[ExpectedResultType] # value = <ExpectedResultType.QUERY_RESULT: 0>
777
+ __members__: pytyping.ClassVar[
778
+ dict[str, ExpectedResultType]
779
+ ] # value = {'QUERY_RESULT': <ExpectedResultType.QUERY_RESULT: 0>, 'CHANGED_ROWS': <ExpectedResultType.CHANGED_ROWS: 1>, 'NOTHING': <ExpectedResultType.NOTHING: 2>} # noqa: E501
780
+ def __eq__(self, other: object) -> bool: ...
781
+ def __getstate__(self) -> int: ...
782
+ def __hash__(self) -> int: ...
783
+ def __index__(self) -> int: ...
784
+ def __init__(self, value: pytyping.SupportsInt) -> None: ...
785
+ def __int__(self) -> int: ...
786
+ def __ne__(self, other: object) -> bool: ...
787
+ def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
788
+ @property
789
+ def name(self) -> str: ...
790
+ @property
791
+ def value(self) -> int: ...
792
+
793
+ class ExplainType:
794
+ ANALYZE: pytyping.ClassVar[ExplainType] # value = <ExplainType.ANALYZE: 1>
795
+ STANDARD: pytyping.ClassVar[ExplainType] # value = <ExplainType.STANDARD: 0>
796
+ __members__: pytyping.ClassVar[
797
+ dict[str, ExplainType]
798
+ ] # value = {'STANDARD': <ExplainType.STANDARD: 0>, 'ANALYZE': <ExplainType.ANALYZE: 1>}
799
+ def __eq__(self, other: object) -> bool: ...
800
+ def __getstate__(self) -> int: ...
801
+ def __hash__(self) -> int: ...
802
+ def __index__(self) -> int: ...
803
+ def __init__(self, value: pytyping.SupportsInt) -> None: ...
804
+ def __int__(self) -> int: ...
805
+ def __ne__(self, other: object) -> bool: ...
806
+ def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
807
+ @property
808
+ def name(self) -> str: ...
809
+ @property
810
+ def value(self) -> int: ...
811
+
812
+ class Expression:
813
+ def __add__(self, other: Expression) -> Expression: ...
814
+ def __and__(self, other: Expression) -> Expression: ...
815
+ def __div__(self, other: Expression) -> Expression: ...
816
+ def __eq__(self, other: Expression) -> Expression: ... # type: ignore[override]
817
+ def __floordiv__(self, other: Expression) -> Expression: ...
818
+ def __ge__(self, other: Expression) -> Expression: ...
819
+ def __gt__(self, other: Expression) -> Expression: ...
820
+ @pytyping.overload
821
+ def __init__(self, arg0: str) -> None: ...
822
+ @pytyping.overload
823
+ def __init__(self, arg0: pytyping.Any) -> None: ...
824
+ def __invert__(self) -> Expression: ...
825
+ def __le__(self, other: Expression) -> Expression: ...
826
+ def __lt__(self, other: Expression) -> Expression: ...
827
+ def __mod__(self, other: Expression) -> Expression: ...
828
+ def __mul__(self, other: Expression) -> Expression: ...
829
+ def __ne__(self, other: Expression) -> Expression: ... # type: ignore[override]
830
+ def __neg__(self) -> Expression: ...
831
+ def __or__(self, other: Expression) -> Expression: ...
832
+ def __pow__(self, other: Expression) -> Expression: ...
833
+ def __radd__(self, other: Expression) -> Expression: ...
834
+ def __rand__(self, other: Expression) -> Expression: ...
835
+ def __rdiv__(self, other: Expression) -> Expression: ...
836
+ def __rfloordiv__(self, other: Expression) -> Expression: ...
837
+ def __rmod__(self, other: Expression) -> Expression: ...
838
+ def __rmul__(self, other: Expression) -> Expression: ...
839
+ def __ror__(self, other: Expression) -> Expression: ...
840
+ def __rpow__(self, other: Expression) -> Expression: ...
841
+ def __rsub__(self, other: Expression) -> Expression: ...
842
+ def __rtruediv__(self, other: Expression) -> Expression: ...
843
+ def __sub__(self, other: Expression) -> Expression: ...
844
+ def __truediv__(self, other: Expression) -> Expression: ...
845
+ def alias(self, name: str) -> Expression: ...
846
+ def asc(self) -> Expression: ...
847
+ def between(self, lower: Expression, upper: Expression) -> Expression: ...
848
+ def cast(self, type: sqltypes.DuckDBPyType) -> Expression: ...
849
+ def collate(self, collation: str) -> Expression: ...
850
+ def desc(self) -> Expression: ...
851
+ def get_name(self) -> str: ...
852
+ def isin(self, *args: Expression) -> Expression: ...
853
+ def isnotin(self, *args: Expression) -> Expression: ...
854
+ def isnotnull(self) -> Expression: ...
855
+ def isnull(self) -> Expression: ...
856
+ def nulls_first(self) -> Expression: ...
857
+ def nulls_last(self) -> Expression: ...
858
+ def otherwise(self, value: Expression) -> Expression: ...
859
+ def show(self) -> None: ...
860
+ def when(self, condition: Expression, value: Expression) -> Expression: ...
861
+
862
+ class FatalException(DatabaseError): ...
863
+
864
+ class HTTPException(IOException):
865
+ status_code: int
866
+ body: str
867
+ reason: str
868
+ headers: dict[str, str]
869
+
870
+ class IOException(OperationalError): ...
871
+ class IntegrityError(DatabaseError): ...
872
+ class InternalError(DatabaseError): ...
873
+ class InternalException(InternalError): ...
874
+ class InterruptException(DatabaseError): ...
875
+ class InvalidInputException(ProgrammingError): ...
876
+ class InvalidTypeException(ProgrammingError): ...
877
+ class NotImplementedException(NotSupportedError): ...
878
+ class NotSupportedError(DatabaseError): ...
879
+ class OperationalError(DatabaseError): ...
880
+ class OutOfMemoryException(OperationalError): ...
881
+ class OutOfRangeException(DataError): ...
882
+ class ParserException(ProgrammingError): ...
883
+ class PermissionException(DatabaseError): ...
884
+ class ProgrammingError(DatabaseError): ...
885
+
886
+ class PythonExceptionHandling:
887
+ DEFAULT: pytyping.ClassVar[PythonExceptionHandling] # value = <PythonExceptionHandling.DEFAULT: 0>
888
+ RETURN_NULL: pytyping.ClassVar[PythonExceptionHandling] # value = <PythonExceptionHandling.RETURN_NULL: 1>
889
+ __members__: pytyping.ClassVar[
890
+ dict[str, PythonExceptionHandling]
891
+ ] # value = {'DEFAULT': <PythonExceptionHandling.DEFAULT: 0>, 'RETURN_NULL': <PythonExceptionHandling.RETURN_NULL: 1>} # noqa: E501
892
+ def __eq__(self, other: object) -> bool: ...
893
+ def __getstate__(self) -> int: ...
894
+ def __hash__(self) -> int: ...
895
+ def __index__(self) -> int: ...
896
+ def __init__(self, value: pytyping.SupportsInt) -> None: ...
897
+ def __int__(self) -> int: ...
898
+ def __ne__(self, other: object) -> bool: ...
899
+ def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
900
+ @property
901
+ def name(self) -> str: ...
902
+ @property
903
+ def value(self) -> int: ...
904
+
905
+ class RenderMode:
906
+ COLUMNS: pytyping.ClassVar[RenderMode] # value = <RenderMode.COLUMNS: 1>
907
+ ROWS: pytyping.ClassVar[RenderMode] # value = <RenderMode.ROWS: 0>
908
+ __members__: pytyping.ClassVar[
909
+ dict[str, RenderMode]
910
+ ] # value = {'ROWS': <RenderMode.ROWS: 0>, 'COLUMNS': <RenderMode.COLUMNS: 1>}
911
+ def __eq__(self, other: object) -> bool: ...
912
+ def __getstate__(self) -> int: ...
913
+ def __hash__(self) -> int: ...
914
+ def __index__(self) -> int: ...
915
+ def __init__(self, value: pytyping.SupportsInt) -> None: ...
916
+ def __int__(self) -> int: ...
917
+ def __ne__(self, other: object) -> bool: ...
918
+ def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
919
+ @property
920
+ def name(self) -> str: ...
921
+ @property
922
+ def value(self) -> int: ...
923
+
924
+ class SequenceException(DatabaseError): ...
925
+ class SerializationException(OperationalError): ...
926
+
927
+ class Statement:
928
+ @property
929
+ def expected_result_type(self) -> list[StatementType]: ...
930
+ @property
931
+ def named_parameters(self) -> set[str]: ...
932
+ @property
933
+ def query(self) -> str: ...
934
+ @property
935
+ def type(self) -> StatementType: ...
936
+
937
+ class StatementType:
938
+ ALTER_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.ALTER_STATEMENT: 8>
939
+ ANALYZE_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.ANALYZE_STATEMENT: 11>
940
+ ATTACH_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.ATTACH_STATEMENT: 25>
941
+ CALL_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.CALL_STATEMENT: 19>
942
+ COPY_DATABASE_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.COPY_DATABASE_STATEMENT: 28>
943
+ COPY_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.COPY_STATEMENT: 10>
944
+ CREATE_FUNC_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.CREATE_FUNC_STATEMENT: 13>
945
+ CREATE_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.CREATE_STATEMENT: 4>
946
+ DELETE_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.DELETE_STATEMENT: 5>
947
+ DETACH_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.DETACH_STATEMENT: 26>
948
+ DROP_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.DROP_STATEMENT: 15>
949
+ EXECUTE_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.EXECUTE_STATEMENT: 7>
950
+ EXPLAIN_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.EXPLAIN_STATEMENT: 14>
951
+ EXPORT_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.EXPORT_STATEMENT: 16>
952
+ EXTENSION_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.EXTENSION_STATEMENT: 23>
953
+ INSERT_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.INSERT_STATEMENT: 2>
954
+ INVALID_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.INVALID_STATEMENT: 0>
955
+ LOAD_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.LOAD_STATEMENT: 21>
956
+ LOGICAL_PLAN_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.LOGICAL_PLAN_STATEMENT: 24>
957
+ MERGE_INTO_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.MERGE_INTO_STATEMENT: 30>
958
+ MULTI_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.MULTI_STATEMENT: 27>
959
+ PRAGMA_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.PRAGMA_STATEMENT: 17>
960
+ PREPARE_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.PREPARE_STATEMENT: 6>
961
+ RELATION_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.RELATION_STATEMENT: 22>
962
+ SELECT_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.SELECT_STATEMENT: 1>
963
+ SET_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.SET_STATEMENT: 20>
964
+ TRANSACTION_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.TRANSACTION_STATEMENT: 9>
965
+ UPDATE_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.UPDATE_STATEMENT: 3>
966
+ VACUUM_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.VACUUM_STATEMENT: 18>
967
+ VARIABLE_SET_STATEMENT: pytyping.ClassVar[StatementType] # value = <StatementType.VARIABLE_SET_STATEMENT: 12>
968
+ __members__: pytyping.ClassVar[
969
+ dict[str, StatementType]
970
+ ] # value = {'INVALID_STATEMENT': <StatementType.INVALID_STATEMENT: 0>, 'SELECT_STATEMENT': <StatementType.SELECT_STATEMENT: 1>, 'INSERT_STATEMENT': <StatementType.INSERT_STATEMENT: 2>, 'UPDATE_STATEMENT': <StatementType.UPDATE_STATEMENT: 3>, 'CREATE_STATEMENT': <StatementType.CREATE_STATEMENT: 4>, 'DELETE_STATEMENT': <StatementType.DELETE_STATEMENT: 5>, 'PREPARE_STATEMENT': <StatementType.PREPARE_STATEMENT: 6>, 'EXECUTE_STATEMENT': <StatementType.EXECUTE_STATEMENT: 7>, 'ALTER_STATEMENT': <StatementType.ALTER_STATEMENT: 8>, 'TRANSACTION_STATEMENT': <StatementType.TRANSACTION_STATEMENT: 9>, 'COPY_STATEMENT': <StatementType.COPY_STATEMENT: 10>, 'ANALYZE_STATEMENT': <StatementType.ANALYZE_STATEMENT: 11>, 'VARIABLE_SET_STATEMENT': <StatementType.VARIABLE_SET_STATEMENT: 12>, 'CREATE_FUNC_STATEMENT': <StatementType.CREATE_FUNC_STATEMENT: 13>, 'EXPLAIN_STATEMENT': <StatementType.EXPLAIN_STATEMENT: 14>, 'DROP_STATEMENT': <StatementType.DROP_STATEMENT: 15>, 'EXPORT_STATEMENT': <StatementType.EXPORT_STATEMENT: 16>, 'PRAGMA_STATEMENT': <StatementType.PRAGMA_STATEMENT: 17>, 'VACUUM_STATEMENT': <StatementType.VACUUM_STATEMENT: 18>, 'CALL_STATEMENT': <StatementType.CALL_STATEMENT: 19>, 'SET_STATEMENT': <StatementType.SET_STATEMENT: 20>, 'LOAD_STATEMENT': <StatementType.LOAD_STATEMENT: 21>, 'RELATION_STATEMENT': <StatementType.RELATION_STATEMENT: 22>, 'EXTENSION_STATEMENT': <StatementType.EXTENSION_STATEMENT: 23>, 'LOGICAL_PLAN_STATEMENT': <StatementType.LOGICAL_PLAN_STATEMENT: 24>, 'ATTACH_STATEMENT': <StatementType.ATTACH_STATEMENT: 25>, 'DETACH_STATEMENT': <StatementType.DETACH_STATEMENT: 26>, 'MULTI_STATEMENT': <StatementType.MULTI_STATEMENT: 27>, 'COPY_DATABASE_STATEMENT': <StatementType.COPY_DATABASE_STATEMENT: 28>, 'MERGE_INTO_STATEMENT': <StatementType.MERGE_INTO_STATEMENT: 30>} # noqa: E501
971
+ def __eq__(self, other: object) -> bool: ...
972
+ def __getstate__(self) -> int: ...
973
+ def __hash__(self) -> int: ...
974
+ def __index__(self) -> int: ...
975
+ def __init__(self, value: pytyping.SupportsInt) -> None: ...
976
+ def __int__(self) -> int: ...
977
+ def __ne__(self, other: object) -> bool: ...
978
+ def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
979
+ @property
980
+ def name(self) -> str: ...
981
+ @property
982
+ def value(self) -> int: ...
983
+
984
+ class SyntaxException(ProgrammingError): ...
985
+ class TransactionException(OperationalError): ...
986
+ class TypeMismatchException(DataError): ...
987
+ class Warning(Exception): ...
988
+
989
+ class token_type:
990
+ __members__: pytyping.ClassVar[
991
+ dict[str, token_type]
992
+ ] # value = {'identifier': <token_type.identifier: 0>, 'numeric_const': <token_type.numeric_const: 1>, 'string_const': <token_type.string_const: 2>, 'operator': <token_type.operator: 3>, 'keyword': <token_type.keyword: 4>, 'comment': <token_type.comment: 5>} # noqa: E501
993
+ comment: pytyping.ClassVar[token_type] # value = <token_type.comment: 5>
994
+ identifier: pytyping.ClassVar[token_type] # value = <token_type.identifier: 0>
995
+ keyword: pytyping.ClassVar[token_type] # value = <token_type.keyword: 4>
996
+ numeric_const: pytyping.ClassVar[token_type] # value = <token_type.numeric_const: 1>
997
+ operator: pytyping.ClassVar[token_type] # value = <token_type.operator: 3>
998
+ string_const: pytyping.ClassVar[token_type] # value = <token_type.string_const: 2>
999
+ def __eq__(self, other: object) -> bool: ...
1000
+ def __getstate__(self) -> int: ...
1001
+ def __hash__(self) -> int: ...
1002
+ def __index__(self) -> int: ...
1003
+ def __init__(self, value: pytyping.SupportsInt) -> None: ...
1004
+ def __int__(self) -> int: ...
1005
+ def __ne__(self, other: object) -> bool: ...
1006
+ def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
1007
+ @property
1008
+ def name(self) -> str: ...
1009
+ @property
1010
+ def value(self) -> int: ...
1011
+
1012
+ def CaseExpression(condition: Expression, value: Expression) -> Expression: ...
1013
+ def CoalesceOperator(*args: Expression) -> Expression: ...
1014
+ def ColumnExpression(*args: str) -> Expression: ...
1015
+ def ConstantExpression(value: Expression | str) -> Expression: ...
1016
+ def DefaultExpression() -> Expression: ...
1017
+ def FunctionExpression(function_name: str, *args: Expression) -> Expression: ...
1018
+ def LambdaExpression(lhs: Expression | str | tuple[str], rhs: Expression) -> Expression: ...
1019
+ def SQLExpression(expression: str) -> Expression: ...
1020
+ @pytyping.overload
1021
+ def StarExpression(*, exclude: Expression | str | tuple[str]) -> Expression: ...
1022
+ @pytyping.overload
1023
+ def StarExpression() -> Expression: ...
1024
+ def aggregate(
1025
+ df: pandas.DataFrame,
1026
+ aggr_expr: Expression | list[Expression] | str | list[str],
1027
+ group_expr: str = "",
1028
+ *,
1029
+ connection: DuckDBPyConnection | None = None,
1030
+ ) -> DuckDBPyRelation: ...
1031
+ def alias(df: pandas.DataFrame, alias: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1032
+ def append(
1033
+ table_name: str, df: pandas.DataFrame, *, by_name: bool = False, connection: DuckDBPyConnection | None = None
1034
+ ) -> DuckDBPyConnection: ...
1035
+ def array_type(
1036
+ type: sqltypes.DuckDBPyType, size: pytyping.SupportsInt, *, connection: DuckDBPyConnection | None = None
1037
+ ) -> sqltypes.DuckDBPyType: ...
1038
+ @pytyping.overload
1039
+ def arrow(
1040
+ rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
1041
+ ) -> pyarrow.lib.RecordBatchReader: ...
1042
+ @pytyping.overload
1043
+ def arrow(arrow_object: pytyping.Any, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1044
+ def begin(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1045
+ def checkpoint(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1046
+ def close(*, connection: DuckDBPyConnection | None = None) -> None: ...
1047
+ def commit(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1048
+ def connect(
1049
+ database: str | pathlib.Path = ":memory:",
1050
+ read_only: bool = False,
1051
+ config: dict[str, str] | None = None,
1052
+ ) -> DuckDBPyConnection: ...
1053
+ def create_function(
1054
+ name: str,
1055
+ function: Callable[..., pytyping.Any],
1056
+ parameters: list[sqltypes.DuckDBPyType] | None = None,
1057
+ return_type: sqltypes.DuckDBPyType | None = None,
1058
+ *,
1059
+ type: func.PythonUDFType = ...,
1060
+ null_handling: func.FunctionNullHandling = ...,
1061
+ exception_handling: PythonExceptionHandling = ...,
1062
+ side_effects: bool = False,
1063
+ connection: DuckDBPyConnection | None = None,
1064
+ ) -> DuckDBPyConnection: ...
1065
+ def cursor(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1066
+ def decimal_type(
1067
+ width: pytyping.SupportsInt, scale: pytyping.SupportsInt, *, connection: DuckDBPyConnection | None = None
1068
+ ) -> sqltypes.DuckDBPyType: ...
1069
+ def default_connection() -> DuckDBPyConnection: ...
1070
+ def description(
1071
+ *, connection: DuckDBPyConnection | None = None
1072
+ ) -> list[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]] | None: ...
1073
+ @pytyping.overload
1074
+ def df(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
1075
+ @pytyping.overload
1076
+ def df(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1077
+ def distinct(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1078
+ def dtype(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
1079
+ def duplicate(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1080
+ def enum_type(
1081
+ name: str,
1082
+ type: sqltypes.DuckDBPyType,
1083
+ values: list[pytyping.Any],
1084
+ *,
1085
+ connection: DuckDBPyConnection | None = None,
1086
+ ) -> sqltypes.DuckDBPyType: ...
1087
+ def execute(
1088
+ query: Statement | str,
1089
+ parameters: object = None,
1090
+ *,
1091
+ connection: DuckDBPyConnection | None = None,
1092
+ ) -> DuckDBPyConnection: ...
1093
+ def executemany(
1094
+ query: Statement | str,
1095
+ parameters: object = None,
1096
+ *,
1097
+ connection: DuckDBPyConnection | None = None,
1098
+ ) -> DuckDBPyConnection: ...
1099
+ def extract_statements(query: str, *, connection: DuckDBPyConnection | None = None) -> list[Statement]: ...
1100
+ def fetch_arrow_table(
1101
+ rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
1102
+ ) -> pyarrow.lib.Table: ...
1103
+ def fetch_df(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
1104
+ def fetch_df_chunk(
1105
+ vectors_per_chunk: pytyping.SupportsInt = 1,
1106
+ *,
1107
+ date_as_object: bool = False,
1108
+ connection: DuckDBPyConnection | None = None,
1109
+ ) -> pandas.DataFrame: ...
1110
+ def fetch_record_batch(
1111
+ rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
1112
+ ) -> pyarrow.lib.RecordBatchReader: ...
1113
+ def fetchall(*, connection: DuckDBPyConnection | None = None) -> list[tuple[pytyping.Any, ...]]: ...
1114
+ def fetchdf(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
1115
+ def fetchmany(
1116
+ size: pytyping.SupportsInt = 1, *, connection: DuckDBPyConnection | None = None
1117
+ ) -> list[tuple[pytyping.Any, ...]]: ...
1118
+ def fetchnumpy(
1119
+ *, connection: DuckDBPyConnection | None = None
1120
+ ) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
1121
+ def fetchone(*, connection: DuckDBPyConnection | None = None) -> tuple[pytyping.Any, ...] | None: ...
1122
+ def filesystem_is_registered(name: str, *, connection: DuckDBPyConnection | None = None) -> bool: ...
1123
+ def filter(
1124
+ df: pandas.DataFrame,
1125
+ filter_expr: Expression | str,
1126
+ *,
1127
+ connection: DuckDBPyConnection | None = None,
1128
+ ) -> DuckDBPyRelation: ...
1129
+ def from_arrow(
1130
+ arrow_object: object,
1131
+ *,
1132
+ connection: DuckDBPyConnection | None = None,
1133
+ ) -> DuckDBPyRelation: ...
1134
+ def from_csv_auto(
1135
+ path_or_buffer: str | bytes | os.PathLike[str],
1136
+ header: bool | int | None = None,
1137
+ compression: str | None = None,
1138
+ sep: str | None = None,
1139
+ delimiter: str | None = None,
1140
+ files_to_sniff: int | None = None,
1141
+ comment: str | None = None,
1142
+ thousands: str | None = None,
1143
+ dtype: dict[str, str] | list[str] | None = None,
1144
+ na_values: str | list[str] | None = None,
1145
+ skiprows: int | None = None,
1146
+ quotechar: str | None = None,
1147
+ escapechar: str | None = None,
1148
+ encoding: str | None = None,
1149
+ parallel: bool | None = None,
1150
+ date_format: str | None = None,
1151
+ timestamp_format: str | None = None,
1152
+ sample_size: int | None = None,
1153
+ auto_detect: bool | int | None = None,
1154
+ all_varchar: bool | None = None,
1155
+ normalize_names: bool | None = None,
1156
+ null_padding: bool | None = None,
1157
+ names: list[str] | None = None,
1158
+ lineterminator: str | None = None,
1159
+ columns: dict[str, str] | None = None,
1160
+ auto_type_candidates: list[str] | None = None,
1161
+ max_line_size: int | None = None,
1162
+ ignore_errors: bool | None = None,
1163
+ store_rejects: bool | None = None,
1164
+ rejects_table: str | None = None,
1165
+ rejects_scan: str | None = None,
1166
+ rejects_limit: int | None = None,
1167
+ force_not_null: list[str] | None = None,
1168
+ buffer_size: int | None = None,
1169
+ decimal: str | None = None,
1170
+ allow_quoted_nulls: bool | None = None,
1171
+ filename: bool | str | None = None,
1172
+ hive_partitioning: bool | None = None,
1173
+ union_by_name: bool | None = None,
1174
+ hive_types: dict[str, str] | None = None,
1175
+ hive_types_autocast: bool | None = None,
1176
+ strict_mode: bool | None = None,
1177
+ ) -> DuckDBPyRelation: ...
1178
+ def from_df(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1179
+ @pytyping.overload
1180
+ def from_parquet(
1181
+ file_glob: str,
1182
+ binary_as_string: bool = False,
1183
+ *,
1184
+ file_row_number: bool = False,
1185
+ filename: bool = False,
1186
+ hive_partitioning: bool = False,
1187
+ union_by_name: bool = False,
1188
+ compression: str | None = None,
1189
+ connection: DuckDBPyConnection | None = None,
1190
+ ) -> DuckDBPyRelation: ...
1191
+ @pytyping.overload
1192
+ def from_parquet(
1193
+ file_globs: Sequence[str],
1194
+ binary_as_string: bool = False,
1195
+ *,
1196
+ file_row_number: bool = False,
1197
+ filename: bool = False,
1198
+ hive_partitioning: bool = False,
1199
+ union_by_name: bool = False,
1200
+ compression: pytyping.Any = None,
1201
+ connection: DuckDBPyConnection | None = None,
1202
+ ) -> DuckDBPyRelation: ...
1203
+ def from_query(
1204
+ query: Statement | str,
1205
+ *,
1206
+ alias: str = "",
1207
+ params: object = None,
1208
+ connection: DuckDBPyConnection | None = None,
1209
+ ) -> DuckDBPyRelation: ...
1210
+ def get_table_names(
1211
+ query: str, *, qualified: bool = False, connection: DuckDBPyConnection | None = None
1212
+ ) -> set[str]: ...
1213
+ def install_extension(
1214
+ extension: str,
1215
+ *,
1216
+ force_install: bool = False,
1217
+ repository: str | None = None,
1218
+ repository_url: str | None = None,
1219
+ version: str | None = None,
1220
+ connection: DuckDBPyConnection | None = None,
1221
+ ) -> None: ...
1222
+ def interrupt(*, connection: DuckDBPyConnection | None = None) -> None: ...
1223
+ def limit(
1224
+ df: pandas.DataFrame,
1225
+ n: pytyping.SupportsInt,
1226
+ offset: pytyping.SupportsInt = 0,
1227
+ *,
1228
+ connection: DuckDBPyConnection | None = None,
1229
+ ) -> DuckDBPyRelation: ...
1230
+ def list_filesystems(*, connection: DuckDBPyConnection | None = None) -> list[str]: ...
1231
+ def list_type(
1232
+ type: sqltypes.DuckDBPyType, *, connection: DuckDBPyConnection | None = None
1233
+ ) -> sqltypes.DuckDBPyType: ...
1234
+ def load_extension(extension: str, *, connection: DuckDBPyConnection | None = None) -> None: ...
1235
+ def map_type(
1236
+ key: sqltypes.DuckDBPyType,
1237
+ value: sqltypes.DuckDBPyType,
1238
+ *,
1239
+ connection: DuckDBPyConnection | None = None,
1240
+ ) -> sqltypes.DuckDBPyType: ...
1241
+ def order(
1242
+ df: pandas.DataFrame, order_expr: str, *, connection: DuckDBPyConnection | None = None
1243
+ ) -> DuckDBPyRelation: ...
1244
+ def pl(
1245
+ rows_per_batch: pytyping.SupportsInt = 1000000,
1246
+ *,
1247
+ lazy: bool = False,
1248
+ connection: DuckDBPyConnection | None = None,
1249
+ ) -> polars.DataFrame: ...
1250
+ def project(
1251
+ df: pandas.DataFrame, *args: str | Expression, groups: str = "", connection: DuckDBPyConnection | None = None
1252
+ ) -> DuckDBPyRelation: ...
1253
+ def query(
1254
+ query: Statement | str,
1255
+ *,
1256
+ alias: str = "",
1257
+ params: object = None,
1258
+ connection: DuckDBPyConnection | None = None,
1259
+ ) -> DuckDBPyRelation: ...
1260
+ def query_df(
1261
+ df: pandas.DataFrame,
1262
+ virtual_table_name: str,
1263
+ sql_query: str,
1264
+ *,
1265
+ connection: DuckDBPyConnection | None = None,
1266
+ ) -> DuckDBPyRelation: ...
1267
+ def query_progress(*, connection: DuckDBPyConnection | None = None) -> float: ...
1268
+ def read_csv(
1269
+ path_or_buffer: str | bytes | os.PathLike[str],
1270
+ header: bool | int | None = None,
1271
+ compression: str | None = None,
1272
+ sep: str | None = None,
1273
+ delimiter: str | None = None,
1274
+ files_to_sniff: int | None = None,
1275
+ comment: str | None = None,
1276
+ thousands: str | None = None,
1277
+ dtype: dict[str, str] | list[str] | None = None,
1278
+ na_values: str | list[str] | None = None,
1279
+ skiprows: int | None = None,
1280
+ quotechar: str | None = None,
1281
+ escapechar: str | None = None,
1282
+ encoding: str | None = None,
1283
+ parallel: bool | None = None,
1284
+ date_format: str | None = None,
1285
+ timestamp_format: str | None = None,
1286
+ sample_size: int | None = None,
1287
+ auto_detect: bool | int | None = None,
1288
+ all_varchar: bool | None = None,
1289
+ normalize_names: bool | None = None,
1290
+ null_padding: bool | None = None,
1291
+ names: list[str] | None = None,
1292
+ lineterminator: str | None = None,
1293
+ columns: dict[str, str] | None = None,
1294
+ auto_type_candidates: list[str] | None = None,
1295
+ max_line_size: int | None = None,
1296
+ ignore_errors: bool | None = None,
1297
+ store_rejects: bool | None = None,
1298
+ rejects_table: str | None = None,
1299
+ rejects_scan: str | None = None,
1300
+ rejects_limit: int | None = None,
1301
+ force_not_null: list[str] | None = None,
1302
+ buffer_size: int | None = None,
1303
+ decimal: str | None = None,
1304
+ allow_quoted_nulls: bool | None = None,
1305
+ filename: bool | str | None = None,
1306
+ hive_partitioning: bool | None = None,
1307
+ union_by_name: bool | None = None,
1308
+ hive_types: dict[str, str] | None = None,
1309
+ hive_types_autocast: bool | None = None,
1310
+ strict_mode: bool | None = None,
1311
+ ) -> DuckDBPyRelation: ...
1312
def read_json(
    path_or_buffer: str | bytes | os.PathLike[str],
    *,
    columns: dict[str, str] | None = None,
    sample_size: int | None = None,
    maximum_depth: int | None = None,
    records: str | None = None,
    format: str | None = None,
    date_format: str | None = None,
    timestamp_format: str | None = None,
    compression: str | None = None,
    maximum_object_size: int | None = None,
    ignore_errors: bool | None = None,
    convert_strings_to_integers: bool | None = None,
    field_appearance_threshold: float | None = None,
    map_inference_threshold: int | None = None,
    maximum_sample_files: int | None = None,
    filename: bool | str | None = None,
    hive_partitioning: bool | None = None,
    union_by_name: bool | None = None,
    hive_types: dict[str, str] | None = None,
    hive_types_autocast: bool | None = None,
) -> DuckDBPyRelation:
    """Build a :class:`DuckDBPyRelation` from a JSON source.

    *path_or_buffer* accepts a path string, raw ``bytes``, or an
    ``os.PathLike``; all remaining options are keyword-only and default to
    ``None`` ("let DuckDB decide").  They mirror DuckDB's JSON reader
    options (``records``, ``format``, ``map_inference_threshold``, …).

    NOTE(review): no keyword-only ``connection`` parameter is declared here,
    unlike most other functions in this stub — confirm against the runtime.
    """
    ...
1335
@pytyping.overload
def read_parquet(
    file_glob: str,
    binary_as_string: bool = False,
    *,
    file_row_number: bool = False,
    filename: bool = False,
    hive_partitioning: bool = False,
    union_by_name: bool = False,
    compression: str | None = None,
    connection: DuckDBPyConnection | None = None,
) -> DuckDBPyRelation:
    """Overload: read Parquet file(s) matching a single glob string.

    Options are keyword-only flags defaulting to ``False``; *compression*
    is an optional codec name and *connection* selects the connection
    (module default when ``None`` — presumably; confirm).
    """
    ...
1347
@pytyping.overload
def read_parquet(
    file_globs: Sequence[str],
    binary_as_string: bool = False,
    *,
    file_row_number: bool = False,
    filename: bool = False,
    hive_partitioning: bool = False,
    union_by_name: bool = False,
    compression: pytyping.Any = None,
    connection: DuckDBPyConnection | None = None,
) -> DuckDBPyRelation:
    """Overload: read Parquet files from a sequence of glob strings.

    NOTE(review): *compression* is typed ``Any`` here but ``str | None`` in
    the single-glob overload — confirm whether the asymmetry is intentional.
    """
    ...
1359
def register(
    view_name: str,
    python_object: object,
    *,
    connection: DuckDBPyConnection | None = None,
) -> DuckDBPyConnection:
    """Expose *python_object* to SQL under *view_name*; returns the connection.

    The accepted object kinds (DataFrame, Arrow table, relation, …) are
    determined by the runtime, not this stub.
    """
    ...
1365
def register_filesystem(
    filesystem: fsspec.AbstractFileSystem, *, connection: DuckDBPyConnection | None = None
) -> None:
    """Register an fsspec filesystem so DuckDB can read/write through it."""
    ...
1368
def remove_function(name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection:
    """Remove the previously registered function *name*; returns the connection."""
    ...
1369
def rollback(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection:
    """Roll back the current transaction on *connection*; returns the connection."""
    ...
1370
def row_type(
    fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
    *,
    connection: DuckDBPyConnection | None = None,
) -> sqltypes.DuckDBPyType:
    """Construct a row/struct SQL type from named (dict) or positional (list) fields.

    Signature is identical to ``struct_type`` below — presumably an alias;
    confirm against the runtime.
    """
    ...
1375
def rowcount(*, connection: DuckDBPyConnection | None = None) -> int:
    """Return the row count of the connection's last operation (DB-API style)."""
    ...
1376
def set_default_connection(connection: DuckDBPyConnection) -> None:
    """Install *connection* as the module-wide default used by the
    module-level API when no explicit ``connection=`` is passed."""
    ...
1377
def sql(
    query: Statement | str,
    *,
    alias: str = "",
    params: object = None,
    connection: DuckDBPyConnection | None = None,
) -> DuckDBPyRelation:
    """Run *query* (SQL text or a prepared ``Statement``) and return a relation.

    *alias* names the resulting relation (empty string = let DuckDB pick);
    *params* carries bind parameters in whatever shapes the runtime accepts.
    """
    ...
1384
def sqltype(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType:
    """Parse a SQL type string (e.g. ``"INTEGER"``) into a ``DuckDBPyType``.

    Same signature as ``type`` below — presumably an alias; confirm.
    """
    ...
1385
def string_type(collation: str = "", *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType:
    """Build a VARCHAR-like type, optionally with a *collation* (empty = none)."""
    ...
1386
def struct_type(
    fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
    *,
    connection: DuckDBPyConnection | None = None,
) -> sqltypes.DuckDBPyType:
    """Construct a STRUCT SQL type from named (dict) or positional (list) fields."""
    ...
1391
def table(table_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation:
    """Return a relation over the existing table *table_name*."""
    ...
1392
def table_function(
    name: str,
    parameters: object = None,
    *,
    connection: DuckDBPyConnection | None = None,
) -> DuckDBPyRelation:
    """Invoke the table-producing function *name* with optional *parameters*
    and return the resulting relation."""
    ...
1398
def tf(*, connection: DuckDBPyConnection | None = None) -> dict[str, tensorflow.Tensor]:
    """Fetch the pending result as TensorFlow tensors, keyed by ``str``
    (presumably column name — confirm)."""
    ...
1399
def tokenize(query: str) -> list[tuple[int, token_type]]:
    """Tokenize SQL text into ``(position, token_type)`` pairs.

    Note: unlike most module-level functions here, this takes no
    ``connection`` — tokenizing needs no database state.
    """
    ...
1400
def torch(*, connection: DuckDBPyConnection | None = None) -> dict[str, pytorch.Tensor]:
    """Fetch the pending result as PyTorch tensors, keyed by ``str``
    (presumably column name — confirm)."""
    ...
1401
def type(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType:
    """Parse a SQL type string into a ``DuckDBPyType``.

    Shadows the ``type`` builtin by design — the stub must mirror the
    runtime module's public name.
    """
    ...
1402
def union_type(
    members: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
    *,
    connection: DuckDBPyConnection | None = None,
) -> sqltypes.DuckDBPyType:
    """Construct a UNION SQL type from named (dict) or positional (list) members."""
    ...
1407
def unregister(view_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection:
    """Undo a prior ``register`` of *view_name*; returns the connection."""
    ...
1408
def unregister_filesystem(name: str, *, connection: DuckDBPyConnection | None = None) -> None:
    """Remove a filesystem previously added via ``register_filesystem``,
    identified by *name* (a string, not the filesystem object)."""
    ...
1409
def values(
    *args: list[pytyping.Any] | tuple[Expression, ...] | Expression, connection: DuckDBPyConnection | None = None
) -> DuckDBPyRelation:
    """Build a VALUES relation from row lists, expression tuples, or single
    expressions passed as positional arguments."""
    ...
1412
def view(view_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation:
    """Return a relation over the existing view *view_name*."""
    ...
1413
def write_csv(
    df: pandas.DataFrame,
    filename: str,
    *,
    sep: str | None = None,
    na_rep: str | None = None,
    header: bool | None = None,
    quotechar: str | None = None,
    escapechar: str | None = None,
    date_format: str | None = None,
    timestamp_format: str | None = None,
    quoting: str | int | None = None,
    encoding: str | None = None,
    compression: str | None = None,
    overwrite: bool | None = None,
    per_thread_output: bool | None = None,
    use_tmp_file: bool | None = None,
    partition_by: list[str] | None = None,
    write_partition_columns: bool | None = None,
) -> None:
    """Write a pandas DataFrame to CSV at *filename* via DuckDB.

    Keyword-only options default to ``None`` ("let DuckDB decide") and mix
    pandas ``to_csv``-style names (``sep``, ``na_rep``, ``quoting``) with
    DuckDB COPY options (``per_thread_output``, ``partition_by``, …).
    Returns ``None`` — output goes to disk only.
    """
    ...
1433
+
1434
# Build/runtime metadata exported by the native extension module.
__formatted_python_version__: str
__git_revision__: str
__interactive__: bool  # whether the interpreter session is interactive
__jupyter__: bool  # whether running under Jupyter (affects display, presumably)
__standard_vector_size__: int  # DuckDB vector size the binary was built with
__version__: str
# Internal capsule used to tear down the default connection; not public API.
_clean_default_connection: pytyping.Any # value = <capsule object>
# DB-API 2.0 (PEP 249) required module globals.
apilevel: str
paramstyle: str
threadsafety: int