duckdb-1.2.2.dev194-cp311-cp311-win_amd64.whl → duckdb-1.4.3.dev8-cp311-cp311-win_amd64.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
Files changed (58)
  1. _duckdb-stubs/__init__.pyi +1478 -0
  2. _duckdb-stubs/_func.pyi +46 -0
  3. _duckdb-stubs/_sqltypes.pyi +75 -0
  4. duckdb/duckdb.cp311-win_amd64.pyd → _duckdb.cp311-win_amd64.pyd +0 -0
  5. adbc_driver_duckdb/__init__.py +10 -8
  6. adbc_driver_duckdb/dbapi.py +4 -5
  7. duckdb/__init__.py +343 -384
  8. duckdb/_dbapi_type_object.py +231 -0
  9. duckdb/_version.py +22 -0
  10. duckdb/bytes_io_wrapper.py +12 -9
  11. duckdb/experimental/__init__.py +5 -2
  12. duckdb/experimental/spark/__init__.py +3 -4
  13. duckdb/experimental/spark/_globals.py +8 -8
  14. duckdb/experimental/spark/_typing.py +7 -9
  15. duckdb/experimental/spark/conf.py +16 -15
  16. duckdb/experimental/spark/context.py +60 -44
  17. duckdb/experimental/spark/errors/__init__.py +33 -35
  18. duckdb/experimental/spark/errors/error_classes.py +1 -1
  19. duckdb/experimental/spark/errors/exceptions/__init__.py +1 -1
  20. duckdb/experimental/spark/errors/exceptions/base.py +39 -88
  21. duckdb/experimental/spark/errors/utils.py +11 -16
  22. duckdb/experimental/spark/exception.py +9 -6
  23. duckdb/experimental/spark/sql/__init__.py +5 -5
  24. duckdb/experimental/spark/sql/_typing.py +8 -15
  25. duckdb/experimental/spark/sql/catalog.py +21 -20
  26. duckdb/experimental/spark/sql/column.py +48 -55
  27. duckdb/experimental/spark/sql/conf.py +9 -8
  28. duckdb/experimental/spark/sql/dataframe.py +213 -231
  29. duckdb/experimental/spark/sql/functions.py +1347 -1218
  30. duckdb/experimental/spark/sql/group.py +56 -52
  31. duckdb/experimental/spark/sql/readwriter.py +80 -94
  32. duckdb/experimental/spark/sql/session.py +64 -59
  33. duckdb/experimental/spark/sql/streaming.py +9 -10
  34. duckdb/experimental/spark/sql/type_utils.py +67 -65
  35. duckdb/experimental/spark/sql/types.py +309 -345
  36. duckdb/experimental/spark/sql/udf.py +6 -6
  37. duckdb/filesystem.py +26 -16
  38. duckdb/func/__init__.py +3 -0
  39. duckdb/functional/__init__.py +12 -16
  40. duckdb/polars_io.py +284 -0
  41. duckdb/query_graph/__main__.py +91 -96
  42. duckdb/sqltypes/__init__.py +63 -0
  43. duckdb/typing/__init__.py +18 -8
  44. duckdb/udf.py +10 -5
  45. duckdb/value/__init__.py +1 -0
  46. duckdb/value/{constant.py → constant/__init__.py} +62 -60
  47. duckdb-1.4.3.dev8.dist-info/METADATA +88 -0
  48. duckdb-1.4.3.dev8.dist-info/RECORD +52 -0
  49. {duckdb-1.2.2.dev194.dist-info → duckdb-1.4.3.dev8.dist-info}/WHEEL +1 -1
  50. duckdb-1.4.3.dev8.dist-info/licenses/LICENSE +7 -0
  51. duckdb-1.2.2.dev194.dist-info/METADATA +0 -29
  52. duckdb-1.2.2.dev194.dist-info/RECORD +0 -47
  53. duckdb-1.2.2.dev194.dist-info/top_level.txt +0 -3
  54. duckdb-stubs/__init__.pyi +0 -708
  55. duckdb-stubs/functional/__init__.pyi +0 -33
  56. duckdb-stubs/typing/__init__.pyi +0 -37
  57. duckdb-stubs/value/constant/__init__.pyi +0 -116
  58. /duckdb-stubs/value/__init__.pyi → /duckdb/py.typed +0 -0
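
The headline changes in this list: the type stubs move from duckdb-stubs/ into _duckdb-stubs/ (tracking the extension module's rename from duckdb/duckdb.cp311-win_amd64.pyd to _duckdb.cp311-win_amd64.pyd), new duckdb/sqltypes and duckdb/func packages appear alongside the existing duckdb/typing and duckdb/functional ones, and duckdb/polars_io.py appears to add Polars integration. A quick smoke test of the new layout (a sketch; module contents are only as shown in the stub diff below):

    import duckdb
    from duckdb import sqltypes, func  # new modules per this diff

    rel = duckdb.sql("SELECT 42 AS answer")
    print(rel.fetchall())  # [(42,)]

The first hunk below is the new _duckdb-stubs/__init__.pyi.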
@@ -0,0 +1,1478 @@
+ import os
+ import pathlib
+ import typing as pytyping
+ from typing_extensions import Self
+
+ if pytyping.TYPE_CHECKING:
+     import fsspec
+     import numpy as np
+     import polars
+     import pandas
+     import pyarrow.lib
+     import torch as pytorch
+     import tensorflow
+     from collections.abc import Callable, Sequence, Mapping
+     from duckdb import sqltypes, func
+
+ # the field_ids argument to to_parquet and write_parquet has a recursive structure
+ ParquetFieldIdsType = Mapping[str, pytyping.Union[int, "ParquetFieldIdsType"]]
+
+ __all__: list[str] = [
+     "BinderException",
+     "CSVLineTerminator",
+     "CaseExpression",
+     "CatalogException",
+     "CoalesceOperator",
+     "ColumnExpression",
+     "ConnectionException",
+     "ConstantExpression",
+     "ConstraintException",
+     "ConversionException",
+     "DataError",
+     "DatabaseError",
+     "DefaultExpression",
+     "DependencyException",
+     "DuckDBPyConnection",
+     "DuckDBPyRelation",
+     "Error",
+     "ExpectedResultType",
+     "ExplainType",
+     "Expression",
+     "FatalException",
+     "FunctionExpression",
+     "HTTPException",
+     "IOException",
+     "IntegrityError",
+     "InternalError",
+     "InternalException",
+     "InterruptException",
+     "InvalidInputException",
+     "InvalidTypeException",
+     "LambdaExpression",
+     "NotImplementedException",
+     "NotSupportedError",
+     "OperationalError",
+     "OutOfMemoryException",
+     "OutOfRangeException",
+     "ParserException",
+     "PermissionException",
+     "ProgrammingError",
+     "PythonExceptionHandling",
+     "RenderMode",
+     "SQLExpression",
+     "SequenceException",
+     "SerializationException",
+     "StarExpression",
+     "Statement",
+     "StatementType",
+     "SyntaxException",
+     "TransactionException",
+     "TypeMismatchException",
+     "Warning",
+     "aggregate",
+     "alias",
+     "apilevel",
+     "append",
+     "array_type",
+     "arrow",
+     "begin",
+     "checkpoint",
+     "close",
+     "commit",
+     "connect",
+     "create_function",
+     "cursor",
+     "decimal_type",
+     "default_connection",
+     "description",
+     "df",
+     "distinct",
+     "dtype",
+     "duplicate",
+     "enum_type",
+     "execute",
+     "executemany",
+     "extract_statements",
+     "fetch_arrow_table",
+     "fetch_df",
+     "fetch_df_chunk",
+     "fetch_record_batch",
+     "fetchall",
+     "fetchdf",
+     "fetchmany",
+     "fetchnumpy",
+     "fetchone",
+     "filesystem_is_registered",
+     "filter",
+     "from_arrow",
+     "from_csv_auto",
+     "from_df",
+     "from_parquet",
+     "from_query",
+     "get_table_names",
+     "install_extension",
+     "interrupt",
+     "limit",
+     "list_filesystems",
+     "list_type",
+     "load_extension",
+     "map_type",
+     "order",
+     "paramstyle",
+     "pl",
+     "project",
+     "query",
+     "query_df",
+     "query_progress",
+     "read_csv",
+     "read_json",
+     "read_parquet",
+     "register",
+     "register_filesystem",
+     "remove_function",
+     "rollback",
+     "row_type",
+     "rowcount",
+     "set_default_connection",
+     "sql",
+     "sqltype",
+     "string_type",
+     "struct_type",
+     "table",
+     "table_function",
+     "tf",
+     "threadsafety",
+     "token_type",
+     "tokenize",
+     "torch",
+     "type",
+     "union_type",
+     "unregister",
+     "unregister_filesystem",
+     "values",
+     "view",
+     "write_csv",
+ ]
+
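The export list mixes the relation/expression API with the PEP 249 (DB-API 2.0) module globals apilevel, paramstyle, and threadsafety. Their values are not part of this hunk; a quick way to inspect them (illustrative):

    import duckdb

    # PEP 249 module globals re-exported above; values are not shown in this diff.
    print(duckdb.apilevel, duckdb.paramstyle, duckdb.threadsafety)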
+ class BinderException(ProgrammingError): ...
+
+ class CSVLineTerminator:
+     CARRIAGE_RETURN_LINE_FEED: pytyping.ClassVar[
+         CSVLineTerminator
+     ]  # value = <CSVLineTerminator.CARRIAGE_RETURN_LINE_FEED: 1>
+     LINE_FEED: pytyping.ClassVar[CSVLineTerminator]  # value = <CSVLineTerminator.LINE_FEED: 0>
+     __members__: pytyping.ClassVar[
+         dict[str, CSVLineTerminator]
+     ]  # value = {'LINE_FEED': <CSVLineTerminator.LINE_FEED: 0>, 'CARRIAGE_RETURN_LINE_FEED': <CSVLineTerminator.CARRIAGE_RETURN_LINE_FEED: 1>} # noqa: E501
+     def __eq__(self, other: object) -> bool: ...
+     def __getstate__(self) -> int: ...
+     def __hash__(self) -> int: ...
+     def __index__(self) -> int: ...
+     def __init__(self, value: pytyping.SupportsInt) -> None: ...
+     def __int__(self) -> int: ...
+     def __ne__(self, other: object) -> bool: ...
+     def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def value(self) -> int: ...
+
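CSVLineTerminator follows the pybind11-style enum pattern (int-backed dunders and a __members__ mapping rather than enum.Enum inheritance), as do the other enums in this stub. Minimal usage sketch:

    import duckdb

    lt = duckdb.CSVLineTerminator.LINE_FEED
    print(lt.name, lt.value)  # LINE_FEED 0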
+ class CatalogException(ProgrammingError): ...
+ class ConnectionException(OperationalError): ...
+ class ConstraintException(IntegrityError): ...
+ class ConversionException(DataError): ...
+ class DataError(DatabaseError): ...
+ class DatabaseError(Error): ...
+ class DependencyException(DatabaseError): ...
+
+ class DuckDBPyConnection:
+     def __del__(self) -> None: ...
+     def __enter__(self) -> Self: ...
+     def __exit__(self, exc_type: object, exc: object, traceback: object) -> None: ...
+     def append(self, table_name: str, df: pandas.DataFrame, *, by_name: bool = False) -> DuckDBPyConnection: ...
+     def array_type(self, type: sqltypes.DuckDBPyType, size: pytyping.SupportsInt) -> sqltypes.DuckDBPyType: ...
+     def arrow(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+     def begin(self) -> DuckDBPyConnection: ...
+     def checkpoint(self) -> DuckDBPyConnection: ...
+     def close(self) -> None: ...
+     def commit(self) -> DuckDBPyConnection: ...
+     def create_function(
+         self,
+         name: str,
+         function: Callable[..., pytyping.Any],
+         parameters: list[sqltypes.DuckDBPyType] | None = None,
+         return_type: sqltypes.DuckDBPyType | None = None,
+         *,
+         type: func.PythonUDFType = ...,
+         null_handling: func.FunctionNullHandling = ...,
+         exception_handling: PythonExceptionHandling = ...,
+         side_effects: bool = False,
+     ) -> DuckDBPyConnection: ...
+     def cursor(self) -> DuckDBPyConnection: ...
+     def decimal_type(self, width: pytyping.SupportsInt, scale: pytyping.SupportsInt) -> sqltypes.DuckDBPyType: ...
+     def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+     def dtype(self, type_str: str) -> sqltypes.DuckDBPyType: ...
+     def duplicate(self) -> DuckDBPyConnection: ...
+     def enum_type(
+         self, name: str, type: sqltypes.DuckDBPyType, values: list[pytyping.Any]
+     ) -> sqltypes.DuckDBPyType: ...
+     def execute(self, query: Statement | str, parameters: object = None) -> DuckDBPyConnection: ...
+     def executemany(self, query: Statement | str, parameters: object = None) -> DuckDBPyConnection: ...
+     def extract_statements(self, query: str) -> list[Statement]: ...
+     def fetch_arrow_table(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
+     def fetch_df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+     def fetch_df_chunk(
+         self, vectors_per_chunk: pytyping.SupportsInt = 1, *, date_as_object: bool = False
+     ) -> pandas.DataFrame: ...
+     def fetch_record_batch(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+     def fetchall(self) -> list[tuple[pytyping.Any, ...]]: ...
+     def fetchdf(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+     def fetchmany(self, size: pytyping.SupportsInt = 1) -> list[tuple[pytyping.Any, ...]]: ...
+     def fetchnumpy(self) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
+     def fetchone(self) -> tuple[pytyping.Any, ...] | None: ...
+     def filesystem_is_registered(self, name: str) -> bool: ...
+     def from_arrow(self, arrow_object: object) -> DuckDBPyRelation: ...
+     def from_csv_auto(
+         self,
+         path_or_buffer: str | bytes | os.PathLike[str] | os.PathLike[bytes],
+         header: bool | int | None = None,
+         compression: str | None = None,
+         sep: str | None = None,
+         delimiter: str | None = None,
+         files_to_sniff: int | None = None,
+         comment: str | None = None,
+         thousands: str | None = None,
+         dtype: dict[str, str] | list[str] | None = None,
+         na_values: str | list[str] | None = None,
+         skiprows: int | None = None,
+         quotechar: str | None = None,
+         escapechar: str | None = None,
+         encoding: str | None = None,
+         parallel: bool | None = None,
+         date_format: str | None = None,
+         timestamp_format: str | None = None,
+         sample_size: int | None = None,
+         auto_detect: bool | int | None = None,
+         all_varchar: bool | None = None,
+         normalize_names: bool | None = None,
+         null_padding: bool | None = None,
+         names: list[str] | None = None,
+         lineterminator: str | None = None,
+         columns: dict[str, str] | None = None,
+         auto_type_candidates: list[str] | None = None,
+         max_line_size: int | None = None,
+         ignore_errors: bool | None = None,
+         store_rejects: bool | None = None,
+         rejects_table: str | None = None,
+         rejects_scan: str | None = None,
+         rejects_limit: int | None = None,
+         force_not_null: list[str] | None = None,
+         buffer_size: int | None = None,
+         decimal: str | None = None,
+         allow_quoted_nulls: bool | None = None,
+         filename: bool | str | None = None,
+         hive_partitioning: bool | None = None,
+         union_by_name: bool | None = None,
+         hive_types: dict[str, str] | None = None,
+         hive_types_autocast: bool | None = None,
+         strict_mode: bool | None = None,
+     ) -> DuckDBPyRelation: ...
+     def from_df(self, df: pandas.DataFrame) -> DuckDBPyRelation: ...
+     @pytyping.overload
+     def from_parquet(
+         self,
+         file_glob: str,
+         binary_as_string: bool = False,
+         *,
+         file_row_number: bool = False,
+         filename: bool = False,
+         hive_partitioning: bool = False,
+         union_by_name: bool = False,
+         compression: str | None = None,
+     ) -> DuckDBPyRelation: ...
+     @pytyping.overload
+     def from_parquet(
+         self,
+         file_globs: Sequence[str],
+         binary_as_string: bool = False,
+         *,
+         file_row_number: bool = False,
+         filename: bool = False,
+         hive_partitioning: bool = False,
+         union_by_name: bool = False,
+         compression: str | None = None,
+     ) -> DuckDBPyRelation: ...
+     def from_query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
+     def get_table_names(self, query: str, *, qualified: bool = False) -> set[str]: ...
+     def install_extension(
+         self,
+         extension: str,
+         *,
+         force_install: bool = False,
+         repository: str | None = None,
+         repository_url: str | None = None,
+         version: str | None = None,
+     ) -> None: ...
+     def interrupt(self) -> None: ...
+     def list_filesystems(self) -> list[str]: ...
+     def list_type(self, type: sqltypes.DuckDBPyType) -> sqltypes.DuckDBPyType: ...
+     def load_extension(self, extension: str) -> None: ...
+     def map_type(self, key: sqltypes.DuckDBPyType, value: sqltypes.DuckDBPyType) -> sqltypes.DuckDBPyType: ...
+     @pytyping.overload
+     def pl(
+         self, rows_per_batch: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[False] = ...
+     ) -> polars.DataFrame: ...
+     @pytyping.overload
+     def pl(
+         self, rows_per_batch: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[True]
+     ) -> polars.LazyFrame: ...
+     @pytyping.overload
+     def pl(
+         self, rows_per_batch: pytyping.SupportsInt = 1000000, *, lazy: bool = False
+     ) -> pytyping.Union[polars.DataFrame, polars.LazyFrame]: ...
+     def query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
+     def query_progress(self) -> float: ...
+     def read_csv(
+         self,
+         path_or_buffer: str | bytes | os.PathLike[str],
+         header: bool | int | None = None,
+         compression: str | None = None,
+         sep: str | None = None,
+         delimiter: str | None = None,
+         files_to_sniff: int | None = None,
+         comment: str | None = None,
+         thousands: str | None = None,
+         dtype: dict[str, str] | list[str] | None = None,
+         na_values: str | list[str] | None = None,
+         skiprows: int | None = None,
+         quotechar: str | None = None,
+         escapechar: str | None = None,
+         encoding: str | None = None,
+         parallel: bool | None = None,
+         date_format: str | None = None,
+         timestamp_format: str | None = None,
+         sample_size: int | None = None,
+         auto_detect: bool | int | None = None,
+         all_varchar: bool | None = None,
+         normalize_names: bool | None = None,
+         null_padding: bool | None = None,
+         names: list[str] | None = None,
+         lineterminator: str | None = None,
+         columns: dict[str, str] | None = None,
+         auto_type_candidates: list[str] | None = None,
+         max_line_size: int | None = None,
+         ignore_errors: bool | None = None,
+         store_rejects: bool | None = None,
+         rejects_table: str | None = None,
+         rejects_scan: str | None = None,
+         rejects_limit: int | None = None,
+         force_not_null: list[str] | None = None,
+         buffer_size: int | None = None,
+         decimal: str | None = None,
+         allow_quoted_nulls: bool | None = None,
+         filename: bool | str | None = None,
+         hive_partitioning: bool | None = None,
+         union_by_name: bool | None = None,
+         hive_types: dict[str, str] | None = None,
+         hive_types_autocast: bool | None = None,
+         strict_mode: bool | None = None,
+     ) -> DuckDBPyRelation: ...
+     def read_json(
+         self,
+         path_or_buffer: str | bytes | os.PathLike[str],
+         *,
+         columns: dict[str, str] | None = None,
+         sample_size: int | None = None,
+         maximum_depth: int | None = None,
+         records: str | None = None,
+         format: str | None = None,
+         date_format: str | None = None,
+         timestamp_format: str | None = None,
+         compression: str | None = None,
+         maximum_object_size: int | None = None,
+         ignore_errors: bool | None = None,
+         convert_strings_to_integers: bool | None = None,
+         field_appearance_threshold: float | None = None,
+         map_inference_threshold: int | None = None,
+         maximum_sample_files: int | None = None,
+         filename: bool | str | None = None,
+         hive_partitioning: bool | None = None,
+         union_by_name: bool | None = None,
+         hive_types: dict[str, str] | None = None,
+         hive_types_autocast: bool | None = None,
+     ) -> DuckDBPyRelation: ...
+     @pytyping.overload
+     def read_parquet(
+         self,
+         file_glob: str,
+         binary_as_string: bool = False,
+         *,
+         file_row_number: bool = False,
+         filename: bool = False,
+         hive_partitioning: bool = False,
+         union_by_name: bool = False,
+         compression: str | None = None,
+     ) -> DuckDBPyRelation: ...
+     @pytyping.overload
+     def read_parquet(
+         self,
+         file_globs: Sequence[str],
+         binary_as_string: bool = False,
+         *,
+         file_row_number: bool = False,
+         filename: bool = False,
+         hive_partitioning: bool = False,
+         union_by_name: bool = False,
+         compression: pytyping.Any = None,
+     ) -> DuckDBPyRelation: ...
+     def register(self, view_name: str, python_object: object) -> DuckDBPyConnection: ...
+     def register_filesystem(self, filesystem: fsspec.AbstractFileSystem) -> None: ...
+     def remove_function(self, name: str) -> DuckDBPyConnection: ...
+     def rollback(self) -> DuckDBPyConnection: ...
+     def row_type(
+         self, fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType]
+     ) -> sqltypes.DuckDBPyType: ...
+     def sql(self, query: Statement | str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
+     def sqltype(self, type_str: str) -> sqltypes.DuckDBPyType: ...
+     def string_type(self, collation: str = "") -> sqltypes.DuckDBPyType: ...
+     def struct_type(
+         self, fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType]
+     ) -> sqltypes.DuckDBPyType: ...
+     def table(self, table_name: str) -> DuckDBPyRelation: ...
+     def table_function(self, name: str, parameters: object = None) -> DuckDBPyRelation: ...
+     def tf(self) -> dict[str, tensorflow.Tensor]: ...
+     def torch(self) -> dict[str, pytorch.Tensor]: ...
+     def type(self, type_str: str) -> sqltypes.DuckDBPyType: ...
+     def union_type(
+         self, members: list[sqltypes.DuckDBPyType] | dict[str, sqltypes.DuckDBPyType]
+     ) -> sqltypes.DuckDBPyType: ...
+     def unregister(self, view_name: str) -> DuckDBPyConnection: ...
+     def unregister_filesystem(self, name: str) -> None: ...
+     def values(self, *args: list[pytyping.Any] | tuple[Expression, ...] | Expression) -> DuckDBPyRelation: ...
+     def view(self, view_name: str) -> DuckDBPyRelation: ...
+     @property
+     def description(self) -> list[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]]: ...
+     @property
+     def rowcount(self) -> int: ...
+
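DuckDBPyConnection doubles as the DB-API cursor (execute/fetchone/fetchall live on the connection itself) and as a context manager via __enter__/__exit__. A minimal sketch against the interface typed above:

    import duckdb

    with duckdb.connect(":memory:") as con:
        con.execute("CREATE TABLE t AS SELECT range AS i FROM range(5)")
        print(con.execute("SELECT sum(i) FROM t").fetchone())  # (10,)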
+ class DuckDBPyRelation:
+     def __arrow_c_stream__(self, requested_schema: object | None = None) -> pytyping.Any: ...
+     def __contains__(self, name: str) -> bool: ...
+     def __getattr__(self, name: str) -> DuckDBPyRelation: ...
+     def __getitem__(self, name: str) -> DuckDBPyRelation: ...
+     def __len__(self) -> int: ...
+     def aggregate(self, aggr_expr: Expression | str, group_expr: Expression | str = "") -> DuckDBPyRelation: ...
+     def any_value(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def apply(
+         self,
+         function_name: str,
+         function_aggr: str,
+         group_expr: str = "",
+         function_parameter: str = "",
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def arg_max(
+         self, arg_column: str, value_column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def arg_min(
+         self, arg_column: str, value_column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def arrow(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+     def avg(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def bit_and(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def bit_or(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def bit_xor(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def bitstring_agg(
+         self,
+         column: str,
+         min: int | None = None,
+         max: int | None = None,
+         groups: str = "",
+         window_spec: str = "",
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def bool_and(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def bool_or(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def close(self) -> None: ...
+     def count(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def create(self, table_name: str) -> None: ...
+     def create_view(self, view_name: str, replace: bool = True) -> DuckDBPyRelation: ...
+     def cross(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+     def cume_dist(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+     def dense_rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+     def describe(self) -> DuckDBPyRelation: ...
+     def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+     def distinct(self) -> DuckDBPyRelation: ...
+     def except_(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+     def execute(self) -> DuckDBPyRelation: ...
+     def explain(self, type: ExplainType = ExplainType.STANDARD) -> str: ...
+     def favg(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def fetch_arrow_reader(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+     def fetch_arrow_table(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
+     def fetch_df_chunk(
+         self, vectors_per_chunk: pytyping.SupportsInt = 1, *, date_as_object: bool = False
+     ) -> pandas.DataFrame: ...
+     def fetch_record_batch(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+     def fetchall(self) -> list[tuple[pytyping.Any, ...]]: ...
+     def fetchdf(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+     def fetchmany(self, size: pytyping.SupportsInt = 1) -> list[tuple[pytyping.Any, ...]]: ...
+     def fetchnumpy(self) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
+     def fetchone(self) -> tuple[pytyping.Any, ...] | None: ...
+     def filter(self, filter_expr: Expression | str) -> DuckDBPyRelation: ...
+     def first(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+     def first_value(self, column: str, window_spec: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+     def fsum(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def geomean(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+     def histogram(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def insert(self, values: pytyping.List[object]) -> None: ...
+     def insert_into(self, table_name: str) -> None: ...
+     def intersect(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+     def join(
+         self, other_rel: DuckDBPyRelation, condition: Expression | str, how: str = "inner"
+     ) -> DuckDBPyRelation: ...
+     def lag(
+         self,
+         column: str,
+         window_spec: str,
+         offset: pytyping.SupportsInt = 1,
+         default_value: str = "NULL",
+         ignore_nulls: bool = False,
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def last(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+     def last_value(self, column: str, window_spec: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+     def lead(
+         self,
+         column: str,
+         window_spec: str,
+         offset: pytyping.SupportsInt = 1,
+         default_value: str = "NULL",
+         ignore_nulls: bool = False,
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def limit(self, n: pytyping.SupportsInt, offset: pytyping.SupportsInt = 0) -> DuckDBPyRelation: ...
+     def list(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def map(
+         self, map_function: Callable[..., pytyping.Any], *, schema: dict[str, sqltypes.DuckDBPyType] | None = None
+     ) -> DuckDBPyRelation: ...
+     def max(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def mean(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def median(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def min(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def mode(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def n_tile(
+         self, window_spec: str, num_buckets: pytyping.SupportsInt, projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def nth_value(
+         self,
+         column: str,
+         window_spec: str,
+         offset: pytyping.SupportsInt,
+         ignore_nulls: bool = False,
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def order(self, order_expr: str) -> DuckDBPyRelation: ...
+     def percent_rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+     @pytyping.overload
+     def pl(
+         self, batch_size: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[False] = ...
+     ) -> polars.DataFrame: ...
+     @pytyping.overload
+     def pl(self, batch_size: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[True]) -> polars.LazyFrame: ...
+     @pytyping.overload
+     def pl(
+         self, batch_size: pytyping.SupportsInt = 1000000, *, lazy: bool = False
+     ) -> pytyping.Union[polars.DataFrame, polars.LazyFrame]: ...
+     def product(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def project(self, *args: str | Expression, groups: str = "") -> DuckDBPyRelation: ...
+     def quantile(
+         self,
+         column: str,
+         q: float | pytyping.List[float] = 0.5,
+         groups: str = "",
+         window_spec: str = "",
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def quantile_cont(
+         self,
+         column: str,
+         q: float | pytyping.List[float] = 0.5,
+         groups: str = "",
+         window_spec: str = "",
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def quantile_disc(
+         self,
+         column: str,
+         q: float | pytyping.List[float] = 0.5,
+         groups: str = "",
+         window_spec: str = "",
+         projected_columns: str = "",
+     ) -> DuckDBPyRelation: ...
+     def query(self, virtual_table_name: str, sql_query: str) -> DuckDBPyRelation: ...
+     def rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+     def rank_dense(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+     def record_batch(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.RecordBatchReader: ...
+     def row_number(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+     def select(self, *args: str | Expression, groups: str = "") -> DuckDBPyRelation: ...
+     def select_dtypes(self, types: pytyping.List[sqltypes.DuckDBPyType | str]) -> DuckDBPyRelation: ...
+     def select_types(self, types: pytyping.List[sqltypes.DuckDBPyType | str]) -> DuckDBPyRelation: ...
+     def set_alias(self, alias: str) -> DuckDBPyRelation: ...
+     def show(
+         self,
+         *,
+         max_width: pytyping.SupportsInt | None = None,
+         max_rows: pytyping.SupportsInt | None = None,
+         max_col_width: pytyping.SupportsInt | None = None,
+         null_value: str | None = None,
+         render_mode: RenderMode | None = None,
+     ) -> None: ...
+     def sort(self, *args: Expression) -> DuckDBPyRelation: ...
+     def sql_query(self) -> str: ...
+     def std(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def stddev(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def stddev_pop(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def stddev_samp(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def string_agg(
+         self, column: str, sep: str = ",", groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def sum(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def tf(self) -> dict[str, tensorflow.Tensor]: ...
+     def to_arrow_table(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
+     def to_csv(
+         self,
+         file_name: str,
+         *,
+         sep: str | None = None,
+         na_rep: str | None = None,
+         header: bool | None = None,
+         quotechar: str | None = None,
+         escapechar: str | None = None,
+         date_format: str | None = None,
+         timestamp_format: str | None = None,
+         quoting: str | int | None = None,
+         encoding: str | None = None,
+         compression: str | None = None,
+         overwrite: bool | None = None,
+         per_thread_output: bool | None = None,
+         use_tmp_file: bool | None = None,
+         partition_by: pytyping.List[str] | None = None,
+         write_partition_columns: bool | None = None,
+     ) -> None: ...
+     def to_df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+     def to_parquet(
+         self,
+         file_name: str,
+         *,
+         compression: str | None = None,
+         field_ids: ParquetFieldIdsType | pytyping.Literal["auto"] | None = None,
+         row_group_size_bytes: int | str | None = None,
+         row_group_size: int | None = None,
+         overwrite: bool | None = None,
+         per_thread_output: bool | None = None,
+         use_tmp_file: bool | None = None,
+         partition_by: pytyping.List[str] | None = None,
+         write_partition_columns: bool | None = None,
+         append: bool | None = None,
+     ) -> None: ...
+     def to_table(self, table_name: str) -> None: ...
+     def to_view(self, view_name: str, replace: bool = True) -> DuckDBPyRelation: ...
+     def torch(self) -> dict[str, pytorch.Tensor]: ...
+     def union(self, union_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+     def unique(self, unique_aggr: str) -> DuckDBPyRelation: ...
+     def update(self, set: Expression | str, *, condition: Expression | str | None = None) -> None: ...
+     def value_counts(self, column: str, groups: str = "") -> DuckDBPyRelation: ...
+     def var(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def var_pop(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def var_samp(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def variance(
+         self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+     ) -> DuckDBPyRelation: ...
+     def write_csv(
+         self,
+         file_name: str,
+         sep: str | None = None,
+         na_rep: str | None = None,
+         header: bool | None = None,
+         quotechar: str | None = None,
+         escapechar: str | None = None,
+         date_format: str | None = None,
+         timestamp_format: str | None = None,
+         quoting: str | int | None = None,
+         encoding: str | None = None,
+         compression: str | None = None,
+         overwrite: bool | None = None,
+         per_thread_output: bool | None = None,
+         use_tmp_file: bool | None = None,
+         partition_by: pytyping.List[str] | None = None,
+         write_partition_columns: bool | None = None,
+     ) -> None: ...
+     def write_parquet(
+         self,
+         file_name: str,
+         compression: str | None = None,
+         field_ids: ParquetFieldIdsType | pytyping.Literal["auto"] | None = None,
+         row_group_size_bytes: str | int | None = None,
+         row_group_size: int | None = None,
+         overwrite: bool | None = None,
+         per_thread_output: bool | None = None,
+         use_tmp_file: bool | None = None,
+         partition_by: pytyping.List[str] | None = None,
+         write_partition_columns: bool | None = None,
+         append: bool | None = None,
+     ) -> None: ...
+     @property
+     def alias(self) -> str: ...
+     @property
+     def columns(self) -> pytyping.List[str]: ...
+     @property
+     def description(self) -> pytyping.List[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]]: ...
+     @property
+     def dtypes(self) -> pytyping.List[str]: ...
+     @property
+     def shape(self) -> tuple[int, int]: ...
+     @property
+     def type(self) -> str: ...
+     @property
+     def types(self) -> pytyping.List[sqltypes.DuckDBPyType]: ...
+
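DuckDBPyRelation is the lazy relational-algebra handle: each method returns a new relation, and nothing executes until a fetch or materialize call. Sketch against the methods typed above:

    import duckdb

    rel = duckdb.sql("SELECT * FROM range(100) t(x)")
    evens = rel.filter("x % 2 = 0").aggregate("sum(x) AS total")
    print(evens.fetchone())  # (2450,)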
+ class Error(Exception): ...
+
+ class ExpectedResultType:
+     CHANGED_ROWS: pytyping.ClassVar[ExpectedResultType]  # value = <ExpectedResultType.CHANGED_ROWS: 1>
+     NOTHING: pytyping.ClassVar[ExpectedResultType]  # value = <ExpectedResultType.NOTHING: 2>
+     QUERY_RESULT: pytyping.ClassVar[ExpectedResultType]  # value = <ExpectedResultType.QUERY_RESULT: 0>
+     __members__: pytyping.ClassVar[
+         dict[str, ExpectedResultType]
+     ]  # value = {'QUERY_RESULT': <ExpectedResultType.QUERY_RESULT: 0>, 'CHANGED_ROWS': <ExpectedResultType.CHANGED_ROWS: 1>, 'NOTHING': <ExpectedResultType.NOTHING: 2>} # noqa: E501
+     def __eq__(self, other: object) -> bool: ...
+     def __getstate__(self) -> int: ...
+     def __hash__(self) -> int: ...
+     def __index__(self) -> int: ...
+     def __init__(self, value: pytyping.SupportsInt) -> None: ...
+     def __int__(self) -> int: ...
+     def __ne__(self, other: object) -> bool: ...
+     def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def value(self) -> int: ...
+
+ class ExplainType:
+     ANALYZE: pytyping.ClassVar[ExplainType]  # value = <ExplainType.ANALYZE: 1>
+     STANDARD: pytyping.ClassVar[ExplainType]  # value = <ExplainType.STANDARD: 0>
+     __members__: pytyping.ClassVar[
+         dict[str, ExplainType]
+     ]  # value = {'STANDARD': <ExplainType.STANDARD: 0>, 'ANALYZE': <ExplainType.ANALYZE: 1>}
+     def __eq__(self, other: object) -> bool: ...
+     def __getstate__(self) -> int: ...
+     def __hash__(self) -> int: ...
+     def __index__(self) -> int: ...
+     def __init__(self, value: pytyping.SupportsInt) -> None: ...
+     def __int__(self) -> int: ...
+     def __ne__(self, other: object) -> bool: ...
+     def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def value(self) -> int: ...
+
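ExplainType selects between a plain plan and a profiled run; DuckDBPyRelation.explain (typed earlier) defaults to STANDARD. Illustrative use:

    import duckdb

    rel = duckdb.sql("SELECT 42")
    print(rel.explain(duckdb.ExplainType.STANDARD))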
+ class Expression:
+     def __add__(self, other: Expression) -> Expression: ...
+     def __and__(self, other: Expression) -> Expression: ...
+     def __div__(self, other: Expression) -> Expression: ...
+     def __eq__(self, other: Expression) -> Expression: ...  # type: ignore[override]
+     def __floordiv__(self, other: Expression) -> Expression: ...
+     def __ge__(self, other: Expression) -> Expression: ...
+     def __gt__(self, other: Expression) -> Expression: ...
+     @pytyping.overload
+     def __init__(self, arg0: str) -> None: ...
+     @pytyping.overload
+     def __init__(self, arg0: pytyping.Any) -> None: ...
+     def __invert__(self) -> Expression: ...
+     def __le__(self, other: Expression) -> Expression: ...
+     def __lt__(self, other: Expression) -> Expression: ...
+     def __mod__(self, other: Expression) -> Expression: ...
+     def __mul__(self, other: Expression) -> Expression: ...
+     def __ne__(self, other: Expression) -> Expression: ...  # type: ignore[override]
+     def __neg__(self) -> Expression: ...
+     def __or__(self, other: Expression) -> Expression: ...
+     def __pow__(self, other: Expression) -> Expression: ...
+     def __radd__(self, other: Expression) -> Expression: ...
+     def __rand__(self, other: Expression) -> Expression: ...
+     def __rdiv__(self, other: Expression) -> Expression: ...
+     def __rfloordiv__(self, other: Expression) -> Expression: ...
+     def __rmod__(self, other: Expression) -> Expression: ...
+     def __rmul__(self, other: Expression) -> Expression: ...
+     def __ror__(self, other: Expression) -> Expression: ...
+     def __rpow__(self, other: Expression) -> Expression: ...
+     def __rsub__(self, other: Expression) -> Expression: ...
+     def __rtruediv__(self, other: Expression) -> Expression: ...
+     def __sub__(self, other: Expression) -> Expression: ...
+     def __truediv__(self, other: Expression) -> Expression: ...
+     def alias(self, name: str) -> Expression: ...
+     def asc(self) -> Expression: ...
+     def between(self, lower: Expression, upper: Expression) -> Expression: ...
+     def cast(self, type: sqltypes.DuckDBPyType) -> Expression: ...
+     def collate(self, collation: str) -> Expression: ...
+     def desc(self) -> Expression: ...
+     def get_name(self) -> str: ...
+     def isin(self, *args: Expression) -> Expression: ...
+     def isnotin(self, *args: Expression) -> Expression: ...
+     def isnotnull(self) -> Expression: ...
+     def isnull(self) -> Expression: ...
+     def nulls_first(self) -> Expression: ...
+     def nulls_last(self) -> Expression: ...
+     def otherwise(self, value: Expression) -> Expression: ...
+     def show(self) -> None: ...
+     def when(self, condition: Expression, value: Expression) -> Expression: ...
+
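Expression overloads the Python operators to build SQL expression trees; note that comparisons return Expression rather than bool, hence the type: ignore[override] on __eq__/__ne__. A sketch using the factory helpers declared further down in this stub:

    import duckdb
    from duckdb import ColumnExpression

    rel = duckdb.sql("SELECT * FROM range(5) t(x)")
    x_sq = (ColumnExpression("x") * ColumnExpression("x")).alias("x_sq")
    print(rel.select(x_sq).fetchall())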
+ class FatalException(DatabaseError): ...
+
+ class HTTPException(IOException):
+     status_code: int
+     body: str
+     reason: str
+     headers: dict[str, str]
+
+ class IOException(OperationalError): ...
+ class IntegrityError(DatabaseError): ...
+ class InternalError(DatabaseError): ...
+ class InternalException(InternalError): ...
+ class InterruptException(DatabaseError): ...
+ class InvalidInputException(ProgrammingError): ...
+ class InvalidTypeException(ProgrammingError): ...
+ class NotImplementedException(NotSupportedError): ...
+ class NotSupportedError(DatabaseError): ...
+ class OperationalError(DatabaseError): ...
+ class OutOfMemoryException(OperationalError): ...
+ class OutOfRangeException(DataError): ...
+ class ParserException(ProgrammingError): ...
+ class PermissionException(DatabaseError): ...
+ class ProgrammingError(DatabaseError): ...
+
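These classes graft DuckDB's native error taxonomy onto the PEP 249 exception hierarchy; everything ultimately derives from Error. Catching a missing-table error, for instance:

    import duckdb

    try:
        duckdb.sql("SELECT * FROM no_such_table")
    except duckdb.CatalogException as exc:  # a ProgrammingError subclass
        print(type(exc).__name__)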
+ class PythonExceptionHandling:
+     DEFAULT: pytyping.ClassVar[PythonExceptionHandling]  # value = <PythonExceptionHandling.DEFAULT: 0>
+     RETURN_NULL: pytyping.ClassVar[PythonExceptionHandling]  # value = <PythonExceptionHandling.RETURN_NULL: 1>
+     __members__: pytyping.ClassVar[
+         dict[str, PythonExceptionHandling]
+     ]  # value = {'DEFAULT': <PythonExceptionHandling.DEFAULT: 0>, 'RETURN_NULL': <PythonExceptionHandling.RETURN_NULL: 1>} # noqa: E501
+     def __eq__(self, other: object) -> bool: ...
+     def __getstate__(self) -> int: ...
+     def __hash__(self) -> int: ...
+     def __index__(self) -> int: ...
+     def __init__(self, value: pytyping.SupportsInt) -> None: ...
+     def __int__(self) -> int: ...
+     def __ne__(self, other: object) -> bool: ...
+     def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def value(self) -> int: ...
+
+ class RenderMode:
+     COLUMNS: pytyping.ClassVar[RenderMode]  # value = <RenderMode.COLUMNS: 1>
+     ROWS: pytyping.ClassVar[RenderMode]  # value = <RenderMode.ROWS: 0>
+     __members__: pytyping.ClassVar[
+         dict[str, RenderMode]
+     ]  # value = {'ROWS': <RenderMode.ROWS: 0>, 'COLUMNS': <RenderMode.COLUMNS: 1>}
+     def __eq__(self, other: object) -> bool: ...
+     def __getstate__(self) -> int: ...
+     def __hash__(self) -> int: ...
+     def __index__(self) -> int: ...
+     def __init__(self, value: pytyping.SupportsInt) -> None: ...
+     def __int__(self) -> int: ...
+     def __ne__(self, other: object) -> bool: ...
+     def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def value(self) -> int: ...
+
+ class SequenceException(DatabaseError): ...
+ class SerializationException(OperationalError): ...
+
+ class Statement:
+     @property
+     def expected_result_type(self) -> list[StatementType]: ...
+     @property
+     def named_parameters(self) -> set[str]: ...
+     @property
+     def query(self) -> str: ...
+     @property
+     def type(self) -> StatementType: ...
+
+ class StatementType:
+     ALTER_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.ALTER_STATEMENT: 8>
+     ANALYZE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.ANALYZE_STATEMENT: 11>
+     ATTACH_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.ATTACH_STATEMENT: 25>
+     CALL_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.CALL_STATEMENT: 19>
+     COPY_DATABASE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.COPY_DATABASE_STATEMENT: 28>
+     COPY_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.COPY_STATEMENT: 10>
+     CREATE_FUNC_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.CREATE_FUNC_STATEMENT: 13>
+     CREATE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.CREATE_STATEMENT: 4>
+     DELETE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.DELETE_STATEMENT: 5>
+     DETACH_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.DETACH_STATEMENT: 26>
+     DROP_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.DROP_STATEMENT: 15>
+     EXECUTE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXECUTE_STATEMENT: 7>
+     EXPLAIN_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXPLAIN_STATEMENT: 14>
+     EXPORT_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXPORT_STATEMENT: 16>
+     EXTENSION_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXTENSION_STATEMENT: 23>
+     INSERT_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.INSERT_STATEMENT: 2>
+     INVALID_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.INVALID_STATEMENT: 0>
+     LOAD_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.LOAD_STATEMENT: 21>
+     LOGICAL_PLAN_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.LOGICAL_PLAN_STATEMENT: 24>
+     MERGE_INTO_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.MERGE_INTO_STATEMENT: 30>
+     MULTI_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.MULTI_STATEMENT: 27>
+     PRAGMA_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.PRAGMA_STATEMENT: 17>
+     PREPARE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.PREPARE_STATEMENT: 6>
+     RELATION_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.RELATION_STATEMENT: 22>
+     SELECT_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.SELECT_STATEMENT: 1>
+     SET_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.SET_STATEMENT: 20>
+     TRANSACTION_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.TRANSACTION_STATEMENT: 9>
+     UPDATE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.UPDATE_STATEMENT: 3>
+     VACUUM_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.VACUUM_STATEMENT: 18>
+     VARIABLE_SET_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.VARIABLE_SET_STATEMENT: 12>
+     __members__: pytyping.ClassVar[
+         dict[str, StatementType]
+     ]  # value = {'INVALID_STATEMENT': <StatementType.INVALID_STATEMENT: 0>, 'SELECT_STATEMENT': <StatementType.SELECT_STATEMENT: 1>, 'INSERT_STATEMENT': <StatementType.INSERT_STATEMENT: 2>, 'UPDATE_STATEMENT': <StatementType.UPDATE_STATEMENT: 3>, 'CREATE_STATEMENT': <StatementType.CREATE_STATEMENT: 4>, 'DELETE_STATEMENT': <StatementType.DELETE_STATEMENT: 5>, 'PREPARE_STATEMENT': <StatementType.PREPARE_STATEMENT: 6>, 'EXECUTE_STATEMENT': <StatementType.EXECUTE_STATEMENT: 7>, 'ALTER_STATEMENT': <StatementType.ALTER_STATEMENT: 8>, 'TRANSACTION_STATEMENT': <StatementType.TRANSACTION_STATEMENT: 9>, 'COPY_STATEMENT': <StatementType.COPY_STATEMENT: 10>, 'ANALYZE_STATEMENT': <StatementType.ANALYZE_STATEMENT: 11>, 'VARIABLE_SET_STATEMENT': <StatementType.VARIABLE_SET_STATEMENT: 12>, 'CREATE_FUNC_STATEMENT': <StatementType.CREATE_FUNC_STATEMENT: 13>, 'EXPLAIN_STATEMENT': <StatementType.EXPLAIN_STATEMENT: 14>, 'DROP_STATEMENT': <StatementType.DROP_STATEMENT: 15>, 'EXPORT_STATEMENT': <StatementType.EXPORT_STATEMENT: 16>, 'PRAGMA_STATEMENT': <StatementType.PRAGMA_STATEMENT: 17>, 'VACUUM_STATEMENT': <StatementType.VACUUM_STATEMENT: 18>, 'CALL_STATEMENT': <StatementType.CALL_STATEMENT: 19>, 'SET_STATEMENT': <StatementType.SET_STATEMENT: 20>, 'LOAD_STATEMENT': <StatementType.LOAD_STATEMENT: 21>, 'RELATION_STATEMENT': <StatementType.RELATION_STATEMENT: 22>, 'EXTENSION_STATEMENT': <StatementType.EXTENSION_STATEMENT: 23>, 'LOGICAL_PLAN_STATEMENT': <StatementType.LOGICAL_PLAN_STATEMENT: 24>, 'ATTACH_STATEMENT': <StatementType.ATTACH_STATEMENT: 25>, 'DETACH_STATEMENT': <StatementType.DETACH_STATEMENT: 26>, 'MULTI_STATEMENT': <StatementType.MULTI_STATEMENT: 27>, 'COPY_DATABASE_STATEMENT': <StatementType.COPY_DATABASE_STATEMENT: 28>, 'MERGE_INTO_STATEMENT': <StatementType.MERGE_INTO_STATEMENT: 30>} # noqa: E501
+     def __eq__(self, other: object) -> bool: ...
+     def __getstate__(self) -> int: ...
+     def __hash__(self) -> int: ...
+     def __index__(self) -> int: ...
+     def __init__(self, value: pytyping.SupportsInt) -> None: ...
+     def __int__(self) -> int: ...
+     def __ne__(self, other: object) -> bool: ...
+     def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def value(self) -> int: ...
+
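Statement and StatementType back the statement-level API: extract_statements (typed below at module level) splits a script and reports each statement's kind. Sketch:

    import duckdb

    for stmt in duckdb.extract_statements("CREATE TABLE t (i INT); SELECT 1;"):
        print(stmt.type.name, "|", stmt.query.strip())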
+ class SyntaxException(ProgrammingError): ...
+ class TransactionException(OperationalError): ...
+ class TypeMismatchException(DataError): ...
+ class Warning(Exception): ...
+
+ class token_type:
+     __members__: pytyping.ClassVar[
+         dict[str, token_type]
+     ]  # value = {'identifier': <token_type.identifier: 0>, 'numeric_const': <token_type.numeric_const: 1>, 'string_const': <token_type.string_const: 2>, 'operator': <token_type.operator: 3>, 'keyword': <token_type.keyword: 4>, 'comment': <token_type.comment: 5>} # noqa: E501
+     comment: pytyping.ClassVar[token_type]  # value = <token_type.comment: 5>
+     identifier: pytyping.ClassVar[token_type]  # value = <token_type.identifier: 0>
+     keyword: pytyping.ClassVar[token_type]  # value = <token_type.keyword: 4>
+     numeric_const: pytyping.ClassVar[token_type]  # value = <token_type.numeric_const: 1>
+     operator: pytyping.ClassVar[token_type]  # value = <token_type.operator: 3>
+     string_const: pytyping.ClassVar[token_type]  # value = <token_type.string_const: 2>
+     def __eq__(self, other: object) -> bool: ...
+     def __getstate__(self) -> int: ...
+     def __hash__(self) -> int: ...
+     def __index__(self) -> int: ...
+     def __init__(self, value: pytyping.SupportsInt) -> None: ...
+     def __int__(self) -> int: ...
+     def __ne__(self, other: object) -> bool: ...
+     def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def value(self) -> int: ...
+
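token_type (lower-case, matching the binding's historical name) pairs with tokenize from __all__; tokenize's signature and exact return shape are not visible in this hunk, so treat this as a sketch:

    import duckdb

    # Inspect however the tokens come back; the shape is not typed above.
    print(duckdb.tokenize("SELECT 1 -- answer"))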
+ def CaseExpression(condition: Expression, value: Expression) -> Expression: ...
+ def CoalesceOperator(*args: Expression) -> Expression: ...
+ def ColumnExpression(*args: str) -> Expression: ...
+ def ConstantExpression(value: Expression | str) -> Expression: ...
+ def DefaultExpression() -> Expression: ...
+ def FunctionExpression(function_name: str, *args: Expression) -> Expression: ...
+ def LambdaExpression(lhs: Expression | str | tuple[str], rhs: Expression) -> Expression: ...
+ def SQLExpression(expression: str) -> Expression: ...
+ @pytyping.overload
+ def StarExpression(*, exclude: Expression | str | tuple[str]) -> Expression: ...
+ @pytyping.overload
+ def StarExpression() -> Expression: ...
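
These factory functions are the public constructors for Expression trees; CaseExpression starts a CASE WHEN chain that Expression.when/.otherwise (typed earlier) extend. Sketch:

    import duckdb
    from duckdb import CaseExpression, ConstantExpression, SQLExpression

    rel = duckdb.sql("SELECT * FROM range(3) t(x)")
    label = CaseExpression(SQLExpression("x = 1"), ConstantExpression("one")) \
        .otherwise(ConstantExpression("other")).alias("label")
    print(rel.select(label).fetchall())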
1044
+ def aggregate(
1045
+ df: pandas.DataFrame,
1046
+ aggr_expr: Expression | list[Expression] | str | list[str],
1047
+ group_expr: str = "",
1048
+ *,
1049
+ connection: DuckDBPyConnection | None = None,
1050
+ ) -> DuckDBPyRelation: ...
1051
+ def alias(df: pandas.DataFrame, alias: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1052
+ def append(
1053
+ table_name: str, df: pandas.DataFrame, *, by_name: bool = False, connection: DuckDBPyConnection | None = None
1054
+ ) -> DuckDBPyConnection: ...
1055
+ def array_type(
1056
+ type: sqltypes.DuckDBPyType, size: pytyping.SupportsInt, *, connection: DuckDBPyConnection | None = None
1057
+ ) -> sqltypes.DuckDBPyType: ...
1058
+ @pytyping.overload
1059
+ def arrow(
1060
+ rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
1061
+ ) -> pyarrow.lib.RecordBatchReader: ...
1062
+ @pytyping.overload
1063
+ def arrow(arrow_object: pytyping.Any, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1064
+ def begin(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1065
+ def checkpoint(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1066
+ def close(*, connection: DuckDBPyConnection | None = None) -> None: ...
1067
+ def commit(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1068
+ def connect(
1069
+ database: str | pathlib.Path = ":memory:",
1070
+ read_only: bool = False,
1071
+ config: dict[str, str | bool | int | float | list[str]] | None = None,
1072
+ ) -> DuckDBPyConnection: ...
1073
+ def create_function(
1074
+ name: str,
1075
+ function: Callable[..., pytyping.Any],
1076
+ parameters: list[sqltypes.DuckDBPyType] | None = None,
1077
+ return_type: sqltypes.DuckDBPyType | None = None,
1078
+ *,
1079
+ type: func.PythonUDFType = ...,
1080
+ null_handling: func.FunctionNullHandling = ...,
1081
+ exception_handling: PythonExceptionHandling = ...,
1082
+ side_effects: bool = False,
1083
+ connection: DuckDBPyConnection | None = None,
1084
+ ) -> DuckDBPyConnection: ...
1085
+ def cursor(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1086
+ def decimal_type(
1087
+ width: pytyping.SupportsInt, scale: pytyping.SupportsInt, *, connection: DuckDBPyConnection | None = None
1088
+ ) -> sqltypes.DuckDBPyType: ...
1089
+ def default_connection() -> DuckDBPyConnection: ...
1090
+ def description(
1091
+ *, connection: DuckDBPyConnection | None = None
1092
+ ) -> list[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]] | None: ...
1093
+ @pytyping.overload
1094
+ def df(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
1095
+ @pytyping.overload
1096
+ def df(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1097
+ def distinct(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1098
+ def dtype(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
1099
+ def duplicate(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1100
+ def enum_type(
1101
+ name: str,
1102
+ type: sqltypes.DuckDBPyType,
1103
+ values: list[pytyping.Any],
1104
+ *,
1105
+ connection: DuckDBPyConnection | None = None,
1106
+ ) -> sqltypes.DuckDBPyType: ...
1107
+ def execute(
1108
+ query: Statement | str,
1109
+ parameters: object = None,
1110
+ *,
1111
+ connection: DuckDBPyConnection | None = None,
1112
+ ) -> DuckDBPyConnection: ...
1113
+ def executemany(
1114
+ query: Statement | str,
1115
+ parameters: object = None,
1116
+ *,
1117
+ connection: DuckDBPyConnection | None = None,
1118
+ ) -> DuckDBPyConnection: ...
1119
+ def extract_statements(query: str, *, connection: DuckDBPyConnection | None = None) -> list[Statement]: ...
1120
+ def fetch_arrow_table(
1121
+ rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
1122
+ ) -> pyarrow.lib.Table: ...
1123
+ def fetch_df(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
1124
+ def fetch_df_chunk(
1125
+ vectors_per_chunk: pytyping.SupportsInt = 1,
1126
+ *,
1127
+ date_as_object: bool = False,
1128
+ connection: DuckDBPyConnection | None = None,
1129
+ ) -> pandas.DataFrame: ...
1130
+ def fetch_record_batch(
1131
+ rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
1132
+ ) -> pyarrow.lib.RecordBatchReader: ...
+ def fetchall(*, connection: DuckDBPyConnection | None = None) -> list[tuple[pytyping.Any, ...]]: ...
+ def fetchdf(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
+ def fetchmany(
+     size: pytyping.SupportsInt = 1, *, connection: DuckDBPyConnection | None = None
+ ) -> list[tuple[pytyping.Any, ...]]: ...
+ def fetchnumpy(
+     *, connection: DuckDBPyConnection | None = None
+ ) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
+ def fetchone(*, connection: DuckDBPyConnection | None = None) -> tuple[pytyping.Any, ...] | None: ...
+ def filesystem_is_registered(name: str, *, connection: DuckDBPyConnection | None = None) -> bool: ...
+ def filter(
+     df: pandas.DataFrame,
+     filter_expr: Expression | str,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def from_arrow(
+     arrow_object: object,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def from_csv_auto(
+     path_or_buffer: str | bytes | os.PathLike[str],
+     header: bool | int | None = None,
+     compression: str | None = None,
+     sep: str | None = None,
+     delimiter: str | None = None,
+     files_to_sniff: int | None = None,
+     comment: str | None = None,
+     thousands: str | None = None,
+     dtype: dict[str, str] | list[str] | None = None,
+     na_values: str | list[str] | None = None,
+     skiprows: int | None = None,
+     quotechar: str | None = None,
+     escapechar: str | None = None,
+     encoding: str | None = None,
+     parallel: bool | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     sample_size: int | None = None,
+     auto_detect: bool | int | None = None,
+     all_varchar: bool | None = None,
+     normalize_names: bool | None = None,
+     null_padding: bool | None = None,
+     names: list[str] | None = None,
+     lineterminator: str | None = None,
+     columns: dict[str, str] | None = None,
+     auto_type_candidates: list[str] | None = None,
+     max_line_size: int | None = None,
+     ignore_errors: bool | None = None,
+     store_rejects: bool | None = None,
+     rejects_table: str | None = None,
+     rejects_scan: str | None = None,
+     rejects_limit: int | None = None,
+     force_not_null: list[str] | None = None,
+     buffer_size: int | None = None,
+     decimal: str | None = None,
+     allow_quoted_nulls: bool | None = None,
+     filename: bool | str | None = None,
+     hive_partitioning: bool | None = None,
+     union_by_name: bool | None = None,
+     hive_types: dict[str, str] | None = None,
+     hive_types_autocast: bool | None = None,
+     strict_mode: bool | None = None,
+ ) -> DuckDBPyRelation: ...
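A minimal sketch of `from_csv_auto` (which shares its signature with `read_csv` further down); `data.csv` is a hypothetical file, and any parameter left as None is sniffed automatically:

```python
import duckdb

rel = duckdb.from_csv_auto(
    "data.csv",
    header=True,
    delimiter=",",
    dtype={"id": "INTEGER"},  # override the sniffed type for one column
)
print(rel.limit(5).fetchall())
```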
+ def from_df(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def from_parquet(
+     file_glob: str,
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: str | None = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def from_parquet(
+     file_globs: Sequence[str],
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: pytyping.Any = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
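The two `from_parquet` overloads above accept either a single glob or a sequence of globs; a sketch with hypothetical paths:

```python
import duckdb

one = duckdb.from_parquet("data/*.parquet", filename=True)  # str overload
many = duckdb.from_parquet(
    ["2023/*.parquet", "2024/*.parquet"],  # Sequence overload
    union_by_name=True,
)
print(one.columns)
```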
+ def from_query(
+     query: Statement | str,
+     *,
+     alias: str = "",
+     params: object = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def get_table_names(
+     query: str, *, qualified: bool = False, connection: DuckDBPyConnection | None = None
+ ) -> set[str]: ...
+ def install_extension(
+     extension: str,
+     *,
+     force_install: bool = False,
+     repository: str | None = None,
+     repository_url: str | None = None,
+     version: str | None = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> None: ...
+ def interrupt(*, connection: DuckDBPyConnection | None = None) -> None: ...
+ def limit(
+     df: pandas.DataFrame,
+     n: pytyping.SupportsInt,
+     offset: pytyping.SupportsInt = 0,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def list_filesystems(*, connection: DuckDBPyConnection | None = None) -> list[str]: ...
+ def list_type(
+     type: sqltypes.DuckDBPyType, *, connection: DuckDBPyConnection | None = None
+ ) -> sqltypes.DuckDBPyType: ...
+ def load_extension(extension: str, *, connection: DuckDBPyConnection | None = None) -> None: ...
+ def map_type(
+     key: sqltypes.DuckDBPyType,
+     value: sqltypes.DuckDBPyType,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
+ def order(
+     df: pandas.DataFrame, order_expr: str, *, connection: DuckDBPyConnection | None = None
+ ) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def pl(
+     rows_per_batch: pytyping.SupportsInt = 1000000,
+     *,
+     lazy: pytyping.Literal[False] = ...,
+     connection: DuckDBPyConnection | None = None,
+ ) -> polars.DataFrame: ...
+ @pytyping.overload
+ def pl(
+     rows_per_batch: pytyping.SupportsInt = 1000000,
+     *,
+     lazy: pytyping.Literal[True],
+     connection: DuckDBPyConnection | None = None,
+ ) -> polars.LazyFrame: ...
+ @pytyping.overload
+ def pl(
+     rows_per_batch: pytyping.SupportsInt = 1000000,
+     *,
+     lazy: bool = False,
+     connection: DuckDBPyConnection | None = None,
+ ) -> polars.DataFrame | polars.LazyFrame: ...
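The `lazy` flag selects between the `pl` overloads above; a sketch assuming polars is installed:

```python
import duckdb

duckdb.execute("SELECT * FROM range(5) AS t(i)")
df = duckdb.pl()  # default: eager polars.DataFrame

duckdb.execute("SELECT * FROM range(5) AS t(i)")
lf = duckdb.pl(lazy=True)  # polars.LazyFrame
print(lf.collect())
```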
+ def project(
+     df: pandas.DataFrame, *args: str | Expression, groups: str = "", connection: DuckDBPyConnection | None = None
+ ) -> DuckDBPyRelation: ...
+ def query(
+     query: Statement | str,
+     *,
+     alias: str = "",
+     params: object = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def query_df(
+     df: pandas.DataFrame,
+     virtual_table_name: str,
+     sql_query: str,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def query_progress(*, connection: DuckDBPyConnection | None = None) -> float: ...
+ def read_csv(
+     path_or_buffer: str | bytes | os.PathLike[str],
+     header: bool | int | None = None,
+     compression: str | None = None,
+     sep: str | None = None,
+     delimiter: str | None = None,
+     files_to_sniff: int | None = None,
+     comment: str | None = None,
+     thousands: str | None = None,
+     dtype: dict[str, str] | list[str] | None = None,
+     na_values: str | list[str] | None = None,
+     skiprows: int | None = None,
+     quotechar: str | None = None,
+     escapechar: str | None = None,
+     encoding: str | None = None,
+     parallel: bool | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     sample_size: int | None = None,
+     auto_detect: bool | int | None = None,
+     all_varchar: bool | None = None,
+     normalize_names: bool | None = None,
+     null_padding: bool | None = None,
+     names: list[str] | None = None,
+     lineterminator: str | None = None,
+     columns: dict[str, str] | None = None,
+     auto_type_candidates: list[str] | None = None,
+     max_line_size: int | None = None,
+     ignore_errors: bool | None = None,
+     store_rejects: bool | None = None,
+     rejects_table: str | None = None,
+     rejects_scan: str | None = None,
+     rejects_limit: int | None = None,
+     force_not_null: list[str] | None = None,
+     buffer_size: int | None = None,
+     decimal: str | None = None,
+     allow_quoted_nulls: bool | None = None,
+     filename: bool | str | None = None,
+     hive_partitioning: bool | None = None,
+     union_by_name: bool | None = None,
+     hive_types: dict[str, str] | None = None,
+     hive_types_autocast: bool | None = None,
+     strict_mode: bool | None = None,
+ ) -> DuckDBPyRelation: ...
+ def read_json(
+     path_or_buffer: str | bytes | os.PathLike[str],
+     *,
+     columns: dict[str, str] | None = None,
+     sample_size: int | None = None,
+     maximum_depth: int | None = None,
+     records: str | None = None,
+     format: str | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     compression: str | None = None,
+     maximum_object_size: int | None = None,
+     ignore_errors: bool | None = None,
+     convert_strings_to_integers: bool | None = None,
+     field_appearance_threshold: float | None = None,
+     map_inference_threshold: int | None = None,
+     maximum_sample_files: int | None = None,
+     filename: bool | str | None = None,
+     hive_partitioning: bool | None = None,
+     union_by_name: bool | None = None,
+     hive_types: dict[str, str] | None = None,
+     hive_types_autocast: bool | None = None,
+ ) -> DuckDBPyRelation: ...
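A minimal `read_json` sketch; `records.json` is a hypothetical newline-delimited file, and `columns` pins the schema instead of letting it be inferred:

```python
import duckdb

rel = duckdb.read_json(
    "records.json",
    format="newline_delimited",
    columns={"id": "INTEGER", "payload": "VARCHAR"},
)
print(rel.limit(3).fetchall())
```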
+ @pytyping.overload
+ def read_parquet(
+     file_glob: str,
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: str | None = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def read_parquet(
+     file_globs: Sequence[str],
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: pytyping.Any = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def register(
+     view_name: str,
+     python_object: object,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyConnection: ...
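`register` exposes a Python object (e.g. a pandas DataFrame or Arrow table) as a named view that SQL can reference; a minimal sketch:

```python
import duckdb
import pandas as pd

df = pd.DataFrame({"i": [1, 2, 3]})
duckdb.register("my_df", df)
print(duckdb.sql("SELECT sum(i) FROM my_df").fetchall())  # [(6,)]
duckdb.unregister("my_df")
```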
+ def register_filesystem(
+     filesystem: fsspec.AbstractFileSystem, *, connection: DuckDBPyConnection | None = None
+ ) -> None: ...
+ def remove_function(name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+ def rollback(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+ def row_type(
+     fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
+ def rowcount(*, connection: DuckDBPyConnection | None = None) -> int: ...
+ def set_default_connection(connection: DuckDBPyConnection) -> None: ...
+ def sql(
+     query: Statement | str,
+     *,
+     alias: str = "",
+     params: object = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def sqltype(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
+ def string_type(collation: str = "", *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
+ def struct_type(
+     fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
+ def table(table_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+ def table_function(
+     name: str,
+     parameters: object = None,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def tf(*, connection: DuckDBPyConnection | None = None) -> dict[str, tensorflow.Tensor]: ...
+ def tokenize(query: str) -> list[tuple[int, token_type]]: ...
+ def torch(*, connection: DuckDBPyConnection | None = None) -> dict[str, pytorch.Tensor]: ...
+ def type(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
+ def union_type(
+     members: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
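The type constructors above compose into nested DuckDB types; a sketch (the printed rendering is illustrative):

```python
import duckdb

# A STRUCT of two DOUBLE fields, a LIST of that struct, and a MAP type.
point = duckdb.struct_type({"x": duckdb.sqltype("DOUBLE"), "y": duckdb.sqltype("DOUBLE")})
points = duckdb.list_type(point)
tags = duckdb.map_type(duckdb.sqltype("VARCHAR"), duckdb.sqltype("INTEGER"))
print(points, tags)
```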
+ def unregister(view_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+ def unregister_filesystem(name: str, *, connection: DuckDBPyConnection | None = None) -> None: ...
+ def values(
+     *args: list[pytyping.Any] | tuple[Expression, ...] | Expression, connection: DuckDBPyConnection | None = None
+ ) -> DuckDBPyRelation: ...
+ def view(view_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+ def write_csv(
+     df: pandas.DataFrame,
+     filename: str,
+     *,
+     sep: str | None = None,
+     na_rep: str | None = None,
+     header: bool | None = None,
+     quotechar: str | None = None,
+     escapechar: str | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     quoting: str | int | None = None,
+     encoding: str | None = None,
+     compression: str | None = None,
+     overwrite: bool | None = None,
+     per_thread_output: bool | None = None,
+     use_tmp_file: bool | None = None,
+     partition_by: list[str] | None = None,
+     write_partition_columns: bool | None = None,
+ ) -> None: ...
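A minimal `write_csv` sketch; `out.csv` is a hypothetical output path:

```python
import duckdb
import pandas as pd

df = pd.DataFrame({"i": [1, 2], "s": ["a", "b"]})
duckdb.write_csv(df, "out.csv", header=True, sep=",")
```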
+
+ __formatted_python_version__: str
+ __git_revision__: str
+ __interactive__: bool
+ __jupyter__: bool
+ __standard_vector_size__: int
+ __version__: str
+ _clean_default_connection: pytyping.Any  # value = <capsule object>
+ apilevel: str
+ paramstyle: str
+ threadsafety: int
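The trailing attributes carry DB-API 2.0 metadata and build information; the printed values below are illustrative:

```python
import duckdb

print(duckdb.apilevel)      # "2.0"
print(duckdb.paramstyle)    # "qmark"
print(duckdb.threadsafety)  # 1
print(duckdb.__version__)   # e.g. "1.4.3.dev8"
```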