duckdb 1.4.4.dev11__cp314-cp314-macosx_10_15_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. _duckdb-stubs/__init__.pyi +1482 -0
  2. _duckdb-stubs/_func.pyi +46 -0
  3. _duckdb-stubs/_sqltypes.pyi +75 -0
  4. _duckdb.cpython-314-darwin.so +0 -0
  5. adbc_driver_duckdb/__init__.py +49 -0
  6. adbc_driver_duckdb/dbapi.py +115 -0
  7. duckdb/__init__.py +381 -0
  8. duckdb/_dbapi_type_object.py +231 -0
  9. duckdb/_version.py +22 -0
  10. duckdb/bytes_io_wrapper.py +69 -0
  11. duckdb/experimental/__init__.py +5 -0
  12. duckdb/experimental/spark/LICENSE +260 -0
  13. duckdb/experimental/spark/__init__.py +6 -0
  14. duckdb/experimental/spark/_globals.py +77 -0
  15. duckdb/experimental/spark/_typing.py +46 -0
  16. duckdb/experimental/spark/conf.py +46 -0
  17. duckdb/experimental/spark/context.py +180 -0
  18. duckdb/experimental/spark/errors/__init__.py +70 -0
  19. duckdb/experimental/spark/errors/error_classes.py +918 -0
  20. duckdb/experimental/spark/errors/exceptions/__init__.py +16 -0
  21. duckdb/experimental/spark/errors/exceptions/base.py +168 -0
  22. duckdb/experimental/spark/errors/utils.py +111 -0
  23. duckdb/experimental/spark/exception.py +18 -0
  24. duckdb/experimental/spark/sql/__init__.py +7 -0
  25. duckdb/experimental/spark/sql/_typing.py +86 -0
  26. duckdb/experimental/spark/sql/catalog.py +79 -0
  27. duckdb/experimental/spark/sql/column.py +361 -0
  28. duckdb/experimental/spark/sql/conf.py +24 -0
  29. duckdb/experimental/spark/sql/dataframe.py +1423 -0
  30. duckdb/experimental/spark/sql/functions.py +6216 -0
  31. duckdb/experimental/spark/sql/group.py +424 -0
  32. duckdb/experimental/spark/sql/readwriter.py +435 -0
  33. duckdb/experimental/spark/sql/session.py +297 -0
  34. duckdb/experimental/spark/sql/streaming.py +36 -0
  35. duckdb/experimental/spark/sql/type_utils.py +113 -0
  36. duckdb/experimental/spark/sql/types.py +1310 -0
  37. duckdb/experimental/spark/sql/udf.py +37 -0
  38. duckdb/filesystem.py +33 -0
  39. duckdb/func/__init__.py +3 -0
  40. duckdb/functional/__init__.py +13 -0
  41. duckdb/polars_io.py +284 -0
  42. duckdb/py.typed +0 -0
  43. duckdb/query_graph/__main__.py +358 -0
  44. duckdb/sqltypes/__init__.py +63 -0
  45. duckdb/typing/__init__.py +71 -0
  46. duckdb/udf.py +24 -0
  47. duckdb/value/__init__.py +1 -0
  48. duckdb/value/constant/__init__.py +270 -0
  49. duckdb-1.4.4.dev11.dist-info/METADATA +88 -0
  50. duckdb-1.4.4.dev11.dist-info/RECORD +52 -0
  51. duckdb-1.4.4.dev11.dist-info/WHEEL +6 -0
  52. duckdb-1.4.4.dev11.dist-info/licenses/LICENSE +7 -0
@@ -0,0 +1,1482 @@
+import os
+import pathlib
+import typing as pytyping
+from typing_extensions import Self
+
+if pytyping.TYPE_CHECKING:
+    import fsspec
+    import numpy as np
+    import polars
+    import pandas
+    import pyarrow.lib
+    import torch as pytorch
+    import tensorflow
+    from collections.abc import Callable, Sequence, Mapping
+    from duckdb import sqltypes, func
+
+# the field_ids argument to to_parquet and write_parquet has a recursive structure
+ParquetFieldIdsType = Mapping[str, pytyping.Union[int, "ParquetFieldIdsType"]]
+
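The recursive shape above lets nested (STRUCT) columns carry their own field ids. A minimal sketch of a value that satisfies this type, passed to write_parquet (column names are hypothetical, and the exact semantics DuckDB assigns to nested ids are not spelled out in this stub):

    # top-level columns map to ids; struct columns map to nested mappings
    field_ids = {"id": 1, "location": {"lat": 2, "lon": 3}}
    rel.write_parquet("out.parquet", field_ids=field_ids)  # rel: a DuckDBPyRelation
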
+__all__: list[str] = [
+    "BinderException",
+    "CSVLineTerminator",
+    "CaseExpression",
+    "CatalogException",
+    "CoalesceOperator",
+    "ColumnExpression",
+    "ConnectionException",
+    "ConstantExpression",
+    "ConstraintException",
+    "ConversionException",
+    "DataError",
+    "DatabaseError",
+    "DefaultExpression",
+    "DependencyException",
+    "DuckDBPyConnection",
+    "DuckDBPyRelation",
+    "Error",
+    "ExpectedResultType",
+    "ExplainType",
+    "Expression",
+    "FatalException",
+    "FunctionExpression",
+    "HTTPException",
+    "IOException",
+    "IntegrityError",
+    "InternalError",
+    "InternalException",
+    "InterruptException",
+    "InvalidInputException",
+    "InvalidTypeException",
+    "LambdaExpression",
+    "NotImplementedException",
+    "NotSupportedError",
+    "OperationalError",
+    "OutOfMemoryException",
+    "OutOfRangeException",
+    "ParserException",
+    "PermissionException",
+    "ProgrammingError",
+    "PythonExceptionHandling",
+    "RenderMode",
+    "SQLExpression",
+    "SequenceException",
+    "SerializationException",
+    "StarExpression",
+    "Statement",
+    "StatementType",
+    "SyntaxException",
+    "TransactionException",
+    "TypeMismatchException",
+    "Warning",
+    "aggregate",
+    "alias",
+    "apilevel",
+    "append",
+    "array_type",
+    "arrow",
+    "begin",
+    "checkpoint",
+    "close",
+    "commit",
+    "connect",
+    "create_function",
+    "cursor",
+    "decimal_type",
+    "default_connection",
+    "description",
+    "df",
+    "distinct",
+    "dtype",
+    "duplicate",
+    "enum_type",
+    "execute",
+    "executemany",
+    "extract_statements",
+    "fetch_arrow_table",
+    "fetch_df",
+    "fetch_df_chunk",
+    "fetch_record_batch",
+    "fetchall",
+    "fetchdf",
+    "fetchmany",
+    "fetchnumpy",
+    "fetchone",
+    "filesystem_is_registered",
+    "filter",
+    "from_arrow",
+    "from_csv_auto",
+    "from_df",
+    "from_parquet",
+    "from_query",
+    "get_table_names",
+    "install_extension",
+    "interrupt",
+    "limit",
+    "list_filesystems",
+    "list_type",
+    "load_extension",
+    "map_type",
+    "order",
+    "paramstyle",
+    "pl",
+    "project",
+    "query",
+    "query_df",
+    "query_progress",
+    "read_csv",
+    "read_json",
+    "read_parquet",
+    "register",
+    "register_filesystem",
+    "remove_function",
+    "rollback",
+    "row_type",
+    "rowcount",
+    "set_default_connection",
+    "sql",
+    "sqltype",
+    "string_type",
+    "struct_type",
+    "table",
+    "table_function",
+    "tf",
+    "threadsafety",
+    "token_type",
+    "tokenize",
+    "torch",
+    "type",
+    "union_type",
+    "unregister",
+    "unregister_filesystem",
+    "values",
+    "view",
+    "write_csv",
+]
+
+class BinderException(ProgrammingError): ...
+
+class CSVLineTerminator:
+    CARRIAGE_RETURN_LINE_FEED: pytyping.ClassVar[
+        CSVLineTerminator
+    ]  # value = <CSVLineTerminator.CARRIAGE_RETURN_LINE_FEED: 1>
+    LINE_FEED: pytyping.ClassVar[CSVLineTerminator]  # value = <CSVLineTerminator.LINE_FEED: 0>
+    __members__: pytyping.ClassVar[
+        dict[str, CSVLineTerminator]
+    ]  # value = {'LINE_FEED': <CSVLineTerminator.LINE_FEED: 0>, 'CARRIAGE_RETURN_LINE_FEED': <CSVLineTerminator.CARRIAGE_RETURN_LINE_FEED: 1>}  # noqa: E501
+    def __eq__(self, other: object) -> bool: ...
+    def __getstate__(self) -> int: ...
+    def __hash__(self) -> int: ...
+    def __index__(self) -> int: ...
+    def __init__(self, value: pytyping.SupportsInt) -> None: ...
+    def __int__(self) -> int: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def value(self) -> int: ...
+
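CSVLineTerminator and the other pybind11-style enums in this stub (ExpectedResultType, ExplainType, PythonExceptionHandling, RenderMode, StatementType, token_type) all expose the same name/value/__members__ surface. A small illustrative sketch:

    import duckdb

    term = duckdb.CSVLineTerminator.LINE_FEED
    term.name, term.value                  # ('LINE_FEED', 0)
    duckdb.CSVLineTerminator.__members__   # {'LINE_FEED': ..., 'CARRIAGE_RETURN_LINE_FEED': ...}
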
+class CatalogException(ProgrammingError): ...
+class ConnectionException(OperationalError): ...
+class ConstraintException(IntegrityError): ...
+class ConversionException(DataError): ...
+class DataError(DatabaseError): ...
+class DatabaseError(Error): ...
+class DependencyException(DatabaseError): ...
+
+class DuckDBPyConnection:
+    def __del__(self) -> None: ...
+    def __enter__(self) -> Self: ...
+    def __exit__(self, exc_type: object, exc: object, traceback: object) -> None: ...
+    def append(self, table_name: str, df: pandas.DataFrame, *, by_name: bool = False) -> DuckDBPyConnection: ...
+    def array_type(self, type: sqltypes.DuckDBPyType, size: pytyping.SupportsInt) -> sqltypes.DuckDBPyType: ...
+    def arrow(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+    def begin(self) -> DuckDBPyConnection: ...
+    def checkpoint(self) -> DuckDBPyConnection: ...
+    def close(self) -> None: ...
+    def commit(self) -> DuckDBPyConnection: ...
+    def create_function(
+        self,
+        name: str,
+        function: Callable[..., pytyping.Any],
+        parameters: list[sqltypes.DuckDBPyType] | None = None,
+        return_type: sqltypes.DuckDBPyType | None = None,
+        *,
+        type: func.PythonUDFType = ...,
+        null_handling: func.FunctionNullHandling = ...,
+        exception_handling: PythonExceptionHandling = ...,
+        side_effects: bool = False,
+    ) -> DuckDBPyConnection: ...
+    def cursor(self) -> DuckDBPyConnection: ...
+    def decimal_type(self, width: pytyping.SupportsInt, scale: pytyping.SupportsInt) -> sqltypes.DuckDBPyType: ...
+    def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+    def dtype(self, type_str: str) -> sqltypes.DuckDBPyType: ...
+    def duplicate(self) -> DuckDBPyConnection: ...
+    def enum_type(
+        self, name: str, type: sqltypes.DuckDBPyType, values: list[pytyping.Any]
+    ) -> sqltypes.DuckDBPyType: ...
+    def execute(self, query: Statement | str, parameters: object = None) -> DuckDBPyConnection: ...
+    def executemany(self, query: Statement | str, parameters: object = None) -> DuckDBPyConnection: ...
+    def extract_statements(self, query: str) -> list[Statement]: ...
+    def fetch_arrow_table(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
+    def fetch_df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+    def fetch_df_chunk(
+        self, vectors_per_chunk: pytyping.SupportsInt = 1, *, date_as_object: bool = False
+    ) -> pandas.DataFrame: ...
+    def fetch_record_batch(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+    def fetchall(self) -> list[tuple[pytyping.Any, ...]]: ...
+    def fetchdf(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+    def fetchmany(self, size: pytyping.SupportsInt = 1) -> list[tuple[pytyping.Any, ...]]: ...
+    def fetchnumpy(self) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
+    def fetchone(self) -> tuple[pytyping.Any, ...] | None: ...
+    def filesystem_is_registered(self, name: str) -> bool: ...
+    def from_arrow(self, arrow_object: object) -> DuckDBPyRelation: ...
+    def from_csv_auto(
+        self,
+        path_or_buffer: str | bytes | os.PathLike[str] | os.PathLike[bytes],
+        header: bool | int | None = None,
+        compression: str | None = None,
+        sep: str | None = None,
+        delimiter: str | None = None,
+        files_to_sniff: int | None = None,
+        comment: str | None = None,
+        thousands: str | None = None,
+        dtype: dict[str, str] | list[str] | None = None,
+        na_values: str | list[str] | None = None,
+        skiprows: int | None = None,
+        quotechar: str | None = None,
+        escapechar: str | None = None,
+        encoding: str | None = None,
+        parallel: bool | None = None,
+        date_format: str | None = None,
+        timestamp_format: str | None = None,
+        sample_size: int | None = None,
+        auto_detect: bool | int | None = None,
+        all_varchar: bool | None = None,
+        normalize_names: bool | None = None,
+        null_padding: bool | None = None,
+        names: list[str] | None = None,
+        lineterminator: str | None = None,
+        columns: dict[str, str] | None = None,
+        auto_type_candidates: list[str] | None = None,
+        max_line_size: int | None = None,
+        ignore_errors: bool | None = None,
+        store_rejects: bool | None = None,
+        rejects_table: str | None = None,
+        rejects_scan: str | None = None,
+        rejects_limit: int | None = None,
+        force_not_null: list[str] | None = None,
+        buffer_size: int | None = None,
+        decimal: str | None = None,
+        allow_quoted_nulls: bool | None = None,
+        filename: bool | str | None = None,
+        hive_partitioning: bool | None = None,
+        union_by_name: bool | None = None,
+        hive_types: dict[str, str] | None = None,
+        hive_types_autocast: bool | None = None,
+        strict_mode: bool | None = None,
+    ) -> DuckDBPyRelation: ...
+    def from_df(self, df: pandas.DataFrame) -> DuckDBPyRelation: ...
+    @pytyping.overload
+    def from_parquet(
+        self,
+        file_glob: str,
+        binary_as_string: bool = False,
+        *,
+        file_row_number: bool = False,
+        filename: bool = False,
+        hive_partitioning: bool = False,
+        union_by_name: bool = False,
+        compression: str | None = None,
+    ) -> DuckDBPyRelation: ...
+    @pytyping.overload
+    def from_parquet(
+        self,
+        file_globs: Sequence[str],
+        binary_as_string: bool = False,
+        *,
+        file_row_number: bool = False,
+        filename: bool = False,
+        hive_partitioning: bool = False,
+        union_by_name: bool = False,
+        compression: str | None = None,
+    ) -> DuckDBPyRelation: ...
+    def from_query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
+    def get_table_names(self, query: str, *, qualified: bool = False) -> set[str]: ...
+    def install_extension(
+        self,
+        extension: str,
+        *,
+        force_install: bool = False,
+        repository: str | None = None,
+        repository_url: str | None = None,
+        version: str | None = None,
+    ) -> None: ...
+    def interrupt(self) -> None: ...
+    def list_filesystems(self) -> list[str]: ...
+    def list_type(self, type: sqltypes.DuckDBPyType) -> sqltypes.DuckDBPyType: ...
+    def load_extension(self, extension: str) -> None: ...
+    def map_type(self, key: sqltypes.DuckDBPyType, value: sqltypes.DuckDBPyType) -> sqltypes.DuckDBPyType: ...
+    @pytyping.overload
+    def pl(
+        self, rows_per_batch: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[False] = ...
+    ) -> polars.DataFrame: ...
+    @pytyping.overload
+    def pl(
+        self, rows_per_batch: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[True]
+    ) -> polars.LazyFrame: ...
+    @pytyping.overload
+    def pl(
+        self, rows_per_batch: pytyping.SupportsInt = 1000000, *, lazy: bool = False
+    ) -> pytyping.Union[polars.DataFrame, polars.LazyFrame]: ...
+    def query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
+    def query_progress(self) -> float: ...
+    def read_csv(
+        self,
+        path_or_buffer: str | bytes | os.PathLike[str],
+        header: bool | int | None = None,
+        compression: str | None = None,
+        sep: str | None = None,
+        delimiter: str | None = None,
+        files_to_sniff: int | None = None,
+        comment: str | None = None,
+        thousands: str | None = None,
+        dtype: dict[str, str] | list[str] | None = None,
+        na_values: str | list[str] | None = None,
+        skiprows: int | None = None,
+        quotechar: str | None = None,
+        escapechar: str | None = None,
+        encoding: str | None = None,
+        parallel: bool | None = None,
+        date_format: str | None = None,
+        timestamp_format: str | None = None,
+        sample_size: int | None = None,
+        auto_detect: bool | int | None = None,
+        all_varchar: bool | None = None,
+        normalize_names: bool | None = None,
+        null_padding: bool | None = None,
+        names: list[str] | None = None,
+        lineterminator: str | None = None,
+        columns: dict[str, str] | None = None,
+        auto_type_candidates: list[str] | None = None,
+        max_line_size: int | None = None,
+        ignore_errors: bool | None = None,
+        store_rejects: bool | None = None,
+        rejects_table: str | None = None,
+        rejects_scan: str | None = None,
+        rejects_limit: int | None = None,
+        force_not_null: list[str] | None = None,
+        buffer_size: int | None = None,
+        decimal: str | None = None,
+        allow_quoted_nulls: bool | None = None,
+        filename: bool | str | None = None,
+        hive_partitioning: bool | None = None,
+        union_by_name: bool | None = None,
+        hive_types: dict[str, str] | None = None,
+        hive_types_autocast: bool | None = None,
+        strict_mode: bool | None = None,
+    ) -> DuckDBPyRelation: ...
+    def read_json(
+        self,
+        path_or_buffer: str | bytes | os.PathLike[str],
+        *,
+        columns: dict[str, str] | None = None,
+        sample_size: int | None = None,
+        maximum_depth: int | None = None,
+        records: str | None = None,
+        format: str | None = None,
+        date_format: str | None = None,
+        timestamp_format: str | None = None,
+        compression: str | None = None,
+        maximum_object_size: int | None = None,
+        ignore_errors: bool | None = None,
+        convert_strings_to_integers: bool | None = None,
+        field_appearance_threshold: float | None = None,
+        map_inference_threshold: int | None = None,
+        maximum_sample_files: int | None = None,
+        filename: bool | str | None = None,
+        hive_partitioning: bool | None = None,
+        union_by_name: bool | None = None,
+        hive_types: dict[str, str] | None = None,
+        hive_types_autocast: bool | None = None,
+    ) -> DuckDBPyRelation: ...
+    @pytyping.overload
+    def read_parquet(
+        self,
+        file_glob: str,
+        binary_as_string: bool = False,
+        *,
+        file_row_number: bool = False,
+        filename: bool = False,
+        hive_partitioning: bool = False,
+        union_by_name: bool = False,
+        compression: str | None = None,
+    ) -> DuckDBPyRelation: ...
+    @pytyping.overload
+    def read_parquet(
+        self,
+        file_globs: Sequence[str],
+        binary_as_string: bool = False,
+        *,
+        file_row_number: bool = False,
+        filename: bool = False,
+        hive_partitioning: bool = False,
+        union_by_name: bool = False,
+        compression: pytyping.Any = None,
+    ) -> DuckDBPyRelation: ...
+    def register(self, view_name: str, python_object: object) -> DuckDBPyConnection: ...
+    def register_filesystem(self, filesystem: fsspec.AbstractFileSystem) -> None: ...
+    def remove_function(self, name: str) -> DuckDBPyConnection: ...
+    def rollback(self) -> DuckDBPyConnection: ...
+    def row_type(
+        self, fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType]
+    ) -> sqltypes.DuckDBPyType: ...
+    def sql(self, query: Statement | str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
+    def sqltype(self, type_str: str) -> sqltypes.DuckDBPyType: ...
+    def string_type(self, collation: str = "") -> sqltypes.DuckDBPyType: ...
+    def struct_type(
+        self, fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType]
+    ) -> sqltypes.DuckDBPyType: ...
+    def table(self, table_name: str) -> DuckDBPyRelation: ...
+    def table_function(self, name: str, parameters: object = None) -> DuckDBPyRelation: ...
+    def tf(self) -> dict[str, tensorflow.Tensor]: ...
+    def torch(self) -> dict[str, pytorch.Tensor]: ...
+    def type(self, type_str: str) -> sqltypes.DuckDBPyType: ...
+    def union_type(
+        self, members: list[sqltypes.DuckDBPyType] | dict[str, sqltypes.DuckDBPyType]
+    ) -> sqltypes.DuckDBPyType: ...
+    def unregister(self, view_name: str) -> DuckDBPyConnection: ...
+    def unregister_filesystem(self, name: str) -> None: ...
+    def values(self, *args: list[pytyping.Any] | tuple[Expression, ...] | Expression) -> DuckDBPyRelation: ...
+    def view(self, view_name: str) -> DuckDBPyRelation: ...
+    @property
+    def description(self) -> list[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]]: ...
+    @property
+    def rowcount(self) -> int: ...
+
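The class mixes the DB-API cursor surface (execute/executemany/fetch*) with DuckDB extras such as relational entry points and Python UDF registration. A hedged usage sketch (the table and UDF names are made up; duckdb.typing is the type-constant module shipped in this wheel):

    import duckdb

    con = duckdb.connect(":memory:")
    con.execute("CREATE TABLE t (i BIGINT)")
    con.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])
    print(con.execute("SELECT sum(i) FROM t").fetchone())  # (6,)

    # register a scalar Python UDF, call it from SQL, then drop it again
    con.create_function("plus_one", lambda x: x + 1, [duckdb.typing.BIGINT], duckdb.typing.BIGINT)
    print(con.execute("SELECT plus_one(41)").fetchone())   # (42,)
    con.remove_function("plus_one")
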
+class DuckDBPyRelation:
+    def __arrow_c_stream__(self, requested_schema: object | None = None) -> pytyping.Any: ...
+    def __contains__(self, name: str) -> bool: ...
+    def __getattr__(self, name: str) -> DuckDBPyRelation: ...
+    def __getitem__(self, name: str) -> DuckDBPyRelation: ...
+    def __len__(self) -> int: ...
+    def aggregate(self, aggr_expr: Expression | str, group_expr: Expression | str = "") -> DuckDBPyRelation: ...
+    def any_value(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def apply(
+        self,
+        function_name: str,
+        function_aggr: str,
+        group_expr: str = "",
+        function_parameter: str = "",
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def arg_max(
+        self, arg_column: str, value_column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def arg_min(
+        self, arg_column: str, value_column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def arrow(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+    def avg(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def bit_and(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def bit_or(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def bit_xor(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def bitstring_agg(
+        self,
+        column: str,
+        min: int | None = None,
+        max: int | None = None,
+        groups: str = "",
+        window_spec: str = "",
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def bool_and(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def bool_or(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def close(self) -> None: ...
+    def count(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def create(self, table_name: str) -> None: ...
+    def create_view(self, view_name: str, replace: bool = True) -> DuckDBPyRelation: ...
+    def cross(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+    def cume_dist(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+    def dense_rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+    def describe(self) -> DuckDBPyRelation: ...
+    def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+    def distinct(self) -> DuckDBPyRelation: ...
+    def except_(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+    def execute(self) -> DuckDBPyRelation: ...
+    def explain(self, type: ExplainType = ExplainType.STANDARD) -> str: ...
+    def favg(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def fetch_arrow_reader(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+    def fetch_arrow_table(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
+    def fetch_df_chunk(
+        self, vectors_per_chunk: pytyping.SupportsInt = 1, *, date_as_object: bool = False
+    ) -> pandas.DataFrame: ...
+    def fetch_record_batch(self, rows_per_batch: pytyping.SupportsInt = 1000000) -> pyarrow.lib.RecordBatchReader: ...
+    def fetchall(self) -> list[tuple[pytyping.Any, ...]]: ...
+    def fetchdf(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+    def fetchmany(self, size: pytyping.SupportsInt = 1) -> list[tuple[pytyping.Any, ...]]: ...
+    def fetchnumpy(self) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
+    def fetchone(self) -> tuple[pytyping.Any, ...] | None: ...
+    def filter(self, filter_expr: Expression | str) -> DuckDBPyRelation: ...
+    def first(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+    def first_value(self, column: str, window_spec: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+    def fsum(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def geomean(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+    def histogram(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def insert(self, values: pytyping.List[object]) -> None: ...
+    def insert_into(self, table_name: str) -> None: ...
+    def intersect(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+    def join(
+        self, other_rel: DuckDBPyRelation, condition: Expression | str, how: str = "inner"
+    ) -> DuckDBPyRelation: ...
+    def lag(
+        self,
+        column: str,
+        window_spec: str,
+        offset: pytyping.SupportsInt = 1,
+        default_value: str = "NULL",
+        ignore_nulls: bool = False,
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def last(self, column: str, groups: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+    def last_value(self, column: str, window_spec: str = "", projected_columns: str = "") -> DuckDBPyRelation: ...
+    def lead(
+        self,
+        column: str,
+        window_spec: str,
+        offset: pytyping.SupportsInt = 1,
+        default_value: str = "NULL",
+        ignore_nulls: bool = False,
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def limit(self, n: pytyping.SupportsInt, offset: pytyping.SupportsInt = 0) -> DuckDBPyRelation: ...
+    def list(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def map(
+        self, map_function: Callable[..., pytyping.Any], *, schema: dict[str, sqltypes.DuckDBPyType] | None = None
+    ) -> DuckDBPyRelation: ...
+    def max(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def mean(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def median(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def min(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def mode(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def n_tile(
+        self, window_spec: str, num_buckets: pytyping.SupportsInt, projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def nth_value(
+        self,
+        column: str,
+        window_spec: str,
+        offset: pytyping.SupportsInt,
+        ignore_nulls: bool = False,
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def order(self, order_expr: str) -> DuckDBPyRelation: ...
+    def percent_rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+    @pytyping.overload
+    def pl(
+        self, batch_size: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[False] = ...
+    ) -> polars.DataFrame: ...
+    @pytyping.overload
+    def pl(self, batch_size: pytyping.SupportsInt = 1000000, *, lazy: pytyping.Literal[True]) -> polars.LazyFrame: ...
+    @pytyping.overload
+    def pl(
+        self, batch_size: pytyping.SupportsInt = 1000000, *, lazy: bool = False
+    ) -> pytyping.Union[polars.DataFrame, polars.LazyFrame]: ...
+    def product(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def project(self, *args: str | Expression, groups: str = "") -> DuckDBPyRelation: ...
+    def quantile(
+        self,
+        column: str,
+        q: float | pytyping.List[float] = 0.5,
+        groups: str = "",
+        window_spec: str = "",
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def quantile_cont(
+        self,
+        column: str,
+        q: float | pytyping.List[float] = 0.5,
+        groups: str = "",
+        window_spec: str = "",
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def quantile_disc(
+        self,
+        column: str,
+        q: float | pytyping.List[float] = 0.5,
+        groups: str = "",
+        window_spec: str = "",
+        projected_columns: str = "",
+    ) -> DuckDBPyRelation: ...
+    def query(self, virtual_table_name: str, sql_query: str) -> DuckDBPyRelation: ...
+    def rank(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+    def rank_dense(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+    def record_batch(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.RecordBatchReader: ...
+    def row_number(self, window_spec: str, projected_columns: str = "") -> DuckDBPyRelation: ...
+    def select(self, *args: str | Expression, groups: str = "") -> DuckDBPyRelation: ...
+    def select_dtypes(self, types: pytyping.List[sqltypes.DuckDBPyType | str]) -> DuckDBPyRelation: ...
+    def select_types(self, types: pytyping.List[sqltypes.DuckDBPyType | str]) -> DuckDBPyRelation: ...
+    def set_alias(self, alias: str) -> DuckDBPyRelation: ...
+    def show(
+        self,
+        *,
+        max_width: pytyping.SupportsInt | None = None,
+        max_rows: pytyping.SupportsInt | None = None,
+        max_col_width: pytyping.SupportsInt | None = None,
+        null_value: str | None = None,
+        render_mode: RenderMode | None = None,
+    ) -> None: ...
+    def sort(self, *args: Expression) -> DuckDBPyRelation: ...
+    def sql_query(self) -> str: ...
+    def std(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def stddev(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def stddev_pop(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def stddev_samp(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def string_agg(
+        self, column: str, sep: str = ",", groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def sum(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def tf(self) -> dict[str, tensorflow.Tensor]: ...
+    def to_arrow_table(self, batch_size: pytyping.SupportsInt = 1000000) -> pyarrow.lib.Table: ...
+    def to_csv(
+        self,
+        file_name: str,
+        *,
+        sep: str | None = None,
+        na_rep: str | None = None,
+        header: bool | None = None,
+        quotechar: str | None = None,
+        escapechar: str | None = None,
+        date_format: str | None = None,
+        timestamp_format: str | None = None,
+        quoting: str | int | None = None,
+        encoding: str | None = None,
+        compression: str | None = None,
+        overwrite: bool | None = None,
+        per_thread_output: bool | None = None,
+        use_tmp_file: bool | None = None,
+        partition_by: pytyping.List[str] | None = None,
+        write_partition_columns: bool | None = None,
+    ) -> None: ...
+    def to_df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
+    def to_parquet(
+        self,
+        file_name: str,
+        *,
+        compression: str | None = None,
+        field_ids: ParquetFieldIdsType | pytyping.Literal["auto"] | None = None,
+        row_group_size_bytes: int | str | None = None,
+        row_group_size: int | None = None,
+        overwrite: bool | None = None,
+        per_thread_output: bool | None = None,
+        use_tmp_file: bool | None = None,
+        partition_by: pytyping.List[str] | None = None,
+        write_partition_columns: bool | None = None,
+        append: bool | None = None,
+        filename_pattern: str | None = None,
+        file_size_bytes: str | int | None = None,
+    ) -> None: ...
+    def to_table(self, table_name: str) -> None: ...
+    def to_view(self, view_name: str, replace: bool = True) -> DuckDBPyRelation: ...
+    def torch(self) -> dict[str, pytorch.Tensor]: ...
+    def union(self, union_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
+    def unique(self, unique_aggr: str) -> DuckDBPyRelation: ...
+    def update(self, set: Expression | str, *, condition: Expression | str | None = None) -> None: ...
+    def value_counts(self, column: str, groups: str = "") -> DuckDBPyRelation: ...
+    def var(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def var_pop(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def var_samp(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def variance(
+        self, column: str, groups: str = "", window_spec: str = "", projected_columns: str = ""
+    ) -> DuckDBPyRelation: ...
+    def write_csv(
+        self,
+        file_name: str,
+        sep: str | None = None,
+        na_rep: str | None = None,
+        header: bool | None = None,
+        quotechar: str | None = None,
+        escapechar: str | None = None,
+        date_format: str | None = None,
+        timestamp_format: str | None = None,
+        quoting: str | int | None = None,
+        encoding: str | None = None,
+        compression: str | None = None,
+        overwrite: bool | None = None,
+        per_thread_output: bool | None = None,
+        use_tmp_file: bool | None = None,
+        partition_by: pytyping.List[str] | None = None,
+        write_partition_columns: bool | None = None,
+    ) -> None: ...
+    def write_parquet(
+        self,
+        file_name: str,
+        compression: str | None = None,
+        field_ids: ParquetFieldIdsType | pytyping.Literal["auto"] | None = None,
+        row_group_size_bytes: str | int | None = None,
+        row_group_size: int | None = None,
+        overwrite: bool | None = None,
+        per_thread_output: bool | None = None,
+        use_tmp_file: bool | None = None,
+        partition_by: pytyping.List[str] | None = None,
+        write_partition_columns: bool | None = None,
+        append: bool | None = None,
+        filename_pattern: str | None = None,
+        file_size_bytes: str | int | None = None,
+    ) -> None: ...
+    @property
+    def alias(self) -> str: ...
+    @property
+    def columns(self) -> pytyping.List[str]: ...
+    @property
+    def description(self) -> pytyping.List[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]]: ...
+    @property
+    def dtypes(self) -> pytyping.List[str]: ...
+    @property
+    def shape(self) -> tuple[int, int]: ...
+    @property
+    def type(self) -> str: ...
+    @property
+    def types(self) -> pytyping.List[sqltypes.DuckDBPyType]: ...
+
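Most of these methods are lazy relational transforms; work happens only when a result is materialized (fetch*, df, arrow, pl, show, ...). A small sketch:

    import duckdb

    rel = duckdb.sql("SELECT range AS i FROM range(10)")     # i: 0..9
    evens = rel.filter("i % 2 = 0").aggregate("sum(i) AS total")
    evens.show()              # renders the result grid
    print(evens.fetchone())   # (20,)
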
+class Error(Exception): ...
+
+class ExpectedResultType:
+    CHANGED_ROWS: pytyping.ClassVar[ExpectedResultType]  # value = <ExpectedResultType.CHANGED_ROWS: 1>
+    NOTHING: pytyping.ClassVar[ExpectedResultType]  # value = <ExpectedResultType.NOTHING: 2>
+    QUERY_RESULT: pytyping.ClassVar[ExpectedResultType]  # value = <ExpectedResultType.QUERY_RESULT: 0>
+    __members__: pytyping.ClassVar[
+        dict[str, ExpectedResultType]
+    ]  # value = {'QUERY_RESULT': <ExpectedResultType.QUERY_RESULT: 0>, 'CHANGED_ROWS': <ExpectedResultType.CHANGED_ROWS: 1>, 'NOTHING': <ExpectedResultType.NOTHING: 2>}  # noqa: E501
+    def __eq__(self, other: object) -> bool: ...
+    def __getstate__(self) -> int: ...
+    def __hash__(self) -> int: ...
+    def __index__(self) -> int: ...
+    def __init__(self, value: pytyping.SupportsInt) -> None: ...
+    def __int__(self) -> int: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def value(self) -> int: ...
+
+class ExplainType:
+    ANALYZE: pytyping.ClassVar[ExplainType]  # value = <ExplainType.ANALYZE: 1>
+    STANDARD: pytyping.ClassVar[ExplainType]  # value = <ExplainType.STANDARD: 0>
+    __members__: pytyping.ClassVar[
+        dict[str, ExplainType]
+    ]  # value = {'STANDARD': <ExplainType.STANDARD: 0>, 'ANALYZE': <ExplainType.ANALYZE: 1>}
+    def __eq__(self, other: object) -> bool: ...
+    def __getstate__(self) -> int: ...
+    def __hash__(self) -> int: ...
+    def __index__(self) -> int: ...
+    def __init__(self, value: pytyping.SupportsInt) -> None: ...
+    def __int__(self) -> int: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def value(self) -> int: ...
+
+class Expression:
+    def __add__(self, other: Expression) -> Expression: ...
+    def __and__(self, other: Expression) -> Expression: ...
+    def __div__(self, other: Expression) -> Expression: ...
+    def __eq__(self, other: Expression) -> Expression: ...  # type: ignore[override]
+    def __floordiv__(self, other: Expression) -> Expression: ...
+    def __ge__(self, other: Expression) -> Expression: ...
+    def __gt__(self, other: Expression) -> Expression: ...
+    @pytyping.overload
+    def __init__(self, arg0: str) -> None: ...
+    @pytyping.overload
+    def __init__(self, arg0: pytyping.Any) -> None: ...
+    def __invert__(self) -> Expression: ...
+    def __le__(self, other: Expression) -> Expression: ...
+    def __lt__(self, other: Expression) -> Expression: ...
+    def __mod__(self, other: Expression) -> Expression: ...
+    def __mul__(self, other: Expression) -> Expression: ...
+    def __ne__(self, other: Expression) -> Expression: ...  # type: ignore[override]
+    def __neg__(self) -> Expression: ...
+    def __or__(self, other: Expression) -> Expression: ...
+    def __pow__(self, other: Expression) -> Expression: ...
+    def __radd__(self, other: Expression) -> Expression: ...
+    def __rand__(self, other: Expression) -> Expression: ...
+    def __rdiv__(self, other: Expression) -> Expression: ...
+    def __rfloordiv__(self, other: Expression) -> Expression: ...
+    def __rmod__(self, other: Expression) -> Expression: ...
+    def __rmul__(self, other: Expression) -> Expression: ...
+    def __ror__(self, other: Expression) -> Expression: ...
+    def __rpow__(self, other: Expression) -> Expression: ...
+    def __rsub__(self, other: Expression) -> Expression: ...
+    def __rtruediv__(self, other: Expression) -> Expression: ...
+    def __sub__(self, other: Expression) -> Expression: ...
+    def __truediv__(self, other: Expression) -> Expression: ...
+    def alias(self, name: str) -> Expression: ...
+    def asc(self) -> Expression: ...
+    def between(self, lower: Expression, upper: Expression) -> Expression: ...
+    def cast(self, type: sqltypes.DuckDBPyType) -> Expression: ...
+    def collate(self, collation: str) -> Expression: ...
+    def desc(self) -> Expression: ...
+    def get_name(self) -> str: ...
+    def isin(self, *args: Expression) -> Expression: ...
+    def isnotin(self, *args: Expression) -> Expression: ...
+    def isnotnull(self) -> Expression: ...
+    def isnull(self) -> Expression: ...
+    def nulls_first(self) -> Expression: ...
+    def nulls_last(self) -> Expression: ...
+    def otherwise(self, value: Expression) -> Expression: ...
+    def show(self) -> None: ...
+    def when(self, condition: Expression, value: Expression) -> Expression: ...
+
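Expressions compose with Python operators and feed the relational API. An illustrative sketch using only the constructors declared later in this stub (ColumnExpression, SQLExpression):

    import duckdb
    from duckdb import ColumnExpression, SQLExpression

    rel = duckdb.sql("SELECT range AS i FROM range(5)")
    rel.filter(SQLExpression("i % 2 = 0")).select(
        ColumnExpression("i").alias("even_i")
    ).show()
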
+class FatalException(DatabaseError): ...
+
+class HTTPException(IOException):
+    status_code: int
+    body: str
+    reason: str
+    headers: dict[str, str]
+
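HTTPException extends IOException with the response details, so a failed remote read can be inspected. A sketch (assumes the httpfs extension handles the request; the URL is fictional):

    import duckdb

    try:
        duckdb.sql("SELECT * FROM read_parquet('https://example.com/missing.parquet')")
    except duckdb.HTTPException as exc:
        print(exc.status_code, exc.reason)  # e.g. 404 and the server's reason phrase
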
+class IOException(OperationalError): ...
+class IntegrityError(DatabaseError): ...
+class InternalError(DatabaseError): ...
+class InternalException(InternalError): ...
+class InterruptException(DatabaseError): ...
+class InvalidInputException(ProgrammingError): ...
+class InvalidTypeException(ProgrammingError): ...
+class NotImplementedException(NotSupportedError): ...
+class NotSupportedError(DatabaseError): ...
+class OperationalError(DatabaseError): ...
+class OutOfMemoryException(OperationalError): ...
+class OutOfRangeException(DataError): ...
+class ParserException(ProgrammingError): ...
+class PermissionException(DatabaseError): ...
+class ProgrammingError(DatabaseError): ...
+
+class PythonExceptionHandling:
+    DEFAULT: pytyping.ClassVar[PythonExceptionHandling]  # value = <PythonExceptionHandling.DEFAULT: 0>
+    RETURN_NULL: pytyping.ClassVar[PythonExceptionHandling]  # value = <PythonExceptionHandling.RETURN_NULL: 1>
+    __members__: pytyping.ClassVar[
+        dict[str, PythonExceptionHandling]
+    ]  # value = {'DEFAULT': <PythonExceptionHandling.DEFAULT: 0>, 'RETURN_NULL': <PythonExceptionHandling.RETURN_NULL: 1>}  # noqa: E501
+    def __eq__(self, other: object) -> bool: ...
+    def __getstate__(self) -> int: ...
+    def __hash__(self) -> int: ...
+    def __index__(self) -> int: ...
+    def __init__(self, value: pytyping.SupportsInt) -> None: ...
+    def __int__(self) -> int: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def value(self) -> int: ...
+
+class RenderMode:
+    COLUMNS: pytyping.ClassVar[RenderMode]  # value = <RenderMode.COLUMNS: 1>
+    ROWS: pytyping.ClassVar[RenderMode]  # value = <RenderMode.ROWS: 0>
+    __members__: pytyping.ClassVar[
+        dict[str, RenderMode]
+    ]  # value = {'ROWS': <RenderMode.ROWS: 0>, 'COLUMNS': <RenderMode.COLUMNS: 1>}
+    def __eq__(self, other: object) -> bool: ...
+    def __getstate__(self) -> int: ...
+    def __hash__(self) -> int: ...
+    def __index__(self) -> int: ...
+    def __init__(self, value: pytyping.SupportsInt) -> None: ...
+    def __int__(self) -> int: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def value(self) -> int: ...
+
+class SequenceException(DatabaseError): ...
+class SerializationException(OperationalError): ...
+
+class Statement:
+    @property
+    def expected_result_type(self) -> list[StatementType]: ...
+    @property
+    def named_parameters(self) -> set[str]: ...
+    @property
+    def query(self) -> str: ...
+    @property
+    def type(self) -> StatementType: ...
+
+class StatementType:
+    ALTER_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.ALTER_STATEMENT: 8>
+    ANALYZE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.ANALYZE_STATEMENT: 11>
+    ATTACH_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.ATTACH_STATEMENT: 25>
+    CALL_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.CALL_STATEMENT: 19>
+    COPY_DATABASE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.COPY_DATABASE_STATEMENT: 28>
+    COPY_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.COPY_STATEMENT: 10>
+    CREATE_FUNC_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.CREATE_FUNC_STATEMENT: 13>
+    CREATE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.CREATE_STATEMENT: 4>
+    DELETE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.DELETE_STATEMENT: 5>
+    DETACH_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.DETACH_STATEMENT: 26>
+    DROP_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.DROP_STATEMENT: 15>
+    EXECUTE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXECUTE_STATEMENT: 7>
+    EXPLAIN_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXPLAIN_STATEMENT: 14>
+    EXPORT_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXPORT_STATEMENT: 16>
+    EXTENSION_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.EXTENSION_STATEMENT: 23>
+    INSERT_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.INSERT_STATEMENT: 2>
+    INVALID_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.INVALID_STATEMENT: 0>
+    LOAD_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.LOAD_STATEMENT: 21>
+    LOGICAL_PLAN_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.LOGICAL_PLAN_STATEMENT: 24>
+    MERGE_INTO_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.MERGE_INTO_STATEMENT: 30>
+    MULTI_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.MULTI_STATEMENT: 27>
+    PRAGMA_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.PRAGMA_STATEMENT: 17>
+    PREPARE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.PREPARE_STATEMENT: 6>
+    RELATION_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.RELATION_STATEMENT: 22>
+    SELECT_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.SELECT_STATEMENT: 1>
+    SET_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.SET_STATEMENT: 20>
+    TRANSACTION_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.TRANSACTION_STATEMENT: 9>
+    UPDATE_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.UPDATE_STATEMENT: 3>
+    VACUUM_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.VACUUM_STATEMENT: 18>
+    VARIABLE_SET_STATEMENT: pytyping.ClassVar[StatementType]  # value = <StatementType.VARIABLE_SET_STATEMENT: 12>
+    __members__: pytyping.ClassVar[
+        dict[str, StatementType]
+    ]  # value = {'INVALID_STATEMENT': <StatementType.INVALID_STATEMENT: 0>, 'SELECT_STATEMENT': <StatementType.SELECT_STATEMENT: 1>, 'INSERT_STATEMENT': <StatementType.INSERT_STATEMENT: 2>, 'UPDATE_STATEMENT': <StatementType.UPDATE_STATEMENT: 3>, 'CREATE_STATEMENT': <StatementType.CREATE_STATEMENT: 4>, 'DELETE_STATEMENT': <StatementType.DELETE_STATEMENT: 5>, 'PREPARE_STATEMENT': <StatementType.PREPARE_STATEMENT: 6>, 'EXECUTE_STATEMENT': <StatementType.EXECUTE_STATEMENT: 7>, 'ALTER_STATEMENT': <StatementType.ALTER_STATEMENT: 8>, 'TRANSACTION_STATEMENT': <StatementType.TRANSACTION_STATEMENT: 9>, 'COPY_STATEMENT': <StatementType.COPY_STATEMENT: 10>, 'ANALYZE_STATEMENT': <StatementType.ANALYZE_STATEMENT: 11>, 'VARIABLE_SET_STATEMENT': <StatementType.VARIABLE_SET_STATEMENT: 12>, 'CREATE_FUNC_STATEMENT': <StatementType.CREATE_FUNC_STATEMENT: 13>, 'EXPLAIN_STATEMENT': <StatementType.EXPLAIN_STATEMENT: 14>, 'DROP_STATEMENT': <StatementType.DROP_STATEMENT: 15>, 'EXPORT_STATEMENT': <StatementType.EXPORT_STATEMENT: 16>, 'PRAGMA_STATEMENT': <StatementType.PRAGMA_STATEMENT: 17>, 'VACUUM_STATEMENT': <StatementType.VACUUM_STATEMENT: 18>, 'CALL_STATEMENT': <StatementType.CALL_STATEMENT: 19>, 'SET_STATEMENT': <StatementType.SET_STATEMENT: 20>, 'LOAD_STATEMENT': <StatementType.LOAD_STATEMENT: 21>, 'RELATION_STATEMENT': <StatementType.RELATION_STATEMENT: 22>, 'EXTENSION_STATEMENT': <StatementType.EXTENSION_STATEMENT: 23>, 'LOGICAL_PLAN_STATEMENT': <StatementType.LOGICAL_PLAN_STATEMENT: 24>, 'ATTACH_STATEMENT': <StatementType.ATTACH_STATEMENT: 25>, 'DETACH_STATEMENT': <StatementType.DETACH_STATEMENT: 26>, 'MULTI_STATEMENT': <StatementType.MULTI_STATEMENT: 27>, 'COPY_DATABASE_STATEMENT': <StatementType.COPY_DATABASE_STATEMENT: 28>, 'MERGE_INTO_STATEMENT': <StatementType.MERGE_INTO_STATEMENT: 30>}  # noqa: E501
+    def __eq__(self, other: object) -> bool: ...
+    def __getstate__(self) -> int: ...
+    def __hash__(self) -> int: ...
+    def __index__(self) -> int: ...
+    def __init__(self, value: pytyping.SupportsInt) -> None: ...
+    def __int__(self) -> int: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def value(self) -> int: ...
+
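Statement objects come out of extract_statements and can be fed back into execute or sql, which both accept Statement | str. For example:

    import duckdb

    for stmt in duckdb.extract_statements("SELECT 1; SELECT 2;"):
        print(stmt.type)                    # StatementType.SELECT_STATEMENT
        print(duckdb.execute(stmt).fetchall())
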
+class SyntaxException(ProgrammingError): ...
+class TransactionException(OperationalError): ...
+class TypeMismatchException(DataError): ...
+class Warning(Exception): ...
+
+class token_type:
+    __members__: pytyping.ClassVar[
+        dict[str, token_type]
+    ]  # value = {'identifier': <token_type.identifier: 0>, 'numeric_const': <token_type.numeric_const: 1>, 'string_const': <token_type.string_const: 2>, 'operator': <token_type.operator: 3>, 'keyword': <token_type.keyword: 4>, 'comment': <token_type.comment: 5>}  # noqa: E501
+    comment: pytyping.ClassVar[token_type]  # value = <token_type.comment: 5>
+    identifier: pytyping.ClassVar[token_type]  # value = <token_type.identifier: 0>
+    keyword: pytyping.ClassVar[token_type]  # value = <token_type.keyword: 4>
+    numeric_const: pytyping.ClassVar[token_type]  # value = <token_type.numeric_const: 1>
+    operator: pytyping.ClassVar[token_type]  # value = <token_type.operator: 3>
+    string_const: pytyping.ClassVar[token_type]  # value = <token_type.string_const: 2>
+    def __eq__(self, other: object) -> bool: ...
+    def __getstate__(self) -> int: ...
+    def __hash__(self) -> int: ...
+    def __index__(self) -> int: ...
+    def __init__(self, value: pytyping.SupportsInt) -> None: ...
+    def __int__(self) -> int: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __setstate__(self, state: pytyping.SupportsInt) -> None: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def value(self) -> int: ...
+
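token_type pairs with duckdb.tokenize (listed in __all__ above). A sketch; note the exact element shape of tokenize's return value is not declared in this part of the stub:

    import duckdb

    for token in duckdb.tokenize("SELECT 42 -- the answer"):
        print(token)  # each token references a token_type such as token_type.keyword
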
+def CaseExpression(condition: Expression, value: Expression) -> Expression: ...
+def CoalesceOperator(*args: Expression) -> Expression: ...
+def ColumnExpression(*args: str) -> Expression: ...
+def ConstantExpression(value: Expression | str) -> Expression: ...
+def DefaultExpression() -> Expression: ...
+def FunctionExpression(function_name: str, *args: Expression) -> Expression: ...
+def LambdaExpression(lhs: Expression | str | tuple[str], rhs: Expression) -> Expression: ...
+def SQLExpression(expression: str) -> Expression: ...
+@pytyping.overload
+def StarExpression(*, exclude: Expression | str | tuple[str]) -> Expression: ...
+@pytyping.overload
+def StarExpression() -> Expression: ...
+def aggregate(
+    df: pandas.DataFrame,
+    aggr_expr: Expression | list[Expression] | str | list[str],
+    group_expr: str = "",
+    *,
+    connection: DuckDBPyConnection | None = None,
+) -> DuckDBPyRelation: ...
+def alias(df: pandas.DataFrame, alias: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+def append(
+    table_name: str, df: pandas.DataFrame, *, by_name: bool = False, connection: DuckDBPyConnection | None = None
+) -> DuckDBPyConnection: ...
+def array_type(
+    type: sqltypes.DuckDBPyType, size: pytyping.SupportsInt, *, connection: DuckDBPyConnection | None = None
+) -> sqltypes.DuckDBPyType: ...
+@pytyping.overload
+def arrow(
+    rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
+) -> pyarrow.lib.RecordBatchReader: ...
+@pytyping.overload
+def arrow(arrow_object: pytyping.Any, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+def begin(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+def checkpoint(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+def close(*, connection: DuckDBPyConnection | None = None) -> None: ...
+def commit(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+def connect(
+    database: str | pathlib.Path = ":memory:",
+    read_only: bool = False,
+    config: dict[str, str | bool | int | float | list[str]] | None = None,
+) -> DuckDBPyConnection: ...
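
The module-level functions mirror the connection methods, falling back to a shared default connection when connection=None. connect() itself takes a file path or ":memory:" plus an optional config dict, e.g.:

    import duckdb

    con = duckdb.connect("example.db", read_only=False, config={"threads": 4})
    con.sql("SELECT 1").show()
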
1077
+ def create_function(
1078
+ name: str,
1079
+ function: Callable[..., pytyping.Any],
1080
+ parameters: list[sqltypes.DuckDBPyType] | None = None,
1081
+ return_type: sqltypes.DuckDBPyType | None = None,
1082
+ *,
1083
+ type: func.PythonUDFType = ...,
1084
+ null_handling: func.FunctionNullHandling = ...,
1085
+ exception_handling: PythonExceptionHandling = ...,
1086
+ side_effects: bool = False,
1087
+ connection: DuckDBPyConnection | None = None,
1088
+ ) -> DuckDBPyConnection: ...
1089
+ def cursor(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1090
+ def decimal_type(
1091
+ width: pytyping.SupportsInt, scale: pytyping.SupportsInt, *, connection: DuckDBPyConnection | None = None
1092
+ ) -> sqltypes.DuckDBPyType: ...
1093
+ def default_connection() -> DuckDBPyConnection: ...
1094
+ def description(
1095
+ *, connection: DuckDBPyConnection | None = None
1096
+ ) -> list[tuple[str, sqltypes.DuckDBPyType, None, None, None, None, None]] | None: ...
1097
+ @pytyping.overload
1098
+ def df(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
1099
+ @pytyping.overload
1100
+ def df(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1101
+ def distinct(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
1102
+ def dtype(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
1103
+ def duplicate(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
1104
+ def enum_type(
1105
+ name: str,
1106
+ type: sqltypes.DuckDBPyType,
1107
+ values: list[pytyping.Any],
1108
+ *,
1109
+ connection: DuckDBPyConnection | None = None,
1110
+ ) -> sqltypes.DuckDBPyType: ...
1111
+ def execute(
1112
+ query: Statement | str,
1113
+ parameters: object = None,
1114
+ *,
1115
+ connection: DuckDBPyConnection | None = None,
1116
+ ) -> DuckDBPyConnection: ...
1117
+ def executemany(
1118
+ query: Statement | str,
1119
+ parameters: object = None,
1120
+ *,
1121
+ connection: DuckDBPyConnection | None = None,
1122
+ ) -> DuckDBPyConnection: ...
+ def extract_statements(query: str, *, connection: DuckDBPyConnection | None = None) -> list[Statement]: ...
+ def fetch_arrow_table(
+     rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
+ ) -> pyarrow.lib.Table: ...
+ def fetch_df(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
+ def fetch_df_chunk(
+     vectors_per_chunk: pytyping.SupportsInt = 1,
+     *,
+     date_as_object: bool = False,
+     connection: DuckDBPyConnection | None = None,
+ ) -> pandas.DataFrame: ...
+ def fetch_record_batch(
+     rows_per_batch: pytyping.SupportsInt = 1000000, *, connection: DuckDBPyConnection | None = None
+ ) -> pyarrow.lib.RecordBatchReader: ...
+ def fetchall(*, connection: DuckDBPyConnection | None = None) -> list[tuple[pytyping.Any, ...]]: ...
+ def fetchdf(*, date_as_object: bool = False, connection: DuckDBPyConnection | None = None) -> pandas.DataFrame: ...
+ def fetchmany(
+     size: pytyping.SupportsInt = 1, *, connection: DuckDBPyConnection | None = None
+ ) -> list[tuple[pytyping.Any, ...]]: ...
+ def fetchnumpy(
+     *, connection: DuckDBPyConnection | None = None
+ ) -> dict[str, np.typing.NDArray[pytyping.Any] | pandas.Categorical]: ...
+ def fetchone(*, connection: DuckDBPyConnection | None = None) -> tuple[pytyping.Any, ...] | None: ...
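+ # Editorial sketch, not part of the generated stub: the fetch* functions drain the
+ # result of the most recent `execute` on the (default) connection, assuming the
+ # usual DB-API cursor semantics.
+ import duckdb
+
+ duckdb.execute("SELECT i FROM range(5) AS t(i)")
+ duckdb.fetchone()    # (0,)
+ duckdb.fetchmany(2)  # [(1,), (2,)]
+ duckdb.fetchall()    # [(3,), (4,)]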
+ def filesystem_is_registered(name: str, *, connection: DuckDBPyConnection | None = None) -> bool: ...
+ def filter(
+     df: pandas.DataFrame,
+     filter_expr: Expression | str,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def from_arrow(
+     arrow_object: object,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def from_csv_auto(
+     path_or_buffer: str | bytes | os.PathLike[str],
+     header: bool | int | None = None,
+     compression: str | None = None,
+     sep: str | None = None,
+     delimiter: str | None = None,
+     files_to_sniff: int | None = None,
+     comment: str | None = None,
+     thousands: str | None = None,
+     dtype: dict[str, str] | list[str] | None = None,
+     na_values: str | list[str] | None = None,
+     skiprows: int | None = None,
+     quotechar: str | None = None,
+     escapechar: str | None = None,
+     encoding: str | None = None,
+     parallel: bool | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     sample_size: int | None = None,
+     auto_detect: bool | int | None = None,
+     all_varchar: bool | None = None,
+     normalize_names: bool | None = None,
+     null_padding: bool | None = None,
+     names: list[str] | None = None,
+     lineterminator: str | None = None,
+     columns: dict[str, str] | None = None,
+     auto_type_candidates: list[str] | None = None,
+     max_line_size: int | None = None,
+     ignore_errors: bool | None = None,
+     store_rejects: bool | None = None,
+     rejects_table: str | None = None,
+     rejects_scan: str | None = None,
+     rejects_limit: int | None = None,
+     force_not_null: list[str] | None = None,
+     buffer_size: int | None = None,
+     decimal: str | None = None,
+     allow_quoted_nulls: bool | None = None,
+     filename: bool | str | None = None,
+     hive_partitioning: bool | None = None,
+     union_by_name: bool | None = None,
+     hive_types: dict[str, str] | None = None,
+     hive_types_autocast: bool | None = None,
+     strict_mode: bool | None = None,
+ ) -> DuckDBPyRelation: ...
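+ # Editorial sketch, not part of the generated stub: `from_csv_auto` (and the
+ # identically-parameterized `read_csv` below) sniffs the CSV dialect and schema;
+ # keyword arguments override what the sniffer detects. "events.csv" and the "id"
+ # column are placeholders.
+ import duckdb
+
+ rel = duckdb.from_csv_auto("events.csv", header=True, dtype={"id": "BIGINT"})
+ rel.limit(5).show()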
+ def from_df(df: pandas.DataFrame, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def from_parquet(
+     file_glob: str,
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: str | None = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def from_parquet(
+     file_globs: Sequence[str],
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: pytyping.Any = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
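+ # Editorial sketch, not part of the generated stub: the second overload accepts a
+ # list of globs; `union_by_name=True` aligns files whose schemas differ, and
+ # `filename=True` adds the source file as a column. The paths are placeholders.
+ import duckdb
+
+ rel = duckdb.from_parquet(
+     ["data/2023/*.parquet", "data/2024/*.parquet"],
+     union_by_name=True,
+     filename=True,
+ )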
+ def from_query(
+     query: Statement | str,
+     *,
+     alias: str = "",
+     params: object = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def get_table_names(
+     query: str, *, qualified: bool = False, connection: DuckDBPyConnection | None = None
+ ) -> set[str]: ...
+ def install_extension(
+     extension: str,
+     *,
+     force_install: bool = False,
+     repository: str | None = None,
+     repository_url: str | None = None,
+     version: str | None = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> None: ...
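+ # Editorial sketch, not part of the generated stub: extensions are installed once
+ # (downloaded into the extension directory) and then loaded per process via
+ # `load_extension` (declared below); httpfs is a real extension used as an example.
+ import duckdb
+
+ duckdb.install_extension("httpfs")
+ duckdb.load_extension("httpfs")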
+ def interrupt(*, connection: DuckDBPyConnection | None = None) -> None: ...
+ def limit(
+     df: pandas.DataFrame,
+     n: pytyping.SupportsInt,
+     offset: pytyping.SupportsInt = 0,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def list_filesystems(*, connection: DuckDBPyConnection | None = None) -> list[str]: ...
+ def list_type(
+     type: sqltypes.DuckDBPyType, *, connection: DuckDBPyConnection | None = None
+ ) -> sqltypes.DuckDBPyType: ...
+ def load_extension(extension: str, *, connection: DuckDBPyConnection | None = None) -> None: ...
+ def map_type(
+     key: sqltypes.DuckDBPyType,
+     value: sqltypes.DuckDBPyType,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
+ def order(
+     df: pandas.DataFrame, order_expr: str, *, connection: DuckDBPyConnection | None = None
+ ) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def pl(
+     rows_per_batch: pytyping.SupportsInt = 1000000,
+     *,
+     lazy: pytyping.Literal[False] = ...,
+     connection: DuckDBPyConnection | None = None,
+ ) -> polars.DataFrame: ...
+ @pytyping.overload
+ def pl(
+     rows_per_batch: pytyping.SupportsInt = 1000000,
+     *,
+     lazy: pytyping.Literal[True],
+     connection: DuckDBPyConnection | None = None,
+ ) -> polars.LazyFrame: ...
+ @pytyping.overload
+ def pl(
+     rows_per_batch: pytyping.SupportsInt = 1000000,
+     *,
+     lazy: bool = False,
+     connection: DuckDBPyConnection | None = None,
+ ) -> pytyping.Union[polars.DataFrame, polars.LazyFrame]: ...
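+ # Editorial sketch, not part of the generated stub: per the overloads above, `lazy`
+ # selects the polars return type; `lazy=True` yields a LazyFrame, otherwise batches
+ # of `rows_per_batch` rows are materialized into a DataFrame.
+ import duckdb
+
+ duckdb.execute("SELECT i FROM range(10) AS t(i)")
+ df = duckdb.pl()           # polars.DataFrame
+ duckdb.execute("SELECT i FROM range(10) AS t(i)")
+ lf = duckdb.pl(lazy=True)  # polars.LazyFrame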
+ def project(
+     df: pandas.DataFrame, *args: str | Expression, groups: str = "", connection: DuckDBPyConnection | None = None
+ ) -> DuckDBPyRelation: ...
+ def query(
+     query: Statement | str,
+     *,
+     alias: str = "",
+     params: object = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def query_df(
+     df: pandas.DataFrame,
+     virtual_table_name: str,
+     sql_query: str,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def query_progress(*, connection: DuckDBPyConnection | None = None) -> float: ...
+ def read_csv(
+     path_or_buffer: str | bytes | os.PathLike[str],
+     header: bool | int | None = None,
+     compression: str | None = None,
+     sep: str | None = None,
+     delimiter: str | None = None,
+     files_to_sniff: int | None = None,
+     comment: str | None = None,
+     thousands: str | None = None,
+     dtype: dict[str, str] | list[str] | None = None,
+     na_values: str | list[str] | None = None,
+     skiprows: int | None = None,
+     quotechar: str | None = None,
+     escapechar: str | None = None,
+     encoding: str | None = None,
+     parallel: bool | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     sample_size: int | None = None,
+     auto_detect: bool | int | None = None,
+     all_varchar: bool | None = None,
+     normalize_names: bool | None = None,
+     null_padding: bool | None = None,
+     names: list[str] | None = None,
+     lineterminator: str | None = None,
+     columns: dict[str, str] | None = None,
+     auto_type_candidates: list[str] | None = None,
+     max_line_size: int | None = None,
+     ignore_errors: bool | None = None,
+     store_rejects: bool | None = None,
+     rejects_table: str | None = None,
+     rejects_scan: str | None = None,
+     rejects_limit: int | None = None,
+     force_not_null: list[str] | None = None,
+     buffer_size: int | None = None,
+     decimal: str | None = None,
+     allow_quoted_nulls: bool | None = None,
+     filename: bool | str | None = None,
+     hive_partitioning: bool | None = None,
+     union_by_name: bool | None = None,
+     hive_types: dict[str, str] | None = None,
+     hive_types_autocast: bool | None = None,
+     strict_mode: bool | None = None,
+ ) -> DuckDBPyRelation: ...
+ def read_json(
+     path_or_buffer: str | bytes | os.PathLike[str],
+     *,
+     columns: dict[str, str] | None = None,
+     sample_size: int | None = None,
+     maximum_depth: int | None = None,
+     records: str | None = None,
+     format: str | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     compression: str | None = None,
+     maximum_object_size: int | None = None,
+     ignore_errors: bool | None = None,
+     convert_strings_to_integers: bool | None = None,
+     field_appearance_threshold: float | None = None,
+     map_inference_threshold: int | None = None,
+     maximum_sample_files: int | None = None,
+     filename: bool | str | None = None,
+     hive_partitioning: bool | None = None,
+     union_by_name: bool | None = None,
+     hive_types: dict[str, str] | None = None,
+     hive_types_autocast: bool | None = None,
+ ) -> DuckDBPyRelation: ...
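+ # Editorial sketch, not part of the generated stub: `format` chooses how the input
+ # is split into JSON values (e.g. "newline_delimited" for NDJSON) and
+ # `ignore_errors` skips rows that fail to parse. "logs.ndjson" is a placeholder.
+ import duckdb
+
+ rel = duckdb.read_json("logs.ndjson", format="newline_delimited", ignore_errors=True)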
+ @pytyping.overload
+ def read_parquet(
+     file_glob: str,
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: str | None = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ @pytyping.overload
+ def read_parquet(
+     file_globs: Sequence[str],
+     binary_as_string: bool = False,
+     *,
+     file_row_number: bool = False,
+     filename: bool = False,
+     hive_partitioning: bool = False,
+     union_by_name: bool = False,
+     compression: pytyping.Any = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def register(
+     view_name: str,
+     python_object: object,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyConnection: ...
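+ # Editorial sketch, not part of the generated stub: `register` exposes a Python
+ # object (e.g. a pandas/polars DataFrame or an Arrow table) to SQL under a view name.
+ import duckdb
+ import pandas as pd
+
+ duckdb.register("df_view", pd.DataFrame({"i": [1, 2, 3]}))
+ duckdb.sql("SELECT sum(i) FROM df_view").fetchone()  # (6,)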
+ def register_filesystem(
+     filesystem: fsspec.AbstractFileSystem, *, connection: DuckDBPyConnection | None = None
+ ) -> None: ...
+ def remove_function(name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+ def rollback(*, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+ def row_type(
+     fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
+ def rowcount(*, connection: DuckDBPyConnection | None = None) -> int: ...
+ def set_default_connection(connection: DuckDBPyConnection) -> None: ...
+ def sql(
+     query: Statement | str,
+     *,
+     alias: str = "",
+     params: object = None,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
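+ # Editorial sketch, not part of the generated stub: `sql` returns a lazily
+ # evaluated relation; `params` binds prepared-statement placeholders as typed above.
+ import duckdb
+
+ rel = duckdb.sql("SELECT ? * 2 AS answer", params=[21])
+ rel.fetchone()  # (42,)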
+ def sqltype(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
+ def string_type(collation: str = "", *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
+ def struct_type(
+     fields: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
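+ # Editorial sketch, not part of the generated stub: the composite-type constructors
+ # (`struct_type`, `map_type`, `row_type`, ...) build DuckDBPyType values; the field
+ # names below are illustrative.
+ import duckdb
+ from duckdb.typing import BIGINT, VARCHAR
+
+ t = duckdb.struct_type({"name": VARCHAR, "score": BIGINT})
+ str(t)  # roughly "STRUCT(name VARCHAR, score BIGINT)"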
+ def table(table_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+ def table_function(
+     name: str,
+     parameters: object = None,
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> DuckDBPyRelation: ...
+ def tf(*, connection: DuckDBPyConnection | None = None) -> dict[str, tensorflow.Tensor]: ...
+ def tokenize(query: str) -> list[tuple[int, token_type]]: ...
+ def torch(*, connection: DuckDBPyConnection | None = None) -> dict[str, pytorch.Tensor]: ...
+ def type(type_str: str, *, connection: DuckDBPyConnection | None = None) -> sqltypes.DuckDBPyType: ...
+ def union_type(
+     members: dict[str, sqltypes.DuckDBPyType] | list[sqltypes.DuckDBPyType],
+     *,
+     connection: DuckDBPyConnection | None = None,
+ ) -> sqltypes.DuckDBPyType: ...
+ def unregister(view_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyConnection: ...
+ def unregister_filesystem(name: str, *, connection: DuckDBPyConnection | None = None) -> None: ...
+ def values(
+     *args: list[pytyping.Any] | tuple[Expression, ...] | Expression, connection: DuckDBPyConnection | None = None
+ ) -> DuckDBPyRelation: ...
+ def view(view_name: str, *, connection: DuckDBPyConnection | None = None) -> DuckDBPyRelation: ...
+ def write_csv(
+     df: pandas.DataFrame,
+     filename: str,
+     *,
+     sep: str | None = None,
+     na_rep: str | None = None,
+     header: bool | None = None,
+     quotechar: str | None = None,
+     escapechar: str | None = None,
+     date_format: str | None = None,
+     timestamp_format: str | None = None,
+     quoting: str | int | None = None,
+     encoding: str | None = None,
+     compression: str | None = None,
+     overwrite: bool | None = None,
+     per_thread_output: bool | None = None,
+     use_tmp_file: bool | None = None,
+     partition_by: list[str] | None = None,
+     write_partition_columns: bool | None = None,
+ ) -> None: ...
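+ # Editorial sketch, not part of the generated stub: `write_csv` serializes a pandas
+ # DataFrame through DuckDB's CSV writer; `partition_by` switches to hive-style
+ # partitioned output. "out.csv" is a placeholder path.
+ import duckdb
+ import pandas as pd
+
+ df = pd.DataFrame({"i": [1, 2], "s": ["a", "b"]})
+ duckdb.write_csv(df, "out.csv", header=True)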
+
+ __formatted_python_version__: str
+ __git_revision__: str
+ __interactive__: bool
+ __jupyter__: bool
+ __standard_vector_size__: int
+ __version__: str
+ _clean_default_connection: pytyping.Any  # value = <capsule object>
+ apilevel: str
+ paramstyle: str
+ threadsafety: int