duckdb 1.4.1.dev125-cp313-cp313-win_amd64.whl → 1.5.0.dev94-cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of duckdb might be problematic.

duckdb/__init__.pyi DELETED
@@ -1,1137 +0,0 @@
1
- # to regenerate this from scratch, run scripts/regenerate_python_stubs.sh .
2
- # be warned - currently there are still tweaks needed after this file is
3
- # generated. These should be annotated with a comment like
4
- # # stubgen override
5
- # to help the sanity of maintainers.
6
-
7
- import duckdb.typing as typing
8
- import duckdb.functional as functional
9
- from duckdb.typing import DuckDBPyType
10
- from duckdb.functional import FunctionNullHandling, PythonUDFType
11
- from duckdb.value.constant import (
12
- Value,
13
- NullValue,
14
- BooleanValue,
15
- UnsignedBinaryValue,
16
- UnsignedShortValue,
17
- UnsignedIntegerValue,
18
- UnsignedLongValue,
19
- BinaryValue,
20
- ShortValue,
21
- IntegerValue,
22
- LongValue,
23
- HugeIntegerValue,
24
- FloatValue,
25
- DoubleValue,
26
- DecimalValue,
27
- StringValue,
28
- UUIDValue,
29
- BitValue,
30
- BlobValue,
31
- DateValue,
32
- IntervalValue,
33
- TimestampValue,
34
- TimestampSecondValue,
35
- TimestampMilisecondValue,
36
- TimestampNanosecondValue,
37
- TimestampTimeZoneValue,
38
- TimeValue,
39
- TimeTimeZoneValue,
40
- )
41
-
42
- # We also run this in python3.7, where this is needed
43
- from typing_extensions import Literal
44
-
45
- # stubgen override - missing import of Set
46
- from typing import Any, ClassVar, Set, Optional, Callable
47
- from io import StringIO, TextIOBase
48
- from pathlib import Path
49
-
50
- from typing import overload, Dict, List, Union, Tuple
51
- import pandas
52
-
53
- # stubgen override - unfortunately we need this for version checks
54
- import sys
55
- import fsspec
56
- import pyarrow.lib
57
- import polars
58
-
59
- # stubgen override - This should probably not be exposed
60
- apilevel: str
61
- comment: token_type
62
- identifier: token_type
63
- keyword: token_type
64
- numeric_const: token_type
65
- operator: token_type
66
- paramstyle: str
67
- string_const: token_type
68
- threadsafety: int
69
- __standard_vector_size__: int
70
- STANDARD: ExplainType
71
- ANALYZE: ExplainType
72
- DEFAULT: PythonExceptionHandling
73
- RETURN_NULL: PythonExceptionHandling
74
- ROWS: RenderMode
75
- COLUMNS: RenderMode
76
-
77
- __version__: str
78
-
79
- __interactive__: bool
80
- __jupyter__: bool
81
- __formatted_python_version__: str
82
-
83
- class BinderException(ProgrammingError): ...
84
- class CatalogException(ProgrammingError): ...
85
- class ConnectionException(OperationalError): ...
86
- class ConstraintException(IntegrityError): ...
87
- class ConversionException(DataError): ...
88
- class DataError(Error): ...
89
-
90
- class ExplainType:
91
- STANDARD: ExplainType
92
- ANALYZE: ExplainType
93
- def __int__(self) -> int: ...
94
- def __index__(self) -> int: ...
95
- @property
96
- def __members__(self) -> Dict[str, ExplainType]: ...
97
- @property
98
- def name(self) -> str: ...
99
- @property
100
- def value(self) -> int: ...
101
-
102
- class RenderMode:
103
- ROWS: RenderMode
104
- COLUMNS: RenderMode
105
- def __int__(self) -> int: ...
106
- def __index__(self) -> int: ...
107
- @property
108
- def __members__(self) -> Dict[str, RenderMode]: ...
109
- @property
110
- def name(self) -> str: ...
111
- @property
112
- def value(self) -> int: ...
113
-
114
- class PythonExceptionHandling:
115
- DEFAULT: PythonExceptionHandling
116
- RETURN_NULL: PythonExceptionHandling
117
- def __int__(self) -> int: ...
118
- def __index__(self) -> int: ...
119
- @property
120
- def __members__(self) -> Dict[str, PythonExceptionHandling]: ...
121
- @property
122
- def name(self) -> str: ...
123
- @property
124
- def value(self) -> int: ...
125
-
126
- class CSVLineTerminator:
127
- LINE_FEED: CSVLineTerminator
128
- CARRIAGE_RETURN_LINE_FEED: CSVLineTerminator
129
- def __int__(self) -> int: ...
130
- def __index__(self) -> int: ...
131
- @property
132
- def __members__(self) -> Dict[str, CSVLineTerminator]: ...
133
- @property
134
- def name(self) -> str: ...
135
- @property
136
- def value(self) -> int: ...
137
-
138
- class ExpectedResultType:
139
- QUERY_RESULT: ExpectedResultType
140
- CHANGED_ROWS: ExpectedResultType
141
- NOTHING: ExpectedResultType
142
- def __int__(self) -> int: ...
143
- def __index__(self) -> int: ...
144
- @property
145
- def __members__(self) -> Dict[str, ExpectedResultType]: ...
146
- @property
147
- def name(self) -> str: ...
148
- @property
149
- def value(self) -> int: ...
150
-
151
- class StatementType:
152
- INVALID: StatementType
153
- SELECT: StatementType
154
- INSERT: StatementType
155
- UPDATE: StatementType
156
- CREATE: StatementType
157
- DELETE: StatementType
158
- PREPARE: StatementType
159
- EXECUTE: StatementType
160
- ALTER: StatementType
161
- TRANSACTION: StatementType
162
- COPY: StatementType
163
- ANALYZE: StatementType
164
- VARIABLE_SET: StatementType
165
- CREATE_FUNC: StatementType
166
- EXPLAIN: StatementType
167
- DROP: StatementType
168
- EXPORT: StatementType
169
- PRAGMA: StatementType
170
- VACUUM: StatementType
171
- CALL: StatementType
172
- SET: StatementType
173
- LOAD: StatementType
174
- RELATION: StatementType
175
- EXTENSION: StatementType
176
- LOGICAL_PLAN: StatementType
177
- ATTACH: StatementType
178
- DETACH: StatementType
179
- MULTI: StatementType
180
- COPY_DATABASE: StatementType
181
- MERGE_INTO: StatementType
182
- def __int__(self) -> int: ...
183
- def __index__(self) -> int: ...
184
- @property
185
- def __members__(self) -> Dict[str, StatementType]: ...
186
- @property
187
- def name(self) -> str: ...
188
- @property
189
- def value(self) -> int: ...
190
-
191
- class Statement:
192
- def __init__(self, *args, **kwargs) -> None: ...
193
- @property
194
- def query(self) -> str: ...
195
- @property
196
- def named_parameters(self) -> Set[str]: ...
197
- @property
198
- def expected_result_type(self) -> List[ExpectedResultType]: ...
199
- @property
200
- def type(self) -> StatementType: ...
201
-
202
- class Expression:
203
- def __init__(self, *args, **kwargs) -> None: ...
204
- def __neg__(self) -> "Expression": ...
205
- def __add__(self, expr: "Expression") -> "Expression": ...
206
- def __radd__(self, expr: "Expression") -> "Expression": ...
207
- def __sub__(self, expr: "Expression") -> "Expression": ...
208
- def __rsub__(self, expr: "Expression") -> "Expression": ...
209
- def __mul__(self, expr: "Expression") -> "Expression": ...
210
- def __rmul__(self, expr: "Expression") -> "Expression": ...
211
- def __div__(self, expr: "Expression") -> "Expression": ...
212
- def __rdiv__(self, expr: "Expression") -> "Expression": ...
213
- def __truediv__(self, expr: "Expression") -> "Expression": ...
214
- def __rtruediv__(self, expr: "Expression") -> "Expression": ...
215
- def __floordiv__(self, expr: "Expression") -> "Expression": ...
216
- def __rfloordiv__(self, expr: "Expression") -> "Expression": ...
217
- def __mod__(self, expr: "Expression") -> "Expression": ...
218
- def __rmod__(self, expr: "Expression") -> "Expression": ...
219
- def __pow__(self, expr: "Expression") -> "Expression": ...
220
- def __rpow__(self, expr: "Expression") -> "Expression": ...
221
- def __and__(self, expr: "Expression") -> "Expression": ...
222
- def __rand__(self, expr: "Expression") -> "Expression": ...
223
- def __or__(self, expr: "Expression") -> "Expression": ...
224
- def __ror__(self, expr: "Expression") -> "Expression": ...
225
- def __invert__(self) -> "Expression": ...
226
- def __eq__( # type: ignore[override]
227
- self, expr: "Expression"
228
- ) -> "Expression": ...
229
- def __ne__( # type: ignore[override]
230
- self, expr: "Expression"
231
- ) -> "Expression": ...
232
- def __gt__(self, expr: "Expression") -> "Expression": ...
233
- def __ge__(self, expr: "Expression") -> "Expression": ...
234
- def __lt__(self, expr: "Expression") -> "Expression": ...
235
- def __le__(self, expr: "Expression") -> "Expression": ...
236
- def show(self) -> None: ...
237
- def __repr__(self) -> str: ...
238
- def get_name(self) -> str: ...
239
- def alias(self, alias: str) -> "Expression": ...
240
- def when(self, condition: "Expression", value: "Expression") -> "Expression": ...
241
- def otherwise(self, value: "Expression") -> "Expression": ...
242
- def cast(self, type: DuckDBPyType) -> "Expression": ...
243
- def between(self, lower: "Expression", upper: "Expression") -> "Expression": ...
244
- def collate(self, collation: str) -> "Expression": ...
245
- def asc(self) -> "Expression": ...
246
- def desc(self) -> "Expression": ...
247
- def nulls_first(self) -> "Expression": ...
248
- def nulls_last(self) -> "Expression": ...
249
- def isnull(self) -> "Expression": ...
250
- def isnotnull(self) -> "Expression": ...
251
- def isin(self, *cols: "Expression") -> "Expression": ...
252
- def isnotin(self, *cols: "Expression") -> "Expression": ...
253
-
254
- def StarExpression(exclude: Optional[List[str]] = None) -> Expression: ...
255
- def ColumnExpression(column: str) -> Expression: ...
256
- def DefaultExpression() -> Expression: ...
257
- def ConstantExpression(val: Any) -> Expression: ...
258
- def CaseExpression(condition: Expression, value: Expression) -> Expression: ...
259
- def FunctionExpression(function: str, *cols: Expression) -> Expression: ...
260
- def CoalesceOperator(*cols: Expression) -> Expression: ...
261
- def LambdaExpression(lhs: Union[Tuple["Expression", ...], str], rhs: Expression) -> Expression: ...
262
- def SQLExpression(expr: str) -> Expression: ...
263
-
264
- class DuckDBPyConnection:
265
- def __init__(self, *args, **kwargs) -> None: ...
266
- def __enter__(self) -> DuckDBPyConnection: ...
267
- def __exit__(self, exc_type: object, exc: object, traceback: object) -> None: ...
268
- def __del__(self) -> None: ...
269
- @property
270
- def description(self) -> Optional[List[Any]]: ...
271
- @property
272
- def rowcount(self) -> int: ...
273
-
274
- # NOTE: this section is generated by tools/pythonpkg/scripts/generate_connection_stubs.py.
275
- # Do not edit this section manually, your changes will be overwritten!
276
-
277
- # START OF CONNECTION METHODS
278
- def cursor(self) -> DuckDBPyConnection: ...
279
- def register_filesystem(self, filesystem: fsspec.AbstractFileSystem) -> None: ...
280
- def unregister_filesystem(self, name: str) -> None: ...
281
- def list_filesystems(self) -> list: ...
282
- def filesystem_is_registered(self, name: str) -> bool: ...
283
- def create_function(
284
- self,
285
- name: str,
286
- function: function,
287
- parameters: Optional[List[DuckDBPyType]] = None,
288
- return_type: Optional[DuckDBPyType] = None,
289
- *,
290
- type: Optional[PythonUDFType] = PythonUDFType.NATIVE,
291
- null_handling: Optional[FunctionNullHandling] = FunctionNullHandling.DEFAULT,
292
- exception_handling: Optional[PythonExceptionHandling] = PythonExceptionHandling.DEFAULT,
293
- side_effects: bool = False,
294
- ) -> DuckDBPyConnection: ...
295
- def remove_function(self, name: str) -> DuckDBPyConnection: ...
296
- def sqltype(self, type_str: str) -> DuckDBPyType: ...
297
- def dtype(self, type_str: str) -> DuckDBPyType: ...
298
- def type(self, type_str: str) -> DuckDBPyType: ...
299
- def array_type(self, type: DuckDBPyType, size: int) -> DuckDBPyType: ...
300
- def list_type(self, type: DuckDBPyType) -> DuckDBPyType: ...
301
- def union_type(self, members: DuckDBPyType) -> DuckDBPyType: ...
302
- def string_type(self, collation: str = "") -> DuckDBPyType: ...
303
- def enum_type(self, name: str, type: DuckDBPyType, values: List[Any]) -> DuckDBPyType: ...
304
- def decimal_type(self, width: int, scale: int) -> DuckDBPyType: ...
305
- def struct_type(self, fields: Union[Dict[str, DuckDBPyType], List[str]]) -> DuckDBPyType: ...
306
- def row_type(self, fields: Union[Dict[str, DuckDBPyType], List[str]]) -> DuckDBPyType: ...
307
- def map_type(self, key: DuckDBPyType, value: DuckDBPyType) -> DuckDBPyType: ...
308
- def duplicate(self) -> DuckDBPyConnection: ...
309
- def execute(self, query: object, parameters: object = None) -> DuckDBPyConnection: ...
310
- def executemany(self, query: object, parameters: object = None) -> DuckDBPyConnection: ...
311
- def close(self) -> None: ...
312
- def interrupt(self) -> None: ...
313
- def query_progress(self) -> float: ...
314
- def fetchone(self) -> Optional[tuple]: ...
315
- def fetchmany(self, size: int = 1) -> List[Any]: ...
316
- def fetchall(self) -> List[Any]: ...
317
- def fetchnumpy(self) -> dict: ...
318
- def fetchdf(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
319
- def fetch_df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
320
- def df(self, *, date_as_object: bool = False) -> pandas.DataFrame: ...
321
- def fetch_df_chunk(self, vectors_per_chunk: int = 1, *, date_as_object: bool = False) -> pandas.DataFrame: ...
322
- def pl(self, rows_per_batch: int = 1000000, *, lazy: bool = False) -> polars.DataFrame: ...
323
- def fetch_arrow_table(self, rows_per_batch: int = 1000000) -> pyarrow.lib.Table: ...
324
- def fetch_record_batch(self, rows_per_batch: int = 1000000) -> pyarrow.lib.RecordBatchReader: ...
325
- def arrow(self, rows_per_batch: int = 1000000) -> pyarrow.lib.RecordBatchReader: ...
326
- def torch(self) -> dict: ...
327
- def tf(self) -> dict: ...
328
- def begin(self) -> DuckDBPyConnection: ...
329
- def commit(self) -> DuckDBPyConnection: ...
330
- def rollback(self) -> DuckDBPyConnection: ...
331
- def checkpoint(self) -> DuckDBPyConnection: ...
332
- def append(self, table_name: str, df: pandas.DataFrame, *, by_name: bool = False) -> DuckDBPyConnection: ...
333
- def register(self, view_name: str, python_object: object) -> DuckDBPyConnection: ...
334
- def unregister(self, view_name: str) -> DuckDBPyConnection: ...
335
- def table(self, table_name: str) -> DuckDBPyRelation: ...
336
- def view(self, view_name: str) -> DuckDBPyRelation: ...
337
- def values(self, *args: Union[List[Any], Expression, Tuple[Expression]]) -> DuckDBPyRelation: ...
338
- def table_function(self, name: str, parameters: object = None) -> DuckDBPyRelation: ...
339
- def read_json(
340
- self,
341
- path_or_buffer: Union[str, StringIO, TextIOBase],
342
- *,
343
- columns: Optional[Dict[str, str]] = None,
344
- sample_size: Optional[int] = None,
345
- maximum_depth: Optional[int] = None,
346
- records: Optional[str] = None,
347
- format: Optional[str] = None,
348
- date_format: Optional[str] = None,
349
- timestamp_format: Optional[str] = None,
350
- compression: Optional[str] = None,
351
- maximum_object_size: Optional[int] = None,
352
- ignore_errors: Optional[bool] = None,
353
- convert_strings_to_integers: Optional[bool] = None,
354
- field_appearance_threshold: Optional[float] = None,
355
- map_inference_threshold: Optional[int] = None,
356
- maximum_sample_files: Optional[int] = None,
357
- filename: Optional[Union[bool, str]] = None,
358
- hive_partitioning: Optional[bool] = None,
359
- union_by_name: Optional[bool] = None,
360
- hive_types: Optional[Dict[str, str]] = None,
361
- hive_types_autocast: Optional[bool] = None,
362
- ) -> DuckDBPyRelation: ...
363
- def extract_statements(self, query: str) -> List[Statement]: ...
364
- def sql(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
365
- def query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
366
- def from_query(self, query: str, *, alias: str = "", params: object = None) -> DuckDBPyRelation: ...
367
- def read_csv(
368
- self,
369
- path_or_buffer: Union[str, StringIO, TextIOBase],
370
- *,
371
- header: Optional[Union[bool, int]] = None,
372
- compression: Optional[str] = None,
373
- sep: Optional[str] = None,
374
- delimiter: Optional[str] = None,
375
- dtype: Optional[Union[Dict[str, str], List[str]]] = None,
376
- na_values: Optional[Union[str, List[str]]] = None,
377
- skiprows: Optional[int] = None,
378
- quotechar: Optional[str] = None,
379
- escapechar: Optional[str] = None,
380
- encoding: Optional[str] = None,
381
- parallel: Optional[bool] = None,
382
- date_format: Optional[str] = None,
383
- timestamp_format: Optional[str] = None,
384
- sample_size: Optional[int] = None,
385
- all_varchar: Optional[bool] = None,
386
- normalize_names: Optional[bool] = None,
387
- null_padding: Optional[bool] = None,
388
- names: Optional[List[str]] = None,
389
- lineterminator: Optional[str] = None,
390
- columns: Optional[Dict[str, str]] = None,
391
- auto_type_candidates: Optional[List[str]] = None,
392
- max_line_size: Optional[int] = None,
393
- ignore_errors: Optional[bool] = None,
394
- store_rejects: Optional[bool] = None,
395
- rejects_table: Optional[str] = None,
396
- rejects_scan: Optional[str] = None,
397
- rejects_limit: Optional[int] = None,
398
- force_not_null: Optional[List[str]] = None,
399
- buffer_size: Optional[int] = None,
400
- decimal: Optional[str] = None,
401
- allow_quoted_nulls: Optional[bool] = None,
402
- filename: Optional[Union[bool, str]] = None,
403
- hive_partitioning: Optional[bool] = None,
404
- union_by_name: Optional[bool] = None,
405
- hive_types: Optional[Dict[str, str]] = None,
406
- hive_types_autocast: Optional[bool] = None,
407
- ) -> DuckDBPyRelation: ...
408
- def from_csv_auto(
409
- self,
410
- path_or_buffer: Union[str, StringIO, TextIOBase],
411
- *,
412
- header: Optional[Union[bool, int]] = None,
413
- compression: Optional[str] = None,
414
- sep: Optional[str] = None,
415
- delimiter: Optional[str] = None,
416
- dtype: Optional[Union[Dict[str, str], List[str]]] = None,
417
- na_values: Optional[Union[str, List[str]]] = None,
418
- skiprows: Optional[int] = None,
419
- quotechar: Optional[str] = None,
420
- escapechar: Optional[str] = None,
421
- encoding: Optional[str] = None,
422
- parallel: Optional[bool] = None,
423
- date_format: Optional[str] = None,
424
- timestamp_format: Optional[str] = None,
425
- sample_size: Optional[int] = None,
426
- all_varchar: Optional[bool] = None,
427
- normalize_names: Optional[bool] = None,
428
- null_padding: Optional[bool] = None,
429
- names: Optional[List[str]] = None,
430
- lineterminator: Optional[str] = None,
431
- columns: Optional[Dict[str, str]] = None,
432
- auto_type_candidates: Optional[List[str]] = None,
433
- max_line_size: Optional[int] = None,
434
- ignore_errors: Optional[bool] = None,
435
- store_rejects: Optional[bool] = None,
436
- rejects_table: Optional[str] = None,
437
- rejects_scan: Optional[str] = None,
438
- rejects_limit: Optional[int] = None,
439
- force_not_null: Optional[List[str]] = None,
440
- buffer_size: Optional[int] = None,
441
- decimal: Optional[str] = None,
442
- allow_quoted_nulls: Optional[bool] = None,
443
- filename: Optional[Union[bool, str]] = None,
444
- hive_partitioning: Optional[bool] = None,
445
- union_by_name: Optional[bool] = None,
446
- hive_types: Optional[Dict[str, str]] = None,
447
- hive_types_autocast: Optional[bool] = None,
448
- ) -> DuckDBPyRelation: ...
449
- def from_df(self, df: pandas.DataFrame) -> DuckDBPyRelation: ...
450
- def from_arrow(self, arrow_object: object) -> DuckDBPyRelation: ...
451
- def from_parquet(
452
- self,
453
- file_glob: str,
454
- binary_as_string: bool = False,
455
- *,
456
- file_row_number: bool = False,
457
- filename: bool = False,
458
- hive_partitioning: bool = False,
459
- union_by_name: bool = False,
460
- compression: Optional[str] = None,
461
- ) -> DuckDBPyRelation: ...
462
- def read_parquet(
463
- self,
464
- file_glob: str,
465
- binary_as_string: bool = False,
466
- *,
467
- file_row_number: bool = False,
468
- filename: bool = False,
469
- hive_partitioning: bool = False,
470
- union_by_name: bool = False,
471
- compression: Optional[str] = None,
472
- ) -> DuckDBPyRelation: ...
473
- def get_table_names(self, query: str, *, qualified: bool = False) -> Set[str]: ...
474
- def install_extension(
475
- self,
476
- extension: str,
477
- *,
478
- force_install: bool = False,
479
- repository: Optional[str] = None,
480
- repository_url: Optional[str] = None,
481
- version: Optional[str] = None,
482
- ) -> None: ...
483
- def load_extension(self, extension: str) -> None: ...
484
- # END OF CONNECTION METHODS
485
-
486
- class DuckDBPyRelation:
487
- def close(self) -> None: ...
488
- def __getattr__(self, name: str) -> DuckDBPyRelation: ...
489
- def __getitem__(self, name: str) -> DuckDBPyRelation: ...
490
- def __init__(self, *args, **kwargs) -> None: ...
491
- def __contains__(self, name: str) -> bool: ...
492
- def aggregate(self, aggr_expr: str, group_expr: str = ...) -> DuckDBPyRelation: ...
493
- def apply(
494
- self,
495
- function_name: str,
496
- function_aggr: str,
497
- group_expr: str = ...,
498
- function_parameter: str = ...,
499
- projected_columns: str = ...,
500
- ) -> DuckDBPyRelation: ...
501
- def cume_dist(self, window_spec: str, projected_columns: str = ...) -> DuckDBPyRelation: ...
502
- def dense_rank(self, window_spec: str, projected_columns: str = ...) -> DuckDBPyRelation: ...
503
- def percent_rank(self, window_spec: str, projected_columns: str = ...) -> DuckDBPyRelation: ...
504
- def rank(self, window_spec: str, projected_columns: str = ...) -> DuckDBPyRelation: ...
505
- def rank_dense(self, window_spec: str, projected_columns: str = ...) -> DuckDBPyRelation: ...
506
- def row_number(self, window_spec: str, projected_columns: str = ...) -> DuckDBPyRelation: ...
507
- def lag(
508
- self,
509
- column: str,
510
- window_spec: str,
511
- offset: int,
512
- default_value: str,
513
- ignore_nulls: bool,
514
- projected_columns: str = ...,
515
- ) -> DuckDBPyRelation: ...
516
- def lead(
517
- self,
518
- column: str,
519
- window_spec: str,
520
- offset: int,
521
- default_value: str,
522
- ignore_nulls: bool,
523
- projected_columns: str = ...,
524
- ) -> DuckDBPyRelation: ...
525
- def nth_value(
526
- self, column: str, window_spec: str, offset: int, ignore_nulls: bool = ..., projected_columns: str = ...
527
- ) -> DuckDBPyRelation: ...
528
- def value_counts(self, column: str, groups: str = ...) -> DuckDBPyRelation: ...
529
- def geomean(self, column: str, groups: str = ..., projected_columns: str = ...) -> DuckDBPyRelation: ...
530
- def first(self, column: str, groups: str = ..., projected_columns: str = ...) -> DuckDBPyRelation: ...
531
- def first_value(self, column: str, window_spec: str = ..., projected_columns: str = ...) -> DuckDBPyRelation: ...
532
- def last(self, column: str, groups: str = ..., projected_columns: str = ...) -> DuckDBPyRelation: ...
533
- def last_value(self, column: str, window_spec: str = ..., projected_columns: str = ...) -> DuckDBPyRelation: ...
534
- def mode(self, aggregation_columns: str, group_columns: str = ...) -> DuckDBPyRelation: ...
535
- def n_tile(self, window_spec: str, num_buckets: int, projected_columns: str = ...) -> DuckDBPyRelation: ...
536
- def quantile_cont(
537
- self, column: str, q: Union[float, List[float]] = ..., groups: str = ..., projected_columns: str = ...
538
- ) -> DuckDBPyRelation: ...
539
- def quantile_disc(
540
- self, column: str, q: Union[float, List[float]] = ..., groups: str = ..., projected_columns: str = ...
541
- ) -> DuckDBPyRelation: ...
542
- def sum(self, sum_aggr: str, group_expr: str = ...) -> DuckDBPyRelation: ...
543
- def any_value(
544
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
545
- ) -> DuckDBPyRelation: ...
546
- def arg_max(
547
- self,
548
- arg_column: str,
549
- value_column: str,
550
- groups: str = ...,
551
- window_spec: str = ...,
552
- projected_columns: str = ...,
553
- ) -> DuckDBPyRelation: ...
554
- def arg_min(
555
- self,
556
- arg_column: str,
557
- value_column: str,
558
- groups: str = ...,
559
- window_spec: str = ...,
560
- projected_columns: str = ...,
561
- ) -> DuckDBPyRelation: ...
562
- def avg(
563
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
564
- ) -> DuckDBPyRelation: ...
565
- def bit_and(
566
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
567
- ) -> DuckDBPyRelation: ...
568
- def bit_or(
569
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
570
- ) -> DuckDBPyRelation: ...
571
- def bit_xor(
572
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
573
- ) -> DuckDBPyRelation: ...
574
- def bitstring_agg(
575
- self,
576
- column: str,
577
- min: Optional[int],
578
- max: Optional[int],
579
- groups: str = ...,
580
- window_spec: str = ...,
581
- projected_columns: str = ...,
582
- ) -> DuckDBPyRelation: ...
583
- def bool_and(
584
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
585
- ) -> DuckDBPyRelation: ...
586
- def bool_or(
587
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
588
- ) -> DuckDBPyRelation: ...
589
- def count(
590
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
591
- ) -> DuckDBPyRelation: ...
592
- def favg(
593
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
594
- ) -> DuckDBPyRelation: ...
595
- def fsum(
596
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
597
- ) -> DuckDBPyRelation: ...
598
- def histogram(
599
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
600
- ) -> DuckDBPyRelation: ...
601
- def max(
602
- self, max_aggr: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
603
- ) -> DuckDBPyRelation: ...
604
- def min(
605
- self, min_aggr: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
606
- ) -> DuckDBPyRelation: ...
607
- def mean(
608
- self, mean_aggr: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
609
- ) -> DuckDBPyRelation: ...
610
- def median(
611
- self, median_aggr: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
612
- ) -> DuckDBPyRelation: ...
613
- def product(
614
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
615
- ) -> DuckDBPyRelation: ...
616
- def quantile(
617
- self, q: str, quantile_aggr: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
618
- ) -> DuckDBPyRelation: ...
619
- def std(
620
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
621
- ) -> DuckDBPyRelation: ...
622
- def stddev(
623
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
624
- ) -> DuckDBPyRelation: ...
625
- def stddev_pop(
626
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
627
- ) -> DuckDBPyRelation: ...
628
- def stddev_samp(
629
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
630
- ) -> DuckDBPyRelation: ...
631
- def string_agg(
632
- self, column: str, sep: str = ..., groups: str = ..., window_spec: str = ..., projected_columns: str = ...
633
- ) -> DuckDBPyRelation: ...
634
- def var(
635
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
636
- ) -> DuckDBPyRelation: ...
637
- def var_pop(
638
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
639
- ) -> DuckDBPyRelation: ...
640
- def var_samp(
641
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
642
- ) -> DuckDBPyRelation: ...
643
- def variance(
644
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
645
- ) -> DuckDBPyRelation: ...
646
- def list(
647
- self, column: str, groups: str = ..., window_spec: str = ..., projected_columns: str = ...
648
- ) -> DuckDBPyRelation: ...
649
- def arrow(self, batch_size: int = ...) -> pyarrow.lib.RecordBatchReader: ...
650
- def __arrow_c_stream__(self, requested_schema: Optional[object] = None) -> object: ...
651
- def create(self, table_name: str) -> None: ...
652
- def create_view(self, view_name: str, replace: bool = ...) -> DuckDBPyRelation: ...
653
- def describe(self) -> DuckDBPyRelation: ...
654
- def df(self, *args, **kwargs) -> pandas.DataFrame: ...
655
- def distinct(self) -> DuckDBPyRelation: ...
656
- def except_(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
657
- def execute(self, *args, **kwargs) -> DuckDBPyRelation: ...
658
- def explain(self, type: Optional[Union[Literal["standard", "analyze"], int]] = "standard") -> str: ...
659
- def fetchall(self) -> List[Any]: ...
660
- def fetchmany(self, size: int = ...) -> List[Any]: ...
661
- def fetchnumpy(self) -> dict: ...
662
- def fetchone(self) -> Optional[tuple]: ...
663
- def fetchdf(self, *args, **kwargs) -> Any: ...
664
- def fetch_arrow_reader(self, batch_size: int = ...) -> pyarrow.lib.RecordBatchReader: ...
665
- def fetch_arrow_table(self, rows_per_batch: int = ...) -> pyarrow.lib.Table: ...
666
- def filter(self, filter_expr: Union[Expression, str]) -> DuckDBPyRelation: ...
667
- def insert(self, values: List[Any]) -> None: ...
668
- def update(self, set: Dict[str, Expression], condition: Optional[Expression] = None) -> None: ...
669
- def insert_into(self, table_name: str) -> None: ...
670
- def intersect(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
671
- def join(
672
- self, other_rel: DuckDBPyRelation, condition: Union[str, Expression], how: str = ...
673
- ) -> DuckDBPyRelation: ...
674
- def cross(self, other_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
675
- def limit(self, n: int, offset: int = ...) -> DuckDBPyRelation: ...
676
- def map(self, map_function: function, schema: Optional[Dict[str, DuckDBPyType]] = None) -> DuckDBPyRelation: ...
677
- def order(self, order_expr: str) -> DuckDBPyRelation: ...
678
- def sort(self, *cols: Expression) -> DuckDBPyRelation: ...
679
- def project(self, *cols: Union[str, Expression]) -> DuckDBPyRelation: ...
680
- def select(self, *cols: Union[str, Expression]) -> DuckDBPyRelation: ...
681
- def pl(self, rows_per_batch: int = ..., connection: DuckDBPyConnection = ...) -> polars.DataFrame: ...
682
- def query(self, virtual_table_name: str, sql_query: str) -> DuckDBPyRelation: ...
683
- def record_batch(self, batch_size: int = ...) -> pyarrow.lib.RecordBatchReader: ...
684
- def fetch_record_batch(
685
- self, rows_per_batch: int = 1000000, *, connection: DuckDBPyConnection = ...
686
- ) -> pyarrow.lib.RecordBatchReader: ...
687
- def select_types(self, types: List[Union[str, DuckDBPyType]]) -> DuckDBPyRelation: ...
688
- def select_dtypes(self, types: List[Union[str, DuckDBPyType]]) -> DuckDBPyRelation: ...
689
- def set_alias(self, alias: str) -> DuckDBPyRelation: ...
690
- def show(
691
- self,
692
- max_width: Optional[int] = None,
693
- max_rows: Optional[int] = None,
694
- max_col_width: Optional[int] = None,
695
- null_value: Optional[str] = None,
696
- render_mode: Optional[RenderMode] = None,
697
- ) -> None: ...
698
- def sql_query(self) -> str: ...
699
- def to_arrow_table(self, batch_size: int = ...) -> pyarrow.lib.Table: ...
700
- def to_csv(
701
- self,
702
- file_name: str,
703
- sep: Optional[str] = None,
704
- na_rep: Optional[str] = None,
705
- header: Optional[bool] = None,
706
- quotechar: Optional[str] = None,
707
- escapechar: Optional[str] = None,
708
- date_format: Optional[str] = None,
709
- timestamp_format: Optional[str] = None,
710
- quoting: Optional[Union[str, int]] = None,
711
- encoding: Optional[str] = None,
712
- compression: Optional[str] = None,
713
- write_partition_columns: Optional[bool] = None,
714
- overwrite: Optional[bool] = None,
715
- per_thread_output: Optional[bool] = None,
716
- use_tmp_file: Optional[bool] = None,
717
- partition_by: Optional[List[str]] = None,
718
- ) -> None: ...
719
- def to_df(self, *args, **kwargs) -> pandas.DataFrame: ...
720
- def to_parquet(
721
- self,
722
- file_name: str,
723
- compression: Optional[str] = None,
724
- field_ids: Optional[Union[dict, str]] = None,
725
- row_group_size_bytes: Optional[Union[int, str]] = None,
726
- row_group_size: Optional[int] = None,
727
- partition_by: Optional[List[str]] = None,
728
- write_partition_columns: Optional[bool] = None,
729
- overwrite: Optional[bool] = None,
730
- per_thread_output: Optional[bool] = None,
731
- use_tmp_file: Optional[bool] = None,
732
- append: Optional[bool] = None,
733
- ) -> None: ...
734
- def fetch_df_chunk(self, vectors_per_chunk: int = 1, *, date_as_object: bool = False) -> pandas.DataFrame: ...
735
- def to_table(self, table_name: str) -> None: ...
736
- def to_view(self, view_name: str, replace: bool = ...) -> DuckDBPyRelation: ...
737
- def torch(self, connection: DuckDBPyConnection = ...) -> dict: ...
738
- def tf(self, connection: DuckDBPyConnection = ...) -> dict: ...
739
- def union(self, union_rel: DuckDBPyRelation) -> DuckDBPyRelation: ...
740
- def unique(self, unique_aggr: str) -> DuckDBPyRelation: ...
741
- def write_csv(
742
- self,
743
- file_name: str,
744
- sep: Optional[str] = None,
745
- na_rep: Optional[str] = None,
746
- header: Optional[bool] = None,
747
- quotechar: Optional[str] = None,
748
- escapechar: Optional[str] = None,
749
- date_format: Optional[str] = None,
750
- timestamp_format: Optional[str] = None,
751
- quoting: Optional[Union[str, int]] = None,
752
- encoding: Optional[str] = None,
753
- compression: Optional[str] = None,
754
- write_partition_columns: Optional[bool] = None,
755
- overwrite: Optional[bool] = None,
756
- per_thread_output: Optional[bool] = None,
757
- use_tmp_file: Optional[bool] = None,
758
- partition_by: Optional[List[str]] = None,
759
- ) -> None: ...
760
- def write_parquet(
761
- self,
762
- file_name: str,
763
- compression: Optional[str] = None,
764
- field_ids: Optional[Union[dict, str]] = None,
765
- row_group_size_bytes: Optional[Union[int, str]] = None,
766
- row_group_size: Optional[int] = None,
767
- partition_by: Optional[List[str]] = None,
768
- write_partition_columns: Optional[bool] = None,
769
- overwrite: Optional[bool] = None,
770
- per_thread_output: Optional[bool] = None,
771
- use_tmp_file: Optional[bool] = None,
772
- append: Optional[bool] = None,
773
- ) -> None: ...
774
- def __len__(self) -> int: ...
775
- @property
776
- def alias(self) -> str: ...
777
- @property
778
- def columns(self) -> List[str]: ...
779
- @property
780
- def dtypes(self) -> List[DuckDBPyType]: ...
781
- @property
782
- def description(self) -> List[Any]: ...
783
- @property
784
- def shape(self) -> tuple[int, int]: ...
785
- @property
786
- def type(self) -> str: ...
787
- @property
788
- def types(self) -> List[DuckDBPyType]: ...
789
-
790
- class Error(Exception): ...
791
- class FatalException(Error): ...
792
-
793
- class HTTPException(IOException):
794
- status_code: int
795
- body: str
796
- reason: str
797
- headers: Dict[str, str]
798
-
799
- class IOException(OperationalError): ...
800
- class IntegrityError(Error): ...
801
- class InternalError(Error): ...
802
- class InternalException(InternalError): ...
803
- class InterruptException(Error): ...
804
- class InvalidInputException(ProgrammingError): ...
805
- class InvalidTypeException(ProgrammingError): ...
806
- class NotImplementedException(NotSupportedError): ...
807
- class NotSupportedError(Error): ...
808
- class OperationalError(Error): ...
809
- class OutOfMemoryException(OperationalError): ...
810
- class OutOfRangeException(DataError): ...
811
- class ParserException(ProgrammingError): ...
812
- class PermissionException(Error): ...
813
- class ProgrammingError(Error): ...
814
- class SequenceException(Error): ...
815
- class SerializationException(OperationalError): ...
816
- class SyntaxException(ProgrammingError): ...
817
- class TransactionException(OperationalError): ...
818
- class TypeMismatchException(DataError): ...
819
- class Warning(Exception): ...
820
-
821
- class token_type:
822
- # stubgen override - these make mypy sad
823
- # __doc__: ClassVar[str] = ... # read-only
824
- # __members__: ClassVar[dict] = ... # read-only
825
- __entries: ClassVar[dict] = ...
826
- comment: ClassVar[token_type] = ...
827
- identifier: ClassVar[token_type] = ...
828
- keyword: ClassVar[token_type] = ...
829
- numeric_const: ClassVar[token_type] = ...
830
- operator: ClassVar[token_type] = ...
831
- string_const: ClassVar[token_type] = ...
832
- def __init__(self, value: int) -> None: ...
833
- def __eq__(self, other: object) -> bool: ...
834
- def __getstate__(self) -> int: ...
835
- def __hash__(self) -> int: ...
836
- # stubgen override - pybind only puts index in python >= 3.8: https://github.com/EricCousineau-TRI/pybind11/blob/54430436/include/pybind11/pybind11.h#L1789
837
- if sys.version_info >= (3, 7):
838
- def __index__(self) -> int: ...
839
- def __int__(self) -> int: ...
840
- def __ne__(self, other: object) -> bool: ...
841
- def __setstate__(self, state: int) -> None: ...
842
- @property
843
- def name(self) -> str: ...
844
- @property
845
- def value(self) -> int: ...
846
- @property
847
- # stubgen override - this gets removed by stubgen but it shouldn't
848
- def __members__(self) -> object: ...
849
-
850
- def connect(database: Union[str, Path] = ..., read_only: bool = ..., config: dict = ...) -> DuckDBPyConnection: ...
851
- def default_connection() -> DuckDBPyConnection: ...
852
- def set_default_connection(connection: DuckDBPyConnection) -> None: ...
853
- def tokenize(query: str) -> List[Any]: ...
854
-
855
- # NOTE: this section is generated by tools/pythonpkg/scripts/generate_connection_wrapper_stubs.py.
856
- # Do not edit this section manually, your changes will be overwritten!
857
-
858
- # START OF CONNECTION WRAPPER
859
- def cursor(*, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
860
- def register_filesystem(filesystem: fsspec.AbstractFileSystem, *, connection: DuckDBPyConnection = ...) -> None: ...
861
- def unregister_filesystem(name: str, *, connection: DuckDBPyConnection = ...) -> None: ...
862
- def list_filesystems(*, connection: DuckDBPyConnection = ...) -> list: ...
863
- def filesystem_is_registered(name: str, *, connection: DuckDBPyConnection = ...) -> bool: ...
864
- def create_function(
865
- name: str,
866
- function: function,
867
- parameters: Optional[List[DuckDBPyType]] = None,
868
- return_type: Optional[DuckDBPyType] = None,
869
- *,
870
- type: Optional[PythonUDFType] = PythonUDFType.NATIVE,
871
- null_handling: Optional[FunctionNullHandling] = FunctionNullHandling.DEFAULT,
872
- exception_handling: Optional[PythonExceptionHandling] = PythonExceptionHandling.DEFAULT,
873
- side_effects: bool = False,
874
- connection: DuckDBPyConnection = ...,
875
- ) -> DuckDBPyConnection: ...
876
- def remove_function(name: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
877
- def sqltype(type_str: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
878
- def dtype(type_str: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
879
- def type(type_str: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
880
- def array_type(type: DuckDBPyType, size: int, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
881
- def list_type(type: DuckDBPyType, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
882
- def union_type(members: DuckDBPyType, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
883
- def string_type(collation: str = "", *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
884
- def enum_type(
885
- name: str, type: DuckDBPyType, values: List[Any], *, connection: DuckDBPyConnection = ...
886
- ) -> DuckDBPyType: ...
887
- def decimal_type(width: int, scale: int, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
888
- def struct_type(
889
- fields: Union[Dict[str, DuckDBPyType], List[str]], *, connection: DuckDBPyConnection = ...
890
- ) -> DuckDBPyType: ...
891
- def row_type(
892
- fields: Union[Dict[str, DuckDBPyType], List[str]], *, connection: DuckDBPyConnection = ...
893
- ) -> DuckDBPyType: ...
894
- def map_type(key: DuckDBPyType, value: DuckDBPyType, *, connection: DuckDBPyConnection = ...) -> DuckDBPyType: ...
895
- def duplicate(*, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
896
- def execute(
897
- query: object, parameters: object = None, *, connection: DuckDBPyConnection = ...
898
- ) -> DuckDBPyConnection: ...
899
- def executemany(
900
- query: object, parameters: object = None, *, connection: DuckDBPyConnection = ...
901
- ) -> DuckDBPyConnection: ...
902
- def close(*, connection: DuckDBPyConnection = ...) -> None: ...
903
- def interrupt(*, connection: DuckDBPyConnection = ...) -> None: ...
904
- def query_progress(*, connection: DuckDBPyConnection = ...) -> float: ...
905
- def fetchone(*, connection: DuckDBPyConnection = ...) -> Optional[tuple]: ...
906
- def fetchmany(size: int = 1, *, connection: DuckDBPyConnection = ...) -> List[Any]: ...
907
- def fetchall(*, connection: DuckDBPyConnection = ...) -> List[Any]: ...
908
- def fetchnumpy(*, connection: DuckDBPyConnection = ...) -> dict: ...
909
- def fetchdf(*, date_as_object: bool = False, connection: DuckDBPyConnection = ...) -> pandas.DataFrame: ...
910
- def fetch_df(*, date_as_object: bool = False, connection: DuckDBPyConnection = ...) -> pandas.DataFrame: ...
911
- def df(*, date_as_object: bool = False, connection: DuckDBPyConnection = ...) -> pandas.DataFrame: ...
912
- def fetch_df_chunk(
913
- vectors_per_chunk: int = 1, *, date_as_object: bool = False, connection: DuckDBPyConnection = ...
914
- ) -> pandas.DataFrame: ...
915
- def pl(
916
- rows_per_batch: int = 1000000, *, lazy: bool = False, connection: DuckDBPyConnection = ...
917
- ) -> polars.DataFrame: ...
918
- def fetch_arrow_table(rows_per_batch: int = 1000000, *, connection: DuckDBPyConnection = ...) -> pyarrow.lib.Table: ...
919
- def fetch_record_batch(
920
- rows_per_batch: int = 1000000, *, connection: DuckDBPyConnection = ...
921
- ) -> pyarrow.lib.RecordBatchReader: ...
922
- def arrow(rows_per_batch: int = 1000000, *, connection: DuckDBPyConnection = ...) -> pyarrow.lib.RecordBatchReader: ...
923
- def torch(*, connection: DuckDBPyConnection = ...) -> dict: ...
924
- def tf(*, connection: DuckDBPyConnection = ...) -> dict: ...
925
- def begin(*, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
926
- def commit(*, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
927
- def rollback(*, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
928
- def checkpoint(*, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
929
- def append(
930
- table_name: str, df: pandas.DataFrame, *, by_name: bool = False, connection: DuckDBPyConnection = ...
931
- ) -> DuckDBPyConnection: ...
932
- def register(view_name: str, python_object: object, *, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
933
- def unregister(view_name: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyConnection: ...
934
- def table(table_name: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
935
- def view(view_name: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
936
- def values(
937
- *args: Union[List[Any], Expression, Tuple[Expression]], connection: DuckDBPyConnection = ...
938
- ) -> DuckDBPyRelation: ...
939
- def table_function(
940
- name: str, parameters: object = None, *, connection: DuckDBPyConnection = ...
941
- ) -> DuckDBPyRelation: ...
942
- def read_json(
943
- path_or_buffer: Union[str, StringIO, TextIOBase],
944
- *,
945
- columns: Optional[Dict[str, str]] = None,
946
- sample_size: Optional[int] = None,
947
- maximum_depth: Optional[int] = None,
948
- records: Optional[str] = None,
949
- format: Optional[str] = None,
950
- date_format: Optional[str] = None,
951
- timestamp_format: Optional[str] = None,
952
- compression: Optional[str] = None,
953
- maximum_object_size: Optional[int] = None,
954
- ignore_errors: Optional[bool] = None,
955
- convert_strings_to_integers: Optional[bool] = None,
956
- field_appearance_threshold: Optional[float] = None,
957
- map_inference_threshold: Optional[int] = None,
958
- maximum_sample_files: Optional[int] = None,
959
- filename: Optional[Union[bool, str]] = None,
960
- hive_partitioning: Optional[bool] = None,
961
- union_by_name: Optional[bool] = None,
962
- hive_types: Optional[Dict[str, str]] = None,
963
- hive_types_autocast: Optional[bool] = None,
964
- connection: DuckDBPyConnection = ...,
965
- ) -> DuckDBPyRelation: ...
966
- def extract_statements(query: str, *, connection: DuckDBPyConnection = ...) -> List[Statement]: ...
967
- def sql(
968
- query: str, *, alias: str = "", params: object = None, connection: DuckDBPyConnection = ...
969
- ) -> DuckDBPyRelation: ...
970
- def query(
971
- query: str, *, alias: str = "", params: object = None, connection: DuckDBPyConnection = ...
972
- ) -> DuckDBPyRelation: ...
973
- def from_query(
974
- query: str, *, alias: str = "", params: object = None, connection: DuckDBPyConnection = ...
975
- ) -> DuckDBPyRelation: ...
976
- def read_csv(
977
- path_or_buffer: Union[str, StringIO, TextIOBase],
978
- *,
979
- header: Optional[Union[bool, int]] = None,
980
- compression: Optional[str] = None,
981
- sep: Optional[str] = None,
982
- delimiter: Optional[str] = None,
983
- dtype: Optional[Union[Dict[str, str], List[str]]] = None,
984
- na_values: Optional[Union[str, List[str]]] = None,
985
- skiprows: Optional[int] = None,
986
- quotechar: Optional[str] = None,
987
- escapechar: Optional[str] = None,
988
- encoding: Optional[str] = None,
989
- parallel: Optional[bool] = None,
990
- date_format: Optional[str] = None,
991
- timestamp_format: Optional[str] = None,
992
- sample_size: Optional[int] = None,
993
- all_varchar: Optional[bool] = None,
994
- normalize_names: Optional[bool] = None,
995
- null_padding: Optional[bool] = None,
996
- names: Optional[List[str]] = None,
997
- lineterminator: Optional[str] = None,
998
- columns: Optional[Dict[str, str]] = None,
999
- auto_type_candidates: Optional[List[str]] = None,
1000
- max_line_size: Optional[int] = None,
1001
- ignore_errors: Optional[bool] = None,
1002
- store_rejects: Optional[bool] = None,
1003
- rejects_table: Optional[str] = None,
1004
- rejects_scan: Optional[str] = None,
1005
- rejects_limit: Optional[int] = None,
1006
- force_not_null: Optional[List[str]] = None,
1007
- buffer_size: Optional[int] = None,
1008
- decimal: Optional[str] = None,
1009
- allow_quoted_nulls: Optional[bool] = None,
1010
- filename: Optional[Union[bool, str]] = None,
1011
- hive_partitioning: Optional[bool] = None,
1012
- union_by_name: Optional[bool] = None,
1013
- hive_types: Optional[Dict[str, str]] = None,
1014
- hive_types_autocast: Optional[bool] = None,
1015
- connection: DuckDBPyConnection = ...,
1016
- ) -> DuckDBPyRelation: ...
1017
- def from_csv_auto(
1018
- path_or_buffer: Union[str, StringIO, TextIOBase],
1019
- *,
1020
- header: Optional[Union[bool, int]] = None,
1021
- compression: Optional[str] = None,
1022
- sep: Optional[str] = None,
1023
- delimiter: Optional[str] = None,
1024
- dtype: Optional[Union[Dict[str, str], List[str]]] = None,
1025
- na_values: Optional[Union[str, List[str]]] = None,
1026
- skiprows: Optional[int] = None,
1027
- quotechar: Optional[str] = None,
1028
- escapechar: Optional[str] = None,
1029
- encoding: Optional[str] = None,
1030
- parallel: Optional[bool] = None,
1031
- date_format: Optional[str] = None,
1032
- timestamp_format: Optional[str] = None,
1033
- sample_size: Optional[int] = None,
1034
- all_varchar: Optional[bool] = None,
1035
- normalize_names: Optional[bool] = None,
1036
- null_padding: Optional[bool] = None,
1037
- names: Optional[List[str]] = None,
1038
- lineterminator: Optional[str] = None,
1039
- columns: Optional[Dict[str, str]] = None,
1040
- auto_type_candidates: Optional[List[str]] = None,
1041
- max_line_size: Optional[int] = None,
1042
- ignore_errors: Optional[bool] = None,
1043
- store_rejects: Optional[bool] = None,
1044
- rejects_table: Optional[str] = None,
1045
- rejects_scan: Optional[str] = None,
1046
- rejects_limit: Optional[int] = None,
1047
- force_not_null: Optional[List[str]] = None,
1048
- buffer_size: Optional[int] = None,
1049
- decimal: Optional[str] = None,
1050
- allow_quoted_nulls: Optional[bool] = None,
1051
- filename: Optional[Union[bool, str]] = None,
1052
- hive_partitioning: Optional[bool] = None,
1053
- union_by_name: Optional[bool] = None,
1054
- hive_types: Optional[Dict[str, str]] = None,
1055
- hive_types_autocast: Optional[bool] = None,
1056
- connection: DuckDBPyConnection = ...,
1057
- ) -> DuckDBPyRelation: ...
1058
- def from_df(df: pandas.DataFrame, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
1059
- def from_arrow(arrow_object: object, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
1060
- def from_parquet(
1061
- file_glob: str,
1062
- binary_as_string: bool = False,
1063
- *,
1064
- file_row_number: bool = False,
1065
- filename: bool = False,
1066
- hive_partitioning: bool = False,
1067
- union_by_name: bool = False,
1068
- compression: Optional[str] = None,
1069
- connection: DuckDBPyConnection = ...,
1070
- ) -> DuckDBPyRelation: ...
1071
- def read_parquet(
1072
- file_glob: str,
1073
- binary_as_string: bool = False,
1074
- *,
1075
- file_row_number: bool = False,
1076
- filename: bool = False,
1077
- hive_partitioning: bool = False,
1078
- union_by_name: bool = False,
1079
- compression: Optional[str] = None,
1080
- connection: DuckDBPyConnection = ...,
1081
- ) -> DuckDBPyRelation: ...
1082
- def get_table_names(query: str, *, qualified: bool = False, connection: DuckDBPyConnection = ...) -> Set[str]: ...
1083
- def install_extension(
1084
- extension: str,
1085
- *,
1086
- force_install: bool = False,
1087
- repository: Optional[str] = None,
1088
- repository_url: Optional[str] = None,
1089
- version: Optional[str] = None,
1090
- connection: DuckDBPyConnection = ...,
1091
- ) -> None: ...
1092
- def load_extension(extension: str, *, connection: DuckDBPyConnection = ...) -> None: ...
1093
- def project(
1094
- df: pandas.DataFrame, *args: str, groups: str = "", connection: DuckDBPyConnection = ...
1095
- ) -> DuckDBPyRelation: ...
1096
- def distinct(df: pandas.DataFrame, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
1097
- def write_csv(
1098
- df: pandas.DataFrame,
1099
- filename: str,
1100
- *,
1101
- sep: Optional[str] = None,
1102
- na_rep: Optional[str] = None,
1103
- header: Optional[bool] = None,
1104
- quotechar: Optional[str] = None,
1105
- escapechar: Optional[str] = None,
1106
- date_format: Optional[str] = None,
1107
- timestamp_format: Optional[str] = None,
1108
- quoting: Optional[Union[str, int]] = None,
1109
- encoding: Optional[str] = None,
1110
- compression: Optional[str] = None,
1111
- overwrite: Optional[bool] = None,
1112
- per_thread_output: Optional[bool] = None,
1113
- use_tmp_file: Optional[bool] = None,
1114
- partition_by: Optional[List[str]] = None,
1115
- write_partition_columns: Optional[bool] = None,
1116
- connection: DuckDBPyConnection = ...,
1117
- ) -> None: ...
1118
- def aggregate(
1119
- df: pandas.DataFrame,
1120
- aggr_expr: Union[str, List[Expression]],
1121
- group_expr: str = "",
1122
- *,
1123
- connection: DuckDBPyConnection = ...,
1124
- ) -> DuckDBPyRelation: ...
1125
- def alias(df: pandas.DataFrame, alias: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
1126
- def filter(df: pandas.DataFrame, filter_expr: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
1127
- def limit(
1128
- df: pandas.DataFrame, n: int, offset: int = 0, *, connection: DuckDBPyConnection = ...
1129
- ) -> DuckDBPyRelation: ...
1130
- def order(df: pandas.DataFrame, order_expr: str, *, connection: DuckDBPyConnection = ...) -> DuckDBPyRelation: ...
1131
- def query_df(
1132
- df: pandas.DataFrame, virtual_table_name: str, sql_query: str, *, connection: DuckDBPyConnection = ...
1133
- ) -> DuckDBPyRelation: ...
1134
- def description(*, connection: DuckDBPyConnection = ...) -> Optional[List[Any]]: ...
1135
- def rowcount(*, connection: DuckDBPyConnection = ...) -> int: ...
1136
-
1137
- # END OF CONNECTION WRAPPER