duckdb 1.5.0.dev56__cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.

Potentially problematic release: this version of duckdb might be problematic.

Files changed (52)
  1. _duckdb-stubs/__init__.pyi +1443 -0
  2. _duckdb-stubs/_func.pyi +46 -0
  3. _duckdb-stubs/_sqltypes.pyi +75 -0
  4. _duckdb.cpython-314-x86_64-linux-gnu.so +0 -0
  5. adbc_driver_duckdb/__init__.py +50 -0
  6. adbc_driver_duckdb/dbapi.py +115 -0
  7. duckdb/__init__.py +381 -0
  8. duckdb/_dbapi_type_object.py +231 -0
  9. duckdb/_version.py +22 -0
  10. duckdb/bytes_io_wrapper.py +69 -0
  11. duckdb/experimental/__init__.py +3 -0
  12. duckdb/experimental/spark/LICENSE +260 -0
  13. duckdb/experimental/spark/__init__.py +6 -0
  14. duckdb/experimental/spark/_globals.py +77 -0
  15. duckdb/experimental/spark/_typing.py +46 -0
  16. duckdb/experimental/spark/conf.py +46 -0
  17. duckdb/experimental/spark/context.py +180 -0
  18. duckdb/experimental/spark/errors/__init__.py +70 -0
  19. duckdb/experimental/spark/errors/error_classes.py +918 -0
  20. duckdb/experimental/spark/errors/exceptions/__init__.py +16 -0
  21. duckdb/experimental/spark/errors/exceptions/base.py +168 -0
  22. duckdb/experimental/spark/errors/utils.py +111 -0
  23. duckdb/experimental/spark/exception.py +18 -0
  24. duckdb/experimental/spark/sql/__init__.py +7 -0
  25. duckdb/experimental/spark/sql/_typing.py +86 -0
  26. duckdb/experimental/spark/sql/catalog.py +79 -0
  27. duckdb/experimental/spark/sql/column.py +361 -0
  28. duckdb/experimental/spark/sql/conf.py +24 -0
  29. duckdb/experimental/spark/sql/dataframe.py +1389 -0
  30. duckdb/experimental/spark/sql/functions.py +6195 -0
  31. duckdb/experimental/spark/sql/group.py +424 -0
  32. duckdb/experimental/spark/sql/readwriter.py +435 -0
  33. duckdb/experimental/spark/sql/session.py +297 -0
  34. duckdb/experimental/spark/sql/streaming.py +36 -0
  35. duckdb/experimental/spark/sql/type_utils.py +107 -0
  36. duckdb/experimental/spark/sql/types.py +1239 -0
  37. duckdb/experimental/spark/sql/udf.py +37 -0
  38. duckdb/filesystem.py +33 -0
  39. duckdb/func/__init__.py +3 -0
  40. duckdb/functional/__init__.py +13 -0
  41. duckdb/polars_io.py +284 -0
  42. duckdb/py.typed +0 -0
  43. duckdb/query_graph/__main__.py +358 -0
  44. duckdb/sqltypes/__init__.py +63 -0
  45. duckdb/typing/__init__.py +71 -0
  46. duckdb/udf.py +24 -0
  47. duckdb/value/__init__.py +1 -0
  48. duckdb/value/constant/__init__.py +270 -0
  49. duckdb-1.5.0.dev56.dist-info/METADATA +87 -0
  50. duckdb-1.5.0.dev56.dist-info/RECORD +52 -0
  51. duckdb-1.5.0.dev56.dist-info/WHEEL +6 -0
  52. duckdb-1.5.0.dev56.dist-info/licenses/LICENSE +7 -0
duckdb/experimental/spark/sql/group.py
@@ -0,0 +1,424 @@
+# # noqa: D100
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import TYPE_CHECKING, Callable, Union, overload
+
+from ..exception import ContributionsAcceptedError
+from .column import Column
+from .dataframe import DataFrame
+from .functions import _to_column_expr
+from .types import NumericType
+
+# Only import symbols needed for type checking
+if TYPE_CHECKING:
+    from ._typing import ColumnOrName
+    from .session import SparkSession
+
+__all__ = ["GroupedData", "Grouping"]
+
+
+def _api_internal(self: "GroupedData", name: str, *cols: str) -> DataFrame:
+    expressions = ",".join(list(cols))
+    group_by = str(self._grouping) if self._grouping else ""
+    projections = self._grouping.get_columns()
+    jdf = self._df.relation.apply(
+        function_name=name,  # aggregate function
+        function_aggr=expressions,  # inputs to aggregate
+        group_expr=group_by,  # groups
+        projected_columns=projections,  # projections
+    )
+    return DataFrame(jdf, self.session)
+
+
+def df_varargs_api(f: Callable[..., DataFrame]) -> Callable[..., DataFrame]:
+    def _api(self: "GroupedData", *cols: str) -> DataFrame:
+        name = f.__name__
+        return _api_internal(self, name, *cols)
+
+    _api.__name__ = f.__name__
+    _api.__doc__ = f.__doc__
+    return _api
+
+
+class Grouping:  # noqa: D101
+    def __init__(self, *cols: "ColumnOrName", **kwargs) -> None:  # noqa: D107
+        self._type = ""
+        self._cols = [_to_column_expr(x) for x in cols]
+        if "special" in kwargs:
+            special = kwargs["special"]
+            accepted_special = ["cube", "rollup"]
+            assert special in accepted_special
+            self._type = special
+
+    def get_columns(self) -> str:  # noqa: D102
+        columns = ",".join([str(x) for x in self._cols])
+        return columns
+
+    def __str__(self) -> str:  # noqa: D105
+        columns = self.get_columns()
+        if self._type:
+            return self._type + "(" + columns + ")"
+        return columns
+
+
+class GroupedData:
+    """A set of methods for aggregations on a :class:`DataFrame`,
+    created by :func:`DataFrame.groupBy`.
+
+    """  # noqa: D205
+
+    def __init__(self, grouping: Grouping, df: DataFrame) -> None:  # noqa: D107
+        self._grouping = grouping
+        self._df = df
+        self.session: SparkSession = df.session
+
+    def __repr__(self) -> str:  # noqa: D105
+        return str(self._df)
+
+    def count(self) -> DataFrame:
+        """Counts the number of records for each group.
+
+        Examples:
+        --------
+        >>> df = spark.createDataFrame(
+        ...     [(2, "Alice"), (3, "Alice"), (5, "Bob"), (10, "Bob")], ["age", "name"]
+        ... )
+        >>> df.show()
+        +---+-----+
+        |age| name|
+        +---+-----+
+        |  2|Alice|
+        |  3|Alice|
+        |  5|  Bob|
+        | 10|  Bob|
+        +---+-----+
+
+        Group-by name, and count each group.
+
+        >>> df.groupBy(df.name).count().sort("name").show()
+        +-----+-----+
+        | name|count|
+        +-----+-----+
+        |Alice|    2|
+        |  Bob|    2|
+        +-----+-----+
+        """
+        return _api_internal(self, "count").withColumnRenamed("count_star()", "count")
+
+    @df_varargs_api
+    def mean(self, *cols: str) -> DataFrame:
+        """Computes average values for each numeric column for each group.
+
+        :func:`mean` is an alias for :func:`avg`.
+
+        Parameters
+        ----------
+        cols : str
+            column names. Non-numeric columns are ignored.
+        """
+
+    def avg(self, *cols: str) -> DataFrame:
+        """Computes average values for each numeric column for each group.
+
+        :func:`mean` is an alias for :func:`avg`.
+
+        Parameters
+        ----------
+        cols : str
+            column names. Non-numeric columns are ignored.
+
+        Examples:
+        --------
+        >>> df = spark.createDataFrame(
+        ...     [(2, "Alice", 80), (3, "Alice", 100), (5, "Bob", 120), (10, "Bob", 140)],
+        ...     ["age", "name", "height"],
+        ... )
+        >>> df.show()
+        +---+-----+------+
+        |age| name|height|
+        +---+-----+------+
+        |  2|Alice|    80|
+        |  3|Alice|   100|
+        |  5|  Bob|   120|
+        | 10|  Bob|   140|
+        +---+-----+------+
+
+        Group-by name, and calculate the mean of the age in each group.
+
+        >>> df.groupBy("name").avg("age").sort("name").show()
+        +-----+--------+
+        | name|avg(age)|
+        +-----+--------+
+        |Alice|     2.5|
+        |  Bob|     7.5|
+        +-----+--------+
+
+        Calculate the mean of the age and height in all data.
+
+        >>> df.groupBy().avg("age", "height").show()
+        +--------+-----------+
+        |avg(age)|avg(height)|
+        +--------+-----------+
+        |     5.0|      110.0|
+        +--------+-----------+
+        """
+        columns = list(cols)
+        if len(columns) == 0:
+            schema = self._df.schema
+            # Take only the numeric types of the relation
+            columns: list[str] = [x.name for x in schema.fields if isinstance(x.dataType, NumericType)]
+        return _api_internal(self, "avg", *columns)
+
+    @df_varargs_api
+    def max(self, *cols: str) -> DataFrame:
+        """Computes the max value for each numeric column for each group.
+
+        Examples:
+        --------
+        >>> df = spark.createDataFrame(
+        ...     [(2, "Alice", 80), (3, "Alice", 100), (5, "Bob", 120), (10, "Bob", 140)],
+        ...     ["age", "name", "height"],
+        ... )
+        >>> df.show()
+        +---+-----+------+
+        |age| name|height|
+        +---+-----+------+
+        |  2|Alice|    80|
+        |  3|Alice|   100|
+        |  5|  Bob|   120|
+        | 10|  Bob|   140|
+        +---+-----+------+
+
+        Group-by name, and calculate the max of the age in each group.
+
+        >>> df.groupBy("name").max("age").sort("name").show()
+        +-----+--------+
+        | name|max(age)|
+        +-----+--------+
+        |Alice|       3|
+        |  Bob|      10|
+        +-----+--------+
+
+        Calculate the max of the age and height in all data.
+
+        >>> df.groupBy().max("age", "height").show()
+        +--------+-----------+
+        |max(age)|max(height)|
+        +--------+-----------+
+        |      10|        140|
+        +--------+-----------+
+        """
+
+    @df_varargs_api
+    def min(self, *cols: str) -> DataFrame:
+        """Computes the min value for each numeric column for each group.
+
+        Parameters
+        ----------
+        cols : str
+            column names. Non-numeric columns are ignored.
+
+        Examples:
+        --------
+        >>> df = spark.createDataFrame(
+        ...     [(2, "Alice", 80), (3, "Alice", 100), (5, "Bob", 120), (10, "Bob", 140)],
+        ...     ["age", "name", "height"],
+        ... )
+        >>> df.show()
+        +---+-----+------+
+        |age| name|height|
+        +---+-----+------+
+        |  2|Alice|    80|
+        |  3|Alice|   100|
+        |  5|  Bob|   120|
+        | 10|  Bob|   140|
+        +---+-----+------+
+
+        Group-by name, and calculate the min of the age in each group.
+
+        >>> df.groupBy("name").min("age").sort("name").show()
+        +-----+--------+
+        | name|min(age)|
+        +-----+--------+
+        |Alice|       2|
+        |  Bob|       5|
+        +-----+--------+
+
+        Calculate the min of the age and height in all data.
+
+        >>> df.groupBy().min("age", "height").show()
+        +--------+-----------+
+        |min(age)|min(height)|
+        +--------+-----------+
+        |       2|         80|
+        +--------+-----------+
+        """
+
+    @df_varargs_api
+    def sum(self, *cols: str) -> DataFrame:
+        """Computes the sum for each numeric column for each group.
+
+        Parameters
+        ----------
+        cols : str
+            column names. Non-numeric columns are ignored.
+
+        Examples:
+        --------
+        >>> df = spark.createDataFrame(
+        ...     [(2, "Alice", 80), (3, "Alice", 100), (5, "Bob", 120), (10, "Bob", 140)],
+        ...     ["age", "name", "height"],
+        ... )
+        >>> df.show()
+        +---+-----+------+
+        |age| name|height|
+        +---+-----+------+
+        |  2|Alice|    80|
+        |  3|Alice|   100|
+        |  5|  Bob|   120|
+        | 10|  Bob|   140|
+        +---+-----+------+
+
+        Group-by name, and calculate the sum of the age in each group.
+
+        >>> df.groupBy("name").sum("age").sort("name").show()
+        +-----+--------+
+        | name|sum(age)|
+        +-----+--------+
+        |Alice|       5|
+        |  Bob|      15|
+        +-----+--------+
+
+        Calculate the sum of the age and height in all data.
+
+        >>> df.groupBy().sum("age", "height").show()
+        +--------+-----------+
+        |sum(age)|sum(height)|
+        +--------+-----------+
+        |      20|        440|
+        +--------+-----------+
+        """
+
+    @overload
+    def agg(self, *exprs: Column) -> DataFrame: ...
+
+    @overload
+    def agg(self, __exprs: dict[str, str]) -> DataFrame: ...  # noqa: PYI063
+
+    def agg(self, *exprs: Union[Column, dict[str, str]]) -> DataFrame:
+        """Compute aggregates and return the result as a :class:`DataFrame`.
+
+        The available aggregate functions can be:
+
+        1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
+
+        2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
+
+           .. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
+               a full shuffle is required. Also, all the data of a group will be loaded into
+               memory, so the user should be aware of the potential OOM risk if data is skewed
+               and certain groups are too large to fit in memory.
+
+           .. seealso:: :func:`pyspark.sql.functions.pandas_udf`
+
+        If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
+        is the column to perform aggregation on, and the value is the aggregate function.
+
+        Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
+
+        .. versionadded:: 1.3.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        exprs : dict
+            a dict mapping from column name (string) to aggregate functions (string),
+            or a list of :class:`Column`.
+
+        Notes:
+        -----
+        Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
+        in a single call to this function.
+
+        Examples:
+        --------
+        >>> from pyspark.sql import functions as F
+        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
+        >>> df = spark.createDataFrame(
+        ...     [(2, "Alice"), (3, "Alice"), (5, "Bob"), (10, "Bob")], ["age", "name"]
+        ... )
+        >>> df.show()
+        +---+-----+
+        |age| name|
+        +---+-----+
+        |  2|Alice|
+        |  3|Alice|
+        |  5|  Bob|
+        | 10|  Bob|
+        +---+-----+
+
+        Group-by name, and count each group.
+
+        >>> df.groupBy(df.name)
+        GroupedData[grouping...: [name...], value: [age: bigint, name: string], type: GroupBy]
+
+        >>> df.groupBy(df.name).agg({"*": "count"}).sort("name").show()
+        +-----+--------+
+        | name|count(1)|
+        +-----+--------+
+        |Alice|       2|
+        |  Bob|       2|
+        +-----+--------+
+
+        Group-by name, and calculate the minimum age.
+
+        >>> df.groupBy(df.name).agg(F.min(df.age)).sort("name").show()
+        +-----+--------+
+        | name|min(age)|
+        +-----+--------+
+        |Alice|       2|
+        |  Bob|       5|
+        +-----+--------+
+
+        Same as above but uses pandas UDF.
+
+        >>> @pandas_udf("int", PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
+        ... def min_udf(v):
+        ...     return v.min()
+        >>> df.groupBy(df.name).agg(min_udf(df.age)).sort("name").show()  # doctest: +SKIP
+        +-----+------------+
+        | name|min_udf(age)|
+        +-----+------------+
+        |Alice|           2|
+        |  Bob|           5|
+        +-----+------------+
+        """
+        assert exprs, "exprs should not be empty"
+        if len(exprs) == 1 and isinstance(exprs[0], dict):
+            raise ContributionsAcceptedError
+        else:
+            # Columns
+            assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
+            expressions = list(self._grouping._cols)
+            expressions.extend([x.expr for x in exprs])
+            group_by = str(self._grouping)
+            rel = self._df.relation.select(*expressions, groups=group_by)
+            return DataFrame(rel, self.session)
+
+    # TODO: add 'pivot'  # noqa: TD002, TD003
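
For orientation, the sketch below shows how the GroupedData API added by this file is exercised end to end. It is illustrative only and not part of the diff: the import paths come from the file list above, and SparkSession.builder.getOrCreate() is assumed to mirror PySpark's entry point.

# Illustrative sketch, not part of the package diff. Assumes the experimental
# Spark API follows PySpark's session bootstrap (SparkSession.builder.getOrCreate()).
from duckdb.experimental.spark.sql import SparkSession
from duckdb.experimental.spark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(2, "Alice", 80), (3, "Alice", 100), (5, "Bob", 120), (10, "Bob", 140)],
    ["age", "name", "height"],
)

# groupBy() builds a Grouping and returns GroupedData; count() and avg() route
# through _api_internal, while agg() accepts Column expressions directly.
df.groupBy("name").count().show()
df.groupBy("name").avg().show()  # no columns given: falls back to all NumericType columns
df.groupBy(df.name).agg(F.min(df.age)).show()

Note how avg() with no arguments filters the schema down to NumericType fields before delegating to the underlying relation, matching the PySpark behavior its docstring describes.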