duckdb 1.5.0.dev44-cp39-cp39-macosx_11_0_arm64.whl → 1.5.0.dev94-cp39-cp39-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of duckdb might be problematic.

Files changed (56)
  1. _duckdb-stubs/__init__.pyi +1443 -0
  2. _duckdb-stubs/_func.pyi +46 -0
  3. _duckdb-stubs/_sqltypes.pyi +75 -0
  4. _duckdb.cpython-39-darwin.so +0 -0
  5. adbc_driver_duckdb/__init__.py +49 -0
  6. adbc_driver_duckdb/dbapi.py +115 -0
  7. duckdb/__init__.py +341 -435
  8. duckdb/_dbapi_type_object.py +231 -0
  9. duckdb/_version.py +22 -0
  10. duckdb/bytes_io_wrapper.py +12 -9
  11. duckdb/experimental/__init__.py +2 -1
  12. duckdb/experimental/spark/__init__.py +3 -4
  13. duckdb/experimental/spark/_globals.py +8 -8
  14. duckdb/experimental/spark/_typing.py +7 -9
  15. duckdb/experimental/spark/conf.py +16 -15
  16. duckdb/experimental/spark/context.py +60 -44
  17. duckdb/experimental/spark/errors/__init__.py +33 -35
  18. duckdb/experimental/spark/errors/error_classes.py +1 -1
  19. duckdb/experimental/spark/errors/exceptions/__init__.py +1 -1
  20. duckdb/experimental/spark/errors/exceptions/base.py +39 -88
  21. duckdb/experimental/spark/errors/utils.py +11 -16
  22. duckdb/experimental/spark/exception.py +9 -6
  23. duckdb/experimental/spark/sql/__init__.py +5 -5
  24. duckdb/experimental/spark/sql/_typing.py +8 -15
  25. duckdb/experimental/spark/sql/catalog.py +21 -20
  26. duckdb/experimental/spark/sql/column.py +48 -55
  27. duckdb/experimental/spark/sql/conf.py +9 -8
  28. duckdb/experimental/spark/sql/dataframe.py +185 -233
  29. duckdb/experimental/spark/sql/functions.py +1222 -1248
  30. duckdb/experimental/spark/sql/group.py +56 -52
  31. duckdb/experimental/spark/sql/readwriter.py +80 -94
  32. duckdb/experimental/spark/sql/session.py +64 -59
  33. duckdb/experimental/spark/sql/streaming.py +9 -10
  34. duckdb/experimental/spark/sql/type_utils.py +67 -65
  35. duckdb/experimental/spark/sql/types.py +309 -345
  36. duckdb/experimental/spark/sql/udf.py +6 -6
  37. duckdb/filesystem.py +26 -16
  38. duckdb/func/__init__.py +3 -0
  39. duckdb/functional/__init__.py +12 -16
  40. duckdb/polars_io.py +130 -83
  41. duckdb/query_graph/__main__.py +91 -96
  42. duckdb/sqltypes/__init__.py +63 -0
  43. duckdb/typing/__init__.py +18 -8
  44. duckdb/udf.py +10 -5
  45. duckdb/value/__init__.py +1 -0
  46. duckdb/value/constant/__init__.py +62 -60
  47. {duckdb-1.5.0.dev44.dist-info → duckdb-1.5.0.dev94.dist-info}/METADATA +12 -4
  48. duckdb-1.5.0.dev94.dist-info/RECORD +52 -0
  49. duckdb/__init__.pyi +0 -713
  50. duckdb/functional/__init__.pyi +0 -31
  51. duckdb/typing/__init__.pyi +0 -36
  52. duckdb/value/constant/__init__.pyi +0 -115
  53. duckdb-1.5.0.dev44.dist-info/RECORD +0 -47
  54. /duckdb/{value/__init__.pyi → py.typed} +0 -0
  55. {duckdb-1.5.0.dev44.dist-info → duckdb-1.5.0.dev94.dist-info}/WHEEL +0 -0
  56. {duckdb-1.5.0.dev44.dist-info → duckdb-1.5.0.dev94.dist-info}/licenses/LICENSE +0 -0
duckdb/experimental/spark/context.py

@@ -1,42 +1,42 @@
-from typing import Optional
+from typing import Optional  # noqa: D100
+
 import duckdb
 from duckdb import DuckDBPyConnection
-
-from duckdb.experimental.spark.exception import ContributionsAcceptedError
 from duckdb.experimental.spark.conf import SparkConf
+from duckdb.experimental.spark.exception import ContributionsAcceptedError
 
 
-class SparkContext:
-    def __init__(self, master: str):
-        self._connection = duckdb.connect(':memory:')
+class SparkContext:  # noqa: D101
+    def __init__(self, master: str) -> None:  # noqa: D107
+        self._connection = duckdb.connect(":memory:")
         # This aligns the null ordering with Spark.
         self._connection.execute("set default_null_order='nulls_first_on_asc_last_on_desc'")
 
     @property
-    def connection(self) -> DuckDBPyConnection:
+    def connection(self) -> DuckDBPyConnection:  # noqa: D102
         return self._connection
 
-    def stop(self) -> None:
+    def stop(self) -> None:  # noqa: D102
         self._connection.close()
 
     @classmethod
-    def getOrCreate(cls, conf: Optional[SparkConf] = None) -> "SparkContext":
+    def getOrCreate(cls, conf: Optional[SparkConf] = None) -> "SparkContext":  # noqa: D102
         raise ContributionsAcceptedError
 
     @classmethod
-    def setSystemProperty(cls, key: str, value: str) -> None:
+    def setSystemProperty(cls, key: str, value: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
     @property
-    def applicationId(self) -> str:
+    def applicationId(self) -> str:  # noqa: D102
         raise ContributionsAcceptedError
 
     @property
-    def defaultMinPartitions(self) -> int:
+    def defaultMinPartitions(self) -> int:  # noqa: D102
         raise ContributionsAcceptedError
 
     @property
-    def defaultParallelism(self) -> int:
+    def defaultParallelism(self) -> int:  # noqa: D102
         raise ContributionsAcceptedError
 
     # @property

@@ -44,33 +44,35 @@ class SparkContext:
     #     raise ContributionsAcceptedError
 
     @property
-    def startTime(self) -> str:
+    def startTime(self) -> str:  # noqa: D102
         raise ContributionsAcceptedError
 
     @property
-    def uiWebUrl(self) -> str:
+    def uiWebUrl(self) -> str:  # noqa: D102
         raise ContributionsAcceptedError
 
     @property
-    def version(self) -> str:
+    def version(self) -> str:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def __repr__(self) -> str:
+    def __repr__(self) -> str:  # noqa: D105
         raise ContributionsAcceptedError
 
-    # def accumulator(self, value: ~T, accum_param: Optional[ForwardRef('AccumulatorParam[T]')] = None) -> 'Accumulator[T]':
+    # def accumulator(self, value: ~T, accum_param: Optional[ForwardRef('AccumulatorParam[T]')] = None
+    # ) -> 'Accumulator[T]':
     #     pass
 
-    def addArchive(self, path: str) -> None:
+    def addArchive(self, path: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def addFile(self, path: str, recursive: bool = False) -> None:
+    def addFile(self, path: str, recursive: bool = False) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def addPyFile(self, path: str) -> None:
+    def addPyFile(self, path: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    # def binaryFiles(self, path: str, minPartitions: Optional[int] = None) -> duckdb.experimental.spark.rdd.RDD[typing.Tuple[str, bytes]]:
+    # def binaryFiles(self, path: str, minPartitions: Optional[int] = None
+    # ) -> duckdb.experimental.spark.rdd.RDD[typing.Tuple[str, bytes]]:
     #     pass
 
     # def binaryRecords(self, path: str, recordLength: int) -> duckdb.experimental.spark.rdd.RDD[bytes]:

@@ -79,37 +81,45 @@ class SparkContext:
     # def broadcast(self, value: ~T) -> 'Broadcast[T]':
     #     pass
 
-    def cancelAllJobs(self) -> None:
+    def cancelAllJobs(self) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def cancelJobGroup(self, groupId: str) -> None:
+    def cancelJobGroup(self, groupId: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def dump_profiles(self, path: str) -> None:
+    def dump_profiles(self, path: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
     # def emptyRDD(self) -> duckdb.experimental.spark.rdd.RDD[typing.Any]:
     #     pass
 
-    def getCheckpointDir(self) -> Optional[str]:
+    def getCheckpointDir(self) -> Optional[str]:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def getConf(self) -> SparkConf:
+    def getConf(self) -> SparkConf:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def getLocalProperty(self, key: str) -> Optional[str]:
+    def getLocalProperty(self, key: str) -> Optional[str]:  # noqa: D102
         raise ContributionsAcceptedError
 
-    # def hadoopFile(self, path: str, inputFormatClass: str, keyClass: str, valueClass: str, keyConverter: Optional[str] = None, valueConverter: Optional[str] = None, conf: Optional[Dict[str, str]] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
+    # def hadoopFile(self, path: str, inputFormatClass: str, keyClass: str, valueClass: str,
+    #     keyConverter: Optional[str] = None, valueConverter: Optional[str] = None,
+    #     conf: Optional[Dict[str, str]] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
     #     pass
 
-    # def hadoopRDD(self, inputFormatClass: str, keyClass: str, valueClass: str, keyConverter: Optional[str] = None, valueConverter: Optional[str] = None, conf: Optional[Dict[str, str]] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
+    # def hadoopRDD(self, inputFormatClass: str, keyClass: str, valueClass: str, keyConverter: Optional[str] = None,
+    #     valueConverter: Optional[str] = None, conf: Optional[Dict[str, str]] = None, batchSize: int = 0
+    # ) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
     #     pass
 
-    # def newAPIHadoopFile(self, path: str, inputFormatClass: str, keyClass: str, valueClass: str, keyConverter: Optional[str] = None, valueConverter: Optional[str] = None, conf: Optional[Dict[str, str]] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
+    # def newAPIHadoopFile(self, path: str, inputFormatClass: str, keyClass: str, valueClass: str,
+    #     keyConverter: Optional[str] = None, valueConverter: Optional[str] = None,
+    #     conf: Optional[Dict[str, str]] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
     #     pass
 
-    # def newAPIHadoopRDD(self, inputFormatClass: str, keyClass: str, valueClass: str, keyConverter: Optional[str] = None, valueConverter: Optional[str] = None, conf: Optional[Dict[str, str]] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
+    # def newAPIHadoopRDD(self, inputFormatClass: str, keyClass: str, valueClass: str,
+    #     keyConverter: Optional[str] = None, valueConverter: Optional[str] = None,
+    #     conf: Optional[Dict[str, str]] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
     #     pass
 
     # def parallelize(self, c: Iterable[~T], numSlices: Optional[int] = None) -> pyspark.rdd.RDD[~T]:

@@ -118,46 +128,52 @@ class SparkContext:
     # def pickleFile(self, name: str, minPartitions: Optional[int] = None) -> pyspark.rdd.RDD[typing.Any]:
     #     pass
 
-    # def range(self, start: int, end: Optional[int] = None, step: int = 1, numSlices: Optional[int] = None) -> pyspark.rdd.RDD[int]:
+    # def range(self, start: int, end: Optional[int] = None, step: int = 1, numSlices: Optional[int] = None
+    # ) -> pyspark.rdd.RDD[int]:
     #     pass
 
-    # def runJob(self, rdd: pyspark.rdd.RDD[~T], partitionFunc: Callable[[Iterable[~T]], Iterable[~U]], partitions: Optional[Sequence[int]] = None, allowLocal: bool = False) -> List[~U]:
+    # def runJob(self, rdd: pyspark.rdd.RDD[~T], partitionFunc: Callable[[Iterable[~T]], Iterable[~U]],
+    #     partitions: Optional[Sequence[int]] = None, allowLocal: bool = False) -> List[~U]:
     #     pass
 
-    # def sequenceFile(self, path: str, keyClass: Optional[str] = None, valueClass: Optional[str] = None, keyConverter: Optional[str] = None, valueConverter: Optional[str] = None, minSplits: Optional[int] = None, batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
+    # def sequenceFile(self, path: str, keyClass: Optional[str] = None, valueClass: Optional[str] = None,
+    #     keyConverter: Optional[str] = None, valueConverter: Optional[str] = None, minSplits: Optional[int] = None,
+    #     batchSize: int = 0) -> pyspark.rdd.RDD[typing.Tuple[~T, ~U]]:
     #     pass
 
-    def setCheckpointDir(self, dirName: str) -> None:
+    def setCheckpointDir(self, dirName: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def setJobDescription(self, value: str) -> None:
+    def setJobDescription(self, value: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def setJobGroup(self, groupId: str, description: str, interruptOnCancel: bool = False) -> None:
+    def setJobGroup(self, groupId: str, description: str, interruptOnCancel: bool = False) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def setLocalProperty(self, key: str, value: str) -> None:
+    def setLocalProperty(self, key: str, value: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def setLogLevel(self, logLevel: str) -> None:
+    def setLogLevel(self, logLevel: str) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def show_profiles(self) -> None:
+    def show_profiles(self) -> None:  # noqa: D102
         raise ContributionsAcceptedError
 
-    def sparkUser(self) -> str:
+    def sparkUser(self) -> str:  # noqa: D102
         raise ContributionsAcceptedError
 
     # def statusTracker(self) -> duckdb.experimental.spark.status.StatusTracker:
     #     raise ContributionsAcceptedError
 
-    # def textFile(self, name: str, minPartitions: Optional[int] = None, use_unicode: bool = True) -> pyspark.rdd.RDD[str]:
+    # def textFile(self, name: str, minPartitions: Optional[int] = None, use_unicode: bool = True
+    # ) -> pyspark.rdd.RDD[str]:
     #     pass
 
     # def union(self, rdds: List[pyspark.rdd.RDD[~T]]) -> pyspark.rdd.RDD[~T]:
     #     pass
 
-    # def wholeTextFiles(self, path: str, minPartitions: Optional[int] = None, use_unicode: bool = True) -> pyspark.rdd.RDD[typing.Tuple[str, str]]:
+    # def wholeTextFiles(self, path: str, minPartitions: Optional[int] = None, use_unicode: bool = True
+    # ) -> pyspark.rdd.RDD[typing.Tuple[str, str]]:
     #     pass
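The context.py change is lint-driven (noqa tags, double-quoted strings, explicit return types); behavior is unchanged. As orientation, a minimal usage sketch of the shim shown above, not taken from the package itself. The "local" master string is a placeholder, and the master argument appears unused in the lines shown.

from duckdb.experimental.spark.context import SparkContext

sc = SparkContext("local")          # opens an in-memory DuckDB connection with Spark-style null ordering
sc.connection.execute("SELECT 42")  # the underlying DuckDBPyConnection is exposed as a property
sc.stop()                           # closes the connection; most other methods raise ContributionsAcceptedError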
duckdb/experimental/spark/errors/__init__.py

@@ -15,58 +15,56 @@
 # limitations under the License.
 #
 
-"""
-PySpark exceptions.
-"""
-from .exceptions.base import (  # noqa: F401
-    PySparkException,
+"""PySpark exceptions."""
+
+from .exceptions.base import (
     AnalysisException,
-    TempTableAlreadyExistsException,
-    ParseException,
-    IllegalArgumentException,
     ArithmeticException,
-    UnsupportedOperationException,
     ArrayIndexOutOfBoundsException,
     DateTimeException,
+    IllegalArgumentException,
     NumberFormatException,
-    StreamingQueryException,
-    QueryExecutionException,
+    ParseException,
+    PySparkAssertionError,
+    PySparkAttributeError,
+    PySparkException,
+    PySparkIndexError,
+    PySparkNotImplementedError,
+    PySparkRuntimeError,
+    PySparkTypeError,
+    PySparkValueError,
     PythonException,
-    UnknownException,
+    QueryExecutionException,
     SparkRuntimeException,
     SparkUpgradeException,
-    PySparkTypeError,
-    PySparkValueError,
-    PySparkIndexError,
-    PySparkAttributeError,
-    PySparkRuntimeError,
-    PySparkAssertionError,
-    PySparkNotImplementedError,
+    StreamingQueryException,
+    TempTableAlreadyExistsException,
+    UnknownException,
+    UnsupportedOperationException,
 )
 
-
 __all__ = [
-    "PySparkException",
     "AnalysisException",
-    "TempTableAlreadyExistsException",
-    "ParseException",
-    "IllegalArgumentException",
     "ArithmeticException",
-    "UnsupportedOperationException",
     "ArrayIndexOutOfBoundsException",
     "DateTimeException",
+    "IllegalArgumentException",
     "NumberFormatException",
-    "StreamingQueryException",
-    "QueryExecutionException",
+    "ParseException",
+    "PySparkAssertionError",
+    "PySparkAttributeError",
+    "PySparkException",
+    "PySparkIndexError",
+    "PySparkNotImplementedError",
+    "PySparkRuntimeError",
+    "PySparkTypeError",
+    "PySparkValueError",
     "PythonException",
-    "UnknownException",
+    "QueryExecutionException",
     "SparkRuntimeException",
     "SparkUpgradeException",
-    "PySparkTypeError",
-    "PySparkValueError",
-    "PySparkIndexError",
-    "PySparkAttributeError",
-    "PySparkRuntimeError",
-    "PySparkAssertionError",
-    "PySparkNotImplementedError",
+    "StreamingQueryException",
+    "TempTableAlreadyExistsException",
+    "UnknownException",
+    "UnsupportedOperationException",
 ]
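The errors/__init__.py change only sorts the re-exports and drops the now-unneeded # noqa: F401 on the import; the public import path is unchanged. A minimal sketch under that assumption (the error text is made up):

from duckdb.experimental.spark.errors import AnalysisException, PySparkTypeError

try:
    raise PySparkTypeError(message="expected a Column, got str")  # hypothetical message
except (AnalysisException, PySparkTypeError) as exc:
    print(exc)  # plain message; no [ERROR_CLASS] prefix because error_class is None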
duckdb/experimental/spark/errors/error_classes.py

@@ -1,4 +1,4 @@
-#
+# ruff: noqa: D100, E501
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements. See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
duckdb/experimental/spark/errors/exceptions/__init__.py

@@ -1,4 +1,4 @@
-#
+# # noqa: D104
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements. See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
duckdb/experimental/spark/errors/exceptions/base.py

@@ -1,20 +1,19 @@
-from typing import Dict, Optional, cast
+from typing import Optional, cast  # noqa: D100
 
 from ..utils import ErrorClassesReader
 
+
 class PySparkException(Exception):
-    """
-    Base Exception for handling errors generated from PySpark.
-    """
+    """Base Exception for handling errors generated from PySpark."""
 
-    def __init__(
+    def __init__(  # noqa: D107
         self,
         message: Optional[str] = None,
         # The error class, decides the message format, must be one of the valid options listed in 'error_classes.py'
         error_class: Optional[str] = None,
         # The dictionary listing the arguments specified in the message (or the error_class)
-        message_parameters: Optional[Dict[str, str]] = None,
-    ):
+        message_parameters: Optional[dict[str, str]] = None,
+    ) -> None:
         # `message` vs `error_class` & `message_parameters` are mutually exclusive.
         assert (message is not None and (error_class is None and message_parameters is None)) or (
             message is None and (error_class is not None and message_parameters is not None)

@@ -24,7 +23,7 @@ class PySparkException(Exception):
 
         if message is None:
             self.message = self.error_reader.get_error_message(
-                cast(str, error_class), cast(Dict[str, str], message_parameters)
+                cast("str", error_class), cast("dict[str, str]", message_parameters)
             )
         else:
             self.message = message

@@ -33,25 +32,23 @@ class PySparkException(Exception):
         self.message_parameters = message_parameters
 
     def getErrorClass(self) -> Optional[str]:
-        """
-        Returns an error class as a string.
+        """Returns an error class as a string.
 
         .. versionadded:: 3.4.0
 
-        See Also
+        See Also:
         --------
         :meth:`PySparkException.getMessageParameters`
         :meth:`PySparkException.getSqlState`
         """
         return self.error_class
 
-    def getMessageParameters(self) -> Optional[Dict[str, str]]:
-        """
-        Returns a message parameters as a dictionary.
+    def getMessageParameters(self) -> Optional[dict[str, str]]:
+        """Returns a message parameters as a dictionary.
 
         .. versionadded:: 3.4.0
 
-        See Also
+        See Also:
         --------
         :meth:`PySparkException.getErrorClass`
         :meth:`PySparkException.getSqlState`

@@ -59,159 +56,113 @@ class PySparkException(Exception):
         return self.message_parameters
 
     def getSqlState(self) -> None:
-        """
-        Returns an SQLSTATE as a string.
+        """Returns an SQLSTATE as a string.
 
         Errors generated in Python have no SQLSTATE, so it always returns None.
 
         .. versionadded:: 3.4.0
 
-        See Also
+        See Also:
         --------
         :meth:`PySparkException.getErrorClass`
         :meth:`PySparkException.getMessageParameters`
         """
         return None
 
-    def __str__(self) -> str:
+    def __str__(self) -> str:  # noqa: D105
         if self.getErrorClass() is not None:
             return f"[{self.getErrorClass()}] {self.message}"
         else:
             return self.message
 
+
 class AnalysisException(PySparkException):
-    """
-    Failed to analyze a SQL query plan.
-    """
+    """Failed to analyze a SQL query plan."""
 
 
 class SessionNotSameException(PySparkException):
-    """
-    Performed the same operation on different SparkSession.
-    """
+    """Performed the same operation on different SparkSession."""
 
 
 class TempTableAlreadyExistsException(AnalysisException):
-    """
-    Failed to create temp view since it is already exists.
-    """
+    """Failed to create temp view since it is already exists."""
 
 
 class ParseException(AnalysisException):
-    """
-    Failed to parse a SQL command.
-    """
+    """Failed to parse a SQL command."""
 
 
 class IllegalArgumentException(PySparkException):
-    """
-    Passed an illegal or inappropriate argument.
-    """
+    """Passed an illegal or inappropriate argument."""
 
 
 class ArithmeticException(PySparkException):
-    """
-    Arithmetic exception thrown from Spark with an error class.
-    """
+    """Arithmetic exception thrown from Spark with an error class."""
 
 
 class UnsupportedOperationException(PySparkException):
-    """
-    Unsupported operation exception thrown from Spark with an error class.
-    """
+    """Unsupported operation exception thrown from Spark with an error class."""
 
 
 class ArrayIndexOutOfBoundsException(PySparkException):
-    """
-    Array index out of bounds exception thrown from Spark with an error class.
-    """
+    """Array index out of bounds exception thrown from Spark with an error class."""
 
 
 class DateTimeException(PySparkException):
-    """
-    Datetime exception thrown from Spark with an error class.
-    """
+    """Datetime exception thrown from Spark with an error class."""
 
 
 class NumberFormatException(IllegalArgumentException):
-    """
-    Number format exception thrown from Spark with an error class.
-    """
+    """Number format exception thrown from Spark with an error class."""
 
 
 class StreamingQueryException(PySparkException):
-    """
-    Exception that stopped a :class:`StreamingQuery`.
-    """
+    """Exception that stopped a :class:`StreamingQuery`."""
 
 
 class QueryExecutionException(PySparkException):
-    """
-    Failed to execute a query.
-    """
+    """Failed to execute a query."""
 
 
 class PythonException(PySparkException):
-    """
-    Exceptions thrown from Python workers.
-    """
+    """Exceptions thrown from Python workers."""
 
 
 class SparkRuntimeException(PySparkException):
-    """
-    Runtime exception thrown from Spark with an error class.
-    """
+    """Runtime exception thrown from Spark with an error class."""
 
 
 class SparkUpgradeException(PySparkException):
-    """
-    Exception thrown because of Spark upgrade.
-    """
+    """Exception thrown because of Spark upgrade."""
 
 
 class UnknownException(PySparkException):
-    """
-    None of the above exceptions.
-    """
+    """None of the above exceptions."""
 
 
 class PySparkValueError(PySparkException, ValueError):
-    """
-    Wrapper class for ValueError to support error classes.
-    """
+    """Wrapper class for ValueError to support error classes."""
 
 
 class PySparkIndexError(PySparkException, IndexError):
-    """
-    Wrapper class for IndexError to support error classes.
-    """
+    """Wrapper class for IndexError to support error classes."""
 
 
 class PySparkTypeError(PySparkException, TypeError):
-    """
-    Wrapper class for TypeError to support error classes.
-    """
+    """Wrapper class for TypeError to support error classes."""
 
 
 class PySparkAttributeError(PySparkException, AttributeError):
-    """
-    Wrapper class for AttributeError to support error classes.
-    """
+    """Wrapper class for AttributeError to support error classes."""
 
 
 class PySparkRuntimeError(PySparkException, RuntimeError):
-    """
-    Wrapper class for RuntimeError to support error classes.
-    """
+    """Wrapper class for RuntimeError to support error classes."""
 
 
 class PySparkAssertionError(PySparkException, AssertionError):
-    """
-    Wrapper class for AssertionError to support error classes.
-    """
+    """Wrapper class for AssertionError to support error classes."""
 
 
 class PySparkNotImplementedError(PySparkException, NotImplementedError):
-    """
-    Wrapper class for NotImplementedError to support error classes.
-    """
+    """Wrapper class for NotImplementedError to support error classes."""
duckdb/experimental/spark/errors/utils.py

@@ -1,4 +1,4 @@
-#
+# # noqa: D100
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements. See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.

@@ -16,37 +16,30 @@
 #
 
 import re
-from typing import Dict
 
 from .error_classes import ERROR_CLASSES_MAP
 
 
 class ErrorClassesReader:
-    """
-    A reader to load error information from error_classes.py.
-    """
+    """A reader to load error information from error_classes.py."""
 
-    def __init__(self) -> None:
+    def __init__(self) -> None:  # noqa: D107
         self.error_info_map = ERROR_CLASSES_MAP
 
-    def get_error_message(self, error_class: str, message_parameters: Dict[str, str]) -> str:
-        """
-        Returns the completed error message by applying message parameters to the message template.
-        """
+    def get_error_message(self, error_class: str, message_parameters: dict[str, str]) -> str:
+        """Returns the completed error message by applying message parameters to the message template."""
         message_template = self.get_message_template(error_class)
         # Verify message parameters.
         message_parameters_from_template = re.findall("<([a-zA-Z0-9_-]+)>", message_template)
         assert set(message_parameters_from_template) == set(message_parameters), (
-            f"Undefined error message parameter for error class: {error_class}. "
-            f"Parameters: {message_parameters}"
+            f"Undefined error message parameter for error class: {error_class}. Parameters: {message_parameters}"
         )
         table = str.maketrans("<>", "{}")
 
         return message_template.translate(table).format(**message_parameters)
 
     def get_message_template(self, error_class: str) -> str:
-        """
-        Returns the message template for corresponding error class from error_classes.py.
+        """Returns the message template for corresponding error class from error_classes.py.
 
         For example,
         when given `error_class` is "EXAMPLE_ERROR_CLASS",

@@ -93,7 +86,8 @@ class ErrorClassesReader:
         if main_error_class in self.error_info_map:
             main_error_class_info_map = self.error_info_map[main_error_class]
         else:
-            raise ValueError(f"Cannot find main error class '{main_error_class}'")
+            msg = f"Cannot find main error class '{main_error_class}'"
+            raise ValueError(msg)
 
         main_message_template = "\n".join(main_error_class_info_map["message"])
 
@@ -108,7 +102,8 @@ class ErrorClassesReader:
         if sub_error_class in main_error_class_subclass_info_map:
             sub_error_class_info_map = main_error_class_subclass_info_map[sub_error_class]
         else:
-            raise ValueError(f"Cannot find sub error class '{sub_error_class}'")
+            msg = f"Cannot find sub error class '{sub_error_class}'"
+            raise ValueError(msg)
 
         sub_message_template = "\n".join(sub_error_class_info_map["message"])
         message_template = main_message_template + " " + sub_message_template
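For context on the assert and translate/format lines above: error message templates spell their parameters as <name>, which get_error_message validates against the supplied dictionary and then rewrites to {name} for str.format. A standalone sketch of just that mechanism (the template text is invented, not an entry from ERROR_CLASSES_MAP):

import re

template = "Cannot parse <value> as <type>."  # hypothetical message template
params = {"value": "'abc'", "type": "INTEGER"}

# Same validation and substitution steps as ErrorClassesReader.get_error_message.
assert set(re.findall("<([a-zA-Z0-9_-]+)>", template)) == set(params)
print(template.translate(str.maketrans("<>", "{}")).format(**params))
# Cannot parse 'abc' as INTEGER.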
duckdb/experimental/spark/exception.py

@@ -1,14 +1,17 @@
+# ruff: noqa: D100
+from typing import Optional
+
+
 class ContributionsAcceptedError(NotImplementedError):
-    """
-    This method is not planned to be implemented, if you would like to implement this method
+    """This method is not planned to be implemented, if you would like to implement this method
     or show your interest in this method to other members of the community,
-    feel free to open up a PR or a Discussion over on https://github.com/duckdb/duckdb
-    """
+    feel free to open up a PR or a Discussion over on https://github.com/duckdb/duckdb.
+    """  # noqa: D205
 
-    def __init__(self, message=None):
+    def __init__(self, message: Optional[str] = None) -> None:  # noqa: D107
         doc = self.__class__.__doc__
         if message:
-            doc = message + '\n' + doc
+            doc = message + "\n" + doc
         super().__init__(doc)
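Per the __init__ shown above, ContributionsAcceptedError still builds its message by prepending an optional caller-supplied string to the class docstring; only the typing and quoting changed. A small illustrative sketch (the message text is made up):

from duckdb.experimental.spark.exception import ContributionsAcceptedError

try:
    raise ContributionsAcceptedError("SparkContext.applicationId is not implemented")  # hypothetical text
except ContributionsAcceptedError as exc:
    print(exc)  # caller message, a newline, then the "contributions accepted" docstring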