polars-runtime-compat 1.34.0b2-cp39-abi3-macosx_11_0_arm64.whl → 1.34.0b4-cp39-abi3-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of polars-runtime-compat might be problematic.
- _polars_runtime_compat/_polars_runtime_compat.abi3.so +0 -0
- {polars_runtime_compat-1.34.0b2.dist-info → polars_runtime_compat-1.34.0b4.dist-info}/METADATA +1 -1
- polars_runtime_compat-1.34.0b4.dist-info/RECORD +6 -0
- polars/__init__.py +0 -528
- polars/_cpu_check.py +0 -265
- polars/_dependencies.py +0 -355
- polars/_plr.py +0 -99
- polars/_plr.pyi +0 -2496
- polars/_reexport.py +0 -23
- polars/_typing.py +0 -478
- polars/_utils/__init__.py +0 -37
- polars/_utils/async_.py +0 -102
- polars/_utils/cache.py +0 -176
- polars/_utils/cloud.py +0 -40
- polars/_utils/constants.py +0 -29
- polars/_utils/construction/__init__.py +0 -46
- polars/_utils/construction/dataframe.py +0 -1397
- polars/_utils/construction/other.py +0 -72
- polars/_utils/construction/series.py +0 -560
- polars/_utils/construction/utils.py +0 -118
- polars/_utils/convert.py +0 -224
- polars/_utils/deprecation.py +0 -406
- polars/_utils/getitem.py +0 -457
- polars/_utils/logging.py +0 -11
- polars/_utils/nest_asyncio.py +0 -264
- polars/_utils/parquet.py +0 -15
- polars/_utils/parse/__init__.py +0 -12
- polars/_utils/parse/expr.py +0 -242
- polars/_utils/polars_version.py +0 -19
- polars/_utils/pycapsule.py +0 -53
- polars/_utils/scan.py +0 -27
- polars/_utils/serde.py +0 -63
- polars/_utils/slice.py +0 -215
- polars/_utils/udfs.py +0 -1251
- polars/_utils/unstable.py +0 -63
- polars/_utils/various.py +0 -782
- polars/_utils/wrap.py +0 -25
- polars/api.py +0 -370
- polars/catalog/__init__.py +0 -0
- polars/catalog/unity/__init__.py +0 -19
- polars/catalog/unity/client.py +0 -733
- polars/catalog/unity/models.py +0 -152
- polars/config.py +0 -1571
- polars/convert/__init__.py +0 -25
- polars/convert/general.py +0 -1046
- polars/convert/normalize.py +0 -261
- polars/dataframe/__init__.py +0 -5
- polars/dataframe/_html.py +0 -186
- polars/dataframe/frame.py +0 -12582
- polars/dataframe/group_by.py +0 -1067
- polars/dataframe/plotting.py +0 -257
- polars/datatype_expr/__init__.py +0 -5
- polars/datatype_expr/array.py +0 -56
- polars/datatype_expr/datatype_expr.py +0 -304
- polars/datatype_expr/list.py +0 -18
- polars/datatype_expr/struct.py +0 -69
- polars/datatypes/__init__.py +0 -122
- polars/datatypes/_parse.py +0 -195
- polars/datatypes/_utils.py +0 -48
- polars/datatypes/classes.py +0 -1213
- polars/datatypes/constants.py +0 -11
- polars/datatypes/constructor.py +0 -172
- polars/datatypes/convert.py +0 -366
- polars/datatypes/group.py +0 -130
- polars/exceptions.py +0 -230
- polars/expr/__init__.py +0 -7
- polars/expr/array.py +0 -964
- polars/expr/binary.py +0 -346
- polars/expr/categorical.py +0 -306
- polars/expr/datetime.py +0 -2620
- polars/expr/expr.py +0 -11272
- polars/expr/list.py +0 -1408
- polars/expr/meta.py +0 -444
- polars/expr/name.py +0 -321
- polars/expr/string.py +0 -3045
- polars/expr/struct.py +0 -357
- polars/expr/whenthen.py +0 -185
- polars/functions/__init__.py +0 -193
- polars/functions/aggregation/__init__.py +0 -33
- polars/functions/aggregation/horizontal.py +0 -298
- polars/functions/aggregation/vertical.py +0 -341
- polars/functions/as_datatype.py +0 -848
- polars/functions/business.py +0 -138
- polars/functions/col.py +0 -384
- polars/functions/datatype.py +0 -121
- polars/functions/eager.py +0 -524
- polars/functions/escape_regex.py +0 -29
- polars/functions/lazy.py +0 -2751
- polars/functions/len.py +0 -68
- polars/functions/lit.py +0 -210
- polars/functions/random.py +0 -22
- polars/functions/range/__init__.py +0 -19
- polars/functions/range/_utils.py +0 -15
- polars/functions/range/date_range.py +0 -303
- polars/functions/range/datetime_range.py +0 -370
- polars/functions/range/int_range.py +0 -348
- polars/functions/range/linear_space.py +0 -311
- polars/functions/range/time_range.py +0 -287
- polars/functions/repeat.py +0 -301
- polars/functions/whenthen.py +0 -353
- polars/interchange/__init__.py +0 -10
- polars/interchange/buffer.py +0 -77
- polars/interchange/column.py +0 -190
- polars/interchange/dataframe.py +0 -230
- polars/interchange/from_dataframe.py +0 -328
- polars/interchange/protocol.py +0 -303
- polars/interchange/utils.py +0 -170
- polars/io/__init__.py +0 -64
- polars/io/_utils.py +0 -317
- polars/io/avro.py +0 -49
- polars/io/clipboard.py +0 -36
- polars/io/cloud/__init__.py +0 -17
- polars/io/cloud/_utils.py +0 -80
- polars/io/cloud/credential_provider/__init__.py +0 -17
- polars/io/cloud/credential_provider/_builder.py +0 -520
- polars/io/cloud/credential_provider/_providers.py +0 -618
- polars/io/csv/__init__.py +0 -9
- polars/io/csv/_utils.py +0 -38
- polars/io/csv/batched_reader.py +0 -142
- polars/io/csv/functions.py +0 -1495
- polars/io/database/__init__.py +0 -6
- polars/io/database/_arrow_registry.py +0 -70
- polars/io/database/_cursor_proxies.py +0 -147
- polars/io/database/_executor.py +0 -578
- polars/io/database/_inference.py +0 -314
- polars/io/database/_utils.py +0 -144
- polars/io/database/functions.py +0 -516
- polars/io/delta.py +0 -499
- polars/io/iceberg/__init__.py +0 -3
- polars/io/iceberg/_utils.py +0 -697
- polars/io/iceberg/dataset.py +0 -556
- polars/io/iceberg/functions.py +0 -151
- polars/io/ipc/__init__.py +0 -8
- polars/io/ipc/functions.py +0 -514
- polars/io/json/__init__.py +0 -3
- polars/io/json/read.py +0 -101
- polars/io/ndjson.py +0 -332
- polars/io/parquet/__init__.py +0 -17
- polars/io/parquet/field_overwrites.py +0 -140
- polars/io/parquet/functions.py +0 -722
- polars/io/partition.py +0 -491
- polars/io/plugins.py +0 -187
- polars/io/pyarrow_dataset/__init__.py +0 -5
- polars/io/pyarrow_dataset/anonymous_scan.py +0 -109
- polars/io/pyarrow_dataset/functions.py +0 -79
- polars/io/scan_options/__init__.py +0 -5
- polars/io/scan_options/_options.py +0 -59
- polars/io/scan_options/cast_options.py +0 -126
- polars/io/spreadsheet/__init__.py +0 -6
- polars/io/spreadsheet/_utils.py +0 -52
- polars/io/spreadsheet/_write_utils.py +0 -647
- polars/io/spreadsheet/functions.py +0 -1323
- polars/lazyframe/__init__.py +0 -9
- polars/lazyframe/engine_config.py +0 -61
- polars/lazyframe/frame.py +0 -8564
- polars/lazyframe/group_by.py +0 -669
- polars/lazyframe/in_process.py +0 -42
- polars/lazyframe/opt_flags.py +0 -333
- polars/meta/__init__.py +0 -14
- polars/meta/build.py +0 -33
- polars/meta/index_type.py +0 -27
- polars/meta/thread_pool.py +0 -50
- polars/meta/versions.py +0 -120
- polars/ml/__init__.py +0 -0
- polars/ml/torch.py +0 -213
- polars/ml/utilities.py +0 -30
- polars/plugins.py +0 -155
- polars/py.typed +0 -0
- polars/pyproject.toml +0 -96
- polars/schema.py +0 -265
- polars/selectors.py +0 -3117
- polars/series/__init__.py +0 -5
- polars/series/array.py +0 -776
- polars/series/binary.py +0 -254
- polars/series/categorical.py +0 -246
- polars/series/datetime.py +0 -2275
- polars/series/list.py +0 -1087
- polars/series/plotting.py +0 -191
- polars/series/series.py +0 -9197
- polars/series/string.py +0 -2367
- polars/series/struct.py +0 -154
- polars/series/utils.py +0 -191
- polars/sql/__init__.py +0 -7
- polars/sql/context.py +0 -677
- polars/sql/functions.py +0 -139
- polars/string_cache.py +0 -185
- polars/testing/__init__.py +0 -13
- polars/testing/asserts/__init__.py +0 -9
- polars/testing/asserts/frame.py +0 -231
- polars/testing/asserts/series.py +0 -219
- polars/testing/asserts/utils.py +0 -12
- polars/testing/parametric/__init__.py +0 -33
- polars/testing/parametric/profiles.py +0 -107
- polars/testing/parametric/strategies/__init__.py +0 -22
- polars/testing/parametric/strategies/_utils.py +0 -14
- polars/testing/parametric/strategies/core.py +0 -615
- polars/testing/parametric/strategies/data.py +0 -452
- polars/testing/parametric/strategies/dtype.py +0 -436
- polars/testing/parametric/strategies/legacy.py +0 -169
- polars/type_aliases.py +0 -24
- polars_runtime_compat-1.34.0b2.dist-info/RECORD +0 -203
- {polars_runtime_compat-1.34.0b2.dist-info → polars_runtime_compat-1.34.0b4.dist-info}/WHEEL +0 -0
- {polars_runtime_compat-1.34.0b2.dist-info → polars_runtime_compat-1.34.0b4.dist-info}/licenses/LICENSE +0 -0
polars/convert/general.py
DELETED
@@ -1,1046 +0,0 @@
-from __future__ import annotations
-
-import io
-import itertools
-import re
-from collections.abc import Iterable, Sequence
-from typing import TYPE_CHECKING, Any, Literal, overload
-
-import polars._reexport as pl
-from polars import functions as F
-from polars._dependencies import _check_for_pyarrow
-from polars._dependencies import pandas as pd
-from polars._dependencies import pyarrow as pa
-from polars._utils.construction.dataframe import (
-    arrow_to_pydf,
-    dict_to_pydf,
-    numpy_to_pydf,
-    pandas_to_pydf,
-    sequence_to_pydf,
-)
-from polars._utils.construction.series import arrow_to_pyseries, pandas_to_pyseries
-from polars._utils.deprecation import (
-    deprecate_renamed_parameter,
-    issue_deprecation_warning,
-)
-from polars._utils.pycapsule import is_pycapsule, pycapsule_to_frame
-from polars._utils.various import (
-    _cast_repr_strings_with_schema,
-    issue_warning,
-    qualified_type_name,
-)
-from polars._utils.wrap import wrap_df, wrap_s
-from polars.datatypes import N_INFER_DEFAULT, Categorical, String
-from polars.exceptions import NoDataError
-
-if TYPE_CHECKING:
-    from collections.abc import Mapping
-
-    from polars import DataFrame, Series
-    from polars._dependencies import numpy as np
-    from polars._dependencies import torch
-    from polars._typing import (
-        ArrowArrayExportable,
-        ArrowStreamExportable,
-        Orientation,
-        PolarsDataType,
-        SchemaDefinition,
-        SchemaDict,
-    )
-    from polars.interchange.protocol import SupportsInterchange
-
-
-def from_dict(
-    data: Mapping[str, Sequence[object] | Mapping[str, Sequence[object]] | Series],
-    schema: SchemaDefinition | None = None,
-    *,
-    schema_overrides: SchemaDict | None = None,
-    strict: bool = True,
-) -> DataFrame:
-    """
-    Construct a DataFrame from a dictionary of sequences.
-
-    This operation clones data, unless you pass a `{str: pl.Series,}` dict.
-
-    Parameters
-    ----------
-    data : dict of sequences
-        Two-dimensional data represented as a dictionary. dict must contain
-        Sequences.
-    schema : Sequence of str, (str,DataType) pairs, or a {str:DataType,} dict
-        The DataFrame schema may be declared in several ways:
-
-        * As a dict of {name:type} pairs; if type is None, it will be auto-inferred.
-        * As a list of column names; in this case types are automatically inferred.
-        * As a list of (name,type) pairs; this is equivalent to the dictionary form.
-
-        If you supply a list of column names that does not match the names in the
-        underlying data, the names given here will overwrite them. The number
-        of names given in the schema should match the underlying data dimensions.
-    schema_overrides : dict, default None
-        Support type specification or override of one or more columns; note that
-        any dtypes inferred from the columns param will be overridden.
-    strict : bool, default True
-        Throw an error if any `data` value does not exactly match the given or inferred
-        data type for that column. If set to `False`, values that do not match the data
-        type are cast to that data type or, if casting is not possible, set to null
-        instead.
-
-    Returns
-    -------
-    DataFrame
-
-    Examples
-    --------
-    >>> df = pl.from_dict({"a": [1, 2], "b": [3, 4]})
-    >>> df
-    shape: (2, 2)
-    ┌─────┬─────┐
-    │ a   ┆ b   │
-    │ --- ┆ --- │
-    │ i64 ┆ i64 │
-    ╞═════╪═════╡
-    │ 1   ┆ 3   │
-    │ 2   ┆ 4   │
-    └─────┴─────┘
-    """
-    return wrap_df(
-        dict_to_pydf(
-            data,
-            schema=schema,
-            schema_overrides=schema_overrides,
-            strict=strict,
-        )
-    )
-
-
-def from_dicts(
-    data: Iterable[Mapping[str, Any]],
-    schema: SchemaDefinition | None = None,
-    *,
-    schema_overrides: SchemaDict | None = None,
-    strict: bool = True,
-    infer_schema_length: int | None = N_INFER_DEFAULT,
-) -> DataFrame:
-    """
-    Construct a DataFrame from a sequence of dictionaries. This operation clones data.
-
-    Parameters
-    ----------
-    data
-        Sequence with dictionaries mapping column name to value
-    schema : Sequence of str, (str,DataType) pairs, or a {str:DataType,} dict
-        The DataFrame schema may be declared in several ways:
-
-        * As a dict of {name:type} pairs; if type is None, it will be auto-inferred.
-        * As a list of column names; in this case types are automatically inferred.
-        * As a list of (name,type) pairs; this is equivalent to the dictionary form.
-
-        If a list of column names is supplied that does NOT match the names in the
-        underlying data, the names given here will overwrite the actual fields in
-        the order that they appear - however, in this case it is typically clearer
-        to rename after loading the frame.
-
-        If you want to drop some of the fields found in the input dictionaries, a
-        *partial* schema can be declared, in which case omitted fields will not be
-        loaded. Similarly, you can extend the loaded frame with empty columns by
-        adding them to the schema.
-    schema_overrides : dict, default None
-        Support override of inferred types for one or more columns.
-    strict : bool, default True
-        Throw an error if any `data` value does not exactly match the given or inferred
-        data type for that column. If set to `False`, values that do not match the data
-        type are cast to that data type or, if casting is not possible, set to null
-        instead.
-    infer_schema_length
-        The maximum number of rows to scan for schema inference.
-        If set to `None`, the full data may be scanned *(this is slow)*.
-
-    Returns
-    -------
-    DataFrame
-
-    Examples
-    --------
-    >>> data = [{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}]
-    >>> df = pl.from_dicts(data)
-    >>> df
-    shape: (3, 2)
-    ┌─────┬─────┐
-    │ a   ┆ b   │
-    │ --- ┆ --- │
-    │ i64 ┆ i64 │
-    ╞═════╪═════╡
-    │ 1   ┆ 4   │
-    │ 2   ┆ 5   │
-    │ 3   ┆ 6   │
-    └─────┴─────┘
-
-    Declaring a partial `schema` will drop the omitted columns.
-
-    >>> df = pl.from_dicts(data, schema={"a": pl.Int32})
-    >>> df
-    shape: (3, 1)
-    ┌─────┐
-    │ a   │
-    │ --- │
-    │ i32 │
-    ╞═════╡
-    │ 1   │
-    │ 2   │
-    │ 3   │
-    └─────┘
-
-    Can also use the `schema` param to extend the loaded columns with one
-    or more additional (empty) columns that are not present in the input dicts:
-
-    >>> pl.from_dicts(
-    ...     data,
-    ...     schema=["a", "b", "c", "d"],
-    ...     schema_overrides={"c": pl.Float64, "d": pl.String},
-    ... )
-    shape: (3, 4)
-    ┌─────┬─────┬──────┬──────┐
-    │ a   ┆ b   ┆ c    ┆ d    │
-    │ --- ┆ --- ┆ ---  ┆ ---  │
-    │ i64 ┆ i64 ┆ f64  ┆ str  │
-    ╞═════╪═════╪══════╪══════╡
-    │ 1   ┆ 4   ┆ null ┆ null │
-    │ 2   ┆ 5   ┆ null ┆ null │
-    │ 3   ┆ 6   ┆ null ┆ null │
-    └─────┴─────┴──────┴──────┘
-    """
-    if not data and not (schema or schema_overrides):
-        msg = "no data, cannot infer schema"
-        raise NoDataError(msg)
-
-    return pl.DataFrame(
-        data,
-        schema=schema,
-        schema_overrides=schema_overrides,
-        strict=strict,
-        infer_schema_length=infer_schema_length,
-    )
-
-
-def from_records(
-    data: Sequence[Any],
-    schema: SchemaDefinition | None = None,
-    *,
-    schema_overrides: SchemaDict | None = None,
-    strict: bool = True,
-    orient: Orientation | None = None,
-    infer_schema_length: int | None = N_INFER_DEFAULT,
-) -> DataFrame:
-    """
-    Construct a DataFrame from a sequence of sequences. This operation clones data.
-
-    Note that this is slower than creating from columnar memory.
-
-    Parameters
-    ----------
-    data : Sequence of sequences
-        Two-dimensional data represented as a sequence of sequences.
-    schema : Sequence of str, (str,DataType) pairs, or a {str:DataType,} dict
-        The DataFrame schema may be declared in several ways:
-
-        * As a dict of {name:type} pairs; if type is None, it will be auto-inferred.
-        * As a list of column names; in this case types are automatically inferred.
-        * As a list of (name,type) pairs; this is equivalent to the dictionary form.
-
-        If you supply a list of column names that does not match the names in the
-        underlying data, the names given here will overwrite them. The number
-        of names given in the schema should match the underlying data dimensions.
-    schema_overrides : dict, default None
-        Support type specification or override of one or more columns; note that
-        any dtypes inferred from the columns param will be overridden.
-    strict : bool, default True
-        Throw an error if any `data` value does not exactly match the given or inferred
-        data type for that column. If set to `False`, values that do not match the data
-        type are cast to that data type or, if casting is not possible, set to null
-        instead.
-    orient : {None, 'col', 'row'}
-        Whether to interpret two-dimensional data as columns or as rows. If None,
-        the orientation is inferred by matching the columns and data dimensions. If
-        this does not yield conclusive results, column orientation is used.
-    infer_schema_length
-        The maximum number of rows to scan for schema inference.
-        If set to `None`, the full data may be scanned *(this is slow)*.
-
-    Returns
-    -------
-    DataFrame
-
-    Examples
-    --------
-    >>> data = [[1, 2, 3], [4, 5, 6]]
-    >>> df = pl.from_records(data, schema=["a", "b"])
-    >>> df
-    shape: (3, 2)
-    ┌─────┬─────┐
-    │ a   ┆ b   │
-    │ --- ┆ --- │
-    │ i64 ┆ i64 │
-    ╞═════╪═════╡
-    │ 1   ┆ 4   │
-    │ 2   ┆ 5   │
-    │ 3   ┆ 6   │
-    └─────┴─────┘
-    """
-    if not isinstance(data, Sequence):
-        msg = (
-            f"expected data of type Sequence, got {type(data).__name__!r}"
-            "\n\nHint: Try passing your data to the DataFrame constructor instead,"
-            " e.g. `pl.DataFrame(data)`."
-        )
-        raise TypeError(msg)
-
-    return wrap_df(
-        sequence_to_pydf(
-            data,
-            schema=schema,
-            schema_overrides=schema_overrides,
-            strict=strict,
-            orient=orient,
-            infer_schema_length=infer_schema_length,
-        )
-    )
-
-
-def from_numpy(
-    data: np.ndarray[Any, Any],
-    schema: SchemaDefinition | None = None,
-    *,
-    schema_overrides: SchemaDict | None = None,
-    orient: Orientation | None = None,
-) -> DataFrame:
-    """
-    Construct a DataFrame from a NumPy ndarray. This operation clones data.
-
-    Note that this is slower than creating from columnar memory.
-
-    Parameters
-    ----------
-    data : :class:`numpy.ndarray`
-        Two-dimensional data represented as a NumPy ndarray.
-    schema : Sequence of str, (str,DataType) pairs, or a {str:DataType,} dict
-        The DataFrame schema may be declared in several ways:
-
-        * As a dict of {name:type} pairs; if type is None, it will be auto-inferred.
-        * As a list of column names; in this case types are automatically inferred.
-        * As a list of (name,type) pairs; this is equivalent to the dictionary form.
-
-        If you supply a list of column names that does not match the names in the
-        underlying data, the names given here will overwrite them. The number
-        of names given in the schema should match the underlying data dimensions.
-    schema_overrides : dict, default None
-        Support type specification or override of one or more columns; note that
-        any dtypes inferred from the columns param will be overridden.
-    orient : {None, 'col', 'row'}
-        Whether to interpret two-dimensional data as columns or as rows. If None,
-        the orientation is inferred by matching the columns and data dimensions. If
-        this does not yield conclusive results, column orientation is used.
-
-    Returns
-    -------
-    DataFrame
-
-    Examples
-    --------
-    >>> import numpy as np
-    >>> data = np.array([[1, 2, 3], [4, 5, 6]])
-    >>> df = pl.from_numpy(data, schema=["a", "b"], orient="col")
-    >>> df
-    shape: (3, 2)
-    ┌─────┬─────┐
-    │ a   ┆ b   │
-    │ --- ┆ --- │
-    │ i64 ┆ i64 │
-    ╞═════╪═════╡
-    │ 1   ┆ 4   │
-    │ 2   ┆ 5   │
-    │ 3   ┆ 6   │
-    └─────┴─────┘
-    """
-    return wrap_df(
-        numpy_to_pydf(
-            data=data,
-            schema=schema,
-            schema_overrides=schema_overrides,
-            orient=orient,
-        )
-    )
-
-
-def from_torch(
-    tensor: torch.Tensor,
-    schema: SchemaDefinition | None = None,
-    *,
-    schema_overrides: SchemaDict | None = None,
-    orient: Orientation | None = None,
-    force: bool = False,
-) -> DataFrame:
-    """
-    Construct a DataFrame from a PyTorch Tensor.
-
-    Parameters
-    ----------
-    tensor : :class:`torch.Tensor`
-        A PyTorch `Tensor` object of one or more dimensions.
-    schema : Sequence of str, (str,DataType) pairs, or a {str:DataType,} dict
-        The DataFrame schema may be declared in several ways:
-
-        * As a dict of {name:type} pairs; if type is None, it will be auto-inferred.
-        * As a list of column names; in this case types are automatically inferred.
-        * As a list of (name,type) pairs; this is equivalent to the dictionary form.
-
-        If you supply a list of column names that does not match the names in the
-        underlying data, the names given here will overwrite them. The number
-        of names given in the schema should match the underlying data dimensions.
-    schema_overrides : dict, default None
-        Support type specification or override of one or more columns; note that
-        any dtypes inferred from the columns param will be overridden.
-    orient : {None, 'col', 'row'}
-        Whether to interpret two-dimensional data as columns or as rows. If None,
-        the orientation is inferred by matching the columns and data dimensions. If
-        this does not yield conclusive results, column orientation is used.
-    force : bool
-        If False, the conversion is performed only if the Tensor is on CPU, does not
-        require grad, does not have its conjugate bit set, and is of a dtype (and
-        layout) that NumPy supports; this will typically be zero-copy. If True, it
-        is equivalent to calling `.detach().cpu().resolve_conj().resolve_neg()`
-        before passing the Tensor to Polars.
-
-    Returns
-    -------
-    DataFrame
-
-    Examples
-    --------
-    >>> import torch
-    >>> data = torch.tensor(
-    ...     [
-    ...         [1234.5, 200.0, 3000.5],
-    ...         [8000.0, 500.5, 6000.0],
-    ...     ]
-    ... )
-    >>> df = pl.from_torch(
-    ...     data,
-    ...     schema=["colx", "coly", "colz"],
-    ...     schema_overrides={"colz": pl.Float64},
-    ... )
-    >>> df
-    shape: (2, 3)
-    ┌────────┬───────┬────────┐
-    │ colx   ┆ coly  ┆ colz   │
-    │ ---    ┆ ---   ┆ ---    │
-    │ f32    ┆ f32   ┆ f64    │
-    ╞════════╪═══════╪════════╡
-    │ 1234.5 ┆ 200.0 ┆ 3000.5 │
-    │ 8000.0 ┆ 500.5 ┆ 6000.0 │
-    └────────┴───────┴────────┘
-    """
-    return wrap_df(
-        numpy_to_pydf(
-            data=tensor.numpy(force=force),
-            schema=schema,
-            schema_overrides=schema_overrides,
-            orient=orient,
-        )
-    )
-
-
-# Note: we cannot @overload the typing (Series vs DataFrame) here, as pyarrow
-# does not (yet?) implement any support for type hints; attempts to hint here
-# will simply result in mypy inferring "Any", which isn't at all useful...
-
-
-def from_arrow(
-    data: (
-        pa.Table
-        | pa.Array
-        | pa.ChunkedArray
-        | pa.RecordBatch
-        | Iterable[pa.RecordBatch | pa.Table]
-        | ArrowArrayExportable
-        | ArrowStreamExportable
-    ),
-    schema: SchemaDefinition | None = None,
-    *,
-    schema_overrides: SchemaDict | None = None,
-    rechunk: bool = True,
-) -> DataFrame | Series:
-    """
-    Create a DataFrame or Series from an Arrow Table or Array.
-
-    This operation will be zero copy for the most part. Types that are not
-    supported by Polars may be cast to the closest supported type.
-
-    Hint: You can also directly pass arrow tables to `pl.DataFrame()` / arrow
-    arrays to `pl.Series()` if the output type is known to avoid typing issues.
-
-    Parameters
-    ----------
-    data : :class:`pyarrow.Table`, :class:`pyarrow.Array`, one or more :class:`pyarrow.RecordBatch`
-        Data representing an Arrow Table, Array, sequence of RecordBatches or Tables, or other
-        object that supports the Arrow PyCapsule interface.
-    schema : Sequence of str, (str,DataType) pairs, or a {str:DataType,} dict
-        The DataFrame schema may be declared in several ways:
-
-        * As a dict of {name:type} pairs; if type is None, it will be auto-inferred.
-        * As a list of column names; in this case types are automatically inferred.
-        * As a list of (name,type) pairs; this is equivalent to the dictionary form.
-
-        If you supply a list of column names that does not match the names in the
-        underlying data, the names given here will overwrite them. The number
-        of names given in the schema should match the underlying data dimensions.
-    schema_overrides : dict, default None
-        Support type specification or override of one or more columns; note that
-        any dtypes inferred from the schema param will be overridden.
-    rechunk : bool, default True
-        Make sure that all data is in contiguous memory.
-
-    Returns
-    -------
-    DataFrame or Series
-
-    Examples
-    --------
-    Constructing a DataFrame from an Arrow Table:
-
-    >>> import pyarrow as pa
-    >>> data = pa.table({"a": [1, 2, 3], "b": [4, 5, 6]})
-    >>> pl.from_arrow(data)
-    shape: (3, 2)
-    ┌─────┬─────┐
-    │ a   ┆ b   │
-    │ --- ┆ --- │
-    │ i64 ┆ i64 │
-    ╞═════╪═════╡
-    │ 1   ┆ 4   │
-    │ 2   ┆ 5   │
-    │ 3   ┆ 6   │
-    └─────┴─────┘
-
-    Constructing a Series from an Arrow Array:
-
-    >>> import pyarrow as pa
-    >>> data = pa.array([1, 2, 3])
-    >>> pl.from_arrow(data, schema={"s": pl.Int32})
-    shape: (3,)
-    Series: 's' [i32]
-    [
-        1
-        2
-        3
-    ]
-    """  # noqa: W505
-    if is_pycapsule(data) and not _check_for_pyarrow(data):
-        return pycapsule_to_frame(
-            data,
-            schema=schema,
-            schema_overrides=schema_overrides,
-            rechunk=rechunk,
-        )
-
-    elif isinstance(data, (pa.Table, pa.RecordBatch)):
-        return wrap_df(
-            arrow_to_pydf(
-                data=data,
-                rechunk=rechunk,
-                schema=schema,
-                schema_overrides=schema_overrides,
-            )
-        )
-    elif isinstance(data, (pa.Array, pa.ChunkedArray)):
-        name = getattr(data, "_name", "") or ""
-        s = wrap_s(arrow_to_pyseries(name, data, rechunk=rechunk))
-        s = pl.DataFrame(
-            data=s,
-            schema=schema,
-            schema_overrides=schema_overrides,
-        ).to_series()
-        return s if (name or schema or schema_overrides) else s.alias("")
-
-    elif not data:
-        return pl.DataFrame(
-            schema=schema,
-            schema_overrides=schema_overrides,
-        )
-
-    if isinstance(data, Iterable):
-        pa_table = pa.Table.from_batches(
-            itertools.chain.from_iterable(
-                (b.to_batches() if isinstance(b, pa.Table) else [b]) for b in data
-            )
-        )
-        return wrap_df(
-            arrow_to_pydf(
-                data=pa_table,
-                rechunk=rechunk,
-                schema=schema,
-                schema_overrides=schema_overrides,
-            )
-        )
-
-    msg = f"expected PyArrow Table, Array, or one or more RecordBatches; got {qualified_type_name(data)!r}"
-    raise TypeError(msg)
-
-
-@overload
-def from_pandas(
-    data: pd.DataFrame,
-    *,
-    schema_overrides: SchemaDict | None = ...,
-    rechunk: bool = ...,
-    nan_to_null: bool = ...,
-    include_index: bool = ...,
-) -> DataFrame: ...
-
-
-@overload
-def from_pandas(
-    data: pd.Series[Any] | pd.Index[Any] | pd.DatetimeIndex,
-    *,
-    schema_overrides: SchemaDict | None = ...,
-    rechunk: bool = ...,
-    nan_to_null: bool = ...,
-    include_index: Literal[False] = ...,
-) -> Series: ...
-
-
-@overload
-def from_pandas(
-    data: pd.Series[Any],
-    *,
-    schema_overrides: SchemaDict | None = ...,
-    rechunk: bool = ...,
-    nan_to_null: bool = ...,
-    include_index: Literal[True],
-) -> DataFrame: ...
-
-
-def from_pandas(
-    data: pd.DataFrame | pd.Series[Any] | pd.Index[Any] | pd.DatetimeIndex,
-    *,
-    schema_overrides: SchemaDict | None = None,
-    rechunk: bool = True,
-    nan_to_null: bool = True,
-    include_index: bool = False,
-) -> DataFrame | Series:
-    """
-    Construct a Polars DataFrame or Series from a pandas DataFrame, Series, or Index.
-
-    This operation may clone data. If you want to ensure that in-place modifications
-    of the output don't affect the input, you may want to consider one of the following:
-
-    - Enable `Copy-On-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`_
-      in pandas.
-    - Call :meth:`DataFrame.clone` on the output of `from_pandas`.
-
-    This requires that :mod:`pandas` and :mod:`pyarrow` are installed.
-
-    Parameters
-    ----------
-    data : :class:`pandas.DataFrame` or :class:`pandas.Series` or :class:`pandas.Index`
-        Data represented as a pandas DataFrame, Series, or Index.
-    schema_overrides : dict, default None
-        Support override of inferred types for one or more columns.
-    rechunk : bool, default True
-        Make sure that all data is in contiguous memory.
-    nan_to_null : bool, default True
-        If data contains `NaN` values PyArrow will convert the `NaN` to `None`
-    include_index : bool, default False
-        Load any non-default pandas indexes as columns.
-
-        .. note::
-            If the input is a pandas ``DataFrame`` and has a nameless index
-            which just enumerates the rows, then it will not be included in the
-            result, regardless of this parameter. If you want to be sure to include it,
-            please call ``.reset_index()`` prior to calling this function.
-
-    Returns
-    -------
-    DataFrame
-
-    Examples
-    --------
-    Constructing a :class:`DataFrame` from a :class:`pandas.DataFrame`:
-
-    >>> import pandas as pd
-    >>> pd_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])
-    >>> df = pl.from_pandas(pd_df)
-    >>> df
-    shape: (2, 3)
-    ┌─────┬─────┬─────┐
-    │ a   ┆ b   ┆ c   │
-    │ --- ┆ --- ┆ --- │
-    │ i64 ┆ i64 ┆ i64 │
-    ╞═════╪═════╪═════╡
-    │ 1   ┆ 2   ┆ 3   │
-    │ 4   ┆ 5   ┆ 6   │
-    └─────┴─────┴─────┘
-
-    Constructing a Series from a :class:`pandas.Series`:
-
-    >>> import pandas as pd
-    >>> pd_series = pd.Series([1, 2, 3], name="pd")
-    >>> df = pl.from_pandas(pd_series)
-    >>> df
-    shape: (3,)
-    Series: 'pd' [i64]
-    [
-        1
-        2
-        3
-    ]
-    """
-    if include_index and isinstance(data, pd.Series):
-        data = data.reset_index()
-
-    if isinstance(data, (pd.Series, pd.Index, pd.DatetimeIndex)):
-        return wrap_s(pandas_to_pyseries("", data, nan_to_null=nan_to_null))
-    elif isinstance(data, pd.DataFrame):
-        return wrap_df(
-            pandas_to_pydf(
-                data,
-                schema_overrides=schema_overrides,
-                rechunk=rechunk,
-                nan_to_null=nan_to_null,
-                include_index=include_index,
-            )
-        )
-    else:
-        msg = f"expected pandas DataFrame or Series, got {qualified_type_name(data)!r}"
-        raise TypeError(msg)
-
-
-@deprecate_renamed_parameter("tbl", "data", version="0.20.17")
-def from_repr(data: str) -> DataFrame | Series:
-    """
-    Construct a Polars DataFrame or Series from its string representation.
-
-    .. versionchanged:: 0.20.17
-        The `tbl` parameter was renamed to `data`.
-
-    Parameters
-    ----------
-    data
-        A string containing a polars DataFrame or Series repr; does not need
-        to be trimmed of whitespace (or leading prompts) as the repr will be
-        found/extracted automatically.
-
-    Notes
-    -----
-    This function handles the default UTF8_FULL (and UTF8_FULL_CONDENSED) DataFrame
-    tables, with or without rounded corners. Truncated columns/rows are omitted,
-    wrapped headers are accounted for, and dtypes are automatically identified.
-
-    Currently compound/nested dtypes such as List and Struct are not supported;
-    neither are Object dtypes. The DuckDB table/relation repr is also compatible
-    with this function.
-
-    See Also
-    --------
-    polars.DataFrame.to_init_repr
-    polars.Series.to_init_repr
-
-    Examples
-    --------
-    From DataFrame table repr:
-
-    >>> df = pl.from_repr(
-    ...     '''
-    ...     Out[3]:
-    ...     shape: (1, 5)
-    ...     ┌───────────┬────────────┬───┬───────┬────────────────────────────────┐
-    ...     │ source_ac ┆ source_cha ┆ … ┆ ident ┆ timestamp                      │
-    ...     │ tor_id    ┆ nnel_id    ┆   ┆ ---   ┆ ---                            │
-    ...     │ ---       ┆ ---        ┆   ┆ str   ┆ datetime[μs, Asia/Tokyo]       │
-    ...     │ i32       ┆ i64        ┆   ┆       ┆                                │
-    ...     ╞═══════════╪════════════╪═══╪═══════╪════════════════════════════════╡
-    ...     │ 123456780 ┆ 9876543210 ┆ … ┆ a:b:c ┆ 2023-03-25 10:56:59.663053 JST │
-    ...     │ …         ┆ …          ┆ … ┆ …     ┆ …                              │
-    ...     │ 803065983 ┆ 2055938745 ┆ … ┆ x:y:z ┆ 2023-03-25 12:38:18.050545 JST │
-    ...     └───────────┴────────────┴───┴───────┴────────────────────────────────┘
-    ...     '''
-    ... )
-    >>> df
-    shape: (2, 4)
-    ┌─────────────────┬───────────────────┬───────┬────────────────────────────────┐
-    │ source_actor_id ┆ source_channel_id ┆ ident ┆ timestamp                      │
-    │ ---             ┆ ---               ┆ ---   ┆ ---                            │
-    │ i32             ┆ i64               ┆ str   ┆ datetime[μs, Asia/Tokyo]       │
-    ╞═════════════════╪═══════════════════╪═══════╪════════════════════════════════╡
-    │ 123456780       ┆ 9876543210        ┆ a:b:c ┆ 2023-03-25 10:56:59.663053 JST │
-    │ 803065983       ┆ 2055938745        ┆ x:y:z ┆ 2023-03-25 12:38:18.050545 JST │
-    └─────────────────┴───────────────────┴───────┴────────────────────────────────┘
-
-    From Series repr:
-
-    >>> s = pl.from_repr(
-    ...     '''
-    ...     shape: (3,)
-    ...     Series: 's' [bool]
-    ...     [
-    ...         true
-    ...         false
-    ...         true
-    ...     ]
-    ...     '''
-    ... )
-    >>> s.to_list()
-    [True, False, True]
-    """
-    # find DataFrame table...
-    m = re.search(r"([┌╭].*?[┘╯])", data, re.DOTALL)
-    if m is not None:
-        return _from_dataframe_repr(m)
-
-    # ...or Series in the given string
-    m = re.search(
-        pattern=r"(?:shape: (\(\d+,\))\n.*?)?Series:\s+([^\n]+)\s+\[([^\n]+)](.*)",
-        string=data,
-        flags=re.DOTALL,
-    )
-    if m is not None:
-        return _from_series_repr(m)
-
-    msg = "input string does not contain DataFrame or Series"
-    raise ValueError(msg)
-
-
-def _from_dataframe_repr(m: re.Match[str]) -> DataFrame:
-    """Reconstruct a DataFrame from a regex-matched table repr."""
-    from polars.datatypes.convert import dtype_short_repr_to_dtype
-    from polars.io.database._inference import dtype_from_database_typename
-
-    def _dtype_from_name(tp: str | None) -> PolarsDataType | None:
-        return (
-            None
-            if tp is None
-            else (
-                dtype_short_repr_to_dtype(tp)
-                or dtype_from_database_typename(tp, raise_unmatched=False)
-            )
-        )
-
-    # extract elements from table structure
-    lines = m.group().split("\n")[1:-1]
-    rows = [
-        [re.sub(r"^[\W+]*│", "", elem).strip() for elem in row]
-        for row in [re.split("[│┆|]", row.lstrip("#. ").rstrip("│ ")) for row in lines]
-        if len(row) > 1 or not re.search("├[╌┼]+┤", row[0])
-    ]
-
-    # determine beginning/end of the header block
-    table_body_start = 2
-    found_header_divider = False
-    for idx, (elem, *_) in enumerate(rows):
-        if re.match(r"^\W*[╞]", elem):
-            found_header_divider = True
-            table_body_start = idx
-            break
-
-    # handle headers with wrapped column names and determine headers/dtypes
-    header_rows = rows[:table_body_start]
-    header_block: list[Sequence[str]]
-    if (
-        not found_header_divider
-        and len(header_rows) == 2
-        and not any("---" in h for h in header_rows)
-    ):
-        header_block = list(zip(*header_rows))
-    else:
-        header_block = ["".join(h).split("---") for h in zip(*header_rows)]
-
-    dtypes: list[str | None]
-    if all(len(h) == 1 for h in header_block):
-        headers = [h[0] for h in header_block]
-        dtypes = [None] * len(headers)
-    else:
-        headers, dtypes = (list(h) for h in itertools.zip_longest(*header_block))
-
-    body = rows[table_body_start + 1 :]
-    if not headers[0] and not dtypes[0]:
-        body = [row[1:] for row in body]
-        headers = headers[1:]
-        dtypes = dtypes[1:]
-
-    no_dtypes = all(d is None for d in dtypes)
-
-    # transpose rows into columns, detect/omit truncated columns
-    coldata = list(zip(*(row for row in body if not all((e == "…") for e in row))))
-    for el in ("…", "..."):
-        if el in headers:
-            idx = headers.index(el)
-            for table_elem in (headers, dtypes):
-                table_elem.pop(idx)
-            if coldata:
-                coldata.pop(idx)
-
-    # init cols as String Series, handle "null" -> None, create schema from repr dtype
-    data = [
-        pl.Series([(None if v in ("null", "NULL") else v) for v in cd], dtype=String)
-        for cd in coldata
-    ]
-    schema = dict(zip(headers, (_dtype_from_name(d) for d in dtypes)))
-    if schema and data and (n_extend_cols := (len(schema) - len(data))) > 0:
-        empty_data = [None] * len(data[0])
-        data.extend((pl.Series(empty_data, dtype=String)) for _ in range(n_extend_cols))
-
-    for dtype in set(schema.values()):
-        if dtype is not None and (dtype.is_nested() or dtype.is_object()):
-            msg = (
-                f"`from_repr` does not support data type {dtype.base_type().__name__!r}"
-            )
-            raise NotImplementedError(msg)
-
-    # construct DataFrame from string series and cast from repr to native dtype
-    df = pl.DataFrame(data=data, orient="col", schema=list(schema))
-    if no_dtypes:
-        if df.is_empty():
-            # if no dtypes *and* empty, default to string
-            return df.with_columns(F.all().cast(String))
-        else:
-            # otherwise, take a trip through our CSV inference logic
-            if all(tp == String for tp in df.schema.values()):
-                from polars.io import read_csv
-
-                buf = io.BytesIO()
-                df.write_csv(file=buf)
-                buf.seek(0)
-                df = read_csv(
-                    buf,
-                    new_columns=df.columns,
-                    try_parse_dates=True,
-                    infer_schema_length=None,
-                )
-            return df
-    elif schema and not data:
-        return df.cast(schema)  # type: ignore[arg-type]
-    else:
-        return _cast_repr_strings_with_schema(df, schema)
-
-
-def _from_series_repr(m: re.Match[str]) -> Series:
-    """Reconstruct a Series from a regex-matched series repr."""
-    from polars.datatypes.convert import dtype_short_repr_to_dtype
-
-    shape = m.groups()[0]
-    name = m.groups()[1][1:-1]
-    length = int(shape[1:-2] if shape else -1)
-    dtype = dtype_short_repr_to_dtype(m.groups()[2])
-
-    if length == 0:
-        string_values = []
-    else:
-        string_values = [
-            v.strip()
-            for v in re.findall(r"[\s>#]*(?:\t|\s{2,})([^\n]*)\n", m.groups()[-1])
-        ]
-        if string_values == ["[", "]"]:
-            string_values = []
-        else:
-            start: int | None = None
-            end: int | None = None
-            for idx, v in enumerate(string_values):
-                if start is None and v.lstrip("#> ") == "[":
-                    start = idx
-                if v.lstrip("#> ") == "]":
-                    end = idx
-            if start is not None and end is not None:
-                string_values = string_values[start + 1 : end]
-
-    values = string_values[:length] if length > 0 else string_values
-    values = [(None if v == "null" else v) for v in values if v not in ("…", "...")]
-
-    if not values:
-        return pl.Series(name=name, values=values, dtype=dtype)
-    else:
-        srs = pl.Series(name=name, values=values, dtype=String)
-        if dtype is None:
-            return srs
-        elif dtype in (Categorical, String):
-            return srs.str.replace('^"(.*)"$', r"$1").cast(dtype)
-
-        return _cast_repr_strings_with_schema(
-            srs.to_frame(), schema={srs.name: dtype}
-        ).to_series()
-
-
-def from_dataframe(
-    df: SupportsInterchange | ArrowArrayExportable | ArrowStreamExportable,
-    *,
-    allow_copy: bool | None = None,
-    rechunk: bool = True,
-) -> DataFrame:
-    """
-    Build a Polars DataFrame from any dataframe supporting the PyCapsule Interface.
-
-    .. versionchanged:: 1.23.0
-
-        `from_dataframe` uses the PyCapsule Interface instead of the Dataframe
-        Interchange Protocol for conversion, only using the latter as a fallback.
-
-    Parameters
-    ----------
-    df
-        Object supporting the dataframe PyCapsule Interface.
-    allow_copy
-        Allow memory to be copied to perform the conversion. If set to False, may cause
-        conversions that are not zero-copy to fail.
-
-        .. deprecated: 1.23.0
-            `allow_copy` is deprecated and will be removed in a future version.
-    rechunk : bool, default True
-        Make sure that all data is in contiguous memory.
-
-    Notes
-    -----
-    - Details on the PyCapsule Interface:
-      https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html.
-    - Details on the Python dataframe interchange protocol:
-      https://data-apis.org/dataframe-protocol/latest/index.html.
-    Using a dedicated function like :func:`from_pandas` or :func:`from_arrow` is
-    a more efficient method of conversion.
-
-    Examples
-    --------
-    Convert a pandas dataframe to Polars.
-
-    >>> import pandas as pd
-    >>> df_pd = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["x", "y"]})
-    >>> pl.from_dataframe(df_pd)
-    shape: (2, 3)
-    ┌─────┬─────┬─────┐
-    │ a   ┆ b   ┆ c   │
-    │ --- ┆ --- ┆ --- │
-    │ i64 ┆ f64 ┆ str │
-    ╞═════╪═════╪═════╡
-    │ 1   ┆ 3.0 ┆ x   │
-    │ 2   ┆ 4.0 ┆ y   │
-    └─────┴─────┴─────┘
-    """
-    if allow_copy is not None:
-        issue_deprecation_warning(
-            "`allow_copy` is deprecated and will be removed in a future version.",
-            version="1.23",
-        )
-    else:
-        allow_copy = True
-    if is_pycapsule(df):
-        try:
-            return pycapsule_to_frame(df, rechunk=rechunk)
-        except Exception as exc:
-            issue_warning(
-                f"Failed to convert dataframe using PyCapsule Interface with exception: {exc!r}.\n"
-                "Falling back to Dataframe Interchange Protocol, which is known to be less robust.",
-                UserWarning,
-            )
-    from polars.interchange.from_dataframe import from_dataframe
-
-    result = from_dataframe(df, allow_copy=allow_copy)  # type: ignore[arg-type]
-    if rechunk:
-        return result.rechunk()
-    return result
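Judging by the file list above (every `polars/*.py` module removed, RECORD shrinking from 203 entries to 6), the b4 wheel appears to stop vendoring the pure-Python `polars` package, keeping essentially only the compiled `_polars_runtime_compat.abi3.so` and metadata. The deleted helpers mirror the public polars conversion entry points, so their documented behavior can still be exercised through the main `polars` package. A minimal sketch, assuming `polars` is installed, with names and behavior taken from the deleted docstrings above:

# assumes the main `polars` package; the helpers removed from this wheel are
# the same public conversion entry points documented in the deleted module
import polars as pl

df = pl.from_dict({"a": [1, 2], "b": [3, 4]})  # from_dict example, as documented

# per the from_repr docstring, a printed table parses back into a DataFrame,
# recovering column names and dtypes from the header block
assert pl.from_repr(repr(df)).equals(df)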