Flowfile 0.3.2-py3-none-any.whl → 0.3.3-py3-none-any.whl
This diff shows the changes between package versions as published to their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of Flowfile has been flagged as possibly problematic.
- flowfile/__init__.py +2 -1
- flowfile/web/__init__.py +3 -0
- {flowfile-0.3.2.dist-info → flowfile-0.3.3.dist-info}/METADATA +1 -1
- {flowfile-0.3.2.dist-info → flowfile-0.3.3.dist-info}/RECORD +46 -35
- flowfile_core/configs/__init__.py +15 -4
- flowfile_core/configs/settings.py +5 -3
- flowfile_core/configs/utils.py +18 -0
- flowfile_core/flowfile/FlowfileFlow.py +13 -18
- flowfile_core/flowfile/database_connection_manager/db_connections.py +1 -1
- flowfile_core/flowfile/flow_data_engine/flow_data_engine.py +54 -17
- flowfile_core/flowfile/flow_data_engine/flow_file_column/main.py +42 -9
- flowfile_core/flowfile/flow_data_engine/flow_file_column/utils.py +42 -3
- flowfile_core/flowfile/flow_data_engine/polars_code_parser.py +2 -1
- flowfile_core/flowfile/flow_data_engine/sample_data.py +25 -7
- flowfile_core/flowfile/flow_data_engine/subprocess_operations/subprocess_operations.py +4 -3
- flowfile_core/flowfile/flow_data_engine/utils.py +1 -0
- flowfile_core/flowfile/flow_node/flow_node.py +2 -1
- flowfile_core/flowfile/sources/external_sources/airbyte_sources/models.py +2 -2
- flowfile_core/flowfile/sources/external_sources/sql_source/sql_source.py +1 -1
- flowfile_core/flowfile/utils.py +34 -3
- flowfile_core/main.py +2 -3
- flowfile_core/routes/secrets.py +1 -1
- flowfile_core/schemas/input_schema.py +10 -4
- flowfile_core/schemas/transform_schema.py +25 -47
- flowfile_frame/__init__.py +11 -4
- flowfile_frame/adding_expr.py +280 -0
- flowfile_frame/config.py +9 -0
- flowfile_frame/expr.py +301 -83
- flowfile_frame/expr.pyi +2174 -0
- flowfile_frame/expr_name.py +258 -0
- flowfile_frame/flow_frame.py +587 -1002
- flowfile_frame/flow_frame.pyi +336 -0
- flowfile_frame/flow_frame_methods.py +617 -0
- flowfile_frame/group_frame.py +89 -42
- flowfile_frame/join.py +1 -2
- flowfile_frame/lazy.py +704 -0
- flowfile_frame/lazy_methods.py +201 -0
- flowfile_frame/list_name_space.py +324 -0
- flowfile_frame/selectors.py +3 -0
- flowfile_frame/series.py +70 -0
- flowfile_frame/utils.py +80 -4
- {flowfile-0.3.2.dist-info → flowfile-0.3.3.dist-info}/LICENSE +0 -0
- {flowfile-0.3.2.dist-info → flowfile-0.3.3.dist-info}/WHEEL +0 -0
- {flowfile-0.3.2.dist-info → flowfile-0.3.3.dist-info}/entry_points.txt +0 -0
- /flowfile_core/{secrets → secret_manager}/__init__.py +0 -0
- /flowfile_core/{secrets/secrets.py → secret_manager/secret_manager.py} +0 -0
flowfile_frame/flow_frame.pyi (new file)
@@ -0,0 +1,336 @@
+# This file was auto-generated to provide type information for FlowFrame
+# DO NOT MODIFY THIS FILE MANUALLY
+import collections
+import typing
+import inspect
+from typing import List, Dict, Optional, Union, ForwardRef
+import polars as pl
+from polars.lazyframe.frame import *
+from polars._typing import SchemaDict
+from polars.type_aliases import ColumnNameOrSelector, FillNullStrategy
+
+import flowfile_frame
+from flowfile_core.flowfile.flow_node.flow_node import FlowNode
+from flowfile_core.flowfile.FlowfileFlow import FlowGraph
+from flowfile_frame import group_frame
+from flowfile_frame.expr import Expr, Column
+from flowfile_frame.selectors import Selector
+
+T = TypeVar('T')
+FlowFrameT = TypeVar('FlowFrameT', bound='FlowFrame')
+# Define NoneType to handle type hints with None
+NoneType = type(None)
+
+# Module-level functions
+def can_be_expr(param: inspect.Parameter) -> bool: ...
+def generate_node_id() -> int: ...
+def get_method_name_from_code(code: str) -> str | None: ...
+def _contains_lambda_pattern(text: str) -> bool: ...
+def _to_string_val(v) -> str: ...
+def _extract_expr_parts(expr_obj) -> tuple[str, str]: ...
+def _check_ok_for_serialization(method_name: str = None, polars_expr: pl.Expr | None = None, group_expr: pl.Expr | None = None) -> None: ...
+
+class FlowFrame:
+    data: LazyFrame
+    flow_graph: FlowGraph
+    node_id: int
+    parent_node_id: Optional[int]
+
+    # This special method determines how the object behaves in boolean contexts.
+    def __bool__(self, ) -> Any: ...
+
+    # This special method enables the 'in' operator to work with FlowFrame objects.
+    def __contains__(self, key) -> Any: ...
+
+    def __eq__(self, other: object) -> typing.NoReturn: ...
+
+    def __ge__(self, other: Any) -> typing.NoReturn: ...
+
+    def __gt__(self, other: Any) -> typing.NoReturn: ...
+
+    # Initialize the FlowFrame with data and graph references.
+    def __init__(self, data: Union[LazyFrame, Mapping, Sequence, ForwardRef('np.ndarray[Any, Any]'), ForwardRef('pa.Table'), ForwardRef('pd.DataFrame'), ForwardRef('ArrowArrayExportable'), ForwardRef('ArrowStreamExportable')]=None, schema: Union[Mapping, Sequence, NoneType]=None, schema_overrides: collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType')]] | None=None, strict: bool=True, orient: Union[Literal, NoneType]=None, infer_schema_length: int | None=100, nan_to_null: bool=False, flow_graph=None, node_id=None, parent_node_id=None) -> Any: ...
+
+    def __le__(self, other: Any) -> typing.NoReturn: ...
+
+    def __lt__(self, other: Any) -> typing.NoReturn: ...
+
+    def __ne__(self, other: object) -> typing.NoReturn: ...
+
+    # Create a new FlowFrame instance.
+    def __new__(self, cls, data: Union[LazyFrame, Mapping, Sequence, ForwardRef('np.ndarray[Any, Any]'), ForwardRef('pa.Table'), ForwardRef('pd.DataFrame'), ForwardRef('ArrowArrayExportable'), ForwardRef('ArrowStreamExportable')]=None, schema: Union[Mapping, Sequence, NoneType]=None, schema_overrides: collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType')]] | None=None, strict: bool=True, orient: Union[Literal, NoneType]=None, infer_schema_length: int | None=100, nan_to_null: bool=False, flow_graph=None, node_id=None, parent_node_id=None) -> Any: ...
+
+    def __repr__(self, ) -> Any: ...
+
+    # Helper method to add a connection between nodes
+    def _add_connection(self, from_id, to_id, input_type: typing.Literal['main', 'left', 'right']='main') -> Any: ...
+
+    def _add_number_of_records(self, new_node_id: int, description: str=None) -> 'FlowFrame': ...
+
+    def _add_polars_code(self, new_node_id: int, code: str, description: str=None, depending_on_ids: Union[List, NoneType]=None, convertable_to_code: bool=True, method_name: str=None, polars_expr: Union[Expr, List, NoneType]=None, group_expr: Union[Expr, List, NoneType]=None, kwargs_expr: Union[Dict, NoneType]=None, group_kwargs: Union[Dict, NoneType]=None) -> Any: ...
+
+    def _comparison_error(self, operator: str) -> typing.NoReturn: ...
+
+    # Helper method to create a new FlowFrame that's a child of this one
+    def _create_child_frame(self, new_node_id) -> 'FlowFrame': ...
+
+    # Detect if the expression is a cum_count operation and use record_id if possible.
+    def _detect_cum_count_record_id(self, expr: Any, new_node_id: int, description: Union[str, NoneType]=None) -> 'FlowFrame': ...
+
+    # Generates the `input_df.sort(...)` Polars code string using pure expression strings.
+    def _generate_sort_polars_code(self, pure_sort_expr_strs: typing.List[str], descending_values: typing.List[bool], nulls_last_values: typing.List[bool], multithreaded: bool, maintain_order: bool) -> str: ...
+
+    def _with_flowfile_formula(self, flowfile_formula: str, output_column_name, description: str=None) -> 'FlowFrame': ...
+
+    # Approximate count of unique values.
+    def approx_n_unique(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Return the `k` smallest rows.
+    def bottom_k(self, k: int, by: IntoExpr | Iterable[IntoExpr], reverse: bool | Sequence[bool]=False, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    def cache(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Cast LazyFrame column(s) to the specified dtype(s).
+    def cast(self, dtypes: Mapping[ColumnNameOrSelector | PolarsDataType, PolarsDataType | PythonDataType] | PolarsDataType, strict: bool=True, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Create an empty copy of the current LazyFrame, with zero to 'n' rows.
+    def clear(self, n: int=0, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Create a copy of this LazyFrame.
+    def clone(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Collect lazy data into memory.
+    def collect(self, *args, **kwargs) -> Any: ...
+
+    # Collect DataFrame asynchronously in thread pool.
+    def collect_async(self, gevent: bool=False, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, no_optimization: bool=False, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, engine: EngineType='auto', _check_order: bool=True) -> Awaitable[DataFrame] | _GeventDataFrameResult[DataFrame]: ...
+
+    # Resolve the schema of this LazyFrame.
+    def collect_schema(self, ) -> Schema: ...
+
+    # Combine multiple FlowFrames into a single FlowFrame.
+    def concat(self, other: Union[ForwardRef('FlowFrame'), List], how: str='vertical', rechunk: bool=False, parallel: bool=True, description: str=None) -> 'FlowFrame': ...
+
+    # Return the number of non-null elements for each column.
+    def count(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Simple naive implementation of creating the frame from any type. It converts the data to a polars frame,
+    def create_from_any_type(self, data: Union[Mapping, Sequence, ForwardRef('np.ndarray[Any, Any]'), ForwardRef('pa.Table'), ForwardRef('pd.DataFrame'), ForwardRef('ArrowArrayExportable'), ForwardRef('ArrowStreamExportable')]=None, schema: Union[Mapping, Sequence, NoneType]=None, schema_overrides: collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType')]] | None=None, strict: bool=True, orient: Union[Literal, NoneType]=None, infer_schema_length: int | None=100, nan_to_null: bool=False, flow_graph=None, node_id=None, parent_node_id=None, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Creates a summary of statistics for a LazyFrame, returning a DataFrame.
+    def describe(self, percentiles: Sequence[float] | float | None=(0.25, 0.5, 0.75), interpolation: RollingInterpolationMethod='nearest') -> DataFrame: ...
+
+    # Read a logical plan from a file to construct a LazyFrame.
+    def deserialize(self, source: str | Path | IOBase, format: SerializationFormat='binary', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Remove columns from the DataFrame.
+    def drop(self, *columns, strict: bool=True, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Drop all rows that contain one or more NaN values.
+    def drop_nans(self, subset: ColumnNameOrSelector | Collection[ColumnNameOrSelector] | None=None, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Drop all rows that contain one or more null values.
+    def drop_nulls(self, subset: ColumnNameOrSelector | Collection[ColumnNameOrSelector] | None=None, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Create a string representation of the query plan.
+    def explain(self, format: ExplainFormat='plain', optimized: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, streaming: bool=False, engine: EngineType='auto', tree_format: bool | None=None, _check_order: bool=True) -> str: ...
+
+    # Explode the dataframe to long format by exploding the given columns.
+    def explode(self, columns: Union[str, Column, Iterable], *more_columns, description: str=None) -> 'FlowFrame': ...
+
+    # Collect a small number of rows for debugging purposes.
+    def fetch(self, n_rows: int=500, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, no_optimization: bool=False, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True) -> DataFrame: ...
+
+    # Fill floating point NaN values.
+    def fill_nan(self, value: int | float | Expr | None, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Fill null values using the specified value or strategy.
+    def fill_null(self, value: Any | Expr | None=None, strategy: FillNullStrategy | None=None, limit: int | None=None, matches_supertype: bool=True, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Filter rows based on a predicate.
+    def filter(self, *predicates, flowfile_formula: Union[str, NoneType]=None, description: Union[str, NoneType]=None, **constraints) -> 'FlowFrame': ...
+
+    # Get the first row of the DataFrame.
+    def first(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Take every nth row in the LazyFrame and return as a new LazyFrame.
+    def gather_every(self, n: int, offset: int=0, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    def get_node_settings(self, description: Optional[str] = None) -> FlowNode: ...
+
+    # Start a group by operation.
+    def group_by(self, *by, description: Optional[str] = None, maintain_order: bool = False, **named_by) -> group_frame.GroupByFrame: ...
+
+    # Group based on a time value (or index value of type Int32, Int64).
+    def group_by_dynamic(self, index_column: IntoExpr, every: str | timedelta, period: str | timedelta | None=None, offset: str | timedelta | None=None, include_boundaries: bool=False, closed: ClosedInterval='left', label: Label='left', group_by: IntoExpr | Iterable[IntoExpr] | None=None, start_by: StartBy='window', description: Optional[str] = None) -> LazyGroupBy: ...
+
+    def head(self, n: int, description: str=None) -> 'FlowFrame': ...
+
+    # Inspect a node in the computation graph.
+    def inspect(self, fmt: str='{}', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Interpolate intermediate values. The interpolation method is linear.
+    def interpolate(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Add a join operation to the Logical Plan.
+    def join(self, other, on: Union[List, str, Column]=None, how: str='inner', left_on: Union[List, str, Column]=None, right_on: Union[List, str, Column]=None, suffix: str='_right', validate: str=None, nulls_equal: bool=False, coalesce: bool=None, maintain_order: typing.Literal[None, 'left', 'right', 'left_right', 'right_left']=None, description: str=None) -> 'FlowFrame': ...
+
+    # Perform an asof join.
+    def join_asof(self, other: LazyFrame, left_on: str | None | Expr=None, right_on: str | None | Expr=None, on: str | None | Expr=None, by_left: str | Sequence[str] | None=None, by_right: str | Sequence[str] | None=None, by: str | Sequence[str] | None=None, strategy: AsofJoinStrategy='backward', suffix: str='_right', tolerance: str | int | float | timedelta | None=None, allow_parallel: bool=True, force_parallel: bool=False, coalesce: bool=True, allow_exact_matches: bool=True, check_sortedness: bool=True, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Perform a join based on one or multiple (in)equality predicates.
+    def join_where(self, other: LazyFrame, *predicates, suffix: str='_right', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Get the last row of the DataFrame.
+    def last(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Return lazy representation, i.e. itself.
+    def lazy(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    def limit(self, n: int, description: str=None) -> 'FlowFrame': ...
+
+    # Apply a custom function.
+    def map_batches(self, function: Callable[[DataFrame], DataFrame], predicate_pushdown: bool=True, projection_pushdown: bool=True, slice_pushdown: bool=True, no_optimizations: bool=False, schema: None | SchemaDict=None, validate_output_schema: bool=True, streamable: bool=False, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame to their maximum value.
+    def max(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame to their mean value.
+    def mean(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame to their median value.
+    def median(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Unpivot a DataFrame from wide to long format.
+    def melt(self, id_vars: ColumnNameOrSelector | Sequence[ColumnNameOrSelector] | None=None, value_vars: ColumnNameOrSelector | Sequence[ColumnNameOrSelector] | None=None, variable_name: str | None=None, value_name: str | None=None, streamable: bool=True, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Take two sorted DataFrames and merge them by the sorted key.
+    def merge_sorted(self, other: LazyFrame, key: str, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame to their minimum value.
+    def min(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame as the sum of their null value count.
+    def null_count(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Offers a structured way to apply a sequence of user-defined functions (UDFs).
+    def pipe(self, function: Callable[Concatenate[LazyFrame, P], T], *args, **kwargs) -> T: ...
+
+    # Pivot a DataFrame from long to wide format.
+    def pivot(self, on: str | list[str], index: str | list[str] | None=None, values: str | list[str] | None=None, aggregate_function: str | None='first', maintain_order: bool=True, sort_columns: bool=False, separator: str='_', description: str=None) -> 'FlowFrame': ...
+
+    # Profile a LazyFrame.
+    def profile(self, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, no_optimization: bool=False, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, show_plot: bool=False, truncate_nodes: int=0, figsize: tuple[int, int]=(18, 8), engine: EngineType='auto', _check_order: bool=True, **_kwargs) -> tuple[DataFrame, DataFrame]: ...
+
+    # Aggregate the columns in the LazyFrame to their quantile value.
+    def quantile(self, quantile: float | Expr, interpolation: RollingInterpolationMethod='nearest', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Run a query remotely on Polars Cloud.
+    def remote(self, context: pc.ComputeContext | None=None, plan_type: pc._typing.PlanTypePreference='dot', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Remove rows, dropping those that match the given predicate expression(s).
+    def remove(self, *predicates, **constraints) -> 'FlowFrame': ...
+
+    # Rename column names.
+    def rename(self, mapping: dict[str, str] | Callable[[str], str], strict: bool=True, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Reverse the DataFrame.
+    def reverse(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Create rolling groups based on a temporal or integer column.
+    def rolling(self, index_column: IntoExpr, period: str | timedelta, offset: str | timedelta | None=None, closed: ClosedInterval='right', group_by: IntoExpr | Iterable[IntoExpr] | None=None, description: Optional[str] = None) -> LazyGroupBy: ...
+
+    # Save the graph
+    def save_graph(self, file_path: str, auto_arrange: bool=True, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Select columns from the frame.
+    def select(self, *columns, description: Union[str, NoneType]=None) -> 'FlowFrame': ...
+
+    # Select columns from this LazyFrame.
+    def select_seq(self, *exprs, **named_exprs) -> 'FlowFrame': ...
+
+    # Serialize the logical plan of this LazyFrame to a file or string in JSON format.
+    def serialize(self, file: IOBase | str | Path | None=None, format: SerializationFormat='binary', description: Optional[str] = None) -> bytes | str | None: ...
+
+    # Flag a column as sorted.
+    def set_sorted(self, column: str, descending: bool=False, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Shift values by the given number of indices.
+    def shift(self, n: int | IntoExprColumn=1, fill_value: IntoExpr | None=None, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Show a plot of the query plan.
+    def show_graph(self, optimized: bool=True, show: bool=True, output_path: str | Path | None=None, raw_output: bool=False, figsize: tuple[float, float]=(16.0, 12.0), type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, streaming: bool=False, engine: EngineType='auto', _check_order: bool=True) -> str | None: ...
+
+    # Write the data to a CSV file.
+    def sink_csv(self, file: str, *args, separator: str=',', encoding: str='utf-8', description: str=None) -> 'FlowFrame': ...
+
+    # Evaluate the query in streaming mode and write to an IPC file.
+    def sink_ipc(self, path: str | Path, compression: IpcCompression | None='zstd', compat_level: CompatLevel | None=None, maintain_order: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, collapse_joins: bool=True, no_optimization: bool=False, storage_options: dict[str, Any] | None=None, credential_provider: CredentialProviderFunction | Literal['auto'] | None='auto', retries: int=2, sync_on_close: SyncOnCloseMethod | None=None, mkdir: bool=False, lazy: bool=False, engine: EngineType='auto', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Evaluate the query in streaming mode and write to an NDJSON file.
+    def sink_ndjson(self, path: str | Path, maintain_order: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, collapse_joins: bool=True, no_optimization: bool=False, storage_options: dict[str, Any] | None=None, credential_provider: CredentialProviderFunction | Literal['auto'] | None='auto', retries: int=2, sync_on_close: SyncOnCloseMethod | None=None, mkdir: bool=False, lazy: bool=False, engine: EngineType='auto', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Evaluate the query in streaming mode and write to a Parquet file.
+    def sink_parquet(self, path: str | Path, compression: str='zstd', compression_level: int | None=None, statistics: bool | str | dict[str, bool]=True, row_group_size: int | None=None, data_page_size: int | None=None, maintain_order: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, collapse_joins: bool=True, no_optimization: bool=False, storage_options: dict[str, Any] | None=None, credential_provider: CredentialProviderFunction | Literal['auto'] | None='auto', retries: int=2, sync_on_close: SyncOnCloseMethod | None=None, mkdir: bool=False, lazy: bool=False, engine: EngineType='auto', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Get a slice of this DataFrame.
+    def slice(self, offset: int, length: int | None=None, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Sort the dataframe by the given columns.
+    def sort(self, by: Union[List, Expr, str], *more_by, descending: Union[bool, List]=False, nulls_last: Union[bool, List]=False, multithreaded: bool=True, maintain_order: bool=False, description: Union[str, NoneType]=None) -> 'FlowFrame': ...
+
+    # Execute a SQL query against the LazyFrame.
+    def sql(self, query: str, table_name: str='self', description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame to their standard deviation value.
+    def std(self, ddof: int=1, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame to their sum value.
+    def sum(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Get the last `n` rows.
+    def tail(self, n: int=5, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Split text in a column into multiple rows.
+    def text_to_rows(self, column: str | flowfile_frame.expr.Column, output_column: str=None, delimiter: str=None, split_by_column: str=None, description: str=None) -> 'FlowFrame': ...
+
+    # Get the underlying ETL graph.
+    def to_graph(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Return the `k` largest rows.
+    def top_k(self, k: int, by: IntoExpr | Iterable[IntoExpr], reverse: bool | Sequence[bool]=False, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Drop duplicate rows from this dataframe.
+    def unique(self, subset: Union[str, ForwardRef('Expr'), List]=None, keep: typing.Literal['first', 'last', 'any', 'none']='any', maintain_order: bool=False, description: str=None) -> 'FlowFrame': ...
+
+    # Decompose struct columns into separate columns for each of their fields.
+    def unnest(self, columns: ColumnNameOrSelector | Collection[ColumnNameOrSelector], *more_columns, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Unpivot a DataFrame from wide to long format.
+    def unpivot(self, on: list[str | flowfile_frame.selectors.Selector] | str | None | flowfile_frame.selectors.Selector=None, index: list[str] | str | None=None, variable_name: str='variable', value_name: str='value', description: str=None) -> 'FlowFrame': ...
+
+    # Update the values in this `LazyFrame` with the values in `other`.
+    def update(self, other: LazyFrame, on: str | Sequence[str] | None=None, how: Literal['left', 'inner', 'full']='left', left_on: str | Sequence[str] | None=None, right_on: str | Sequence[str] | None=None, include_nulls: bool=False, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Aggregate the columns in the LazyFrame to their variance value.
+    def var(self, ddof: int=1, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Add or replace columns in the DataFrame.
+    def with_columns(self, exprs: Union[Expr, List[Union[Expr, None]]] = None, *, flowfile_formulas: Optional[List[str]] = None, output_column_names: Optional[List[str]] = None, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Add columns to this LazyFrame.
+    def with_columns_seq(self, *exprs, **named_exprs) -> 'FlowFrame': ...
+
+    # Add an external context to the computation graph.
+    def with_context(self, other: Self | list[Self], description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Add a column at index 0 that counts the rows.
+    def with_row_count(self, name: str='row_nr', offset: int=0, description: Optional[str] = None) -> 'FlowFrame': ...
+
+    # Add a row index as the first column in the DataFrame.
+    def with_row_index(self, name: str='index', offset: int=0, description: str=None) -> 'FlowFrame': ...
+
+    def write_csv(self, file: str | os.PathLike, separator: str=',', encoding: str='utf-8', description: str=None, convert_to_absolute_path: bool=True, **kwargs) -> 'FlowFrame': ...
+
+    # Write the data to a Parquet file. Creates a standard Output node if only
+    def write_parquet(self, path: str | os.PathLike, description: str=None, convert_to_absolute_path: bool=True, **kwargs) -> 'FlowFrame': ...
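The stub above describes the public surface of the new FlowFrame class: a polars LazyFrame-style API in which each chained call also records a node in a FlowGraph, with the extra `description` parameter labeling that node in the ETL graph. Below is a minimal usage sketch based only on these signatures; it assumes that `flowfile_frame` re-exports `FlowFrame` and a polars-style `col` helper, and that `GroupByFrame.agg` mirrors polars' `LazyGroupBy.agg`, none of which this diff confirms.

import flowfile_frame as ff  # assumes FlowFrame and col are re-exported; not confirmed by this diff

# __init__ mirrors pl.LazyFrame construction, so a plain dict should work.
frame = ff.FlowFrame({"city": ["NY", "SF", "NY"], "sales": [10, 20, 30]})

# Each chained call should add a node to the underlying FlowGraph; the
# optional `description` argument labels that node in the ETL graph.
result = (
    frame
    .filter(ff.col("sales") > 10, description="drop small orders")
    .group_by("city", description="aggregate per city")
    .agg(ff.col("sales").sum())  # GroupByFrame.agg assumed to mirror polars
    .sort("city")
)

print(result.collect())  # collect() materializes the lazy result via polars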