Flowfile: flowfile-0.3.2-py3-none-any.whl → flowfile-0.3.3.1-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
Files changed (46)
  1. flowfile/__init__.py +3 -2
  2. flowfile/web/__init__.py +3 -0
  3. {flowfile-0.3.2.dist-info → flowfile-0.3.3.1.dist-info}/METADATA +4 -3
  4. {flowfile-0.3.2.dist-info → flowfile-0.3.3.1.dist-info}/RECORD +46 -35
  5. flowfile_core/configs/__init__.py +15 -4
  6. flowfile_core/configs/settings.py +5 -3
  7. flowfile_core/configs/utils.py +18 -0
  8. flowfile_core/flowfile/FlowfileFlow.py +13 -18
  9. flowfile_core/flowfile/database_connection_manager/db_connections.py +1 -1
  10. flowfile_core/flowfile/flow_data_engine/flow_data_engine.py +54 -17
  11. flowfile_core/flowfile/flow_data_engine/flow_file_column/main.py +42 -9
  12. flowfile_core/flowfile/flow_data_engine/flow_file_column/utils.py +42 -3
  13. flowfile_core/flowfile/flow_data_engine/polars_code_parser.py +2 -1
  14. flowfile_core/flowfile/flow_data_engine/sample_data.py +25 -7
  15. flowfile_core/flowfile/flow_data_engine/subprocess_operations/subprocess_operations.py +4 -3
  16. flowfile_core/flowfile/flow_data_engine/utils.py +1 -0
  17. flowfile_core/flowfile/flow_node/flow_node.py +2 -1
  18. flowfile_core/flowfile/sources/external_sources/airbyte_sources/models.py +2 -2
  19. flowfile_core/flowfile/sources/external_sources/sql_source/sql_source.py +1 -1
  20. flowfile_core/flowfile/utils.py +34 -3
  21. flowfile_core/main.py +2 -3
  22. flowfile_core/routes/secrets.py +1 -1
  23. flowfile_core/schemas/input_schema.py +10 -4
  24. flowfile_core/schemas/transform_schema.py +25 -47
  25. flowfile_frame/__init__.py +11 -4
  26. flowfile_frame/adding_expr.py +280 -0
  27. flowfile_frame/config.py +9 -0
  28. flowfile_frame/expr.py +301 -83
  29. flowfile_frame/expr.pyi +2174 -0
  30. flowfile_frame/expr_name.py +258 -0
  31. flowfile_frame/flow_frame.py +584 -1002
  32. flowfile_frame/flow_frame.pyi +368 -0
  33. flowfile_frame/flow_frame_methods.py +617 -0
  34. flowfile_frame/group_frame.py +89 -42
  35. flowfile_frame/join.py +1 -2
  36. flowfile_frame/lazy.py +704 -0
  37. flowfile_frame/lazy_methods.py +201 -0
  38. flowfile_frame/list_name_space.py +324 -0
  39. flowfile_frame/selectors.py +3 -0
  40. flowfile_frame/series.py +70 -0
  41. flowfile_frame/utils.py +80 -4
  42. {flowfile-0.3.2.dist-info → flowfile-0.3.3.1.dist-info}/LICENSE +0 -0
  43. {flowfile-0.3.2.dist-info → flowfile-0.3.3.1.dist-info}/WHEEL +0 -0
  44. {flowfile-0.3.2.dist-info → flowfile-0.3.3.1.dist-info}/entry_points.txt +0 -0
  45. /flowfile_core/{secrets → secret_manager}/__init__.py +0 -0
  46. /flowfile_core/{secrets/secrets.py → secret_manager/secret_manager.py} +0 -0
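Entries 45 and 46 record a module rename: flowfile_core/secrets becomes flowfile_core/secret_manager, and secrets.py becomes secret_manager.py. Code importing the old path will break on upgrade. A minimal compatibility sketch, assuming the renamed module keeps the same public names (the imported alias below is illustrative, not confirmed by this diff):

# Hypothetical shim for the secrets -> secret_manager rename (entries 45-46).
try:
    # New layout, flowfile >= 0.3.3.1
    from flowfile_core.secret_manager import secret_manager as secrets_module
except ImportError:
    # Old layout, flowfile <= 0.3.2
    from flowfile_core.secrets import secrets as secrets_module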
flowfile_frame/flow_frame.pyi (new file, entry 32)
@@ -0,0 +1,368 @@
+ # Standard library imports
+ import collections
+ import inspect
+ import os
+ import sys
+ import typing
+ from io import IOBase
+ from typing import List, Optional, ForwardRef
+ from collections.abc import Awaitable
+
+ # Third-party imports
+ import polars as pl
+ from polars._typing import * # Consider specifying needed imports
+ from polars._utils.async_ import _GeventDataFrameResult
+ from polars.dependencies import polars_cloud as pc
+ from polars.io.cloud import CredentialProviderFunction
+ from polars.lazyframe.frame import LazyGroupBy
+ from polars.type_aliases import (Schema, IntoExpr, ClosedInterval, Label, StartBy, RollingInterpolationMethod, IpcCompression, CompatLevel, SyncOnCloseMethod, ExplainFormat, EngineType, SerializationFormat, AsofJoinStrategy)
+
+ # Local application/library specific imports
+ import flowfile_frame
+ from flowfile_core.flowfile.FlowfileFlow import FlowGraph
+ from flowfile_core.flowfile.flow_node.flow_node import FlowNode
+ from flowfile_frame import group_frame
+ from flowfile_frame.expr import Expr
+
+ # Conditional imports
+ if sys.version_info >= (3, 10):
+ pass # from typing import ParamSpec, Concatenate (if needed)
+ else:
+ from typing_extensions import Concatenate
+
+ T = TypeVar('T')
+ P = typing.ParamSpec('P')
+ LazyFrameT = TypeVar('LazyFrameT', bound='LazyFrame')
+ FlowFrameT = TypeVar('FlowFrameT', bound='FlowFrame')
+ Self = TypeVar('Self', bound='FlowFrame')
+ NoneType = type(None)
+
+ # Module-level functions (example from your input)
+ def can_be_expr(param: inspect.Parameter) -> bool: ...
+ def generate_node_id() -> int: ...
+ def get_method_name_from_code(code: str) -> str | None: ...
+ def _contains_lambda_pattern(text: str) -> bool: ...
+ def _to_string_val(v) -> str: ...
+ def _extract_expr_parts(expr_obj) -> tuple[str, str]: ...
+ def _check_ok_for_serialization(method_name: str = None, polars_expr: pl.Expr | None = None, group_expr: pl.Expr | None = None) -> None: ...
+
+ class FlowFrame:
+ data: LazyFrame
+ flow_graph: FlowGraph
+ node_id: int
+ parent_node_id: Optional[int]
+
+ # This special method determines how the object behaves in boolean contexts.
+ def __bool__(self) -> Any: ...
+
+ # This special method enables the 'in' operator to work with FlowFrame objects.
+ def __contains__(self, key) -> Any: ...
+
+ def __eq__(self, other: object) -> typing.NoReturn: ...
+
+ def __ge__(self, other: Any) -> typing.NoReturn: ...
+
+ def __gt__(self, other: Any) -> typing.NoReturn: ...
+
+ # Initialize the FlowFrame with data and graph references.
+ def __init__(self, data: typing.Union[LazyFrame, collections.abc.Mapping[str, typing.Union[collections.abc.Sequence[object], collections.abc.Mapping[str, collections.abc.Sequence[object]], ForwardRef('Series')]], collections.abc.Sequence[typing.Any], ForwardRef('np.ndarray[Any, Any]'), ForwardRef('pa.Table'), ForwardRef('pd.DataFrame'), ForwardRef('ArrowArrayExportable'), ForwardRef('ArrowStreamExportable')]=None, schema: typing.Union[collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType'), type[int], type[float], type[bool], type[str], type['date'], type['time'], type['datetime'], type['timedelta'], type[list[typing.Any]], type[tuple[typing.Any, ...]], type[bytes], type[object], type['Decimal'], type[None], NoneType]], collections.abc.Sequence[typing.Union[str, tuple[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType'), type[int], type[float], type[bool], type[str], type['date'], type['time'], type['datetime'], type['timedelta'], type[list[typing.Any]], type[tuple[typing.Any, ...]], type[bytes], type[object], type['Decimal'], type[None], NoneType]]]], NoneType]=None, schema_overrides: collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType')]] | None=None, strict: bool=True, orient: typing.Optional[typing.Literal['col', 'row']]=None, infer_schema_length: int | None=100, nan_to_null: bool=False, flow_graph=None, node_id=None, parent_node_id=None) -> None: ...
+
+ def __le__(self, other: Any) -> typing.NoReturn: ...
+
+ def __lt__(self, other: Any) -> typing.NoReturn: ...
+
+ def __ne__(self, other: object) -> typing.NoReturn: ...
+
+ # Create a new FlowFrame instance.
+ def __new__(cls, data: typing.Union[LazyFrame, collections.abc.Mapping[str, typing.Union[collections.abc.Sequence[object], collections.abc.Mapping[str, collections.abc.Sequence[object]], ForwardRef('Series')]], collections.abc.Sequence[typing.Any], ForwardRef('np.ndarray[Any, Any]'), ForwardRef('pa.Table'), ForwardRef('pd.DataFrame'), ForwardRef('ArrowArrayExportable'), ForwardRef('ArrowStreamExportable')]=None, schema: typing.Union[collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType'), type[int], type[float], type[bool], type[str], type['date'], type['time'], type['datetime'], type['timedelta'], type[list[typing.Any]], type[tuple[typing.Any, ...]], type[bytes], type[object], type['Decimal'], type[None], NoneType]], collections.abc.Sequence[typing.Union[str, tuple[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType'), type[int], type[float], type[bool], type[str], type['date'], type['time'], type['datetime'], type['timedelta'], type[list[typing.Any]], type[tuple[typing.Any, ...]], type[bytes], type[object], type['Decimal'], type[None], NoneType]]]], NoneType]=None, schema_overrides: collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType')]] | None=None, strict: bool=True, orient: typing.Optional[typing.Literal['col', 'row']]=None, infer_schema_length: int | None=100, nan_to_null: bool=False, flow_graph=None, node_id=None, parent_node_id=None) -> Self: ...
+
+ def __repr__(self) -> Any: ...
+
+ # Helper method to add a connection between nodes
+ def _add_connection(self, from_id, to_id, input_type: typing.Literal['main', 'left', 'right']='main') -> Any: ...
+
+ def _add_number_of_records(self, new_node_id: int, description: str=None) -> 'FlowFrame': ...
+
+ def _add_polars_code(self, new_node_id: int, code: str, depending_on_ids: typing.Optional[typing.List[str]]=None, convertable_to_code: bool=True, method_name: str=None, polars_expr: typing.Union[flowfile_frame.expr.Expr, typing.List[flowfile_frame.expr.Expr], NoneType]=None, group_expr: typing.Union[flowfile_frame.expr.Expr, typing.List[flowfile_frame.expr.Expr], NoneType]=None, kwargs_expr: typing.Optional[typing.Dict]=None, group_kwargs: typing.Optional[typing.Dict]=None, description: str=None) -> Any: ...
+
+ def _comparison_error(self, operator: str) -> typing.NoReturn: ...
+
+ # Helper method to create a new FlowFrame that's a child of this one
+ def _create_child_frame(self, new_node_id) -> 'FlowFrame': ...
+
+ # Detect if the expression is a cum_count operation and use record_id if possible.
+ def _detect_cum_count_record_id(self, expr: Any, new_node_id: int, description: typing.Optional[str]=None) -> 'FlowFrame': ...
+
+ # Generates the `input_df.sort(...)` Polars code string using pure expression strings.
+ def _generate_sort_polars_code(self, pure_sort_expr_strs: typing.List[str], descending_values: typing.List[bool], nulls_last_values: typing.List[bool], multithreaded: bool, maintain_order: bool) -> str: ...
+
+ def _with_flowfile_formula(self, flowfile_formula: str, output_column_name, description: str=None) -> 'FlowFrame': ...
+
+ # Approximate count of unique values.
+ def approx_n_unique(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Return the `k` smallest rows.
+ def bottom_k(self, k: int, by: IntoExpr | Iterable[IntoExpr], reverse: bool | Sequence[bool]=False, description: Optional[str] = None) -> LazyFrame: ...
+
+ def cache(self, description: Optional[str] = None) -> 'FlowFrame': ...
+
+ # Cast LazyFrame column(s) to the specified dtype(s).
+ def cast(self, dtypes: Mapping[ColumnNameOrSelector | PolarsDataType, PolarsDataType | PythonDataType] | PolarsDataType, strict: bool=True, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Create an empty copy of the current LazyFrame, with zero to 'n' rows.
+ def clear(self, n: int=0, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Create a copy of this LazyFrame.
+ def clone(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Collect lazy data into memory.
+ def collect(self, *args, **kwargs) -> Any: ...
+
+ # Collect DataFrame asynchronously in thread pool.
+ def collect_async(self, gevent: bool=False, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, no_optimization: bool=False, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, engine: EngineType='auto', _check_order: bool=True) -> Awaitable[DataFrame] | _GeventDataFrameResult[DataFrame]: ...
+
+ # Resolve the schema of this LazyFrame.
+ def collect_schema(self) -> Schema: ...
+
+ # Get the column names.
+ @property
+ def columns(self) -> typing.List[str]: ...
+
+ # Combine multiple FlowFrames into a single FlowFrame.
+ def concat(self, other: typing.Union[ForwardRef('FlowFrame'), typing.List[ForwardRef('FlowFrame')]], how: str='vertical', rechunk: bool=False, parallel: bool=True, description: str=None) -> 'FlowFrame': ...
+
+ # Return the number of non-null elements for each column.
+ def count(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Simple naive implementation of creating the frame from any type. It converts the data to a polars frame,
+ def create_from_any_type(self, data: typing.Union[collections.abc.Mapping[str, typing.Union[collections.abc.Sequence[object], collections.abc.Mapping[str, collections.abc.Sequence[object]], ForwardRef('Series')]], collections.abc.Sequence[typing.Any], ForwardRef('np.ndarray[Any, Any]'), ForwardRef('pa.Table'), ForwardRef('pd.DataFrame'), ForwardRef('ArrowArrayExportable'), ForwardRef('ArrowStreamExportable')]=None, schema: typing.Union[collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType'), type[int], type[float], type[bool], type[str], type['date'], type['time'], type['datetime'], type['timedelta'], type[list[typing.Any]], type[tuple[typing.Any, ...]], type[bytes], type[object], type['Decimal'], type[None], NoneType]], collections.abc.Sequence[typing.Union[str, tuple[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType'), type[int], type[float], type[bool], type[str], type['date'], type['time'], type['datetime'], type['timedelta'], type[list[typing.Any]], type[tuple[typing.Any, ...]], type[bytes], type[object], type['Decimal'], type[None], NoneType]]]], NoneType]=None, schema_overrides: collections.abc.Mapping[str, typing.Union[ForwardRef('DataTypeClass'), ForwardRef('DataType')]] | None=None, strict: bool=True, orient: typing.Optional[typing.Literal['col', 'row']]=None, infer_schema_length: int | None=100, nan_to_null: bool=False, flow_graph=None, node_id=None, parent_node_id=None, description: Optional[str] = None) -> Any: ...
+
+ # Creates a summary of statistics for a LazyFrame, returning a DataFrame.
+ def describe(self, percentiles: Sequence[float] | float | None=(0.25, 0.5, 0.75), interpolation: RollingInterpolationMethod='nearest') -> DataFrame: ...
+
+ # Read a logical plan from a file to construct a LazyFrame.
+ def deserialize(self, source: str | Path | IOBase, format: SerializationFormat='binary', description: Optional[str] = None) -> LazyFrame: ...
+
+ # Remove columns from the DataFrame.
+ def drop(self, *columns, strict: bool=True, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Drop all rows that contain one or more NaN values.
+ def drop_nans(self, subset: ColumnNameOrSelector | Collection[ColumnNameOrSelector] | None=None, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Drop all rows that contain one or more null values.
+ def drop_nulls(self, subset: ColumnNameOrSelector | Collection[ColumnNameOrSelector] | None=None, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Get the column data types.
+ @property
+ def dtypes(self) -> typing.List[pl.classes.DataType]: ...
+
+ # Create a string representation of the query plan.
+ def explain(self, format: ExplainFormat='plain', optimized: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, streaming: bool=False, engine: EngineType='auto', tree_format: bool | None=None, _check_order: bool=True) -> str: ...
+
+ # Explode the dataframe to long format by exploding the given columns.
+ def explode(self, columns: typing.Union[str, flowfile_frame.expr.Column, typing.Iterable[str | flowfile_frame.expr.Column]], *more_columns, description: str=None) -> 'FlowFrame': ...
+
+ # Collect a small number of rows for debugging purposes.
+ def fetch(self, n_rows: int=500, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, no_optimization: bool=False, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True) -> DataFrame: ...
+
+ # Fill floating point NaN values.
+ def fill_nan(self, value: int | float | Expr | None, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Fill null values using the specified value or strategy.
+ def fill_null(self, value: Any | Expr | None=None, strategy: FillNullStrategy | None=None, limit: int | None=None, matches_supertype: bool=True, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Filter rows based on a predicate.
+ def filter(self, *predicates, flowfile_formula: typing.Optional[str]=None, description: typing.Optional[str]=None, **constraints) -> 'FlowFrame': ...
+
+ # Get the first row of the DataFrame.
+ def first(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Take every nth row in the LazyFrame and return as a new LazyFrame.
+ def gather_every(self, n: int, offset: int=0, description: Optional[str] = None) -> LazyFrame: ...
+
+ def get_node_settings(self, description: Optional[str] = None) -> FlowNode: ...
+
+ # Start a group by operation.
+ def group_by(self, *by, description: Optional[str] = None, maintain_order: bool = False, **named_by) -> group_frame.GroupByFrame: ...
+
+ # Group based on a time value (or index value of type Int32, Int64).
+ def group_by_dynamic(self, index_column: IntoExpr, every: str | timedelta, period: str | timedelta | None=None, offset: str | timedelta | None=None, include_boundaries: bool=False, closed: ClosedInterval='left', label: Label='left', group_by: IntoExpr | Iterable[IntoExpr] | None=None, start_by: StartBy='window', description: Optional[str] = None) -> LazyGroupBy: ...
+
+ def head(self, n: int, description: str=None) -> Any: ...
+
+ # Inspect a node in the computation graph.
+ def inspect(self, fmt: str='{}', description: Optional[str] = None) -> LazyFrame: ...
+
+ # Interpolate intermediate values. The interpolation method is linear.
+ def interpolate(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Add a join operation to the Logical Plan.
+ def join(self, other, on: typing.Union[typing.List[str | flowfile_frame.expr.Column], str, flowfile_frame.expr.Column]=None, how: str='inner', left_on: typing.Union[typing.List[str | flowfile_frame.expr.Column], str, flowfile_frame.expr.Column]=None, right_on: typing.Union[typing.List[str | flowfile_frame.expr.Column], str, flowfile_frame.expr.Column]=None, suffix: str='_right', validate: str=None, nulls_equal: bool=False, coalesce: bool=None, maintain_order: typing.Literal[None, 'left', 'right', 'left_right', 'right_left']=None, description: str=None) -> Any: ...
+
+ # Perform an asof join.
+ def join_asof(self, other: LazyFrame, left_on: str | None | Expr=None, right_on: str | None | Expr=None, on: str | None | Expr=None, by_left: str | Sequence[str] | None=None, by_right: str | Sequence[str] | None=None, by: str | Sequence[str] | None=None, strategy: AsofJoinStrategy='backward', suffix: str='_right', tolerance: str | int | float | timedelta | None=None, allow_parallel: bool=True, force_parallel: bool=False, coalesce: bool=True, allow_exact_matches: bool=True, check_sortedness: bool=True, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Perform a join based on one or multiple (in)equality predicates.
+ def join_where(self, other: LazyFrame, *predicates, suffix: str='_right', description: Optional[str] = None) -> LazyFrame: ...
+
+ # Get the last row of the DataFrame.
+ def last(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Return lazy representation, i.e. itself.
+ def lazy(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ def limit(self, n: int, description: str=None) -> Any: ...
+
+ # Apply a custom function.
+ def map_batches(self, function: Callable[[DataFrame], DataFrame], predicate_pushdown: bool=True, projection_pushdown: bool=True, slice_pushdown: bool=True, no_optimizations: bool=False, schema: None | SchemaDict=None, validate_output_schema: bool=True, streamable: bool=False, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame to their maximum value.
+ def max(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame to their mean value.
+ def mean(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame to their median value.
+ def median(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Unpivot a DataFrame from wide to long format.
+ def melt(self, id_vars: ColumnNameOrSelector | Sequence[ColumnNameOrSelector] | None=None, value_vars: ColumnNameOrSelector | Sequence[ColumnNameOrSelector] | None=None, variable_name: str | None=None, value_name: str | None=None, streamable: bool=True, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Take two sorted DataFrames and merge them by the sorted key.
+ def merge_sorted(self, other: LazyFrame, key: str, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame to their minimum value.
+ def min(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame as the sum of their null value count.
+ def null_count(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Offers a structured way to apply a sequence of user-defined functions (UDFs).
+ def pipe(self, function: Callable[Concatenate[LazyFrame, P], T], *args, description: Optional[str] = None, **kwargs) -> T: ...
+
+ # Pivot a DataFrame from long to wide format.
+ def pivot(self, on: str | list[str], index: str | list[str] | None=None, values: str | list[str] | None=None, aggregate_function: str | None='first', maintain_order: bool=True, sort_columns: bool=False, separator: str='_', description: str=None) -> 'FlowFrame': ...
+
+ # Profile a LazyFrame.
+ def profile(self, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, no_optimization: bool=False, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, show_plot: bool=False, truncate_nodes: int=0, figsize: tuple[int, int]=(18, 8), engine: EngineType='auto', _check_order: bool=True, **_kwargs) -> tuple[DataFrame, DataFrame]: ...
+
+ # Aggregate the columns in the LazyFrame to their quantile value.
+ def quantile(self, quantile: float | Expr, interpolation: RollingInterpolationMethod='nearest', description: Optional[str] = None) -> LazyFrame: ...
+
+ # Run a query remotely on Polars Cloud.
+ def remote(self, context: pc.ComputeContext | None=None, plan_type: pc._typing.PlanTypePreference='dot', description: Optional[str] = None) -> pc.LazyFrameExt: ...
+
+ # Remove rows, dropping those that match the given predicate expression(s).
+ def remove(self, *predicates, description: Optional[str] = None, **constraints) -> LazyFrame: ...
+
+ # Rename column names.
+ def rename(self, mapping: dict[str, str] | Callable[[str], str], strict: bool=True, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Reverse the DataFrame.
+ def reverse(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Create rolling groups based on a temporal or integer column.
+ def rolling(self, index_column: IntoExpr, period: str | timedelta, offset: str | timedelta | None=None, closed: ClosedInterval='right', group_by: IntoExpr | Iterable[IntoExpr] | None=None, description: Optional[str] = None) -> LazyGroupBy: ...
+
+ # Save the graph
+ def save_graph(self, file_path: str, auto_arrange: bool=True, description: Optional[str] = None) -> Any: ...
+
+ # Get an ordered mapping of column names to their data type.
+ @property
+ def schema(self) -> pl.Schema: ...
+
+ # Select columns from the frame.
+ def select(self, *columns, description: typing.Optional[str]=None) -> 'FlowFrame': ...
+
+ # Select columns from this LazyFrame.
+ def select_seq(self, *exprs, description: Optional[str] = None, **named_exprs) -> LazyFrame: ...
+
+ # Serialize the logical plan of this LazyFrame to a file or string in JSON format.
+ def serialize(self, file: IOBase | str | Path | None=None, format: SerializationFormat='binary', description: Optional[str] = None) -> bytes | str | None: ...
+
+ # Flag a column as sorted.
+ def set_sorted(self, column: str, descending: bool=False, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Shift values by the given number of indices.
+ def shift(self, n: int | IntoExprColumn=1, fill_value: IntoExpr | None=None, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Show a plot of the query plan.
+ def show_graph(self, optimized: bool=True, show: bool=True, output_path: str | Path | None=None, raw_output: bool=False, figsize: tuple[float, float]=(16.0, 12.0), type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, comm_subplan_elim: bool=True, comm_subexpr_elim: bool=True, cluster_with_columns: bool=True, collapse_joins: bool=True, streaming: bool=False, engine: EngineType='auto', _check_order: bool=True) -> str | None: ...
+
+ # Write the data to a CSV file.
+ def sink_csv(self, file: str, *args, separator: str=',', encoding: str='utf-8', description: str=None) -> 'FlowFrame': ...
+
+ # Evaluate the query in streaming mode and write to an IPC file.
+ def sink_ipc(self, path: str | Path, compression: IpcCompression | None='zstd', compat_level: CompatLevel | None=None, maintain_order: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, collapse_joins: bool=True, no_optimization: bool=False, storage_options: dict[str, Any] | None=None, credential_provider: CredentialProviderFunction | Literal['auto'] | None='auto', retries: int=2, sync_on_close: SyncOnCloseMethod | None=None, mkdir: bool=False, lazy: bool=False, engine: EngineType='auto', description: Optional[str] = None) -> LazyFrame | None: ...
+
+ # Evaluate the query in streaming mode and write to an NDJSON file.
+ def sink_ndjson(self, path: str | Path, maintain_order: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, collapse_joins: bool=True, no_optimization: bool=False, storage_options: dict[str, Any] | None=None, credential_provider: CredentialProviderFunction | Literal['auto'] | None='auto', retries: int=2, sync_on_close: SyncOnCloseMethod | None=None, mkdir: bool=False, lazy: bool=False, engine: EngineType='auto', description: Optional[str] = None) -> LazyFrame | None: ...
+
+ # Evaluate the query in streaming mode and write to a Parquet file.
+ def sink_parquet(self, path: str | Path, compression: str='zstd', compression_level: int | None=None, statistics: bool | str | dict[str, bool]=True, row_group_size: int | None=None, data_page_size: int | None=None, maintain_order: bool=True, type_coercion: bool=True, _type_check: bool=True, predicate_pushdown: bool=True, projection_pushdown: bool=True, simplify_expression: bool=True, slice_pushdown: bool=True, collapse_joins: bool=True, no_optimization: bool=False, storage_options: dict[str, Any] | None=None, credential_provider: CredentialProviderFunction | Literal['auto'] | None='auto', retries: int=2, sync_on_close: SyncOnCloseMethod | None=None, mkdir: bool=False, lazy: bool=False, engine: EngineType='auto', description: Optional[str] = None) -> LazyFrame | None: ...
+
+ # Get a slice of this DataFrame.
+ def slice(self, offset: int, length: int | None=None, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Sort the dataframe by the given columns.
+ def sort(self, by: typing.Union[typing.List[typing.Union[flowfile_frame.expr.Expr, str]], flowfile_frame.expr.Expr, str], *more_by, descending: typing.Union[bool, typing.List[bool]]=False, nulls_last: typing.Union[bool, typing.List[bool]]=False, multithreaded: bool=True, maintain_order: bool=False, description: typing.Optional[str]=None) -> 'FlowFrame': ...
+
+ # Execute a SQL query against the LazyFrame.
+ def sql(self, query: str, table_name: str='self', description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame to their standard deviation value.
+ def std(self, ddof: int=1, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame to their sum value.
+ def sum(self, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Get the last `n` rows.
+ def tail(self, n: int=5, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Split text in a column into multiple rows.
+ def text_to_rows(self, column: str | flowfile_frame.expr.Column, output_column: str=None, delimiter: str=None, split_by_column: str=None, description: str=None) -> 'FlowFrame': ...
+
+ # Get the underlying ETL graph.
+ def to_graph(self, description: Optional[str] = None) -> Any: ...
+
+ # Return the `k` largest rows.
+ def top_k(self, k: int, by: IntoExpr | Iterable[IntoExpr], reverse: bool | Sequence[bool]=False, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Drop duplicate rows from this dataframe.
+ def unique(self, subset: typing.Union[str, ForwardRef('Expr'), typing.List[typing.Union[ForwardRef('Expr'), str]]]=None, keep: typing.Literal['first', 'last', 'any', 'none']='any', maintain_order: bool=False, description: str=None) -> 'FlowFrame': ...
+
+ # Decompose struct columns into separate columns for each of their fields.
+ def unnest(self, columns: ColumnNameOrSelector | Collection[ColumnNameOrSelector], *more_columns, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Unpivot a DataFrame from wide to long format.
+ def unpivot(self, on: list[str | flowfile_frame.selectors.Selector] | str | None | flowfile_frame.selectors.Selector=None, index: list[str] | str | None=None, variable_name: str='variable', value_name: str='value', description: str=None) -> 'FlowFrame': ...
+
+ # Update the values in this `LazyFrame` with the values in `other`.
+ def update(self, other: LazyFrame, on: str | Sequence[str] | None=None, how: Literal['left', 'inner', 'full']='left', left_on: str | Sequence[str] | None=None, right_on: str | Sequence[str] | None=None, include_nulls: bool=False, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Aggregate the columns in the LazyFrame to their variance value.
+ def var(self, ddof: int=1, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Get the number of columns.
+ @property
+ def width(self) -> int: ...
+
+ # Add or replace columns in the DataFrame.
+ def with_columns(self, *exprs: Union[Expr, Iterable[Expr], Any], flowfile_formulas: Optional[List[str]] = None, output_column_names: Optional[List[str]] = None, description: Optional[str] = None, **named_exprs: Union[Expr, Any]) -> 'FlowFrame': ...
+
+ # Add columns to this LazyFrame.
+ def with_columns_seq(self, *exprs, description: Optional[str] = None, **named_exprs) -> LazyFrame: ...
+
+ # Add an external context to the computation graph.
+ def with_context(self, other: Self | list[Self], description: Optional[str] = None) -> LazyFrame: ...
+
+ # Add a column at index 0 that counts the rows.
+ def with_row_count(self, name: str='row_nr', offset: int=0, description: Optional[str] = None) -> LazyFrame: ...
+
+ # Add a row index as the first column in the DataFrame.
+ def with_row_index(self, name: str='index', offset: int=0, description: str=None) -> 'FlowFrame': ...
+
+ def write_csv(self, file: str | os.PathLike, separator: str=',', encoding: str='utf-8', convert_to_absolute_path: bool=True, description: str=None, **kwargs) -> 'FlowFrame': ...
+
+ # Write the data to a Parquet file. Creates a standard Output node if only
+ def write_parquet(self, path: str | os.PathLike, convert_to_absolute_path: bool=True, description: str=None, **kwargs) -> 'FlowFrame': ...
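The stub above summarizes the flowfile_frame design: FlowFrame mirrors the polars LazyFrame API, but nearly every method takes an extra description: Optional[str] parameter and records a node in a FlowGraph (see the _add_connection, save_graph, and to_graph helpers), so a lazy query doubles as a visual ETL pipeline. A minimal usage sketch of that surface follows; the dict constructor, col, and GroupByFrame.agg are assumptions based on the stub and the package's polars-style interface, not confirmed by this diff:

# Hypothetical example against the FlowFrame stub above.
import flowfile_frame as ff  # assumes the package exposes FlowFrame and col

frame = ff.FlowFrame({"name": ["a", "b", "b"], "value": [1, 2, 3]})

result = (
    frame
    .filter(ff.col("value") > 1, description="keep values above 1")  # filter node
    .group_by("name", description="aggregate per name")  # returns a GroupByFrame
    .agg(ff.col("value").sum())  # assumed GroupByFrame.agg, mirroring polars
    .collect()  # materializes via the underlying LazyFrame
)

frame.save_graph("flow_example.flowfile")  # path and extension are illustrative

The per-method description presumably becomes the human-readable label on the generated node, which would explain why it is threaded through the entire API rather than exposed as a separate annotation call.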