churnkit 0.76.1a1__py3-none-any.whl → 0.76.1a2__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/00_start_here.ipynb +10 -5
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01_data_discovery.ipynb +6 -6
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01a_a_temporal_text_deep_dive.ipynb +52 -46
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01a_temporal_deep_dive.ipynb +68 -65
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01b_temporal_quality.ipynb +12 -27
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01c_temporal_patterns.ipynb +216 -221
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01d_event_aggregation.ipynb +88 -81
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/02_column_deep_dive.ipynb +111 -108
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/02a_text_columns_deep_dive.ipynb +44 -38
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/03_quality_assessment.ipynb +89 -85
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/04_relationship_analysis.ipynb +81 -80
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/05_multi_dataset.ipynb +83 -89
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/06_feature_opportunities.ipynb +102 -98
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/07_modeling_readiness.ipynb +32 -31
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/08_baseline_experiments.ipynb +33 -29
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/09_business_alignment.ipynb +6 -5
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/10_spec_generation.ipynb +67 -63
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/11_scoring_validation.ipynb +38 -23
- {churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/12_view_documentation.ipynb +3 -1
- {churnkit-0.76.1a1.dist-info → churnkit-0.76.1a2.dist-info}/METADATA +1 -1
- {churnkit-0.76.1a1.dist-info → churnkit-0.76.1a2.dist-info}/RECORD +30 -30
- customer_retention/__init__.py +1 -1
- customer_retention/analysis/auto_explorer/explorer.py +2 -2
- customer_retention/analysis/notebook_progress.py +4 -1
- customer_retention/core/compat/__init__.py +10 -0
- customer_retention/integrations/databricks_init.py +13 -0
- customer_retention/stages/profiling/column_profiler.py +9 -2
- {churnkit-0.76.1a1.dist-info → churnkit-0.76.1a2.dist-info}/WHEEL +0 -0
- {churnkit-0.76.1a1.dist-info → churnkit-0.76.1a2.dist-info}/entry_points.txt +0 -0
- {churnkit-0.76.1a1.dist-info → churnkit-0.76.1a2.dist-info}/licenses/LICENSE +0 -0
{churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/00_start_here.ipynb

@@ -70,17 +70,22 @@
  "outputs": [],
  "source": [
  "from customer_retention.analysis.notebook_progress import track_and_export_previous\n",
+ "\n",
  "track_and_export_previous(\"00_start_here.ipynb\")\n",
  "\n",
  "try:\n",
  " import customer_retention\n",
- " from customer_retention.core.config.experiments import
- "
+ " from customer_retention.core.config.experiments import (\n",
+ " EXPERIMENTS_DIR,\n",
+ " FINDINGS_DIR,\n",
+ " OUTPUT_DIR,\n",
+ " setup_experiments_structure,\n",
+ " )\n",
+ " print(\"customer_retention is installed\")\n",
  "except ImportError:\n",
  " print(\"customer_retention not found. Install with:\")\n",
  " print(\" uv sync\")\n",
- " print(\" # or: pip install -e .\")
- "from customer_retention.stages.temporal import TEMPORAL_METADATA_COLS"
+ " print(\" # or: pip install -e .\")"
  ]
  },
  {
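For readability, here is the updated setup cell assembled from the context and `+` lines above. The diff viewer collapses leading whitespace inside the JSON source strings, so the indentation below is restored by hand; note that the a1 cell's trailing `TEMPORAL_METADATA_COLS` import is dropped in a2:

from customer_retention.analysis.notebook_progress import track_and_export_previous

track_and_export_previous("00_start_here.ipynb")

try:
    import customer_retention
    from customer_retention.core.config.experiments import (
        EXPERIMENTS_DIR,
        FINDINGS_DIR,
        OUTPUT_DIR,
        setup_experiments_structure,
    )
    print("customer_retention is installed")
except ImportError:
    print("customer_retention not found. Install with:")
    print("  uv sync")
    print("  # or: pip install -e .")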
@@ -284,8 +289,8 @@
  "outputs": [],
  "source": [
  "# Download Bank Customer Churn dataset\n",
- "import subprocess\n",
  "import shutil\n",
+ "import subprocess\n",
  "\n",
  "FIXTURES_DIR.mkdir(parents=True, exist_ok=True)\n",
  "bank_churn_path = FIXTURES_DIR / \"bank_customer_churn.csv\"\n",
{churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01_data_discovery.ipynb

@@ -82,6 +82,7 @@
  "outputs": [],
  "source": [
  "from customer_retention.analysis.notebook_progress import track_and_export_previous\n",
+ "\n",
  "track_and_export_previous(\"01_data_discovery.ipynb\")\n",
  "\n",
  "from datetime import datetime\n",
@@ -94,7 +95,6 @@
  "from customer_retention.analysis.visualization import ChartBuilder, console, display_figure, display_table\n",
  "from customer_retention.core.config.column_config import DatasetGranularity\n",
  "from customer_retention.core.config.experiments import (\n",
- " EXPERIMENTS_DIR,\n",
  " FINDINGS_DIR, # noqa: F401 - required for test validation\n",
  " OUTPUT_DIR,\n",
  " setup_experiments_structure,\n",
@@ -889,14 +889,14 @@
  "if time_col and time_col in df.columns:\n",
  " exclude_cols = list(TEMPORAL_METADATA_COLS) + ([valid_entity_col] if valid_entity_col else [])\n",
  " availability = analyze_feature_availability(df, time_col, exclude_columns=exclude_cols)\n",
- "
+ "\n",
  " console.start_section()\n",
  " console.header(\"Feature Availability\")\n",
  " console.metric(\"Data Span\", f\"{availability.data_start.date()} to {availability.data_end.date()} ({availability.time_span_days} days)\")\n",
- "
+ "\n",
  " if availability.new_tracking or availability.retired_tracking or availability.partial_window:\n",
  " if availability.new_tracking:\n",
- " console.warning(f\"New tracking ({len(availability.new_tracking)}): {', '.join(availability.new_tracking[:5])}\"
+ " console.warning(f\"New tracking ({len(availability.new_tracking)}): {', '.join(availability.new_tracking[:5])}\" +\n",
  " (f\" +{len(availability.new_tracking)-5} more\" if len(availability.new_tracking) > 5 else \"\"))\n",
  " if availability.retired_tracking:\n",
  " console.warning(f\"Retired tracking ({len(availability.retired_tracking)}): {', '.join(availability.retired_tracking[:5])}\" +\n",
@@ -904,7 +904,7 @@
  " if availability.partial_window:\n",
  " console.warning(f\"Partial window ({len(availability.partial_window)}): {', '.join(availability.partial_window[:5])}\" +\n",
  " (f\" +{len(availability.partial_window)-5} more\" if len(availability.partial_window) > 5 else \"\"))\n",
- "
+ "\n",
  " console.subheader(\"Recommendations\")\n",
  " for rec in availability.recommendations[:5]:\n",
  " if rec[\"column\"] != \"_general_\":\n",
@@ -915,7 +915,7 @@
  " else:\n",
  " console.success(\"All features have full temporal coverage\")\n",
  " console.end_section()\n",
- "
+ "\n",
  " # Store structured availability metadata for downstream use\n",
  " features_info = {\n",
  " feat.column: FeatureAvailabilityInfo(\n",
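Beyond whitespace cleanup, the one behavioral fix in this file is at old line 899 in the hunk above: the `console.warning(...)` call for new-tracking columns appears (the removed line is truncated by the viewer) to have lacked the trailing `+` that joins it to the conditional suffix on the next line, unlike the parallel retired-tracking and partial-window calls. Without the operator, Python parses the parenthesized suffix as a call on the f-string. A minimal sketch of the before/after shape, with a placeholder `items` list standing in for `availability.new_tracking`:

items = ["col_a", "col_b", "col_c", "col_d", "col_e", "col_f"]

# Broken shape (old): no "+" before the line break, so the parenthesized
# conditional is parsed as a call on the f-string:
#   warning(f"New tracking ({len(items)}): {', '.join(items[:5])}"
#           (f" +{len(items)-5} more" if len(items) > 5 else ""))
# -> TypeError: 'str' object is not callable, at runtime.

# Fixed shape (new): explicit concatenation across the line break.
message = (f"New tracking ({len(items)}): {', '.join(items[:5])}" +
           (f" +{len(items)-5} more" if len(items) > 5 else ""))
print(message)  # New tracking (6): col_a, col_b, col_c, col_d, col_e +1 more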
{churnkit-0.76.1a1.data → churnkit-0.76.1a2.data}/data/share/churnkit/exploration_notebooks/01a_a_temporal_text_deep_dive.ipynb

@@ -109,22 +109,27 @@
  "outputs": [],
  "source": [
  "from customer_retention.analysis.notebook_progress import track_and_export_previous\n",
+ "\n",
  "track_and_export_previous(\"01a_a_temporal_text_deep_dive.ipynb\")\n",
  "\n",
- "from customer_retention.analysis.auto_explorer import ExplorationFindings, TextProcessingMetadata\n",
- "from customer_retention.analysis.visualization import ChartBuilder, display_figure, display_table, console\n",
- "from customer_retention.core.config.column_config import ColumnType, DatasetGranularity\n",
- "from customer_retention.stages.profiling import (\n",
- " TextColumnProcessor, TextProcessingConfig, TextColumnResult,\n",
- " TimeWindowAggregator, AggregationPlan,\n",
- " EMBEDDING_MODELS, get_model_info, list_available_models\n",
- ")\n",
- "import pandas as pd\n",
- "import numpy as np\n",
- "import plotly.graph_objects as go\n",
  "import plotly.express as px\n",
+ "import plotly.graph_objects as go\n",
  "from plotly.subplots import make_subplots\n",
- "
+ "\n",
+ "from customer_retention.analysis.auto_explorer import ExplorationFindings, TextProcessingMetadata\n",
+ "from customer_retention.analysis.visualization import ChartBuilder, display_figure\n",
+ "from customer_retention.core.config.column_config import ColumnType\n",
+ "from customer_retention.core.config.experiments import (\n",
+ " EXPERIMENTS_DIR,\n",
+ " FINDINGS_DIR,\n",
+ ")\n",
+ "from customer_retention.stages.profiling import (\n",
+ " TextColumnProcessor,\n",
+ " TextProcessingConfig,\n",
+ " TimeWindowAggregator,\n",
+ " get_model_info,\n",
+ " list_available_models,\n",
+ ")"
  ]
  },
  {
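Assembled from the context and `+` lines above, the rewritten import cell now groups third-party imports ahead of the package's own, drops the unused names (pandas, numpy, console, display_table, DatasetGranularity, TextColumnResult, AggregationPlan, EMBEDDING_MODELS), and pulls EXPERIMENTS_DIR and FINDINGS_DIR from core.config.experiments. Indentation inside the parenthesized imports is restored by hand:

from customer_retention.analysis.notebook_progress import track_and_export_previous

track_and_export_previous("01a_a_temporal_text_deep_dive.ipynb")

import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots

from customer_retention.analysis.auto_explorer import ExplorationFindings, TextProcessingMetadata
from customer_retention.analysis.visualization import ChartBuilder, display_figure
from customer_retention.core.config.column_config import ColumnType
from customer_retention.core.config.experiments import (
    EXPERIMENTS_DIR,
    FINDINGS_DIR,
)
from customer_retention.stages.profiling import (
    TextColumnProcessor,
    TextProcessingConfig,
    TimeWindowAggregator,
    get_model_info,
    list_available_models,
)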
@@ -296,7 +301,7 @@
  },
  "outputs": [],
  "source": [
- "from customer_retention.stages.temporal import load_data_with_snapshot_preference
+ "from customer_retention.stages.temporal import load_data_with_snapshot_preference\n",
  "\n",
  "df, data_source = load_data_with_snapshot_preference(findings, output_dir=str(FINDINGS_DIR))\n",
  "charts = ChartBuilder()\n",
@@ -478,28 +483,28 @@
  " print(f\"\\n{'='*70}\")\n",
  " print(f\"Column: {col_name}\")\n",
  " print(f\"{'='*70}\")\n",
- "
+ "\n",
  " text_series = df[col_name].fillna(\"\")\n",
- "
+ "\n",
  " non_empty = (text_series.str.len() > 0).sum()\n",
  " avg_length = text_series.str.len().mean()\n",
- "
- " print(
+ "\n",
+ " print(\"\\n\\U0001f4ca Statistics:\")\n",
  " print(f\" Total events: {len(text_series):,}\")\n",
  " print(f\" Non-empty: {non_empty:,} ({non_empty/len(text_series)*100:.1f}%)\")\n",
  " print(f\" Avg length: {avg_length:.0f} characters\")\n",
- "
+ "\n",
  " # Texts per entity\n",
  " texts_per_entity = df.groupby(ENTITY_COLUMN)[col_name].apply(\n",
  " lambda x: (x.fillna(\"\").str.len() > 0).sum()\n",
  " )\n",
- " print(
+ " print(\"\\n\\U0001f465 Text events per entity:\")\n",
  " print(f\" Mean: {texts_per_entity.mean():.1f}\")\n",
  " print(f\" Median: {texts_per_entity.median():.0f}\")\n",
  " print(f\" Max: {texts_per_entity.max():,}\")\n",
- "
+ "\n",
  " # Sample texts\n",
- " print(
+ " print(\"\\n\\U0001f4dd Sample texts:\")\n",
  " samples = text_series[text_series.str.len() > 10].head(3)\n",
  " for i, sample in enumerate(samples, 1):\n",
  " truncated = sample[:80] + \"...\" if len(sample) > 80 else sample\n",
@@ -547,26 +552,26 @@
  "source": [
  "if text_columns and findings.is_time_series:\n",
  " processor = TextColumnProcessor(text_config)\n",
- "
+ "\n",
  " print(\"Processing TEXT columns...\")\n",
  " print(\"(This may take a moment for large datasets)\\n\")\n",
- "
+ "\n",
  " results = []\n",
  " df_with_pcs = df.copy()\n",
- "
+ "\n",
  " for col_name in text_columns:\n",
  " print(f\"\\n{'='*70}\")\n",
  " print(f\"Processing: {col_name}\")\n",
  " print(f\"{'='*70}\")\n",
- "
+ "\n",
  " df_with_pcs, result = processor.process_column(df_with_pcs, col_name)\n",
  " results.append(result)\n",
- "
- " print(
+ "\n",
+ " print(\"\\n\\u2705 Per-event processing complete:\")\n",
  " print(f\" Components: {result.n_components}\")\n",
  " print(f\" Explained variance: {result.explained_variance:.1%}\")\n",
  " print(f\" Features: {', '.join(result.component_columns)}\")\n",
- "
+ "\n",
  " print(f\"\\n\\nDataFrame now has {len(df_with_pcs.columns)} columns (added {len(df_with_pcs.columns) - len(df.columns)} PC columns)\")"
  ]
  },
@@ -620,11 +625,11 @@
  " all_pc_columns = []\n",
  " for result in results:\n",
  " all_pc_columns.extend(result.component_columns)\n",
- "
+ "\n",
  " print(f\"\\n{'='*70}\")\n",
  " print(\"AGGREGATION PLAN\")\n",
  " print(f\"{'='*70}\")\n",
- "
+ "\n",
  " aggregator = TimeWindowAggregator(ENTITY_COLUMN, TIME_COLUMN)\n",
  " plan = aggregator.generate_plan(\n",
  " df_with_pcs,\n",
@@ -634,16 +639,16 @@
  " include_event_count=False,\n",
  " include_recency=False\n",
  " )\n",
- "
- " print(
+ "\n",
+ " print(\"\\n\\U0001f4ca Plan Summary:\")\n",
  " print(f\" Entity column: {plan.entity_column}\")\n",
  " print(f\" Time column: {plan.time_column}\")\n",
  " print(f\" Windows: {[w.name for w in plan.windows]}\")\n",
  " print(f\" Value columns: {len(plan.value_columns)}\")\n",
  " print(f\" Aggregation functions: {plan.agg_funcs}\")\n",
  " print(f\" Total features to create: {len(plan.feature_columns)}\")\n",
- "
- " print(
+ "\n",
+ " print(\"\\n\\U0001f4dd Sample feature names:\")\n",
  " for feat in plan.feature_columns[:10]:\n",
  " print(f\" - {feat}\")\n",
  " if len(plan.feature_columns) > 10:\n",
@@ -694,28 +699,28 @@
  " print(f\"\\n{'='*70}\")\n",
  " print(f\"PC Feature Distributions: {result.column_name}\")\n",
  " print(f\"{'='*70}\")\n",
- "
+ "\n",
  " # Distribution of PC1 and PC2\n",
  " if len(result.component_columns) >= 2:\n",
  " fig = make_subplots(rows=1, cols=2,\n",
  " subplot_titles=(result.component_columns[0], result.component_columns[1]))\n",
- "
+ "\n",
  " fig.add_trace(go.Histogram(\n",
  " x=df_with_pcs[result.component_columns[0]],\n",
  " nbinsx=50, marker_color='steelblue', opacity=0.7\n",
  " ), row=1, col=1)\n",
- "
+ "\n",
  " fig.add_trace(go.Histogram(\n",
  " x=df_with_pcs[result.component_columns[1]],\n",
  " nbinsx=50, marker_color='coral', opacity=0.7\n",
  " ), row=1, col=2)\n",
- "
+ "\n",
  " fig.update_layout(\n",
  " title=f\"PC Feature Distributions: {result.column_name}\",\n",
  " height=350, template=\"plotly_white\", showlegend=False\n",
  " )\n",
  " display_figure(fig)\n",
- "
+ "\n",
  " # Scatter plot of PC1 vs PC2\n",
  " if len(result.component_columns) >= 2:\n",
  " fig = px.scatter(\n",
@@ -781,13 +786,14 @@
  " processing_approach=\"pca\"\n",
  " )\n",
  " findings.text_processing[result.column_name] = metadata\n",
- "
+ "\n",
  " print(f\"\\u2705 Added text processing metadata for {result.column_name}\")\n",
- "
+ "\n",
  " findings.save(FINDINGS_PATH)\n",
  " print(f\"\\nFindings saved to: {FINDINGS_PATH}\")\n",
  "\n",
  "from customer_retention.analysis.notebook_html_exporter import export_notebook_html\n",
+ "\n",
  "export_notebook_html(Path(\"01a_a_temporal_text_deep_dive.ipynb\"), EXPERIMENTS_DIR / \"docs\")\n"
  ]
  },
@@ -834,25 +840,25 @@
  " print(\"\\n\" + \"=\"*70)\n",
  " print(\"PRODUCTION PIPELINE RECOMMENDATIONS\")\n",
  " print(\"=\"*70)\n",
- "
+ "\n",
  " print(\"\\n\\U0001f527 Bronze Layer (per-event processing):\")\n",
  " for result in results:\n",
  " print(f\"\\n {result.column_name}:\")\n",
- " print(
+ " print(\" Action: embed_reduce\")\n",
  " print(f\" Model: {text_config.embedding_model}\")\n",
  " print(f\" Components: {result.n_components}\")\n",
  " print(f\" Output: {', '.join(result.component_columns[:3])}...\")\n",
- "
+ "\n",
  " print(\"\\n\\U0001f527 Silver Layer (entity aggregation):\")\n",
  " print(f\" Windows: {AGGREGATION_WINDOWS}\")\n",
  " print(f\" Functions: {AGGREGATION_FUNCS}\")\n",
- " print(
+ " print(\" Example features:\")\n",
  " for result in results[:1]:\n",
  " pc1 = result.component_columns[0]\n",
  " for window in AGGREGATION_WINDOWS[:2]:\n",
  " for func in AGGREGATION_FUNCS[:2]:\n",
  " print(f\" - {pc1}_{func}_{window}\")\n",
- "
+ "\n",
  " print(\"\\n\\U0001f4a1 The pipeline generator will create these transformations automatically.\")"
  ]
  },
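The silver-layer loop in the last hunk prints one feature name per (window, function) pair using the `{pc1}_{func}_{window}` pattern. A standalone sketch with hypothetical values — the real AGGREGATION_WINDOWS, AGGREGATION_FUNCS, and component column names are defined earlier in the notebook:

# Hypothetical stand-ins; the notebook defines the real values upstream.
AGGREGATION_WINDOWS = ["30d", "90d"]
AGGREGATION_FUNCS = ["mean", "max"]
pc1 = "notes_pc1"  # assumed name of the first text PCA component column

for window in AGGREGATION_WINDOWS[:2]:
    for func in AGGREGATION_FUNCS[:2]:
        print(f"  - {pc1}_{func}_{window}")
# Prints:
#   - notes_pc1_mean_30d
#   - notes_pc1_max_30d
#   - notes_pc1_mean_90d
#   - notes_pc1_max_90d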