owlplanner-2025.12.5-py3-none-any.whl → owlplanner-2026.1.26-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. owlplanner/In Discussion #58, the case of Kim and Sam.md +307 -0
  2. owlplanner/__init__.py +20 -1
  3. owlplanner/abcapi.py +24 -23
  4. owlplanner/cli/README.md +50 -0
  5. owlplanner/cli/_main.py +52 -0
  6. owlplanner/cli/cli_logging.py +56 -0
  7. owlplanner/cli/cmd_list.py +83 -0
  8. owlplanner/cli/cmd_run.py +86 -0
  9. owlplanner/config.py +315 -136
  10. owlplanner/data/__init__.py +21 -0
  11. owlplanner/data/awi.csv +75 -0
  12. owlplanner/data/bendpoints.csv +49 -0
  13. owlplanner/data/newawi.csv +75 -0
  14. owlplanner/data/rates.csv +99 -98
  15. owlplanner/debts.py +315 -0
  16. owlplanner/fixedassets.py +288 -0
  17. owlplanner/mylogging.py +157 -25
  18. owlplanner/plan.py +1044 -332
  19. owlplanner/plotting/__init__.py +16 -3
  20. owlplanner/plotting/base.py +17 -3
  21. owlplanner/plotting/factory.py +16 -3
  22. owlplanner/plotting/matplotlib_backend.py +30 -7
  23. owlplanner/plotting/plotly_backend.py +33 -10
  24. owlplanner/progress.py +66 -9
  25. owlplanner/rates.py +366 -361
  26. owlplanner/socialsecurity.py +142 -22
  27. owlplanner/tax2026.py +170 -57
  28. owlplanner/timelists.py +316 -32
  29. owlplanner/utils.py +204 -5
  30. owlplanner/version.py +20 -1
  31. {owlplanner-2025.12.5.dist-info → owlplanner-2026.1.26.dist-info}/METADATA +50 -158
  32. owlplanner-2026.1.26.dist-info/RECORD +36 -0
  33. owlplanner-2026.1.26.dist-info/entry_points.txt +2 -0
  34. owlplanner-2026.1.26.dist-info/licenses/AUTHORS +15 -0
  35. owlplanner/tax2025.py +0 -339
  36. owlplanner-2025.12.5.dist-info/RECORD +0 -24
  37. {owlplanner-2025.12.5.dist-info → owlplanner-2026.1.26.dist-info}/WHEEL +0 -0
  38. {owlplanner-2025.12.5.dist-info → owlplanner-2026.1.26.dist-info}/licenses/LICENSE +0 -0
owlplanner/timelists.py CHANGED
@@ -1,24 +1,30 @@
  """
+ Time horizon data validation and processing utilities.

- Owl/timelists
- ---
+ This module provides utility functions to read and validate timelist data
+ from Excel files, including wage, contribution, and other time-based parameters.

- A retirement planner using linear programming optimization.
+ Copyright (C) 2025-2026 The Owlplanner Authors

- See companion document for a complete explanation and description
- of all variables and parameters.
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.

- Utility functions to read and check timelists.
-
- Copyright © 2024 - Martin-D. Lacasse
-
- Disclaimers: This code is for educational purposes only and does not constitute financial advice.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.

+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
  """

  from datetime import date
  import pandas as pd

+ from . import utils as u
+

  # Expected headers in each excel sheet, one per individual.
  _timeHorizonItems = [
@@ -34,7 +40,47 @@ _timeHorizonItems = [
  ]


- def read(finput, inames, horizons, mylog):
+ _debtItems = [
+     "active",
+     "name",
+     "type",
+     "year",
+     "term",
+     "amount",
+     "rate",
+ ]
+
+
+ _debtTypes = [
+     "loan",
+     "mortgage",
+ ]
+
+
+ _fixedAssetItems = [
+     "active",
+     "name",
+     "type",
+     "year",
+     "basis",
+     "value",
+     "rate",
+     "yod",
+     "commission",
+ ]
+
+
+ _fixedAssetTypes = [
+     "collectibles",
+     "fixed annuity",
+     "precious metals",
+     "real estate",
+     "residence",
+     "stocks",
+ ]
+
+
+ def read(finput, inames, horizons, mylog, filename=None):
      """
      Read listed parameters from an excel spreadsheet or through
      a dictionary of dataframes through Pandas.
@@ -43,6 +89,20 @@ def read(finput, inames, horizons, mylog):
      IRA ctrb, Roth IRA ctrb, Roth conv, and big-ticket items.
      Supports xls, xlsx, xlsm, xlsb, odf, ods, and odt file extensions.
      Return a dictionary of dataframes by individual's names.
+
+     Parameters
+     ----------
+     finput : file-like object, str, or dict
+         Input file or dictionary of DataFrames
+     inames : list
+         List of individual names
+     horizons : list
+         List of time horizons
+     mylog : logger
+         Logger instance
+     filename : str, optional
+         Explicit filename for logging purposes. If provided, this will be used
+         instead of trying to extract it from finput.
      """

      mylog.vprint("Reading wages, contributions, conversions, and big-ticket items over time...")
@@ -52,21 +112,51 @@ def read(finput, inames, horizons, mylog):
          finput = "dictionary of DataFrames"
          streamName = "dictionary of DataFrames"
      else:
+         if filename is not None:
+             streamName = f"file '{filename}'"
+         elif hasattr(finput, "name"):
+             streamName = f"file '{finput.name}'"
+         else:
+             streamName = finput
+
          # Read all worksheets in memory but only process those with proper names.
          try:
-             dfDict = pd.read_excel(finput, sheet_name=None, usecols=_timeHorizonItems)
+             # dfDict = pd.read_excel(finput, sheet_name=None, usecols=_timeHorizonItems)
+             dfDict = pd.read_excel(finput, sheet_name=None)
          except Exception as e:
-             raise Exception(f"Could not read file {finput}: {e}.") from e
-         streamName = f"file '{finput}'"
-
-     timeLists = _condition(dfDict, inames, horizons, mylog)
+             raise Exception(f"Could not read file {streamName}: {e}.") from e

+     timeLists = _conditionTimetables(dfDict, inames, horizons, mylog)
      mylog.vprint(f"Successfully read time horizons from {streamName}.")

-     return finput, timeLists
+     houseLists = _conditionHouseTables(dfDict, mylog)
+     mylog.vprint(f"Successfully read household tables from {streamName}.")
+
+     return finput, timeLists, houseLists
+
+
+ def _checkColumns(df, iname, colList):
+     """
+     Ensure all columns in colList are present. Remove others.
+     """
+     # Drop all columns not in the list.
+     # Make an explicit copy to avoid SettingWithCopyWarning
+     df = df.loc[:, ~df.columns.str.contains("^Unnamed")].copy()
+
+     # Collect columns to drop
+     cols_to_drop = [col for col in df.columns if col == "" or col not in colList]
+     if cols_to_drop:
+         df = df.drop(cols_to_drop, axis=1)
+
+     # Check that all columns in the list are present.
+     for item in colList:
+         if item not in df.columns:
+             raise ValueError(f"Column {item} not found for {iname}.")

+     return df

- def _condition(dfDict, inames, horizons, mylog):
+
+ def _conditionTimetables(dfDict, inames, horizons, mylog):
      """
      Make sure that time horizons contain all years up to life expectancy,
      and that values are positive (except big-ticket items).
@@ -81,14 +171,10 @@ def _condition(dfDict, inames, horizons, mylog):

          df = dfDict[iname]

-         df = df.loc[:, ~df.columns.str.contains("^Unnamed")]
-         for col in df.columns:
-             if col == "" or col not in _timeHorizonItems:
-                 df.drop(col, axis=1, inplace=True)
+         df = _checkColumns(df, iname, _timeHorizonItems)

-         for item in _timeHorizonItems:
-             if item not in df.columns:
-                 raise ValueError(f"Item {item} not found for {iname}.")
+         # Ensure columns are in the correct order
+         df = df[_timeHorizonItems].copy()

          # Only consider lines in proper year range. Go back 5 years for Roth maturation.
          df = df[df["year"] >= (thisyear - 5)]
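For context only (not part of the package diff): the "^Unnamed" pattern dropped by _checkColumns matches the placeholder names pandas assigns to blank header cells when reading a spreadsheet. A minimal standalone sketch of that cleanup, using invented column names:

    import pandas as pd

    # Made-up frame mimicking a sheet read with one blank header cell.
    df = pd.DataFrame({"year": [2026], "wages": [50000], "Unnamed: 2": [None]})
    df = df.loc[:, ~df.columns.str.contains("^Unnamed")].copy()
    print(list(df.columns))  # ['year', 'wages']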
@@ -97,13 +183,17 @@ def _condition(dfDict, inames, horizons, mylog):
          missing = []
          for n in range(-5, horizons[i]):
              year = thisyear + n
-             if not (df[df["year"] == year]).any(axis=None):
-                 df.loc[len(df)] = [year, 0, 0, 0, 0, 0, 0, 0, 0]
+             year_rows = df[df["year"] == year]
+             if year_rows.empty:
+                 # Create a new row as a dictionary to ensure correct column mapping.
+                 new_row = {col: 0 for col in _timeHorizonItems}
+                 new_row["year"] = year
+                 df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True)
                  missing.append(year)
              else:
                  for item in _timeHorizonItems:
-                     if item != "big-ticket items" and df[item].iloc[n] < 0:
-                         raise ValueError(f"Item {item} for {iname} in year {df['year'].iloc[n]} is < 0.")
+                     if item != "big-ticket items" and year_rows[item].iloc[0] < 0:
+                         raise ValueError(f"Item {item} for {iname} in year {year} is < 0.")

          if len(missing) > 0:
              mylog.vprint(f"Adding {len(missing)} missing years for {iname}: {missing}.")
@@ -115,8 +205,202 @@ def _condition(dfDict, inames, horizons, mylog):
          timeLists[iname] = df

          if df["year"].iloc[-1] != endyear - 1:
-             raise ValueError(
-                 f"Time horizon for {iname} too short.\n\tIt should end in {endyear}, not {df['year'].iloc[-1]}"
-             )
+             raise ValueError(f"""Time horizon for {iname} too short.\n\t
+                 It should end in {endyear}, not {df['year'].iloc[-1]}""")

      return timeLists
+
+
+ def _conditionHouseTables(dfDict, mylog):
+     """
+     Read debts and fixed assets from Household Financial Profile workbook.
+     """
+     houseDic = {}
+
+     items = {"Debts" : _debtItems, "Fixed Assets": _fixedAssetItems}
+     types = {"Debts" : _debtTypes, "Fixed Assets": _fixedAssetTypes}
+     for page in items.keys():
+         if page in dfDict:
+             df = dfDict[page]
+             df = _checkColumns(df, page, items[page])
+             # Check categorical variables.
+             isInList = df["type"].isin(types[page])
+             df = df[isInList]
+
+             # Convert percentage columns from decimal to percentage if needed
+             # UI uses 0-100 range for percentages (e.g., 4.5 = 4.5%)
+             # If Excel read percentage-formatted cells, values might be decimals (0.045)
+             # Convert values < 1.0 to percentage format (multiply by 100)
+             if page == "Debts" and "rate" in df.columns:
+                 # If rate values are less than 1, assume they're decimals (0.045 = 4.5%)
+                 # and convert to percentages (4.5) to match UI format (0-100 range)
+                 mask = (df["rate"] < 1.0) & (df["rate"] > 0)
+                 if mask.any():
+                     df.loc[mask, "rate"] = df.loc[mask, "rate"] * 100.0
+                     mylog.vprint(f"Converted {mask.sum()} rate value(s) from decimal to percentage in Debts table.")
+
+             elif page == "Fixed Assets":
+                 # Convert rate and commission if they're decimals
+                 # Both should be in 0-100 range to match UI format
+                 if "rate" in df.columns:
+                     mask = (df["rate"] < 1.0) & (df["rate"] > 0)
+                     if mask.any():
+                         df.loc[mask, "rate"] = df.loc[mask, "rate"] * 100.0
+                         mylog.vprint(
+                             f"Converted {mask.sum()} rate value(s) from decimal "
+                             f"to percentage in Fixed Assets table."
+                         )
+                 if "commission" in df.columns:
+                     mask = (df["commission"] < 1.0) & (df["commission"] > 0)
+                     if mask.any():
+                         df.loc[mask, "commission"] = df.loc[mask, "commission"] * 100.0
+                         mylog.vprint(
+                             f"Converted {mask.sum()} commission value(s) from decimal "
+                             f"to percentage in Fixed Assets table."
+                         )
+                 # Validate and reset "year" column (reference year) if in the past
+                 if "year" in df.columns:
+                     thisyear = date.today().year
+                     mask = df["year"] < thisyear
+                     if mask.any():
+                         df.loc[mask, "year"] = thisyear
+                         mylog.vprint(
+                             f"Reset {mask.sum()} reference year value(s) to {thisyear} "
+                             f"in Fixed Assets table (years cannot be in the past)."
+                         )
+
+             # Convert "active" column to boolean if it exists.
+             # Excel may read booleans as strings ("True"/"False") or numbers (1/0).
+             if "active" in df.columns:
+                 df["active"] = df["active"].apply(u.convert_to_bool).astype(bool)
+
+             houseDic[page] = df
+             mylog.vprint(f"Found {len(df)} valid row(s) in {page} table.")
+         else:
+             houseDic[page] = pd.DataFrame(columns=items[page])
+             mylog.vprint(f"Table for {page} not found. Assuming empty table.")
+
+     return houseDic
+
+
+ def conditionDebtsAndFixedAssetsDF(df, tableType, mylog=None):
+     """
+     Condition a DataFrame for Debts or Fixed Assets by:
+     - Creating an empty DataFrame with proper columns if df is None or empty
+     - Resetting the index
+     - Filling NaN values with 0 while preserving boolean columns (like "active")
+
+     Parameters
+     ----------
+     df : pandas.DataFrame or None
+         The DataFrame to condition, or None/empty to create a new empty DataFrame
+     tableType : str
+         Type of table: "Debts" or "Fixed Assets"
+     mylog : logger, optional
+         Logger instance for optional UI/log output
+
+     Returns
+     -------
+     pandas.DataFrame
+         Conditioned DataFrame with proper columns and no NaN values (except boolean columns default to True)
+     """
+     # Map table type to column items
+     items = {"Debts": _debtItems, "Fixed Assets": _fixedAssetItems}
+     if tableType not in items:
+         raise ValueError(f"tableType must be 'Debts' or 'Fixed Assets', got '{tableType}'")
+
+     columnItems = items[tableType]
+
+     df = u.ensure_dataframe(df, pd.DataFrame(columns=columnItems))
+
+     df = df.copy()
+     df.reset_index(drop=True, inplace=True)
+
+     # Ensure all required columns exist
+     for col in columnItems:
+         if col not in df.columns:
+             df[col] = None
+
+     # Only keep the columns we need, in the correct order
+     df = df[columnItems].copy()
+
+     # Define which columns are integers vs floats
+     if tableType == "Debts":
+         int_cols = ["year", "term"]
+         float_cols = ["amount", "rate"]
+     else:  # Fixed Assets
+         int_cols = ["year", "yod"]
+         float_cols = ["basis", "value", "rate", "commission"]
+
+     # Handle empty DataFrame by setting dtypes directly
+     if len(df) == 0:
+         dtype_dict = {}
+         dtype_dict["active"] = bool
+         for col in ["name", "type"]:
+             dtype_dict[col] = "object"  # string columns
+         for col in int_cols:
+             dtype_dict[col] = "int64"
+         for col in float_cols:
+             dtype_dict[col] = "float64"
+         df = df.astype(dtype_dict)
+     else:
+         # Fill NaN values and ensure proper types for non-empty DataFrame
+         for col in df.columns:
+             if col == "active":
+                 # Ensure "active" column is boolean, handling strings/numbers from Excel
+                 df[col] = df[col].apply(u.convert_to_bool).astype(bool)
+             elif col in ["name", "type"]:
+                 # String columns: ensure they are strings, not lists
+                 # Streamlit data_editor can return lists for string columns in some cases
+                 def convert_to_string(val):
+                     if pd.isna(val) or val is None:
+                         return ""
+                     if isinstance(val, list):
+                         # If it's a list, join the elements (handles Streamlit data_editor edge cases)
+                         # Filter out None/NaN values before joining
+                         cleaned = [str(x) for x in val if x is not None and not pd.isna(x)]
+                         return " ".join(cleaned) if cleaned else ""
+                     return str(val)
+
+                 df[col] = df[col].apply(convert_to_string).astype(str)
+                 # Replace "nan" string with empty string
+                 df[col] = df[col].replace("nan", "").replace("None", "")
+             elif col in int_cols:
+                 # Integer columns: convert to int64, fill NaN with 0
+                 df[col] = pd.to_numeric(df[col], errors="coerce").fillna(0).astype("int64")
+             elif col in float_cols:
+                 # Float columns: convert to float64, fill NaN with 0.0
+                 df[col] = pd.to_numeric(df[col], errors="coerce").fillna(0.0).astype("float64")
+
+     # For Fixed Assets, validate and reset "year" column if in the past
+     if tableType == "Fixed Assets" and "year" in df.columns and len(df) > 0:
+         thisyear = date.today().year
+         mask = df["year"] < thisyear
+         if mask.any():
+             df.loc[mask, "year"] = thisyear
+
+     if mylog is not None:
+         mylog.vprint(f"Found {len(df)} valid row(s) in {tableType} table.")
+
+     return df
+
+
+ def getTableTypes(tableType):
+     """
+     Get the list of valid types for a given table type.
+
+     Parameters
+     ----------
+     tableType : str
+         Type of table: "Debts" or "Fixed Assets"
+
+     Returns
+     -------
+     list
+         List of valid types for the specified table
+     """
+     types = {"Debts": _debtTypes, "Fixed Assets": _fixedAssetTypes}
+     if tableType not in types:
+         raise ValueError(f"tableType must be 'Debts' or 'Fixed Assets', got '{tableType}'")
+
+     return types[tableType]
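For illustration only (not from the package): a rough usage sketch of the two new public helpers, assuming owlplanner 2026.1.26 is installed and importable as owlplanner.timelists; the Debts row below is invented:

    import pandas as pd
    from owlplanner import timelists

    print(timelists.getTableTypes("Debts"))   # ['loan', 'mortgage']

    # Invented single-row Debts table; the missing "active" column is added
    # (defaulting to True) and numeric columns are coerced to int64/float64.
    raw = pd.DataFrame([{"name": "car loan", "type": "loan", "year": 2026,
                         "term": 5, "amount": 18000, "rate": 6.5}])
    debts = timelists.conditionDebtsAndFixedAssetsDF(raw, "Debts")
    print(debts.dtypes)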
owlplanner/utils.py CHANGED
@@ -1,17 +1,28 @@
  """
+ Utility functions for data formatting and manipulation.

- Owl/utils
+ This module provides helper functions for formatting currency, percentages,
+ and other data transformations used throughout the retirement planner.

- This file contains functions for handling data.
+ Copyright (C) 2025-2026 The Owlplanner Authors

- Copyright &copy; 2024 - Martin-D. Lacasse
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.

- Disclaimers: This code is for educational purposes only and does not constitute financial advice.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.

+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
  """

  ######################################################################
  import numpy as np
+ import pandas as pd


  def d(value, f=0, latex=False) -> str:
@@ -70,7 +81,16 @@ def getUnits(units) -> int:
      return fac


- # Next two functins could be a one-line lambda functions.
+ def get_numeric_option(options, key, default, *, min_value=None) -> float:
+     value = options.get(key, default)
+     if not isinstance(value, (int, float)):
+         raise ValueError(f"{key} {value} is not a number.")
+     if min_value is not None and value < min_value:
+         raise ValueError(f"{key} must be >= {min_value}.")
+     return float(value)
+
+
+ # Next two functions could be a one-line lambda functions.
  # e.g., krond = lambda a, b: 1 if a == b else 0
  def krond(a, b) -> int:
      """
@@ -101,3 +121,182 @@ def roundCents(values, decimals=2):
      arr = np.where((-0.009 < arr) & (arr <= 0), 0, arr)

      return arr
+
+
+ def parseDobs(dobs):
+     """
+     Parse a list of dates and return int32 arrays of year, months, days.
+     """
+     icount = len(dobs)
+     yobs = []
+     mobs = []
+     tobs = []
+     for i in range(icount):
+         ls = dobs[i].split("-")
+         if len(ls) != 3:
+             raise ValueError(f"Date {dobs[i]} not in ISO format.")
+         if not 1 <= int(ls[1]) <= 12:
+             raise ValueError(f"Month in date {dobs[i]} not valid.")
+         if not 1 <= int(ls[2]) <= 31:
+             raise ValueError(f"Day in date {dobs[i]} not valid.")
+
+         yobs.append(ls[0])
+         mobs.append(ls[1])
+         tobs.append(ls[2])
+
+     return np.array(yobs, dtype=np.int32), np.array(mobs, dtype=np.int32), np.array(tobs, dtype=np.int32)
+
+
+ def is_row_active(row):
+     """
+     Check if a DataFrame row should be processed based on 'active' column.
+
+     This function handles the common pattern of checking whether a row in a DataFrame
+     should be processed based on its 'active' column value. The logic is:
+     - If 'active' column doesn't exist, the row is considered active (default behavior)
+     - If 'active' value is NaN or None, the row is considered active (default behavior)
+     - If 'active' value is explicitly False (or falsy), the row is considered inactive
+     - Otherwise (True or truthy), the row is considered active
+
+     Parameters
+     ----------
+     row : pd.Series
+         A pandas Series representing a row from a DataFrame. The row should have
+         an 'active' column (or index entry) if the active/inactive status is to be checked.
+
+     Returns
+     -------
+     bool
+         True if the row should be processed (is active), False if it should be skipped (is inactive).
+     """
+     if "active" not in row.index:
+         return True  # Default to active if column doesn't exist
+     active_value = row["active"]
+     if pd.isna(active_value) or active_value is None:
+         return True  # NaN/None means active
+     return bool(active_value)
+
+
+ def is_dataframe_empty(df):
+     """
+     Check if a DataFrame is None or empty.
+
+     This function consolidates the common pattern of checking
+     `df is None or df.empty` throughout the codebase.
+
+     Parameters
+     ----------
+     df : pd.DataFrame or None
+         The DataFrame to check. Can be None or an empty DataFrame.
+
+     Returns
+     -------
+     bool
+         True if df is None or empty, False otherwise.
+     """
+     return df is None or df.empty
+
+
+ def ensure_dataframe(df, default_empty=None):
+     """
+     Ensure DataFrame is not None or empty, return default if needed.
+
+     This function checks if a DataFrame is None or empty and returns a default
+     value if so. This consolidates the common pattern of checking
+     `df is None or df.empty` throughout the codebase.
+
+     Parameters
+     ----------
+     df : pd.DataFrame or None
+         The DataFrame to check. Can be None or an empty DataFrame.
+     default_empty : any, optional
+         The value to return if df is None or empty. Default is None.
+         Common values are 0.0, np.zeros(N_n), or a default DataFrame.
+
+     Returns
+     -------
+     any
+         Returns default_empty if df is None or empty, otherwise returns df.
+     """
+     if is_dataframe_empty(df):
+         return default_empty
+     return df
+
+
+ def get_empty_array_or_value(N_n, default_value=0.0):
+     """
+     Return empty array or single value based on context.
+
+     This helper function returns either a numpy array of zeros with length N_n
+     if N_n is provided, or a single default value if N_n is None.
+
+     Parameters
+     ----------
+     N_n : int or None
+         Length of the array to create. If None, returns default_value instead.
+     default_value : float, optional
+         Default value to return if N_n is None. Default is 0.0.
+
+     Returns
+     -------
+     np.ndarray or float
+         Returns np.zeros(N_n) if N_n is not None, otherwise returns default_value.
+     """
+     if N_n is not None:
+         return np.zeros(N_n)
+     return default_value
+
+
+ def convert_to_bool(val):
+     """
+     Convert various input types to boolean.
+
+     Handles conversion from strings, numbers, booleans, and NaN values.
+     Excel may read booleans as strings ("True"/"False") or numbers (1/0),
+     so this function provides robust conversion.
+
+     Parameters
+     ----------
+     val : any
+         Value to convert to boolean. Can be:
+         - bool: returned as-is
+         - str: "True", "False", "1", "0", "yes", "no", etc.
+         - numeric: 1/0 or other numeric values
+         - None/NaN: defaults to True
+
+     Returns
+     -------
+     bool
+         Boolean value. NaN/None and unknown values default to True.
+     """
+     # Check for None first (before pd.isna which can fail on some types)
+     if val is None:
+         return True  # Default to True for None
+
+     # Check for NaN, but handle cases where pd.isna might fail (e.g., empty lists)
+     try:
+         if pd.isna(val):
+             return True  # Default to True for NaN
+     except (ValueError, TypeError):
+         # pd.isna can raise ValueError for empty arrays/lists
+         # or TypeError for unhashable types - treat as non-NaN and continue
+         pass
+     if isinstance(val, bool):
+         return val
+     if isinstance(val, str):
+         # Handle string representations
+         val_lower = val.lower().strip()
+         if val_lower in ("true", "1", "yes", "y"):
+             return True
+         elif val_lower in ("false", "0", "no", "n"):
+             return False
+         else:
+             # Unknown string, default to True
+             return True
+     # Handle numeric values (1/0)
+     try:
+         num_val = float(val)
+         return bool(num_val) if num_val != 0 else False
+     except (ValueError, TypeError):
+         # Can't convert, default to True
+         return True
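For illustration only (not from the package): a quick tour of the new utils helpers and their defaults, assuming owlplanner 2026.1.26 is importable; all input values are invented:

    import pandas as pd
    from owlplanner import utils

    # Excel-ish truthy/falsy inputs; unknowns and NaN deliberately default to True.
    print(utils.convert_to_bool("Yes"))          # True
    print(utils.convert_to_bool(0))              # False
    print(utils.convert_to_bool(float("nan")))   # True

    # Rows with no "active" entry, or a NaN one, are treated as active.
    row = pd.Series({"name": "car loan", "active": float("nan")})
    print(utils.is_row_active(row))              # True

    # ISO date strings become int32 year/month/day arrays.
    yy, mm, dd = utils.parseDobs(["1965-07-04", "1968-01-31"])
    print(yy, mm, dd)                            # [1965 1968], [7 1], [4 31]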
owlplanner/version.py CHANGED
@@ -1 +1,20 @@
- __version__ = "2025.12.05"
+ """
+ Package version information.
+
+ Copyright (C) 2025-2026 The Owlplanner Authors
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ """
+
+ __version__ = "2026.01.26"