datupapi 1.110.1__py3-none-any.whl → 1.111.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- datupapi/inventory/src/FutureInventory/future_reorder.py +1105 -166
- {datupapi-1.110.1.dist-info → datupapi-1.111.0.dist-info}/METADATA +1 -1
- {datupapi-1.110.1.dist-info → datupapi-1.111.0.dist-info}/RECORD +5 -5
- {datupapi-1.110.1.dist-info → datupapi-1.111.0.dist-info}/WHEEL +1 -1
- {datupapi-1.110.1.dist-info → datupapi-1.111.0.dist-info}/top_level.txt +0 -0
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import pandas as pd
|
|
2
2
|
import os
|
|
3
|
+
import ast
|
|
3
4
|
import numpy as np
|
|
4
5
|
from datetime import timedelta
|
|
5
6
|
from datupapi.utils.utils import Utils
|
|
@@ -8,10 +9,83 @@ from datupapi.inventory.src.FutureInventory.daily_usage_future import DailyUsage
|
|
|
8
9
|
|
|
9
10
|
|
|
10
11
|
class FutureReorder():
|
|
12
|
+
"""
|
|
13
|
+
A class for calculating future inventory reorder points and quantities.
|
|
14
|
+
|
|
15
|
+
This class implements a sophisticated inventory management system that:
|
|
16
|
+
- Calculates optimal reorder points based on forecasted demand
|
|
17
|
+
- Manages in-transit inventory and arrival schedules
|
|
18
|
+
- Determines safety stock levels using statistical or reference methods
|
|
19
|
+
- Generates reorder recommendations for multiple future periods
|
|
20
|
+
- Supports both single-location and multi-location inventory
|
|
21
|
+
|
|
22
|
+
The system uses dynamic coverage strategies to optimize inventory levels
|
|
23
|
+
while maintaining adequate safety stock to prevent stockouts.
|
|
24
|
+
|
|
25
|
+
Output Fields:
|
|
26
|
+
- FutureInventoryTransit: Total future inventory (stock + transit)
|
|
27
|
+
- FutureInventory: Future inventory in stock only
|
|
28
|
+
- FutureTransit: Future inventory in transit only
|
|
29
|
+
- FutureInventoryTransitArrival: Future inventory in stock + arrivals in the period
|
|
30
|
+
"""
|
|
11
31
|
|
|
12
|
-
def __init__(self, df_inv, df_lead_time, df_prep, df_fcst, periods, start_date, location=False, security_stock_ref=False):
|
|
32
|
+
def __init__(self, df_inv, df_lead_time, df_prep, df_fcst, periods, start_date, location=False, security_stock_ref=False, df_transit=None, integer=True, complete_suggested=False, start_date_zero=None):
|
|
33
|
+
"""
|
|
34
|
+
Initialize the FutureReorder instance.
|
|
35
|
+
|
|
36
|
+
Args:
|
|
37
|
+
df_inv (pd.DataFrame): Current inventory data with columns:
|
|
38
|
+
- Item: Item identifier
|
|
39
|
+
- Location: Location identifier (if location=True)
|
|
40
|
+
- Inventory: Current on-hand stock
|
|
41
|
+
- Transit: In-transit quantity
|
|
42
|
+
- PurchaseFactor: Minimum order multiple
|
|
43
|
+
|
|
44
|
+
df_lead_time (pd.DataFrame): Lead time and reorder parameters:
|
|
45
|
+
- Item: Item identifier
|
|
46
|
+
- Location: Location identifier (if location=True)
|
|
47
|
+
- ReorderFreq: Days between reorders (default: 30)
|
|
48
|
+
- AvgLeadTime: Average lead time in days
|
|
49
|
+
- MaxLeadTime: Maximum lead time in days
|
|
50
|
+
- Coverage: Total coverage days (optional)
|
|
51
|
+
- SecurityStockDaysRef: Reference days for safety stock (optional)
|
|
52
|
+
|
|
53
|
+
df_prep (pd.DataFrame): Preparation data for forecast calculations
|
|
54
|
+
|
|
55
|
+
df_fcst (pd.DataFrame): Forecast data containing demand predictions
|
|
56
|
+
|
|
57
|
+
periods (int): Number of future periods to calculate
|
|
58
|
+
|
|
59
|
+
start_date (str): Starting date for calculations (format: 'YYYY-MM-DD')
|
|
60
|
+
|
|
61
|
+
location (bool, optional): Whether to process by location. Defaults to False.
|
|
62
|
+
|
|
63
|
+
security_stock_ref (bool, optional): Use reference days method for safety stock
|
|
64
|
+
calculation instead of statistical method. Defaults to False.
|
|
65
|
+
|
|
66
|
+
df_transit (pd.DataFrame, optional): Transit arrival schedule with columns:
|
|
67
|
+
- Item: Item identifier
|
|
68
|
+
- Location: Location identifier (if location=True)
|
|
69
|
+
- Transit: Partial transit quantity
|
|
70
|
+
- ArrivalDate: Arrival date (format: 'YYYY-MM-DD')
|
|
71
|
+
If None, complete transit arrives in period 1. Defaults to None.
|
|
72
|
+
|
|
73
|
+
integer (bool, optional): Controls numeric formatting of quantity fields.
|
|
74
|
+
When True, quantity fields are displayed as integers.
|
|
75
|
+
When False, quantity fields are displayed with decimals.
|
|
76
|
+
Defaults to True.
|
|
77
|
+
|
|
78
|
+
complete_suggested (bool, optional): When True, uses the last calculated
|
|
79
|
+
SuggestedForecast value for periods without forecast data instead of
|
|
80
|
+
raising an error. Defaults to False.
|
|
81
|
+
|
|
82
|
+
start_date_zero (str, optional): Custom start date for period 0 (format: 'YYYY-MM-DD').
|
|
83
|
+
When None (default), uses the current system date for period 0.
|
|
84
|
+
When specified, uses this date as the starting point for period 0 instead
|
|
85
|
+
of the current system date. Defaults to None.
|
|
86
|
+
"""
|
|
13
87
|
self.df_inv = df_inv
|
|
14
|
-
self.df_lead_time = df_lead_time
|
|
88
|
+
self.df_lead_time = df_lead_time
|
|
15
89
|
self.df_prep = df_prep
|
|
16
90
|
self.df_fcst = df_fcst
|
|
17
91
|
self.default_coverage = 30
|
|
@@ -19,204 +93,1069 @@ class FutureReorder():
|
|
|
19
93
|
self.start_date = pd.to_datetime(start_date, format='%Y-%m-%d')
|
|
20
94
|
self.location = location
|
|
21
95
|
self.security_stock_ref = security_stock_ref
|
|
96
|
+
self.df_transit = df_transit
|
|
97
|
+
self.integer = integer
|
|
98
|
+
self.complete_suggested = complete_suggested
|
|
99
|
+
self.start_date_zero = start_date_zero
|
|
100
|
+
|
|
101
|
+
# Initialize metadata columns based on location usage
|
|
102
|
+
self.metadata = ['Item']
|
|
103
|
+
if self.location:
|
|
104
|
+
self.metadata.append('Location')
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def _format_value(self, value, field_name):
|
|
108
|
+
"""
|
|
109
|
+
Apply appropriate formatting based on field type and integer setting.
|
|
110
|
+
|
|
111
|
+
Args:
|
|
112
|
+
value: The numeric value to format (scalar or Series)
|
|
113
|
+
field_name: The name of the field to determine formatting rules
|
|
114
|
+
|
|
115
|
+
Returns:
|
|
116
|
+
Formatted value (int or float with 2 decimals)
|
|
117
|
+
"""
|
|
118
|
+
# Handle pandas Series - extract scalar value
|
|
119
|
+
if isinstance(value, pd.Series):
|
|
120
|
+
if len(value) == 1:
|
|
121
|
+
value = value.iloc[0]
|
|
122
|
+
else:
|
|
123
|
+
raise ValueError(f"Expected scalar value for {field_name}, got Series with {len(value)} elements")
|
|
124
|
+
|
|
125
|
+
# Handle NaN, None, and infinite values
|
|
126
|
+
if pd.isna(value) or value is None:
|
|
127
|
+
return 0
|
|
128
|
+
if np.isinf(value):
|
|
129
|
+
return 0
|
|
130
|
+
|
|
131
|
+
# Fields that are ALWAYS integers
|
|
132
|
+
always_integer_fields = [
|
|
133
|
+
'PurchaseFactor', 'AvgLeadTime', 'MaxLeadTime',
|
|
134
|
+
'ReorderQtyDays', 'ReorderFreq', 'Coverage', 'FutureStockoutDays'
|
|
135
|
+
]
|
|
136
|
+
|
|
137
|
+
# Fields that are ALWAYS decimals (2 decimal places)
|
|
138
|
+
always_decimal_fields = ['AvgDailyUsage', 'MaxDailyUsage']
|
|
139
|
+
|
|
140
|
+
# Fields that change based on self.integer setting
|
|
141
|
+
quantity_fields = [
|
|
142
|
+
'FutureInventoryTransit', 'FutureInventory', 'FutureTransit',
|
|
143
|
+
'FutureInventoryTransitArrival', 'SuggestedForecast', 'SuggestedForecastPeriod',
|
|
144
|
+
'ReorderPoint', 'ReorderQtyBase', 'ReorderQty', 'SecurityStock', 'Inventory', 'Transit'
|
|
145
|
+
]
|
|
146
|
+
|
|
147
|
+
if field_name in always_integer_fields:
|
|
148
|
+
return int(round(value))
|
|
149
|
+
elif field_name in always_decimal_fields:
|
|
150
|
+
return round(value, 2)
|
|
151
|
+
elif field_name in quantity_fields:
|
|
152
|
+
if self.integer:
|
|
153
|
+
return int(round(value))
|
|
154
|
+
else:
|
|
155
|
+
return round(value, 2)
|
|
156
|
+
else:
|
|
157
|
+
# Default: return as is
|
|
158
|
+
return value
|
|
22
159
|
|
|
23
160
|
|
|
24
161
|
def future_date(self):
|
|
162
|
+
"""
|
|
163
|
+
Generate future reorder dates for each item based on reorder frequency.
|
|
164
|
+
|
|
165
|
+
This method creates a schedule of dates when reorders should be evaluated
|
|
166
|
+
for each item (or item-location combination). The schedule includes:
|
|
167
|
+
1. Current date (always first)
|
|
168
|
+
2. Start date (if after current date)
|
|
169
|
+
3. Subsequent dates at reorder frequency intervals
|
|
170
|
+
|
|
171
|
+
This optimized version groups items by reorder frequency for better performance
|
|
172
|
+
with large datasets.
|
|
173
|
+
|
|
174
|
+
Returns:
|
|
175
|
+
dict: Dictionary mapping item (or (item, location) tuple) to list of
|
|
176
|
+
reorder dates in 'YYYYMMDD' format.
|
|
177
|
+
|
|
178
|
+
Example:
|
|
179
|
+
{
|
|
180
|
+
'ITEM001': ['20240101', '20240115', '20240214', ...],
|
|
181
|
+
('ITEM002', 'LOC1'): ['20240101', '20240120', '20240219', ...]
|
|
182
|
+
}
|
|
183
|
+
"""
|
|
184
|
+
# Determine the starting date for period 0
|
|
185
|
+
if self.start_date_zero is not None:
|
|
186
|
+
# Use custom start date for period 0
|
|
187
|
+
actual_date = pd.to_datetime(self.start_date_zero, format='%Y-%m-%d')
|
|
188
|
+
else:
|
|
189
|
+
# Use current system date for period 0 (original behavior)
|
|
190
|
+
DOCKER_CONFIG_PATH = os.path.join('/opt/ml/processing/input', 'config.yml')
|
|
191
|
+
utils = Utils(config_file=DOCKER_CONFIG_PATH, logfile='data_io', log_path='output/logs')
|
|
192
|
+
timestamp = utils.set_timestamp()
|
|
193
|
+
actual_date = pd.to_datetime(str(int(float(timestamp[0:8]))), format='%Y%m%d')
|
|
194
|
+
|
|
195
|
+
end_date = actual_date + pd.DateOffset(months=self.periods)
|
|
196
|
+
|
|
197
|
+
# Get unique items with their reorder frequencies
|
|
198
|
+
columns = self.metadata + ['ReorderFreq']
|
|
199
|
+
df_unique = self.df_lead_time[columns].drop_duplicates().copy()
|
|
200
|
+
|
|
201
|
+
# Process ReorderFreq values
|
|
202
|
+
df_unique['ReorderFreq'] = df_unique['ReorderFreq'].fillna(self.default_coverage)
|
|
203
|
+
df_unique.loc[df_unique['ReorderFreq'] == 0, 'ReorderFreq'] = self.default_coverage
|
|
204
|
+
df_unique['ReorderFreq'] = df_unique['ReorderFreq'].astype(int)
|
|
205
|
+
|
|
206
|
+
# Pre-allocate result dictionary
|
|
207
|
+
item_dates = {}
|
|
208
|
+
|
|
209
|
+
# Group by ReorderFreq for batch processing - more efficient for large datasets
|
|
210
|
+
for freq, group in df_unique.groupby('ReorderFreq'):
|
|
211
|
+
# Generate date range for this frequency
|
|
212
|
+
date_range = []
|
|
213
|
+
|
|
214
|
+
# Always include actual date
|
|
215
|
+
date_range.append(actual_date)
|
|
216
|
+
|
|
217
|
+
# Include start_date if after actual_date
|
|
218
|
+
if self.start_date > actual_date:
|
|
219
|
+
date_range.append(self.start_date)
|
|
220
|
+
|
|
221
|
+
# Generate subsequent dates using pandas date_range for efficiency
|
|
222
|
+
num_periods = int((end_date - self.start_date).days / freq) + 1
|
|
223
|
+
future_dates = pd.date_range(
|
|
224
|
+
start=self.start_date + timedelta(days=freq),
|
|
225
|
+
periods=num_periods,
|
|
226
|
+
freq=f'{freq}D'
|
|
227
|
+
)
|
|
228
|
+
date_range.extend(future_dates[future_dates <= end_date])
|
|
229
|
+
|
|
230
|
+
# Convert to string format
|
|
231
|
+
date_strings = [d.strftime('%Y%m%d') for d in date_range]
|
|
232
|
+
|
|
233
|
+
# Assign to all items in this group
|
|
234
|
+
for _, row in group.iterrows():
|
|
235
|
+
if self.location:
|
|
236
|
+
key = (row['Item'], row['Location'])
|
|
237
|
+
else:
|
|
238
|
+
key = row['Item']
|
|
239
|
+
item_dates[key] = date_strings
|
|
240
|
+
|
|
241
|
+
return item_dates
|
|
25
242
|
|
|
26
|
-
'''Function to calculate the future dates by Item or Item-Location'''
|
|
27
243
|
|
|
28
|
-
|
|
29
|
-
|
|
244
|
+
def _get_current_dataframes(self, item, location=None):
|
|
245
|
+
"""
|
|
246
|
+
Get filtered dataframes for current item/location combination.
|
|
247
|
+
|
|
248
|
+
Args:
|
|
249
|
+
item (str): Item identifier to filter for
|
|
250
|
+
location (str, optional): Location identifier if using multi-location mode
|
|
251
|
+
|
|
252
|
+
Returns:
|
|
253
|
+
tuple: (current_df_lead_time, current_df_inv)
|
|
254
|
+
- current_df_lead_time: Lead time data filtered for item/location
|
|
255
|
+
- current_df_inv: Inventory data filtered for item/location
|
|
256
|
+
"""
|
|
257
|
+
# Create filter mask based on item
|
|
258
|
+
mask_lead_time = self.df_lead_time['Item'] == item
|
|
259
|
+
mask_inv = self.df_inv['Item'] == item
|
|
260
|
+
|
|
261
|
+
# Add location filter if needed
|
|
262
|
+
if self.location and location is not None:
|
|
263
|
+
mask_lead_time &= self.df_lead_time['Location'] == location
|
|
264
|
+
mask_inv &= self.df_inv['Location'] == location
|
|
265
|
+
|
|
266
|
+
# Apply filters using boolean indexing
|
|
267
|
+
current_df_lead_time = self.df_lead_time[mask_lead_time]
|
|
268
|
+
current_df_inv = self.df_inv[mask_inv]
|
|
269
|
+
|
|
270
|
+
return current_df_lead_time, current_df_inv
|
|
30
271
|
|
|
31
|
-
timestamp = utils.set_timestamp()
|
|
32
|
-
actual_date = pd.to_datetime(str(int(float(timestamp[0:8]))), format='%Y%m%d')
|
|
33
272
|
|
|
34
|
-
|
|
273
|
+
def _calculate_suggested_forecast(self, current_df_lead_time, current_df_inv, date, last_suggested_value=None):
|
|
274
|
+
"""
|
|
275
|
+
Calculate suggested forecast for the given date using the SuggestedForecast class.
|
|
276
|
+
|
|
277
|
+
This method now validates that sufficient forecast data exists to cover the
|
|
278
|
+
required coverage period. If forecast data doesn't extend far enough into
|
|
279
|
+
the future, it either raises an error or uses the last calculated value
|
|
280
|
+
based on the complete_suggested parameter.
|
|
281
|
+
|
|
282
|
+
Args:
|
|
283
|
+
current_df_lead_time (pd.DataFrame): Lead time data for current item
|
|
284
|
+
current_df_inv (pd.DataFrame): Inventory data for current item
|
|
285
|
+
date (str): Date for forecast calculation in 'YYYYMMDD' format
|
|
286
|
+
last_suggested_value (float, optional): Last calculated SuggestedForecast value
|
|
287
|
+
to use when complete_suggested is True and forecast data is insufficient
|
|
288
|
+
|
|
289
|
+
Returns:
|
|
290
|
+
pd.DataFrame: DataFrame containing suggested forecast values
|
|
291
|
+
|
|
292
|
+
Raises:
|
|
293
|
+
ValueError: If forecast data doesn't extend far enough to cover the required period
|
|
294
|
+
and complete_suggested is False or no previous value is available
|
|
295
|
+
"""
|
|
296
|
+
# Convert current date to datetime
|
|
297
|
+
current_date = pd.to_datetime(date, format='%Y%m%d')
|
|
298
|
+
|
|
299
|
+
# Get the maximum forecast date available
|
|
300
|
+
max_forecast_date = self.df_fcst['Date'].max()
|
|
301
|
+
|
|
302
|
+
# Get coverage value for this item
|
|
303
|
+
coverage = current_df_lead_time['Coverage'].iloc[0]
|
|
304
|
+
|
|
305
|
+
# Calculate the required forecast end date
|
|
306
|
+
required_forecast_end_date = current_date + timedelta(days=int(coverage))
|
|
307
|
+
|
|
308
|
+
# Check if we have sufficient forecast data
|
|
309
|
+
if max_forecast_date < required_forecast_end_date:
|
|
310
|
+
# Get item identifier for error message
|
|
311
|
+
item = current_df_inv['Item'].iloc[0]
|
|
312
|
+
location_msg = ""
|
|
313
|
+
if self.location and 'Location' in current_df_inv.columns:
|
|
314
|
+
location = current_df_inv['Location'].iloc[0]
|
|
315
|
+
location_msg = f" at location {location}"
|
|
316
|
+
|
|
317
|
+
if self.complete_suggested:
|
|
318
|
+
if last_suggested_value is not None:
|
|
319
|
+
# Use the last calculated SuggestedForecast value
|
|
320
|
+
# Create a DataFrame with the same structure as the normal output
|
|
321
|
+
result_df = current_df_inv[self.metadata].copy()
|
|
322
|
+
result_df['SuggestedForecast'] = last_suggested_value
|
|
323
|
+
|
|
324
|
+
# Add PurchaseFactor and ItemDescription from inventory data
|
|
325
|
+
if 'PurchaseFactor' in current_df_inv.columns:
|
|
326
|
+
result_df['PurchaseFactor'] = current_df_inv['PurchaseFactor'].iloc[0]
|
|
327
|
+
else:
|
|
328
|
+
result_df['PurchaseFactor'] = 1 # Default value if not present
|
|
329
|
+
|
|
330
|
+
if 'ItemDescription' in current_df_inv.columns:
|
|
331
|
+
result_df['ItemDescription'] = current_df_inv['ItemDescription'].iloc[0]
|
|
332
|
+
else:
|
|
333
|
+
result_df['ItemDescription'] = '' # Default value if not present
|
|
35
334
|
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
335
|
+
return result_df
|
|
336
|
+
else:
|
|
337
|
+
# For the first period when complete_suggested=True but no previous value exists,
|
|
338
|
+
# try to calculate with available data up to max_forecast_date
|
|
339
|
+
# This allows at least the first period to be calculated
|
|
340
|
+
try:
|
|
341
|
+
return SuggestedForecast(
|
|
342
|
+
df_LeadTimes=current_df_lead_time,
|
|
343
|
+
df_Forecast=self.df_fcst,
|
|
344
|
+
df_Prep=self.df_prep,
|
|
345
|
+
df_inv=current_df_inv,
|
|
346
|
+
column_forecast='SuggestedForecast',
|
|
347
|
+
columns_metadata=self.metadata,
|
|
348
|
+
frequency_='M',
|
|
349
|
+
location=self.location,
|
|
350
|
+
actualdate=date,
|
|
351
|
+
default_coverage_=self.default_coverage,
|
|
352
|
+
join_='left'
|
|
353
|
+
).suggested_forecast()
|
|
354
|
+
except Exception as e:
|
|
355
|
+
# If even the basic calculation fails, raise a more informative error
|
|
356
|
+
error_msg = (
|
|
357
|
+
f"Cannot calculate initial forecast for item {item}{location_msg}. "
|
|
358
|
+
f"Forecast data extends only to {max_forecast_date.strftime('%Y-%m-%d')}, "
|
|
359
|
+
f"but coverage of {int(coverage)} days from {current_date.strftime('%Y-%m-%d')} "
|
|
360
|
+
f"requires forecast data until {required_forecast_end_date.strftime('%Y-%m-%d')}. "
|
|
361
|
+
f"Original error: {str(e)}"
|
|
362
|
+
)
|
|
363
|
+
raise ValueError(error_msg)
|
|
364
|
+
else:
|
|
365
|
+
error_msg = (
|
|
366
|
+
f"Insufficient forecast data for item {item}{location_msg}. "
|
|
367
|
+
f"Forecast data extends only to {max_forecast_date.strftime('%Y-%m-%d')}, "
|
|
368
|
+
f"but coverage of {int(coverage)} days from {current_date.strftime('%Y-%m-%d')} "
|
|
369
|
+
f"requires forecast data until {required_forecast_end_date.strftime('%Y-%m-%d')}."
|
|
370
|
+
)
|
|
371
|
+
raise ValueError(error_msg)
|
|
372
|
+
|
|
373
|
+
# If validation passes, proceed with the original calculation
|
|
374
|
+
return SuggestedForecast(
|
|
375
|
+
df_LeadTimes=current_df_lead_time,
|
|
376
|
+
df_Forecast=self.df_fcst,
|
|
377
|
+
df_Prep=self.df_prep,
|
|
378
|
+
df_inv=current_df_inv,
|
|
379
|
+
column_forecast='SuggestedForecast',
|
|
380
|
+
columns_metadata=self.metadata,
|
|
381
|
+
frequency_='M',
|
|
382
|
+
location=self.location,
|
|
383
|
+
actualdate=date,
|
|
384
|
+
default_coverage_=self.default_coverage,
|
|
385
|
+
join_='left'
|
|
386
|
+
).suggested_forecast()
|
|
39
387
|
|
|
40
|
-
for _, row in self.df_lead_time[columns].drop_duplicates().iterrows():
|
|
41
|
-
item = row['Item']
|
|
42
|
-
location = row['Location'] if self.location else None
|
|
43
|
-
cobertura = int(row['ReorderFreq']) if not pd.isnull(row['ReorderFreq']) and row['ReorderFreq'] != 0 else self.default_coverage
|
|
44
|
-
date = self.start_date
|
|
45
|
-
dates = []
|
|
46
388
|
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
389
|
+
def _calculate_daily_usage(self, suggested_forecast_df, date):
|
|
390
|
+
"""
|
|
391
|
+
Calculate average and maximum daily usage rates.
|
|
392
|
+
|
|
393
|
+
This method computes both average and maximum daily consumption rates
|
|
394
|
+
which are used for inventory planning and safety stock calculations.
|
|
395
|
+
|
|
396
|
+
Args:
|
|
397
|
+
suggested_forecast_df (pd.DataFrame): DataFrame with forecast data
|
|
398
|
+
date (str): Current calculation date in 'YYYYMMDD' format
|
|
399
|
+
|
|
400
|
+
Returns:
|
|
401
|
+
tuple: (df_avg, df_max)
|
|
402
|
+
- df_avg: DataFrame with average daily usage
|
|
403
|
+
- df_max: DataFrame with maximum daily usage
|
|
404
|
+
"""
|
|
405
|
+
df_avg = DailyUsageFuture(
|
|
406
|
+
location=self.location,
|
|
407
|
+
column_forecast='SuggestedForecast',
|
|
408
|
+
date=date,
|
|
409
|
+
df_fcst=self.df_fcst
|
|
410
|
+
).daily_usage(suggested_forecast_df, 'AvgDailyUsage').fillna(0)
|
|
50
411
|
|
|
51
|
-
|
|
412
|
+
df_max = DailyUsageFuture(
|
|
413
|
+
location=self.location,
|
|
414
|
+
column_forecast='SuggestedForecast',
|
|
415
|
+
date=date,
|
|
416
|
+
df_fcst=self.df_fcst
|
|
417
|
+
).daily_usage(df_avg, 'MaxDailyUsage').fillna(0)
|
|
418
|
+
|
|
419
|
+
return df_avg, df_max
|
|
52
420
|
|
|
53
|
-
return item_dates
|
|
54
421
|
|
|
422
|
+
def _calculate_security_stock_data(self, df_max, current_df_lead_time, period_index=None, dates=None):
|
|
423
|
+
"""
|
|
424
|
+
Calculate security stock related data and prepare for reorder calculations.
|
|
425
|
+
|
|
426
|
+
This method:
|
|
427
|
+
1. Merges daily usage with lead time data
|
|
428
|
+
2. Determines effective reorder frequency and coverage
|
|
429
|
+
3. Calculates SuggestedForecastPeriod based on coverage ratio
|
|
430
|
+
4. For period 0, uses days to next period instead of reorder frequency
|
|
431
|
+
|
|
432
|
+
Args:
|
|
433
|
+
df_max (pd.DataFrame): DataFrame with maximum daily usage
|
|
434
|
+
current_df_lead_time (pd.DataFrame): Lead time data for current item
|
|
435
|
+
period_index (int, optional): Current period index (0, 1, 2, ...)
|
|
436
|
+
dates (list, optional): List of dates for this item
|
|
437
|
+
|
|
438
|
+
Returns:
|
|
439
|
+
pd.DataFrame: DataFrame with merged data and calculated fields:
|
|
440
|
+
- All fields from df_max
|
|
441
|
+
- AvgLeadTime, MaxLeadTime from lead time data
|
|
442
|
+
- SuggestedForecastPeriod: Adjusted forecast for the period
|
|
443
|
+
"""
|
|
444
|
+
merge_columns = ['Item', 'Location', 'AvgLeadTime', 'MaxLeadTime'] if self.location else ['Item', 'AvgLeadTime', 'MaxLeadTime']
|
|
445
|
+
df_sstock = pd.merge(df_max, current_df_lead_time[merge_columns], on=self.metadata, how='inner').drop_duplicates()
|
|
446
|
+
|
|
447
|
+
# Get ReorderFreq and Coverage
|
|
448
|
+
reorder_freq = current_df_lead_time['ReorderFreq'].values[0]
|
|
449
|
+
if pd.isnull(reorder_freq) or reorder_freq == 0:
|
|
450
|
+
reorder_freq = self.default_coverage
|
|
451
|
+
|
|
452
|
+
coverage = self.default_coverage
|
|
453
|
+
if 'Coverage' in current_df_lead_time.columns:
|
|
454
|
+
coverage_val = current_df_lead_time['Coverage'].values[0]
|
|
455
|
+
if not pd.isnull(coverage_val):
|
|
456
|
+
coverage = coverage_val
|
|
457
|
+
else:
|
|
458
|
+
coverage = reorder_freq + df_sstock['AvgLeadTime'].values[0]
|
|
459
|
+
else:
|
|
460
|
+
coverage = reorder_freq + df_sstock['AvgLeadTime'].values[0]
|
|
461
|
+
|
|
462
|
+
# Calculate SuggestedForecastPeriod
|
|
463
|
+
if period_index == 0 and dates is not None and len(dates) > 1:
|
|
464
|
+
# For period 0, use days to next period instead of reorder frequency
|
|
465
|
+
# This allows uniform consumption calculation in all future periods
|
|
466
|
+
current_date = pd.to_datetime(dates[0], format='%Y%m%d')
|
|
467
|
+
next_date = pd.to_datetime(dates[1], format='%Y%m%d')
|
|
468
|
+
days_to_next_period = (next_date - current_date).days
|
|
469
|
+
|
|
470
|
+
# Formula: SuggestedForecast × (days_to_next_period / coverage)
|
|
471
|
+
# This represents the forecasted consumption from period 0 to period 1
|
|
472
|
+
suggested_forecast_period = np.ceil(df_sstock['SuggestedForecast'] * (days_to_next_period / coverage))
|
|
473
|
+
else:
|
|
474
|
+
# For other periods, use the original calculation with reorder frequency
|
|
475
|
+
# Formula: SuggestedForecast × (reorder_freq / coverage)
|
|
476
|
+
suggested_forecast_period = np.ceil(df_sstock['SuggestedForecast'] * (reorder_freq / coverage))
|
|
477
|
+
|
|
478
|
+
df_sstock['SuggestedForecastPeriod'] = df_sstock.apply(
|
|
479
|
+
lambda row: self._format_value(suggested_forecast_period.iloc[row.name], 'SuggestedForecastPeriod'),
|
|
480
|
+
axis=1
|
|
481
|
+
)
|
|
482
|
+
|
|
483
|
+
return df_sstock
|
|
55
484
|
|
|
56
|
-
def reorder(self):
|
|
57
485
|
|
|
58
|
-
|
|
486
|
+
def _calculate_security_stock(self, df):
|
|
487
|
+
"""
|
|
488
|
+
Calculate security stock using configured method.
|
|
489
|
+
|
|
490
|
+
Two methods are available:
|
|
491
|
+
1. Statistical method (default):
|
|
492
|
+
SecurityStock = (MaxDailyUsage × MaxLeadTime) - (AvgDailyUsage × AvgLeadTime)
|
|
493
|
+
This represents the difference between worst-case and average scenarios.
|
|
494
|
+
|
|
495
|
+
2. Reference days method (if security_stock_ref=True):
|
|
496
|
+
SecurityStock = SecurityStockDaysRef × AvgDailyUsage
|
|
497
|
+
Uses a predefined number of days of coverage.
|
|
498
|
+
|
|
499
|
+
Args:
|
|
500
|
+
df (pd.DataFrame): DataFrame containing required calculation fields
|
|
501
|
+
|
|
502
|
+
Returns:
|
|
503
|
+
pd.Series: Calculated security stock values
|
|
504
|
+
"""
|
|
505
|
+
if self.security_stock_ref:
|
|
506
|
+
security_stock = df['SecurityStockDaysRef'] * df['AvgDailyUsage']
|
|
507
|
+
else:
|
|
508
|
+
security_stock = (df['MaxDailyUsage'] * df['MaxLeadTime']) - (df['AvgDailyUsage'] * df['AvgLeadTime'])
|
|
509
|
+
|
|
510
|
+
# Apply formatting
|
|
511
|
+
return security_stock.apply(lambda x: self._format_value(x, 'SecurityStock'))
|
|
59
512
|
|
|
60
|
-
item_dates = self.future_date()
|
|
61
513
|
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
514
|
+
def _calculate_inventory_days(self, df):
|
|
515
|
+
"""
|
|
516
|
+
Calculate inventory days using configured method.
|
|
517
|
+
|
|
518
|
+
FutureStockoutDays = (FutureInventoryTransitArrival - SecurityStock) / AvgDailyUsage
|
|
519
|
+
|
|
520
|
+
Args:
|
|
521
|
+
df (pd.DataFrame): DataFrame containing required calculation fields
|
|
522
|
+
|
|
523
|
+
Returns:
|
|
524
|
+
pd.Series: Calculated future stockout days
|
|
525
|
+
"""
|
|
526
|
+
# Calculate future stockout days with safe division
|
|
527
|
+
# Avoid division by zero by checking AvgDailyUsage
|
|
528
|
+
future_stockout_days = np.where(
|
|
529
|
+
df['AvgDailyUsage'] > 0,
|
|
530
|
+
(df['FutureInventoryTransitArrival'] - df['SecurityStock']) / df['AvgDailyUsage'],
|
|
531
|
+
0 # If no daily usage, return 0 days
|
|
532
|
+
)
|
|
65
533
|
|
|
66
|
-
|
|
67
|
-
|
|
534
|
+
# Apply formatting
|
|
535
|
+
return pd.Series(future_stockout_days).apply(lambda x: self._format_value(x, 'FutureStockoutDays'))
|
|
68
536
|
|
|
69
|
-
SuggestedForecast_cov = {}
|
|
70
|
-
SuggestedForecast_rf = {}
|
|
71
|
-
df_forecast = {}
|
|
72
|
-
df_avg_gen = {}
|
|
73
|
-
df_max_gen = {}
|
|
74
|
-
df_sstock = {}
|
|
75
|
-
df_inventory = {}
|
|
76
|
-
df = {}
|
|
77
537
|
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
538
|
+
def _sum_transit_arrivals(self, transit_arrivals_str):
|
|
539
|
+
"""
|
|
540
|
+
Calculate the total quantity from TransitArrival string.
|
|
541
|
+
|
|
542
|
+
Args:
|
|
543
|
+
transit_arrivals_str (str): String representation of transit arrivals list
|
|
544
|
+
e.g., '[{"quantity": 100.0, "arrival_date": "2024-01-15"}]'
|
|
545
|
+
|
|
546
|
+
Returns:
|
|
547
|
+
float: Total quantity of all arrivals in the period
|
|
548
|
+
"""
|
|
549
|
+
|
|
550
|
+
if transit_arrivals_str == '[]' or not transit_arrivals_str:
|
|
551
|
+
return 0.0
|
|
552
|
+
|
|
553
|
+
try:
|
|
554
|
+
arrivals = ast.literal_eval(transit_arrivals_str)
|
|
555
|
+
return sum(arrival.get('quantity', 0) for arrival in arrivals)
|
|
556
|
+
except:
|
|
557
|
+
return 0.0
|
|
81
558
|
|
|
82
|
-
|
|
83
|
-
|
|
559
|
+
|
|
560
|
+
def _prepare_transit_schedule(self, key, transit_amount, dates):
|
|
561
|
+
"""
|
|
562
|
+
Prepare transit schedule based on df_transit or default logic.
|
|
563
|
+
|
|
564
|
+
Args:
|
|
565
|
+
key (tuple or str): Item identifier (item) or (item, location)
|
|
566
|
+
transit_amount (float): Total transit amount from df_inv
|
|
567
|
+
dates (list): List of dates for this item
|
|
568
|
+
|
|
569
|
+
Returns:
|
|
570
|
+
list: List of transit orders with 'quantity' and 'arrival_date'
|
|
571
|
+
"""
|
|
572
|
+
if transit_amount <= 0:
|
|
573
|
+
return []
|
|
574
|
+
|
|
575
|
+
transit_schedule = []
|
|
576
|
+
|
|
577
|
+
if self.df_transit is None:
|
|
578
|
+
# Default logic: complete transit arrives in period 1
|
|
579
|
+
if len(dates) > 1:
|
|
580
|
+
arrival_date = pd.to_datetime(dates[1], format='%Y%m%d')
|
|
581
|
+
transit_schedule.append({
|
|
582
|
+
'quantity': transit_amount,
|
|
583
|
+
'arrival_date': arrival_date
|
|
584
|
+
})
|
|
585
|
+
else:
|
|
586
|
+
# Use provided transit schedule
|
|
84
587
|
if self.location:
|
|
85
588
|
item, location = key
|
|
589
|
+
mask = (self.df_transit['Item'] == item) & (self.df_transit['Location'] == location)
|
|
86
590
|
else:
|
|
87
|
-
|
|
88
|
-
|
|
591
|
+
mask = self.df_transit['Item'] == key
|
|
592
|
+
|
|
593
|
+
transit_data = self.df_transit[mask].copy()
|
|
594
|
+
|
|
595
|
+
if not transit_data.empty:
|
|
596
|
+
# Validate total matches
|
|
597
|
+
total_scheduled = transit_data['Transit'].sum()
|
|
598
|
+
if abs(total_scheduled - transit_amount) > 0.01: # Allow small floating point differences
|
|
599
|
+
raise ValueError(f"Transit schedule total ({total_scheduled}) does not match inventory transit ({transit_amount}) for {key}")
|
|
600
|
+
|
|
601
|
+
# Create transit orders
|
|
602
|
+
for _, row in transit_data.iterrows():
|
|
603
|
+
arrival_date = pd.to_datetime(row['ArrivalDate'], format='%Y-%m-%d')
|
|
604
|
+
transit_schedule.append({
|
|
605
|
+
'quantity': float(row['Transit']),
|
|
606
|
+
'arrival_date': arrival_date
|
|
607
|
+
})
|
|
608
|
+
else:
|
|
609
|
+
# If no transit data provided for this item, use default logic
|
|
610
|
+
if len(dates) > 1:
|
|
611
|
+
arrival_date = pd.to_datetime(dates[1], format='%Y%m%d')
|
|
612
|
+
transit_schedule.append({
|
|
613
|
+
'quantity': transit_amount,
|
|
614
|
+
'arrival_date': arrival_date
|
|
615
|
+
})
|
|
616
|
+
|
|
617
|
+
return transit_schedule
|
|
89
618
|
|
|
90
|
-
for i, date in enumerate(dates):
|
|
91
|
-
if self.location:
|
|
92
|
-
current_df_lead_time_cov = self.df_lead_time[(self.df_lead_time['Item'] == item) &
|
|
93
|
-
(self.df_lead_time['Location'] == location)]
|
|
94
619
|
|
|
95
|
-
|
|
96
|
-
|
|
620
|
+
    def _process_current_period(self, current_df_inv, df_sstock, key, date, transit_orders, dates):
        """
        Process inventory for the current period (i=0).

        This optimized version uses vectorized operations where possible and
        minimizes redundant calculations.

        Args:
            current_df_inv (pd.DataFrame): Current inventory data
            df_sstock (pd.DataFrame): Security stock calculation data
            key (tuple or str): Item identifier (item) or (item, location)
            date (str): Current date in 'YYYYMMDD' format
            transit_orders (dict): Dictionary tracking in-transit orders
            dates (list): List of all dates for this item

        Returns:
            pd.DataFrame: Processed inventory data for the current period
        """
        # Column set depends on whether the instance is location-aware.
        inventory_columns = ['Item', 'Location', 'Inventory', 'Transit', 'PurchaseFactor'] if self.location else ['Item', 'Inventory', 'Transit', 'PurchaseFactor']
        df_inventory = current_df_inv[inventory_columns].copy()

        # Vectorized initialization of inventory values with formatting
        df_inventory['FutureInventory'] = df_inventory['Inventory'].apply(
            lambda x: self._format_value(x, 'FutureInventory')
        )
        df_inventory['FutureTransit'] = df_inventory['Transit'].apply(
            lambda x: self._format_value(x, 'FutureTransit')
        )
        df_inventory['FutureInventoryTransit'] = df_inventory.apply(
            lambda row: self._format_value(row['Inventory'] + row['Transit'], 'FutureInventoryTransit'),
            axis=1
        )

        # Initialize transit orders for this item
        if key not in transit_orders:
            transit_orders[key] = []

        # Handle initial transit
        # NOTE(review): .iloc[0] assumes exactly one inventory row per key — confirm upstream filtering.
        transit_qty = float(df_inventory['Transit'].iloc[0])

        # Prepare transit schedule
        transit_schedule = self._prepare_transit_schedule(key, transit_qty, dates)

        # Add scheduled transits to transit_orders (mutates the shared dict in place)
        transit_orders[key].extend(transit_schedule)

        # For period 0, TransitArrival should always be empty list
        # (stored as its string representation, parsed later by the caller).
        df_inventory['TransitArrival'] = '[]'

        # Select relevant columns
        df_inventory = df_inventory[self.metadata + ['FutureInventoryTransit', 'FutureInventory', 'FutureTransit', 'TransitArrival']]

        # Merge with stock data
        df = pd.merge(df_inventory, df_sstock, on=self.metadata, how='inner')

        # Vectorized calculations for all rows at once
        # NOTE(review): positional assignment from df_sstock assumes the merge preserved
        # df_sstock's row order/index — verify when more than one row can match.
        df['SuggestedForecastPeriod'] = df_sstock['SuggestedForecastPeriod']
        df['SecurityStock'] = self._calculate_security_stock(df)

        # Apply formatting to calculated fields
        df['SuggestedForecast'] = df['SuggestedForecast'].apply(
            lambda x: self._format_value(x, 'SuggestedForecast')
        )
        df['ReorderPoint'] = df.apply(
            lambda row: self._format_value(max(0, row['SuggestedForecast'] + row['SecurityStock']), 'ReorderPoint'),
            axis=1
        )
        df['ReorderQtyBase'] = df.apply(
            lambda row: self._format_value(max(0, row['ReorderPoint'] - row['FutureInventoryTransit']), 'ReorderQtyBase'),
            axis=1
        )

        # First period has no reorder - vectorized assignment
        df['ReorderQty'] = 0
        df['ReorderQtyDays'] = 0
        df['ArrivalDate'] = ''  # No order in period 0

        # Note: FutureInventoryTransitArrival and FutureStockoutDays are calculated later
        # in _process_item_optimized after all periods are processed

        return df
|
|
97
701
|
|
|
98
|
-
current_df_inv = self.df_inv[(self.df_inv['Item'] == item) &
|
|
99
|
-
(self.df_inv['Location'] == location)]
|
|
100
702
|
|
|
703
|
+
    def _process_transit_orders(self, transit_orders, key, current_date, previous_date):
        """
        Process transit orders and calculate arrivals for the current period.

        This optimized method uses vectorization for better performance with large
        numbers of transit orders. It manages the lifecycle of transit orders:
        1. Identifies orders arriving in the current period
        2. Moves arrived quantities from transit to stock
        3. Updates remaining transit orders
        4. Maintains arrival history for reporting

        Args:
            transit_orders (dict): Dictionary of active transit orders by item/location
            key (tuple or str): Item identifier (item) or (item, location)
            current_date (pd.Timestamp): Current period date
            previous_date (pd.Timestamp): Previous period date

        Returns:
            tuple: (stock_from_arrivals, new_transit, transit_arrivals)
                - stock_from_arrivals: Total quantity arriving in this period
                - new_transit: Total quantity still in transit
                - transit_arrivals: List of arrival records for this period
        """
        # Get orders for this key, return early if none
        orders = transit_orders.get(key, [])
        if not orders:
            return 0, 0, []

        # For small numbers of orders, use loops implementation
        # as it has less overhead
        if len(orders) < 10:
            new_transit = 0
            remaining_orders = []
            transit_arrivals = []
            stock_from_arrivals = 0

            for order in orders:
                # An order "arrives" when its date falls in (previous_date, current_date].
                if order['arrival_date'] > previous_date and order['arrival_date'] <= current_date:
                    # Order arrives in this period
                    stock_from_arrivals += order['quantity']
                    transit_arrivals.append({
                        'quantity': float(order['quantity']),
                        'arrival_date': order['arrival_date'].strftime('%Y-%m-%d')
                    })
                else:
                    # Order still in transit
                    new_transit += order['quantity']
                    remaining_orders.append(order)

            # In-place update: arrived orders are removed from the shared dict.
            transit_orders[key] = remaining_orders
            return stock_from_arrivals, new_transit, transit_arrivals

        # For larger numbers of orders, use vectorized approach
        # Extract data into numpy arrays for faster processing
        quantities = np.array([order['quantity'] for order in orders], dtype=np.float64)
        arrival_dates = np.array([order['arrival_date'] for order in orders])

        # Vectorized date comparison
        # Same half-open window (previous_date, current_date] as the scalar path above.
        mask_arrived = (arrival_dates > previous_date) & (arrival_dates <= current_date)

        # Calculate totals using numpy operations
        stock_from_arrivals = float(quantities[mask_arrived].sum()) if mask_arrived.any() else 0
        new_transit = float(quantities[~mask_arrived].sum()) if (~mask_arrived).any() else 0

        # Create transit arrivals list
        transit_arrivals = []
        if mask_arrived.any():
            arrived_indices = np.where(mask_arrived)[0]
            transit_arrivals = [
                {
                    'quantity': float(quantities[i]),
                    'arrival_date': arrival_dates[i].strftime('%Y-%m-%d')
                }
                for i in arrived_indices
            ]

        # Update transit orders with remaining orders
        if (~mask_arrived).any():
            remaining_indices = np.where(~mask_arrived)[0]
            transit_orders[key] = [orders[i] for i in remaining_indices]
        else:
            transit_orders[key] = []

        return stock_from_arrivals, new_transit, transit_arrivals
|
|
108
787
|
|
|
109
|
-
# SuggestedForecast_Coverage
|
|
110
|
-
SuggestedForecast_cov[i] = SuggestedForecast(df_LeadTimes=current_df_lead_time_cov,
|
|
111
|
-
df_Forecast=self.df_fcst,
|
|
112
|
-
df_Prep=self.df_prep,
|
|
113
|
-
df_inv=current_df_inv,
|
|
114
|
-
column_forecast='SuggestedForecast',
|
|
115
|
-
columns_metadata=metadata,
|
|
116
|
-
frequency_='M',
|
|
117
|
-
location=self.location,
|
|
118
|
-
actualdate=date,
|
|
119
|
-
default_coverage_=self.default_coverage,
|
|
120
|
-
join_='left').suggested_forecast()
|
|
121
|
-
|
|
122
|
-
SuggestedForecast_cov[i].rename(columns={'SuggestedForecast':'Suggested_Coverage'},inplace=True)
|
|
123
|
-
|
|
124
|
-
# SuggestedForecast_ReorderFreq
|
|
125
|
-
SuggestedForecast_rf[i] = SuggestedForecast(df_LeadTimes=current_df_lead_time_rf,
|
|
126
|
-
df_Forecast=self.df_fcst,
|
|
127
|
-
df_Prep=self.df_prep,
|
|
128
|
-
df_inv=current_df_inv,
|
|
129
|
-
column_forecast='SuggestedForecast',
|
|
130
|
-
columns_metadata=metadata,
|
|
131
|
-
frequency_='M',
|
|
132
|
-
location=self.location,
|
|
133
|
-
actualdate=date,
|
|
134
|
-
default_coverage_=self.default_coverage,
|
|
135
|
-
join_='left').suggested_forecast()
|
|
136
|
-
|
|
137
|
-
SuggestedForecast_rf[i].rename(columns={'SuggestedForecast':'Suggested_ReorderFreq'},inplace=True)
|
|
138
|
-
SuggestedForecast_rf[i] = SuggestedForecast_rf[i][metadata + ['Suggested_ReorderFreq']]
|
|
139
788
|
|
|
140
|
-
|
|
141
|
-
|
|
789
|
+
    def _process_future_period(self, current_df_inv, df_sstock, df_previous, key, date, dates, i, transit_orders):
        """
        Process inventory for future periods (i>0).

        This method:
        1. Calculates consumption using SuggestedForecastPeriod from previous period
        2. Updates stock levels considering consumption and arrivals
        3. Determines if reorder is needed
        4. Calculates reorder quantity if needed
        5. Adds new orders to transit tracking

        Args:
            current_df_inv (pd.DataFrame): Current inventory data
            df_sstock (pd.DataFrame): Security stock calculation data
            df_previous (pd.DataFrame): Previous period's results
            key (tuple or str): Item identifier (item) or (item, location)
            date (str): Current date in 'YYYYMMDD' format
            dates (list): List of all dates for this item
            i (int): Current period index
            transit_orders (dict): Dictionary tracking in-transit orders

        Returns:
            pd.DataFrame: Processed inventory data for the period including:
                - Updated inventory levels
                - Reorder recommendations
                - Transit arrival information
        """
        inventory_columns = ['Item', 'Location', 'PurchaseFactor'] if self.location else ['Item', 'PurchaseFactor']
        df_inventory = current_df_inv[inventory_columns].copy()
        df = pd.merge(df_inventory, df_sstock, on=inventory_columns, how='inner')
        # NOTE(review): positional assignment assumes the merge preserved df_sstock's
        # row order — confirm for multi-row keys.
        df['SuggestedForecastPeriod'] = df_sstock['SuggestedForecastPeriod']

        # Calculate consumption using SuggestedForecastPeriod from previous period
        # NOTE(review): .values[0] assumes a single row per item/period — verify upstream.
        consumption = df_previous['SuggestedForecastPeriod'].values[0]

        previous_stock = df_previous['FutureInventory'].values[0] - consumption

        # Process transit orders
        current_date = pd.to_datetime(date, format='%Y%m%d')
        previous_date = pd.to_datetime(dates[i-1], format='%Y%m%d')

        stock_from_arrivals, new_transit, transit_arrivals = self._process_transit_orders(
            transit_orders, key, current_date, previous_date
        )

        # Update inventory values with formatting
        # Stock is floored at zero: consumption beyond available stock is a stockout.
        future_stock = max(0, previous_stock + stock_from_arrivals)
        df['FutureInventory'] = self._format_value(future_stock, 'FutureInventory')
        df['FutureTransit'] = self._format_value(new_transit, 'FutureTransit')
        df['FutureInventoryTransit'] = self._format_value(
            future_stock + new_transit,
            'FutureInventoryTransit'
        )
        # Stored as string representation; parsed later (ast is imported at module level).
        df['TransitArrival'] = str(transit_arrivals) if transit_arrivals else '[]'

        # Calculate security stock and reorder values
        df['SecurityStock'] = self._calculate_security_stock(df)

        # Apply formatting to calculated fields
        df['SuggestedForecast'] = df['SuggestedForecast'].apply(
            lambda x: self._format_value(x, 'SuggestedForecast')
        )
        df['ReorderPoint'] = df.apply(
            lambda row: self._format_value(max(0, row['SuggestedForecast'] + row['SecurityStock']), 'ReorderPoint'),
            axis=1
        )
        df['ReorderQtyBase'] = df.apply(
            lambda row: self._format_value(max(0, row['ReorderPoint'] - row['FutureInventoryTransit']), 'ReorderQtyBase'),
            axis=1
        )

        # Calculate ReorderQty only if ReorderQtyBase > 0
        # Quantity is rounded up to a whole multiple of PurchaseFactor.
        reorder_qty = np.where(
            df['ReorderQtyBase'] > 0,
            ((df['ReorderQtyBase'] / df['PurchaseFactor']).apply(np.ceil)) * df['PurchaseFactor'],
            0
        )
        # NOTE(review): row.name indexes the numpy array positionally — relies on df
        # having a default RangeIndex after the merge above.
        df['ReorderQty'] = df.apply(
            lambda row: self._format_value(reorder_qty[row.name], 'ReorderQty'),
            axis=1
        )

        # Calculate ReorderQtyDays, avoiding division by zero
        reorder_qty_days = np.where(
            (df['ReorderQty'] > 0) & (df['AvgDailyUsage'] > 0),
            df['ReorderQty'] / df['AvgDailyUsage'],
            0
        )
        df['ReorderQtyDays'] = df.apply(
            lambda row: self._format_value(reorder_qty_days[row.name], 'ReorderQtyDays'),
            axis=1
        )

        # Add new order to transit if needed
        if df['ReorderQty'].values[0] > 0:
            avg_lead_time = df['AvgLeadTime'].values[0]
            arrival_date = current_date + timedelta(days=int(avg_lead_time))
            # Store the raw value for transit calculations
            transit_orders[key].append({
                'quantity': float(df['ReorderQty'].values[0]),
                'arrival_date': arrival_date
            })
            # Store arrival date for this period's order
            df['ArrivalDate'] = arrival_date.strftime('%Y-%m-%d')
        else:
            # No order in this period
            df['ArrivalDate'] = ''


        # Note: FutureInventoryTransitArrival and FutureStockoutDays are calculated later
        # in _process_item_optimized after all periods are processed

        return df
|
|
174
902
|
|
|
175
|
-
df[i]['ReorderPoint'] = (df[i]['Suggested_Coverage'] + df[i]['SecurityStock']).clip(lower=0)
|
|
176
|
-
df[i]['ReorderQtyBase'] = (df[i]['ReorderPoint'] - df[i]['InventoryTransit']).clip(lower=1)
|
|
177
|
-
df[i]['ReorderQty'] = ((df[i]['ReorderQtyBase'] / df[i]['PurchaseFactor']).apply(np.ceil)) * df[i]['PurchaseFactor']
|
|
178
|
-
df[i]['ReorderQtyDays'] = (df[i]['ReorderQty'] / df[i]['AvgDailyUsage']).astype(int)
|
|
179
|
-
|
|
180
|
-
# Future Dates
|
|
181
|
-
else:
|
|
182
|
-
inventory_columns = ['Item', 'Location', 'PurchaseFactor'] if self.location else ['Item', 'PurchaseFactor']
|
|
183
|
-
df_inventory[i] = current_df_inv[inventory_columns]
|
|
184
|
-
df[i] = pd.merge(df_inventory[i], df_sstock[i], on=inventory_columns, how='inner')
|
|
185
903
|
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
904
|
+
    def _prepare_final_dataframe(self, data_frame):
        """
        Prepare the final output dataframe with proper formatting and column selection.

        This method:
        1. Merges with lead time data to add reorder parameters
        2. Formats dates to YYYY-MM-DD format
        3. Renames columns for clarity
        4. Rounds numeric values to 2 decimal places
        5. Selects and orders final columns

        Args:
            data_frame (pd.DataFrame): Raw calculation results

        Returns:
            pd.DataFrame: Formatted output with columns:
                - PurchaseDate, Item, ItemDescription, (Location)
                - Forecast metrics: SuggestedForecast, SuggestedForecastPeriod
                - Inventory levels: FutureInventoryTransit (total), FutureInventory (stock), FutureTransit (transit)
                - FutureInventoryTransitArrival: FutureInventory + arrivals in the period
                - FutureStockoutDays: Days of inventory coverage
                - Transit information: TransitArrival
                - Reorder metrics: ReorderQtyBase, ReorderQty, ReorderQtyDays
                - Order information: ArrivalDate (arrival date of current period's order)
                - Planning parameters: PurchaseFactor, ReorderPoint, SecurityStock
                - Usage rates: AvgDailyUsage, MaxDailyUsage
                - Lead times: AvgLeadTime, MaxLeadTime
                - Coverage parameters: ReorderFreq, Coverage
        """
        leadtimes_columns = ['Item', 'Location', 'ReorderFreq', 'Coverage'] if self.location else ['Item', 'ReorderFreq', 'Coverage']
        leadtimes = self.df_lead_time[leadtimes_columns]
        # Left merge keeps every calculated row; unmatched lead-time fields become 0.
        df_final = pd.merge(data_frame, leadtimes, on=self.metadata, how='left').fillna(0)

        # Format date and rename to PurchaseDate
        df_final['PurchaseDate'] = pd.to_datetime(df_final['Date'], format='%Y%m%d').dt.strftime('%Y-%m-%d')
        df_final = df_final.drop('Date', axis=1)

        # Ensure ArrivalDate is present (in case some records don't have it)
        if 'ArrivalDate' not in df_final.columns:
            df_final['ArrivalDate'] = ''

        # Apply formatting to fields that are ALWAYS integers
        always_integer_fields = ['PurchaseFactor', 'AvgLeadTime', 'MaxLeadTime', 'ReorderQtyDays', 'ReorderFreq', 'Coverage']
        for field in always_integer_fields:
            if field in df_final.columns:
                df_final[field] = df_final[field].apply(lambda x: self._format_value(x, field))

        # Apply formatting to fields that are ALWAYS decimals
        always_decimal_fields = ['AvgDailyUsage', 'MaxDailyUsage']
        for field in always_decimal_fields:
            if field in df_final.columns:
                df_final[field] = df_final[field].apply(lambda x: self._format_value(x, field))

        # Select final columns
        # NOTE(review): the two branches differ only by 'Location' — candidates for
        # deduplication; kept identical here to preserve the file byte-for-byte.
        if self.location:
            final_cols = [
                'PurchaseDate', 'Item', 'ItemDescription', 'Location', 'SuggestedForecast',
                'SuggestedForecastPeriod', 'FutureInventoryTransit', 'FutureInventory',
                'FutureTransit', 'FutureInventoryTransitArrival', 'FutureStockoutDays', 'TransitArrival',
                'ReorderQtyBase', 'ReorderQty', 'ReorderQtyDays', 'ArrivalDate', 'PurchaseFactor',
                'ReorderPoint', 'SecurityStock', 'AvgDailyUsage', 'MaxDailyUsage', 'AvgLeadTime',
                'MaxLeadTime', 'ReorderFreq', 'Coverage'
            ]
        else:
            final_cols = [
                'PurchaseDate', 'Item', 'ItemDescription', 'SuggestedForecast',
                'SuggestedForecastPeriod', 'FutureInventoryTransit', 'FutureInventory',
                'FutureTransit', 'FutureInventoryTransitArrival', 'FutureStockoutDays', 'TransitArrival',
                'ReorderQtyBase', 'ReorderQty', 'ReorderQtyDays', 'ArrivalDate', 'PurchaseFactor',
                'ReorderPoint', 'SecurityStock', 'AvgDailyUsage', 'MaxDailyUsage', 'AvgLeadTime',
                'MaxLeadTime', 'ReorderFreq', 'Coverage'
            ]

        return df_final[final_cols]
|
|
190
978
|
|
|
191
|
-
df[i]['InventoryTransit'] = ((df[i-1]['InventoryTransit'] - df[i-1]['Suggested_ReorderFreq']) + df[i-1]['ReorderQty']).clip(lower=0)
|
|
192
|
-
df[i]['ReorderPoint'] = (df[i]['Suggested_Coverage'] + df[i]['SecurityStock']).clip(lower=0)
|
|
193
|
-
df[i]['ReorderQtyBase'] = (df[i]['ReorderPoint'] - df[i]['InventoryTransit']).clip(lower=1)
|
|
194
|
-
df[i]['ReorderQty'] = ((df[i]['ReorderQtyBase'] / df[i]['PurchaseFactor']).apply(np.ceil)) * df[i]['PurchaseFactor']
|
|
195
|
-
df[i]['ReorderQtyDays'] = (df[i]['ReorderQty'] / df[i]['AvgDailyUsage']).astype(int)
|
|
196
|
-
|
|
197
979
|
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
980
|
+
    def reorder(self):
        """
        Main method to calculate future reorder recommendations.

        This optimized version uses batch processing and vectorization to improve
        performance, especially for large datasets. The method:
        1. Generates future dates based on reorder frequencies
        2. Groups items for batch processing when possible
        3. Pre-allocates data structures to minimize memory operations
        4. Uses vectorized calculations where applicable
        5. Formats and returns consolidated results

        Returns:
            pd.DataFrame: Complete reorder recommendations for all items/locations
                          and time periods. See _prepare_final_dataframe() for
                          detailed column descriptions.

        Example usage:
            >>> reorder_system = FutureReorder(
            ...     df_inv=inventory_df,
            ...     df_lead_time=lead_time_df,
            ...     df_prep=prep_df,
            ...     df_fcst=forecast_df,
            ...     periods=6,
            ...     start_date='2024-01-01'
            ... )
            >>> results = reorder_system.reorder()
            >>> results.head()
            # Returns DataFrame with reorder recommendations
        """

        item_dates = self.future_date()

        # Pre-allocate list for results instead of concatenating DataFrames
        all_results = []

        # Group items by number of periods for potential batch processing
        # NOTE(review): items in each group are still processed one at a time below;
        # the grouping is a placeholder for true batch processing.
        items_by_period_count = {}
        for key, dates in item_dates.items():
            period_count = len(dates)
            if period_count not in items_by_period_count:
                items_by_period_count[period_count] = []
            items_by_period_count[period_count].append((key, dates))

        # Process each group
        for period_count, items_group in items_by_period_count.items():
            # For each item in the group
            for key, dates in items_group:
                if self.location:
                    item, location = key
                else:
                    item = key
                    location = None

                # Get current dataframes
                current_df_lead_time, current_df_inv = self._get_current_dataframes(item, location)

                # Skip items with no lead-time or inventory data.
                if current_df_lead_time.empty or current_df_inv.empty:
                    continue

                # Process this item using optimized approach
                item_results = self._process_item_optimized(
                    key, item, location, dates, current_df_lead_time, current_df_inv
                )

                if item_results is not None and not item_results.empty:
                    all_results.append(item_results)

        # Combine all results efficiently
        if all_results:
            data_frame = pd.concat(all_results, ignore_index=True)
        else:
            # Empty skeleton so _prepare_final_dataframe still has the key columns.
            columns = ['Date', 'Item'] + (['Location'] if self.location else [])
            data_frame = pd.DataFrame(columns=columns)

        # Prepare and return final dataframe
        return self._prepare_final_dataframe(data_frame)
|
|
1057
|
+
|
|
1058
|
+
|
|
1059
|
+
    def _process_item_optimized(self, key, item, location, dates, current_df_lead_time, current_df_inv):
        """
        Process a single item through all periods using optimized approach.

        This method pre-allocates arrays and uses vectorized operations where possible
        to improve performance.

        Args:
            key: Item key (item or (item, location))
            item: Item identifier
            location: Location identifier (if applicable)
            dates: List of dates to process
            current_df_lead_time: Lead time data for this item
            current_df_inv: Inventory data for this item

        Returns:
            pd.DataFrame: Results for all periods of this item
        """

        # Pre-allocate dictionaries for intermediate results
        suggested_forecasts = {}
        df_avgs = {}
        df_maxs = {}
        df_sstocks = {}
        period_results = {}

        # Initialize transit orders for this item
        transit_orders = {key: []}

        # Track last suggested forecast value for complete_suggested feature
        last_suggested_value = None

        # Process each period
        for i, date in enumerate(dates):
            # Calculate suggested forecast (cached if possible)
            suggested_forecasts[i] = self._calculate_suggested_forecast(
                current_df_lead_time, current_df_inv, date, last_suggested_value
            )

            # Update last_suggested_value for next iteration
            if 'SuggestedForecast' in suggested_forecasts[i].columns:
                last_suggested_value = suggested_forecasts[i]['SuggestedForecast'].iloc[0]

            # Calculate daily usage
            df_avgs[i], df_maxs[i] = self._calculate_daily_usage(
                suggested_forecasts[i], date
            )

            # Calculate security stock data
            df_sstocks[i] = self._calculate_security_stock_data(
                df_maxs[i], current_df_lead_time, period_index=i, dates=dates
            )

            # Process period based on whether it's current or future
            if i == 0:
                period_results[i] = self._process_current_period(
                    current_df_inv, df_sstocks[i], key, date, transit_orders, dates
                )
            else:
                period_results[i] = self._process_future_period(
                    current_df_inv, df_sstocks[i], period_results[i-1],
                    key, date, dates, i, transit_orders
                )

            # Add metadata columns efficiently
            period_results[i]['Date'] = date
            period_results[i]['Item'] = item
            if self.location:
                period_results[i]['Location'] = location

        # After processing all periods, update FutureInventoryTransitArrival with next period's TransitArrival
        for i in range(len(dates)):
            if i < len(dates) - 1:  # If there's a next period
                # Get next period's TransitArrival
                # NOTE(review): .iloc[0] assumes each period frame has a single row.
                next_transit_arrival = period_results[i + 1]['TransitArrival'].iloc[0]
                transit_arrival_sum = self._sum_transit_arrivals(next_transit_arrival)
            else:  # Last period - no next period
                transit_arrival_sum = 0

            # Update FutureInventoryTransitArrival
            period_results[i]['FutureInventoryTransitArrival'] = self._format_value(
                period_results[i]['FutureInventory'].iloc[0] + transit_arrival_sum,
                'FutureInventoryTransitArrival'
            )

            # Recalculate FutureStockoutDays with the updated FutureInventoryTransitArrival
            period_results[i]['FutureStockoutDays'] = self._calculate_inventory_days(period_results[i])

        # Combine all periods for this item
        if period_results:
            # Stack all period results at once
            item_df = pd.concat(period_results.values(), ignore_index=True)

            # Reorder columns for consistency
            cols = ['Date', 'Item']
            if self.location:
                cols.append('Location')
            other_cols = [col for col in item_df.columns if col not in cols]
            item_df = item_df[cols + other_cols]

            return item_df

        return None
|