tsam 2.2.2__py3-none-any.whl → 2.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tsam/__init__.py +11 -0
- tsam/hyperparametertuning.py +245 -245
- tsam/periodAggregation.py +141 -141
- tsam/representations.py +167 -167
- tsam/timeseriesaggregation.py +1343 -1309
- tsam/utils/durationRepresentation.py +204 -128
- tsam/utils/k_maxoids.py +145 -145
- tsam/utils/k_medoids_contiguity.py +133 -133
- tsam/utils/k_medoids_exact.py +234 -234
- tsam/utils/segmentation.py +118 -119
- {tsam-2.2.2.dist-info → tsam-2.3.2.dist-info}/LICENSE.txt +20 -20
- {tsam-2.2.2.dist-info → tsam-2.3.2.dist-info}/METADATA +168 -167
- tsam-2.3.2.dist-info/RECORD +16 -0
- {tsam-2.2.2.dist-info → tsam-2.3.2.dist-info}/WHEEL +1 -1
- tsam-2.2.2.dist-info/RECORD +0 -16
- {tsam-2.2.2.dist-info → tsam-2.3.2.dist-info}/top_level.txt +0 -0
tsam/timeseriesaggregation.py
CHANGED
@@ -1,1309 +1,1343 @@
- old lines 1-1309 (removed side): garbled during extraction; where fragments survive they match the new version below, except old line 64, which is cut off after "unstackedTimeSeries =" and is replaced in the new version by the pd.concat call.
+ new lines 1-1343 (added side); this extract ends at new line 914:

# -*- coding: utf-8 -*-

import copy
import time
import warnings

import pandas as pd
import numpy as np

from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics.pairwise import euclidean_distances
from sklearn import preprocessing

from tsam.periodAggregation import aggregatePeriods
from tsam.representations import representations

pd.set_option("mode.chained_assignment", None)

# max iterator while resacling cluster profiles
MAX_ITERATOR = 20

# tolerance while rescaling cluster periods to meet the annual sum of the original profile
TOLERANCE = 1e-6


# minimal weight that overwrites a weighting of zero in order to carry the profile through the aggregation process
MIN_WEIGHT = 1e-6


def unstackToPeriods(timeSeries, timeStepsPerPeriod):
    """
    Extend the timeseries to an integer multiple of the period length and
    groups the time series to the periods.

    :param timeSeries:
    :type timeSeries: pandas DataFrame

    :param timeStepsPerPeriod: The number of discrete timesteps which describe one period. required
    :type timeStepsPerPeriod: integer

    :returns: - **unstackedTimeSeries** (pandas DataFrame) -- is stacked such that each row represents a
                  candidate period
              - **timeIndex** (pandas Series index) -- is the modification of the original
                  timeseriesindex in case an integer multiple was created
    """
    # init new grouped timeindex
    unstackedTimeSeries = timeSeries.copy()

    # initialize new indices
    periodIndex = []
    stepIndex = []

    # extend to inger multiple of period length
    if len(timeSeries) % timeStepsPerPeriod == 0:
        attached_timesteps = 0
    else:
        # calculate number of timesteps which get attached
        attached_timesteps = timeStepsPerPeriod - len(timeSeries) % timeStepsPerPeriod

        # take these from the head of the original time series
        rep_data = unstackedTimeSeries.head(attached_timesteps)

        # append them at the end of the time series
        unstackedTimeSeries = pd.concat([unstackedTimeSeries, rep_data])

    # create period and step index
    for ii in range(0, len(unstackedTimeSeries)):
        periodIndex.append(int(ii / timeStepsPerPeriod))
        stepIndex.append(ii - int(ii / timeStepsPerPeriod) * timeStepsPerPeriod)

    # save old index
    timeIndex = copy.deepcopy(unstackedTimeSeries.index)

    # create new double index and unstack the time series
    unstackedTimeSeries.index = pd.MultiIndex.from_arrays(
        [stepIndex, periodIndex], names=["TimeStep", "PeriodNum"]
    )
    unstackedTimeSeries = unstackedTimeSeries.unstack(level="TimeStep")

    return unstackedTimeSeries, timeIndex
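
# Editor's example (not tsam source code): unstackToPeriods pads a series whose
# length is not an integer multiple of the period length with steps taken from
# its own head, then reshapes it into one row per candidate period:
#
#     import numpy as np
#     import pandas as pd
#     from tsam.timeseriesaggregation import unstackToPeriods
#
#     ts = pd.DataFrame(
#         {"load": np.arange(25.0)},
#         index=pd.date_range("2030-01-01", periods=25, freq="h"),
#     )
#     unstacked, timeIndex = unstackToPeriods(ts, timeStepsPerPeriod=24)
#     print(unstacked.shape)  # (2, 24): two candidate periods, 24 steps each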


class TimeSeriesAggregation(object):
    """
    Clusters time series data to typical periods.
    """

    CLUSTER_METHODS = [
        "averaging",
        "k_means",
        "k_medoids",
        "k_maxoids",
        "hierarchical",
        "adjacent_periods",
    ]

    REPRESENTATION_METHODS = [
        "meanRepresentation",
        "medoidRepresentation",
        "maxoidRepresentation",
        "minmaxmeanRepresentation",
        "durationRepresentation",
        "distributionRepresentation",
        "distributionAndMinMaxRepresentation",
    ]

    EXTREME_PERIOD_METHODS = [
        "None",
        "append",
        "new_cluster_center",
        "replace_cluster_center",
    ]

    def __init__(
        self,
        timeSeries,
        resolution=None,
        noTypicalPeriods=10,
        noSegments=10,
        hoursPerPeriod=24,
        clusterMethod="hierarchical",
        evalSumPeriods=False,
        sortValues=False,
        sameMean=False,
        rescaleClusterPeriods=True,
        weightDict=None,
        segmentation=False,
        extremePeriodMethod="None",
        representationMethod=None,
        representationDict=None,
        distributionPeriodWise=True,
        segmentRepresentationMethod=None,
        predefClusterOrder=None,
        predefClusterCenterIndices=None,
        solver="highs",
        roundOutput=None,
        addPeakMin=None,
        addPeakMax=None,
        addMeanMin=None,
        addMeanMax=None,
    ):
        """
        Initialize the periodly clusters.

        :param timeSeries: DataFrame with the datetime as index and the relevant
            time series parameters as columns. required
        :type timeSeries: pandas.DataFrame() or dict

        :param resolution: Resolution of the time series in hours [h]. If timeSeries is a
            pandas.DataFrame() the resolution is derived from the datetime
            index. optional, default: delta_T in timeSeries
        :type resolution: float

        :param hoursPerPeriod: Value which defines the length of a cluster period. optional, default: 24
        :type hoursPerPeriod: integer

        :param noTypicalPeriods: Number of typical Periods - equivalent to the number of clusters. optional, default: 10
        :type noTypicalPeriods: integer

        :param noSegments: Number of segments in which the typical periods shoul be subdivided - equivalent to the
            number of inner-period clusters. optional, default: 10
        :type noSegments: integer

        :param clusterMethod: Chosen clustering method. optional, default: 'hierarchical'
            |br| Options are:

            * 'averaging'
            * 'k_means'
            * 'k_medoids'
            * 'k_maxoids'
            * 'hierarchical'
            * 'adjacent_periods'
        :type clusterMethod: string

        :param evalSumPeriods: Boolean if in the clustering process also the averaged periodly values
            shall be integrated additional to the periodly profiles as parameters. optional, default: False
        :type evalSumPeriods: boolean

        :param sameMean: Boolean which is used in the normalization procedure. If true, all time series get normalized
            such that they have the same mean value. optional, default: False
        :type sameMean: boolean

        :param sortValues: Boolean if the clustering should be done by the periodly duration
            curves (true) or the original shape of the data. optional (default: False)
        :type sortValues: boolean

        :param rescaleClusterPeriods: Decides if the cluster Periods shall get rescaled such that their
            weighted mean value fits the mean value of the original time series. optional (default: True)
        :type rescaleClusterPeriods: boolean

        :param weightDict: Dictionary which weights the profiles. It is done by scaling
            the time series while the normalization process. Normally all time
            series have a scale from 0 to 1. By scaling them, the values get
            different distances to each other and with this, they are
            differently evaluated while the clustering process. optional (default: None )
        :type weightDict: dict

        :param extremePeriodMethod: Method how to integrate extreme Periods (peak demand, lowest temperature etc.)
            into to the typical period profiles. optional, default: 'None'
            |br| Options are:

            * None: No integration at all.
            * 'append': append typical Periods to cluster centers
            * 'new_cluster_center': add the extreme period as additional cluster center. It is checked then for all
              Periods if they fit better to the this new center or their original cluster center.
            * 'replace_cluster_center': replaces the cluster center of the
              cluster where the extreme period belongs to with the periodly profile of the extreme period. (Worst
              case system design)
        :type extremePeriodMethod: string

        :param representationMethod: Chosen representation. If specified, the clusters are represented in the chosen
            way. Otherwise, each clusterMethod has its own commonly used default representation method.
            |br| Options are:

            * 'meanRepresentation' (default of 'averaging' and 'k_means')
            * 'medoidRepresentation' (default of 'k_medoids', 'hierarchical' and 'adjacent_periods')
            * 'minmaxmeanRepresentation'
            * 'durationRepresentation'/ 'distributionRepresentation'
            * 'distribtionAndMinMaxRepresentation'
        :type representationMethod: string

        :param representationDict: Dictionary which states for each attribute whether the profiles in each cluster
            should be represented by the minimum value or maximum value of each time step. This enables estimations
            to the safe side. This dictionary is needed when 'minmaxmeanRepresentation' is chosen. If not specified, the
            dictionary is set to containing 'mean' values only.
        :type representationDict: dict

        :param distributionPeriodWise: If durationRepresentation is chosen, you can choose whether the distribution of
            each cluster should be separately preserved or that of the original time series only (default: True)
        :type distributionPeriodWise:

        :param segmentRepresentationMethod: Chosen representation for the segments. If specified, the segments are
            represented in the chosen way. Otherwise, it is inherited from the representationMethod.
            |br| Options are:

            * 'meanRepresentation' (default of 'averaging' and 'k_means')
            * 'medoidRepresentation' (default of 'k_medoids', 'hierarchical' and 'adjacent_periods')
            * 'minmaxmeanRepresentation'
            * 'durationRepresentation'/ 'distributionRepresentation'
            * 'distribtionAndMinMaxRepresentation'
        :type segmentRepresentationMethod: string

        :param predefClusterOrder: Instead of aggregating a time series, a predefined grouping is taken
            which is given by this list. optional (default: None)
        :type predefClusterOrder: list or array

        :param predefClusterCenterIndices: If predefClusterOrder is give, this list can define the representative
            cluster candidates. Otherwise the medoid is taken. optional (default: None)
        :type predefClusterCenterIndices: list or array

        :param solver: Solver that is used for k_medoids clustering. optional (default: 'cbc' )
        :type solver: string

        :param roundOutput: Decimals to what the output time series get round. optional (default: None )
        :type roundOutput: integer

        :param addPeakMin: List of column names which's minimal value shall be added to the
            typical periods. E.g.: ['Temperature']. optional, default: []
        :type addPeakMin: list

        :param addPeakMax: List of column names which's maximal value shall be added to the
            typical periods. E.g. ['EDemand', 'HDemand']. optional, default: []
        :type addPeakMax: list

        :param addMeanMin: List of column names where the period with the cumulative minimal value
            shall be added to the typical periods. E.g. ['Photovoltaic']. optional, default: []
        :type addMeanMin: list

        :param addMeanMax: List of column names where the period with the cumulative maximal value
            shall be added to the typical periods. optional, default: []
        :type addMeanMax: list
        """
        if addMeanMin is None:
            addMeanMin = []
        if addMeanMax is None:
            addMeanMax = []
        if addPeakMax is None:
            addPeakMax = []
        if addPeakMin is None:
            addPeakMin = []
        if weightDict is None:
            weightDict = {}
        self.timeSeries = timeSeries

        self.resolution = resolution

        self.hoursPerPeriod = hoursPerPeriod

        self.noTypicalPeriods = noTypicalPeriods

        self.noSegments = noSegments

        self.clusterMethod = clusterMethod

        self.extremePeriodMethod = extremePeriodMethod

        self.evalSumPeriods = evalSumPeriods

        self.sortValues = sortValues

        self.sameMean = sameMean

        self.rescaleClusterPeriods = rescaleClusterPeriods

        self.weightDict = weightDict

        self.representationMethod = representationMethod

        self.representationDict = representationDict

        self.distributionPeriodWise = distributionPeriodWise

        self.segmentRepresentationMethod = segmentRepresentationMethod

        self.predefClusterOrder = predefClusterOrder

        self.predefClusterCenterIndices = predefClusterCenterIndices

        self.solver = solver

        self.segmentation = segmentation

        self.roundOutput = roundOutput

        self.addPeakMin = addPeakMin

        self.addPeakMax = addPeakMax

        self.addMeanMin = addMeanMin

        self.addMeanMax = addMeanMax

        self._check_init_args()

        # internal attributes
        self._normalizedMean = None

        return
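
    # Editor's note (not tsam source code): an illustrative construction,
    # assuming an hourly-indexed DataFrame df --
    #
    #     agg = TimeSeriesAggregation(
    #         df, noTypicalPeriods=8, hoursPerPeriod=24,
    #         clusterMethod="hierarchical",
    #     )
    #
    # stores the arguments, applies the None-to-empty defaults above, and
    # validates everything via _check_init_args() before any clustering runs.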

    def _check_init_args(self):

        # check timeSeries and set it as pandas DataFrame
        if not isinstance(self.timeSeries, pd.DataFrame):
            if isinstance(self.timeSeries, dict):
                self.timeSeries = pd.DataFrame(self.timeSeries)
            elif isinstance(self.timeSeries, np.ndarray):
                self.timeSeries = pd.DataFrame(self.timeSeries)
            else:
                raise ValueError(
                    "timeSeries has to be of type pandas.DataFrame() "
                    + "or of type np.array() "
                    "in initialization of object of class " + type(self).__name__
                )

        # check if extreme periods exist in the dataframe
        for peak in self.addPeakMin:
            if peak not in self.timeSeries.columns:
                raise ValueError(
                    peak
                    + ' listed in "addPeakMin"'
                    + " does not occur as timeSeries column"
                )
        for peak in self.addPeakMax:
            if peak not in self.timeSeries.columns:
                raise ValueError(
                    peak
                    + ' listed in "addPeakMax"'
                    + " does not occur as timeSeries column"
                )
        for peak in self.addMeanMin:
            if peak not in self.timeSeries.columns:
                raise ValueError(
                    peak
                    + ' listed in "addMeanMin"'
                    + " does not occur as timeSeries column"
                )
        for peak in self.addMeanMax:
            if peak not in self.timeSeries.columns:
                raise ValueError(
                    peak
                    + ' listed in "addMeanMax"'
                    + " does not occur as timeSeries column"
                )

        # derive resolution from date time index if not provided
        if self.resolution is None:
            try:
                timedelta = self.timeSeries.index[1] - self.timeSeries.index[0]
                self.resolution = float(timedelta.total_seconds()) / 3600
            except AttributeError:
                raise ValueError(
                    "'resolution' argument has to be nonnegative float or int"
                    + " or the given timeseries needs a datetime index"
                )
            except TypeError:
                try:
                    self.timeSeries.index = pd.to_datetime(self.timeSeries.index)
                    timedelta = self.timeSeries.index[1] - self.timeSeries.index[0]
                    self.resolution = float(timedelta.total_seconds()) / 3600
                except:
                    raise ValueError(
                        "'resolution' argument has to be nonnegative float or int"
                        + " or the given timeseries needs a datetime index"
                    )

        if not (isinstance(self.resolution, int) or isinstance(self.resolution, float)):
            raise ValueError("resolution has to be nonnegative float or int")

        # check hoursPerPeriod
        if self.hoursPerPeriod is None or self.hoursPerPeriod <= 0:
            raise ValueError("hoursPerPeriod has to be nonnegative float or int")

        # check typical Periods
        if (
            self.noTypicalPeriods is None
            or self.noTypicalPeriods <= 0
            or not isinstance(self.noTypicalPeriods, int)
        ):
            raise ValueError("noTypicalPeriods has to be nonnegative integer")
        self.timeStepsPerPeriod = int(self.hoursPerPeriod / self.resolution)
        if not self.timeStepsPerPeriod == self.hoursPerPeriod / self.resolution:
            raise ValueError(
                "The combination of hoursPerPeriod and the "
                + "resulution does not result in an integer "
                + "number of time steps per period"
            )
        if self.segmentation:
            if self.noSegments > self.timeStepsPerPeriod:
                warnings.warn(
                    "The number of segments must be less than or equal to the number of time steps per period. "
                    "Segment number is decreased to number of time steps per period."
                )
                self.noSegments = self.timeStepsPerPeriod

        # check clusterMethod
        if self.clusterMethod not in self.CLUSTER_METHODS:
            raise ValueError(
                "clusterMethod needs to be one of "
                + "the following: "
                + "{}".format(self.CLUSTER_METHODS)
            )

        # check representationMethod
        if (
            self.representationMethod is not None
            and self.representationMethod not in self.REPRESENTATION_METHODS
        ):
            raise ValueError(
                "If specified, representationMethod needs to be one of "
                + "the following: "
                + "{}".format(self.REPRESENTATION_METHODS)
            )

        # check representationMethod
        if self.segmentRepresentationMethod is None:
            self.segmentRepresentationMethod = self.representationMethod
        else:
            if self.segmentRepresentationMethod not in self.REPRESENTATION_METHODS:
                raise ValueError(
                    "If specified, segmentRepresentationMethod needs to be one of "
                    + "the following: "
                    + "{}".format(self.REPRESENTATION_METHODS)
                )

        # if representationDict None, represent by maximum time steps in each cluster
        if self.representationDict is None:
            self.representationDict = {i: "mean" for i in list(self.timeSeries.columns)}
        # sort representationDict alphabetically to make sure that the min, max or mean function is applied to the right
        # column
        self.representationDict = (
            pd.Series(self.representationDict).sort_index(axis=0).to_dict()
        )

        # check extremePeriods
        if self.extremePeriodMethod not in self.EXTREME_PERIOD_METHODS:
            raise ValueError(
                "extremePeriodMethod needs to be one of "
                + "the following: "
                + "{}".format(self.EXTREME_PERIOD_METHODS)
            )

        # check evalSumPeriods
        if not isinstance(self.evalSumPeriods, bool):
            raise ValueError("evalSumPeriods has to be boolean")
        # check sortValues
        if not isinstance(self.sortValues, bool):
            raise ValueError("sortValues has to be boolean")
        # check sameMean
        if not isinstance(self.sameMean, bool):
            raise ValueError("sameMean has to be boolean")
        # check rescaleClusterPeriods
        if not isinstance(self.rescaleClusterPeriods, bool):
            raise ValueError("rescaleClusterPeriods has to be boolean")

        # check predefClusterOrder
        if self.predefClusterOrder is not None:
            if not isinstance(self.predefClusterOrder, (list, np.ndarray)):
                raise ValueError("predefClusterOrder has to be an array or list")
            if self.predefClusterCenterIndices is not None:
                # check predefClusterCenterIndices
                if not isinstance(self.predefClusterCenterIndices, (list, np.ndarray)):
                    raise ValueError(
                        "predefClusterCenterIndices has to be an array or list"
                    )
        elif self.predefClusterCenterIndices is not None:
            raise ValueError(
                'If "predefClusterCenterIndices" is defined, "predefClusterOrder" needs to be defined as well'
            )

        return
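
    # Editor's note (not tsam source code): worked numbers for the
    # timeStepsPerPeriod check above -- hoursPerPeriod=24 with resolution=1.0
    # gives 24 steps per period and resolution=0.25 gives 96, while
    # resolution=5 would give 4.8 and therefore raises the ValueError.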

    def _normalizeTimeSeries(self, sameMean=False):
        """
        Normalizes each time series independently.

        :param sameMean: Decides if the time series should have all the same mean value.
            Relevant for weighting time series. optional (default: False)
        :type sameMean: boolean

        :returns: normalized time series
        """
        min_max_scaler = preprocessing.MinMaxScaler()
        normalizedTimeSeries = pd.DataFrame(
            min_max_scaler.fit_transform(self.timeSeries),
            columns=self.timeSeries.columns,
            index=self.timeSeries.index,
        )

        self._normalizedMean = normalizedTimeSeries.mean()
        if sameMean:
            normalizedTimeSeries /= self._normalizedMean

        return normalizedTimeSeries
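
    # Editor's note (not tsam source code): with sameMean=True each normalized
    # column is divided by its own mean, so every column ends up with mean 1.0;
    # _unnormalizeTimeSeries below multiplies that mean back in before the
    # inverse min-max transform, making the two methods an exact round trip.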

    def _unnormalizeTimeSeries(self, normalizedTimeSeries, sameMean=False):
        """
        Equivalent to '_normalizeTimeSeries'. Just does the back
        transformation.

        :param normalizedTimeSeries: Time series which should get back transformated. required
        :type normalizedTimeSeries: pandas.DataFrame()

        :param sameMean: Has to have the same value as in _normalizeTimeSeries. optional (default: False)
        :type sameMean: boolean

        :returns: unnormalized time series
        """
        from sklearn import preprocessing

        min_max_scaler = preprocessing.MinMaxScaler()
        min_max_scaler.fit(self.timeSeries)

        if sameMean:
            normalizedTimeSeries *= self._normalizedMean

        unnormalizedTimeSeries = pd.DataFrame(
            min_max_scaler.inverse_transform(normalizedTimeSeries),
            columns=normalizedTimeSeries.columns,
            index=normalizedTimeSeries.index,
        )

        return unnormalizedTimeSeries

    def _preProcessTimeSeries(self):
        """
        Normalize the time series, weight them based on the weight dict and
        puts them into the correct matrix format.
        """
        # first sort the time series in order to avoid bug mention in #18
        self.timeSeries.sort_index(axis=1, inplace=True)

        # convert the dataframe to floats
        self.timeSeries = self.timeSeries.astype(float)

        # normalize the time series and group them to periodly profiles
        self.normalizedTimeSeries = self._normalizeTimeSeries(sameMean=self.sameMean)

        for column in self.weightDict:
            if self.weightDict[column] < MIN_WEIGHT:
                print(
                    'weight of "'
                    + str(column)
                    + '" set to the minmal tolerable weighting'
                )
                self.weightDict[column] = MIN_WEIGHT
            self.normalizedTimeSeries[column] = (
                self.normalizedTimeSeries[column] * self.weightDict[column]
            )

        self.normalizedPeriodlyProfiles, self.timeIndex = unstackToPeriods(
            self.normalizedTimeSeries, self.timeStepsPerPeriod
        )

        # check if no NaN is in the resulting profiles
        if self.normalizedPeriodlyProfiles.isnull().values.any():
            raise ValueError(
                "Pre processed data includes NaN. Please check the timeSeries input data."
            )

    def _postProcessTimeSeries(self, normalizedTimeSeries, applyWeighting=True):
        """
        Neutralizes the weighting the time series back and unnormalizes them.
        """
        if applyWeighting:
            for column in self.weightDict:
                normalizedTimeSeries[column] = (
                    normalizedTimeSeries[column] / self.weightDict[column]
                )

        unnormalizedTimeSeries = self._unnormalizeTimeSeries(
            normalizedTimeSeries, sameMean=self.sameMean
        )

        if self.roundOutput is not None:
            unnormalizedTimeSeries = unnormalizedTimeSeries.round(
                decimals=self.roundOutput
            )

        return unnormalizedTimeSeries
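
    # Editor's note (not tsam source code): a profile weighted with, e.g.,
    # weightDict={"load": 2.0} is multiplied by 2.0 before clustering in
    # _preProcessTimeSeries and divided by 2.0 again here, so the weight only
    # influences the distance measure, not the scale of the output.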

    def _addExtremePeriods(
        self,
        groupedSeries,
        clusterCenters,
        clusterOrder,
        extremePeriodMethod="new_cluster_center",
        addPeakMin=None,
        addPeakMax=None,
        addMeanMin=None,
        addMeanMax=None,
    ):
        """
        Adds different extreme periods based on the to the clustered data,
        decribed by the clusterCenters and clusterOrder.

        :param groupedSeries: periodly grouped groupedSeries on which basis it should be decided,
            which period is an extreme period. required
        :type groupedSeries: pandas.DataFrame()

        :param clusterCenters: Output from clustering with sklearn. required
        :type clusterCenters: dict

        :param clusterOrder: Output from clsutering with sklearn. required
        :type clusterOrder: dict

        :param extremePeriodMethod: Chosen extremePeriodMethod. The method. optional(default: 'new_cluster_center' )
        :type extremePeriodMethod: string

        :returns: - **newClusterCenters** -- The new cluster centers extended with the extreme periods.
                  - **newClusterOrder** -- The new cluster order including the extreme periods.
                  - **extremeClusterIdx** -- A list of indices where in the newClusterCenters are the extreme
                      periods located.
        """

        # init required dicts and lists
        self.extremePeriods = {}
        extremePeriodNo = []

        ccList = [center.tolist() for center in clusterCenters]

        # check which extreme periods exist in the profile and add them to
        # self.extremePeriods dict
        for column in self.timeSeries.columns:

            if column in addPeakMax:
                stepNo = groupedSeries[column].max(axis=1).idxmax()
                # add only if stepNo is not already in extremePeriods
                # if it is not already a cluster center
                if (
                    stepNo not in extremePeriodNo
                    and groupedSeries.loc[stepNo, :].values.tolist() not in ccList
                ):
                    max_col = self._append_col_with(column, " max.")
                    self.extremePeriods[max_col] = {
                        "stepNo": stepNo,
                        "profile": groupedSeries.loc[stepNo, :].values,
                        "column": column,
                    }
                    extremePeriodNo.append(stepNo)

            if column in addPeakMin:
                stepNo = groupedSeries[column].min(axis=1).idxmin()
                # add only if stepNo is not already in extremePeriods
                # if it is not already a cluster center
                if (
                    stepNo not in extremePeriodNo
                    and groupedSeries.loc[stepNo, :].values.tolist() not in ccList
                ):
                    min_col = self._append_col_with(column, " min.")
                    self.extremePeriods[min_col] = {
                        "stepNo": stepNo,
                        "profile": groupedSeries.loc[stepNo, :].values,
                        "column": column,
                    }
                    extremePeriodNo.append(stepNo)

            if column in addMeanMax:
                stepNo = groupedSeries[column].mean(axis=1).idxmax()
                # add only if stepNo is not already in extremePeriods
                # if it is not already a cluster center
                if (
                    stepNo not in extremePeriodNo
                    and groupedSeries.loc[stepNo, :].values.tolist() not in ccList
                ):
                    mean_max_col = self._append_col_with(column, " daily max.")
                    self.extremePeriods[mean_max_col] = {
                        "stepNo": stepNo,
                        "profile": groupedSeries.loc[stepNo, :].values,
                        "column": column,
                    }
                    extremePeriodNo.append(stepNo)

            if column in addMeanMin:
                stepNo = groupedSeries[column].mean(axis=1).idxmin()
                # add only if stepNo is not already in extremePeriods and
                # if it is not already a cluster center
                if (
                    stepNo not in extremePeriodNo
                    and groupedSeries.loc[stepNo, :].values.tolist() not in ccList
                ):
                    mean_min_col = self._append_col_with(column, " daily min.")
                    self.extremePeriods[mean_min_col] = {
                        "stepNo": stepNo,
                        "profile": groupedSeries.loc[stepNo, :].values,
                        "column": column,
                    }
                    extremePeriodNo.append(stepNo)

        for periodType in self.extremePeriods:
            # get current related clusters of extreme periods
            self.extremePeriods[periodType]["clusterNo"] = clusterOrder[
                self.extremePeriods[periodType]["stepNo"]
            ]

        # init new cluster structure
        newClusterCenters = []
        newClusterOrder = clusterOrder
        extremeClusterIdx = []

        # integrate extreme periods to clusters
        if extremePeriodMethod == "append":
            # attach extreme periods to cluster centers
            for i, cluster_center in enumerate(clusterCenters):
                newClusterCenters.append(cluster_center)
            for i, periodType in enumerate(self.extremePeriods):
                extremeClusterIdx.append(len(newClusterCenters))
                newClusterCenters.append(self.extremePeriods[periodType]["profile"])
                newClusterOrder[self.extremePeriods[periodType]["stepNo"]] = i + len(
                    clusterCenters
                )

        elif extremePeriodMethod == "new_cluster_center":
            for i, cluster_center in enumerate(clusterCenters):
                newClusterCenters.append(cluster_center)
            # attach extrem periods to cluster centers and consider for all periods
            # if the fit better to the cluster or the extrem period
            for i, periodType in enumerate(self.extremePeriods):
                extremeClusterIdx.append(len(newClusterCenters))
                newClusterCenters.append(self.extremePeriods[periodType]["profile"])
                self.extremePeriods[periodType]["newClusterNo"] = i + len(
                    clusterCenters
                )

            for i, cPeriod in enumerate(newClusterOrder):
                # caclulate euclidean distance to cluster center
                cluster_dist = sum(
                    (groupedSeries.iloc[i].values - clusterCenters[cPeriod]) ** 2
                )
                for ii, extremPeriodType in enumerate(self.extremePeriods):
                    # exclude other extreme periods from adding to the new
                    # cluster center
                    isOtherExtreme = False
                    for otherExPeriod in self.extremePeriods:
                        if (
                            i == self.extremePeriods[otherExPeriod]["stepNo"]
                            and otherExPeriod != extremPeriodType
                        ):
                            isOtherExtreme = True
                    # calculate distance to extreme periods
                    extperiod_dist = sum(
                        (
                            groupedSeries.iloc[i].values
                            - self.extremePeriods[extremPeriodType]["profile"]
                        )
                        ** 2
                    )
                    # choose new cluster relation
                    if extperiod_dist < cluster_dist and not isOtherExtreme:
                        newClusterOrder[i] = self.extremePeriods[extremPeriodType][
                            "newClusterNo"
                        ]

        elif extremePeriodMethod == "replace_cluster_center":
            # Worst Case Clusterperiods
            newClusterCenters = clusterCenters
            for periodType in self.extremePeriods:
                index = groupedSeries.columns.get_loc(
                    self.extremePeriods[periodType]["column"]
                )
                newClusterCenters[self.extremePeriods[periodType]["clusterNo"]][
                    index
                ] = self.extremePeriods[periodType]["profile"][index]
                if (
                    not self.extremePeriods[periodType]["clusterNo"]
                    in extremeClusterIdx
                ):
                    extremeClusterIdx.append(
                        self.extremePeriods[periodType]["clusterNo"]
                    )

        return newClusterCenters, newClusterOrder, extremeClusterIdx

    def _append_col_with(self, column, append_with=" max."):
        """Appends a string to the column name. For MultiIndexes, which turn out to be
        tuples when this method is called, only last level is changed"""
        if isinstance(column, str):
            return column + append_with
        elif isinstance(column, tuple):
            col = list(column)
            col[-1] = col[-1] + append_with
            return tuple(col)
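
    # Editor's note (not tsam source code): _append_col_with("EDemand", " max.")
    # returns "EDemand max.", while a MultiIndex column such as
    # ("site1", "EDemand") becomes ("site1", "EDemand max.").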

    def _rescaleClusterPeriods(self, clusterOrder, clusterPeriods, extremeClusterIdx):
        """
        Rescale the values of the clustered Periods such that mean of each time
        series in the typical Periods fits the mean value of the original time
        series, without changing the values of the extremePeriods.
        """
        weightingVec = pd.Series(self._clusterPeriodNoOccur).values
        typicalPeriods = pd.concat([
            pd.Series(s, index=self.normalizedPeriodlyProfiles.columns)
            for s in self.clusterPeriods
        ], axis=1).T
        idx_wo_peak = np.delete(typicalPeriods.index, extremeClusterIdx)
        for column in self.timeSeries.columns:
            diff = 1
            sum_raw = self.normalizedPeriodlyProfiles[column].sum().sum()
            sum_peak = np.sum(
                weightingVec[extremeClusterIdx]
                * typicalPeriods[column].loc[extremeClusterIdx, :].sum(axis=1)
            )
            sum_clu_wo_peak = np.sum(
                weightingVec[idx_wo_peak]
                * typicalPeriods[column].loc[idx_wo_peak, :].sum(axis=1)
            )

            # define the upper scale dependent on the weighting of the series
            scale_ub = 1.0
            if self.sameMean:
                scale_ub = (
                    scale_ub
                    * self.timeSeries[column].max()
                    / self.timeSeries[column].mean()
                )
            if column in self.weightDict:
                scale_ub = scale_ub * self.weightDict[column]

            # difference between predicted and original sum
            diff = abs(sum_raw - (sum_clu_wo_peak + sum_peak))

            # use while loop to rescale cluster periods
            a = 0
            while diff > sum_raw * TOLERANCE and a < MAX_ITERATOR:
                # rescale values
                typicalPeriods.loc[idx_wo_peak, column] = (
                    typicalPeriods[column].loc[idx_wo_peak, :].values
                    * (sum_raw - sum_peak)
                    / sum_clu_wo_peak
                )

                # reset values higher than the upper sacle or less than zero
                typicalPeriods[column].clip(lower=0, upper=scale_ub, inplace=True)

                typicalPeriods[column].fillna(0.0, inplace=True)

                # calc new sum and new diff to orig data
                sum_clu_wo_peak = np.sum(
                    weightingVec[idx_wo_peak]
                    * typicalPeriods[column].loc[idx_wo_peak, :].sum(axis=1)
                )
                diff = abs(sum_raw - (sum_clu_wo_peak + sum_peak))
                a += 1
            if a == MAX_ITERATOR:
                deviation = str(round((diff / sum_raw) * 100, 2))
                warnings.warn(
                    'Max iteration number reached for "'
                    + str(column)
                    + '" while rescaling the cluster periods.'
                    + " The integral of the aggregated time series deviates by: "
                    + deviation
                    + "%"
                )
        return typicalPeriods.values
|
|
895
|
+
|
|
896
|
+
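    # Worked illustration of the rescaling step above: the non-extreme typical
    # periods are multiplied by (sum_raw - sum_peak) / sum_clu_wo_peak. With an
    # original weighted integral sum_raw = 100, an extreme period share
    # sum_peak = 10 and a clustered integral sum_clu_wo_peak = 75, every
    # non-extreme value is scaled by (100 - 10) / 75 = 1.2. Since the subsequent
    # clipping to [0, scale_ub] can re-introduce a deviation, the loop repeats
    # up to MAX_ITERATOR times until the gap falls below sum_raw * TOLERANCE.
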
    def _clusterSortedPeriods(self, candidates, n_init=20):
        """
        Runs the clustering algorithms for the sorted profiles within the period
        instead of the original profiles. (Duration curve clustering)
        """
        # initialize
        normalizedSortedPeriodlyProfiles = copy.deepcopy(
            self.normalizedPeriodlyProfiles
        )
        for column in self.timeSeries.columns:
            # sort each period individually
            df = normalizedSortedPeriodlyProfiles[column]
            values = df.values
            values.sort(axis=1)
            values = values[:, ::-1]
            normalizedSortedPeriodlyProfiles[column] = pd.DataFrame(
                values, df.index, df.columns
            )
        sortedClusterValues = normalizedSortedPeriodlyProfiles.values

        (
            altClusterCenters,
            self.clusterCenterIndices,
            clusterOrders_C,
        ) = aggregatePeriods(
            sortedClusterValues,
            n_clusters=self.noTypicalPeriods,
            n_iter=30,
            solver=self.solver,
            clusterMethod=self.clusterMethod,
            representationMethod=self.representationMethod,
            representationDict=self.representationDict,
            distributionPeriodWise=self.distributionPeriodWise,
            timeStepsPerPeriod=self.timeStepsPerPeriod,
        )

        clusterCenters_C = []

        # take the clusters and determine the most representative sorted
        # period as the cluster center
        for clusterNum in np.unique(clusterOrders_C):
            indice = np.where(clusterOrders_C == clusterNum)[0]
            if len(indice) > 1:
                # mean value for each time step of each time series over
                # all periods in the cluster
                currentMean_C = sortedClusterValues[indice].mean(axis=0)
                # index of the period with the lowest distance to the cluster
                # center
                mindistIdx_C = np.argmin(
                    np.square(sortedClusterValues[indice] - currentMean_C).sum(axis=1)
                )
                # take the original time series of this period
                medoid_C = candidates[indice][mindistIdx_C]

                # append it to the cluster centers
                clusterCenters_C.append(medoid_C)

            else:
                # if only one period is part of the cluster, take it directly
                clusterCenters_C.append(candidates[indice][0])

        return clusterCenters_C, clusterOrders_C

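    # A minimal sketch for the duration curve clustering above, assuming the
    # constructor's sortValues flag (which routes createTypicalPeriods to
    # _clusterSortedPeriods); "raw" is a placeholder DataFrame:
    #
    # >>> agg = TimeSeriesAggregation(
    # ...     raw, noTypicalPeriods=8, hoursPerPeriod=24, sortValues=True,
    # ... )
    # >>> typPeriods = agg.createTypicalPeriods()
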
    def createTypicalPeriods(self):
        """
        Clusters the periods.

        :returns: **self.typicalPeriods** -- All typical periods in scaled form.
        """
        self._preProcessTimeSeries()

        # check for additional cluster parameters
        if self.evalSumPeriods:
            evaluationValues = (
                self.normalizedPeriodlyProfiles.stack(level=0)
                .sum(axis=1)
                .unstack(level=1)
            )
            # number of values which have to be deleted again later
            delClusterParams = -len(evaluationValues.columns)
            candidates = np.concatenate(
                (self.normalizedPeriodlyProfiles.values, evaluationValues.values),
                axis=1,
            )
        else:
            delClusterParams = None
            candidates = self.normalizedPeriodlyProfiles.values

        # skip the aggregation procedure for the case of a predefined cluster
        # sequence and derive only the correct representation
        if self.predefClusterOrder is not None:
            self._clusterOrder = self.predefClusterOrder
            # check if representatives are defined as well
            if self.predefClusterCenterIndices is not None:
                self.clusterCenterIndices = self.predefClusterCenterIndices
                self.clusterCenters = candidates[self.predefClusterCenterIndices]
            else:
                # otherwise take the medoids
                self.clusterCenters, self.clusterCenterIndices = representations(
                    candidates,
                    self._clusterOrder,
                    default="medoidRepresentation",
                    representationMethod=self.representationMethod,
                    representationDict=self.representationDict,
                    timeStepsPerPeriod=self.timeStepsPerPeriod,
                )
        else:
            cluster_duration = time.time()
            if not self.sortValues:
                # cluster the data
                (
                    self.clusterCenters,
                    self.clusterCenterIndices,
                    self._clusterOrder,
                ) = aggregatePeriods(
                    candidates,
                    n_clusters=self.noTypicalPeriods,
                    n_iter=100,
                    solver=self.solver,
                    clusterMethod=self.clusterMethod,
                    representationMethod=self.representationMethod,
                    representationDict=self.representationDict,
                    distributionPeriodWise=self.distributionPeriodWise,
                    timeStepsPerPeriod=self.timeStepsPerPeriod,
                )
            else:
                self.clusterCenters, self._clusterOrder = self._clusterSortedPeriods(
                    candidates
                )
            self.clusteringDuration = time.time() - cluster_duration

        # get the cluster centers without the additional evaluation values
        self.clusterPeriods = []
        for i, cluster_center in enumerate(self.clusterCenters):
            self.clusterPeriods.append(cluster_center[:delClusterParams])

        if self.extremePeriodMethod != "None":
            # overwrite clusterPeriods and clusterOrder
            (
                self.clusterPeriods,
                self._clusterOrder,
                self.extremeClusterIdx,
            ) = self._addExtremePeriods(
                self.normalizedPeriodlyProfiles,
                self.clusterPeriods,
                self._clusterOrder,
                extremePeriodMethod=self.extremePeriodMethod,
                addPeakMin=self.addPeakMin,
                addPeakMax=self.addPeakMax,
                addMeanMin=self.addMeanMin,
                addMeanMax=self.addMeanMax,
            )
        else:
            self.extremeClusterIdx = []

        # get the number of occurrences of the typical periods
        nums, counts = np.unique(self._clusterOrder, return_counts=True)
        self._clusterPeriodNoOccur = {num: counts[ii] for ii, num in enumerate(nums)}

        if self.rescaleClusterPeriods:
            self.clusterPeriods = self._rescaleClusterPeriods(
                self._clusterOrder, self.clusterPeriods, self.extremeClusterIdx
            )

        # if additional time steps have been attached, reduce the number of
        # occurrences of the typical period which is related to these time steps
        if len(self.timeSeries) % self.timeStepsPerPeriod != 0:
            self._clusterPeriodNoOccur[self._clusterOrder[-1]] -= (
                1
                - float(len(self.timeSeries) % self.timeStepsPerPeriod)
                / self.timeStepsPerPeriod
            )

        # put the clustered data into pandas format and scale it back
        self.normalizedTypicalPeriods = pd.concat([
            pd.Series(s, index=self.normalizedPeriodlyProfiles.columns)
            for s in self.clusterPeriods
        ], axis=1).unstack("TimeStep").T

        if self.segmentation:
            from tsam.utils.segmentation import segmentation

            (
                self.segmentedNormalizedTypicalPeriods,
                self.predictedSegmentedNormalizedTypicalPeriods,
            ) = segmentation(
                self.normalizedTypicalPeriods,
                self.noSegments,
                self.timeStepsPerPeriod,
                representationMethod=self.segmentRepresentationMethod,
                representationDict=self.representationDict,
                distributionPeriodWise=self.distributionPeriodWise,
            )
            self.normalizedTypicalPeriods = (
                self.segmentedNormalizedTypicalPeriods.reset_index(level=3, drop=True)
            )

        self.typicalPeriods = self._postProcessTimeSeries(self.normalizedTypicalPeriods)

        # check that the boundaries of the original time series are not exceeded
        if np.array(
            self.typicalPeriods.max(axis=0) > self.timeSeries.max(axis=0)
        ).any():
            warning_list = self.typicalPeriods.max(axis=0) > self.timeSeries.max(axis=0)
            warnings.warn(
                "Something went wrong... At least one maximal value of the "
                + "aggregated time series exceeds the maximal value of "
                + "the input time series for: "
                + "{}".format(list(warning_list[warning_list > 0].index))
            )
        if np.array(
            self.typicalPeriods.min(axis=0) < self.timeSeries.min(axis=0)
        ).any():
            warning_list = self.typicalPeriods.min(axis=0) < self.timeSeries.min(axis=0)
            warnings.warn(
                "Something went wrong... At least one minimal value of the "
                + "aggregated time series falls below the minimal value of "
                + "the input time series for: "
                + "{}".format(list(warning_list[warning_list > 0].index))
            )
        return self.typicalPeriods

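    # A minimal end-to-end sketch for createTypicalPeriods, along the lines of
    # the package README; "testdata.csv" is a placeholder input file:
    #
    # >>> import pandas as pd
    # >>> import tsam.timeseriesaggregation as tsam
    # >>> raw = pd.read_csv("testdata.csv", index_col=0, parse_dates=True)
    # >>> agg = tsam.TimeSeriesAggregation(
    # ...     raw, noTypicalPeriods=8, hoursPerPeriod=24,
    # ...     clusterMethod="hierarchical",
    # ... )
    # >>> typPeriods = agg.createTypicalPeriods()
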
    def prepareEnersysInput(self):
        """
        Creates all dictionaries and lists which are required for the energy
        system optimization input.
        """
        warnings.warn(
            '"prepareEnersysInput" is deprecated, since the created attributes can be directly accessed as properties',
            DeprecationWarning,
        )
        return

    @property
    def stepIdx(self):
        """
        Index inside a single cluster
        """
        if self.segmentation:
            return [ix for ix in range(0, self.noSegments)]
        else:
            return [ix for ix in range(0, self.timeStepsPerPeriod)]

    @property
    def clusterPeriodIdx(self):
        """
        Index of the clustered periods
        """
        if not hasattr(self, "_clusterOrder"):
            self.createTypicalPeriods()
        return np.sort(np.unique(self._clusterOrder))

    @property
    def clusterOrder(self):
        """
        The sequence/order of the typical periods that represents
        the original time series
        """
        if not hasattr(self, "_clusterOrder"):
            self.createTypicalPeriods()
        return self._clusterOrder

    @property
    def clusterPeriodNoOccur(self):
        """
        How often each typical period occurs in the original time series
        """
        if not hasattr(self, "_clusterOrder"):
            self.createTypicalPeriods()
        return self._clusterPeriodNoOccur

    @property
    def clusterPeriodDict(self):
        """
        Time series data for each period index as dictionary
        """
        if not hasattr(self, "_clusterOrder"):
            self.createTypicalPeriods()
        if not hasattr(self, "_clusterPeriodDict"):
            self._clusterPeriodDict = {}
            for column in self.typicalPeriods:
                self._clusterPeriodDict[column] = self.typicalPeriods[column].to_dict()
        return self._clusterPeriodDict

    @property
    def segmentDurationDict(self):
        """
        Segment duration in time steps for each period index as dictionary
        """
        if not hasattr(self, "_clusterOrder"):
            self.createTypicalPeriods()
        if not hasattr(self, "_segmentDurationDict"):
            if self.segmentation:
                self._segmentDurationDict = (
                    self.segmentedNormalizedTypicalPeriods.drop(
                        self.segmentedNormalizedTypicalPeriods.columns, axis=1
                    )
                    .reset_index(level=3, drop=True)
                    .reset_index(2)
                    .to_dict()
                )
            else:
                self._segmentDurationDict = self.typicalPeriods.drop(
                    self.typicalPeriods.columns, axis=1
                )
                self._segmentDurationDict["Segment Duration"] = 1
                self._segmentDurationDict = self._segmentDurationDict.to_dict()
                warnings.warn(
                    "Segmentation is turned off. Each segment corresponds to a single time step."
                )
        return self._segmentDurationDict

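    # Sketch of how the properties above can feed an energy system model
    # (attribute names are those defined in this class; "agg" is an assumed
    # TimeSeriesAggregation instance):
    #
    # >>> order = agg.clusterOrder             # typical period per original period
    # >>> weights = agg.clusterPeriodNoOccur   # occurrences per typical period
    # >>> profiles = agg.clusterPeriodDict     # nested dict of profile values
    # >>> durations = agg.segmentDurationDict  # segment lengths in time steps
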
    def predictOriginalData(self):
        """
        Predicts the overall time series as if every period were replaced by its
        related cluster center.

        :returns: **predictedData** (pandas.DataFrame) -- DataFrame which has the same shape as the original one.
        """
        if not hasattr(self, "_clusterOrder"):
            self.createTypicalPeriods()

        # list up the typical periods according to their order of occurrence
        # using the _clusterOrder
        new_data = []
        for label in self._clusterOrder:
            # if segmentation is used, take the segmented typical periods whose
            # time steps were predicted back to the same number of time steps
            # as the unsegmented typical periods
            if self.segmentation:
                new_data.append(
                    self.predictedSegmentedNormalizedTypicalPeriods.loc[label, :]
                    .unstack()
                    .values
                )
            else:
                new_data.append(
                    self.normalizedTypicalPeriods.loc[label, :].unstack().values
                )

        # back to matrix form
        clustered_data_df = pd.DataFrame(
            new_data,
            columns=self.normalizedPeriodlyProfiles.columns,
            index=self.normalizedPeriodlyProfiles.index,
        )
        clustered_data_df = clustered_data_df.stack(level="TimeStep")

        # back to the original form
        self.normalizedPredictedData = pd.DataFrame(
            clustered_data_df.values[: len(self.timeSeries)],
            index=self.timeSeries.index,
            columns=self.timeSeries.columns,
        )
        # normalize again if sameMean = True to avoid a doubled unnormalization
        # when _postProcessTimeSeries is used after createTypicalPeriods has
        # been called
        if self.sameMean:
            self.normalizedPredictedData /= self._normalizedMean
        self.predictedData = self._postProcessTimeSeries(
            self.normalizedPredictedData, applyWeighting=False
        )

        return self.predictedData

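    # Sketch: reconstruct the full-length approximation of the input (again
    # assuming an aggregation instance "agg"); by construction the result has
    # the same shape as the input time series:
    #
    # >>> predicted = agg.predictOriginalData()
    # >>> predicted.shape == agg.timeSeries.shape
    # True
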
    def indexMatching(self):
        """
        Relates the index of the original time series to the indices
        represented by the clusters.

        :returns: **timeStepMatching** (pandas.DataFrame) -- DataFrame with the
            same index as the original time series, mapping each time step to
            its typical period number and time step (and segment, if
            segmentation is active).
        """
        if not hasattr(self, "_clusterOrder"):
            self.createTypicalPeriods()

        # create aggregated period and time step index lists
        periodIndex = []
        stepIndex = []
        for label in self._clusterOrder:
            for step in range(self.timeStepsPerPeriod):
                periodIndex.append(label)
                stepIndex.append(step)

        # create a dataframe
        timeStepMatching = pd.DataFrame(
            [periodIndex, stepIndex],
            index=["PeriodNum", "TimeStep"],
            columns=self.timeIndex,
        ).T

        # if segmentation is chosen, append another column stating which
        # segment each time step belongs to
        if self.segmentation:
            segmentIndex = []
            for label in self._clusterOrder:
                segmentIndex.extend(
                    np.repeat(
                        self.segmentedNormalizedTypicalPeriods.loc[
                            label, :
                        ].index.get_level_values(0),
                        self.segmentedNormalizedTypicalPeriods.loc[
                            label, :
                        ].index.get_level_values(1),
                    ).values
                )
            timeStepMatching = pd.DataFrame(
                [periodIndex, stepIndex, segmentIndex],
                index=["PeriodNum", "TimeStep", "SegmentIndex"],
                columns=self.timeIndex,
            ).T

        return timeStepMatching

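    # Sketch: map every original time stamp to its typical period and time step
    # (a "SegmentIndex" column is only added when segmentation is active):
    #
    # >>> matching = agg.indexMatching()
    # >>> matching.columns.tolist()
    # ['PeriodNum', 'TimeStep']
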
    def accuracyIndicators(self):
        """
        Compares the predicted data with the original time series.

        :returns: **pd.DataFrame(indicatorRaw)** (pandas.DataFrame) -- DataFrame
            containing indicators evaluating the accuracy of the aggregation
        """
        if not hasattr(self, "predictedData"):
            self.predictOriginalData()

        indicatorRaw = {
            "RMSE": {},
            "RMSE_duration": {},
            "MAE": {},
        }  # 'Silhouette score':{},

        for column in self.normalizedTimeSeries.columns:
            if self.weightDict:
                origTS = self.normalizedTimeSeries[column] / self.weightDict[column]
            else:
                origTS = self.normalizedTimeSeries[column]
            predTS = self.normalizedPredictedData[column]
            indicatorRaw["RMSE"][column] = np.sqrt(mean_squared_error(origTS, predTS))
            indicatorRaw["RMSE_duration"][column] = np.sqrt(
                mean_squared_error(
                    origTS.sort_values(ascending=False).reset_index(drop=True),
                    predTS.sort_values(ascending=False).reset_index(drop=True),
                )
            )
            indicatorRaw["MAE"][column] = mean_absolute_error(origTS, predTS)

        return pd.DataFrame(indicatorRaw)

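    # Sketch: the indicator frame built above holds one row per time series and
    # the columns "RMSE", "RMSE_duration" and "MAE":
    #
    # >>> indicators = agg.accuracyIndicators()
    # >>> indicators.columns.tolist()
    # ['RMSE', 'RMSE_duration', 'MAE']
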
    def totalAccuracyIndicators(self):
        """
        Derives the accuracy indicators over all time series.
        """
        return np.sqrt(
            self.accuracyIndicators().pow(2).sum()
            / len(self.normalizedTimeSeries.columns)
        )
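
    # Sketch: collapse the per-series indicators into a single quadratic mean
    # per indicator, returned as a pandas Series indexed by the indicator names:
    #
    # >>> agg.totalAccuracyIndicators()
    # RMSE             ...
    # RMSE_duration    ...
    # MAE              ...
    # dtype: float64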
|