mttf 0.31.202309070119__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mttf might be problematic.

@@ -1,889 +0,0 @@
- # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ==============================================================================
- # pylint: disable=protected-access
- """Functions for saving and loading a Keras Model from HDF5 format. Patched by MT. Only for TF 2 versions `< 2.4.0`.
- """
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
-
- import json
- import os
-
- import numpy as np
- from six.moves import zip  # pylint: disable=redefined-builtin
-
- from tensorflow.python.keras import backend as K
- from tensorflow.python.keras import optimizers
- from tensorflow.python.keras.saving import model_config as model_config_lib
- from tensorflow.python.keras.saving import saving_utils
- from tensorflow.python.keras.utils import conv_utils
- from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
- from tensorflow.python.ops import variables as variables_module
- from tensorflow.python.platform import tf_logging as logging
- from tensorflow.python.util import serialization
- from tensorflow.python.util.lazy_loader import LazyLoader
-
- # pylint: disable=g-import-not-at-top
- try:
-   import h5py
-   HDF5_OBJECT_HEADER_LIMIT = 64512
- except ImportError:
-   h5py = None
- # pylint: enable=g-import-not-at-top
-
- # TODO(b/134426265): Switch back to single-quotes to match the rest of the file
- # once the issue with copybara is fixed.
- # pylint:disable=g-inconsistent-quotes
- sequential_lib = LazyLoader(
-     "sequential_lib", globals(),
-     "tensorflow.python.keras.engine.sequential")
- # pylint:enable=g-inconsistent-quotes
-
-
- def decoded(x):
-   return x.decode('utf8') if isinstance(x, bytes) else x
-
-
- def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):
-   """Saves a model to an HDF5 file.
-
-   The saved model contains:
-       - the model's configuration (topology)
-       - the model's weights
-       - the model's optimizer's state (if any)
-
-   Thus the saved model can be reinstantiated in
-   the exact same state, without any of the code
-   used for model definition or training.
-
-   Arguments:
-       model: Keras model instance to be saved.
-       filepath: One of the following:
-           - String, path where to save the model
-           - `h5py.File` object where to save the model
-       overwrite: Whether we should overwrite any existing
-           model at the target location, or instead
-           ask the user with a manual prompt.
-       include_optimizer: If True, save the optimizer's state as well.
-
-   Raises:
-       ImportError: if h5py is not available.
-   """
-
-   if h5py is None:
-     raise ImportError('`save_model` requires h5py.')
-
-   # TODO(psv) Add warning when we save models that contain non-serializable
-   # entities like metrics added using `add_metric` and losses added using
-   # `add_loss.`
-   if len(model.weights) != len(model._undeduplicated_weights):
-     logging.warning('Found duplicated `Variable`s in Model\'s `weights`. '
-                     'This is usually caused by `Variable`s being shared by '
-                     'Layers in the Model. These `Variable`s will be treated '
-                     'as separate `Variable`s when the Model is restored. To '
-                     'avoid this, please save with `save_format="tf"`.')
-
-   if not isinstance(filepath, h5py.File):
-     # If file exists and should not be overwritten.
-     if not overwrite and os.path.isfile(filepath):
-       proceed = ask_to_proceed_with_overwrite(filepath)
-       if not proceed:
-         return
-
-     f = h5py.File(filepath, mode='w')
-     opened_new_file = True
-   else:
-     f = filepath
-     opened_new_file = False
-
-   try:
-     model_metadata = saving_utils.model_metadata(model, include_optimizer)
-     for k, v in model_metadata.items():
-       if isinstance(v, (dict, list, tuple)):
-         f.attrs[k] = json.dumps(
-             v, default=serialization.get_json_type).encode('utf8')
-       else:
-         f.attrs[k] = v
-
-     model_weights_group = f.create_group('model_weights')
-     model_layers = model.layers
-     save_weights_to_hdf5_group(model_weights_group, model_layers)
-
-     # TODO(b/128683857): Add integration tests between tf.keras and external
-     # Keras, to avoid breaking TF.js users.
-     if (include_optimizer and model.optimizer and
-         not isinstance(model.optimizer, optimizers.TFOptimizer)):
-       save_optimizer_weights_to_hdf5_group(f, model.optimizer)
-
-     f.flush()
-   finally:
-     if opened_new_file:
-       f.close()
-
-
- def load_model_from_hdf5(filepath, custom_objects=None, compile=True):  # pylint: disable=redefined-builtin
-   """Loads a model saved via `save_model_to_hdf5`.
-
-   Arguments:
-       filepath: One of the following:
-           - String, path to the saved model
-           - `h5py.File` object from which to load the model
-       custom_objects: Optional dictionary mapping names
-           (strings) to custom classes or functions to be
-           considered during deserialization.
-       compile: Boolean, whether to compile the model
-           after loading.
-
-   Returns:
-       A Keras model instance. If an optimizer was found
-       as part of the saved model, the model is already
-       compiled. Otherwise, the model is uncompiled and
-       a warning will be displayed. When `compile` is set
-       to False, the compilation is omitted without any
-       warning.
-
-   Raises:
-       ImportError: if h5py is not available.
-       ValueError: In case of an invalid savefile.
-   """
-   if h5py is None:
-     raise ImportError('`load_model` requires h5py.')
-
-   if not custom_objects:
-     custom_objects = {}
-
-   opened_new_file = not isinstance(filepath, h5py.File)
-   if opened_new_file:
-     f = h5py.File(filepath, mode='r')
-   else:
-     f = filepath
-
-   model = None
-   try:
-     # instantiate model
-     model_config = f.attrs.get('model_config')
-     if model_config is None:
-       raise ValueError('No model found in config file.')
-     model_config = json.loads(decoded(model_config))
-     model = model_config_lib.model_from_config(model_config,
-                                                custom_objects=custom_objects)
-
-     # set weights
-     load_weights_from_hdf5_group(f['model_weights'], model.layers)
-
-     if compile:
-       # instantiate optimizer
-       training_config = f.attrs.get('training_config')
-       if training_config is None:
-         logging.warning('No training configuration found in the save file, so '
-                         'the model was *not* compiled. Compile it manually.')
-         return model
-       training_config = json.loads(decoded(training_config))
-
-       # Compile model.
-       model.compile(**saving_utils.compile_args_from_training_config(
-           training_config, custom_objects))
-       saving_utils.try_build_compiled_arguments(model)
-
-       # Set optimizer weights.
-       if 'optimizer_weights' in f:
-         try:
-           model.optimizer._create_all_weights(model.trainable_variables)
-         except (NotImplementedError, AttributeError):
-           logging.warning(
-               'Error when creating the weights of optimizer {}, making it '
-               'impossible to restore the saved optimizer state. As a result, '
-               'your model is starting with a freshly initialized '
-               'optimizer.'.format(model.optimizer))
-
-         optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f)
-         try:
-           model.optimizer.set_weights(optimizer_weight_values)
-         except ValueError:
-           logging.warning('Error in loading the saved optimizer '
-                           'state. As a result, your model is '
-                           'starting with a freshly initialized '
-                           'optimizer.')
-   finally:
-     if opened_new_file:
-       f.close()
-   return model
-
-
- def preprocess_weights_for_loading(layer,
-                                    weights,
-                                    original_keras_version=None,
-                                    original_backend=None):
-   """Preprocess layer weights between different Keras formats.
-
-   Converts layer weights from Keras 1 format to Keras 2, and also converts
-   the weights of CuDNN layers in Keras 2.
-
-   Arguments:
-       layer: Layer instance.
-       weights: List of weights values (Numpy arrays).
-       original_keras_version: Keras version for the weights, as a string.
-       original_backend: Keras backend the weights were trained with,
-           as a string.
-
-   Returns:
-       A list of weights values (Numpy arrays).
-   """
-   def convert_nested_bidirectional(weights):
-     """Converts layers nested in `Bidirectional` wrapper.
-
-     This function uses `preprocess_weights_for_loading()` for converting
-     layers.
-
-     Arguments:
-         weights: List of weights values (Numpy arrays).
-
-     Returns:
-         A list of weights values (Numpy arrays).
-     """
-     num_weights_per_layer = len(weights) // 2
-     forward_weights = preprocess_weights_for_loading(
-         layer.forward_layer, weights[:num_weights_per_layer],
-         original_keras_version, original_backend)
-     backward_weights = preprocess_weights_for_loading(
-         layer.backward_layer, weights[num_weights_per_layer:],
-         original_keras_version, original_backend)
-     return forward_weights + backward_weights
-
-   def convert_nested_time_distributed(weights):
-     """Converts layers nested in `TimeDistributed` wrapper.
-
-     This function uses `preprocess_weights_for_loading()` for converting nested
-     layers.
-
-     Arguments:
-         weights: List of weights values (Numpy arrays).
-
-     Returns:
-         A list of weights values (Numpy arrays).
-     """
-     return preprocess_weights_for_loading(
-         layer.layer, weights, original_keras_version, original_backend)
-
-   def convert_nested_model(weights):
-     """Converts layers nested in `Model` or `Sequential`.
-
-     This function uses `preprocess_weights_for_loading()` for converting nested
-     layers.
-
-     Arguments:
-         weights: List of weights values (Numpy arrays).
-
-     Returns:
-         A list of weights values (Numpy arrays).
-     """
-     trainable_weights = weights[:len(layer.trainable_weights)]
-     non_trainable_weights = weights[len(layer.trainable_weights):]
-
-     new_trainable_weights = []
-     new_non_trainable_weights = []
-
-     for sublayer in layer.layers:
-       num_trainable_weights = len(sublayer.trainable_weights)
-       num_non_trainable_weights = len(sublayer.non_trainable_weights)
-       if sublayer.weights:
-         preprocessed = preprocess_weights_for_loading(
-             layer=sublayer,
-             weights=(trainable_weights[:num_trainable_weights] +
-                      non_trainable_weights[:num_non_trainable_weights]),
-             original_keras_version=original_keras_version,
-             original_backend=original_backend)
-         new_trainable_weights.extend(preprocessed[:num_trainable_weights])
-         new_non_trainable_weights.extend(preprocessed[num_trainable_weights:])
-
-         trainable_weights = trainable_weights[num_trainable_weights:]
-         non_trainable_weights = non_trainable_weights[
-             num_non_trainable_weights:]
-
-     return new_trainable_weights + new_non_trainable_weights
-
-   # Convert layers nested in Bidirectional/TimeDistributed/Model/Sequential.
-   # Both transformations should be run for both the Keras 1->2 conversion
-   # and for the conversion of CuDNN layers.
-   if layer.__class__.__name__ == 'Bidirectional':
-     weights = convert_nested_bidirectional(weights)
-   if layer.__class__.__name__ == 'TimeDistributed':
-     weights = convert_nested_time_distributed(weights)
-   elif layer.__class__.__name__ in ['Model', 'Sequential']:
-     weights = convert_nested_model(weights)
-
-   if original_keras_version == '1':
-     if layer.__class__.__name__ == 'TimeDistributed':
-       weights = preprocess_weights_for_loading(
-           layer.layer, weights, original_keras_version, original_backend)
-
-     if layer.__class__.__name__ == 'Conv1D':
-       shape = weights[0].shape
-       # Handle Keras 1.1 format
-       if shape[:2] != (layer.kernel_size[0], 1) or shape[3] != layer.filters:
-         # Legacy shape:
-         # (filters, input_dim, filter_length, 1)
-         assert shape[0] == layer.filters and shape[2:] == (layer.kernel_size[0],
-                                                            1)
-         weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
-       weights[0] = weights[0][:, 0, :, :]
-
-     if layer.__class__.__name__ == 'Conv2D':
-       if layer.data_format == 'channels_first':
-         # old: (filters, stack_size, kernel_rows, kernel_cols)
-         # new: (kernel_rows, kernel_cols, stack_size, filters)
-         weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
-
-     if layer.__class__.__name__ == 'Conv2DTranspose':
-       if layer.data_format == 'channels_last':
-         # old: (kernel_rows, kernel_cols, stack_size, filters)
-         # new: (kernel_rows, kernel_cols, filters, stack_size)
-         weights[0] = np.transpose(weights[0], (0, 1, 3, 2))
-       if layer.data_format == 'channels_first':
-         # old: (filters, stack_size, kernel_rows, kernel_cols)
-         # new: (kernel_rows, kernel_cols, filters, stack_size)
-         weights[0] = np.transpose(weights[0], (2, 3, 0, 1))
-
-     if layer.__class__.__name__ == 'Conv3D':
-       if layer.data_format == 'channels_first':
-         # old: (filters, stack_size, ...)
-         # new: (..., stack_size, filters)
-         weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))
-
-     if layer.__class__.__name__ == 'GRU':
-       if len(weights) == 9:
-         kernel = np.concatenate([weights[0], weights[3], weights[6]], axis=-1)
-         recurrent_kernel = np.concatenate(
-             [weights[1], weights[4], weights[7]], axis=-1)
-         bias = np.concatenate([weights[2], weights[5], weights[8]], axis=-1)
-         weights = [kernel, recurrent_kernel, bias]
-
-     if layer.__class__.__name__ == 'LSTM':
-       if len(weights) == 12:
-         # old: i, c, f, o
-         # new: i, f, c, o
-         kernel = np.concatenate(
-             [weights[0], weights[6], weights[3], weights[9]], axis=-1)
-         recurrent_kernel = np.concatenate(
-             [weights[1], weights[7], weights[4], weights[10]], axis=-1)
-         bias = np.concatenate(
-             [weights[2], weights[8], weights[5], weights[11]], axis=-1)
-         weights = [kernel, recurrent_kernel, bias]
-
-     if layer.__class__.__name__ == 'ConvLSTM2D':
-       if len(weights) == 12:
-         kernel = np.concatenate(
-             [weights[0], weights[6], weights[3], weights[9]], axis=-1)
-         recurrent_kernel = np.concatenate(
-             [weights[1], weights[7], weights[4], weights[10]], axis=-1)
-         bias = np.concatenate(
-             [weights[2], weights[8], weights[5], weights[11]], axis=-1)
-         if layer.data_format == 'channels_first':
-           # old: (filters, stack_size, kernel_rows, kernel_cols)
-           # new: (kernel_rows, kernel_cols, stack_size, filters)
-           kernel = np.transpose(kernel, (2, 3, 1, 0))
-           recurrent_kernel = np.transpose(recurrent_kernel, (2, 3, 1, 0))
-         weights = [kernel, recurrent_kernel, bias]
-
-     conv_layers = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D']
-     if layer.__class__.__name__ in conv_layers:
-       if original_backend == 'theano':
-         weights[0] = conv_utils.convert_kernel(weights[0])
-         if layer.__class__.__name__ == 'ConvLSTM2D':
-           weights[1] = conv_utils.convert_kernel(weights[1])
-       if K.int_shape(layer.weights[0]) != weights[0].shape:
-         weights[0] = np.transpose(weights[0], (3, 2, 0, 1))
-         if layer.__class__.__name__ == 'ConvLSTM2D':
-           weights[1] = np.transpose(weights[1], (3, 2, 0, 1))
-
-   # convert CuDNN layers
-   return _convert_rnn_weights(layer, weights)
-
-
- def _convert_rnn_weights(layer, weights):
-   """Converts weights for RNN layers between native and CuDNN format.
-
-   Input kernels for each gate are transposed and converted between Fortran
-   and C layout; recurrent kernels are transposed. For LSTM, biases are
-   summed/split in half; for GRU, biases are reshaped.
-
-   Weights can be converted in both directions between `LSTM` and `CuDNNLSTM`
-   and between `CuDNNGRU` and `GRU(reset_after=True)`. Default `GRU` is not
-   compatible with `CuDNNGRU`.
-
-   For missing biases in `LSTM`/`GRU` (`use_bias=False`) no conversion is made.
-
-   Arguments:
-       layer: Target layer instance.
-       weights: List of source weights values (input kernels, recurrent
-           kernels, [biases]) (Numpy arrays).
-
-   Returns:
-       A list of converted weights values (Numpy arrays).
-
-   Raises:
-       ValueError: for incompatible GRU layer/weights or incompatible biases.
-   """
-
-   def transform_kernels(kernels, func, n_gates):
-     """Transforms kernel for each gate separately using given function.
-
-     Arguments:
-         kernels: Stacked array of kernels for individual gates.
-         func: Function applied to kernel of each gate.
-         n_gates: Number of gates (4 for LSTM, 3 for GRU).
-
-     Returns:
-         Stacked array of transformed kernels.
-     """
-     return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])
-
-   def transpose_input(from_cudnn):
-     """Makes a function that transforms input kernels from/to CuDNN format.
-
-     It keeps the shape, but changes the layout (Fortran/C order). E.g.:
-
-     ```
-     Keras                 CuDNN
-     [[0, 1, 2],  <--->  [[0, 2, 4],
-      [3, 4, 5]]          [1, 3, 5]]
-     ```
-
-     It can be passed to `transform_kernels()`.
-
-     Arguments:
-         from_cudnn: `True` if source weights are in CuDNN format, `False`
-             if they're in plain Keras format.
-
-     Returns:
-         Function that converts input kernel to the other format.
-     """
-     order = 'F' if from_cudnn else 'C'
-
-     def transform(kernel):
-       return kernel.T.reshape(kernel.shape, order=order)
-
-     return transform
-
-   target_class = layer.__class__.__name__
-
-   # convert the weights between CuDNNLSTM and LSTM
-   if target_class in ['LSTM', 'CuDNNLSTM'] and len(weights) == 3:
-     # determine if we're loading a CuDNNLSTM layer
-     # from the number of bias weights:
-     # CuDNNLSTM has (units * 8) bias weights, while LSTM has (units * 4)
-     # if there's no bias weight in the file, skip this conversion
-     units = weights[1].shape[0]
-     bias_shape = weights[2].shape
-     n_gates = 4
-
-     if bias_shape == (2 * units * n_gates,):
-       source = 'CuDNNLSTM'
-     elif bias_shape == (units * n_gates,):
-       source = 'LSTM'
-     else:
-       raise ValueError('Invalid bias shape: ' + str(bias_shape))
-
-     def convert_lstm_weights(weights, from_cudnn=True):
-       """Converts the weights between CuDNNLSTM and LSTM.
-
-       Arguments:
-           weights: Original weights.
-           from_cudnn: Indicates whether original weights are from CuDNN layer.
-
-       Returns:
-           Updated weights compatible with LSTM.
-       """
-
-       # Transpose (and reshape) input and recurrent kernels
-       kernels = transform_kernels(weights[0], transpose_input(from_cudnn),
-                                   n_gates)
-       recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)
-       if from_cudnn:
-         # merge input and recurrent biases into a single set
-         biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)
-       else:
-         # Split single set of biases evenly to two sets. The way of splitting
-         # doesn't matter as long as the sum of the two sets is preserved.
-         biases = np.tile(0.5 * weights[2], 2)
-       return [kernels, recurrent_kernels, biases]
-
-     if source != target_class:
-       weights = convert_lstm_weights(weights, from_cudnn=source == 'CuDNNLSTM')
-
-   # convert the weights between CuDNNGRU and GRU(reset_after=True)
-   if target_class in ['GRU', 'CuDNNGRU'] and len(weights) == 3:
-     # We can determine the source of the weights from the shape of the bias.
-     # If there is no bias we skip the conversion since
-     # CuDNNGRU always has biases.
-
-     units = weights[1].shape[0]
-     bias_shape = weights[2].shape
-     n_gates = 3
-
-     def convert_gru_weights(weights, from_cudnn=True):
-       """Converts the weights between CuDNNGRU and GRU.
-
-       Arguments:
-           weights: Original weights.
-           from_cudnn: Indicates whether original weights are from CuDNN layer.
-
-       Returns:
-           Updated weights compatible with GRU.
-       """
-
-       kernels = transform_kernels(weights[0], transpose_input(from_cudnn),
-                                   n_gates)
-       recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)
-       biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)
-       return [kernels, recurrent_kernels, biases]
-
-     if bias_shape == (2 * units * n_gates,):
-       source = 'CuDNNGRU'
-     elif bias_shape == (2, units * n_gates):
-       source = 'GRU(reset_after=True)'
-     elif bias_shape == (units * n_gates,):
-       source = 'GRU(reset_after=False)'
-     else:
-       raise ValueError('Invalid bias shape: ' + str(bias_shape))
-
-     if target_class == 'CuDNNGRU':
-       target = 'CuDNNGRU'
-     elif layer.reset_after:
-       target = 'GRU(reset_after=True)'
-     else:
-       target = 'GRU(reset_after=False)'
-
-     # only convert between different types
-     if source != target:
-       types = (source, target)
-       if 'GRU(reset_after=False)' in types:
-         raise ValueError('%s is not compatible with %s' % types)
-       if source == 'CuDNNGRU':
-         weights = convert_gru_weights(weights, from_cudnn=True)
-       elif source == 'GRU(reset_after=True)':
-         weights = convert_gru_weights(weights, from_cudnn=False)
-
-   return weights
-
-
- def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):
-   """Saves the weights of an optimizer to an HDF5 group.
-
-   Arguments:
-       hdf5_group: HDF5 group.
-       optimizer: optimizer instance.
-   """
-
-   symbolic_weights = getattr(optimizer, 'weights')
-   if symbolic_weights:
-     weights_group = hdf5_group.create_group('optimizer_weights')
-     weight_names = [str(w.name).encode('utf8') for w in symbolic_weights]
-     save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names)
-     weight_values = K.batch_get_value(symbolic_weights)
-     for name, val in zip(weight_names, weight_values):
-       param_dset = weights_group.create_dataset(
-           name, val.shape, dtype=val.dtype)
-       if not val.shape:
-         # scalar
-         param_dset[()] = val
-       else:
-         param_dset[:] = val
-
-
- def load_optimizer_weights_from_hdf5_group(hdf5_group):
-   """Loads optimizer weights from an HDF5 group.
-
-   Arguments:
-       hdf5_group: A pointer to an HDF5 group.
-
-   Returns:
-       data: List of optimizer weight values.
-   """
-   weights_group = hdf5_group['optimizer_weights']
-   optimizer_weight_names = load_attributes_from_hdf5_group(
-       weights_group, 'weight_names')
-   return [weights_group[weight_name] for weight_name in optimizer_weight_names]
-
-
- def save_weights_to_hdf5_group(f, layers):
-   """Saves the weights of a list of layers to an HDF5 group.
-
-   Arguments:
-       f: HDF5 group.
-       layers: List of layer instances.
-   """
-   from tensorflow.python.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top
-
-   save_attributes_to_hdf5_group(
-       f, 'layer_names', [layer.name.encode('utf8') for layer in layers])
-   f.attrs['backend'] = K.backend().encode('utf8')
-   f.attrs['keras_version'] = str(keras_version).encode('utf8')
-
-   # Sort model layers by layer name to ensure that group names are strictly
-   # growing to avoid prefix issues.
-   for layer in sorted(layers, key=lambda x: x.name):
-     g = f.create_group(layer.name)
-     weights = _legacy_weights(layer)
-     weight_values = K.batch_get_value(weights)
-     weight_names = [w.name.encode('utf8') for w in weights]
-     save_attributes_to_hdf5_group(g, 'weight_names', weight_names)
-     for name, val in zip(weight_names, weight_values):
-       param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
-       if not val.shape:
-         # scalar
-         param_dset[()] = val
-       else:
-         param_dset[:] = val
-
-
- def load_weights_from_hdf5_group(f, layers):
-   """Implements topological (order-based) weight loading.
-
-   Arguments:
-       f: A pointer to an HDF5 group.
-       layers: a list of target layers.
-
-   Raises:
-       ValueError: in case of mismatch between provided layers
-           and weights file.
-   """
-   if 'keras_version' in f.attrs:
-     original_keras_version = decoded(f.attrs['keras_version'])
-   else:
-     original_keras_version = '1'
-   if 'backend' in f.attrs:
-     original_backend = decoded(f.attrs['backend'])
-   else:
-     original_backend = None
-
-   filtered_layers = []
-   for layer in layers:
-     weights = _legacy_weights(layer)
-     if weights:
-       filtered_layers.append(layer)
-
-   layer_names = load_attributes_from_hdf5_group(f, 'layer_names')
-   filtered_layer_names = []
-   for name in layer_names:
-     g = f[name]
-     weight_names = load_attributes_from_hdf5_group(g, 'weight_names')
-     if weight_names:
-       filtered_layer_names.append(name)
-   layer_names = filtered_layer_names
-   if len(layer_names) != len(filtered_layers):
-     raise ValueError('You are trying to load a weight file '
-                      'containing ' + str(len(layer_names)) +
-                      ' layers into a model with ' + str(len(filtered_layers)) +
-                      ' layers.')
-
-   # We batch weight value assignments in a single backend call
-   # which provides a speedup in TensorFlow.
-   weight_value_tuples = []
-   for k, name in enumerate(layer_names):
-     g = f[name]
-     weight_names = load_attributes_from_hdf5_group(g, 'weight_names')
-     weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
-     layer = filtered_layers[k]
-     symbolic_weights = _legacy_weights(layer)
-     weight_values = preprocess_weights_for_loading(
-         layer, weight_values, original_keras_version, original_backend)
-     if len(weight_values) != len(symbolic_weights):
-       raise ValueError('Layer #' + str(k) + ' (named "' + layer.name +
-                        '" in the current model) was found to '
-                        'correspond to layer ' + name + ' in the save file. '
-                        'However the new layer ' + layer.name + ' expects ' +
-                        str(len(symbolic_weights)) +
-                        ' weights, but the saved weights have ' +
-                        str(len(weight_values)) + ' elements.')
-     weight_value_tuples += zip(symbolic_weights, weight_values)
-   K.batch_set_value(weight_value_tuples)
-
-
- def load_weights_from_hdf5_group_by_name(
-     f, layers, skip_mismatch=False):
-   """Implements name-based weight loading (instead of topological loading).
-
-   Layers that have no matching name are skipped.
-
-   Arguments:
-       f: A pointer to an HDF5 group.
-       layers: a list of target layers.
-       skip_mismatch: Boolean, whether to skip loading of layers
-           where there is a mismatch in the number of weights,
-           or a mismatch in the shape of the weights.
-
-   Raises:
-       ValueError: in case of mismatch between provided layers
-           and weights file and `skip_mismatch=False`.
-   """
-   if 'keras_version' in f.attrs:
-     original_keras_version = decoded(f.attrs['keras_version'])
-   else:
-     original_keras_version = '1'
-   if 'backend' in f.attrs:
-     original_backend = decoded(f.attrs['backend'])
-   else:
-     original_backend = None
-
-   # New file format.
-   layer_names = load_attributes_from_hdf5_group(f, 'layer_names')
-
-   # Reverse index of layer name to list of layers with name.
-   index = {}
-   for layer in layers:
-     if layer.name:
-       index.setdefault(layer.name, []).append(layer)
-
-   # We batch weight value assignments in a single backend call
-   # which provides a speedup in TensorFlow.
-   weight_value_tuples = []
-   for k, name in enumerate(layer_names):
-     g = f[name]
-     weight_names = load_attributes_from_hdf5_group(g, 'weight_names')
-     weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
-
-     for layer in index.get(name, []):
-       symbolic_weights = _legacy_weights(layer)
-       weight_values = preprocess_weights_for_loading(
-           layer, weight_values, original_keras_version, original_backend)
-       if len(weight_values) != len(symbolic_weights):
-         if skip_mismatch:
-           logging.warning('Skipping loading of weights for '
-                           'layer {}'.format(layer.name) + ' due to mismatch '
-                           'in number of weights ({} vs {}).'.format(
-                               len(symbolic_weights), len(weight_values)))
-           continue
-         raise ValueError('Layer #' + str(k) + ' (named "' + layer.name +
-                          '") expects ' + str(len(symbolic_weights)) +
-                          ' weight(s), but the saved weights' + ' have ' +
-                          str(len(weight_values)) + ' element(s).')
-       # Set values.
-       for i in range(len(weight_values)):
-         if K.int_shape(symbolic_weights[i]) != weight_values[i].shape:
-           if skip_mismatch:
-             logging.warning('Skipping loading of weights for '
-                             'layer {}'.format(layer.name) + ' due to '
-                             'mismatch in shape ({} vs {}).'.format(
-                                 symbolic_weights[i].shape,
-                                 weight_values[i].shape))
-             continue
-           raise ValueError('Layer #' + str(k) + ' (named "' + layer.name +
-                            '"), weight ' + str(symbolic_weights[i]) +
-                            ' has shape {}'.format(K.int_shape(
-                                symbolic_weights[i])) +
-                            ', but the saved weight has shape ' +
-                            str(weight_values[i].shape) + '.')
-
-         else:
-           weight_value_tuples.append((symbolic_weights[i], weight_values[i]))
-   K.batch_set_value(weight_value_tuples)
-
-
- def save_attributes_to_hdf5_group(group, name, data):
-   """Saves attributes (data) of the specified name into the HDF5 group.
-
-   This method deals with an inherent limitation of HDF5 files, which cannot
-   store attribute data larger than HDF5_OBJECT_HEADER_LIMIT bytes.
-
-   Arguments:
-       group: A pointer to an HDF5 group.
-       name: A name of the attributes to save.
-       data: Attributes data to store.
-
-   Raises:
-       RuntimeError: If any single attribute is too large to be saved.
-   """
-   # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
-   # because in that case even chunking the array would not make the saving
-   # possible.
-   bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
-
-   # Expecting this to never be true.
-   if bad_attributes:
-     raise RuntimeError('The following attributes cannot be saved to HDF5 '
-                        'file because they are larger than %d bytes: %s' %
-                        (HDF5_OBJECT_HEADER_LIMIT, ', '.join(bad_attributes)))
-
-   data_npy = np.asarray(data)
-
-   num_chunks = 1
-   chunked_data = np.array_split(data_npy, num_chunks)
-
-   # This will never loop forever thanks to the test above.
-   while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
-     num_chunks += 1
-     chunked_data = np.array_split(data_npy, num_chunks)
-
-   if num_chunks > 1:
-     for chunk_id, chunk_data in enumerate(chunked_data):
-       group.attrs['%s%d' % (name, chunk_id)] = chunk_data
-   else:
-     group.attrs[name] = data
-
-
- def load_attributes_from_hdf5_group(group, name):
-   """Loads attributes of the specified name from the HDF5 group.
-
-   This method deals with an inherent limitation of HDF5 files, which cannot
-   store attribute data larger than HDF5_OBJECT_HEADER_LIMIT bytes.
-
-   Arguments:
-       group: A pointer to an HDF5 group.
-       name: A name of the attributes to load.
-
-   Returns:
-       data: Attributes data.
-   """
-   if name in group.attrs:
-     data = [decoded(n) for n in group.attrs[name]]
-   else:
-     data = []
-     chunk_id = 0
-     while '%s%d' % (name, chunk_id) in group.attrs:
-       data.extend(
-           [decoded(n) for n in group.attrs['%s%d' % (name, chunk_id)]])
-       chunk_id += 1
-   return data
-
-
- def _legacy_weights(layer):
-   """DO NOT USE.
-
-   For legacy reasons, layer.weights was in the order of
-   [self.trainable_weights + self.non_trainable_weights], and this order was
-   used for preserving the weights in the h5 format. The new order of
-   layer.weights is the same as layer.get_weights(), which is more intuitive
-   for the user. To keep supporting existing saved h5 files, this method
-   should be used to save/load weights. In a future version, we will delete
-   this method and introduce a breaking change for h5, staying with the new
-   order for weights.
-
-   Args:
-       layer: a `tf.keras.Model` or `tf.keras.layers.Layer` instance.
-
-   Returns:
-       A list of variables with the order of trainable_weights, followed by
-       non_trainable_weights.
-   """
-   weights = layer.trainable_weights + layer.non_trainable_weights
-   if any(not isinstance(w, variables_module.Variable) for w in weights):
-     raise NotImplementedError(
-         'Saving or restoring weights that are not instances of `tf.Variable` '
-         'is not supported in h5; use `save_format=\'tf\'` instead. Got a '
-         'model or layer {} with weights {}'.format(
-             layer.__class__.__name__, weights))
-   return weights
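
For context, here is a minimal usage sketch of the two entry points defined in the removed file. This is not code from the package itself: it assumes TF 2.x `< 2.4.0` and that the file shown above is importable as a module named `hdf5_format` (an assumed name; the diff does not state the install path).

import numpy as np
import tensorflow as tf

import hdf5_format  # assumed module name for the file shown in this diff

# Build, compile, and briefly train a small model.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')
model.fit(np.random.rand(16, 8), np.random.rand(16, 1), epochs=1, verbose=0)

# Save topology, weights, and optimizer state to HDF5 ...
hdf5_format.save_model_to_hdf5(model, 'model.h5', overwrite=True,
                               include_optimizer=True)

# ... and reinstantiate the already-compiled model from the file.
restored = hdf5_format.load_model_from_hdf5('model.h5')

The Fortran/C layout change described in `transpose_input` can also be checked standalone: the array keeps its shape while its elements are re-ordered between column-major and row-major reads, reproducing the example from that docstring.

kernel = np.arange(6).reshape((2, 3))             # [[0, 1, 2], [3, 4, 5]]
print(kernel.T.reshape(kernel.shape, order='F'))  # [[0, 2, 4], [1, 3, 5]]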