easy-cs-rec-custommodel 0.8.6 (py2.py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of easy-cs-rec-custommodel has been flagged as potentially problematic.

Files changed (336):
  1. easy_cs_rec_custommodel-0.8.6.dist-info/LICENSE +203 -0
  2. easy_cs_rec_custommodel-0.8.6.dist-info/METADATA +48 -0
  3. easy_cs_rec_custommodel-0.8.6.dist-info/RECORD +336 -0
  4. easy_cs_rec_custommodel-0.8.6.dist-info/WHEEL +6 -0
  5. easy_cs_rec_custommodel-0.8.6.dist-info/top_level.txt +2 -0
  6. easy_rec/__init__.py +114 -0
  7. easy_rec/python/__init__.py +0 -0
  8. easy_rec/python/builders/__init__.py +0 -0
  9. easy_rec/python/builders/hyperparams_builder.py +78 -0
  10. easy_rec/python/builders/loss_builder.py +333 -0
  11. easy_rec/python/builders/optimizer_builder.py +211 -0
  12. easy_rec/python/builders/strategy_builder.py +44 -0
  13. easy_rec/python/compat/__init__.py +0 -0
  14. easy_rec/python/compat/adam_s.py +245 -0
  15. easy_rec/python/compat/array_ops.py +229 -0
  16. easy_rec/python/compat/dynamic_variable.py +542 -0
  17. easy_rec/python/compat/early_stopping.py +653 -0
  18. easy_rec/python/compat/embedding_ops.py +162 -0
  19. easy_rec/python/compat/embedding_parallel_saver.py +316 -0
  20. easy_rec/python/compat/estimator_train.py +116 -0
  21. easy_rec/python/compat/exporter.py +473 -0
  22. easy_rec/python/compat/feature_column/__init__.py +0 -0
  23. easy_rec/python/compat/feature_column/feature_column.py +3675 -0
  24. easy_rec/python/compat/feature_column/feature_column_v2.py +5233 -0
  25. easy_rec/python/compat/feature_column/sequence_feature_column.py +648 -0
  26. easy_rec/python/compat/feature_column/utils.py +154 -0
  27. easy_rec/python/compat/layers.py +329 -0
  28. easy_rec/python/compat/ops.py +14 -0
  29. easy_rec/python/compat/optimizers.py +619 -0
  30. easy_rec/python/compat/queues.py +311 -0
  31. easy_rec/python/compat/regularizers.py +208 -0
  32. easy_rec/python/compat/sok_optimizer.py +440 -0
  33. easy_rec/python/compat/sync_replicas_optimizer.py +528 -0
  34. easy_rec/python/compat/weight_decay_optimizers.py +475 -0
  35. easy_rec/python/core/__init__.py +0 -0
  36. easy_rec/python/core/easyrec_metrics/__init__.py +24 -0
  37. easy_rec/python/core/easyrec_metrics/distribute_metrics_impl_pai.py +3702 -0
  38. easy_rec/python/core/easyrec_metrics/distribute_metrics_impl_tf.py +3768 -0
  39. easy_rec/python/core/learning_schedules.py +228 -0
  40. easy_rec/python/core/metrics.py +402 -0
  41. easy_rec/python/core/sampler.py +844 -0
  42. easy_rec/python/eval.py +102 -0
  43. easy_rec/python/export.py +150 -0
  44. easy_rec/python/feature_column/__init__.py +0 -0
  45. easy_rec/python/feature_column/feature_column.py +664 -0
  46. easy_rec/python/feature_column/feature_group.py +89 -0
  47. easy_rec/python/hpo/__init__.py +0 -0
  48. easy_rec/python/hpo/emr_hpo.py +140 -0
  49. easy_rec/python/hpo/generate_hpo_sql.py +71 -0
  50. easy_rec/python/hpo/pai_hpo.py +297 -0
  51. easy_rec/python/inference/__init__.py +0 -0
  52. easy_rec/python/inference/csv_predictor.py +189 -0
  53. easy_rec/python/inference/hive_parquet_predictor.py +200 -0
  54. easy_rec/python/inference/hive_predictor.py +166 -0
  55. easy_rec/python/inference/odps_predictor.py +70 -0
  56. easy_rec/python/inference/parquet_predictor.py +147 -0
  57. easy_rec/python/inference/parquet_predictor_v2.py +147 -0
  58. easy_rec/python/inference/predictor.py +621 -0
  59. easy_rec/python/inference/processor/__init__.py +0 -0
  60. easy_rec/python/inference/processor/test.py +170 -0
  61. easy_rec/python/inference/vector_retrieve.py +124 -0
  62. easy_rec/python/input/__init__.py +0 -0
  63. easy_rec/python/input/batch_tfrecord_input.py +117 -0
  64. easy_rec/python/input/criteo_binary_reader.py +259 -0
  65. easy_rec/python/input/criteo_input.py +107 -0
  66. easy_rec/python/input/csv_input.py +175 -0
  67. easy_rec/python/input/csv_input_ex.py +72 -0
  68. easy_rec/python/input/csv_input_v2.py +68 -0
  69. easy_rec/python/input/datahub_input.py +320 -0
  70. easy_rec/python/input/dummy_input.py +58 -0
  71. easy_rec/python/input/hive_input.py +123 -0
  72. easy_rec/python/input/hive_parquet_input.py +140 -0
  73. easy_rec/python/input/hive_rtp_input.py +174 -0
  74. easy_rec/python/input/input.py +1064 -0
  75. easy_rec/python/input/kafka_dataset.py +144 -0
  76. easy_rec/python/input/kafka_input.py +235 -0
  77. easy_rec/python/input/load_parquet.py +317 -0
  78. easy_rec/python/input/odps_input.py +101 -0
  79. easy_rec/python/input/odps_input_v2.py +110 -0
  80. easy_rec/python/input/odps_input_v3.py +132 -0
  81. easy_rec/python/input/odps_rtp_input.py +187 -0
  82. easy_rec/python/input/odps_rtp_input_v2.py +104 -0
  83. easy_rec/python/input/parquet_input.py +397 -0
  84. easy_rec/python/input/parquet_input_v2.py +180 -0
  85. easy_rec/python/input/parquet_input_v3.py +203 -0
  86. easy_rec/python/input/rtp_input.py +225 -0
  87. easy_rec/python/input/rtp_input_v2.py +145 -0
  88. easy_rec/python/input/tfrecord_input.py +100 -0
  89. easy_rec/python/layers/__init__.py +0 -0
  90. easy_rec/python/layers/backbone.py +571 -0
  91. easy_rec/python/layers/capsule_layer.py +176 -0
  92. easy_rec/python/layers/cmbf.py +390 -0
  93. easy_rec/python/layers/common_layers.py +192 -0
  94. easy_rec/python/layers/dnn.py +87 -0
  95. easy_rec/python/layers/embed_input_layer.py +25 -0
  96. easy_rec/python/layers/fm.py +26 -0
  97. easy_rec/python/layers/input_layer.py +396 -0
  98. easy_rec/python/layers/keras/__init__.py +34 -0
  99. easy_rec/python/layers/keras/activation.py +114 -0
  100. easy_rec/python/layers/keras/attention.py +267 -0
  101. easy_rec/python/layers/keras/auxiliary_loss.py +47 -0
  102. easy_rec/python/layers/keras/blocks.py +262 -0
  103. easy_rec/python/layers/keras/bst.py +119 -0
  104. easy_rec/python/layers/keras/custom_ops.py +250 -0
  105. easy_rec/python/layers/keras/data_augment.py +133 -0
  106. easy_rec/python/layers/keras/din.py +67 -0
  107. easy_rec/python/layers/keras/einsum_dense.py +598 -0
  108. easy_rec/python/layers/keras/embedding.py +81 -0
  109. easy_rec/python/layers/keras/fibinet.py +251 -0
  110. easy_rec/python/layers/keras/interaction.py +416 -0
  111. easy_rec/python/layers/keras/layer_norm.py +364 -0
  112. easy_rec/python/layers/keras/mask_net.py +166 -0
  113. easy_rec/python/layers/keras/multi_head_attention.py +717 -0
  114. easy_rec/python/layers/keras/multi_task.py +125 -0
  115. easy_rec/python/layers/keras/numerical_embedding.py +376 -0
  116. easy_rec/python/layers/keras/ppnet.py +194 -0
  117. easy_rec/python/layers/keras/transformer.py +192 -0
  118. easy_rec/python/layers/layer_norm.py +51 -0
  119. easy_rec/python/layers/mmoe.py +83 -0
  120. easy_rec/python/layers/multihead_attention.py +162 -0
  121. easy_rec/python/layers/multihead_cross_attention.py +749 -0
  122. easy_rec/python/layers/senet.py +73 -0
  123. easy_rec/python/layers/seq_input_layer.py +134 -0
  124. easy_rec/python/layers/sequence_feature_layer.py +249 -0
  125. easy_rec/python/layers/uniter.py +301 -0
  126. easy_rec/python/layers/utils.py +248 -0
  127. easy_rec/python/layers/variational_dropout_layer.py +130 -0
  128. easy_rec/python/loss/__init__.py +0 -0
  129. easy_rec/python/loss/circle_loss.py +82 -0
  130. easy_rec/python/loss/contrastive_loss.py +79 -0
  131. easy_rec/python/loss/f1_reweight_loss.py +38 -0
  132. easy_rec/python/loss/focal_loss.py +93 -0
  133. easy_rec/python/loss/jrc_loss.py +128 -0
  134. easy_rec/python/loss/listwise_loss.py +161 -0
  135. easy_rec/python/loss/multi_similarity.py +68 -0
  136. easy_rec/python/loss/pairwise_loss.py +307 -0
  137. easy_rec/python/loss/softmax_loss_with_negative_mining.py +110 -0
  138. easy_rec/python/loss/zero_inflated_lognormal.py +76 -0
  139. easy_rec/python/main.py +878 -0
  140. easy_rec/python/model/__init__.py +0 -0
  141. easy_rec/python/model/autoint.py +73 -0
  142. easy_rec/python/model/cmbf.py +47 -0
  143. easy_rec/python/model/collaborative_metric_learning.py +182 -0
  144. easy_rec/python/model/custom_model.py +323 -0
  145. easy_rec/python/model/dat.py +138 -0
  146. easy_rec/python/model/dbmtl.py +116 -0
  147. easy_rec/python/model/dcn.py +70 -0
  148. easy_rec/python/model/deepfm.py +106 -0
  149. easy_rec/python/model/dlrm.py +73 -0
  150. easy_rec/python/model/dropoutnet.py +207 -0
  151. easy_rec/python/model/dssm.py +154 -0
  152. easy_rec/python/model/dssm_senet.py +143 -0
  153. easy_rec/python/model/dummy_model.py +48 -0
  154. easy_rec/python/model/easy_rec_estimator.py +739 -0
  155. easy_rec/python/model/easy_rec_model.py +467 -0
  156. easy_rec/python/model/esmm.py +242 -0
  157. easy_rec/python/model/fm.py +63 -0
  158. easy_rec/python/model/match_model.py +357 -0
  159. easy_rec/python/model/mind.py +445 -0
  160. easy_rec/python/model/mmoe.py +70 -0
  161. easy_rec/python/model/multi_task_model.py +303 -0
  162. easy_rec/python/model/multi_tower.py +62 -0
  163. easy_rec/python/model/multi_tower_bst.py +190 -0
  164. easy_rec/python/model/multi_tower_din.py +130 -0
  165. easy_rec/python/model/multi_tower_recall.py +68 -0
  166. easy_rec/python/model/pdn.py +203 -0
  167. easy_rec/python/model/ple.py +120 -0
  168. easy_rec/python/model/rank_model.py +485 -0
  169. easy_rec/python/model/rocket_launching.py +203 -0
  170. easy_rec/python/model/simple_multi_task.py +54 -0
  171. easy_rec/python/model/uniter.py +46 -0
  172. easy_rec/python/model/wide_and_deep.py +121 -0
  173. easy_rec/python/ops/1.12/incr_record.so +0 -0
  174. easy_rec/python/ops/1.12/kafka.so +0 -0
  175. easy_rec/python/ops/1.12/libcustom_ops.so +0 -0
  176. easy_rec/python/ops/1.12/libembed_op.so +0 -0
  177. easy_rec/python/ops/1.12/libhiredis.so.1.0.0 +0 -0
  178. easy_rec/python/ops/1.12/librdkafka++.so.1 +0 -0
  179. easy_rec/python/ops/1.12/librdkafka.so.1 +0 -0
  180. easy_rec/python/ops/1.12/libredis++.so +0 -0
  181. easy_rec/python/ops/1.12/libredis++.so.1 +0 -0
  182. easy_rec/python/ops/1.12/libredis++.so.1.2.3 +0 -0
  183. easy_rec/python/ops/1.12/libstr_avx_op.so +0 -0
  184. easy_rec/python/ops/1.12/libwrite_sparse_kv.so +0 -0
  185. easy_rec/python/ops/1.15/incr_record.so +0 -0
  186. easy_rec/python/ops/1.15/kafka.so +0 -0
  187. easy_rec/python/ops/1.15/libcustom_ops.so +0 -0
  188. easy_rec/python/ops/1.15/libembed_op.so +0 -0
  189. easy_rec/python/ops/1.15/libhiredis.so.1.0.0 +0 -0
  190. easy_rec/python/ops/1.15/librdkafka++.so +0 -0
  191. easy_rec/python/ops/1.15/librdkafka++.so.1 +0 -0
  192. easy_rec/python/ops/1.15/librdkafka.so +0 -0
  193. easy_rec/python/ops/1.15/librdkafka.so.1 +0 -0
  194. easy_rec/python/ops/1.15/libredis++.so.1 +0 -0
  195. easy_rec/python/ops/1.15/libstr_avx_op.so +0 -0
  196. easy_rec/python/ops/2.12/libcustom_ops.so +0 -0
  197. easy_rec/python/ops/2.12/libload_embed.so +0 -0
  198. easy_rec/python/ops/2.12/libstr_avx_op.so +0 -0
  199. easy_rec/python/ops/__init__.py +0 -0
  200. easy_rec/python/ops/gen_kafka_ops.py +193 -0
  201. easy_rec/python/ops/gen_str_avx_op.py +28 -0
  202. easy_rec/python/ops/incr_record.py +30 -0
  203. easy_rec/python/predict.py +170 -0
  204. easy_rec/python/protos/__init__.py +0 -0
  205. easy_rec/python/protos/autoint_pb2.py +122 -0
  206. easy_rec/python/protos/backbone_pb2.py +1416 -0
  207. easy_rec/python/protos/cmbf_pb2.py +435 -0
  208. easy_rec/python/protos/collaborative_metric_learning_pb2.py +252 -0
  209. easy_rec/python/protos/custom_model_pb2.py +57 -0
  210. easy_rec/python/protos/dat_pb2.py +262 -0
  211. easy_rec/python/protos/data_source_pb2.py +422 -0
  212. easy_rec/python/protos/dataset_pb2.py +1920 -0
  213. easy_rec/python/protos/dbmtl_pb2.py +191 -0
  214. easy_rec/python/protos/dcn_pb2.py +197 -0
  215. easy_rec/python/protos/deepfm_pb2.py +163 -0
  216. easy_rec/python/protos/dlrm_pb2.py +163 -0
  217. easy_rec/python/protos/dnn_pb2.py +329 -0
  218. easy_rec/python/protos/dropoutnet_pb2.py +239 -0
  219. easy_rec/python/protos/dssm_pb2.py +262 -0
  220. easy_rec/python/protos/dssm_senet_pb2.py +282 -0
  221. easy_rec/python/protos/easy_rec_model_pb2.py +1672 -0
  222. easy_rec/python/protos/esmm_pb2.py +133 -0
  223. easy_rec/python/protos/eval_pb2.py +930 -0
  224. easy_rec/python/protos/export_pb2.py +379 -0
  225. easy_rec/python/protos/feature_config_pb2.py +1359 -0
  226. easy_rec/python/protos/fm_pb2.py +90 -0
  227. easy_rec/python/protos/hive_config_pb2.py +138 -0
  228. easy_rec/python/protos/hyperparams_pb2.py +624 -0
  229. easy_rec/python/protos/keras_layer_pb2.py +692 -0
  230. easy_rec/python/protos/layer_pb2.py +1936 -0
  231. easy_rec/python/protos/loss_pb2.py +1713 -0
  232. easy_rec/python/protos/mind_pb2.py +497 -0
  233. easy_rec/python/protos/mmoe_pb2.py +215 -0
  234. easy_rec/python/protos/multi_tower_pb2.py +295 -0
  235. easy_rec/python/protos/multi_tower_recall_pb2.py +198 -0
  236. easy_rec/python/protos/optimizer_pb2.py +2017 -0
  237. easy_rec/python/protos/pdn_pb2.py +293 -0
  238. easy_rec/python/protos/pipeline_pb2.py +516 -0
  239. easy_rec/python/protos/ple_pb2.py +231 -0
  240. easy_rec/python/protos/predict_pb2.py +1140 -0
  241. easy_rec/python/protos/rocket_launching_pb2.py +169 -0
  242. easy_rec/python/protos/seq_encoder_pb2.py +1084 -0
  243. easy_rec/python/protos/simi_pb2.py +54 -0
  244. easy_rec/python/protos/simple_multi_task_pb2.py +97 -0
  245. easy_rec/python/protos/tf_predict_pb2.py +630 -0
  246. easy_rec/python/protos/tower_pb2.py +661 -0
  247. easy_rec/python/protos/train_pb2.py +1197 -0
  248. easy_rec/python/protos/uniter_pb2.py +307 -0
  249. easy_rec/python/protos/variational_dropout_pb2.py +91 -0
  250. easy_rec/python/protos/wide_and_deep_pb2.py +131 -0
  251. easy_rec/python/test/__init__.py +0 -0
  252. easy_rec/python/test/csv_input_test.py +340 -0
  253. easy_rec/python/test/custom_early_stop_func.py +19 -0
  254. easy_rec/python/test/dh_local_run.py +104 -0
  255. easy_rec/python/test/embed_test.py +155 -0
  256. easy_rec/python/test/emr_run.py +119 -0
  257. easy_rec/python/test/eval_metric_test.py +107 -0
  258. easy_rec/python/test/excel_convert_test.py +64 -0
  259. easy_rec/python/test/export_test.py +513 -0
  260. easy_rec/python/test/fg_test.py +70 -0
  261. easy_rec/python/test/hive_input_test.py +311 -0
  262. easy_rec/python/test/hpo_test.py +235 -0
  263. easy_rec/python/test/kafka_test.py +373 -0
  264. easy_rec/python/test/local_incr_test.py +122 -0
  265. easy_rec/python/test/loss_test.py +110 -0
  266. easy_rec/python/test/odps_command.py +61 -0
  267. easy_rec/python/test/odps_local_run.py +86 -0
  268. easy_rec/python/test/odps_run.py +254 -0
  269. easy_rec/python/test/odps_test_cls.py +39 -0
  270. easy_rec/python/test/odps_test_prepare.py +198 -0
  271. easy_rec/python/test/odps_test_util.py +237 -0
  272. easy_rec/python/test/pre_check_test.py +54 -0
  273. easy_rec/python/test/predictor_test.py +394 -0
  274. easy_rec/python/test/rtp_convert_test.py +133 -0
  275. easy_rec/python/test/run.py +138 -0
  276. easy_rec/python/test/train_eval_test.py +1299 -0
  277. easy_rec/python/test/util_test.py +85 -0
  278. easy_rec/python/test/zero_inflated_lognormal_test.py +53 -0
  279. easy_rec/python/tools/__init__.py +0 -0
  280. easy_rec/python/tools/add_boundaries_to_config.py +67 -0
  281. easy_rec/python/tools/add_feature_info_to_config.py +145 -0
  282. easy_rec/python/tools/convert_config_format.py +48 -0
  283. easy_rec/python/tools/convert_rtp_data.py +79 -0
  284. easy_rec/python/tools/convert_rtp_fg.py +106 -0
  285. easy_rec/python/tools/create_config_from_excel.py +427 -0
  286. easy_rec/python/tools/criteo/__init__.py +0 -0
  287. easy_rec/python/tools/criteo/convert_data.py +157 -0
  288. easy_rec/python/tools/edit_lookup_graph.py +134 -0
  289. easy_rec/python/tools/faiss_index_pai.py +116 -0
  290. easy_rec/python/tools/feature_selection.py +316 -0
  291. easy_rec/python/tools/hit_rate_ds.py +223 -0
  292. easy_rec/python/tools/hit_rate_pai.py +138 -0
  293. easy_rec/python/tools/pre_check.py +120 -0
  294. easy_rec/python/tools/predict_and_chk.py +111 -0
  295. easy_rec/python/tools/read_kafka.py +55 -0
  296. easy_rec/python/tools/split_model_pai.py +286 -0
  297. easy_rec/python/tools/split_pdn_model_pai.py +272 -0
  298. easy_rec/python/tools/test_saved_model.py +80 -0
  299. easy_rec/python/tools/view_saved_model.py +39 -0
  300. easy_rec/python/tools/write_kafka.py +65 -0
  301. easy_rec/python/train_eval.py +325 -0
  302. easy_rec/python/utils/__init__.py +15 -0
  303. easy_rec/python/utils/activation.py +120 -0
  304. easy_rec/python/utils/check_utils.py +87 -0
  305. easy_rec/python/utils/compat.py +14 -0
  306. easy_rec/python/utils/config_util.py +652 -0
  307. easy_rec/python/utils/constant.py +43 -0
  308. easy_rec/python/utils/convert_rtp_fg.py +616 -0
  309. easy_rec/python/utils/dag.py +192 -0
  310. easy_rec/python/utils/distribution_utils.py +268 -0
  311. easy_rec/python/utils/ds_util.py +65 -0
  312. easy_rec/python/utils/embedding_utils.py +73 -0
  313. easy_rec/python/utils/estimator_utils.py +1036 -0
  314. easy_rec/python/utils/export_big_model.py +630 -0
  315. easy_rec/python/utils/expr_util.py +118 -0
  316. easy_rec/python/utils/fg_util.py +53 -0
  317. easy_rec/python/utils/hit_rate_utils.py +220 -0
  318. easy_rec/python/utils/hive_utils.py +183 -0
  319. easy_rec/python/utils/hpo_util.py +137 -0
  320. easy_rec/python/utils/hvd_utils.py +56 -0
  321. easy_rec/python/utils/input_utils.py +108 -0
  322. easy_rec/python/utils/io_util.py +282 -0
  323. easy_rec/python/utils/load_class.py +249 -0
  324. easy_rec/python/utils/meta_graph_editor.py +941 -0
  325. easy_rec/python/utils/multi_optimizer.py +62 -0
  326. easy_rec/python/utils/numpy_utils.py +18 -0
  327. easy_rec/python/utils/odps_util.py +79 -0
  328. easy_rec/python/utils/pai_util.py +86 -0
  329. easy_rec/python/utils/proto_util.py +90 -0
  330. easy_rec/python/utils/restore_filter.py +89 -0
  331. easy_rec/python/utils/shape_utils.py +432 -0
  332. easy_rec/python/utils/static_shape.py +71 -0
  333. easy_rec/python/utils/test_utils.py +866 -0
  334. easy_rec/python/utils/tf_utils.py +56 -0
  335. easy_rec/version.py +4 -0
  336. test/__init__.py +0 -0
--- /dev/null
+++ b/easy_rec/python/compat/optimizers.py
@@ -0,0 +1,619 @@
+# -*- encoding:utf-8 -*-
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Optimizer ops for use in layers and tf.learn."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+
+import six
+import tensorflow as tf
+# from tensorflow.contrib import framework as contrib_framework
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+# from tensorflow.python.ops import logging_ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import clip_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gen_nn_ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variable_scope as vs
+from tensorflow.python.ops import variables as vars_
+from tensorflow.python.summary import summary
+from tensorflow.python.training import moving_averages
+from tensorflow.python.training import optimizer as optimizer_
+from tensorflow.python.training import training as train
+
+from easy_rec.python.ops.incr_record import set_sparse_indices
+from easy_rec.python.utils import constant
+from easy_rec.python.utils import estimator_utils
+
+try:
+  from tensorflow.python.framework import indexed_slices
+except Exception:
+  indexed_slices = ops
+
+try:
+  import horovod.tensorflow as hvd
+except Exception:
+  hvd = None
+
+try:
+  from sparse_operation_kit import experiment as sok
+  from easy_rec.python.compat import sok_optimizer
+except Exception:
+  sok = None
+  # keep the name defined so the isinstance checks below can be guarded
+  sok_optimizer = None
+
+OPTIMIZER_CLS_NAMES = {
+    'Adagrad':
+        train.AdagradOptimizer,
+    'Adam':
+        train.AdamOptimizer,
+    'Ftrl':
+        train.FtrlOptimizer,
+    'Momentum':
+        lambda learning_rate: train.MomentumOptimizer(
+            learning_rate, momentum=0.9),  # pylint: disable=line-too-long
+    'RMSProp':
+        train.RMSPropOptimizer,
+    'SGD':
+        train.GradientDescentOptimizer,
+}
+
+OPTIMIZER_SUMMARIES = [
+    'learning_rate',
+    'loss',
+    'gradients',
+    'gradient_norm',
+    'global_gradient_norm',
+]
+
+
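For reference, a string optimizer is resolved through the OPTIMIZER_CLS_NAMES table above; a minimal sketch of what `optimizer='Adam'` expands to inside `optimize_loss` (the learning-rate value is illustrative):

  # equivalent of optimize_loss(..., optimizer='Adam') with lr = 0.001
  opt = OPTIMIZER_CLS_NAMES['Adam'](learning_rate=0.001)
  # i.e. tf.compat.v1.train.AdamOptimizer(learning_rate=0.001). Note the
  # 'Momentum' entry is a lambda with momentum fixed at 0.9; pass your own
  # MomentumOptimizer instance if you need a different momentum.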
+def optimize_loss(loss,
+                  global_step,
+                  learning_rate,
+                  optimizer,
+                  gradient_noise_scale=None,
+                  gradient_multipliers=None,
+                  clip_gradients=None,
+                  learning_rate_decay_fn=None,
+                  update_ops=None,
+                  variables=None,
+                  name=None,
+                  summaries=None,
+                  colocate_gradients_with_ops=False,
+                  not_apply_grad_after_first_step=False,
+                  increment_global_step=True,
+                  incr_save=False,
+                  embedding_parallel=False):
+  """Given loss and parameters for optimizer, returns a training op.
+
+  Various ways of passing optimizers include:
+
+  - by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES
+    for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
+  - by function taking learning rate `Tensor` as argument and returning an
+    `Optimizer` instance. E.g. `optimize_loss(...,
+    optimizer=lambda lr: tf.compat.v1.train.MomentumOptimizer(lr,
+    momentum=0.5))`.
+    Alternatively, if `learning_rate` is `None`, the function takes no
+    arguments. E.g. `optimize_loss(..., learning_rate=None,
+    optimizer=lambda: tf.compat.v1.train.MomentumOptimizer(0.5,
+    momentum=0.5))`.
+  - by a subclass of `Optimizer` having a single-argument constructor
+    (the argument is the learning rate), such as AdamOptimizer or
+    AdagradOptimizer. E.g. `optimize_loss(...,
+    optimizer=tf.compat.v1.train.AdagradOptimizer)`.
+  - by an instance of a subclass of `Optimizer`.
+    E.g., `optimize_loss(...,
+    optimizer=tf.compat.v1.train.AdagradOptimizer(0.5))`.
+
+  Args:
+    loss: Scalar `Tensor`.
+    global_step: Scalar int `Tensor`, step counter to update on each step
+      unless `increment_global_step` is `False`. If not supplied, it will be
+      fetched from the default graph (see `tf.compat.v1.train.get_global_step`
+      for details). If it has not been created, no step will be incremented
+      with each weight update. `learning_rate_decay_fn` requires `global_step`.
+    learning_rate: float or `Tensor`, magnitude of update per each training
+      step. Can be `None`.
+    optimizer: string, class or optimizer instance, used as trainer. string
+      should be name of optimizer, like 'SGD', 'Adam', 'Adagrad'. Full list in
+      OPTIMIZER_CLS_NAMES constant. class should be sub-class of `tf.Optimizer`
+      that implements `compute_gradients` and `apply_gradients` functions.
+      optimizer instance should be instantiation of `tf.Optimizer` sub-class
+      and have `compute_gradients` and `apply_gradients` functions.
+    gradient_noise_scale: float or None, adds 0-mean normal noise scaled by
+      this value.
+    gradient_multipliers: dict of variables or variable names to floats. If
+      present, gradients for specified variables will be multiplied by given
+      constant.
+    clip_gradients: float, callable or `None`. If a float is provided, a
+      global clipping is applied to prevent the norm of the gradient from
+      exceeding this value. Alternatively, a callable can be provided, e.g.,
+      `adaptive_clipping_fn()`. This callable takes a list of `(gradients,
+      variables)` tuples and returns the same thing with the gradients
+      modified.
+    learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
+      `Tensor`s, returns `Tensor`. Can be used to implement any learning rate
+      decay function. For example: `tf.compat.v1.train.exponential_decay`.
+      Ignored if `learning_rate` is not supplied.
+    update_ops: list of update `Operation`s to execute at each step. If
+      `None`, uses elements of UPDATE_OPS collection. The order of execution
+      between `update_ops` and `loss` is non-deterministic.
+    variables: list of variables to optimize or `None` to use all trainable
+      variables.
+    name: The name for this operation is used to scope operations and
+      summaries.
+    summaries: List of internal quantities to visualize on tensorboard. If not
+      set, the loss, the learning rate, and the global norm of the gradients
+      will be reported. The complete list of possible values is in
+      OPTIMIZER_SUMMARIES.
+    colocate_gradients_with_ops: If True, try colocating gradients with the
+      corresponding op.
+    not_apply_grad_after_first_step: If true, do not apply gradients after the
+      first step; used with chief_redundant.
+    increment_global_step: Whether to increment `global_step`. If your model
+      calls `optimize_loss` multiple times per training step (e.g. to optimize
+      different parts of the model), use this arg to avoid incrementing
+      `global_step` more times than necessary.
+    incr_save: whether to dump checkpoints incrementally.
+    embedding_parallel: whether to shard embeddings and place the embedding
+      parts on different workers.
+
+  Returns:
+    Training op.
+
+  Raises:
+    ValueError: if:
+        * `loss` is an invalid type or shape.
+        * `global_step` is an invalid type or shape.
+        * `learning_rate` is an invalid type or value.
+        * `optimizer` has the wrong type.
+        * `clip_gradients` is neither float nor callable.
+        * `learning_rate` and `learning_rate_decay_fn` are supplied, but no
+          `global_step` is available.
+        * `gradients` is empty.
+  """
+  loss = ops.convert_to_tensor(loss)
+  # contrib_framework.assert_scalar(loss)
+  if global_step is None:
+    global_step = train.get_global_step()
+  else:
+    train.assert_global_step(global_step)
+  with vs.variable_scope(name, 'OptimizeLoss', [loss, global_step]):
+    # Update ops take UPDATE_OPS collection if not provided.
+    if update_ops is None:
+      update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
+    # Make sure update ops are run before computing loss.
+    if update_ops:
+      loss = control_flow_ops.with_dependencies(list(update_ops), loss)
+
+    # Learning rate variable, with possible decay.
+    lr = None
+    if learning_rate is not None:
+      if (isinstance(learning_rate, ops.Tensor) and
+          learning_rate.get_shape().ndims == 0):
+        lr = learning_rate
+      elif isinstance(learning_rate, float):
+        if learning_rate < 0.0:
+          raise ValueError('Invalid learning_rate %s.' % learning_rate)
+        lr = vs.get_variable(
+            'learning_rate', [],
+            trainable=False,
+            initializer=init_ops.constant_initializer(learning_rate))
+      else:
+        raise ValueError('Learning rate should be 0d Tensor or float. '
+                         'Got %s of type %s' %
+                         (str(learning_rate), str(type(learning_rate))))
+    if summaries is None:
+      summaries = ['loss', 'learning_rate', 'global_gradient_norm']
+    else:
+      for summ in summaries:
+        if summ not in OPTIMIZER_SUMMARIES:
+          raise ValueError(
+              'Summaries should be one of [%s], you provided %s.' %
+              (', '.join(OPTIMIZER_SUMMARIES), summ))
+    if learning_rate is not None and learning_rate_decay_fn is not None:
+      if global_step is None:
+        raise ValueError('global_step is required for learning_rate_decay_fn.')
+      lr = learning_rate_decay_fn(lr, global_step)
+      if 'learning_rate' in summaries:
+        summary.scalar('learning_rate', lr)
+
+    # Create optimizer, given specified parameters.
+    if isinstance(optimizer, six.string_types):
+      if lr is None:
+        raise ValueError('Learning rate is None, but should be specified if '
+                         'optimizer is string (%s).' % optimizer)
+      if optimizer not in OPTIMIZER_CLS_NAMES:
+        raise ValueError(
+            'Optimizer name should be one of [%s], you provided %s.' %
+            (', '.join(OPTIMIZER_CLS_NAMES), optimizer))
+      opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
+    elif (isinstance(optimizer, type) and
+          issubclass(optimizer, optimizer_.Optimizer)):
+      if lr is None:
+        raise ValueError('Learning rate is None, but should be specified if '
+                         'optimizer is class (%s).' % optimizer)
+      opt = optimizer(learning_rate=lr)
+    elif isinstance(optimizer, optimizer_.Optimizer):
+      opt = optimizer
+    elif callable(optimizer):
+      if learning_rate is not None:
+        opt = optimizer(lr)
+      else:
+        opt = optimizer()
+      if not isinstance(opt, optimizer_.Optimizer):
+        raise ValueError('Unrecognized optimizer: function should return '
+                         'subclass of Optimizer. Got %s.' % str(opt))
+    elif sok_optimizer is not None and isinstance(
+        optimizer,
+        (sok_optimizer.OptimizerWrapperV1, sok_optimizer.OptimizerWrapperV2)):
+      opt = optimizer
+    else:
+      raise ValueError('Unrecognized optimizer: should be string, '
+                       'subclass of Optimizer, instance of '
+                       'subclass of Optimizer or function with one argument. '
+                       'Got %s[type=%s].' %
+                       (str(optimizer), str(type(optimizer))))
+
+    # All trainable variables, if specific variables are not specified.
+    if variables is None:
+      variables = vars_.trainable_variables()
+
+    # Compute gradients.
+    gradients = opt.compute_gradients(
+        loss,
+        variables,
+        colocate_gradients_with_ops=colocate_gradients_with_ops)
+
+    if estimator_utils.has_hvd() and hvd.size() > 1:
+      if not embedding_parallel:
+        # embedding parameters are not partitioned
+        reduced_grads = []
+        for g, v in gradients:
+          reduced_grads.append((hvd.allreduce(
+              g, op=hvd.Average,
+              compression=hvd.compression.NoneCompressor), v))
+        gradients = reduced_grads
+      else:
+        # embedding parameters are partitioned: gradients for the partitioned
+        # embeddings are already summed across workers by hvd.alltoall in the
+        # backward pass; since the loss is not divided by the number of
+        # workers, those gradients must be normalized by dividing by the
+        # worker count
+        embed_para_vars = ops.get_collection(constant.EmbeddingParallel)
+        part_grads = []
+        part_vars = []
+        part_sparse_grads = []
+        part_sparse_vars = []
+        reduced_grads = []
+        for g, v in gradients:
+          if v.name not in embed_para_vars:
+            if isinstance(g, indexed_slices.IndexedSlices):
+              part_sparse_grads.append(g)
+              part_sparse_vars.append(v)
+            else:
+              part_grads.append(g)
+              part_vars.append(v)
+          else:
+            reduced_grads.append((indexed_slices.IndexedSlices(
+                indices=g.indices, values=g.values / hvd.size()), v))
+
+        group_allreduce = False
+        if len(part_grads) > 0:
+          if group_allreduce:
+            reduced_part_grads = hvd.grouped_allreduce(
+                part_grads,
+                op=hvd.Average,
+                compression=hvd.compression.NoneCompressor)
+            for g, v in zip(reduced_part_grads, part_vars):
+              reduced_grads.append((g, v))
+          else:
+            for g, v in zip(part_grads, part_vars):
+              g = hvd.allreduce(
+                  g, op=hvd.Average,
+                  compression=hvd.compression.NoneCompressor)
+              reduced_grads.append((g, v))
+        if len(part_sparse_grads) > 0:
+          if group_allreduce:
+            reduced_part_grads = hvd.grouped_allreduce(
+                part_sparse_grads,
+                op=hvd.Average,
+                compression=hvd.compression.NoneCompressor)
+            for g, v in zip(reduced_part_grads, part_sparse_vars):
+              reduced_grads.append((g, v))
+          else:
+            for g, v in zip(part_sparse_grads, part_sparse_vars):
+              g = hvd.allreduce(
+                  g, op=hvd.Average,
+                  compression=hvd.compression.NoneCompressor)
+              reduced_grads.append((g, v))
+        gradients = reduced_grads
+
+    # Optionally add gradient noise.
+    if gradient_noise_scale is not None:
+      gradients = _add_scaled_noise_to_gradients(gradients,
+                                                 gradient_noise_scale)
+
+    # Multiply some gradients.
+    if gradient_multipliers is not None:
+      gradients = _multiply_gradients(gradients, gradient_multipliers)
+      if not gradients:
+        raise ValueError(
+            'Empty list of (gradient, var) pairs encountered. This is most '
+            'likely to be caused by an improper value of gradient_multipliers.')
+
+    # if 'global_gradient_norm' in summaries or 'gradient_norm' in summaries:
+    #   summary.scalar('global_norm/gradient_norm',
+    #                  clip_ops.global_norm(list(zip(*gradients))[0]))
+
+    # Optionally clip gradients by global norm.
+    if isinstance(clip_gradients, float):
+      # gradients = _clip_gradients_by_norm(gradients, clip_gradients)
+      sparse_norm, dense_norm, grad_norm = _get_grad_norm(
+          gradients, embedding_parallel)
+      summary.scalar('global_norm/sparse_grad', sparse_norm)
+      summary.scalar('global_norm/dense_grad', dense_norm)
+      summary.scalar('global_norm/gradient_norm', grad_norm)
+      grads = [x[0] for x in gradients]
+      vars = [x[1] for x in gradients]
+      clipped_grads, _ = clip_ops.clip_by_global_norm(
+          grads, clip_gradients, use_norm=grad_norm)
+      gradients = list(zip(clipped_grads, vars))
+    elif callable(clip_gradients):
+      gradients = clip_gradients(gradients)
+    elif clip_gradients is not None:
+      raise ValueError('Unknown type %s for clip_gradients' %
+                       type(clip_gradients))
+
+    # Add scalar summary for loss.
+    if 'loss' in summaries:
+      summary.scalar('loss', loss)
+
+    # Add histograms for variables, gradients and gradient norms.
+    if not embedding_parallel:
+      for gradient, variable in gradients:
+        if isinstance(gradient, indexed_slices.IndexedSlices):
+          grad_values = gradient.values
+        else:
+          grad_values = gradient
+
+        if grad_values is not None:
+          var_name = variable.name.replace(':', '_')
+          if 'gradients' in summaries:
+            summary.histogram('gradients/%s' % var_name, grad_values)
+          if 'gradient_norm' in summaries:
+            summary.scalar('gradient_norm/%s' % var_name,
+                           clip_ops.global_norm([grad_values]))
+
+    if clip_gradients is not None and ('global_gradient_norm' in summaries or
+                                       'gradient_norm' in summaries):
+      sparse_norm, dense_norm, grad_norm = _get_grad_norm(
+          gradients, embedding_parallel)
+      summary.scalar('global_norm/clipped_sparse_grad', sparse_norm)
+      summary.scalar('global_norm/clipped_dense_grad', dense_norm)
+      summary.scalar('global_norm/clipped_gradient_norm', grad_norm)
+
+    # Create gradient updates.
+    def _apply_grad():
+      grad_updates = opt.apply_gradients(
+          gradients,
+          global_step=global_step if increment_global_step else None,
+          name='train')
+
+      embed_para_vars = ops.get_collection(constant.EmbeddingParallel)
+      slot_names = opt.get_slot_names()
+      for var in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES):
+        if var.name in embed_para_vars:
+          for slot_name in slot_names:
+            tmp_var = opt.get_slot(var, slot_name)
+            logging.info('add shard embedding optimizer var: %s' %
+                         tmp_var.name)
+            ops.add_to_collection(constant.EmbeddingParallel, tmp_var.name)
+
+      incr_save_ops = []
+      if incr_save:
+        for grad, var in gradients:
+          if isinstance(grad, indexed_slices.IndexedSlices):
+            indices = grad.indices
+            with ops.colocate_with(var), ops.control_dependencies(
+                [grad_updates]):
+              incr_save_op = set_sparse_indices(indices, var_name=var.op.name)
+            incr_save_ops.append(incr_save_op)
+            ops.add_to_collection('SPARSE_UPDATE_VARIABLES',
+                                  (var, grad.indices.dtype))
+          else:
+            ops.add_to_collection('DENSE_UPDATE_VARIABLES', var)
+        return tf.group(incr_save_ops)
+      else:
+        return grad_updates
+
+    if not_apply_grad_after_first_step:
+      _apply_grad()
+      train_tensor = loss
+    else:
+      train_tensor = _apply_grad()
+
+    return train_tensor
+
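A minimal usage sketch of `optimize_loss`, assuming a TF1-style graph (the toy variable, loss, and hyperparameter values are illustrative, not from the package):

  import tensorflow as tf
  from easy_rec.python.compat.optimizers import optimize_loss

  x = tf.compat.v1.get_variable(
      'x', shape=[], initializer=tf.compat.v1.constant_initializer(0.0))
  loss = tf.square(x - 3.0)  # scalar loss
  global_step = tf.compat.v1.train.get_or_create_global_step()

  train_op = optimize_loss(
      loss,
      global_step,
      learning_rate=0.1,
      optimizer='Adam',    # or an Optimizer class / instance / callable
      clip_gradients=5.0,  # float -> clip by global norm
      summaries=['loss', 'learning_rate'])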
+
+def _get_grad_norm(grads_and_vars, embedding_parallel=False):
+  part_norms = []
+  sparse_norms = []
+  dense_norms = []
+  emb_para_names = ops.get_collection(constant.EmbeddingParallel)
+  for grad, var in grads_and_vars:
+    if grad is None:
+      continue
+    if embedding_parallel and hvd is not None and hvd.size() > 1:
+      if var.name in emb_para_names:
+        part_norms.append(gen_nn_ops.l2_loss(grad.values))
+        continue
+    if isinstance(grad, indexed_slices.IndexedSlices):
+      sparse_norms.append(gen_nn_ops.l2_loss(grad.values))
+    else:
+      dense_norms.append(gen_nn_ops.l2_loss(grad))
+  reduced_norms = []
+  if part_norms:
+    # norms of partitioned embedding gradients are summed across workers
+    reduced_norms = hvd.grouped_allreduce(
+        part_norms, op=hvd.Sum, compression=hvd.compression.NoneCompressor)
+  sparse_norms = sparse_norms + reduced_norms
+  # the overall norm covers both the sparse and the dense gradients
+  all_norms = sparse_norms + dense_norms
+
+  def _norm(l2_losses):
+    # l2_loss(x) = sum(x ** 2) / 2, so norm = sqrt(2 * sum(l2_losses))
+    if not l2_losses:
+      return ops.convert_to_tensor(0.0)
+    return math_ops.sqrt(
+        math_ops.reduce_sum(array_ops.stack(l2_losses)) * 2.0)
+
+  sparse_norm = _norm(sparse_norms)
+  dense_norm = _norm(dense_norms)
+  grad_norm = _norm(all_norms)
+  return sparse_norm, dense_norm, grad_norm
+
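The `* 2.0` factors above come from `l2_loss(x) = sum(x ** 2) / 2`, so the global norm is `sqrt(2 * sum(l2_loss))`; a quick numeric check with made-up gradient values:

  import numpy as np

  g1 = np.array([3.0, 4.0])       # l2_loss = (9 + 16) / 2 = 12.5
  g2 = np.array([1.0, 2.0, 2.0])  # l2_loss = (1 + 4 + 4) / 2 = 4.5
  norm = np.sqrt(2.0 * (12.5 + 4.5))  # sqrt(34) ~= 5.831
  assert np.isclose(norm, np.sqrt((g1 ** 2).sum() + (g2 ** 2).sum()))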
+
+def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
+  """Clips gradients by global norm."""
+  gradients, variables = zip(*grads_and_vars)
+
+  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients,
+                                                      clip_gradients)
+  return list(zip(clipped_gradients, variables))
+
+
+def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
+  """Find max_norm given norm and previous average."""
+  with vs.variable_scope(name, 'AdaptiveMaxNorm', [norm]):
+    log_norm = math_ops.log(norm + epsilon)
+
+    def moving_average(name, value, decay):
+      moving_average_variable = vs.get_variable(
+          name,
+          shape=value.get_shape(),
+          dtype=value.dtype,
+          initializer=init_ops.zeros_initializer(),
+          trainable=False)
+      return moving_averages.assign_moving_average(
+          moving_average_variable, value, decay, zero_debias=False)
+
+    # quicker adaptation at the beginning
+    if global_step is not None:
+      n = math_ops.cast(global_step, dtypes.float32)
+      decay = math_ops.minimum(decay, n / (n + 1.))
+
+    # update averages
+    mean = moving_average('mean', log_norm, decay)
+    sq_mean = moving_average('sq_mean', math_ops.square(log_norm), decay)
+
+    variance = sq_mean - math_ops.square(mean)
+    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
+    max_norms = math_ops.exp(mean + std_factor * std)
+    return max_norms, mean
+
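The `minimum(decay, n / (n + 1))` line above shortens the moving-average memory during early steps so the statistics adapt quickly; the effective decay per step looks like this:

  decay = 0.95
  for n in [1.0, 4.0, 19.0, 100.0]:
    print(int(n), min(decay, n / (n + 1.0)))
  # -> 1 0.5, 4 0.8, 19 0.95, 100 0.95 (warm-up ends once n/(n+1) >= decay)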
+
+def adaptive_clipping_fn(std_factor=2.,
+                         decay=0.95,
+                         static_max_norm=None,
+                         global_step=None,
+                         report_summary=False,
+                         epsilon=1e-8,
+                         name=None):
+  """Adapt the clipping value using statistics on the norms.
+
+  Implement adaptive gradient as presented in section 3.2.1 of
+  https://arxiv.org/abs/1412.1602.
+
+  Keeps a moving average of the mean and std of the log(norm) of the gradient.
+  If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be
+  rescaled such that the global norm becomes `exp(mean)`.
+
+  Args:
+    std_factor: Python scalar (or tensor). `max_norm = exp(mean +
+      std_factor*std)`
+    decay: The smoothing factor of the moving averages.
+    static_max_norm: If provided, will threshold the norm to this value as an
+      extra safety.
+    global_step: Optional global_step. If provided,
+      `decay = min(decay, n / (n + 1))`. This provides a quicker adaptation of
+      the mean for the first steps.
+    report_summary: If `True`, will add a scalar summary of the `max_norm`.
+    epsilon: Small value chosen to avoid zero variance.
+    name: The name for this operation is used to scope operations and
+      summaries.
+
+  Returns:
+    A function for applying gradient clipping.
+  """
+
+  def gradient_clipping(grads_and_vars):
+    """Internal function for adaptive clipping."""
+    grads, variables = zip(*grads_and_vars)
+
+    norm = clip_ops.global_norm(grads)
+
+    max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
+                                            global_step, epsilon, name)
+
+    # reports the max gradient norm for debugging
+    if report_summary:
+      summary.scalar('global_norm/adaptive_max_gradient_norm', max_norm)
+
+    # factor will be 1. if norm is smaller than max_norm
+    factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm),
+                             math_ops.exp(log_mean) / norm)
+
+    if static_max_norm is not None:
+      factor = math_ops.minimum(static_max_norm / norm, factor)
+
+    # apply factor
+    clipped_grads = []
+    for grad in grads:
+      if grad is None:
+        clipped_grads.append(None)
+      elif isinstance(grad, indexed_slices.IndexedSlices):
+        clipped_grads.append(
+            indexed_slices.IndexedSlices(grad.values * factor, grad.indices,
+                                         grad.dense_shape))
+      else:
+        clipped_grads.append(grad * factor)
+
+    return list(zip(clipped_grads, variables))
+
+  return gradient_clipping
+
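As the `optimize_loss` docstring notes, the closure returned by `adaptive_clipping_fn` can be passed directly as `clip_gradients`; a sketch reusing `loss` and `global_step` from the earlier example (all values illustrative):

  from easy_rec.python.compat.optimizers import adaptive_clipping_fn

  train_op = optimize_loss(
      loss,
      global_step,
      learning_rate=0.01,
      optimizer='SGD',
      clip_gradients=adaptive_clipping_fn(
          std_factor=2.0,
          decay=0.95,
          static_max_norm=10.0,  # optional hard ceiling on the norm
          global_step=global_step,
          report_summary=True))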
+
+def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
+  """Adds scaled noise from a 0-mean normal distribution to gradients."""
+  gradients, variables = zip(*grads_and_vars)
+  noisy_gradients = []
+  for gradient in gradients:
+    if gradient is None:
+      noisy_gradients.append(None)
+      continue
+    if isinstance(gradient, indexed_slices.IndexedSlices):
+      gradient_shape = gradient.dense_shape
+    else:
+      gradient_shape = gradient.get_shape()
+    noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
+    noisy_gradients.append(gradient + noise)
+  return list(zip(noisy_gradients, variables))
+
+
+
+def _multiply_gradients(grads_and_vars, gradient_multipliers):
+  """Multiply specified gradients."""
+  multiplied_grads_and_vars = []
+  for grad, var in grads_and_vars:
+    if (grad is not None and
+        (var in gradient_multipliers or var.name in gradient_multipliers)):
+      key = var if var in gradient_multipliers else var.name
+      multiplier = gradient_multipliers[key]
+      if isinstance(grad, indexed_slices.IndexedSlices):
+        grad_values = grad.values * multiplier
+        grad = indexed_slices.IndexedSlices(grad_values, grad.indices,
+                                            grad.dense_shape)
+      else:
+        grad *= math_ops.cast(multiplier, grad.dtype)
+    multiplied_grads_and_vars.append((grad, var))
+  return multiplied_grads_and_vars
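`_multiply_gradients` backs the `gradient_multipliers` argument: keys may be `tf.Variable` objects or variable names like `'embed_weights:0'`. A sketch with a hypothetical variable name, again reusing `loss` and `global_step` from the first example:

  # 'embed_weights:0' is a hypothetical variable name; a multiplier of 0.1
  # damps that variable's gradient to 10% before the update is applied.
  train_op = optimize_loss(
      loss,
      global_step,
      learning_rate=0.01,
      optimizer='Adam',
      gradient_multipliers={'embed_weights:0': 0.1})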