easy_cs_rec_custommodel-0.8.6-py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of easy-cs-rec-custommodel might be problematic.

Files changed (336)
  1. easy_cs_rec_custommodel-0.8.6.dist-info/LICENSE +203 -0
  2. easy_cs_rec_custommodel-0.8.6.dist-info/METADATA +48 -0
  3. easy_cs_rec_custommodel-0.8.6.dist-info/RECORD +336 -0
  4. easy_cs_rec_custommodel-0.8.6.dist-info/WHEEL +6 -0
  5. easy_cs_rec_custommodel-0.8.6.dist-info/top_level.txt +2 -0
  6. easy_rec/__init__.py +114 -0
  7. easy_rec/python/__init__.py +0 -0
  8. easy_rec/python/builders/__init__.py +0 -0
  9. easy_rec/python/builders/hyperparams_builder.py +78 -0
  10. easy_rec/python/builders/loss_builder.py +333 -0
  11. easy_rec/python/builders/optimizer_builder.py +211 -0
  12. easy_rec/python/builders/strategy_builder.py +44 -0
  13. easy_rec/python/compat/__init__.py +0 -0
  14. easy_rec/python/compat/adam_s.py +245 -0
  15. easy_rec/python/compat/array_ops.py +229 -0
  16. easy_rec/python/compat/dynamic_variable.py +542 -0
  17. easy_rec/python/compat/early_stopping.py +653 -0
  18. easy_rec/python/compat/embedding_ops.py +162 -0
  19. easy_rec/python/compat/embedding_parallel_saver.py +316 -0
  20. easy_rec/python/compat/estimator_train.py +116 -0
  21. easy_rec/python/compat/exporter.py +473 -0
  22. easy_rec/python/compat/feature_column/__init__.py +0 -0
  23. easy_rec/python/compat/feature_column/feature_column.py +3675 -0
  24. easy_rec/python/compat/feature_column/feature_column_v2.py +5233 -0
  25. easy_rec/python/compat/feature_column/sequence_feature_column.py +648 -0
  26. easy_rec/python/compat/feature_column/utils.py +154 -0
  27. easy_rec/python/compat/layers.py +329 -0
  28. easy_rec/python/compat/ops.py +14 -0
  29. easy_rec/python/compat/optimizers.py +619 -0
  30. easy_rec/python/compat/queues.py +311 -0
  31. easy_rec/python/compat/regularizers.py +208 -0
  32. easy_rec/python/compat/sok_optimizer.py +440 -0
  33. easy_rec/python/compat/sync_replicas_optimizer.py +528 -0
  34. easy_rec/python/compat/weight_decay_optimizers.py +475 -0
  35. easy_rec/python/core/__init__.py +0 -0
  36. easy_rec/python/core/easyrec_metrics/__init__.py +24 -0
  37. easy_rec/python/core/easyrec_metrics/distribute_metrics_impl_pai.py +3702 -0
  38. easy_rec/python/core/easyrec_metrics/distribute_metrics_impl_tf.py +3768 -0
  39. easy_rec/python/core/learning_schedules.py +228 -0
  40. easy_rec/python/core/metrics.py +402 -0
  41. easy_rec/python/core/sampler.py +844 -0
  42. easy_rec/python/eval.py +102 -0
  43. easy_rec/python/export.py +150 -0
  44. easy_rec/python/feature_column/__init__.py +0 -0
  45. easy_rec/python/feature_column/feature_column.py +664 -0
  46. easy_rec/python/feature_column/feature_group.py +89 -0
  47. easy_rec/python/hpo/__init__.py +0 -0
  48. easy_rec/python/hpo/emr_hpo.py +140 -0
  49. easy_rec/python/hpo/generate_hpo_sql.py +71 -0
  50. easy_rec/python/hpo/pai_hpo.py +297 -0
  51. easy_rec/python/inference/__init__.py +0 -0
  52. easy_rec/python/inference/csv_predictor.py +189 -0
  53. easy_rec/python/inference/hive_parquet_predictor.py +200 -0
  54. easy_rec/python/inference/hive_predictor.py +166 -0
  55. easy_rec/python/inference/odps_predictor.py +70 -0
  56. easy_rec/python/inference/parquet_predictor.py +147 -0
  57. easy_rec/python/inference/parquet_predictor_v2.py +147 -0
  58. easy_rec/python/inference/predictor.py +621 -0
  59. easy_rec/python/inference/processor/__init__.py +0 -0
  60. easy_rec/python/inference/processor/test.py +170 -0
  61. easy_rec/python/inference/vector_retrieve.py +124 -0
  62. easy_rec/python/input/__init__.py +0 -0
  63. easy_rec/python/input/batch_tfrecord_input.py +117 -0
  64. easy_rec/python/input/criteo_binary_reader.py +259 -0
  65. easy_rec/python/input/criteo_input.py +107 -0
  66. easy_rec/python/input/csv_input.py +175 -0
  67. easy_rec/python/input/csv_input_ex.py +72 -0
  68. easy_rec/python/input/csv_input_v2.py +68 -0
  69. easy_rec/python/input/datahub_input.py +320 -0
  70. easy_rec/python/input/dummy_input.py +58 -0
  71. easy_rec/python/input/hive_input.py +123 -0
  72. easy_rec/python/input/hive_parquet_input.py +140 -0
  73. easy_rec/python/input/hive_rtp_input.py +174 -0
  74. easy_rec/python/input/input.py +1064 -0
  75. easy_rec/python/input/kafka_dataset.py +144 -0
  76. easy_rec/python/input/kafka_input.py +235 -0
  77. easy_rec/python/input/load_parquet.py +317 -0
  78. easy_rec/python/input/odps_input.py +101 -0
  79. easy_rec/python/input/odps_input_v2.py +110 -0
  80. easy_rec/python/input/odps_input_v3.py +132 -0
  81. easy_rec/python/input/odps_rtp_input.py +187 -0
  82. easy_rec/python/input/odps_rtp_input_v2.py +104 -0
  83. easy_rec/python/input/parquet_input.py +397 -0
  84. easy_rec/python/input/parquet_input_v2.py +180 -0
  85. easy_rec/python/input/parquet_input_v3.py +203 -0
  86. easy_rec/python/input/rtp_input.py +225 -0
  87. easy_rec/python/input/rtp_input_v2.py +145 -0
  88. easy_rec/python/input/tfrecord_input.py +100 -0
  89. easy_rec/python/layers/__init__.py +0 -0
  90. easy_rec/python/layers/backbone.py +571 -0
  91. easy_rec/python/layers/capsule_layer.py +176 -0
  92. easy_rec/python/layers/cmbf.py +390 -0
  93. easy_rec/python/layers/common_layers.py +192 -0
  94. easy_rec/python/layers/dnn.py +87 -0
  95. easy_rec/python/layers/embed_input_layer.py +25 -0
  96. easy_rec/python/layers/fm.py +26 -0
  97. easy_rec/python/layers/input_layer.py +396 -0
  98. easy_rec/python/layers/keras/__init__.py +34 -0
  99. easy_rec/python/layers/keras/activation.py +114 -0
  100. easy_rec/python/layers/keras/attention.py +267 -0
  101. easy_rec/python/layers/keras/auxiliary_loss.py +47 -0
  102. easy_rec/python/layers/keras/blocks.py +262 -0
  103. easy_rec/python/layers/keras/bst.py +119 -0
  104. easy_rec/python/layers/keras/custom_ops.py +250 -0
  105. easy_rec/python/layers/keras/data_augment.py +133 -0
  106. easy_rec/python/layers/keras/din.py +67 -0
  107. easy_rec/python/layers/keras/einsum_dense.py +598 -0
  108. easy_rec/python/layers/keras/embedding.py +81 -0
  109. easy_rec/python/layers/keras/fibinet.py +251 -0
  110. easy_rec/python/layers/keras/interaction.py +416 -0
  111. easy_rec/python/layers/keras/layer_norm.py +364 -0
  112. easy_rec/python/layers/keras/mask_net.py +166 -0
  113. easy_rec/python/layers/keras/multi_head_attention.py +717 -0
  114. easy_rec/python/layers/keras/multi_task.py +125 -0
  115. easy_rec/python/layers/keras/numerical_embedding.py +376 -0
  116. easy_rec/python/layers/keras/ppnet.py +194 -0
  117. easy_rec/python/layers/keras/transformer.py +192 -0
  118. easy_rec/python/layers/layer_norm.py +51 -0
  119. easy_rec/python/layers/mmoe.py +83 -0
  120. easy_rec/python/layers/multihead_attention.py +162 -0
  121. easy_rec/python/layers/multihead_cross_attention.py +749 -0
  122. easy_rec/python/layers/senet.py +73 -0
  123. easy_rec/python/layers/seq_input_layer.py +134 -0
  124. easy_rec/python/layers/sequence_feature_layer.py +249 -0
  125. easy_rec/python/layers/uniter.py +301 -0
  126. easy_rec/python/layers/utils.py +248 -0
  127. easy_rec/python/layers/variational_dropout_layer.py +130 -0
  128. easy_rec/python/loss/__init__.py +0 -0
  129. easy_rec/python/loss/circle_loss.py +82 -0
  130. easy_rec/python/loss/contrastive_loss.py +79 -0
  131. easy_rec/python/loss/f1_reweight_loss.py +38 -0
  132. easy_rec/python/loss/focal_loss.py +93 -0
  133. easy_rec/python/loss/jrc_loss.py +128 -0
  134. easy_rec/python/loss/listwise_loss.py +161 -0
  135. easy_rec/python/loss/multi_similarity.py +68 -0
  136. easy_rec/python/loss/pairwise_loss.py +307 -0
  137. easy_rec/python/loss/softmax_loss_with_negative_mining.py +110 -0
  138. easy_rec/python/loss/zero_inflated_lognormal.py +76 -0
  139. easy_rec/python/main.py +878 -0
  140. easy_rec/python/model/__init__.py +0 -0
  141. easy_rec/python/model/autoint.py +73 -0
  142. easy_rec/python/model/cmbf.py +47 -0
  143. easy_rec/python/model/collaborative_metric_learning.py +182 -0
  144. easy_rec/python/model/custom_model.py +323 -0
  145. easy_rec/python/model/dat.py +138 -0
  146. easy_rec/python/model/dbmtl.py +116 -0
  147. easy_rec/python/model/dcn.py +70 -0
  148. easy_rec/python/model/deepfm.py +106 -0
  149. easy_rec/python/model/dlrm.py +73 -0
  150. easy_rec/python/model/dropoutnet.py +207 -0
  151. easy_rec/python/model/dssm.py +154 -0
  152. easy_rec/python/model/dssm_senet.py +143 -0
  153. easy_rec/python/model/dummy_model.py +48 -0
  154. easy_rec/python/model/easy_rec_estimator.py +739 -0
  155. easy_rec/python/model/easy_rec_model.py +467 -0
  156. easy_rec/python/model/esmm.py +242 -0
  157. easy_rec/python/model/fm.py +63 -0
  158. easy_rec/python/model/match_model.py +357 -0
  159. easy_rec/python/model/mind.py +445 -0
  160. easy_rec/python/model/mmoe.py +70 -0
  161. easy_rec/python/model/multi_task_model.py +303 -0
  162. easy_rec/python/model/multi_tower.py +62 -0
  163. easy_rec/python/model/multi_tower_bst.py +190 -0
  164. easy_rec/python/model/multi_tower_din.py +130 -0
  165. easy_rec/python/model/multi_tower_recall.py +68 -0
  166. easy_rec/python/model/pdn.py +203 -0
  167. easy_rec/python/model/ple.py +120 -0
  168. easy_rec/python/model/rank_model.py +485 -0
  169. easy_rec/python/model/rocket_launching.py +203 -0
  170. easy_rec/python/model/simple_multi_task.py +54 -0
  171. easy_rec/python/model/uniter.py +46 -0
  172. easy_rec/python/model/wide_and_deep.py +121 -0
  173. easy_rec/python/ops/1.12/incr_record.so +0 -0
  174. easy_rec/python/ops/1.12/kafka.so +0 -0
  175. easy_rec/python/ops/1.12/libcustom_ops.so +0 -0
  176. easy_rec/python/ops/1.12/libembed_op.so +0 -0
  177. easy_rec/python/ops/1.12/libhiredis.so.1.0.0 +0 -0
  178. easy_rec/python/ops/1.12/librdkafka++.so.1 +0 -0
  179. easy_rec/python/ops/1.12/librdkafka.so.1 +0 -0
  180. easy_rec/python/ops/1.12/libredis++.so +0 -0
  181. easy_rec/python/ops/1.12/libredis++.so.1 +0 -0
  182. easy_rec/python/ops/1.12/libredis++.so.1.2.3 +0 -0
  183. easy_rec/python/ops/1.12/libstr_avx_op.so +0 -0
  184. easy_rec/python/ops/1.12/libwrite_sparse_kv.so +0 -0
  185. easy_rec/python/ops/1.15/incr_record.so +0 -0
  186. easy_rec/python/ops/1.15/kafka.so +0 -0
  187. easy_rec/python/ops/1.15/libcustom_ops.so +0 -0
  188. easy_rec/python/ops/1.15/libembed_op.so +0 -0
  189. easy_rec/python/ops/1.15/libhiredis.so.1.0.0 +0 -0
  190. easy_rec/python/ops/1.15/librdkafka++.so +0 -0
  191. easy_rec/python/ops/1.15/librdkafka++.so.1 +0 -0
  192. easy_rec/python/ops/1.15/librdkafka.so +0 -0
  193. easy_rec/python/ops/1.15/librdkafka.so.1 +0 -0
  194. easy_rec/python/ops/1.15/libredis++.so.1 +0 -0
  195. easy_rec/python/ops/1.15/libstr_avx_op.so +0 -0
  196. easy_rec/python/ops/2.12/libcustom_ops.so +0 -0
  197. easy_rec/python/ops/2.12/libload_embed.so +0 -0
  198. easy_rec/python/ops/2.12/libstr_avx_op.so +0 -0
  199. easy_rec/python/ops/__init__.py +0 -0
  200. easy_rec/python/ops/gen_kafka_ops.py +193 -0
  201. easy_rec/python/ops/gen_str_avx_op.py +28 -0
  202. easy_rec/python/ops/incr_record.py +30 -0
  203. easy_rec/python/predict.py +170 -0
  204. easy_rec/python/protos/__init__.py +0 -0
  205. easy_rec/python/protos/autoint_pb2.py +122 -0
  206. easy_rec/python/protos/backbone_pb2.py +1416 -0
  207. easy_rec/python/protos/cmbf_pb2.py +435 -0
  208. easy_rec/python/protos/collaborative_metric_learning_pb2.py +252 -0
  209. easy_rec/python/protos/custom_model_pb2.py +57 -0
  210. easy_rec/python/protos/dat_pb2.py +262 -0
  211. easy_rec/python/protos/data_source_pb2.py +422 -0
  212. easy_rec/python/protos/dataset_pb2.py +1920 -0
  213. easy_rec/python/protos/dbmtl_pb2.py +191 -0
  214. easy_rec/python/protos/dcn_pb2.py +197 -0
  215. easy_rec/python/protos/deepfm_pb2.py +163 -0
  216. easy_rec/python/protos/dlrm_pb2.py +163 -0
  217. easy_rec/python/protos/dnn_pb2.py +329 -0
  218. easy_rec/python/protos/dropoutnet_pb2.py +239 -0
  219. easy_rec/python/protos/dssm_pb2.py +262 -0
  220. easy_rec/python/protos/dssm_senet_pb2.py +282 -0
  221. easy_rec/python/protos/easy_rec_model_pb2.py +1672 -0
  222. easy_rec/python/protos/esmm_pb2.py +133 -0
  223. easy_rec/python/protos/eval_pb2.py +930 -0
  224. easy_rec/python/protos/export_pb2.py +379 -0
  225. easy_rec/python/protos/feature_config_pb2.py +1359 -0
  226. easy_rec/python/protos/fm_pb2.py +90 -0
  227. easy_rec/python/protos/hive_config_pb2.py +138 -0
  228. easy_rec/python/protos/hyperparams_pb2.py +624 -0
  229. easy_rec/python/protos/keras_layer_pb2.py +692 -0
  230. easy_rec/python/protos/layer_pb2.py +1936 -0
  231. easy_rec/python/protos/loss_pb2.py +1713 -0
  232. easy_rec/python/protos/mind_pb2.py +497 -0
  233. easy_rec/python/protos/mmoe_pb2.py +215 -0
  234. easy_rec/python/protos/multi_tower_pb2.py +295 -0
  235. easy_rec/python/protos/multi_tower_recall_pb2.py +198 -0
  236. easy_rec/python/protos/optimizer_pb2.py +2017 -0
  237. easy_rec/python/protos/pdn_pb2.py +293 -0
  238. easy_rec/python/protos/pipeline_pb2.py +516 -0
  239. easy_rec/python/protos/ple_pb2.py +231 -0
  240. easy_rec/python/protos/predict_pb2.py +1140 -0
  241. easy_rec/python/protos/rocket_launching_pb2.py +169 -0
  242. easy_rec/python/protos/seq_encoder_pb2.py +1084 -0
  243. easy_rec/python/protos/simi_pb2.py +54 -0
  244. easy_rec/python/protos/simple_multi_task_pb2.py +97 -0
  245. easy_rec/python/protos/tf_predict_pb2.py +630 -0
  246. easy_rec/python/protos/tower_pb2.py +661 -0
  247. easy_rec/python/protos/train_pb2.py +1197 -0
  248. easy_rec/python/protos/uniter_pb2.py +307 -0
  249. easy_rec/python/protos/variational_dropout_pb2.py +91 -0
  250. easy_rec/python/protos/wide_and_deep_pb2.py +131 -0
  251. easy_rec/python/test/__init__.py +0 -0
  252. easy_rec/python/test/csv_input_test.py +340 -0
  253. easy_rec/python/test/custom_early_stop_func.py +19 -0
  254. easy_rec/python/test/dh_local_run.py +104 -0
  255. easy_rec/python/test/embed_test.py +155 -0
  256. easy_rec/python/test/emr_run.py +119 -0
  257. easy_rec/python/test/eval_metric_test.py +107 -0
  258. easy_rec/python/test/excel_convert_test.py +64 -0
  259. easy_rec/python/test/export_test.py +513 -0
  260. easy_rec/python/test/fg_test.py +70 -0
  261. easy_rec/python/test/hive_input_test.py +311 -0
  262. easy_rec/python/test/hpo_test.py +235 -0
  263. easy_rec/python/test/kafka_test.py +373 -0
  264. easy_rec/python/test/local_incr_test.py +122 -0
  265. easy_rec/python/test/loss_test.py +110 -0
  266. easy_rec/python/test/odps_command.py +61 -0
  267. easy_rec/python/test/odps_local_run.py +86 -0
  268. easy_rec/python/test/odps_run.py +254 -0
  269. easy_rec/python/test/odps_test_cls.py +39 -0
  270. easy_rec/python/test/odps_test_prepare.py +198 -0
  271. easy_rec/python/test/odps_test_util.py +237 -0
  272. easy_rec/python/test/pre_check_test.py +54 -0
  273. easy_rec/python/test/predictor_test.py +394 -0
  274. easy_rec/python/test/rtp_convert_test.py +133 -0
  275. easy_rec/python/test/run.py +138 -0
  276. easy_rec/python/test/train_eval_test.py +1299 -0
  277. easy_rec/python/test/util_test.py +85 -0
  278. easy_rec/python/test/zero_inflated_lognormal_test.py +53 -0
  279. easy_rec/python/tools/__init__.py +0 -0
  280. easy_rec/python/tools/add_boundaries_to_config.py +67 -0
  281. easy_rec/python/tools/add_feature_info_to_config.py +145 -0
  282. easy_rec/python/tools/convert_config_format.py +48 -0
  283. easy_rec/python/tools/convert_rtp_data.py +79 -0
  284. easy_rec/python/tools/convert_rtp_fg.py +106 -0
  285. easy_rec/python/tools/create_config_from_excel.py +427 -0
  286. easy_rec/python/tools/criteo/__init__.py +0 -0
  287. easy_rec/python/tools/criteo/convert_data.py +157 -0
  288. easy_rec/python/tools/edit_lookup_graph.py +134 -0
  289. easy_rec/python/tools/faiss_index_pai.py +116 -0
  290. easy_rec/python/tools/feature_selection.py +316 -0
  291. easy_rec/python/tools/hit_rate_ds.py +223 -0
  292. easy_rec/python/tools/hit_rate_pai.py +138 -0
  293. easy_rec/python/tools/pre_check.py +120 -0
  294. easy_rec/python/tools/predict_and_chk.py +111 -0
  295. easy_rec/python/tools/read_kafka.py +55 -0
  296. easy_rec/python/tools/split_model_pai.py +286 -0
  297. easy_rec/python/tools/split_pdn_model_pai.py +272 -0
  298. easy_rec/python/tools/test_saved_model.py +80 -0
  299. easy_rec/python/tools/view_saved_model.py +39 -0
  300. easy_rec/python/tools/write_kafka.py +65 -0
  301. easy_rec/python/train_eval.py +325 -0
  302. easy_rec/python/utils/__init__.py +15 -0
  303. easy_rec/python/utils/activation.py +120 -0
  304. easy_rec/python/utils/check_utils.py +87 -0
  305. easy_rec/python/utils/compat.py +14 -0
  306. easy_rec/python/utils/config_util.py +652 -0
  307. easy_rec/python/utils/constant.py +43 -0
  308. easy_rec/python/utils/convert_rtp_fg.py +616 -0
  309. easy_rec/python/utils/dag.py +192 -0
  310. easy_rec/python/utils/distribution_utils.py +268 -0
  311. easy_rec/python/utils/ds_util.py +65 -0
  312. easy_rec/python/utils/embedding_utils.py +73 -0
  313. easy_rec/python/utils/estimator_utils.py +1036 -0
  314. easy_rec/python/utils/export_big_model.py +630 -0
  315. easy_rec/python/utils/expr_util.py +118 -0
  316. easy_rec/python/utils/fg_util.py +53 -0
  317. easy_rec/python/utils/hit_rate_utils.py +220 -0
  318. easy_rec/python/utils/hive_utils.py +183 -0
  319. easy_rec/python/utils/hpo_util.py +137 -0
  320. easy_rec/python/utils/hvd_utils.py +56 -0
  321. easy_rec/python/utils/input_utils.py +108 -0
  322. easy_rec/python/utils/io_util.py +282 -0
  323. easy_rec/python/utils/load_class.py +249 -0
  324. easy_rec/python/utils/meta_graph_editor.py +941 -0
  325. easy_rec/python/utils/multi_optimizer.py +62 -0
  326. easy_rec/python/utils/numpy_utils.py +18 -0
  327. easy_rec/python/utils/odps_util.py +79 -0
  328. easy_rec/python/utils/pai_util.py +86 -0
  329. easy_rec/python/utils/proto_util.py +90 -0
  330. easy_rec/python/utils/restore_filter.py +89 -0
  331. easy_rec/python/utils/shape_utils.py +432 -0
  332. easy_rec/python/utils/static_shape.py +71 -0
  333. easy_rec/python/utils/test_utils.py +866 -0
  334. easy_rec/python/utils/tf_utils.py +56 -0
  335. easy_rec/version.py +4 -0
  336. test/__init__.py +0 -0
easy_rec/python/compat/weight_decay_optimizers.py
@@ -0,0 +1,475 @@
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+ """Base class to make optimizers weight decay ready."""
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ from tensorflow.python.framework import ops
+ from tensorflow.python.ops import array_ops
+ from tensorflow.python.ops import control_flow_ops
+ from tensorflow.python.ops import resource_variable_ops
+ from tensorflow.python.ops import state_ops
+ from tensorflow.python.training import adam
+ from tensorflow.python.training import momentum as momentum_opt
+ from tensorflow.python.training import optimizer
+ from tensorflow.python.util.tf_export import tf_export
+
+
+ class DecoupledWeightDecayExtension(object):
+   """This class allows to extend optimizers with decoupled weight decay.
+
+   It implements the decoupled weight decay described by Loshchilov & Hutter
+   (https://arxiv.org/pdf/1711.05101.pdf), in which the weight decay is
+   decoupled from the optimization steps w.r.t. the loss function.
+   For SGD variants, this simplifies hyperparameter search since it decouples
+   the settings of weight decay and learning rate.
+   For adaptive gradient algorithms, it regularizes variables with large
+   gradients more than L2 regularization would, which was shown to yield better
+   training loss and generalization error in the paper above.
+
+   This class alone is not an optimizer but rather extends existing
+   optimizers with decoupled weight decay. We explicitly define the two
+   examples used in the above paper (SGDW and AdamW), but in general this can
+   extend any OptimizerX by using
+   `extend_with_decoupled_weight_decay(OptimizerX)` and passing `weight_decay`
+   to the constructor of the returned class.
+   In order for it to work, it must be the first class the Optimizer with
+   weight decay inherits from, e.g.
+
+   ```python
+   class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
+     def __init__(self, weight_decay, *args, **kwargs):
+       super(AdamWOptimizer, self).__init__(weight_decay, *args, **kwargs)
+   ```
+
+   Note that this extension decays weights BEFORE applying the update based
+   on the gradient, i.e. this extension only has the desired behaviour for
+   optimizers which do not depend on the value of 'var' in the update step!
+
+   Note: when applying a decay to the learning rate, be sure to manually apply
+   the decay to the `weight_decay` as well. For example:
+
+   ```python
+   schedule = tf.compat.v1.train.piecewise_constant(
+       tf.compat.v1.train.get_global_step(), [10000, 15000],
+       [1e-0, 1e-1, 1e-2])
+   lr = 1e-1 * schedule
+   wd = lambda: 1e-4 * schedule
+
+   # ...
+
+   optimizer = tf.contrib.opt.MomentumWOptimizer(learning_rate=lr,
+                                                 weight_decay=wd,
+                                                 momentum=0.9,
+                                                 use_nesterov=True)
+   ```
+   """
+
+   def __init__(self, weight_decay, **kwargs):
+     """Construct the extension class that adds weight decay to an optimizer.
+
+     Args:
+       weight_decay: A `Tensor` or a floating point value, the factor by which
+         a variable is decayed in the update step.
+       **kwargs: Optional keyword arguments passed on to the constructor of
+         the base optimizer.
+     """
+     self._decay_var_list = None  # is set in minimize or apply_gradients
+     self._weight_decay = weight_decay
+     # The tensors are initialized in call to _prepare
+     self._weight_decay_tensor = None
+     super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
+
+   def minimize(self,
+                loss,
+                global_step=None,
+                var_list=None,
+                gate_gradients=optimizer.Optimizer.GATE_OP,
+                aggregation_method=None,
+                colocate_gradients_with_ops=False,
+                name=None,
+                grad_loss=None,
+                decay_var_list=None):
+     """Add operations to minimize `loss` by updating `var_list` with decay.
+
+     This function is the same as Optimizer.minimize except that it allows to
+     specify the variables that should be decayed using decay_var_list.
+     If decay_var_list is None, all variables in var_list are decayed.
+
+     For more information see the documentation of Optimizer.minimize.
+
+     Args:
+       loss: A `Tensor` containing the value to minimize.
+       global_step: Optional `Variable` to increment by one after the variables
+         have been updated.
+       var_list: Optional list or tuple of `Variable` objects to update to
+         minimize `loss`. Defaults to the list of variables collected in the
+         graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
+       gate_gradients: How to gate the computation of gradients. Can be
+         `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
+       aggregation_method: Specifies the method used to combine gradient terms.
+         Valid values are defined in the class `AggregationMethod`.
+       colocate_gradients_with_ops: If True, try colocating gradients with the
+         corresponding op.
+       name: Optional name for the returned operation.
+       grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
+       decay_var_list: Optional list of decay variables.
+
+     Returns:
+       An Operation that updates the variables in `var_list`. If `global_step`
+       was not `None`, that operation also increments `global_step`.
+     """
+     self._decay_var_list = set(decay_var_list) if decay_var_list else False
+     return super(DecoupledWeightDecayExtension, self).minimize(
+         loss,
+         global_step=global_step,
+         var_list=var_list,
+         gate_gradients=gate_gradients,
+         aggregation_method=aggregation_method,
+         colocate_gradients_with_ops=colocate_gradients_with_ops,
+         name=name,
+         grad_loss=grad_loss)
+
+   def apply_gradients(self,
+                       grads_and_vars,
+                       global_step=None,
+                       name=None,
+                       decay_var_list=None):
+     """Apply gradients to variables and decay the variables.
+
+     This function is the same as Optimizer.apply_gradients except that it
+     allows to specify the variables that should be decayed using
+     decay_var_list. If decay_var_list is None, all variables in var_list
+     are decayed.
+
+     For more information see the documentation of Optimizer.apply_gradients.
+
+     Args:
+       grads_and_vars: List of (gradient, variable) pairs as returned by
+         `compute_gradients()`.
+       global_step: Optional `Variable` to increment by one after the variables
+         have been updated.
+       name: Optional name for the returned operation. Default to the name
+         passed to the `Optimizer` constructor.
+       decay_var_list: Optional list of decay variables.
+
+     Returns:
+       An `Operation` that applies the specified gradients. If `global_step`
+       was not None, that operation also increments `global_step`.
+     """
+     self._decay_var_list = set(decay_var_list) if decay_var_list else False
+     return super(DecoupledWeightDecayExtension, self).apply_gradients(
+         grads_and_vars, global_step=global_step, name=name)
+
+   def _prepare(self):
+     weight_decay = self._weight_decay
+     if callable(weight_decay):
+       weight_decay = weight_decay()
+     self._weight_decay_tensor = ops.convert_to_tensor(
+         weight_decay, name='weight_decay')
+     # Call the optimizers _prepare function.
+     super(DecoupledWeightDecayExtension, self)._prepare()
+
+   def _decay_weights_op(self, var):
+     if not self._decay_var_list or var in self._decay_var_list:
+       return var.assign_sub(self._weight_decay * var, self._use_locking)
+     return control_flow_ops.no_op()
+
+   def _decay_weights_sparse_op(self, var, indices, scatter_add):
+     if not self._decay_var_list or var in self._decay_var_list:
+       update = -self._weight_decay * array_ops.gather(var, indices)
+       return scatter_add(var, indices, update, self._use_locking)
+     return control_flow_ops.no_op()
+
+   # Here, we overwrite the apply functions that the base optimizer calls.
+   # super().apply_x resolves to the apply_x function of the BaseOptimizer.
+   def _apply_dense(self, grad, var):
+     with ops.control_dependencies([self._decay_weights_op(var)]):
+       return super(DecoupledWeightDecayExtension, self)._apply_dense(grad, var)
+
+   def _resource_apply_dense(self, grad, var):
+     with ops.control_dependencies([self._decay_weights_op(var)]):
+       return super(DecoupledWeightDecayExtension,
+                    self)._resource_apply_dense(grad, var)
+
+   def _apply_sparse(self, grad, var):
+     scatter_add = state_ops.scatter_add
+     decay_op = self._decay_weights_sparse_op(var, grad.indices, scatter_add)
+     with ops.control_dependencies([decay_op]):
+       return super(DecoupledWeightDecayExtension, self)._apply_sparse(grad, var)
+
+   def _resource_scatter_add(self, x, i, v, _=None):
+     # last argument allows for one overflow argument, to have the same function
+     # signature as state_ops.scatter_add
+     with ops.control_dependencies(
+         [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
+       return x.value()
+
+   def _resource_apply_sparse(self, grad, var, indices):
+     scatter_add = self._resource_scatter_add
+     decay_op = self._decay_weights_sparse_op(var, indices, scatter_add)
+     with ops.control_dependencies([decay_op]):
+       return super(DecoupledWeightDecayExtension,
+                    self)._resource_apply_sparse(grad, var, indices)
+
+
+ def extend_with_decoupled_weight_decay(base_optimizer):
+   """Factory function returning an optimizer class with decoupled weight decay.
+
+   Returns an optimizer class. An instance of the returned class computes the
+   update step of `base_optimizer` and additionally decays the weights.
+   E.g., the class returned by
+   `extend_with_decoupled_weight_decay(tf.compat.v1.train.AdamOptimizer)` is
+   equivalent to `tf.contrib.opt.AdamWOptimizer`.
+
+   The API of the new optimizer class slightly differs from the API of the
+   base optimizer:
+   - The first argument to the constructor is the weight decay rate.
+   - `minimize` and `apply_gradients` accept the optional keyword argument
+     `decay_var_list`, which specifies the variables that should be decayed.
+     If `None`, all variables that are optimized are decayed.
+
+   Usage example:
+   ```python
+   # MyAdamW is a new class
+   MyAdamW = extend_with_decoupled_weight_decay(tf.compat.v1.train.AdamOptimizer)
+   # Create a MyAdamW object
+   optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
+   sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
+   ```
+
+   Note that this extension decays weights BEFORE applying the update based
+   on the gradient, i.e. this extension only has the desired behaviour for
+   optimizers which do not depend on the value of 'var' in the update step!
+
+   Args:
+     base_optimizer: An optimizer class that inherits from tf.train.Optimizer.
+
+   Returns:
+     A new optimizer class that inherits from DecoupledWeightDecayExtension
+     and base_optimizer.
+   """
+
+   class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
+                                           base_optimizer):
+     """Base_optimizer with decoupled weight decay.
+
+     This class computes the update step of `base_optimizer` and
+     additionally decays the variable with the weight decay being decoupled
+     from the optimization steps w.r.t. the loss function, as described by
+     Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
+     For SGD variants, this simplifies hyperparameter search since
+     it decouples the settings of weight decay and learning rate.
+     For adaptive gradient algorithms, it regularizes variables with large
+     gradients more than L2 regularization would, which was shown to yield
+     better training loss and generalization error in the paper above.
+     """
+
+     def __init__(self, weight_decay, *args, **kwargs):
+       # super delegation is necessary here
+       # pylint: disable=useless-super-delegation
+       super(OptimizerWithDecoupledWeightDecay,
+             self).__init__(weight_decay, *args, **kwargs)
+       # pylint: enable=useless-super-delegation
+
+   return OptimizerWithDecoupledWeightDecay
+
+
+ @tf_export('contrib.opt.MomentumWOptimizer')
+ class MomentumWOptimizer(DecoupledWeightDecayExtension,
+                          momentum_opt.MomentumOptimizer):
+   """Optimizer that implements the Momentum algorithm with weight_decay.
+
+   This is an implementation of the SGDW optimizer described in "Fixing
+   Weight Decay Regularization in Adam" by Loshchilov & Hutter
+   (https://arxiv.org/abs/1711.05101)
+   ([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
+   It computes the update step of `train.MomentumOptimizer` and additionally
+   decays the variable. Note that this is different from adding
+   L2 regularization on the variables to the loss. Decoupling the weight decay
+   from other hyperparameters (in particular the learning rate) simplifies
+   hyperparameter search.
+
+   For further information see the documentation of the Momentum Optimizer.
+
+   Note that this optimizer can also be instantiated as
+   ```python
+   extend_with_decoupled_weight_decay(tf.compat.v1.train.MomentumOptimizer)(
+       weight_decay=weight_decay, learning_rate=lr, momentum=momentum)
+   ```
+   """
+
+   def __init__(self,
+                weight_decay,
+                learning_rate,
+                momentum,
+                use_locking=False,
+                name='MomentumW',
+                use_nesterov=False):
+     """Construct a new MomentumW optimizer.
+
+     For further information see the documentation of the Momentum Optimizer.
+
+     Args:
+       weight_decay: A `Tensor` or a floating point value. The weight decay.
+       learning_rate: A `Tensor` or a floating point value. The learning rate.
+       momentum: A `Tensor` or a floating point value. The momentum.
+       use_locking: If `True` use locks for update operations.
+       name: Optional name prefix for the operations created when applying
+         gradients. Defaults to "MomentumW".
+       use_nesterov: If `True` use Nesterov Momentum. See [Sutskever et al.,
+         2013](http://jmlr.org/proceedings/papers/v28/sutskever13.pdf). This
+         implementation always computes gradients at the value of the
+         variable(s) passed to the optimizer. Using Nesterov Momentum makes the
+         variable(s) track the values called `theta_t + mu*v_t` in the paper.
+
+       @compatibility(eager) When eager execution is enabled, learning_rate,
+       weight_decay and momentum can each be a callable that takes no
+       arguments and returns the actual value to use. This can be useful for
+       changing these values across different invocations of optimizer
+       functions. @end_compatibility
+     """
+     super(MomentumWOptimizer, self).__init__(
+         weight_decay,
+         learning_rate=learning_rate,
+         momentum=momentum,
+         use_locking=use_locking,
+         name=name,
+         use_nesterov=use_nesterov)
+
+
+ @tf_export('contrib.opt.AdamWOptimizer')
+ class AdamWOptimizer(DecoupledWeightDecayExtension, adam.AdamOptimizer):
+   """Optimizer that implements the Adam algorithm with weight decay.
+
+   This is an implementation of the AdamW optimizer described in ["Fixing
+   Weight Decay Regularization in Adam" by Loshchilov & Hutter]
+   (https://arxiv.org/abs/1711.05101)
+   ([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
+
+   It computes the update step of `train.AdamOptimizer` and additionally decays
+   the variable. Note that this is different from adding L2 regularization on
+   the variables to the loss: it regularizes variables with large
+   gradients more than L2 regularization would, which was shown to yield better
+   training loss and generalization error in the paper above.
+
+   For further information see the documentation of the Adam Optimizer.
+
+   Note that this optimizer can also be instantiated as
+   ```python
+   extend_with_decoupled_weight_decay(tf.compat.v1.train.AdamOptimizer)(
+       weight_decay=weight_decay, learning_rate=lr)
+   ```
+   """
+
+   def __init__(self,
+                weight_decay,
+                learning_rate=0.001,
+                beta1=0.9,
+                beta2=0.999,
+                epsilon=1e-8,
+                use_locking=False,
+                name='AdamW'):
+     """Construct a new AdamW optimizer.
+
+     For further information see the documentation of the Adam Optimizer.
+
+     Args:
+       weight_decay: A `Tensor` or a floating point value. The weight decay.
+       learning_rate: A Tensor or a floating point value. The learning rate.
+       beta1: A float value or a constant float tensor. The exponential decay
+         rate for the 1st moment estimates.
+       beta2: A float value or a constant float tensor. The exponential decay
+         rate for the 2nd moment estimates.
+       epsilon: A small constant for numerical stability. This epsilon is
+         "epsilon hat" in the Kingma and Ba paper (in the formula just before
+         Section 2.1), not the epsilon in Algorithm 1 of the paper.
+       use_locking: If True use locks for update operations.
+       name: Optional name for the operations created when applying gradients.
+         Defaults to "AdamW".
+     """
+     super(AdamWOptimizer, self).__init__(
+         weight_decay,
+         learning_rate=learning_rate,
+         beta1=beta1,
+         beta2=beta2,
+         epsilon=epsilon,
+         use_locking=use_locking,
+         name=name)
+
+
+ try:
+   from tensorflow.train import AdamAsyncOptimizer
+
+   @tf_export('contrib.opt.AdamAsyncWOptimizer')
+   class AdamAsyncWOptimizer(DecoupledWeightDecayExtension, AdamAsyncOptimizer):
+     """Optimizer that implements the Adam algorithm with weight decay.
+
+     This is an implementation of the AdamW optimizer described in ["Fixing
+     Weight Decay Regularization in Adam" by Loshchilov & Hutter]
+     (https://arxiv.org/abs/1711.05101)
+     ([pdf](https://arxiv.org/pdf/1711.05101.pdf)).
+
+     It computes the update step of `train.AdamAsyncOptimizer` and additionally
+     decays the variable. Note that this is different from adding L2
+     regularization on the variables to the loss: it regularizes variables with
+     large gradients more than L2 regularization would, which was shown to
+     yield better training loss and generalization error in the paper above.
+
+     For further information see the documentation of the Adam Optimizer.
+
+     Note that this optimizer can also be instantiated as
+     ```python
+     extend_with_decoupled_weight_decay(AdamAsyncOptimizer)(
+         weight_decay=weight_decay, learning_rate=lr)
+     ```
+     """
+
+     def __init__(self,
+                  weight_decay,
+                  learning_rate=0.001,
+                  beta1=0.9,
+                  beta2=0.999,
+                  epsilon=1e-8,
+                  use_locking=False,
+                  name='AdamAsyncW'):
+       """Construct a new AdamAsyncW optimizer.
+
+       For further information see the documentation of the Adam Optimizer.
+
+       Args:
+         weight_decay: A `Tensor` or a floating point value. The weight decay.
+         learning_rate: A Tensor or a floating point value. The learning rate.
+         beta1: A float value or a constant float tensor. The exponential decay
+           rate for the 1st moment estimates.
+         beta2: A float value or a constant float tensor. The exponential decay
+           rate for the 2nd moment estimates.
+         epsilon: A small constant for numerical stability. This epsilon is
+           "epsilon hat" in the Kingma and Ba paper (in the formula just before
+           Section 2.1), not the epsilon in Algorithm 1 of the paper.
+         use_locking: If True use locks for update operations.
+         name: Optional name for the operations created when applying
+           gradients. Defaults to "AdamAsyncW".
+       """
+       super(AdamAsyncWOptimizer, self).__init__(
+           weight_decay,
+           learning_rate=learning_rate,
+           beta1=beta1,
+           beta2=beta2,
+           epsilon=epsilon,
+           use_locking=use_locking,
+           name=name)
+ except ImportError:
+   pass
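
Each optimizer produced by this module first shrinks the variable in place (w <- w - weight_decay * w, see `_decay_weights_op` above) and only then applies the base optimizer's gradient update, which is what decouples the decay from the learning-rate schedule. A minimal usage sketch against the module added above; the commented `minimize` call assumes a `loss` tensor and variables `w1`, `w2` that are placeholders, not part of the package:

```python
import tensorflow as tf

from easy_rec.python.compat.weight_decay_optimizers import (
    AdamWOptimizer, extend_with_decoupled_weight_decay)

# Build a weight-decayed variant of any tf.train optimizer via the factory;
# the returned class takes the decay rate as its first constructor argument.
MomentumW = extend_with_decoupled_weight_decay(
    tf.compat.v1.train.MomentumOptimizer)
opt = MomentumW(weight_decay=1e-4, learning_rate=0.1, momentum=0.9)

# Or use the predefined AdamW variant directly.
opt = AdamWOptimizer(weight_decay=1e-4, learning_rate=1e-3)

# `decay_var_list` limits the decay to selected variables; when omitted,
# every variable touched by the update is decayed.
# train_op = opt.minimize(loss, decay_var_list=[w1, w2])
```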
easy_rec/python/core/easyrec_metrics/__init__.py
@@ -0,0 +1,24 @@
+ import logging
+ import os
+
+ import tensorflow as tf
+
+ from easy_rec.python.utils import pai_util
+
+ if tf.__version__ >= '2.0':
+   tf = tf.compat.v1
+
+ distribute_eval = os.environ.get('distribute_eval')
+ logging.info('distribute_eval = {}'.format(distribute_eval))
+ if distribute_eval == 'True':
+   if pai_util.is_on_pai() or tf.__version__ <= '1.13':
+     logging.info('Will use distribute pai_tf metrics impl')
+     from easy_rec.python.core.easyrec_metrics import distribute_metrics_impl_pai as metrics_tf
+   else:
+     logging.info('Will use distribute tf metrics impl')
+     from easy_rec.python.core.easyrec_metrics import distribute_metrics_impl_tf as metrics_tf
+ else:
+   if tf.__version__ >= '2.0':
+     from tensorflow.compat.v1 import metrics as metrics_tf
+   else:
+     from tensorflow import metrics as metrics_tf
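
Whichever branch is selected, downstream code imports `metrics_tf` from `easy_rec.python.core.easyrec_metrics` and sees a TF1-style streaming-metrics API. A minimal sketch of that contract, assuming the distributed implementations mirror the `tf.metrics` signatures; the label and prediction tensors are illustrative:

```python
import tensorflow as tf

from easy_rec.python.core.easyrec_metrics import metrics_tf

labels = tf.constant([0, 1, 1, 0], dtype=tf.int64)
predictions = tf.constant([0.1, 0.8, 0.6, 0.3])

# TF1-style streaming metric: returns (value_tensor, update_op). The update
# op accumulates state over batches; local variables must be initialized
# before it is run.
auc_value, auc_update = metrics_tf.auc(labels, predictions)

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.local_variables_initializer())
  sess.run(auc_update)
  print(sess.run(auc_value))
```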