pyxllib 0.3.96__py3-none-any.whl → 0.3.197__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyxllib/algo/geo.py +12 -0
- pyxllib/algo/intervals.py +1 -1
- pyxllib/algo/matcher.py +78 -0
- pyxllib/algo/pupil.py +187 -19
- pyxllib/algo/specialist.py +2 -1
- pyxllib/algo/stat.py +38 -2
- {pyxlpr → pyxllib/autogui}/__init__.py +1 -1
- pyxllib/autogui/activewin.py +246 -0
- pyxllib/autogui/all.py +9 -0
- pyxllib/{ext/autogui → autogui}/autogui.py +40 -11
- pyxllib/autogui/uiautolib.py +362 -0
- pyxllib/autogui/wechat.py +827 -0
- pyxllib/autogui/wechat_msg.py +421 -0
- pyxllib/autogui/wxautolib.py +84 -0
- pyxllib/cv/slidercaptcha.py +137 -0
- pyxllib/data/echarts.py +123 -12
- pyxllib/data/jsonlib.py +89 -0
- pyxllib/data/pglib.py +514 -30
- pyxllib/data/sqlite.py +231 -4
- pyxllib/ext/JLineViewer.py +14 -1
- pyxllib/ext/drissionlib.py +277 -0
- pyxllib/ext/kq5034lib.py +0 -1594
- pyxllib/ext/robustprocfile.py +497 -0
- pyxllib/ext/unixlib.py +6 -5
- pyxllib/ext/utools.py +108 -95
- pyxllib/ext/webhook.py +32 -14
- pyxllib/ext/wjxlib.py +88 -0
- pyxllib/ext/wpsapi.py +124 -0
- pyxllib/ext/xlwork.py +9 -0
- pyxllib/ext/yuquelib.py +1003 -71
- pyxllib/file/docxlib.py +1 -1
- pyxllib/file/libreoffice.py +165 -0
- pyxllib/file/movielib.py +9 -0
- pyxllib/file/packlib/__init__.py +112 -75
- pyxllib/file/pdflib.py +1 -1
- pyxllib/file/pupil.py +1 -1
- pyxllib/file/specialist/dirlib.py +1 -1
- pyxllib/file/specialist/download.py +10 -3
- pyxllib/file/specialist/filelib.py +266 -55
- pyxllib/file/xlsxlib.py +205 -50
- pyxllib/file/xlsyncfile.py +341 -0
- pyxllib/prog/cachetools.py +64 -0
- pyxllib/prog/filelock.py +42 -0
- pyxllib/prog/multiprogs.py +940 -0
- pyxllib/prog/newbie.py +9 -2
- pyxllib/prog/pupil.py +129 -60
- pyxllib/prog/specialist/__init__.py +176 -2
- pyxllib/prog/specialist/bc.py +5 -2
- pyxllib/prog/specialist/browser.py +11 -2
- pyxllib/prog/specialist/datetime.py +68 -0
- pyxllib/prog/specialist/tictoc.py +12 -13
- pyxllib/prog/specialist/xllog.py +5 -5
- pyxllib/prog/xlosenv.py +7 -0
- pyxllib/text/airscript.js +744 -0
- pyxllib/text/charclasslib.py +17 -5
- pyxllib/text/jiebalib.py +6 -3
- pyxllib/text/jinjalib.py +32 -0
- pyxllib/text/jsa_ai_prompt.md +271 -0
- pyxllib/text/jscode.py +159 -4
- pyxllib/text/nestenv.py +1 -1
- pyxllib/text/newbie.py +12 -0
- pyxllib/text/pupil/common.py +26 -0
- pyxllib/text/specialist/ptag.py +2 -2
- pyxllib/text/templates/echart_base.html +11 -0
- pyxllib/text/templates/highlight_code.html +17 -0
- pyxllib/text/templates/latex_editor.html +103 -0
- pyxllib/text/xmllib.py +76 -14
- pyxllib/xl.py +2 -1
- pyxllib-0.3.197.dist-info/METADATA +48 -0
- pyxllib-0.3.197.dist-info/RECORD +126 -0
- {pyxllib-0.3.96.dist-info → pyxllib-0.3.197.dist-info}/WHEEL +1 -2
- pyxllib/ext/autogui/__init__.py +0 -8
- pyxllib-0.3.96.dist-info/METADATA +0 -51
- pyxllib-0.3.96.dist-info/RECORD +0 -333
- pyxllib-0.3.96.dist-info/top_level.txt +0 -2
- pyxlpr/ai/__init__.py +0 -5
- pyxlpr/ai/clientlib.py +0 -1281
- pyxlpr/ai/specialist.py +0 -286
- pyxlpr/ai/torch_app.py +0 -172
- pyxlpr/ai/xlpaddle.py +0 -655
- pyxlpr/ai/xltorch.py +0 -705
- pyxlpr/data/__init__.py +0 -11
- pyxlpr/data/coco.py +0 -1325
- pyxlpr/data/datacls.py +0 -365
- pyxlpr/data/datasets.py +0 -200
- pyxlpr/data/gptlib.py +0 -1291
- pyxlpr/data/icdar/__init__.py +0 -96
- pyxlpr/data/icdar/deteval.py +0 -377
- pyxlpr/data/icdar/icdar2013.py +0 -341
- pyxlpr/data/icdar/iou.py +0 -340
- pyxlpr/data/icdar/rrc_evaluation_funcs_1_1.py +0 -463
- pyxlpr/data/imtextline.py +0 -473
- pyxlpr/data/labelme.py +0 -866
- pyxlpr/data/removeline.py +0 -179
- pyxlpr/data/specialist.py +0 -57
- pyxlpr/eval/__init__.py +0 -85
- pyxlpr/paddleocr.py +0 -776
- pyxlpr/ppocr/__init__.py +0 -15
- pyxlpr/ppocr/configs/rec/multi_language/generate_multi_language_configs.py +0 -226
- pyxlpr/ppocr/data/__init__.py +0 -135
- pyxlpr/ppocr/data/imaug/ColorJitter.py +0 -26
- pyxlpr/ppocr/data/imaug/__init__.py +0 -67
- pyxlpr/ppocr/data/imaug/copy_paste.py +0 -170
- pyxlpr/ppocr/data/imaug/east_process.py +0 -437
- pyxlpr/ppocr/data/imaug/gen_table_mask.py +0 -244
- pyxlpr/ppocr/data/imaug/iaa_augment.py +0 -114
- pyxlpr/ppocr/data/imaug/label_ops.py +0 -789
- pyxlpr/ppocr/data/imaug/make_border_map.py +0 -184
- pyxlpr/ppocr/data/imaug/make_pse_gt.py +0 -106
- pyxlpr/ppocr/data/imaug/make_shrink_map.py +0 -126
- pyxlpr/ppocr/data/imaug/operators.py +0 -433
- pyxlpr/ppocr/data/imaug/pg_process.py +0 -906
- pyxlpr/ppocr/data/imaug/randaugment.py +0 -143
- pyxlpr/ppocr/data/imaug/random_crop_data.py +0 -239
- pyxlpr/ppocr/data/imaug/rec_img_aug.py +0 -533
- pyxlpr/ppocr/data/imaug/sast_process.py +0 -777
- pyxlpr/ppocr/data/imaug/text_image_aug/__init__.py +0 -17
- pyxlpr/ppocr/data/imaug/text_image_aug/augment.py +0 -120
- pyxlpr/ppocr/data/imaug/text_image_aug/warp_mls.py +0 -168
- pyxlpr/ppocr/data/lmdb_dataset.py +0 -115
- pyxlpr/ppocr/data/pgnet_dataset.py +0 -104
- pyxlpr/ppocr/data/pubtab_dataset.py +0 -107
- pyxlpr/ppocr/data/simple_dataset.py +0 -372
- pyxlpr/ppocr/losses/__init__.py +0 -61
- pyxlpr/ppocr/losses/ace_loss.py +0 -52
- pyxlpr/ppocr/losses/basic_loss.py +0 -135
- pyxlpr/ppocr/losses/center_loss.py +0 -88
- pyxlpr/ppocr/losses/cls_loss.py +0 -30
- pyxlpr/ppocr/losses/combined_loss.py +0 -67
- pyxlpr/ppocr/losses/det_basic_loss.py +0 -208
- pyxlpr/ppocr/losses/det_db_loss.py +0 -80
- pyxlpr/ppocr/losses/det_east_loss.py +0 -63
- pyxlpr/ppocr/losses/det_pse_loss.py +0 -149
- pyxlpr/ppocr/losses/det_sast_loss.py +0 -121
- pyxlpr/ppocr/losses/distillation_loss.py +0 -272
- pyxlpr/ppocr/losses/e2e_pg_loss.py +0 -140
- pyxlpr/ppocr/losses/kie_sdmgr_loss.py +0 -113
- pyxlpr/ppocr/losses/rec_aster_loss.py +0 -99
- pyxlpr/ppocr/losses/rec_att_loss.py +0 -39
- pyxlpr/ppocr/losses/rec_ctc_loss.py +0 -44
- pyxlpr/ppocr/losses/rec_enhanced_ctc_loss.py +0 -70
- pyxlpr/ppocr/losses/rec_nrtr_loss.py +0 -30
- pyxlpr/ppocr/losses/rec_sar_loss.py +0 -28
- pyxlpr/ppocr/losses/rec_srn_loss.py +0 -47
- pyxlpr/ppocr/losses/table_att_loss.py +0 -109
- pyxlpr/ppocr/metrics/__init__.py +0 -44
- pyxlpr/ppocr/metrics/cls_metric.py +0 -45
- pyxlpr/ppocr/metrics/det_metric.py +0 -82
- pyxlpr/ppocr/metrics/distillation_metric.py +0 -73
- pyxlpr/ppocr/metrics/e2e_metric.py +0 -86
- pyxlpr/ppocr/metrics/eval_det_iou.py +0 -274
- pyxlpr/ppocr/metrics/kie_metric.py +0 -70
- pyxlpr/ppocr/metrics/rec_metric.py +0 -75
- pyxlpr/ppocr/metrics/table_metric.py +0 -50
- pyxlpr/ppocr/modeling/architectures/__init__.py +0 -32
- pyxlpr/ppocr/modeling/architectures/base_model.py +0 -88
- pyxlpr/ppocr/modeling/architectures/distillation_model.py +0 -60
- pyxlpr/ppocr/modeling/backbones/__init__.py +0 -54
- pyxlpr/ppocr/modeling/backbones/det_mobilenet_v3.py +0 -268
- pyxlpr/ppocr/modeling/backbones/det_resnet_vd.py +0 -246
- pyxlpr/ppocr/modeling/backbones/det_resnet_vd_sast.py +0 -285
- pyxlpr/ppocr/modeling/backbones/e2e_resnet_vd_pg.py +0 -265
- pyxlpr/ppocr/modeling/backbones/kie_unet_sdmgr.py +0 -186
- pyxlpr/ppocr/modeling/backbones/rec_mobilenet_v3.py +0 -138
- pyxlpr/ppocr/modeling/backbones/rec_mv1_enhance.py +0 -258
- pyxlpr/ppocr/modeling/backbones/rec_nrtr_mtb.py +0 -48
- pyxlpr/ppocr/modeling/backbones/rec_resnet_31.py +0 -210
- pyxlpr/ppocr/modeling/backbones/rec_resnet_aster.py +0 -143
- pyxlpr/ppocr/modeling/backbones/rec_resnet_fpn.py +0 -307
- pyxlpr/ppocr/modeling/backbones/rec_resnet_vd.py +0 -286
- pyxlpr/ppocr/modeling/heads/__init__.py +0 -54
- pyxlpr/ppocr/modeling/heads/cls_head.py +0 -52
- pyxlpr/ppocr/modeling/heads/det_db_head.py +0 -118
- pyxlpr/ppocr/modeling/heads/det_east_head.py +0 -121
- pyxlpr/ppocr/modeling/heads/det_pse_head.py +0 -37
- pyxlpr/ppocr/modeling/heads/det_sast_head.py +0 -128
- pyxlpr/ppocr/modeling/heads/e2e_pg_head.py +0 -253
- pyxlpr/ppocr/modeling/heads/kie_sdmgr_head.py +0 -206
- pyxlpr/ppocr/modeling/heads/multiheadAttention.py +0 -163
- pyxlpr/ppocr/modeling/heads/rec_aster_head.py +0 -393
- pyxlpr/ppocr/modeling/heads/rec_att_head.py +0 -202
- pyxlpr/ppocr/modeling/heads/rec_ctc_head.py +0 -88
- pyxlpr/ppocr/modeling/heads/rec_nrtr_head.py +0 -826
- pyxlpr/ppocr/modeling/heads/rec_sar_head.py +0 -402
- pyxlpr/ppocr/modeling/heads/rec_srn_head.py +0 -280
- pyxlpr/ppocr/modeling/heads/self_attention.py +0 -406
- pyxlpr/ppocr/modeling/heads/table_att_head.py +0 -246
- pyxlpr/ppocr/modeling/necks/__init__.py +0 -32
- pyxlpr/ppocr/modeling/necks/db_fpn.py +0 -111
- pyxlpr/ppocr/modeling/necks/east_fpn.py +0 -188
- pyxlpr/ppocr/modeling/necks/fpn.py +0 -138
- pyxlpr/ppocr/modeling/necks/pg_fpn.py +0 -314
- pyxlpr/ppocr/modeling/necks/rnn.py +0 -92
- pyxlpr/ppocr/modeling/necks/sast_fpn.py +0 -284
- pyxlpr/ppocr/modeling/necks/table_fpn.py +0 -110
- pyxlpr/ppocr/modeling/transforms/__init__.py +0 -28
- pyxlpr/ppocr/modeling/transforms/stn.py +0 -135
- pyxlpr/ppocr/modeling/transforms/tps.py +0 -308
- pyxlpr/ppocr/modeling/transforms/tps_spatial_transformer.py +0 -156
- pyxlpr/ppocr/optimizer/__init__.py +0 -61
- pyxlpr/ppocr/optimizer/learning_rate.py +0 -228
- pyxlpr/ppocr/optimizer/lr_scheduler.py +0 -49
- pyxlpr/ppocr/optimizer/optimizer.py +0 -160
- pyxlpr/ppocr/optimizer/regularizer.py +0 -52
- pyxlpr/ppocr/postprocess/__init__.py +0 -55
- pyxlpr/ppocr/postprocess/cls_postprocess.py +0 -33
- pyxlpr/ppocr/postprocess/db_postprocess.py +0 -234
- pyxlpr/ppocr/postprocess/east_postprocess.py +0 -143
- pyxlpr/ppocr/postprocess/locality_aware_nms.py +0 -200
- pyxlpr/ppocr/postprocess/pg_postprocess.py +0 -52
- pyxlpr/ppocr/postprocess/pse_postprocess/__init__.py +0 -15
- pyxlpr/ppocr/postprocess/pse_postprocess/pse/__init__.py +0 -29
- pyxlpr/ppocr/postprocess/pse_postprocess/pse/setup.py +0 -14
- pyxlpr/ppocr/postprocess/pse_postprocess/pse_postprocess.py +0 -118
- pyxlpr/ppocr/postprocess/rec_postprocess.py +0 -654
- pyxlpr/ppocr/postprocess/sast_postprocess.py +0 -355
- pyxlpr/ppocr/tools/__init__.py +0 -14
- pyxlpr/ppocr/tools/eval.py +0 -83
- pyxlpr/ppocr/tools/export_center.py +0 -77
- pyxlpr/ppocr/tools/export_model.py +0 -129
- pyxlpr/ppocr/tools/infer/predict_cls.py +0 -151
- pyxlpr/ppocr/tools/infer/predict_det.py +0 -300
- pyxlpr/ppocr/tools/infer/predict_e2e.py +0 -169
- pyxlpr/ppocr/tools/infer/predict_rec.py +0 -414
- pyxlpr/ppocr/tools/infer/predict_system.py +0 -204
- pyxlpr/ppocr/tools/infer/utility.py +0 -629
- pyxlpr/ppocr/tools/infer_cls.py +0 -83
- pyxlpr/ppocr/tools/infer_det.py +0 -134
- pyxlpr/ppocr/tools/infer_e2e.py +0 -122
- pyxlpr/ppocr/tools/infer_kie.py +0 -153
- pyxlpr/ppocr/tools/infer_rec.py +0 -146
- pyxlpr/ppocr/tools/infer_table.py +0 -107
- pyxlpr/ppocr/tools/program.py +0 -596
- pyxlpr/ppocr/tools/test_hubserving.py +0 -117
- pyxlpr/ppocr/tools/train.py +0 -163
- pyxlpr/ppocr/tools/xlprog.py +0 -748
- pyxlpr/ppocr/utils/EN_symbol_dict.txt +0 -94
- pyxlpr/ppocr/utils/__init__.py +0 -24
- pyxlpr/ppocr/utils/dict/ar_dict.txt +0 -117
- pyxlpr/ppocr/utils/dict/arabic_dict.txt +0 -162
- pyxlpr/ppocr/utils/dict/be_dict.txt +0 -145
- pyxlpr/ppocr/utils/dict/bg_dict.txt +0 -140
- pyxlpr/ppocr/utils/dict/chinese_cht_dict.txt +0 -8421
- pyxlpr/ppocr/utils/dict/cyrillic_dict.txt +0 -163
- pyxlpr/ppocr/utils/dict/devanagari_dict.txt +0 -167
- pyxlpr/ppocr/utils/dict/en_dict.txt +0 -63
- pyxlpr/ppocr/utils/dict/fa_dict.txt +0 -136
- pyxlpr/ppocr/utils/dict/french_dict.txt +0 -136
- pyxlpr/ppocr/utils/dict/german_dict.txt +0 -143
- pyxlpr/ppocr/utils/dict/hi_dict.txt +0 -162
- pyxlpr/ppocr/utils/dict/it_dict.txt +0 -118
- pyxlpr/ppocr/utils/dict/japan_dict.txt +0 -4399
- pyxlpr/ppocr/utils/dict/ka_dict.txt +0 -153
- pyxlpr/ppocr/utils/dict/korean_dict.txt +0 -3688
- pyxlpr/ppocr/utils/dict/latin_dict.txt +0 -185
- pyxlpr/ppocr/utils/dict/mr_dict.txt +0 -153
- pyxlpr/ppocr/utils/dict/ne_dict.txt +0 -153
- pyxlpr/ppocr/utils/dict/oc_dict.txt +0 -96
- pyxlpr/ppocr/utils/dict/pu_dict.txt +0 -130
- pyxlpr/ppocr/utils/dict/rs_dict.txt +0 -91
- pyxlpr/ppocr/utils/dict/rsc_dict.txt +0 -134
- pyxlpr/ppocr/utils/dict/ru_dict.txt +0 -125
- pyxlpr/ppocr/utils/dict/ta_dict.txt +0 -128
- pyxlpr/ppocr/utils/dict/table_dict.txt +0 -277
- pyxlpr/ppocr/utils/dict/table_structure_dict.txt +0 -2759
- pyxlpr/ppocr/utils/dict/te_dict.txt +0 -151
- pyxlpr/ppocr/utils/dict/ug_dict.txt +0 -114
- pyxlpr/ppocr/utils/dict/uk_dict.txt +0 -142
- pyxlpr/ppocr/utils/dict/ur_dict.txt +0 -137
- pyxlpr/ppocr/utils/dict/xi_dict.txt +0 -110
- pyxlpr/ppocr/utils/dict90.txt +0 -90
- pyxlpr/ppocr/utils/e2e_metric/Deteval.py +0 -574
- pyxlpr/ppocr/utils/e2e_metric/polygon_fast.py +0 -83
- pyxlpr/ppocr/utils/e2e_utils/extract_batchsize.py +0 -87
- pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_fast.py +0 -457
- pyxlpr/ppocr/utils/e2e_utils/extract_textpoint_slow.py +0 -592
- pyxlpr/ppocr/utils/e2e_utils/pgnet_pp_utils.py +0 -162
- pyxlpr/ppocr/utils/e2e_utils/visual.py +0 -162
- pyxlpr/ppocr/utils/en_dict.txt +0 -95
- pyxlpr/ppocr/utils/gen_label.py +0 -81
- pyxlpr/ppocr/utils/ic15_dict.txt +0 -36
- pyxlpr/ppocr/utils/iou.py +0 -54
- pyxlpr/ppocr/utils/logging.py +0 -69
- pyxlpr/ppocr/utils/network.py +0 -84
- pyxlpr/ppocr/utils/ppocr_keys_v1.txt +0 -6623
- pyxlpr/ppocr/utils/profiler.py +0 -110
- pyxlpr/ppocr/utils/save_load.py +0 -150
- pyxlpr/ppocr/utils/stats.py +0 -72
- pyxlpr/ppocr/utils/utility.py +0 -80
- pyxlpr/ppstructure/__init__.py +0 -13
- pyxlpr/ppstructure/predict_system.py +0 -187
- pyxlpr/ppstructure/table/__init__.py +0 -13
- pyxlpr/ppstructure/table/eval_table.py +0 -72
- pyxlpr/ppstructure/table/matcher.py +0 -192
- pyxlpr/ppstructure/table/predict_structure.py +0 -136
- pyxlpr/ppstructure/table/predict_table.py +0 -221
- pyxlpr/ppstructure/table/table_metric/__init__.py +0 -16
- pyxlpr/ppstructure/table/table_metric/parallel.py +0 -51
- pyxlpr/ppstructure/table/table_metric/table_metric.py +0 -247
- pyxlpr/ppstructure/table/tablepyxl/__init__.py +0 -13
- pyxlpr/ppstructure/table/tablepyxl/style.py +0 -283
- pyxlpr/ppstructure/table/tablepyxl/tablepyxl.py +0 -118
- pyxlpr/ppstructure/utility.py +0 -71
- pyxlpr/xlai.py +0 -10
- /pyxllib/{ext/autogui → autogui}/virtualkey.py +0 -0
- {pyxllib-0.3.96.dist-info → pyxllib-0.3.197.dist-info/licenses}/LICENSE +0 -0
pyxlpr/ppocr/modeling/transforms/tps_spatial_transformer.py
@@ -1,156 +0,0 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-This code is refer from:
-https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/tps_spatial_transformer.py
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import math
-import paddle
-from paddle import nn, ParamAttr
-from paddle.nn import functional as F
-import numpy as np
-import itertools
-
-
-def grid_sample(input, grid, canvas=None):
-    input.stop_gradient = False
-    output = F.grid_sample(input, grid)
-    if canvas is None:
-        return output
-    else:
-        input_mask = paddle.ones(shape=input.shape)
-        output_mask = F.grid_sample(input_mask, grid)
-        padded_output = output * output_mask + canvas * (1 - output_mask)
-        return padded_output
-
-
-# phi(x1, x2) = r^2 * log(r), where r = ||x1 - x2||_2
-def compute_partial_repr(input_points, control_points):
-    N = input_points.shape[0]
-    M = control_points.shape[0]
-    pairwise_diff = paddle.reshape(
-        input_points, shape=[N, 1, 2]) - paddle.reshape(
-            control_points, shape=[1, M, 2])
-    # original implementation, very slow
-    # pairwise_dist = torch.sum(pairwise_diff ** 2, dim = 2) # square of distance
-    pairwise_diff_square = pairwise_diff * pairwise_diff
-    pairwise_dist = pairwise_diff_square[:, :, 0] + pairwise_diff_square[:, :,
-                                                                         1]
-    repr_matrix = 0.5 * pairwise_dist * paddle.log(pairwise_dist)
-    # fix numerical error for 0 * log(0), substitute all nan with 0
-    mask = np.array(repr_matrix != repr_matrix)
-    repr_matrix[mask] = 0
-    return repr_matrix
-
-
-# output_ctrl_pts are specified, according to our task.
-def build_output_control_points(num_control_points, margins):
-    margin_x, margin_y = margins
-    num_ctrl_pts_per_side = num_control_points // 2
-    ctrl_pts_x = np.linspace(margin_x, 1.0 - margin_x, num_ctrl_pts_per_side)
-    ctrl_pts_y_top = np.ones(num_ctrl_pts_per_side) * margin_y
-    ctrl_pts_y_bottom = np.ones(num_ctrl_pts_per_side) * (1.0 - margin_y)
-    ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
-    ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
-    output_ctrl_pts_arr = np.concatenate(
-        [ctrl_pts_top, ctrl_pts_bottom], axis=0)
-    output_ctrl_pts = paddle.to_tensor(output_ctrl_pts_arr)
-    return output_ctrl_pts
-
-
-class TPSSpatialTransformer(nn.Layer):
-    def __init__(self,
-                 output_image_size=None,
-                 num_control_points=None,
-                 margins=None):
-        super(TPSSpatialTransformer, self).__init__()
-        self.output_image_size = output_image_size
-        self.num_control_points = num_control_points
-        self.margins = margins
-
-        self.target_height, self.target_width = output_image_size
-        target_control_points = build_output_control_points(num_control_points,
-                                                            margins)
-        N = num_control_points
-
-        # create padded kernel matrix
-        forward_kernel = paddle.zeros(shape=[N + 3, N + 3])
-        target_control_partial_repr = compute_partial_repr(
-            target_control_points, target_control_points)
-        target_control_partial_repr = paddle.cast(target_control_partial_repr,
-                                                  forward_kernel.dtype)
-        forward_kernel[:N, :N] = target_control_partial_repr
-        forward_kernel[:N, -3] = 1
-        forward_kernel[-3, :N] = 1
-        target_control_points = paddle.cast(target_control_points,
-                                            forward_kernel.dtype)
-        forward_kernel[:N, -2:] = target_control_points
-        forward_kernel[-2:, :N] = paddle.transpose(
-            target_control_points, perm=[1, 0])
-        # compute inverse matrix
-        inverse_kernel = paddle.inverse(forward_kernel)
-
-        # create target cordinate matrix
-        HW = self.target_height * self.target_width
-        target_coordinate = list(
-            itertools.product(
-                range(self.target_height), range(self.target_width)))
-        target_coordinate = paddle.to_tensor(target_coordinate)  # HW x 2
-        Y, X = paddle.split(
-            target_coordinate, target_coordinate.shape[1], axis=1)
-        Y = Y / (self.target_height - 1)
-        X = X / (self.target_width - 1)
-        target_coordinate = paddle.concat(
-            [X, Y], axis=1)  # convert from (y, x) to (x, y)
-        target_coordinate_partial_repr = compute_partial_repr(
-            target_coordinate, target_control_points)
-        target_coordinate_repr = paddle.concat(
-            [
-                target_coordinate_partial_repr, paddle.ones(shape=[HW, 1]),
-                target_coordinate
-            ],
-            axis=1)
-
-        # register precomputed matrices
-        self.inverse_kernel = inverse_kernel
-        self.padding_matrix = paddle.zeros(shape=[3, 2])
-        self.target_coordinate_repr = target_coordinate_repr
-        self.target_control_points = target_control_points
-
-    def forward(self, input, source_control_points):
-        assert source_control_points.ndimension() == 3
-        assert source_control_points.shape[1] == self.num_control_points
-        assert source_control_points.shape[2] == 2
-        batch_size = paddle.shape(source_control_points)[0]
-
-        self.padding_matrix = paddle.expand(
-            self.padding_matrix, shape=[batch_size, 3, 2])
-        Y = paddle.concat([source_control_points, self.padding_matrix], 1)
-        mapping_matrix = paddle.matmul(self.inverse_kernel, Y)
-        source_coordinate = paddle.matmul(self.target_coordinate_repr,
-                                          mapping_matrix)
-
-        grid = paddle.reshape(
-            source_coordinate,
-            shape=[-1, self.target_height, self.target_width, 2])
-        grid = paddle.clip(grid, 0,
-                           1)  # the source_control_points may be out of [0, 1].
-        # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]
-        grid = 2.0 * grid - 1.0
-        output_maps = grid_sample(input, grid, canvas=None)
-        return output_maps, source_coordinate
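The removed compute_partial_repr evaluates the thin-plate-spline radial basis phi(p, q) = r^2 * log(r) with r = ||p - q||_2, using the identity 0.5 * r^2 * log(r^2) == r^2 * log(r). A minimal framework-free sketch of that kernel in NumPy (names here are illustrative, not part of the package):

import numpy as np

def tps_kernel(input_points, control_points):
    # pairwise differences: (N, M, 2)
    diff = input_points[:, None, :] - control_points[None, :, :]
    dist_sq = (diff ** 2).sum(axis=2)  # r^2, shape (N, M)
    with np.errstate(divide='ignore', invalid='ignore'):
        # 0.5 * r^2 * log(r^2) == r^2 * log(r)
        repr_matrix = 0.5 * dist_sq * np.log(dist_sq)
    # 0 * log(0) produces nan; substitute 0, as the deleted code does
    return np.nan_to_num(repr_matrix)

pts = np.array([[0.0, 0.0], [1.0, 1.0]])
ctrl = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]])
print(tps_kernel(pts, ctrl))  # zero where a point coincides with a control point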
pyxlpr/ppocr/optimizer/__init__.py
@@ -1,61 +0,0 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-import copy
-import paddle
-
-__all__ = ['build_optimizer']
-
-
-def build_lr_scheduler(lr_config, epochs, step_each_epoch):
-    from . import learning_rate
-    lr_config.update({'epochs': epochs, 'step_each_epoch': step_each_epoch})
-    if 'name' in lr_config:
-        lr_name = lr_config.pop('name')
-        lr = getattr(learning_rate, lr_name)(**lr_config)()
-    else:
-        lr = lr_config['learning_rate']
-    return lr
-
-
-def build_optimizer(config, epochs, step_each_epoch, parameters):
-    from . import regularizer, optimizer
-    config = copy.deepcopy(config)
-    # step1 build lr
-    lr = build_lr_scheduler(config.pop('lr'), epochs, step_each_epoch)
-
-    # step2 build regularization
-    if 'regularizer' in config and config['regularizer'] is not None:
-        reg_config = config.pop('regularizer')
-        reg_name = reg_config.pop('name') + 'Decay'
-        reg = getattr(regularizer, reg_name)(**reg_config)()
-    else:
-        reg = None
-
-    # step3 build optimizer
-    optim_name = config.pop('name')
-    if 'clip_norm' in config:
-        clip_norm = config.pop('clip_norm')
-        grad_clip = paddle.nn.ClipGradByNorm(clip_norm=clip_norm)
-    else:
-        grad_clip = None
-    optim = getattr(optimizer, optim_name)(learning_rate=lr,
-                                           weight_decay=reg,
-                                           grad_clip=grad_clip,
-                                           **config)
-    return optim(parameters), lr
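build_lr_scheduler and build_optimizer above dispatch on 'name' keys via getattr. A hypothetical config dict, shaped only after the keys the deleted code pops ('lr', 'regularizer', 'name', 'clip_norm'); the class names 'Adam' and 'Cosine' come from the sibling modules removed in this same diff, while the 'L2'/'factor' keys are an assumption about the removed regularizer module:

optimizer_config = {
    'name': 'Adam',            # step3: getattr(optimizer, 'Adam')
    'beta1': 0.9,
    'beta2': 0.999,
    'clip_norm': 10.0,         # becomes paddle.nn.ClipGradByNorm(clip_norm=10.0)
    'lr': {                    # step1: build_lr_scheduler
        'name': 'Cosine',
        'learning_rate': 0.001,
        'warmup_epoch': 5,
    },
    'regularizer': {           # step2: 'L2' + 'Decay' -> regularizer.L2Decay
        'name': 'L2',
        'factor': 3e-05,
    },
}
# optim, lr = build_optimizer(optimizer_config, epochs=500,
#                             step_each_epoch=100,
#                             parameters=model.parameters())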
pyxlpr/ppocr/optimizer/learning_rate.py
@@ -1,228 +0,0 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from paddle.optimizer import lr
-from .lr_scheduler import CyclicalCosineDecay
-
-
-class Linear(object):
-    """
-    Linear learning rate decay
-    Args:
-        lr (float): The initial learning rate. It is a python float number.
-        epochs(int): The decay step size. It determines the decay cycle.
-        end_lr(float, optional): The minimum final learning rate. Default: 0.0001.
-        power(float, optional): Power of polynomial. Default: 1.0.
-        last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-    """
-
-    def __init__(self,
-                 learning_rate,
-                 epochs,
-                 step_each_epoch,
-                 end_lr=0.0,
-                 power=1.0,
-                 warmup_epoch=0,
-                 last_epoch=-1,
-                 **kwargs):
-        super(Linear, self).__init__()
-        self.learning_rate = learning_rate
-        self.epochs = epochs * step_each_epoch
-        self.end_lr = end_lr
-        self.power = power
-        self.last_epoch = last_epoch
-        self.warmup_epoch = round(warmup_epoch * step_each_epoch)
-
-    def __call__(self):
-        learning_rate = lr.PolynomialDecay(
-            learning_rate=self.learning_rate,
-            decay_steps=self.epochs,
-            end_lr=self.end_lr,
-            power=self.power,
-            last_epoch=self.last_epoch)
-        if self.warmup_epoch > 0:
-            learning_rate = lr.LinearWarmup(
-                learning_rate=learning_rate,
-                warmup_steps=self.warmup_epoch,
-                start_lr=0.0,
-                end_lr=self.learning_rate,
-                last_epoch=self.last_epoch)
-        return learning_rate
-
-
-class Cosine(object):
-    """
-    Cosine learning rate decay
-    lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1)
-    Args:
-        lr(float): initial learning rate
-        step_each_epoch(int): steps each epoch
-        epochs(int): total training epochs
-        last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-    """
-
-    def __init__(self,
-                 learning_rate,
-                 step_each_epoch,
-                 epochs,
-                 warmup_epoch=0,
-                 last_epoch=-1,
-                 **kwargs):
-        super(Cosine, self).__init__()
-        self.learning_rate = learning_rate
-        self.T_max = step_each_epoch * epochs
-        self.last_epoch = last_epoch
-        self.warmup_epoch = round(warmup_epoch * step_each_epoch)
-
-    def __call__(self):
-        learning_rate = lr.CosineAnnealingDecay(
-            learning_rate=self.learning_rate,
-            T_max=self.T_max,
-            last_epoch=self.last_epoch)
-        if self.warmup_epoch > 0:
-            learning_rate = lr.LinearWarmup(
-                learning_rate=learning_rate,
-                warmup_steps=self.warmup_epoch,
-                start_lr=0.0,
-                end_lr=self.learning_rate,
-                last_epoch=self.last_epoch)
-        return learning_rate
-
-
-class Step(object):
-    """
-    Piecewise learning rate decay
-    Args:
-        step_each_epoch(int): steps each epoch
-        learning_rate (float): The initial learning rate. It is a python float number.
-        step_size (int): the interval to update.
-        gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
-            It should be less than 1.0. Default: 0.1.
-        last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-    """
-
-    def __init__(self,
                 learning_rate,
-                 step_size,
-                 step_each_epoch,
-                 gamma,
-                 warmup_epoch=0,
-                 last_epoch=-1,
-                 **kwargs):
-        super(Step, self).__init__()
-        self.step_size = step_each_epoch * step_size
-        self.learning_rate = learning_rate
-        self.gamma = gamma
-        self.last_epoch = last_epoch
-        self.warmup_epoch = round(warmup_epoch * step_each_epoch)
-
-    def __call__(self):
-        learning_rate = lr.StepDecay(
-            learning_rate=self.learning_rate,
-            step_size=self.step_size,
-            gamma=self.gamma,
-            last_epoch=self.last_epoch)
-        if self.warmup_epoch > 0:
-            learning_rate = lr.LinearWarmup(
-                learning_rate=learning_rate,
-                warmup_steps=self.warmup_epoch,
-                start_lr=0.0,
-                end_lr=self.learning_rate,
-                last_epoch=self.last_epoch)
-        return learning_rate
-
-
-class Piecewise(object):
-    """
-    Piecewise learning rate decay
-    Args:
-        boundaries(list): A list of steps numbers. The type of element in the list is python int.
-        values(list): A list of learning rate values that will be picked during different epoch boundaries.
-            The type of element in the list is python float.
-        last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-    """
-
-    def __init__(self,
-                 step_each_epoch,
-                 decay_epochs,
-                 values,
-                 warmup_epoch=0,
-                 last_epoch=-1,
-                 **kwargs):
-        super(Piecewise, self).__init__()
-        self.boundaries = [step_each_epoch * e for e in decay_epochs]
-        self.values = values
-        self.last_epoch = last_epoch
-        self.warmup_epoch = round(warmup_epoch * step_each_epoch)
-
-    def __call__(self):
-        learning_rate = lr.PiecewiseDecay(
-            boundaries=self.boundaries,
-            values=self.values,
-            last_epoch=self.last_epoch)
-        if self.warmup_epoch > 0:
-            learning_rate = lr.LinearWarmup(
-                learning_rate=learning_rate,
-                warmup_steps=self.warmup_epoch,
-                start_lr=0.0,
-                end_lr=self.values[0],
-                last_epoch=self.last_epoch)
-        return learning_rate
-
-
-class CyclicalCosine(object):
-    """
-    Cyclical cosine learning rate decay
-    Args:
-        learning_rate(float): initial learning rate
-        step_each_epoch(int): steps each epoch
-        epochs(int): total training epochs
-        cycle(int): period of the cosine learning rate
-        last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-    """
-
-    def __init__(self,
-                 learning_rate,
-                 step_each_epoch,
-                 epochs,
-                 cycle,
-                 warmup_epoch=0,
-                 last_epoch=-1,
-                 **kwargs):
-        super(CyclicalCosine, self).__init__()
-        self.learning_rate = learning_rate
-        self.T_max = step_each_epoch * epochs
-        self.last_epoch = last_epoch
-        self.warmup_epoch = round(warmup_epoch * step_each_epoch)
-        self.cycle = round(cycle * step_each_epoch)
-
-    def __call__(self):
-        learning_rate = CyclicalCosineDecay(
-            learning_rate=self.learning_rate,
-            T_max=self.T_max,
-            cycle=self.cycle,
-            last_epoch=self.last_epoch)
-        if self.warmup_epoch > 0:
-            learning_rate = lr.LinearWarmup(
-                learning_rate=learning_rate,
-                warmup_steps=self.warmup_epoch,
-                start_lr=0.0,
-                end_lr=self.learning_rate,
-                last_epoch=self.last_epoch)
-        return learning_rate
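Each wrapper above converts an epoch-based config into step-based Paddle schedulers, optionally nested inside lr.LinearWarmup. A minimal sketch of the resulting schedule shape for the Cosine case, approximating lr.CosineAnnealingDecay and lr.LinearWarmup with plain math rather than the Paddle classes themselves (hyperparameter values are assumed, for illustration):

import math

base_lr, epochs, step_each_epoch, warmup_epoch = 0.001, 100, 250, 5
T_max = step_each_epoch * epochs                      # total decay steps
warmup_steps = round(warmup_epoch * step_each_epoch)  # as in the wrappers

def lr_at(step):
    if step < warmup_steps:
        # lr.LinearWarmup: ramp from start_lr=0.0 to end_lr=base_lr
        return base_lr * step / warmup_steps
    # lr.CosineAnnealingDecay (eta_min defaults to 0)
    return 0.5 * base_lr * (1 + math.cos(math.pi * step / T_max))

for step in (0, 625, 1250, 12500, 25000):
    print(step, round(lr_at(step), 6))  # ramps up, then decays to 0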
pyxlpr/ppocr/optimizer/lr_scheduler.py
@@ -1,49 +0,0 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import math
-from paddle.optimizer.lr import LRScheduler
-
-
-class CyclicalCosineDecay(LRScheduler):
-    def __init__(self,
-                 learning_rate,
-                 T_max,
-                 cycle=1,
-                 last_epoch=-1,
-                 eta_min=0.0,
-                 verbose=False):
-        """
-        Cyclical cosine learning rate decay
-        A learning rate which can be referred in https://arxiv.org/pdf/2012.12645.pdf
-        Args:
-            learning rate(float): learning rate
-            T_max(int): maximum epoch num
-            cycle(int): period of the cosine decay
-            last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-            eta_min(float): minimum learning rate during training
-            verbose(bool): whether to print learning rate for each epoch
-        """
-        super(CyclicalCosineDecay, self).__init__(learning_rate, last_epoch,
-                                                  verbose)
-        self.cycle = cycle
-        self.eta_min = eta_min
-
-    def get_lr(self):
-        if self.last_epoch == 0:
-            return self.base_lr
-        reletive_epoch = self.last_epoch % self.cycle
-        lr = self.eta_min + 0.5 * (self.base_lr - self.eta_min) * \
-            (1 + math.cos(math.pi * reletive_epoch / self.cycle))
-        return lr
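The get_lr above restarts the cosine curve every `cycle` steps: lr = eta_min + 0.5 * (base_lr - eta_min) * (1 + cos(pi * (last_epoch mod cycle) / cycle)). A small standalone check of that restart behavior (values chosen for illustration):

import math

def cyclical_cosine(last_epoch, base_lr=0.001, eta_min=0.0, cycle=10):
    relative = last_epoch % cycle
    return eta_min + 0.5 * (base_lr - eta_min) * (
        1 + math.cos(math.pi * relative / cycle))

print([round(cyclical_cosine(e), 6) for e in (0, 5, 9, 10, 15)])
# epoch 10 is a cycle boundary: the schedule restarts at base_lr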
pyxlpr/ppocr/optimizer/optimizer.py
@@ -1,160 +0,0 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from paddle import optimizer as optim
-
-
-class Momentum(object):
-    """
-    Simple Momentum optimizer with velocity state.
-    Args:
-        learning_rate (float|Variable) - The learning rate used to update parameters.
-            Can be a float value or a Variable with one float value as data element.
-        momentum (float) - Momentum factor.
-        regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
-    """
-
-    def __init__(self,
-                 learning_rate,
-                 momentum,
-                 weight_decay=None,
-                 grad_clip=None,
-                 **args):
-        super(Momentum, self).__init__()
-        self.learning_rate = learning_rate
-        self.momentum = momentum
-        self.weight_decay = weight_decay
-        self.grad_clip = grad_clip
-
-    def __call__(self, parameters):
-        opt = optim.Momentum(
-            learning_rate=self.learning_rate,
-            momentum=self.momentum,
-            weight_decay=self.weight_decay,
-            grad_clip=self.grad_clip,
-            parameters=parameters)
-        return opt
-
-
-class Adam(object):
-    def __init__(self,
-                 learning_rate=0.001,
-                 beta1=0.9,
-                 beta2=0.999,
-                 epsilon=1e-08,
-                 parameter_list=None,
-                 weight_decay=None,
-                 grad_clip=None,
-                 name=None,
-                 lazy_mode=False,
-                 **kwargs):
-        self.learning_rate = learning_rate
-        self.beta1 = beta1
-        self.beta2 = beta2
-        self.epsilon = epsilon
-        self.parameter_list = parameter_list
-        self.learning_rate = learning_rate
-        self.weight_decay = weight_decay
-        self.grad_clip = grad_clip
-        self.name = name
-        self.lazy_mode = lazy_mode
-
-    def __call__(self, parameters):
-        opt = optim.Adam(
-            learning_rate=self.learning_rate,
-            beta1=self.beta1,
-            beta2=self.beta2,
-            epsilon=self.epsilon,
-            weight_decay=self.weight_decay,
-            grad_clip=self.grad_clip,
-            name=self.name,
-            lazy_mode=self.lazy_mode,
-            parameters=parameters)
-        return opt
-
-
-class RMSProp(object):
-    """
-    Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method.
-    Args:
-        learning_rate (float|Variable) - The learning rate used to update parameters.
-            Can be a float value or a Variable with one float value as data element.
-        momentum (float) - Momentum factor.
-        rho (float) - rho value in equation.
-        epsilon (float) - avoid division by zero, default is 1e-6.
-        regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
-    """
-
-    def __init__(self,
-                 learning_rate,
-                 momentum=0.0,
-                 rho=0.95,
-                 epsilon=1e-6,
-                 weight_decay=None,
-                 grad_clip=None,
-                 **args):
-        super(RMSProp, self).__init__()
-        self.learning_rate = learning_rate
-        self.momentum = momentum
-        self.rho = rho
-        self.epsilon = epsilon
-        self.weight_decay = weight_decay
-        self.grad_clip = grad_clip
-
-    def __call__(self, parameters):
-        opt = optim.RMSProp(
-            learning_rate=self.learning_rate,
-            momentum=self.momentum,
-            rho=self.rho,
-            epsilon=self.epsilon,
-            weight_decay=self.weight_decay,
-            grad_clip=self.grad_clip,
-            parameters=parameters)
-        return opt
-
-
-class Adadelta(object):
-    def __init__(self,
-                 learning_rate=0.001,
-                 epsilon=1e-08,
-                 rho=0.95,
-                 parameter_list=None,
-                 weight_decay=None,
-                 grad_clip=None,
-                 name=None,
-                 **kwargs):
-        self.learning_rate = learning_rate
-        self.epsilon = epsilon
-        self.rho = rho
-        self.parameter_list = parameter_list
-        self.learning_rate = learning_rate
-        self.weight_decay = weight_decay
-        self.grad_clip = grad_clip
-        self.name = name
-
-    def __call__(self, parameters):
-        opt = optim.Adadelta(
-            learning_rate=self.learning_rate,
-            epsilon=self.epsilon,
-            rho=self.rho,
-            weight_decay=self.weight_decay,
-            grad_clip=self.grad_clip,
-            name=self.name,
-            parameters=parameters)
-        return opt
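All four wrappers share the same two-stage pattern: hyperparameters are bound in __init__, and the actual paddle.optimizer object is only created when the wrapper is called with the model parameters, which is what lets build_optimizer end with `optim(parameters)`. A usage sketch, assuming paddle is installed and the removed Momentum class is in scope; `model` is just a stand-in layer:

import paddle

model = paddle.nn.Linear(10, 10)            # any paddle.nn.Layer works here

momentum = Momentum(learning_rate=0.001, momentum=0.9)   # config only
opt = momentum(parameters=model.parameters())            # materialize
print(type(opt).__name__)  # -> Momentum (a paddle.optimizer.Momentum)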