buildz 0.7.0__tar.gz → 0.7.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {buildz-0.7.0/buildz.egg-info → buildz-0.7.2}/PKG-INFO +8 -1
- {buildz-0.7.0 → buildz-0.7.2}/README.md +7 -0
- buildz-0.7.2/buildz/__init__.py +25 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/gpuz/test/analyze.py +17 -7
- buildz-0.7.2/buildz/gpuz/test/demo.py +84 -0
- buildz-0.7.2/buildz/gpuz/test/report.txt +107 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/gpuz/test/take_gpu_mem.py +1 -1
- buildz-0.7.2/buildz/gpuz/test/test_atten_demo.py +165 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/gpuz/test/test_linear_demo.py +1 -1
- buildz-0.7.2/buildz/gpuz/test/test_multi_demo.py +147 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/gpuz/test/test_resnet_demo.py +2 -2
- {buildz-0.7.0 → buildz-0.7.2}/buildz/gpuz/torch/__init__.py +2 -1
- buildz-0.7.2/buildz/gpuz/torch/dict_middle.py +270 -0
- buildz-0.7.2/buildz/iocz/__init__.py +6 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/mhttp/gateway.py +17 -2
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/mhttp/mhttp.py +4 -4
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/mhttp/proxy.py +12 -8
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/mhttp/record.py +11 -1
- buildz-0.7.2/buildz/netz/tcp/__init__.py +1 -0
- buildz-0.7.2/buildz/netz/tcp/__main__.py +4 -0
- buildz-0.7.2/buildz/netz/tcp/middle.py +117 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/test/test_gw.py +8 -0
- {buildz-0.7.0 → buildz-0.7.2/buildz.egg-info}/PKG-INFO +8 -1
- {buildz-0.7.0 → buildz-0.7.2}/buildz.egg-info/SOURCES.txt +6 -1
- {buildz-0.7.0 → buildz-0.7.2}/setup.py +1 -1
- buildz-0.7.0/buildz/__init__.py +0 -9
- buildz-0.7.0/buildz/gpuz/test/report.txt +0 -52
- buildz-0.7.0/buildz/gpuz/test/test_resnet.py +0 -179
- buildz-0.7.0/buildz/gpuz/torch/dict_middle.py +0 -156
- buildz-0.7.0/buildz/iocz/__init__.py +0 -3
- {buildz-0.7.0 → buildz-0.7.2}/LICENSE +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/MANIFEST.in +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/__main__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argx.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/argz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/callz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/conf_argz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/conf_callz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/evalx.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/init.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz/test_call.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/argz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/build.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/callz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/evalx.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/test_obj.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/tests/conf.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/argz_bk/testx.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/cache.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/config.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/dbs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/deal_list.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/deal_type.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/defs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/factory.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/init.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/log.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/request.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/run.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/save.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/res/cache/cache.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/res/config/base.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/res/config/config.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/res/data/fp.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/res/data/item1.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/res/data/item2.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/res/data/test.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/test/xtest.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/auto/verify.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/cachez/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/cachez/cache.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/cmd.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/confz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/__main__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/basez.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/clickhousez.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/lib/readme +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/mysqlz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/oraclez.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/orm.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/postgresqlz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/sqlite3z.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/dv/structz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/install.txt +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/run.conf +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/runz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/db/tls.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/ioc/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/ioc/help.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/myers/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/myers/help.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/conf/ioc.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/conf/main.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/conf/myers.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/conf/search.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/conf/xf.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/help/default.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/help/ioc.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/help/myers.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/help/search.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/help/xf.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/res/test.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/search/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/search/help.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/xf/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/demo/xf/help.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/dz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/dz/mapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/evalz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/evalz/evalz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/evalz/res/default.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/evalz/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/fz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/fz/dirz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/fz/fhs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/fz/fio.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/fz/lsf.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/gpuz/torch/seq_middle.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/html/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/html/test/demo.html +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/html/test/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/html/xml.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/init.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc/conf.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc/confs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc/decorator.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc/loads.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc/single.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/branch.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/call.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/calls.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/branch_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/call_defaults.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/call_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/calls_defaults.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/calls_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/deal_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/deals.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/env_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/ioc_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/iocf_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/join_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/list_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/map_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/mcall_defaults.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/mcall_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/obj_cst_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/obj_defaults.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/obj_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/obj_set_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/ovar_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/ref_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/refs_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/var_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/xfile_defaults.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/conf/xfile_lists.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/demo.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/env.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/init.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/ioc.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/iocf.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/join.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/list.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/map.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/mcall.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/obj.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/ovar.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/ref.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/refs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/val.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/var.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/ioc_deal/xfile.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/ioc/wrap.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf/conf.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf/mg.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf/unit.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf/up.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/attr.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/call.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/cvar.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/env.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/ioc.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/method.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/obj.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/ref.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/conf_deal/val.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/init.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/builds.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/confs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/datas.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/dataset.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/encapes.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/envs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/ids.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/init.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/mg.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/single.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/sys_envs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/tdata.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/tdict.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/unit.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc/vars.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc_deal/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc_deal/deal.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc_deal/ioc.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc_deal/obj.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc_deal/ref.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/ioc_deal/val.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/test/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/wrap/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/wrap/default_wraps.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/wrap/env.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/wrap/obj.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/iocz/wrap/wraps.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/logz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/mhttp/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/mhttp/caps.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/mhttp/mhttps.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/sslz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/sslz/gen.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/sslz/gen.pyi +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/test/__main__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/test/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/netz/test/test_cli.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/pathz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/pyz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tls.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tools.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/myers_diff.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/test_xfind.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/time/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/time/timez.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/tio/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/tio/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/tio/getch.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/tio/lx.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/tio/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/tio/win.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/tz/xfind.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/__main__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/code.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/code_modify.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/copy_old.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/file.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/buffer.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/listz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/lr.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/lrval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/mapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/nextz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/reval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/setz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/spc.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/spt.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/deal/strz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/exp.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/item.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/mg.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loader/pos.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/buffer.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/listmapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/listz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/lr.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/lrval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/mapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/nextz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/reval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/setz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/spc.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/spt.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/strz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/strz_new.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/deal/strz_old.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/exp.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/item.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/mg.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/pos.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz/test1.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/buffer.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/listmapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/listz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/lr.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/lrval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/mapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/nextz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/reval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/setz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/spc.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/spt.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/strz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/strz_new.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/deal/strz_old.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/exp.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/item.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/mg.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/pos.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/loaderz_nexp/test1.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/mapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/read.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/readz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/readz_nexp.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/stack.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/write.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/conf.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/deal/jsonval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/deal/listmapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/deal/listz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/deal/mapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/deal/reval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/deal/strz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/itemz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writer/mg.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/base.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/conf.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/deal/jsonval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/deal/listmapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/deal/listz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/deal/mapz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/deal/reval.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/deal/strz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/itemz.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/mg.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writerz/testx.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/writez.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xf/xargs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xz/__init__.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xz/conf.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xz/data.js +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xz/test.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz/xz/trs.py +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz.egg-info/dependency_links.txt +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/buildz.egg-info/top_level.txt +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/notes/notes.txt +0 -0
- {buildz-0.7.0 → buildz-0.7.2}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: buildz
|
3
|
-
Version: 0.7.
|
3
|
+
Version: 0.7.2
|
4
4
|
Summary: 配置读写(基于json格式进行简化)、ioc、以及其他工具代码。a json-base file format's read and write code by python, and codes to read and product object from configure file in such format(ioc), and other tool codes
|
5
5
|
Home-page: https://github.com/buildCodeZ/buildz
|
6
6
|
Author: Zzz
|
@@ -16,6 +16,13 @@ License-File: LICENSE
|
|
16
16
|
禁止将本项目代码用于ai训练
|
17
17
|
declaration:
|
18
18
|
Codes of this project are not allowed to be used for AI training or any other form of machine learning processes.
|
19
|
+
|
20
|
+
使用此代码库发现bug或者有需求需要开发都可联系(QQ或邮箱联系,QQ就是邮箱号)
|
21
|
+
|
22
|
+
邮箱1:1174534295@qq.com
|
23
|
+
|
24
|
+
邮箱2:1309458652@qq.com
|
25
|
+
|
19
26
|
```
|
20
27
|
1,在json格式基础上加了点东西,让配置文件写起来更简单,模块在buildz.xf下
|
21
28
|
2,基于xf格式写了个ioc控制反转配置文件读取的程序,模块在buildz.ioc下
|
@@ -3,6 +3,13 @@
|
|
3
3
|
禁止将本项目代码用于ai训练
|
4
4
|
declaration:
|
5
5
|
Codes of this project are not allowed to be used for AI training or any other form of machine learning processes.
|
6
|
+
|
7
|
+
使用此代码库发现bug或者有需求需要开发都可联系(QQ或邮箱联系,QQ就是邮箱号)
|
8
|
+
|
9
|
+
邮箱1:1174534295@qq.com
|
10
|
+
|
11
|
+
邮箱2:1309458652@qq.com
|
12
|
+
|
6
13
|
```
|
7
14
|
1,在json格式基础上加了点东西,让配置文件写起来更简单,模块在buildz.xf下
|
8
15
|
2,基于xf格式写了个ioc控制反转配置文件读取的程序,模块在buildz.ioc下
|
@@ -0,0 +1,25 @@
|
|
1
|
+
#coding=utf-8
|
2
|
+
|
3
|
+
__version__="0.7.2"
|
4
|
+
|
5
|
+
# 小号多
|
6
|
+
__author__ = "Zzz, emails: 1174534295@qq.com, 1309458652@qq.com"
|
7
|
+
__doc__ = '''
|
8
|
+
使用此代码库发现bug或者有需求需要开发都可联系(QQ或邮箱联系,QQ就是邮箱号)
|
9
|
+
buildz.xf: 简化的json
|
10
|
+
buildz.xz: 简化对dict和list的读写
|
11
|
+
buildz.ioc, buildz.iocz: ioc注入
|
12
|
+
buildz.netz: 网络相关,包括ssl证书生成,正反向代理,端口映射,抓包
|
13
|
+
buildz.db: SQL脚本处理工具,基于其他SQL处理库做了使用简化
|
14
|
+
buildz.html: html文本处理
|
15
|
+
buildz.gpuz: 机器学习用内存做显存的模型缓存,目前只写了基于pytorch的工具,测试卷积和注意力的效果还行
|
16
|
+
buildz.fz: 文件处理
|
17
|
+
buildz.auto: 自动化测试
|
18
|
+
buildz.logz: 简单的日志工具
|
19
|
+
buildz.pyz: 简化python系统相关调用
|
20
|
+
buildz.base: 简化python类代码编写
|
21
|
+
buildz.pathz: 简化文件路径相关的代码编写
|
22
|
+
'''
|
23
|
+
|
24
|
+
from .argx import fetch as args
|
25
|
+
from .base import Base, WBase
|
@@ -32,16 +32,24 @@ def default_fc_opt(net, opt):
|
|
32
32
|
torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
|
33
33
|
opt.step()
|
34
34
|
def analyze(use_cache, use_cuda, mark_train, loop, fc_gen, dataloader, loss_fn, fc_opt, win_size):
|
35
|
-
|
35
|
+
tmp = list(fc_gen())
|
36
|
+
mds = tmp.pop(0)
|
37
|
+
gmodel = tmp.pop(0)
|
38
|
+
opts = tmp.pop(0)
|
39
|
+
gopt = tmp.pop(0)
|
40
|
+
dvs_nets = None
|
41
|
+
if len(tmp)>0:
|
42
|
+
dvs_nets = tmp.pop(0)
|
36
43
|
dv = cuda
|
37
44
|
if not use_cuda:
|
38
45
|
use_cache = False
|
39
46
|
cache = None
|
40
47
|
if use_cache:
|
41
|
-
cache = DictCache([cuda, cpu], mds, opts, win_size, fc_opt)
|
48
|
+
cache = DictCache([cuda, cpu], mds, opts, win_size, fc_opt, dvs_nets)
|
42
49
|
if not use_cuda:
|
43
50
|
dv = cpu
|
44
|
-
|
51
|
+
if not use_cache:
|
52
|
+
gmodel = gmodel.to(dv)
|
45
53
|
s_val = "mean loss"
|
46
54
|
if mark_train:
|
47
55
|
if not use_cache:
|
@@ -59,23 +67,24 @@ def analyze(use_cache, use_cuda, mark_train, loop, fc_gen, dataloader, loss_fn,
|
|
59
67
|
total_loss = 0
|
60
68
|
curr=time.time()
|
61
69
|
if mark_train:
|
62
|
-
for dt in dataloader:
|
70
|
+
for dt,tgt in dataloader:
|
63
71
|
dt=dt.to(dv)
|
72
|
+
tgt = tgt.to(dv)
|
64
73
|
if not use_cache:
|
65
74
|
gopt.zero_grad()
|
66
75
|
out = gmodel(dt)
|
67
|
-
loss = loss_fn(out,
|
76
|
+
loss = loss_fn(out, tgt)
|
68
77
|
loss.backward()
|
69
78
|
fc_opt(gmodel, gopt)
|
70
79
|
else:
|
71
80
|
[opt.zero_grad() for opt in opts] #写gopt.zero_grad()应该也可以,只是删掉之前计算的梯度
|
72
81
|
out = cache.do_forward(lambda :gmodel(dt)) # 其实只是加了勾子函数,实际的计算还是模型计算
|
73
|
-
loss = loss_fn(out,
|
82
|
+
loss = loss_fn(out, tgt)
|
74
83
|
cache.do_backward(lambda : loss.backward()) # 加勾子函数
|
75
84
|
total_loss+=loss.item()
|
76
85
|
else:
|
77
86
|
with torch.no_grad():
|
78
|
-
for dt in dataloader:
|
87
|
+
for dt,tgt in dataloader:
|
79
88
|
dt=dt.to(dv)
|
80
89
|
if not use_cache:
|
81
90
|
out = gmodel(dt)
|
@@ -102,6 +111,7 @@ def analyzes(mark_train, loop, fc_gen, dataloader, loss_fn = None, fc_opt = None
|
|
102
111
|
fc_opt = default_fc_opt
|
103
112
|
if 'gpu' in modes:
|
104
113
|
modes = list(modes)+['cuda']
|
114
|
+
print(f"modes: {modes}")
|
105
115
|
# 正常做法:只用显卡
|
106
116
|
if 'cuda' in modes:
|
107
117
|
print("No Used DictCache")
|
@@ -0,0 +1,84 @@
|
|
1
|
+
|
2
|
+
from buildz.gpuz.torch import DictCache
|
3
|
+
import torch
|
4
|
+
from torch import nn,optim
|
5
|
+
from torch.utils.data import DataLoader, Dataset
|
6
|
+
class TestDataset(Dataset):
|
7
|
+
def __init__(self, num, dims):
|
8
|
+
self.num = num
|
9
|
+
self.dims = dims
|
10
|
+
self.datas = torch.rand(num, dims)
|
11
|
+
self.targets = torch.rand(num, dims)
|
12
|
+
def __len__(self):
|
13
|
+
return self.num
|
14
|
+
def __getitem__(self, i):
|
15
|
+
return self.datas[i], self.targets[i]
|
16
|
+
dims = 12
|
17
|
+
dataset = TestDataset(30, dims)
|
18
|
+
dataloader = DataLoader(dataset, 10)
|
19
|
+
class MiniModel(nn.Module):
|
20
|
+
def __init__(self, dims, mdims, num):
|
21
|
+
super().__init__()
|
22
|
+
nets = [nn.Linear(dims, mdims)]
|
23
|
+
nets += [nn.Linear(mdims,mdims) for i in range(num)]
|
24
|
+
nets.append(nn.Linear(mdims,dims))
|
25
|
+
self.nets = nn.Sequential(*nets)
|
26
|
+
def forward(self, inputs):
|
27
|
+
return self.nets(inputs)
|
28
|
+
models = [MiniModel(dims, 32, 3) for i in range(10)]
|
29
|
+
opts = [optim.Adam(model.parameters(), lr=0.001) for model in models]
|
30
|
+
#可以指定哪些模型全部放cuda或者全部放cpu
|
31
|
+
cuda_models = [models[1],models[2]]
|
32
|
+
cpu_models = [models[-1]]
|
33
|
+
real_model = nn.Sequential(*models)
|
34
|
+
loss_fn = torch.nn.MSELoss()
|
35
|
+
def opt_step(net, opt):
|
36
|
+
# 如果模型只是用来测试,不做训练,可以不传该函数,同时opts传入空就可以
|
37
|
+
# 对模型的一些其他优化,可以写可以不写,主要是调用opt.step()进行当前小模型的模型训练
|
38
|
+
# 另外,opt不一定就是优化函数,可以是任何数据,其只取决于创建DictCache的时候传入的opts是什么
|
39
|
+
torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
|
40
|
+
opt.step()
|
41
|
+
cache = DictCache([torch.device('cuda'), torch.device('cpu')],models,opts,3,opt_step, [cuda_models,cpu_models])
|
42
|
+
|
43
|
+
# 训练:
|
44
|
+
[md.train() for md in models]
|
45
|
+
for inputs,targets in dataloader:
|
46
|
+
inputs,targets = inputs.cuda(),targets.cuda()
|
47
|
+
[opt.zero_grad() for opt in opts]
|
48
|
+
outs = cache.do_forward(lambda:real_model(inputs))
|
49
|
+
loss = loss_fn(outs, targets)
|
50
|
+
cache.do_backward(lambda: loss.backward())
|
51
|
+
# opt.step()在do_backward里会自动调用
|
52
|
+
print(loss.item())
|
53
|
+
|
54
|
+
# 测试:
|
55
|
+
inputs = torch.rand(1, dims).cuda()
|
56
|
+
with torch.no_grad():
|
57
|
+
outputs = cache.do_forward(lambda:real_model(inputs))
|
58
|
+
print(outputs)
|
59
|
+
|
60
|
+
# 对比不用DictCache的时候
|
61
|
+
# 注意:模型放入DictCache之后会挂上勾子函数,不再用DictCache的时要先调用DictCache的remove方法
|
62
|
+
cache.remove()
|
63
|
+
full_opt = optim.Adam(real_model.parameters(), lr=0.001)
|
64
|
+
# 训练:
|
65
|
+
real_model.cuda()
|
66
|
+
real_model.train()
|
67
|
+
for inputs,targets in dataloader:
|
68
|
+
inputs,targets = inputs.cuda(),targets.cuda()
|
69
|
+
full_opt.zero_grad()
|
70
|
+
outs = real_model(inputs)
|
71
|
+
loss = loss_fn(outs, targets)
|
72
|
+
loss.backward()
|
73
|
+
torch.nn.utils.clip_grad_norm_(real_model.parameters(), max_norm=1.0)
|
74
|
+
full_opt.step()
|
75
|
+
print(loss.item())
|
76
|
+
|
77
|
+
pass
|
78
|
+
|
79
|
+
# 测试:
|
80
|
+
inputs = torch.rand(1, dims).cuda()
|
81
|
+
with torch.no_grad():
|
82
|
+
outputs = real_model(inputs)
|
83
|
+
print(outputs)
|
84
|
+
|
@@ -0,0 +1,107 @@
|
|
1
|
+
|
2
|
+
测试结果:
|
3
|
+
训练模式大概能达到纯显卡(75W,更高的没测)二分之一或三分之一的性能,显存不够的可以用用
|
4
|
+
|
5
|
+
环境:
|
6
|
+
笔记本(游戏本)
|
7
|
+
显卡: RTX4060 8GB 75W残血版
|
8
|
+
CPU: i7-13700H
|
9
|
+
内存: 16GB
|
10
|
+
|
11
|
+
1)线性层:
|
12
|
+
训练模式:
|
13
|
+
python -m buildz.gpuz.test.test_linear_demo train cuda,cache,cpu
|
14
|
+
结果:
|
15
|
+
data size: 0.457763671875 MB
|
16
|
+
Model Size: 1.7890334129333496 GB
|
17
|
+
Analyze
|
18
|
+
mean time cost not used DictCache: 4.274392604827881 sec
|
19
|
+
mean time cost using DictCache: 4.48450231552124 sec
|
20
|
+
mean time cost using CPU: 24.141416311264038 sec
|
21
|
+
|
22
|
+
用不用DictCache耗时都是4秒左右,猜测是因为显存足够,可能转内存后显存还在那里做了缓存
|
23
|
+
先占掉4GB显存再测试:
|
24
|
+
占用4GB显存(开另一个命令行窗口执行)
|
25
|
+
python -m buildz.gpuz.test.take_gpu_mem 20
|
26
|
+
时间测试(去掉cuda是因为显存不够,全部用cuda直接报错显存不足,根本用不了):
|
27
|
+
python -m buildz.gpuz.test.test_linear_demo train cache,cpu
|
28
|
+
Analyze
|
29
|
+
mean time cost using DictCache: 12.55063670873642 sec
|
30
|
+
mean time cost using CPU: 23.80371403694153 sec
|
31
|
+
|
32
|
+
用DictCache要12.6秒左右,性能推测是全部用显存的1/3,但起码比CPU快
|
33
|
+
|
34
|
+
测试模式:
|
35
|
+
python -m buildz.gpuz.test.test_linear_demo eval cuda,cache,cpu
|
36
|
+
Analyze
|
37
|
+
mean time cost not used DictCache: 0.022499561309814453 sec
|
38
|
+
mean time cost using DictCache: 1.3205116987228394 sec
|
39
|
+
mean time cost using CPU: 0.3452974557876587 sec
|
40
|
+
|
41
|
+
测试模式DictCache比CPU还慢,以测试结果为准,结论是测试模式线性层有显存用显存,没显存用CPU,尽量别用DictCache,具体原因不清楚
|
42
|
+
|
43
|
+
2)卷积层:
|
44
|
+
训练模式:
|
45
|
+
python -m buildz.gpuz.test.test_resnet_demo train cuda,cache,cpu
|
46
|
+
结果:
|
47
|
+
data size: 60.0 MB
|
48
|
+
Model Size: 2.75390625 MB
|
49
|
+
Analyze
|
50
|
+
mean time cost not used DictCache: 4.0497170554267035 sec
|
51
|
+
mean time cost using DictCache: 8.11503267288208 sec
|
52
|
+
mean time cost using CPU: 60.47528860304091 sec
|
53
|
+
CPU模式下卷积一直很慢,另外别看模型参数很小,全放显存的时候8GB显卡直接占满了
|
54
|
+
|
55
|
+
显存占用一半后计算:
|
56
|
+
python -m buildz.gpuz.test.take_gpu_mem 20
|
57
|
+
#去掉cpu是因为显存对cpu没影响,没必要再测,去掉cuda到不是显存报错了,cuda能运行,但在显存不够的情况下,时间是两分钟左右,比cpu还慢,不测了
|
58
|
+
python -m buildz.gpuz.test.test_resnet_demo train cache
|
59
|
+
Analyze
|
60
|
+
mean time cost using DictCache: 12.734953820705414 sec
|
61
|
+
|
62
|
+
|
63
|
+
测试模式:
|
64
|
+
python -m buildz.gpuz.test.test_resnet_demo eval cuda,cache,cpu
|
65
|
+
Analyze
|
66
|
+
mean time cost not used DictCache: 2.141914208730062 sec
|
67
|
+
mean time cost using DictCache: 2.1802857451968722 sec
|
68
|
+
mean time cost using CPU: 18.77076021830241 sec
|
69
|
+
|
70
|
+
显存足够,卷积层用不用DictCache感觉没啥差别,本来测试模式占用显存就少,不如不用DictCache
|
71
|
+
|
72
|
+
多头注意力
|
73
|
+
训练模式:
|
74
|
+
python -m buildz.gpuz.test.test_atten_demo train cuda,cache,cpu
|
75
|
+
Analyze
|
76
|
+
mean time cost not used DictCache: 1.6817578077316284 sec
|
77
|
+
mean time cost using DictCache: 9.037651598453522 sec
|
78
|
+
mean time cost using CPU: 16.668074309825897 sec
|
79
|
+
显存扣掉3GB
|
80
|
+
python -m buildz.gpuz.test.take_gpu_mem 12
|
81
|
+
python -m buildz.gpuz.test.test_atten_demo train cuda,cache,cpu
|
82
|
+
No Used DictCache
|
83
|
+
train: 0 mean loss: 7.049219369888306 time: 32.67351007461548
|
84
|
+
train: 1 mean loss: 6.776383876800537 time: 26.468174934387207
|
85
|
+
train: 2 mean loss: 6.425022125244141 time: 26.648658752441406
|
86
|
+
train: 3 mean loss: 6.085292339324951 time: 26.714829921722412
|
87
|
+
train: 4 mean loss: 5.76223087310791 time: 26.91315507888794
|
88
|
+
Using DictCache:
|
89
|
+
train: 0 mean loss: 7.036778688430786 time: 14.886789083480835
|
90
|
+
train: 1 mean loss: 6.7514262199401855 time: 8.72349238395691
|
91
|
+
train: 2 mean loss: 6.392669439315796 time: 8.646085023880005
|
92
|
+
看DictCache和之前差不多就没测完直接ctrl+C了,显存不够的时候,多头注意力全放显存比使用CPU还慢
|
93
|
+
目前看卷积和多头注意力都有这种情况,在显存不够情况下和CPU差不多或者比CPU还慢,还不如用DictCache做下缓存
|
94
|
+
|
95
|
+
测试模式:
|
96
|
+
python -m buildz.gpuz.test.test_atten_demo eval cuda,cache,cpu
|
97
|
+
Analyze
|
98
|
+
mean time cost not used DictCache: 0.6104159355163574 sec
|
99
|
+
mean time cost using DictCache: 0.6807375550270081 sec
|
100
|
+
mean time cost using CPU: 6.312976717948914 sec
|
101
|
+
|
102
|
+
|
103
|
+
如果是测试模式,显存不够的时候,可以部分模型全放显存,部分全放内存,建议是卷积和注意力多放显存,线性层多放内存,因为线性层占用空间大
|
104
|
+
convs = [...]
|
105
|
+
linears = [...]
|
106
|
+
cache = DictCache(dvs=[torch.device('cuda'), torch.device('cpu')], dvs_nets = [convs, linears])
|
107
|
+
...
|
@@ -0,0 +1,165 @@
|
|
1
|
+
#
|
2
|
+
|
3
|
+
import sys
|
4
|
+
from buildz.gpuz.torch import DictCache
|
5
|
+
from buildz.gpuz.test import analyze
|
6
|
+
from buildz import pyz
|
7
|
+
import math
|
8
|
+
import torch,time
|
9
|
+
from torch import nn,optim
|
10
|
+
from torch.utils.data import DataLoader, Dataset
|
11
|
+
# Unpack the devices shared by the analyze test harness (cpu first, per analyze.dvs).
cpu,cuda = analyze.dvs
# Module-level alias: lets the attention implementation be swapped in one place.
MultiheadAttention = nn.MultiheadAttention
|
13
|
+
class PostionalEncoding(nn.Module):
|
14
|
+
def __init__(self, word_size, max_length, batch_first=False):
|
15
|
+
super().__init__()
|
16
|
+
pos = torch.arange(max_length).unsqueeze(1)
|
17
|
+
words = torch.arange(word_size)
|
18
|
+
mod2 = words%2
|
19
|
+
offset = torch.pi*0.5*mod2
|
20
|
+
# exp(log(A)*B)等价于A^B
|
21
|
+
x = pos*torch.exp(-math.log(1e4)*(words-mod2)/word_size)
|
22
|
+
data = torch.sin(x+offset)
|
23
|
+
data.requires_grad=False
|
24
|
+
if batch_first:
|
25
|
+
data = data.unsqueeze(0)
|
26
|
+
else:
|
27
|
+
data = data.unsqueeze(1)
|
28
|
+
self.vecs = nn.Parameter(data, requires_grad=False)
|
29
|
+
self.batch_first = batch_first
|
30
|
+
def forward(self, ins):
|
31
|
+
if self.batch_first:
|
32
|
+
ins = ins + self.vecs[:, :ins.size(1)]
|
33
|
+
else:
|
34
|
+
ins = ins + self.vecs[:ins.size(1)]
|
35
|
+
return ins
|
36
|
+
def make_linear(input_size, middle_size, middle_fc):
    """Build a feed-forward block mapping input_size -> input_size.

    With ``middle_size`` set, returns Linear -> [middle_fc()] -> Linear as an
    ``nn.Sequential`` (the activation is inserted only when ``middle_fc`` is
    not None).  Otherwise a single square ``nn.Linear`` is returned.
    """
    if middle_size is None:
        return nn.Linear(input_size, input_size)
    layers = [nn.Linear(input_size, middle_size)]
    if middle_fc is not None:
        layers.append(middle_fc())
    layers.append(nn.Linear(middle_size, input_size))
    return nn.Sequential(*layers)
|
47
|
+
class Decoder(nn.Module):
    """One decoder-only transformer layer (self-attention, no cross-attention).

    Residual + LayerNorm wraps the masked self-attention, then residual +
    LayerNorm wraps the position-wise feed-forward built by ``make_linear``.
    """
    def __init__(self, word_dims, num_heads=1, kv_dims=None, batch_first=False, bias=True, linear_size=None, linear_fc = nn.ReLU):
        super().__init__()
        self.self_multi = MultiheadAttention(
            word_dims, num_heads, bias=bias, batch_first=batch_first,
            kdim=kv_dims, vdim=kv_dims)
        self.linear = make_linear(word_dims, linear_size, linear_fc)
        self.ln1 = nn.LayerNorm(word_dims)
        self.ln2 = nn.LayerNorm(word_dims)
        self.batch_first = batch_first
    def forward(self, outputs, outs_mask):
        """Apply masked self-attention, then the feed-forward sub-block."""
        attended, _ = self.self_multi(outputs, outputs, outputs, attn_mask=outs_mask)
        normed = self.ln1(outputs + attended)
        return self.ln2(normed + self.linear(normed))
|
60
|
+
class DecodePart(nn.Module):
    """Token embedding + positional encoding + a stack of Decoder layers."""
    def __init__(self, num_words, word_dims, sequence_length, num_decoders=6, num_heads=1, kv_dims=None, batch_first=False, bias=True, linear_size=None, linear_fc = nn.ReLU):
        super().__init__()
        self.embedding = nn.Embedding(num_words, word_dims)
        self.pos_encoding = PostionalEncoding(word_dims, sequence_length, batch_first)
        self.num_decoders = num_decoders
        layers = [
            Decoder(word_dims, num_heads, kv_dims, batch_first, bias, linear_size, linear_fc)
            for _ in range(num_decoders)
        ]
        # Keep the plain list too: mds() hands the layers out one by one
        # (presumably so DictCache can manage each layer separately - confirm).
        self.src_decoders = layers
        self.decoders = nn.ModuleList(layers)
    def mds(self):
        """Return the individually-manageable sub-modules."""
        return [self.embedding, self.pos_encoding] + self.src_decoders
    def forward(self, outputs, outs_mask):
        """Embed token ids, add positions, then run every decoder layer."""
        hidden = self.pos_encoding(self.embedding(outputs))
        for layer in self.decoders:
            hidden = layer(hidden, outs_mask)
        return hidden
|
77
|
+
class Chats(nn.Module):
    """Decoder-only model head: DecodePart stack + projection to vocab logits.

    ``mask_index`` >= 0 designates a padding token id whose key positions are
    masked out of self-attention in addition to the causal mask.
    """
    def __init__(self, num_words_outputs, word_dims, sequence_length_outputs, num_decoders=6, num_heads=1, kv_dims=None, batch_first=False, bias=True, linear_size=None, linear_fc = nn.ReLU, mask_index = -1):
        super().__init__()
        self.decode = DecodePart(num_words_outputs, word_dims, sequence_length_outputs, num_decoders, num_heads, word_dims, batch_first, bias, linear_size, linear_fc)
        self.linear = nn.Linear(word_dims, num_words_outputs)
        self.sequence_length_outputs = sequence_length_outputs
        self.mask_index = mask_index
        self.batch_first = batch_first
        self.num_heads = num_heads
    def mds(self):
        """Sub-modules that can be managed (e.g. moved between devices) one by one."""
        return [self.linear]+self.decode.mds()
    def gen_masks(self, outputs):
        """Build the (batch*num_heads, L, L) boolean attention mask.

        Combines the causal upper-triangular mask with an optional padding
        mask (positions equal to ``mask_index``).  True means "blocked".
        """
        if self.batch_first:
            batch_id, dt_id = 0, 1
        else:
            batch_id, dt_id = 1, 0
        batch_size, outs_len = outputs.size(batch_id), outputs.size(dt_id)
        # BUGFIX: allocate the mask on the input's device.  The previous code
        # built it on the CPU, so `outs_mask | outs_mask_1` raised a
        # cross-device RuntimeError whenever `outputs` was on the GPU and
        # mask_index was enabled.
        mask = torch.ones(batch_size, self.num_heads, outs_len, outs_len, device=outputs.device)
        mask = torch.triu(mask, diagonal=1)  # block strictly-future positions
        outs_mask = mask.to(torch.bool)
        if self.mask_index >= 0:
            # (B, L) -> (B, 1, 1, L): block every query from padded key positions.
            outs_mask_1 = (outputs == self.mask_index).unsqueeze(1).unsqueeze(1)
            if not self.batch_first:
                # sequence-first input is (L, B): rearrange to (B, 1, 1, L).
                outs_mask_1 = outs_mask_1.transpose(0, 3)
            if self.num_heads > 1 or outs_len > 1:
                outs_mask_1 = outs_mask_1.expand(batch_size, self.num_heads, outs_len, outs_len)
            outs_mask = outs_mask | outs_mask_1
        # MultiheadAttention accepts a (B*num_heads, L, L) bool attn_mask.
        outs_mask = outs_mask.reshape(-1, outs_len, outs_len).bool().to(outputs.device)
        return outs_mask
    def forward(self, outputs):
        """Run token ids through the decoder stack, return per-token vocab logits."""
        outs_mask = self.gen_masks(outputs)
        rst = self.decode(outputs, outs_mask)
        rst = self.linear(rst)
        return rst
|
111
|
+
|
112
|
+
class TestDataset(Dataset):
    """Synthetic language-model dataset: every item is (tokens, tokens)."""
    def __init__(self, num, std_len, words):
        self.num = num
        # num sequences of std_len random token ids in [0, words).
        self.datas = torch.randint(0, words, (num, std_len))
        nbytes, unit = analyze.show_size(analyze.sz(self.datas))
        print(f"data size: {nbytes} {unit}")
    def __len__(self):
        return self.num
    def __getitem__(self, i):
        # Input and target are the same sequence (reconstruction task).
        sample = self.datas[i]
        return sample, sample
|
123
|
+
|
124
|
+
pass
|
125
|
+
|
126
|
+
def test():
    """CLI benchmark driver for the attention model.

    argv: [train|eval] [modes-csv] [num_decoders], e.g.
    ``python -m buildz.gpuz.test.test_atten_demo train cuda,cache,cpu``.
    Delegates timing/comparison to analyze.analyzes.
    """
    # Model / run hyper-parameters (some overridable via argv below).
    num_words_outputs = 1024
    word_dims = 512
    sequence_length_outputs = 512
    num_decoders=12
    num_heads=8
    loop = 5
    num_datas = 60
    batch=30
    lr=0.0001
    win_size=3
    args = sys.argv[1:]
    mark_train = True
    if len(args)>0:
        # Any first argument other than "train" selects eval mode.
        mark_train = args.pop(0).lower()=='train'
    modes = 'cuda,cache,cpu'
    if len(args)>0:
        modes = args.pop(0)
    if len(args)>0:
        num_decoders = int(args.pop(0))
    print(f"num_decoders: {num_decoders}")
    ds = TestDataset(num_datas, sequence_length_outputs, num_words_outputs)
    dl = DataLoader(ds, batch)
    def fc_gen():
        # Fresh model plus one Adam per sub-module (DictCache path) and one
        # global Adam over the whole model (baseline path).
        gmodel = Chats(num_words_outputs, word_dims, sequence_length_outputs, num_decoders, num_heads, batch_first=1)
        mds = gmodel.mds()
        opts =[optim.Adam(md.parameters(), lr=lr) for md in mds]
        gopt = optim.Adam(gmodel.parameters(), lr=lr)
        return mds, gmodel, opts, gopt
    def fc_opt(net, opt):
        # Per-submodule step: clip gradients to norm 1, then step.
        torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
        opt.step()
    loss_fn = nn.CrossEntropyLoss()
    def wrap_fn(outs, targets):
        # Flatten (batch, seq, vocab) logits and (batch, seq) ids for CE loss.
        outs = outs.view(-1, outs.shape[-1])
        targets = targets.reshape(-1)
        return loss_fn(outs, targets)
    analyze.analyzes(mark_train, loop, fc_gen, dl, wrap_fn, fc_opt, win_size, modes)
|
164
|
+
|
165
|
+
# Entry point: pyz.lc presumably invokes test() with the module's locals
# as context (buildz helper) - confirm against buildz.pyz.
pyz.lc(locals(),test)
|
@@ -0,0 +1,147 @@
|
|
1
|
+
#
|
2
|
+
|
3
|
+
import sys
|
4
|
+
from buildz.gpuz.torch import DictCache
|
5
|
+
from buildz.gpuz.test import analyze
|
6
|
+
from buildz import pyz
|
7
|
+
import torch,time
|
8
|
+
from torch import nn,optim
|
9
|
+
from torch.utils.data import DataLoader, Dataset
|
10
|
+
# Unpack the devices shared by the analyze test harness (cpu first, per analyze.dvs).
cpu,cuda = analyze.dvs
|
11
|
+
class ConvModel(nn.Module):
    """A single 5x5 same-padding convolution followed by LeakyReLU.

    ``dims`` (the spatial size) is accepted for interface symmetry with the
    other builders but is not used by the layer itself.
    """
    def __init__(self, dims, ins_channels, middle_channels):
        super().__init__()
        layers = [
            nn.Conv2d(ins_channels, middle_channels, 5, padding=2),
            nn.LeakyReLU(),
        ]
        self.nets = nn.Sequential(*layers)
    def forward(self, inputs):
        return self.nets(inputs)
    def size(self):
        """Total size of the wrapped layers, via the analyze helper."""
        return sum(analyze.unit_sz(layer) for layer in self.nets)
|
22
|
+
|
23
|
+
pass
|
24
|
+
class UNet(nn.Module):
    """One recursion level of a U-Net-style model.

    encoder -> optional inner ``dealer`` stage -> channel-wise concat of the
    encoder output with the inner output -> decoder.  At the innermost level
    (``dealer`` is None) the encoder output is concatenated with itself, so
    the decoder always receives twice the encoder's output channels.
    """
    def __init__(self, encoder, dealer, decoder):
        super().__init__()
        self.encoder = encoder
        self.dealer = dealer
        self.decoder = decoder
    def forward(self, inputs):
        encoded = self.encoder(inputs)
        inner = encoded if self.dealer is None else self.dealer(encoded)
        merged = torch.cat([encoded, inner], dim=1)
        return self.decoder(merged)
|
35
|
+
class ResNet(nn.Module):
    """Residual wrapper: forward(x) = x + net(x)."""
    def __init__(self, net):
        super().__init__()
        self.net = net
    def forward(self, inputs):
        residual = self.net(inputs)
        return inputs + residual
|
41
|
+
class TestDataset(Dataset):
    """Random image-like inputs paired with random regression targets."""
    def __init__(self, num, dims, channels, outs_dims):
        self.num = num
        self.dims = dims
        self.datas = torch.rand(num, channels, dims, dims)
        self.targets = torch.rand(num, outs_dims)
        total = analyze.sz(self.datas) + analyze.sz(self.targets)
        sz, unit = analyze.show_size(total)
        print(f"data size: {sz} {unit}")
    def __len__(self):
        return self.num
    def __getitem__(self, i):
        return self.datas[i], self.targets[i]
|
54
|
+
|
55
|
+
pass
|
56
|
+
class LnsNet(nn.Module):
    """Flatten plus a chain of Linear/LeakyReLU layers with interpolated widths.

    Layer i's output width is the integer part of the linear interpolation
    between ``input_dims`` and ``outs_dims`` at fraction (i+1)/num, so the
    widths move smoothly toward ``outs_dims``.
    """
    def __init__(self, input_dims, outs_dims, num):
        super().__init__()
        layers = []
        width = input_dims
        for step in range(num):
            frac = (step + 1) / num
            next_width = int(input_dims * (1 - frac) + outs_dims * frac)
            layers += [nn.Linear(width, next_width), nn.LeakyReLU()]
            width = next_width
        self.nets = nn.Sequential(*layers)
    def forward(self, inputs):
        # Flatten everything but the batch dimension before the linear chain.
        flat = inputs.reshape(inputs.size(0), -1)
        return self.nets(flat)
    def size(self):
        """Total size of the wrapped layers, via the analyze helper."""
        return sum(analyze.unit_sz(layer) for layer in self.nets)
|
75
|
+
|
76
|
+
def gen(dims, outs_dims, channels, num_conv, num_ln, lr):
    """
    Build a nested UNet-style conv model followed by a linear head.

    NOTE(review): the original (Chinese) docstring said the U-Net "cat" was
    replaced by a resnet-style add, but UNet.forward concatenates - the
    comment looks stale; confirm against the package history.

    Returns (mds, fullnet, opts, gopt, [gmds, None]):
      mds     - individually-manageable sub-modules (encoders, decoders, lns)
      fullnet - nn.Sequential(conv U-Net, linear head)
      opts    - one Adam per entry of mds
      gopt    - a single Adam over the whole fullnet
      [gmds, None] - presumably a per-device net grouping for DictCache
                     (first group = [lns]) - TODO confirm against analyze.
    """
    mds = []
    curr = channels
    encoders = []
    decoders = []
    # Level built with entry channels c: encoder maps c -> 2c; its decoder
    # consumes the concat of encoder output and inner output (4c) -> c.
    for i in range(num_conv):
        encoder = ConvModel(dims, curr, curr*2)
        decoder = ConvModel(dims, curr*4, curr)
        curr*=2
        encoders.append(encoder)
        decoders.append(decoder)
    base = None
    mds += encoders
    mds+=decoders
    print(f"curr:{curr}")
    # Nest the levels inside-out: the deepest UNet level has dealer=None.
    for i in range(num_conv):
        e = encoders[num_conv-i-1]
        d = decoders[num_conv-i-1]
        base = UNet(e, base, d)
    cvnets = base
    input_dims = dims*dims*channels
    lns = LnsNet(input_dims, outs_dims, num_ln)
    print(lns)
    mds.append(lns)
    gmds = [lns]
    mds_sz = [md.size() for md in mds]
    fullnet = nn.Sequential(cvnets, lns)
    opts =[optim.Adam(md.parameters(), lr=lr) for md in mds]
    gopt = optim.Adam(fullnet.parameters(), lr=lr)
    sz, unit = analyze.show_size(sum(mds_sz))
    print(f"Model Size: {sz} {unit}, mds: {len(mds)}")
    return mds, fullnet, opts, gopt, [gmds, None]
|
111
|
+
|
112
|
+
|
113
|
+
|
114
|
+
def fc_opt(net, opt):
    """Per-submodule training step: clip gradients to norm 1.0, then step."""
    torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
    opt.step()
|
117
|
+
def test():
    """CLI benchmark driver for the UNet+linear model.

    argv: [train|eval] [modes-csv] [num_conv] [num_ln], e.g.
    ``python -m buildz.gpuz.test.test_multi_demo train cuda,cache,cpu``.
    Delegates timing/comparison to analyze.analyzes.
    """
    # Data / model hyper-parameters (some overridable via argv below).
    channels=3
    dims= 32
    out_dims = 10
    loop = 10
    datas = 6
    batch=2
    lr=0.0001
    win_size=3
    num_conv = 6
    num_ln = 2
    args = sys.argv[1:]
    mark_train = True
    if len(args)>0:
        # Any first argument other than "train" selects eval mode.
        mark_train = args.pop(0).lower()=='train'
    modes = 'cuda,cache,cpu'
    if len(args)>0:
        modes = args.pop(0)
    if len(args)>0:
        num_conv = int(args.pop(0))
    if len(args)>0:
        num_ln = int(args.pop(0))
    print(f"num_conv: {num_conv}, num_ln: {num_ln}")
    ds = TestDataset(datas, dims, channels, out_dims)
    dl = DataLoader(ds, batch)
    loss_fn = torch.nn.MSELoss()
    def fc_gen():
        # Build a fresh model bundle for each benchmarked mode.
        return gen(dims, out_dims, channels, num_conv, num_ln, lr)
    analyze.analyzes(mark_train, loop, fc_gen, dl, loss_fn, fc_opt, win_size, modes)
|
146
|
+
|
147
|
+
# Entry point: pyz.lc presumably invokes test() with the module's locals
# as context (buildz helper) - confirm against buildz.pyz.
pyz.lc(locals(),test)
|
@@ -40,7 +40,7 @@ class TestDataset(Dataset):
|
|
40
40
|
def __len__(self):
|
41
41
|
return self.n
|
42
42
|
def __getitem__(self, i):
|
43
|
-
return self.datas[i]
|
43
|
+
return self.datas[i], self.datas[i]
|
44
44
|
|
45
45
|
pass
|
46
46
|
def gen(dims, nets_num, channels, middle_channels, num_conv, lr):
|
@@ -99,6 +99,6 @@ def test():
|
|
99
99
|
loss_fn = torch.nn.MSELoss()
|
100
100
|
def fc_gen():
|
101
101
|
return gen(dims, nets, channels, middle_channels, num_conv, lr)
|
102
|
-
analyze.analyzes(mark_train, loop, fc_gen, dl, loss_fn, fc_opt, win_size,
|
102
|
+
analyze.analyzes(mark_train, loop, fc_gen, dl, loss_fn, fc_opt, win_size, modes)
|
103
103
|
|
104
104
|
pyz.lc(locals(),test)
|