buildz 0.6.59__tar.gz → 0.6.60__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {buildz-0.6.59/buildz.egg-info → buildz-0.6.60}/PKG-INFO +1 -1
- {buildz-0.6.59 → buildz-0.6.60}/buildz/__init__.py +1 -1
- buildz-0.6.60/buildz/gpuz/test/report.txt +50 -0
- buildz-0.6.60/buildz/gpuz/test/test_middle.py +118 -0
- buildz-0.6.60/buildz/gpuz/test/test_middle_conv.py +118 -0
- buildz-0.6.60/buildz/gpuz/test/test_middle_conv1.py +120 -0
- buildz-0.6.60/buildz/gpuz/torch/__init__.py +8 -0
- buildz-0.6.60/buildz/gpuz/torch/middlez.py +181 -0
- {buildz-0.6.59 → buildz-0.6.60/buildz.egg-info}/PKG-INFO +1 -1
- {buildz-0.6.59 → buildz-0.6.60}/buildz.egg-info/SOURCES.txt +6 -0
- {buildz-0.6.59 → buildz-0.6.60}/setup.py +1 -1
- {buildz-0.6.59 → buildz-0.6.60}/LICENSE +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/MANIFEST.in +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/README.md +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/__main__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argx.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/argz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/callz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/conf_argz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/conf_callz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/evalx.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/init.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz/test_call.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/argz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/build.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/callz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/evalx.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/test_obj.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/tests/conf.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/argz_bk/testx.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/cache.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/config.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/dbs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/deal_list.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/deal_type.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/defs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/factory.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/init.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/log.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/request.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/run.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/save.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/res/cache/cache.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/res/config/base.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/res/config/config.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/res/data/fp.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/res/data/item1.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/res/data/item2.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/res/data/test.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/test/xtest.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/auto/verify.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/cachez/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/cachez/cache.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/cmd.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/confz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/__main__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/basez.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/clickhousez.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/lib/readme +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/mysqlz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/oraclez.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/orm.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/postgresqlz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/sqlite3z.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/dv/structz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/install.txt +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/run.conf +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/runz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/db/tls.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/ioc/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/ioc/help.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/myers/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/myers/help.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/conf/ioc.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/conf/main.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/conf/myers.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/conf/search.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/conf/xf.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/help/default.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/help/ioc.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/help/myers.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/help/search.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/help/xf.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/res/test.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/search/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/search/help.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/xf/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/demo/xf/help.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/dz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/dz/mapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/evalz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/evalz/evalz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/evalz/res/default.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/evalz/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/fz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/fz/dirz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/fz/fhs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/fz/fio.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/fz/lsf.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/html/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/html/test/demo.html +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/html/test/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/html/xml.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/init.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc/conf.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc/confs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc/decorator.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc/loads.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc/single.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/branch.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/call.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/calls.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/branch_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/call_defaults.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/call_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/calls_defaults.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/calls_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/deal_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/deals.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/env_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/ioc_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/iocf_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/join_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/list_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/map_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/mcall_defaults.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/mcall_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/obj_cst_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/obj_defaults.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/obj_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/obj_set_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/ovar_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/ref_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/refs_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/var_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/xfile_defaults.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/conf/xfile_lists.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/demo.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/env.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/init.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/ioc.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/iocf.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/join.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/list.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/map.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/mcall.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/obj.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/ovar.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/ref.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/refs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/val.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/var.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/ioc_deal/xfile.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/ioc/wrap.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf/conf.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf/mg.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf/unit.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf/up.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/attr.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/call.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/cvar.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/env.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/ioc.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/method.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/obj.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/ref.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/conf_deal/val.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/init.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/builds.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/confs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/datas.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/dataset.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/encapes.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/envs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/ids.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/init.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/mg.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/single.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/sys_envs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/tdata.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/tdict.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/unit.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc/vars.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc_deal/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc_deal/deal.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc_deal/ioc.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc_deal/obj.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc_deal/ref.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/ioc_deal/val.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/test/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/wrap/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/wrap/default_wraps.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/wrap/env.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/wrap/obj.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/iocz/wrap/wraps.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/logz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/mhttp/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/mhttp/caps.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/mhttp/gateway.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/mhttp/mhttp.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/mhttp/mhttps.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/mhttp/proxy.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/mhttp/record.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/sslz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/sslz/gen.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/sslz/gen.pyi +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/test/__main__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/test/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/test/test_cli.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/netz/test/test_gw.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/pathz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/pyz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tls.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tools.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/myers_diff.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/test_xfind.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/time/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/time/timez.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/tio/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/tio/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/tio/getch.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/tio/lx.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/tio/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/tio/win.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/tz/xfind.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/__main__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/code.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/code_modify.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/copy_old.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/file.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/buffer.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/listz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/lr.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/lrval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/mapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/nextz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/reval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/setz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/spc.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/spt.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/deal/strz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/exp.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/item.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/mg.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loader/pos.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/buffer.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/listmapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/listz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/lr.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/lrval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/mapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/nextz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/reval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/setz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/spc.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/spt.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/strz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/strz_new.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/deal/strz_old.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/exp.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/item.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/mg.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/pos.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz/test1.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/buffer.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/listmapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/listz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/lr.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/lrval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/mapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/nextz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/reval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/setz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/spc.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/spt.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/strz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/strz_new.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/deal/strz_old.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/exp.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/item.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/mg.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/pos.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/loaderz_nexp/test1.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/mapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/read.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/readz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/readz_nexp.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/stack.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/write.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/conf.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/deal/jsonval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/deal/listmapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/deal/listz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/deal/mapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/deal/reval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/deal/strz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/itemz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writer/mg.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/base.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/conf.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/deal/jsonval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/deal/listmapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/deal/listz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/deal/mapz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/deal/reval.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/deal/strz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/itemz.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/mg.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writerz/testx.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/writez.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xf/xargs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xz/__init__.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xz/conf.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xz/data.js +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xz/test.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz/xz/trs.py +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz.egg-info/dependency_links.txt +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/buildz.egg-info/top_level.txt +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/notes/notes.txt +0 -0
- {buildz-0.6.59 → buildz-0.6.60}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: buildz
|
3
|
-
Version: 0.6.
|
3
|
+
Version: 0.6.60
|
4
4
|
Summary: 配置读写(基于json格式进行简化)、ioc、以及其他工具代码。a json-base file format's read and write code by python, and codes to read and product object from configure file in such format(ioc), and other tool codes
|
5
5
|
Home-page: https://github.com/buildCodeZ/buildz
|
6
6
|
Author: Zzz
|
@@ -0,0 +1,50 @@
|
|
1
|
+
|
2
|
+
测试结果:
|
3
|
+
结论是:大概能达到纯显卡(75W,更高的没测)二分之一或三分之一的性能,显存不够的可以用用,主要是卷积层,卷积层cpu计算耗时很大,并且神经网络参数相对较少,线性层感觉cpu和显卡本来就差不太多
|
4
|
+
|
5
|
+
环境:
|
6
|
+
笔记本(游戏本)
|
7
|
+
显卡: RTX4060 8GB 75W残血版
|
8
|
+
CPU: i7-13700H
|
9
|
+
内存: 16GB
|
10
|
+
|
11
|
+
1)线性层测试:
|
12
|
+
模型:
|
13
|
+
float32,120层,每层的输入输出维度都是2000,数据量1.789GB,每个线性层后面加一个LeakyReLU激活函数
|
14
|
+
数据:
|
15
|
+
60个数据,每个数据2000维度,分成两个批量(每个批量30个数据),数据量0.000447GB
|
16
|
+
|
17
|
+
训练一次(两个批量的数据)平均耗时
|
18
|
+
纯显卡: 4.13秒
|
19
|
+
纯CPU耗时: 14.7秒
|
20
|
+
模型分层,轮流传显卡里计算: 3.67秒(因为显存足够,实际上等于纯显卡)
|
21
|
+
模型分层,显卡已经被使用2.3GB: 6.5秒
|
22
|
+
模型分层,显卡已经被使用3.9GB: 9.5秒
|
23
|
+
|
24
|
+
2)卷积测试:
|
25
|
+
模型:
|
26
|
+
float32,60层,卷积核输入输出channel都是3,卷积核大小5,数据量0.00005GB,每个线性层后面加一个LeakyReLU激活函数
|
27
|
+
数据:
|
28
|
+
20个数据,每个数据3个通道*512宽*512高,分成两个批量(每个批量10个数据),数据量0.059GB
|
29
|
+
|
30
|
+
训练一次(两个批量的数据)平均耗时
|
31
|
+
纯显卡: 1.52秒
|
32
|
+
纯CPU耗时: 41秒
|
33
|
+
模型分层,轮流传显卡里计算: 3.36秒(因为显存足够,实际上等于纯显卡)
|
34
|
+
纯显卡,显卡已经被使用3.9GB: 23秒(cuda+torch奇葩的地方出现了,猜测是在显存不够的时候,舍弃一些计算反向梯度要用的缓存,反向梯度的时候重新计算,导致时间开销增大很多)
|
35
|
+
模型分层,显卡已经被使用3.9GB: 3.3秒
|
36
|
+
|
37
|
+
|
38
|
+
|
39
|
+
3)卷积测试1(test_middle_conv1.py):
|
40
|
+
模型:
|
41
|
+
float32,22层网络,卷积核大小5,每层网络两层卷积(30通道到60通道一层卷积,60通道回到30通道一层卷积)再加一个LeakyReLU激活函数,数据量0.0074GB,每个线性层后面加一个LeakyReLU激活函数
|
42
|
+
数据:
|
43
|
+
6个数据,每个数据30个通道*512宽*512高,分成三个批量(每个批量2个数据),数据量0.059GB
|
44
|
+
|
45
|
+
训练一次(两个批量的数据)平均耗时
|
46
|
+
纯显卡: 3.73秒
|
47
|
+
纯CPU耗时: 44秒
|
48
|
+
模型分层,轮流传显卡里计算: 7.5秒(因为显存足够,实际上等于纯显卡)
|
49
|
+
纯显卡,显卡已经被使用3.9GB: 116秒
|
50
|
+
模型分层,显卡已经被使用3.9GB: 7.7秒
|
@@ -0,0 +1,118 @@
|
|
1
|
+
#
|
2
|
+
|
3
|
+
from buildz.gpuz.torch import CacheModel
|
4
|
+
import sys
|
5
|
+
import torch,time
|
6
|
+
from torch import nn,optim
|
7
|
+
from torch.utils.data import DataLoader, Dataset
|
8
|
+
cpu = torch.device('cpu')
|
9
|
+
cuda = cpu
|
10
|
+
if torch.cuda.is_available():
|
11
|
+
cuda = torch.device('cuda')
|
12
|
+
def sz(tensor):
|
13
|
+
return tensor.element_size()*tensor.nelement()
|
14
|
+
class Model(nn.Module):
|
15
|
+
def __init__(self, dims, num):
|
16
|
+
super().__init__()
|
17
|
+
#nets = [nn.Conv2d(3, 3, 5, padding=2) for i in range(num)]
|
18
|
+
nets=[]
|
19
|
+
for i in range(num):
|
20
|
+
nets.append(nn.Linear(dims,dims))
|
21
|
+
nets.append(nn.LeakyReLU())
|
22
|
+
#nets = [nn.Linear(dims,dims) for i in range(num)]
|
23
|
+
print(f"nets:{len(nets)}")
|
24
|
+
self.nets = nn.Sequential(*nets)
|
25
|
+
def forward(self, inputs):
|
26
|
+
return self.nets(inputs)
|
27
|
+
def size(self):
|
28
|
+
total = 0.0
|
29
|
+
for net in self.nets:
|
30
|
+
if not hasattr(net, "weight"):
|
31
|
+
continue
|
32
|
+
w = net.weight
|
33
|
+
b = net.bias
|
34
|
+
total+=sz(w)+sz(b)
|
35
|
+
return total
|
36
|
+
|
37
|
+
pass
|
38
|
+
class TestDataset(Dataset):
|
39
|
+
def __init__(self, n, dims):
|
40
|
+
self.n = n
|
41
|
+
self.dims = dims
|
42
|
+
self.datas = torch.rand(n, dims)
|
43
|
+
print(f"data size: {sz(self.datas)/1024/1024/1024} GB")
|
44
|
+
def __len__(self):
|
45
|
+
return self.n
|
46
|
+
def __getitem__(self, i):
|
47
|
+
return self.datas[i]
|
48
|
+
return torch.rand(self.dims)
|
49
|
+
|
50
|
+
pass
|
51
|
+
def fc_opt(net, opt):
|
52
|
+
#torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
|
53
|
+
opt.step()
|
54
|
+
def test():
|
55
|
+
nets=10
|
56
|
+
dims=2000
|
57
|
+
trains = 5
|
58
|
+
datas = 60
|
59
|
+
batch=30
|
60
|
+
lr=0.0001
|
61
|
+
win_size=3
|
62
|
+
num = 12
|
63
|
+
mds = [Model(dims, nets) for i in range(num)]
|
64
|
+
mds_sz = [md.size() for md in mds]
|
65
|
+
print(f"Model Size: {sum(mds_sz)/1024/1024/1024} GB")
|
66
|
+
opts =[optim.Adam(md.parameters(), lr=lr) for md in mds]
|
67
|
+
cuda=cpu
|
68
|
+
ds = TestDataset(datas, dims)
|
69
|
+
dl = DataLoader(ds, batch)
|
70
|
+
#return
|
71
|
+
loss_fn = torch.nn.MSELoss()
|
72
|
+
print("start train")
|
73
|
+
nets= []
|
74
|
+
for md in mds:
|
75
|
+
nets+=md.nets
|
76
|
+
gmodel = nn.Sequential(*nets)
|
77
|
+
gmodel = gmodel.to(cuda)
|
78
|
+
gopt = optim.Adam(gmodel.parameters(), lr=lr)
|
79
|
+
gmodel.train()
|
80
|
+
#with torch.no_grad():
|
81
|
+
for i in range(trains):
|
82
|
+
total_loss = 0
|
83
|
+
curr=time.time()
|
84
|
+
for dt in dl:
|
85
|
+
dt=dt.to(cuda)
|
86
|
+
gopt.zero_grad()
|
87
|
+
out = gmodel(dt)
|
88
|
+
loss = loss_fn(out, dt)
|
89
|
+
print(f"loss: {loss, type(loss)}")
|
90
|
+
loss.backward()
|
91
|
+
gopt.step()
|
92
|
+
total_loss+=loss.item()
|
93
|
+
sec = time.time()-curr
|
94
|
+
print("train:", i, "loss:", total_loss/len(dl), "time:", sec)
|
95
|
+
del gmodel,gopt
|
96
|
+
torch.cuda.empty_cache()
|
97
|
+
input("start middle:")
|
98
|
+
md = CacheModel(cuda, cpu, mds, opts, win_size, fc_opt)
|
99
|
+
md.nfc("train")
|
100
|
+
#with torch.no_grad():
|
101
|
+
for i in range(trains):
|
102
|
+
total_loss = 0
|
103
|
+
curr=time.time()
|
104
|
+
for dt in dl:
|
105
|
+
dt=dt.to(cuda)
|
106
|
+
[opt.zero_grad() for opt in opts]
|
107
|
+
out = md.do_forward(dt)
|
108
|
+
loss = loss_fn(out, dt)
|
109
|
+
print(f"loss: {loss, type(loss)}")
|
110
|
+
md.do_backward(lambda : loss.backward())
|
111
|
+
total_loss+=loss.item()
|
112
|
+
sec = time.time()-curr
|
113
|
+
print("train:", i, "loss:", total_loss/len(dl), "time:", sec)
|
114
|
+
|
115
|
+
|
116
|
+
|
117
|
+
pass
|
118
|
+
test()
|
@@ -0,0 +1,118 @@
|
|
1
|
+
#
|
2
|
+
|
3
|
+
from buildz.gpuz.torch import CacheModel
|
4
|
+
import sys
|
5
|
+
import torch,time
|
6
|
+
from torch import nn,optim
|
7
|
+
from torch.utils.data import DataLoader, Dataset
|
8
|
+
cpu = torch.device('cpu')
|
9
|
+
cuda = cpu
|
10
|
+
if torch.cuda.is_available():
|
11
|
+
cuda = torch.device('cuda')
|
12
|
+
def sz(tensor):
|
13
|
+
return tensor.element_size()*tensor.nelement()
|
14
|
+
class Model(nn.Module):
|
15
|
+
def __init__(self, dims, num):
|
16
|
+
super().__init__()
|
17
|
+
#nets = [nn.Conv2d(3, 3, 5, padding=2) for i in range(num)]
|
18
|
+
nets=[]
|
19
|
+
for i in range(num):
|
20
|
+
nets.append(nn.Conv2d(3, 3, 5, padding=2))
|
21
|
+
nets.append(nn.LeakyReLU())
|
22
|
+
#nets = [nn.Linear(dims,dims) for i in range(num)]
|
23
|
+
print(f"nets:{len(nets)}")
|
24
|
+
self.nets = nn.Sequential(*nets)
|
25
|
+
def forward(self, inputs):
|
26
|
+
return self.nets(inputs)
|
27
|
+
def size(self):
|
28
|
+
total = 0.0
|
29
|
+
for net in self.nets:
|
30
|
+
if not hasattr(net, "weight"):
|
31
|
+
continue
|
32
|
+
w = net.weight
|
33
|
+
b = net.bias
|
34
|
+
total+=sz(w)+sz(b)
|
35
|
+
return total
|
36
|
+
|
37
|
+
pass
|
38
|
+
class TestDataset(Dataset):
|
39
|
+
def __init__(self, n, dims):
|
40
|
+
self.n = n
|
41
|
+
self.dims = dims
|
42
|
+
self.datas = torch.rand(n, 3,dims,dims)
|
43
|
+
print(f"data size: {sz(self.datas)/1024/1024/1024} GB")
|
44
|
+
def __len__(self):
|
45
|
+
return self.n
|
46
|
+
def __getitem__(self, i):
|
47
|
+
return self.datas[i]
|
48
|
+
return torch.rand(self.dims)
|
49
|
+
|
50
|
+
pass
|
51
|
+
def fc_opt(net, opt):
|
52
|
+
#torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
|
53
|
+
opt.step()
|
54
|
+
def test():
    """Benchmark: train a monolithic GPU model, then the same layers via CacheModel.

    Phase 1 fuses every sub-model into one nn.Sequential resident on the GPU
    and times plain training.  Phase 2 rebuilds the same sub-models behind
    CacheModel, which pages a window of them between CPU and GPU, and times
    that for comparison.
    """
    # Hyper-parameters of the synthetic reconstruction benchmark.
    nets=5
    dims=512
    trains = 5
    datas = 20
    batch=10
    lr=0.0001
    win_size=3
    num = 12
    # One optimizer per sub-model so CacheModel can step each independently.
    mds = [Model(dims, nets) for i in range(num)]
    mds_sz = [md.size() for md in mds]
    print(f"Model Size: {sum(mds_sz)/1024/1024/1024} GB")
    opts =[optim.Adam(md.parameters(), lr=lr) for md in mds]
    #cuda=cpu
    ds = TestDataset(datas, dims)
    dl = DataLoader(ds, batch)
    #return
    # Inputs double as targets: the stack learns to reconstruct its input.
    loss_fn = torch.nn.MSELoss()
    print("start train")
    # Phase 1: flatten all sub-model layers into one GPU-resident model.
    nets= []
    for md in mds:
        nets+=md.nets
    gmodel = nn.Sequential(*nets)
    gmodel = gmodel.to(cuda)
    gopt = optim.Adam(gmodel.parameters(), lr=lr)
    gmodel.train()
    #with torch.no_grad():
    for i in range(trains):
        total_loss = 0
        curr=time.time()
        for dt in dl:
            dt=dt.to(cuda)
            gopt.zero_grad()
            out = gmodel(dt)
            loss = loss_fn(out, dt)
            print(f"loss: {loss, type(loss)}")
            loss.backward()
            gopt.step()
            total_loss+=loss.item()
        sec = time.time()-curr
        print("train:", i, "loss:", total_loss/len(dl), "time:", sec)
    # Release the monolithic model before phase 2 so the GPU has room for
    # CacheModel's sliding window of sub-models.
    del gmodel,gopt
    torch.cuda.empty_cache()
    input("start middle:")
    # Phase 2: same layers, paged in and out of the GPU by CacheModel.
    md = CacheModel(cuda, cpu, mds, opts, win_size, fc_opt)
    md.nfc("train")
    #with torch.no_grad():
    for i in range(trains):
        total_loss = 0
        curr=time.time()
        for dt in dl:
            dt=dt.to(cuda)
            [opt.zero_grad() for opt in opts]
            out = md.do_forward(dt)
            loss = loss_fn(out, dt)
            print(f"loss: {loss, type(loss)}")
            # do_backward runs loss.backward() and steps each optimizer via fc_opt.
            md.do_backward(lambda : loss.backward())
            total_loss+=loss.item()
        sec = time.time()-curr
        print("train:", i, "loss:", total_loss/len(dl), "time:", sec)



    pass
test()
|
@@ -0,0 +1,120 @@
|
|
1
|
+
#
|
2
|
+
|
3
|
+
import sys
|
4
|
+
from buildz.gpuz.torch import CacheModel
|
5
|
+
import torch,time
|
6
|
+
from torch import nn,optim
|
7
|
+
from torch.utils.data import DataLoader, Dataset
|
8
|
+
# Device selection for the benchmark below.
cpu = torch.device('cpu')
# Default the compute device to CPU; upgrade to CUDA only when a GPU is present.
cuda = cpu
if torch.cuda.is_available():
    cuda = torch.device('cuda')
|
12
|
+
def sz(tensor):
    """Return the memory footprint of *tensor* in bytes."""
    return tensor.nelement() * tensor.element_size()
|
14
|
+
class ConvModel(nn.Module):
    """Stack of paired 5x5 convolutions (in->middle->in channels) + LeakyReLU.

    Each of the ``num`` repetitions expands ``ins_channels`` to
    ``middle_channels`` and back, so the overall input/output shape is
    preserved.  ``dims`` is unused but kept for signature compatibility
    with callers.
    """
    def __init__(self, dims, num, ins_channels, middle_channels):
        super().__init__()
        nets=[]
        for i in range(num):
            # padding=2 keeps the spatial size unchanged for a 5x5 kernel.
            nets.append(nn.Conv2d(ins_channels, middle_channels, 5, padding=2))
            nets.append(nn.Conv2d(middle_channels, ins_channels, 5, padding=2))
            nets.append(nn.LeakyReLU())
        print(f"nets:{len(nets)}")
        self.nets = nn.Sequential(*nets)
    def forward(self, inputs):
        return self.nets(inputs)
    def size(self):
        """Total parameter bytes (via module-level helper ``sz``)."""
        total = 0.0
        for net in self.nets:
            # Activation layers carry no parameters; skip them.
            if not hasattr(net, "weight"):
                continue
            w = net.weight
            b = net.bias
            total += sz(w)
            # Fix: layers built with bias=False have bias None; the original
            # unconditionally did sz(b) and would crash on such layers.
            if b is not None:
                total += sz(b)
        return total
    pass
|
37
|
+
class TestDataset(Dataset):
    """Random in-memory dataset of ``n`` samples shaped (channels, dims, dims)."""
    def __init__(self, n, dims, channels):
        # n: number of samples; dims: spatial size; channels: channel count.
        self.n = n
        self.dims = dims
        # All samples are materialized up front: n*channels*dims*dims*4 bytes.
        self.datas = torch.rand(n, channels, dims,dims)
        print(f"data size: {sz(self.datas)/1024/1024/1024} GB")
    def __len__(self):
        return self.n
    def __getitem__(self, i):
        # Fix: removed the unreachable `return torch.rand(self.dims)` that
        # followed this return in the original (dead code).
        return self.datas[i]
    pass
|
50
|
+
def fc_opt(net, opt):
    """Per-sub-model optimizer callback: clip gradients, then apply one step."""
    # Clip to unit norm to keep the deep conv stack from diverging.
    torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
    opt.step()
|
53
|
+
def test():
    """Benchmark CacheModel on a stack of convolutional sub-models.

    The commented-out block below is the phase-1 monolithic-GPU baseline kept
    from test_middle.py; only the CacheModel (windowed CPU<->GPU) phase runs.
    """
    # Hyper-parameters of the synthetic reconstruction benchmark.
    nets=2
    channels=30
    middle_channels = 60
    dims=512
    trains = 5
    datas = 6
    batch=2
    lr=0.0001
    win_size=3
    num_conv = 11
    num_ln = 3
    # One optimizer per sub-model so CacheModel can step each independently.
    mds = [ConvModel(dims, nets, channels, middle_channels) for i in range(num_conv)]
    mds_sz = [md.size() for md in mds]
    print(f"Model Size: {sum(mds_sz)/1024/1024/1024} GB")
    opts =[optim.Adam(md.parameters(), lr=lr) for md in mds]
    #cuda=cpu
    ds = TestDataset(datas, dims, channels)
    dl = DataLoader(ds, batch)
    #return
    # Inputs double as targets: the stack learns to reconstruct its input.
    loss_fn = torch.nn.MSELoss()
    print("start train")
    # nets= []
    # for md in mds:
    #     nets+=md.nets
    # gmodel = nn.Sequential(*nets)
    # gmodel = gmodel.to(cuda)
    # gopt = optim.Adam(gmodel.parameters(), lr=lr)
    # gmodel.train()
    # #with torch.no_grad():
    # for i in range(trains):
    #     total_loss = 0
    #     curr=time.time()
    #     for dt in dl:
    #         dt=dt.to(cuda)
    #         gopt.zero_grad()
    #         out = gmodel(dt)
    #         loss = loss_fn(out, dt)
    #         print(f"loss: {loss, type(loss)}")
    #         loss.backward()
    #         gopt.step()
    #         total_loss+=loss.item()
    #     sec = time.time()-curr
    #     print("train:", i, "loss:", total_loss/len(dl), "time:", sec)
    # del gmodel,gopt
    # torch.cuda.empty_cache()
    # input("start middle:")
    md = CacheModel(cuda, cpu, mds, opts, win_size, fc_opt)
    md.nfc("train")
    #with torch.no_grad():
    for i in range(trains):
        total_loss = 0
        curr=time.time()
        for dt in dl:
            dt=dt.to(cuda)
            [opt.zero_grad() for opt in opts] # no need to push zero_grad onto the GPU as well; just write it directly like this
            out = md.do_forward(dt)
            loss = loss_fn(out, dt)
            print(f"loss: {loss, type(loss)}")
            # do_backward runs loss.backward() and steps each optimizer via fc_opt.
            md.do_backward(lambda : loss.backward())
            total_loss+=loss.item()
        sec = time.time()-curr
        print("train:", i, "loss:", total_loss/len(dl), "time:", sec)



    pass
test()
|
@@ -0,0 +1,181 @@
|
|
1
|
+
#
|
2
|
+
import torch
|
3
|
+
from torch import nn
|
4
|
+
import threading as th
|
5
|
+
class CacheModel:
    '''
    Purpose: when GPU memory is insufficient but the model can be split into
    several smaller models chained linearly, this class rotates those
    sub-models through the GPU: during forward and backward it automatically
    moves a window of the small multi-layer networks onto the GPU for
    computation, then back to the CPU afterwards.
    The caller must split the large network into smaller multi-layer networks
    manually.
    See test_middle_conv1.py for a demo.
    Achieves roughly one half to one third of pure-GPU throughput -- still far
    better than the CPU, especially for convolutions.
    Example:

    from buildz.gpuz.torch import CacheModel
    from torch import nn,optim
    model1 = nn.Sequential(*[nn.Linear(1024,1024) for i in range(10)])
    model2 = nn.Sequential(*[nn.Linear(1024,1024) for i in range(10)])
    model3 = nn.Sequential(*[nn.Linear(1024,1024) for i in range(10)])
    opt1 = optim.Adam(model1.parameters(), lr=0.001)
    opt2 = optim.Adam(model2.parameters(), lr=0.001)
    opt3 = optim.Adam(model3.parameters(), lr=0.001)
    models = [model1,model2,model3]
    opts = [opt1,opt2,opt3]
    loss_fn = torch.nn.MSELoss()
    def opt_step(net, opt):
        # If the model is only used for inference (no training), this function
        # can be omitted and opts can be passed empty.
        # Extra per-model tweaks are optional; the essential part is calling
        # opt.step() to train the current sub-model.
        # torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
        opt.step()
    cmodel = CacheModel(torch.device('cuda'), torch.device('cpu'),models,opts,3,opt_step)

    # Training:
    [md.train() for md in models]
    for inputs,targets in dataloader: # batched dataset; implement this yourself
        [opt.zero_grad() for opt in opts]
        outs = cmodel.do_forward(inputs)
        loss = loss_fn(outs, targets)
        cmodel.do_backward(lambda: loss.backward())
        # opt.step() is called automatically inside do_backward
        print(loss.item())

    # Inference:
    with torch.no_grad():
        outputs = cmodel.do_forward(inputs)
        print(outputs)
    '''
    def __init__(self, gdv, cdv, nets, opts, win_size = 1, backward_deal = None):
        '''
        gdv: GPU device, normally torch.device('cuda')
        cdv: CPU device, normally torch.device('cpu')
        If both are torch.device('cpu'), storage and compute are fully on CPU;
        if both are torch.device('cuda'), storage and compute are fully on GPU.
        '''
        self.gdv = gdv
        self.cdv = cdv
        # event and condition are currently unused; a multi-threaded copy
        # pipeline was planned, but only one Python thread runs bytecode at a
        # time and the preemption hurt too much -- to be reworked.
        self.event = th.Event()
        self.condition=th.Condition()
        # Hook fires on each sub-model during backward so the window can slide.
        [net.register_full_backward_hook(self.hook_backward) for net in nets]
        self.nets = nets
        self.size = len(nets)
        # ctxs[i]: tensors saved during forward of sub-model i (for backward).
        self.ctxs = [[] for i in range(self.size)]
        self.size_1 = self.size-1
        self.opts = opts
        # Number of sub-models kept on the GPU at once.
        self.win_size = win_size
        self.backward_deal = backward_deal
        # GPU window is nets[base..last]; curr is the sub-model being executed.
        self.base = -1
        self.curr = 0
        self.last = -1
        self.running = False
    def hook_pack(self, dt):
        # Activations saved during forward for later gradient computation;
        # kept in a list so they are easy to move between CPU and GPU.
        self.ctxs[self.curr].append(dt)
        # The returned index is what hook_unpack receives back.
        return len(self.ctxs[self.curr])-1
    def hook_unpack(self, x):
        dt = self.ctxs[self.curr][x]
        return dt
    def nfc(self, fc, *a,**b):
        # Broadcast a method call (e.g. "train", "eval") to every sub-model.
        [getattr(net, fc)(*a,**b) for net in self.nets]
    def reset(self):
        # Move the whole current window (and its saved activations) back to
        # the CPU and mark the window empty.
        for i in range(self.base,self.last+1):
            self.nets[i].to(self.cdv)
            self.ctxs_to(i, self.cdv)
        self.base,self.last=-1,-1
    def ctxs_to(self, i, dv):
        # dv is None => drop the saved activations for sub-model i entirely
        # (used once backward has consumed them); otherwise move them to dv.
        if dv is None:
            self.ctxs[i] = []
        else:
            self.ctxs[i] = [k.to(dv) for k in self.ctxs[i]]
    def copy_backward(self):
        # Slide the GPU window one notch toward index 0 for the backward pass.
        # Returns False once the window has reached the first sub-model.
        if self.last<self.curr:
            self.reset()
        if self.base==0:
            return False
        if self.last<0:
            # Empty window: seed it with the last sub-model.
            self.nets[self.size_1].to(self.gdv)
            self.last = self.size_1
            self.base = self.size_1
        # Grow the window downward up to win_size entries.
        diff = self.win_size-(self.last-self.base+1)
        diff = min(diff, self.base)
        for i in range(diff):
            self.nets[self.base-1].to(self.gdv)
            self.ctxs_to(self.base-1, self.gdv)
            self.base-=1
        # Evict sub-models above curr; their activations are no longer needed.
        rels = self.last-self.curr
        for i in range(rels):
            self.nets[self.last].to(self.cdv)
            self.ctxs_to(self.last, None)
            self.last-=1
        return True
    def copy_forward(self):
        # Slide the GPU window one notch toward the last index for forward.
        # Returns False once the window has reached the final sub-model.
        if self.base>self.curr:
            self.reset()
        if self.last==self.size_1:
            return False
        if self.base<0:
            # Empty window: seed it with the first sub-model.
            self.nets[0].to(self.gdv)
            self.base=0
            self.last=0
        # Grow the window upward up to win_size entries.
        diff = self.win_size-(self.last-self.base+1)
        diff = min(diff, self.size_1-self.last)
        for i in range(diff):
            self.nets[self.last+1].to(self.gdv)
            self.last+=1
        # Evict sub-models below curr, keeping their activations on the CPU
        # (they are still needed for backward).
        rels = self.curr-self.base
        for i in range(rels):
            self.nets[self.base].to(self.cdv)
            self.ctxs_to(self.base, self.cdv)
            self.base+=1
        return True
    def th_copy_forward(self):
        # Thread driver for the (currently disabled) background-copy pipeline.
        while self.copy_forward():
            self.event.set()
        self.running = False
    def th_copy_backward(self):
        # Thread driver for the (currently disabled) background-copy pipeline.
        while self.copy_backward():
            self.event.set()
    def wait(self):
        with self.condition:
            self.condition.notify()
        self.event.wait()
    def do_forward(self, inputs):
        """Run the full forward pass, capturing saved tensors via pack/unpack hooks."""
        # while self.running:
        #     import time
        #     time.sleep(0.01)
        # t = th.Thread(target=self.th_copy_forward, daemon=True)
        # self.running = True
        # t.start()
        # Fresh activation caches for this pass.
        self.ctxs = [[] for i in range(self.size)]
        with torch.autograd.graph.saved_tensors_hooks(self.hook_pack, self.hook_unpack):
            rst = self.forward(inputs)
        return rst
    def forward(self, inputs):
        # Execute sub-models in order, sliding the GPU window so the current
        # one is always resident before it runs.
        for self.curr in range(len(self.nets)):
            while not (self.base<=self.curr<=self.last):
                self.copy_forward()
                #self.wait()
            inputs = self.nets[self.curr](inputs)
        return inputs
    def wrap_backward_deal(self, i):
        # Invoke the user's optimizer callback for sub-model i, if provided.
        if self.backward_deal is None:
            return
        try:
            self.backward_deal(self.nets[i], self.opts[i])
        finally:
            pass
    def hook_backward(self, model, ins, outs):
        # Fires once per sub-model as autograd walks backward (reverse order).
        # NOTE(review): steps the optimizer of the sub-model *after* this one
        # (backward_curr+1), whose gradients are presumably complete by now;
        # sub-model 0 is stepped at the end of do_backward.
        if self.backward_curr<self.size_1:
            self.wrap_backward_deal(self.backward_curr+1)
        self.curr = self.backward_curr
        # Slide the window down until the current sub-model is GPU-resident.
        while not (self.base<=self.backward_curr<=self.last):
            #self.wait()
            self.copy_backward()
        self.backward_curr-=1
    def do_backward(self, fc):
        """Run the backward pass: fc() triggers loss.backward(); hooks slide the window."""
        self.backward_curr=self.curr
        # t = th.Thread(target=self.th_copy_backward,daemon=True)
        # t.start()
        fc()
        # The first sub-model's optimizer is stepped last, after all hooks ran.
        self.wrap_backward_deal(0)

    pass
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: buildz
|
3
|
-
Version: 0.6.
|
3
|
+
Version: 0.6.60
|
4
4
|
Summary: 配置读写(基于json格式进行简化)、ioc、以及其他工具代码。a json-base file format's read and write code by python, and codes to read and product object from configure file in such format(ioc), and other tool codes
|
5
5
|
Home-page: https://github.com/buildCodeZ/buildz
|
6
6
|
Author: Zzz
|
@@ -108,6 +108,12 @@ buildz/fz/dirz.py
|
|
108
108
|
buildz/fz/fhs.py
|
109
109
|
buildz/fz/fio.py
|
110
110
|
buildz/fz/lsf.py
|
111
|
+
buildz/gpuz/test/report.txt
|
112
|
+
buildz/gpuz/test/test_middle.py
|
113
|
+
buildz/gpuz/test/test_middle_conv.py
|
114
|
+
buildz/gpuz/test/test_middle_conv1.py
|
115
|
+
buildz/gpuz/torch/__init__.py
|
116
|
+
buildz/gpuz/torch/middlez.py
|
111
117
|
buildz/html/__init__.py
|
112
118
|
buildz/html/xml.py
|
113
119
|
buildz/html/test/demo.html
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|