clarifai 10.0.1__py3-none-any.whl → 10.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/client/app.py +23 -43
- clarifai/client/base.py +46 -4
- clarifai/client/dataset.py +85 -33
- clarifai/client/input.py +35 -7
- clarifai/client/model.py +192 -11
- clarifai/client/module.py +8 -6
- clarifai/client/runner.py +3 -1
- clarifai/client/search.py +6 -3
- clarifai/client/user.py +14 -12
- clarifai/client/workflow.py +8 -5
- clarifai/datasets/upload/features.py +3 -0
- clarifai/datasets/upload/image.py +57 -26
- clarifai/datasets/upload/loaders/README.md +3 -4
- clarifai/datasets/upload/loaders/xview_detection.py +9 -5
- clarifai/datasets/upload/utils.py +23 -7
- clarifai/models/model_serving/README.md +113 -121
- clarifai/models/model_serving/__init__.py +2 -0
- clarifai/models/model_serving/cli/_utils.py +53 -0
- clarifai/models/model_serving/cli/base.py +14 -0
- clarifai/models/model_serving/cli/build.py +79 -0
- clarifai/models/model_serving/cli/clarifai_clis.py +33 -0
- clarifai/models/model_serving/cli/create.py +171 -0
- clarifai/models/model_serving/cli/example_cli.py +34 -0
- clarifai/models/model_serving/cli/login.py +26 -0
- clarifai/models/model_serving/cli/upload.py +182 -0
- clarifai/models/model_serving/constants.py +20 -0
- clarifai/models/model_serving/docs/cli.md +150 -0
- clarifai/models/model_serving/docs/concepts.md +229 -0
- clarifai/models/model_serving/docs/dependencies.md +1 -1
- clarifai/models/model_serving/docs/inference_parameters.md +112 -107
- clarifai/models/model_serving/docs/model_types.md +16 -17
- clarifai/models/model_serving/model_config/__init__.py +4 -2
- clarifai/models/model_serving/model_config/base.py +369 -0
- clarifai/models/model_serving/model_config/config.py +219 -224
- clarifai/models/model_serving/model_config/inference_parameter.py +5 -0
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +25 -24
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +20 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +22 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +32 -28
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +19 -18
- clarifai/models/model_serving/{models → model_config}/output.py +8 -0
- clarifai/models/model_serving/model_config/triton/__init__.py +14 -0
- clarifai/models/model_serving/model_config/{serializer.py → triton/serializer.py} +3 -1
- clarifai/models/model_serving/model_config/triton/triton_config.py +182 -0
- clarifai/models/model_serving/{models/model_types.py → model_config/triton/wrappers.py} +4 -4
- clarifai/models/model_serving/{models → repo_build}/__init__.py +2 -0
- clarifai/models/model_serving/repo_build/build.py +198 -0
- clarifai/models/model_serving/repo_build/static_files/_requirements.txt +2 -0
- clarifai/models/model_serving/repo_build/static_files/base_test.py +169 -0
- clarifai/models/model_serving/repo_build/static_files/inference.py +26 -0
- clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +25 -0
- clarifai/models/model_serving/repo_build/static_files/test.py +40 -0
- clarifai/models/model_serving/{models/pb_model.py → repo_build/static_files/triton/model.py} +15 -14
- clarifai/models/model_serving/utils.py +21 -0
- clarifai/rag/rag.py +67 -23
- clarifai/rag/utils.py +21 -5
- clarifai/utils/evaluation/__init__.py +427 -0
- clarifai/utils/evaluation/helpers.py +522 -0
- clarifai/utils/logging.py +7 -0
- clarifai/utils/model_train.py +3 -1
- clarifai/versions.py +1 -1
- {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/METADATA +58 -10
- clarifai-10.1.1.dist-info/RECORD +115 -0
- clarifai-10.1.1.dist-info/entry_points.txt +2 -0
- clarifai/datasets/upload/loaders/coco_segmentation.py +0 -98
- clarifai/models/model_serving/cli/deploy_cli.py +0 -123
- clarifai/models/model_serving/cli/model_zip.py +0 -61
- clarifai/models/model_serving/cli/repository.py +0 -89
- clarifai/models/model_serving/docs/custom_config.md +0 -33
- clarifai/models/model_serving/docs/output.md +0 -28
- clarifai/models/model_serving/models/default_test.py +0 -281
- clarifai/models/model_serving/models/inference.py +0 -50
- clarifai/models/model_serving/models/test.py +0 -64
- clarifai/models/model_serving/pb_model_repository.py +0 -108
- clarifai-10.0.1.dist-info/RECORD +0 -103
- clarifai-10.0.1.dist-info/entry_points.txt +0 -4
- {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/LICENSE +0 -0
- {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/WHEEL +0 -0
- {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,115 @@
|
|
1
|
+
clarifai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
2
|
+
clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
3
|
+
clarifai/errors.py,sha256=RwzTajwds51wLD0MVlMC5kcpBnzRpreDLlazPSBZxrg,2605
|
4
|
+
clarifai/versions.py,sha256=bk6R6cGyCh8H_XRfcozmi8J8jsQ_tIACnIy4a-o0gbI,186
|
5
|
+
clarifai/client/__init__.py,sha256=xI1U0l5AZdRThvQAXCLsd9axxyFzXXJ22m8LHqVjQRU,662
|
6
|
+
clarifai/client/app.py,sha256=_wDiHrMVhtofVHLZ2-4JHk_WoGCETPvHFe8ZQ3rRjFE,26700
|
7
|
+
clarifai/client/base.py,sha256=4XQU_cPyo8cCGUcZarCBXra_IVdT1KZGt_5c3OtdKig,6489
|
8
|
+
clarifai/client/dataset.py,sha256=hA7fmUcCPOE_Of1pYKqX_9e5pEdmTkODaZaC9adXMJ8,23820
|
9
|
+
clarifai/client/input.py,sha256=GZ7JWhS79GTQOqJ8KvexqLfWCyR-ANHACzciKE-wWxI,39769
|
10
|
+
clarifai/client/lister.py,sha256=03KGMvs5RVyYqxLsSrWhNc34I8kiF1Ph0NeyEwu7nMU,2082
|
11
|
+
clarifai/client/model.py,sha256=NoCfJ9vU9NvhXBszEV1Bi0O9xkNVzjWmmP6SFi8ZG1g,32311
|
12
|
+
clarifai/client/module.py,sha256=BunlC4Uv7TX9JaZ0Kciwy_1_Mtg2GPZV5OLLZZcGz6I,3977
|
13
|
+
clarifai/client/runner.py,sha256=oZkydj1Lfxn6pVx4_-CLzyaneE-dHvBIGL44usW45gA,9867
|
14
|
+
clarifai/client/search.py,sha256=XadJjdV1PqM288LcU6DSnKmaiuVi7kzA5Tt1q0mS_Js,10767
|
15
|
+
clarifai/client/user.py,sha256=QYngaFYINw-U-3FUwyrN2rFbwGyaHavuCXMGqV34pWA,10139
|
16
|
+
clarifai/client/workflow.py,sha256=oALMJfdgTqiilfpDT3H_nepqX9mexLu-uWV0NvtxUs0,10291
|
17
|
+
clarifai/client/auth/__init__.py,sha256=7EwR0NrozkAUwpUnCsqXvE_p0wqx_SelXlSpKShKJK0,136
|
18
|
+
clarifai/client/auth/helper.py,sha256=3lCKo24ZIOlcSh50juJh3ZDagOo_pxEKyoPjWUokYoA,13450
|
19
|
+
clarifai/client/auth/register.py,sha256=2CMdBsoVLoTfjyksE6j7BM2tiEc73WKYvxnwDDgNn1k,536
|
20
|
+
clarifai/client/auth/stub.py,sha256=KIzJZ8aRB1RzXJeWHDAx19HNdBsblPPHwYLfAkgI3rY,3779
|
21
|
+
clarifai/constants/dataset.py,sha256=2QlHF0NMXfAdFlOpEzkNYVZcxSL-dIxq-ZsY_LsIPBA,499
|
22
|
+
clarifai/constants/model.py,sha256=LsMkLVkuBpfS4j4yDW9M4O7HxzRpIuSo9qU5T8Wg2Co,217
|
23
|
+
clarifai/constants/rag.py,sha256=WcHwToUVIK9ItAhDefaSohQHCLNeR55PSjZ0BFnoZ3U,28
|
24
|
+
clarifai/constants/search.py,sha256=_g3S-JEvuygiFfMVK3cl4Ry9erZpt8Zo4ilXL2i3DAE,52
|
25
|
+
clarifai/constants/workflow.py,sha256=cECq1xdvf44MCdtK2AbkiuuwhyL-6OWZdQfYbsLKy_o,33
|
26
|
+
clarifai/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
27
|
+
clarifai/datasets/export/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
28
|
+
clarifai/datasets/export/inputs_annotations.py,sha256=z7kmU9K5m9F5u3iEyCnuKk8Bb97kqGaixm8vJZYT554,9325
|
29
|
+
clarifai/datasets/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
30
|
+
clarifai/datasets/upload/base.py,sha256=IP4sdBRfThk2l0W1rDWciFrAJnKwVsM-gu4zEslJ2_E,2198
|
31
|
+
clarifai/datasets/upload/features.py,sha256=KeVxO36WrL3uqWCN_-aex1k28C5ZRTm6G8SmTtus6KA,1571
|
32
|
+
clarifai/datasets/upload/image.py,sha256=Dlt0RM9qWSi4NcbVM1EjS1sp8zfIO3xWZS6TSSLAbVY,7481
|
33
|
+
clarifai/datasets/upload/text.py,sha256=ek29V18x5LqmHqc-nmAljQcud9uRjZx8IV_lDX78zsY,1980
|
34
|
+
clarifai/datasets/upload/utils.py,sha256=h7mtN9FZXhQQbf47EXczgb-NTY2uOE9AJlE9u4-hDwI,9627
|
35
|
+
clarifai/datasets/upload/loaders/README.md,sha256=aNRutSCTzLp2ruIZx74ZkN5AxpzwKOxMa7OzabnKpwg,2980
|
36
|
+
clarifai/datasets/upload/loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
37
|
+
clarifai/datasets/upload/loaders/coco_captions.py,sha256=t-IaIXukDk1mFdeeqdwe0hLrBLuaF-cZWl2aumGUAls,1297
|
38
|
+
clarifai/datasets/upload/loaders/coco_detection.py,sha256=dBYl2a1D7e-N1heXbFK0bImJAuq_lPQ8nxZMa1zq-Ts,2612
|
39
|
+
clarifai/datasets/upload/loaders/imagenet_classification.py,sha256=LuylazxpI5V8fAPGCUxDirGpYMfxzRxix-MEWaCvwxI,1895
|
40
|
+
clarifai/datasets/upload/loaders/xview_detection.py,sha256=hk8cZdYZimm4KOaZvBjYcC6ikURZMn51xmn7pXZT3HE,6052
|
41
|
+
clarifai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
42
|
+
clarifai/models/api.py,sha256=d3FQQlG0mNDLrfEvchqaVcq4Tgb_TqryNnJtwp3c7sE,10961
|
43
|
+
clarifai/models/model_serving/README.md,sha256=Ln8hsyE38J3yiLZruKHjU_hdq9CjzzbDUAO28Xyw1dQ,4060
|
44
|
+
clarifai/models/model_serving/__init__.py,sha256=78fiK9LvdGvpMxICmZWqSIyS6BFATjW2s5R6_GgtbPA,645
|
45
|
+
clarifai/models/model_serving/constants.py,sha256=uoi8TqEFkdsHhSZu90HOO3R0BmPC3G0z9qA5ER-5H7w,688
|
46
|
+
clarifai/models/model_serving/utils.py,sha256=MXeOHsNHiwx9qsRoX-FzBO2Tmbgo_IVwTf3EUmgdtSQ,524
|
47
|
+
clarifai/models/model_serving/cli/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
|
48
|
+
clarifai/models/model_serving/cli/_utils.py,sha256=oDd885kwX7u5vf-8dssJFyrR3lEof8x4BXt32egaoKA,1722
|
49
|
+
clarifai/models/model_serving/cli/base.py,sha256=k4ARNU1koNzGAi9ach6Vpk7hpISZySiYHyKjkBLuHLg,283
|
50
|
+
clarifai/models/model_serving/cli/build.py,sha256=Bfa-PuLIGcreiBr_72XKqCS_IlVJdzJudZkINmX082Y,2774
|
51
|
+
clarifai/models/model_serving/cli/clarifai_clis.py,sha256=sGDDj7MrlU3goWLQm4H9dCf4lPD2Ojx50_jdIoxb5QM,663
|
52
|
+
clarifai/models/model_serving/cli/create.py,sha256=wtKcVi8XSPN-Fx0RrSUxEwH1hm5TbZ_FrCEMIS9yszM,5598
|
53
|
+
clarifai/models/model_serving/cli/example_cli.py,sha256=tCm0J4EI0kuuSRhEiPTuraSA-bUYwtEFEHcL1eOXzRI,1039
|
54
|
+
clarifai/models/model_serving/cli/login.py,sha256=TYRQALJZUhNvtx2VcChO0y41YXs8-yP9BrShYb9tcOM,743
|
55
|
+
clarifai/models/model_serving/cli/upload.py,sha256=8wYviCTLZYjnXhGykGlm0HhjBd_x5PKp7IKiB8BeOGc,6871
|
56
|
+
clarifai/models/model_serving/docs/cli.md,sha256=AM45FZag3520ri4Terb0t7_MmLTs7gjHXAf7TYVZjZk,3942
|
57
|
+
clarifai/models/model_serving/docs/concepts.md,sha256=ppQADibKQInf9JpfcH7wIpcMndTZ3618or5yzMhGNOE,9376
|
58
|
+
clarifai/models/model_serving/docs/dependencies.md,sha256=apwg_IxDBzovtQYXRpWMU9pUqdf0VaS10yMVOYYXhoc,728
|
59
|
+
clarifai/models/model_serving/docs/inference_parameters.md,sha256=EFBQs3OGQNH512zoLJKMfFD6WXE_Tzt_Uvts877VvpQ,4111
|
60
|
+
clarifai/models/model_serving/docs/model_types.md,sha256=3sALugeBTMspEnlPNWXI8xtWCxjMDQYjrAji_jgqHVo,1013
|
61
|
+
clarifai/models/model_serving/model_config/__init__.py,sha256=MLnCl4U2UlL8hkvKbKifFX2nKRjVN63687-gxiKf8g4,734
|
62
|
+
clarifai/models/model_serving/model_config/base.py,sha256=Jow6cFvREtWRaaXw1hobWJks0uYsOi9oL973ZPEfIkk,14636
|
63
|
+
clarifai/models/model_serving/model_config/config.py,sha256=EWkPcui370QEYJAjlzuLupLlaZF2BgFbK0Jhx_JDHnk,10188
|
64
|
+
clarifai/models/model_serving/model_config/inference_parameter.py,sha256=fDPRkwsntaGZWQWOiCW8x0tcyHPeSCYZwBZoZb2oBzw,3924
|
65
|
+
clarifai/models/model_serving/model_config/output.py,sha256=uyXY-B9mmoe8lizTpYEBRYI1KDNQh3ihEiEB4Ne65uc,4634
|
66
|
+
clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml,sha256=4wFQ2R8PiJrXR_8AEgUDD-22gY9sK93y9r68mSOOVnw,541
|
67
|
+
clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml,sha256=0hicyQM-R2Za62RaBexdNCkHBDdacwMRVAL8Yk_sVzs,421
|
68
|
+
clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml,sha256=MEnVsO3-SAOFSW7-b0BOSxgUNxdhXfmE98hXstBt104,395
|
69
|
+
clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml,sha256=FPO9ic0R_mcFa3nIGon9z3negy1q6LsPRNmJ-wqGhyw,383
|
70
|
+
clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml,sha256=7u_0kdiR2iEuXTKHtErUzZZ8ghUdep-RuWmJd9i8BdY,371
|
71
|
+
clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml,sha256=UDq-VtnnnhuI7NCJOYM19kFvcMS0aOvDDMSblPk5iYY,468
|
72
|
+
clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml,sha256=cJsalUTzXclXpgzH9CutpWQqseJNg9FrI7WjU3wpfuQ,852
|
73
|
+
clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml,sha256=OQYdrY81rD3WNooHRkOiQASvL3XfGG9GGzT61jEsrT8,406
|
74
|
+
clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml,sha256=mQLwA1JnnvWZwp26lVxzwfhp1GS7hH9yHh7mtOUt9rY,474
|
75
|
+
clarifai/models/model_serving/model_config/triton/__init__.py,sha256=uJUjpRauhVp6_9sN5DRQi7bwIKEtHPKyQqcCVj6Aj2g,719
|
76
|
+
clarifai/models/model_serving/model_config/triton/serializer.py,sha256=eYwXfaJkeXMaiQq_EDG4vWOCc1CKfnC_U6dSp2Urak0,4278
|
77
|
+
clarifai/models/model_serving/model_config/triton/triton_config.py,sha256=mDZafUByvEgM1vd0QZL8nM-cOCqeR-06iOC2T6x8hr4,4696
|
78
|
+
clarifai/models/model_serving/model_config/triton/wrappers.py,sha256=-O8t2AEJXvqJlUNtKtr8CUlxLjheV2GfBtM0sB_B1v0,8660
|
79
|
+
clarifai/models/model_serving/repo_build/__init__.py,sha256=jFb0RNG4Jh63TH35_Urv0EyNXVMW8FEC2NVHXhlbvqg,673
|
80
|
+
clarifai/models/model_serving/repo_build/build.py,sha256=IlJTjt5YI1alAGv1Fw3kPZeh3yqi45R20rKbWN9vV1s,7195
|
81
|
+
clarifai/models/model_serving/repo_build/static_files/_requirements.txt,sha256=lIXMfxC4BP6QA5hraObPOwUS3PK9F2mA0Gf8KvlijQE,34
|
82
|
+
clarifai/models/model_serving/repo_build/static_files/base_test.py,sha256=wuwoXk37bgDaLmE-h4KfMoz0Qvr6B-InLzSORYzwF3A,6780
|
83
|
+
clarifai/models/model_serving/repo_build/static_files/inference.py,sha256=TejkXZw43mcZD-M9TkfuqMuABz_cliJgf53_Teodtf0,721
|
84
|
+
clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml,sha256=VOFSSb7D_CgRRcqi-plaCH-6hoFO8NAGDNXVSOJGylo,678
|
85
|
+
clarifai/models/model_serving/repo_build/static_files/test.py,sha256=GunBqWgTyo0aF5W9ckKz55tGS-wkL9S9TRfytIjB7Eo,1505
|
86
|
+
clarifai/models/model_serving/repo_build/static_files/triton/model.py,sha256=l9lkwyeXw9H_K4Om9dGcuylnj4hAlzohspUZkSnQ7Qg,2429
|
87
|
+
clarifai/modules/README.md,sha256=mx8pVx6cPp-pP4LcFPT_nX3ngGmhygVK0WiXeD3cbIo,367
|
88
|
+
clarifai/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
89
|
+
clarifai/modules/css.py,sha256=kadCEunmyh5h2yf0-4aysE3ZcZ6qaQcxuAgDXS96yF8,2020
|
90
|
+
clarifai/modules/pages.py,sha256=iOoM3RNRMgXlV0qBqcdQofxoXo2RuRQh0h9c9BIS0-I,1383
|
91
|
+
clarifai/modules/style.css,sha256=j7FNPZVhLPj35vvBksAJ90RuX5sLuqzDR5iM2WIEhiA,6073
|
92
|
+
clarifai/rag/__init__.py,sha256=wu3PzAzo7uqgrEzuaC9lY_3gj1HFiR3GU3elZIKTT5g,40
|
93
|
+
clarifai/rag/rag.py,sha256=fYCIs9WJKugRFZ6Xt468_7PE6ipE3x4DfaQzvw4EkuY,12392
|
94
|
+
clarifai/rag/utils.py,sha256=aqAM120xC8DcpqWMrsKsmT9QwrKKJZYBLyDgYb8_L-8,4061
|
95
|
+
clarifai/runners/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
96
|
+
clarifai/runners/example.py,sha256=V0Nc52JkhCm97oaWzKVg71g50M1ltxI9jyPMo6tKU6E,1302
|
97
|
+
clarifai/runners/example_llama2.py,sha256=WMGTqv3v9t3ID1rjW9BTLMkIuvyTESL6xHcOO6A220Y,2712
|
98
|
+
clarifai/schema/search.py,sha256=JjTi8ammJgZZ2OGl4K6tIA4zEJ1Fr2ASZARXavI1j5c,2448
|
99
|
+
clarifai/urls/helper.py,sha256=tjoMGGHuWX68DUB0pk4MEjrmFsClUAQj2jmVEM_Sy78,4751
|
100
|
+
clarifai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
101
|
+
clarifai/utils/logging.py,sha256=F19UmdeJKwIy8Nqo8o0hegf-qJGqzqtQ5Bi0Rz2NP4Q,3582
|
102
|
+
clarifai/utils/misc.py,sha256=cC_j0eEsJ8bfnj0oRd2z-Rms1mQbAfLwrSs07hwQuCE,1420
|
103
|
+
clarifai/utils/model_train.py,sha256=JlMJAclOQ6Nx4_30DiQrlgHbQnNedl9UKQILq_HwK7I,8001
|
104
|
+
clarifai/utils/evaluation/__init__.py,sha256=0gmQxbzejnv1tKLj4lKcV7DHQX69irBJkWhA9oYXL1k,15813
|
105
|
+
clarifai/utils/evaluation/helpers.py,sha256=d_dcASRI_lhsHIRukAF1S-w7XazLpK9y6E_ug3l50t4,18440
|
106
|
+
clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
107
|
+
clarifai/workflows/export.py,sha256=vICRhIreqDSShxLKjHNM2JwzKsf1B4fdXB0ciMcA70k,1945
|
108
|
+
clarifai/workflows/utils.py,sha256=nGeB_yjVgUO9kOeKTg4OBBaBz-AwXI3m-huSVj-9W18,1924
|
109
|
+
clarifai/workflows/validate.py,sha256=iCEKBTtB-57uE3LVU7D4AI9BRHxIxahk3U1Ro08HP-o,2535
|
110
|
+
clarifai-10.1.1.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
|
111
|
+
clarifai-10.1.1.dist-info/METADATA,sha256=oSrsyv-IDTTXBLdKGNaIeyHVh2vxkUj5FXNNbRcFa2c,18007
|
112
|
+
clarifai-10.1.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
|
113
|
+
clarifai-10.1.1.dist-info/entry_points.txt,sha256=qZOr_MIPG0dBBE1zringDJS_wXNGTAA_SQ-zcbmDHOw,82
|
114
|
+
clarifai-10.1.1.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
|
115
|
+
clarifai-10.1.1.dist-info/RECORD,,
|
@@ -1,98 +0,0 @@
|
|
1
|
-
#! COCO 2017 Image Segmentation dataset
|
2
|
-
|
3
|
-
import gc
|
4
|
-
import os
|
5
|
-
from functools import reduce
|
6
|
-
|
7
|
-
import cv2
|
8
|
-
import numpy as np
|
9
|
-
from pycocotools import mask as maskUtils
|
10
|
-
from pycocotools.coco import COCO
|
11
|
-
|
12
|
-
from clarifai.datasets.upload.base import ClarifaiDataLoader
|
13
|
-
|
14
|
-
from ..features import VisualSegmentationFeatures
|
15
|
-
|
16
|
-
|
17
|
-
class COCOSegmentationDataLoader(ClarifaiDataLoader):
|
18
|
-
"""COCO Image Segmentation Dataset."""
|
19
|
-
|
20
|
-
def __init__(self, images_dir, label_filepath):
|
21
|
-
"""
|
22
|
-
Args:
|
23
|
-
images_dir: Directory containing the images.
|
24
|
-
label_filepath: Path to the COCO annotation file.
|
25
|
-
"""
|
26
|
-
self.images_dir = images_dir
|
27
|
-
self.label_filepath = label_filepath
|
28
|
-
|
29
|
-
self.map_ids = {}
|
30
|
-
self.load_data()
|
31
|
-
|
32
|
-
@property
|
33
|
-
def task(self):
|
34
|
-
return "visual_segmentation"
|
35
|
-
|
36
|
-
def load_data(self) -> None:
|
37
|
-
self.coco = COCO(self.label_filepath)
|
38
|
-
self.map_ids = {i: img_id for i, img_id in enumerate(list(self.coco.imgs.keys()))}
|
39
|
-
|
40
|
-
def __len__(self):
|
41
|
-
return len(self.coco.imgs)
|
42
|
-
|
43
|
-
def __getitem__(self, index):
|
44
|
-
"""Get image and annotations for a given index."""
|
45
|
-
value = self.coco.imgs[self.map_ids[index]]
|
46
|
-
image_path = os.path.join(self.images_dir, value['file_name'])
|
47
|
-
annots = [] # polygons
|
48
|
-
concept_ids = []
|
49
|
-
|
50
|
-
input_ann_ids = self.coco.getAnnIds(imgIds=[value['id']])
|
51
|
-
input_anns = self.coco.loadAnns(input_ann_ids)
|
52
|
-
|
53
|
-
for ann in input_anns:
|
54
|
-
# get concept info
|
55
|
-
# note1: concept_name can be human readable
|
56
|
-
# note2: concept_id can only be alphanumeric, up to 32 characters, with no special chars except `-` and `_`
|
57
|
-
concept_name = self.coco.cats[ann['category_id']]['name']
|
58
|
-
concept_id = concept_name.lower().replace(' ', '-')
|
59
|
-
|
60
|
-
# get polygons
|
61
|
-
if isinstance(ann['segmentation'], list):
|
62
|
-
poly = np.array(ann['segmentation']).reshape((int(len(ann['segmentation'][0]) / 2),
|
63
|
-
2)).astype(float)
|
64
|
-
poly[:, 0], poly[:, 1] = poly[:, 0] / value['width'], poly[:, 1] / value['height']
|
65
|
-
poly = np.clip(poly, 0, 1)
|
66
|
-
annots.append(poly.tolist()) #[[x=col, y=row],...]
|
67
|
-
concept_ids.append(concept_id)
|
68
|
-
else: # seg: {"counts":[...]}
|
69
|
-
if isinstance(ann['segmentation']['counts'], list):
|
70
|
-
rle = maskUtils.frPyObjects([ann['segmentation']], value['height'], value['width'])
|
71
|
-
else:
|
72
|
-
rle = ann['segmentation']
|
73
|
-
mask = maskUtils.decode(rle) #binary mask
|
74
|
-
#convert mask to polygons and add to annots
|
75
|
-
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
|
76
|
-
polygons = []
|
77
|
-
for cont in contours:
|
78
|
-
if cont.size >= 6:
|
79
|
-
polygons.append(cont.astype(float).flatten().tolist())
|
80
|
-
# store polygons in (x,y) pairs
|
81
|
-
polygons_flattened = reduce(lambda x, y: x + y, polygons)
|
82
|
-
del polygons
|
83
|
-
del contours
|
84
|
-
del mask
|
85
|
-
gc.collect()
|
86
|
-
|
87
|
-
polygons = np.array(polygons_flattened).reshape((int(len(polygons_flattened) / 2),
|
88
|
-
2)).astype(float)
|
89
|
-
polygons[:, 0] = polygons[:, 0] / value['width']
|
90
|
-
polygons[:, 1] = polygons[:, 1] / value['height']
|
91
|
-
polygons = np.clip(polygons, 0, 1)
|
92
|
-
annots.append(polygons.tolist()) #[[x=col, y=row],...,[x=col, y=row]]
|
93
|
-
concept_ids.append(concept_id)
|
94
|
-
|
95
|
-
assert len(concept_ids) == len(annots), f"Num concepts must match num bbox annotations\
|
96
|
-
for a single image. Found {len(concept_ids)} concepts and {len(annots)} bboxes."
|
97
|
-
|
98
|
-
return VisualSegmentationFeatures(image_path, concept_ids, annots, id=str(value['id']))
|
@@ -1,123 +0,0 @@
|
|
1
|
-
# Copyright 2023 Clarifai, Inc.
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
3
|
-
# you may not use this file except in compliance with the License.
|
4
|
-
# You may obtain a copy of the License at
|
5
|
-
#
|
6
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
7
|
-
#
|
8
|
-
# Unless required by applicable law or agreed to in writing, software
|
9
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
10
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11
|
-
# See the License for the specific language governing permissions and
|
12
|
-
# limitations under the License.
|
13
|
-
"""Commandline interface for model upload utils."""
|
14
|
-
import argparse
|
15
|
-
|
16
|
-
from clarifai.client.auth.helper import ClarifaiAuthHelper
|
17
|
-
from clarifai.models.api import Models
|
18
|
-
from clarifai.models.model_serving.model_config import MODEL_TYPES, get_model_config
|
19
|
-
from clarifai.models.model_serving.model_config.inference_parameter import InferParamManager
|
20
|
-
|
21
|
-
|
22
|
-
def deploy(model_url,
|
23
|
-
model_id: str = None,
|
24
|
-
model_type: str = None,
|
25
|
-
desc: str = "",
|
26
|
-
update_version: bool = False,
|
27
|
-
inference_params_file: str = ""):
|
28
|
-
# init Auth from env vars
|
29
|
-
auth = ClarifaiAuthHelper.from_env()
|
30
|
-
# init api
|
31
|
-
model_api = Models(auth)
|
32
|
-
|
33
|
-
# parsing model name/type.
|
34
|
-
# if filename having this format: <model_id>_<model-type>
|
35
|
-
# e.i yolov5s_coco_visual-dectector
|
36
|
-
# else user has to input model_type and model_id
|
37
|
-
zip_filename = model_url.split('/')[-1]
|
38
|
-
zip_filename = zip_filename.split('.')[0]
|
39
|
-
|
40
|
-
def _parse_name(name):
|
41
|
-
*id_, type_ = name.split('_')
|
42
|
-
return "_".join(id_), type_
|
43
|
-
|
44
|
-
# parse model_id
|
45
|
-
if not model_id and "_" in zip_filename:
|
46
|
-
model_id = _parse_name(zip_filename)[0]
|
47
|
-
assert model_id, "Can not parse model_id from url, please input it directly"
|
48
|
-
# parse model_type
|
49
|
-
if not model_type and "_" in zip_filename:
|
50
|
-
model_type = _parse_name(zip_filename)[-1]
|
51
|
-
assert model_type, "Can not parse model_type from url, please input it directly"
|
52
|
-
# key map
|
53
|
-
assert model_type in MODEL_TYPES, f"model_type should be one of {MODEL_TYPES}"
|
54
|
-
clarifai_key_map = get_model_config(model_type=model_type).field_maps
|
55
|
-
# inference parameters
|
56
|
-
inference_parameters = InferParamManager(json_path=inference_params_file).get_list_params()
|
57
|
-
|
58
|
-
# if updating new version of existing model
|
59
|
-
if update_version:
|
60
|
-
resp = model_api.post_model_version(
|
61
|
-
model_id=model_id,
|
62
|
-
model_zip_url=model_url,
|
63
|
-
input=clarifai_key_map.input_fields_map,
|
64
|
-
outputs=clarifai_key_map.output_fields_map,
|
65
|
-
param_specs=inference_parameters)
|
66
|
-
# creating new model
|
67
|
-
else:
|
68
|
-
# post model
|
69
|
-
resp = model_api.upload_model(
|
70
|
-
model_id=model_id,
|
71
|
-
model_zip_url=model_url,
|
72
|
-
model_type=model_type,
|
73
|
-
input=clarifai_key_map.input_fields_map,
|
74
|
-
outputs=clarifai_key_map.output_fields_map,
|
75
|
-
description=desc,
|
76
|
-
param_specs=inference_parameters)
|
77
|
-
# response
|
78
|
-
if resp["status"]["code"] != "SUCCESS":
|
79
|
-
raise Exception("Post models failed, details: {}, {}".format(resp["status"]["description"],
|
80
|
-
resp["status"]["details"]))
|
81
|
-
else:
|
82
|
-
print("Success!")
|
83
|
-
print(f'Model version: {resp["model"]["model_version"]["id"]}')
|
84
|
-
|
85
|
-
|
86
|
-
def main():
|
87
|
-
parser = argparse.ArgumentParser(description=__doc__)
|
88
|
-
# args
|
89
|
-
parser.add_argument("--url", type=str, required=True, help="Direct download url of zip file")
|
90
|
-
parser.add_argument("--model_id", type=str, required=False, default="", help="Custom model id.")
|
91
|
-
parser.add_argument(
|
92
|
-
"--model_type",
|
93
|
-
type=str,
|
94
|
-
required=False,
|
95
|
-
choices=MODEL_TYPES,
|
96
|
-
default="",
|
97
|
-
help="Clarifai model type")
|
98
|
-
parser.add_argument(
|
99
|
-
"--desc", type=str, required=False, default="", help="Short desccription of model")
|
100
|
-
parser.add_argument(
|
101
|
-
"--update_version",
|
102
|
-
action="store_true",
|
103
|
-
required=False,
|
104
|
-
help="Update exist model with new version")
|
105
|
-
|
106
|
-
parser.add_argument(
|
107
|
-
"--infer_param",
|
108
|
-
required=False,
|
109
|
-
default="",
|
110
|
-
help="Path to json file contains inference parameters")
|
111
|
-
|
112
|
-
args = parser.parse_args()
|
113
|
-
deploy(
|
114
|
-
model_url=args.url,
|
115
|
-
model_id=args.model_id,
|
116
|
-
desc=args.desc,
|
117
|
-
model_type=args.model_type,
|
118
|
-
update_version=args.update_version,
|
119
|
-
inference_params_file=args.infer_param)
|
120
|
-
|
121
|
-
|
122
|
-
if __name__ == "__main__":
|
123
|
-
main()
|
@@ -1,61 +0,0 @@
|
|
1
|
-
# Copyright 2023 Clarifai, Inc.
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
3
|
-
# you may not use this file except in compliance with the License.
|
4
|
-
# You may obtain a copy of the License at
|
5
|
-
#
|
6
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
7
|
-
#
|
8
|
-
# Unless required by applicable law or agreed to in writing, software
|
9
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
10
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11
|
-
# See the License for the specific language governing permissions and
|
12
|
-
# limitations under the License.
|
13
|
-
"""Triton model zip commandline interface."""
|
14
|
-
|
15
|
-
import argparse
|
16
|
-
import zipfile
|
17
|
-
from pathlib import Path
|
18
|
-
from typing import Union
|
19
|
-
|
20
|
-
|
21
|
-
def zip_dir(triton_repository_dir: Union[Path, str], zip_filename: Union[Path, str]):
|
22
|
-
"""
|
23
|
-
Generate triton model repository zip file for upload.
|
24
|
-
Args:
|
25
|
-
-----
|
26
|
-
triton_repository_dir: Directory of triton model respository to be zipped
|
27
|
-
zip_filename: Triton model repository zip filename
|
28
|
-
|
29
|
-
Returns:
|
30
|
-
--------
|
31
|
-
None
|
32
|
-
"""
|
33
|
-
# Convert to Path object
|
34
|
-
dir = Path(triton_repository_dir)
|
35
|
-
|
36
|
-
with zipfile.ZipFile(zip_filename, "w", zipfile.ZIP_DEFLATED) as zip_file:
|
37
|
-
for entry in dir.rglob("*"):
|
38
|
-
zip_file.write(entry, entry.relative_to(dir))
|
39
|
-
|
40
|
-
|
41
|
-
def main():
|
42
|
-
"""Triton model zip cli."""
|
43
|
-
parser = argparse.ArgumentParser(__doc__)
|
44
|
-
parser.add_argument(
|
45
|
-
"--triton_model_repository",
|
46
|
-
type=str,
|
47
|
-
required=True,
|
48
|
-
help="Path to the triton model repository to zip.")
|
49
|
-
parser.add_argument(
|
50
|
-
"--zipfile_name",
|
51
|
-
type=str,
|
52
|
-
required=True,
|
53
|
-
help="Name of the zipfile to be created. \
|
54
|
-
<model_name>_<model_type> is the recommended naming convention.e.g. yolov5_visual-detector.zip"
|
55
|
-
)
|
56
|
-
args = parser.parse_args()
|
57
|
-
zip_dir(args.triton_model_repository, args.zipfile_name)
|
58
|
-
|
59
|
-
|
60
|
-
if __name__ == "__main__":
|
61
|
-
main()
|
@@ -1,89 +0,0 @@
|
|
1
|
-
# Copyright 2023 Clarifai, Inc.
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
3
|
-
# you may not use this file except in compliance with the License.
|
4
|
-
# You may obtain a copy of the License at
|
5
|
-
#
|
6
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
7
|
-
#
|
8
|
-
# Unless required by applicable law or agreed to in writing, software
|
9
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
10
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11
|
-
# See the License for the specific language governing permissions and
|
12
|
-
# limitations under the License.
|
13
|
-
"""Triton model repository generation commandline interface."""
|
14
|
-
|
15
|
-
import argparse
|
16
|
-
|
17
|
-
from ..constants import MAX_HW_DIM
|
18
|
-
from ..model_config import MODEL_TYPES, get_model_config
|
19
|
-
from ..pb_model_repository import TritonModelRepository
|
20
|
-
|
21
|
-
|
22
|
-
def dims_type(shape_string: str):
|
23
|
-
"""Read list string from cli and convert values to a list of integers."""
|
24
|
-
shape_string = shape_string.replace("[", "").replace("]", "")
|
25
|
-
shapes = list(map(int, shape_string.split(",")))
|
26
|
-
return shapes
|
27
|
-
|
28
|
-
|
29
|
-
def model_upload_init():
|
30
|
-
"""
|
31
|
-
Clarifai triton model upload commandline tool.
|
32
|
-
"""
|
33
|
-
parser = argparse.ArgumentParser(description=__doc__)
|
34
|
-
# TritonModelConfig args
|
35
|
-
parser.add_argument("--model_name", type=str, required=True, help="Inference Model Name")
|
36
|
-
parser.add_argument(
|
37
|
-
"--model_version",
|
38
|
-
type=str,
|
39
|
-
default="1",
|
40
|
-
required=False,
|
41
|
-
help="Triton inference model version name. 1 stands for version 1. \
|
42
|
-
Leave as default value (Recommended).")
|
43
|
-
parser.add_argument(
|
44
|
-
"--model_type",
|
45
|
-
type=str,
|
46
|
-
choices=MODEL_TYPES,
|
47
|
-
required=True,
|
48
|
-
help=f"Clarifai supported model types.\n Model-types-map: {MODEL_TYPES}",
|
49
|
-
)
|
50
|
-
parser.add_argument(
|
51
|
-
"--image_shape",
|
52
|
-
type=dims_type,
|
53
|
-
default="[-1, -1]",
|
54
|
-
required=False,
|
55
|
-
help="(H, W) dims for models with an image input type. H and W each have a max value of 1024",
|
56
|
-
)
|
57
|
-
parser.add_argument(
|
58
|
-
"--repo_dir",
|
59
|
-
type=str,
|
60
|
-
default=".",
|
61
|
-
required=True,
|
62
|
-
help="Directory to create triton repository.")
|
63
|
-
parser.add_argument("--max_bs", type=int, default=1, required=False, help="Max batch size")
|
64
|
-
|
65
|
-
args = parser.parse_args()
|
66
|
-
|
67
|
-
if len(args.image_shape) != 2:
|
68
|
-
raise ValueError(
|
69
|
-
f"image_shape takes 2 values, Height and Width. Got {len(args.image_shape)} values instead."
|
70
|
-
)
|
71
|
-
|
72
|
-
if args.image_shape[0] > MAX_HW_DIM or args.image_shape[1] > MAX_HW_DIM:
|
73
|
-
raise ValueError(
|
74
|
-
f"H and W each have a maximum value of 1024. Got H: {args.image_shape[0]}, W: {args.image_shape[1]}"
|
75
|
-
)
|
76
|
-
|
77
|
-
model_config = get_model_config(args.model_type).make_triton_model_config(
|
78
|
-
model_name=args.model_name,
|
79
|
-
model_version="1",
|
80
|
-
image_shape=args.image_shape,
|
81
|
-
max_batch_size=args.max_bs,
|
82
|
-
)
|
83
|
-
|
84
|
-
triton_repo = TritonModelRepository(model_config)
|
85
|
-
triton_repo.build_repository(args.repo_dir)
|
86
|
-
|
87
|
-
|
88
|
-
if __name__ == "__main__":
|
89
|
-
model_upload_init()
|
@@ -1,33 +0,0 @@
|
|
1
|
-
## Custom Triton Configurations
|
2
|
-
|
3
|
-
The commandline triton model repository generation utils do work with default values for the various triton configurations but a few of these config values can be modified to suit different task specific needs.
|
4
|
-
|
5
|
-
* For vision models for instance, different input shapes for the `Height (H)` and `Width (W)` are supported and can be set via the commandline too, i.e.
|
6
|
-
```console
|
7
|
-
$ clarifai-model-upload-init --model_name <Your model name> \
|
8
|
-
--model_type <select model type from available ones> \
|
9
|
-
--image_shape "H, W" \
|
10
|
-
--repo_dir <directory in which to create your model repository>
|
11
|
-
```
|
12
|
-
`H` and `W` each have a maximum value of 1024.
|
13
|
-
`--image_shape` accepts both `"H, W"` and `"[H, W]"` format input.
|
14
|
-
|
15
|
-
|
16
|
-
## Generating the triton model repository without the commandline
|
17
|
-
|
18
|
-
The triton model repository can be generated via a python script specifying the same values as required in the commandline. Below is a sample of how the code would be structured with `visual_classifier`.
|
19
|
-
|
20
|
-
```python
|
21
|
-
from clarifai.models.model_serving.model_config import get_model_config, ModelTypes, TritonModelConfig
|
22
|
-
from clarifai.models.model_serving.pb_model_repository import TritonModelRepository
|
23
|
-
|
24
|
-
model_type = ModelTypes.visual_classifier
|
25
|
-
model_config: TritonModelConfig = get_model_config(model_type).make_triton_model_config(
|
26
|
-
model_name="<model_name>",
|
27
|
-
model_version="1",
|
28
|
-
image_shape=<[H,W]>, # 0 < [H,W] <= 1024
|
29
|
-
)
|
30
|
-
|
31
|
-
triton_repo = TritonModelRepository(model_config)
|
32
|
-
triton_repo.build_repository("<dir>")
|
33
|
-
```
|
@@ -1,28 +0,0 @@
|
|
1
|
-
## Clarifai Model Prediction Output Formats.
|
2
|
-
|
3
|
-
Different models return different types of predictions and Clarifai output dataclasses aim at standardizing the output formats per model type for compatibility with the Clarifai API.
|
4
|
-
|
5
|
-
Each machine learning modality supported by the Clarifai API has a predefined dataclass output format with all attributes being of numpy ndarray type.
|
6
|
-
|
7
|
-
## Supported Formats
|
8
|
-
|
9
|
-
Usage:
|
10
|
-
```python
|
11
|
-
from clarifai.models.model_serving.models.output import VisualDetectorOutput
|
12
|
-
```
|
13
|
-
| Output Type (dataclass) | Attributes | Attribute Data Type| Attribute Shapes | Description |
|
14
|
-
| --- | --- | --- | --- | --- |
|
15
|
-
| [VisualDetectorOutput](../models/output.py) | `predicted_bboxes` | float32 | [-1, 4] | A 2D detected bounding boxes array of any length with each element array having a length of exactly 4. All bbox coordinates MUST be normalized between 0 & 1. |
|
16
|
-
| | `predicted_labels` | int32 | [-1, 1] | A 2D detected labels array of length equal to that of predicted_bboxes with each element array having a length of exactly 1.
|
17
|
-
| | `predicted_scores` | float32 | [-1, 1] | A 2D detection scores array of length equal to that of predicted_bboxes & predicted_labels with each element array having a length of exactly 1.
|
18
|
-
| | | | | |
|
19
|
-
| [ClassifierOutput](../models/output.py) | `predicted_scores` | float32 | [-1] | The softmax of the model's predictions. The index of each predicted probability as returned by the model must correspond to the label index in the labels.txt file |
|
20
|
-
| | | | | |
|
21
|
-
| [TextOutput](../models/output.py) | `predicted_text` | string | [1] | Predicted text from a model |
|
22
|
-
| | | | | |
|
23
|
-
| [EmbeddingOutput](../models/output.py) | `embedding_vector` | float32 | [-1] | The embedding vector (image or text embedding) returned by a model |
|
24
|
-
| | | | | |
|
25
|
-
| [MasksOutput](../models/output.py) | `predicted_mask` | int64 | [-1, -1] | The model predicted image mask. The predicted class indices must be assigned to the corresponding image pixels in the mask where that class is predicted by the model. |
|
26
|
-
| | | | | |
|
27
|
-
| [ImageOutput](../models/output.py) | `image` | uint8 | [-1, -1, 3] | The model predicted/generated image |
|
28
|
-
| | | | | |
|