clarifai 9.10.1__py3-none-any.whl → 9.10.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (323) hide show
  1. clarifai/client/__init__.py +3 -2
  2. clarifai/client/app.py +39 -23
  3. clarifai/client/base.py +6 -6
  4. clarifai/client/dataset.py +113 -55
  5. clarifai/client/input.py +47 -55
  6. clarifai/client/model.py +27 -25
  7. clarifai/client/module.py +13 -11
  8. clarifai/client/runner.py +5 -3
  9. clarifai/client/search.py +29 -10
  10. clarifai/client/user.py +14 -8
  11. clarifai/client/workflow.py +22 -20
  12. clarifai/constants/dataset.py +22 -0
  13. clarifai/datasets/upload/base.py +9 -7
  14. clarifai/datasets/upload/features.py +3 -3
  15. clarifai/datasets/upload/image.py +49 -50
  16. clarifai/datasets/upload/loaders/coco_captions.py +26 -80
  17. clarifai/datasets/upload/loaders/coco_detection.py +56 -115
  18. clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
  19. clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
  20. clarifai/datasets/upload/loaders/xview_detection.py +3 -3
  21. clarifai/datasets/upload/text.py +16 -16
  22. clarifai/datasets/upload/utils.py +196 -21
  23. clarifai/utils/misc.py +21 -0
  24. clarifai/versions.py +1 -1
  25. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
  26. clarifai-9.10.3.dist-info/RECORD +96 -0
  27. clarifai-9.10.3.dist-info/top_level.txt +1 -0
  28. clarifai/auth/__init__.py +0 -6
  29. clarifai/auth/helper.py +0 -367
  30. clarifai/auth/register.py +0 -23
  31. clarifai/auth/stub.py +0 -127
  32. clarifai/datasets/upload/examples/README.md +0 -31
  33. clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
  34. clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  35. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  36. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  37. clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  38. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  39. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  40. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  41. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  42. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  43. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  44. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  45. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  46. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  47. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  48. clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  49. clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  50. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  51. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  52. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  53. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  54. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  55. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  56. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  57. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  58. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  59. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  60. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  61. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  62. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  63. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  64. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  65. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  66. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  67. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  68. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  69. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  70. clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
  71. clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  72. clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  73. clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  74. clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  75. clarifai/datasets/upload/loaders/README.md +0 -49
  76. clarifai/models/model_serving/README.md +0 -155
  77. clarifai/models/model_serving/docs/custom_config.md +0 -33
  78. clarifai/models/model_serving/docs/dependencies.md +0 -11
  79. clarifai/models/model_serving/docs/inference_parameters.md +0 -134
  80. clarifai/models/model_serving/docs/model_types.md +0 -20
  81. clarifai/models/model_serving/docs/output.md +0 -28
  82. clarifai/models/model_serving/examples/README.md +0 -7
  83. clarifai/models/model_serving/examples/image_classification/README.md +0 -9
  84. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  85. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  86. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  87. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  88. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  89. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  90. clarifai/models/model_serving/examples/text_classification/README.md +0 -9
  91. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  92. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  93. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  94. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  95. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  96. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  97. clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
  98. clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
  99. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  100. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  101. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  102. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  104. clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
  105. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  106. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  107. clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
  108. clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  109. clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  110. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  111. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
  112. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  113. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  114. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
  115. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  116. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  117. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  118. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  119. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  120. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  121. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  122. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  123. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  124. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  125. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  126. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  127. clarifai/modules/README.md +0 -5
  128. clarifai/modules/style.css +0 -217
  129. clarifai-9.10.1.dist-info/RECORD +0 -386
  130. clarifai-9.10.1.dist-info/top_level.txt +0 -2
  131. clarifai_utils/__init__.py +0 -0
  132. clarifai_utils/auth/__init__.py +0 -6
  133. clarifai_utils/auth/helper.py +0 -367
  134. clarifai_utils/auth/register.py +0 -23
  135. clarifai_utils/auth/stub.py +0 -127
  136. clarifai_utils/cli.py +0 -0
  137. clarifai_utils/client/__init__.py +0 -16
  138. clarifai_utils/client/app.py +0 -684
  139. clarifai_utils/client/auth/__init__.py +0 -4
  140. clarifai_utils/client/auth/helper.py +0 -367
  141. clarifai_utils/client/auth/register.py +0 -23
  142. clarifai_utils/client/auth/stub.py +0 -127
  143. clarifai_utils/client/base.py +0 -131
  144. clarifai_utils/client/dataset.py +0 -442
  145. clarifai_utils/client/input.py +0 -892
  146. clarifai_utils/client/lister.py +0 -54
  147. clarifai_utils/client/model.py +0 -575
  148. clarifai_utils/client/module.py +0 -94
  149. clarifai_utils/client/runner.py +0 -161
  150. clarifai_utils/client/search.py +0 -239
  151. clarifai_utils/client/user.py +0 -253
  152. clarifai_utils/client/workflow.py +0 -223
  153. clarifai_utils/constants/model.py +0 -4
  154. clarifai_utils/constants/search.py +0 -2
  155. clarifai_utils/datasets/__init__.py +0 -0
  156. clarifai_utils/datasets/export/__init__.py +0 -0
  157. clarifai_utils/datasets/export/inputs_annotations.py +0 -222
  158. clarifai_utils/datasets/upload/__init__.py +0 -0
  159. clarifai_utils/datasets/upload/base.py +0 -66
  160. clarifai_utils/datasets/upload/examples/README.md +0 -31
  161. clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
  162. clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  163. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  164. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  165. clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  166. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  167. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  168. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  169. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  170. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  171. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  172. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  173. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  174. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  175. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  176. clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  177. clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  178. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  179. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  180. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  181. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  182. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  183. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  184. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  185. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  186. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  187. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  188. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  189. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  190. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  191. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  192. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  193. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  194. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  195. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  196. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  197. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  198. clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
  199. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  200. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  201. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  202. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  203. clarifai_utils/datasets/upload/features.py +0 -44
  204. clarifai_utils/datasets/upload/image.py +0 -165
  205. clarifai_utils/datasets/upload/loaders/README.md +0 -49
  206. clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
  207. clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
  208. clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
  209. clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
  210. clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
  211. clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
  212. clarifai_utils/datasets/upload/text.py +0 -53
  213. clarifai_utils/datasets/upload/utils.py +0 -63
  214. clarifai_utils/errors.py +0 -89
  215. clarifai_utils/models/__init__.py +0 -0
  216. clarifai_utils/models/api.py +0 -283
  217. clarifai_utils/models/model_serving/README.md +0 -155
  218. clarifai_utils/models/model_serving/__init__.py +0 -12
  219. clarifai_utils/models/model_serving/cli/__init__.py +0 -12
  220. clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
  221. clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
  222. clarifai_utils/models/model_serving/cli/repository.py +0 -87
  223. clarifai_utils/models/model_serving/constants.py +0 -1
  224. clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
  225. clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
  226. clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
  227. clarifai_utils/models/model_serving/docs/model_types.md +0 -20
  228. clarifai_utils/models/model_serving/docs/output.md +0 -28
  229. clarifai_utils/models/model_serving/examples/README.md +0 -7
  230. clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
  231. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  232. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
  233. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
  234. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  235. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  236. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  237. clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  238. clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  239. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  240. clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
  241. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  242. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
  243. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
  244. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  245. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  246. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  247. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  248. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  249. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  250. clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
  251. clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
  252. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  253. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  254. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  255. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  256. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  257. clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
  258. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  259. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
  260. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
  261. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  262. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  263. clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
  264. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
  265. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
  266. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  267. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  268. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  269. clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
  270. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  271. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
  272. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
  273. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  274. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  275. clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
  276. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  277. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
  278. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
  279. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  280. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  281. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  282. clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
  283. clarifai_utils/models/model_serving/model_config/config.py +0 -302
  284. clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
  285. clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  286. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  287. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  288. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  289. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  290. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  291. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  292. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  293. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  294. clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
  295. clarifai_utils/models/model_serving/models/__init__.py +0 -12
  296. clarifai_utils/models/model_serving/models/default_test.py +0 -275
  297. clarifai_utils/models/model_serving/models/inference.py +0 -42
  298. clarifai_utils/models/model_serving/models/model_types.py +0 -265
  299. clarifai_utils/models/model_serving/models/output.py +0 -124
  300. clarifai_utils/models/model_serving/models/pb_model.py +0 -74
  301. clarifai_utils/models/model_serving/models/test.py +0 -64
  302. clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
  303. clarifai_utils/modules/README.md +0 -5
  304. clarifai_utils/modules/__init__.py +0 -0
  305. clarifai_utils/modules/css.py +0 -60
  306. clarifai_utils/modules/pages.py +0 -42
  307. clarifai_utils/modules/style.css +0 -217
  308. clarifai_utils/runners/__init__.py +0 -0
  309. clarifai_utils/runners/example.py +0 -33
  310. clarifai_utils/schema/search.py +0 -69
  311. clarifai_utils/urls/helper.py +0 -103
  312. clarifai_utils/utils/__init__.py +0 -0
  313. clarifai_utils/utils/logging.py +0 -90
  314. clarifai_utils/utils/misc.py +0 -33
  315. clarifai_utils/utils/model_train.py +0 -157
  316. clarifai_utils/versions.py +0 -6
  317. clarifai_utils/workflows/__init__.py +0 -0
  318. clarifai_utils/workflows/export.py +0 -68
  319. clarifai_utils/workflows/utils.py +0 -59
  320. clarifai_utils/workflows/validate.py +0 -67
  321. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
  322. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
  323. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
@@ -1,49 +0,0 @@
1
- ## Dataset Loaders
2
-
3
- A collection of data preprocessing modules for popular public datasets to allow for compatible upload into Clarifai user app datasets.
4
-
5
- ## Usage
6
-
7
- If a dataset module exists in the zoo, uploading the specific dataset can be easily done by simply creating a Python script (or via the command line) and specifying the dataset module name in the `dataset_loader` parameter of the `Dataset` class's `upload_dataset` method, i.e.
8
-
9
- ```python
10
- from clarifai.client.app import App
11
-
12
- app = App(app_id="", user_id="")
13
- # Create a dataset in Clarifai App
14
- dataset = app.create_dataset(dataset_id="")
15
- # execute data upload to Clarifai app dataset
16
- dataset.upload_dataset(task='visual_segmentation', split="train", dataset_loader='coco_segmentation')
17
- ```
18
-
19
- ## Dataset Loaders
20
-
21
- | dataset name | task | module name (.py) | splits |
22
- | --- | --- | --- | --- |
23
- | [COCO 2017](https://cocodataset.org/#download) | Detection | `coco_detection` | `train`, `val` |
24
- | | Segmentation | `coco_segmentation` | `train`, `val` |
25
- | | Captions | `coco_captions` | `train`, `val` |
26
- |[xVIEW](http://xviewdataset.org/) | Detection | `xview_detection` | `train`
27
- | [ImageNet](https://www.image-net.org/) | Classification | `imagenet_classification` | `train`
28
- ## Contributing Modules
29
-
30
- A dataloader (preprocessing) module is a Python script that contains a dataloader class which implements data download (fetching the dataset from a source to a local disk directory) & extraction, along with dataloader methods.
31
-
32
- The class naming convention is `<datasetname>DataLoader`. The dataset class must accept `split` as the only argument in the `__init__` method and the `__getitem__` method must return either of `VisualClassificationFeatures()`, `VisualDetectionFeatures()`, `VisualSegmentationFeatures()` or `TextFeatures()` as defined in [clarifai/datasets/upload/features.py](../features.py). Other methods can be added as seen fit but must be inherited from parent `ClarifaiDataLoader` base class [clarifai/datasets/upload/base.py](../base.py).
33
- Reference can be taken from the existing dataset modules in the zoo for development.
34
-
35
- ## Notes
36
-
37
- * Dataloaders in the zoo by default first create a `data` directory in the zoo directory then download the data into this `data` directory, preprocess the data and finally execute upload to a Clarifai app dataset. For instance with the COCO dataset modules above, the coco2017 dataset is by default downloaded first into a `data` directory, extracted and then preprocessing is performed on it and finally uploaded to Clarifai.
38
-
39
- * Taking the above into consideration, to avoid the scripts re-downloading data you already have locally, create a `data` directory in the loaders directory and move your extracted data there. **Ensure that the extracted folder/file names and file structure MATCH those when the downloaded zips are extracted.**
40
-
41
- * COCO Format: To reuse the coco modules above on your coco format data, first ensure the criteria in the two points above are adhered to. If so, pass the coco module name from any of the above loaders to the `dataset_loader=` parameter in `upload_dataset()`.
42
-
43
- * xVIEW Dataset: To upload, you have to register and download the images and labels from [xviewdataset](http://xviewdataset.org/#dataset), then follow the above-mentioned steps to place the extracted folder in the `data` directory. Finally, pass the xview module name to the `dataset_loader=` parameter in `upload_dataset()`.
44
-
45
- * ImageNet Dataset: ImageNet Dataset should be downloaded and placed in the 'data' folder along with the [label mapping file](https://www.kaggle.com/competitions/imagenet-object-localization-challenge/data?select=LOC_synset_mapping.txt).
46
-
47
- <data>/
48
- ├── train/
49
- ├── LOC_synset_mapping.txt
@@ -1,155 +0,0 @@
1
- ## Clarifai Model Serving: Deploy Your Machine Learning Models to Clarifai.
2
-
3
- Build and easily deploy machine learning models to Clarifai for inference using the [Nvidia Triton Inference Server](https://github.com/triton-inference-server/server).
4
-
5
- ## QuickStart Guide: Build a deployment ready model.
6
-
7
- A step by step guide to building your own triton inference model and deploying it into a Clarifai app.
8
-
9
- 1. Generate a triton model repository via commandline.
10
- ```console
11
- clarifai-model-upload-init --model_name <Your model name> \
12
- --model_type <select model type from available ones> \
13
- --repo_dir <directory in which to create your model repository>
14
- ```
15
- 2. 1. Edit the `requirements.txt` file with dependencies needed to run inference on your model and the `labels.txt` (if available in dir) with the labels your model is to predict.
16
- 2. Add your model loading and inference code inside `inference.py` script of the generated model repository under the `setup()` and `predict()` functions respectively. Refer to The [Inference Script section]() for a description of this file.
17
- 3. Inference parameters (optional): you can define some inference parameters that can be adjusted on model view of Clarifai platform when making prediction. Follow [this doc](./docs/inference_parameters.md) to build the json file.
18
- 3. Testing (Recommended): test your implementation locally by running `<your_triton_folder>/1/test.py` with basic predefined tests.
19
- To avoid missing dependencies when deploying, we recommend using conda to create a clean environment. Then install everything in `requirements.txt`. Follow the instructions inside [test.py](./models/test.py) for implementing custom tests.
20
- * Create conda env and install requirements:
21
- ```bash
22
- # create env (note: only python version 3.8 is supported currently)
23
- conda create -n <your_env> python=3.8
24
- # activate it
25
- conda activate <your_env>
26
- # install dependencies
27
- pip install -r <your_triton_folder>/requirements.txt
28
- ```
29
- * Then run the test by using pytest:
30
-
31
- ```bash
32
- # Run the test
33
- pytest ./your_triton_folder/1/test.py
34
- # to see std output
35
- pytest --log-cli-level=INFO -s ./your_triton_folder/1/test.py
36
- ```
37
- 4. Generate a zip of your triton model for deployment via commandline.
38
- ```console
39
- clarifai-triton-zip --triton_model_repository <path to triton model repository to be compressed> \
40
- --zipfile_name <name of the triton model zip> (Recommended to use <model_name>_<model-type> convention for naming)
41
- ```
42
- 5. Upload the generated zip to a public file storage service to get a URL to the zip. This URL must be publicly accessible and downloadable as it's necessary for the last step: uploading the model to a Clarifai app.
43
- 6. Set your Clarifai auth credentials as environment variables.
44
- ```console
45
- export CLARIFAI_USER_ID=<your clarifai user_id>
46
- export CLARIFAI_APP_ID=<your clarifai app_id>
47
- export CLARIFAI_PAT=<your clarifai PAT>
48
- ```
49
- 7. Upload your model to Clarifai. Please ensure that your configuration field maps adhere to [this](https://github.com/Clarifai/clarifai-python-utils/blob/main/clarifai/models/model_serving/model_config/deploy.py)
50
- ```console
51
- clarifai-upload-model --url <URL to your model zip. Your zip file name is expected to have "zipfile_name" format (in clarifai-triton-zip), if not you need to specify your model_id and model_type> \
52
- --model_id <Your model ID on the platform> \
53
- --model_type <Clarifai model types> \
54
- --desc <A description of your model> \
55
- --update_version <Optional. Add new version of existing model> \
56
- --infer_param <Optional. Path to json file contains inference parameters>
57
- ```
58
-
59
- * Finally, navigate to your Clarifai app models and check that the deployed model appears. Click on the model name to go to the model versions table to track the status of the model deployment.
60
-
61
- ## Triton Model Repository
62
- ```diff
63
- <model_name>/
64
- ├── config.pbtxt
65
- ├── requirements.txt
66
- ├── labels.txt (If applicable for given model-type)
67
- - ├── triton_conda.yaml
68
- └── 1/
69
- ├── __init__.py
70
- ├── inference.py
71
- ├── test.py
72
- └── model.py
73
- ```
74
-
75
- A generated triton model repository looks as illustrated in the directory tree above. Any additional files such as model checkpoints and folders needed at inference time must all be placed under the `1/` directory.
76
-
77
- - File Descriptions
78
-
79
- | Filename | Description & Use |
80
- | --- | --- |
81
- | `config.pbtxt` | Contains the triton model configuration used by the triton inference server to guide inference requests processing. |
82
- | `requirements.txt` | Contains dependencies needed by a user model to successfully make predictions.|
83
- | `labels.txt` | Contains labels listed one per line, a model is trained to predict. The order of labels should match the model predicted class indexes. |
84
- | `triton_conda.yaml` | Contains dependencies available in pre-configured execution environment. |
85
- | `1/inference.py` | The inference script where users write their inference code. |
86
- | `1/model.py` | The triton python backend model file run to serve inference requests. |
87
- | `1/test.py` | Contains some predefined tests in order to test inference implementation and dependencies locally. |
88
-
89
- ## Inference Script
90
-
91
- An `inference.py` script with template code is generated during the triton model repository generation.
92
- **This is the script where users write their inference code**.
93
- This script is composed of a single class that contains a default init method and the `get_predictions()` method whose names mustn't be changed.
94
-
95
- ```python
96
- """User model inference script."""
97
-
98
- import os
99
- from pathlib import Path
100
- from typing import Callable
101
-
102
- class InferenceModel:
103
- """User model inference class."""
104
-
105
- def __init__(self) -> None:
106
- """
107
- Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
108
- in this method so they are loaded only once for faster inference.
109
- """
110
- self.base_path: Path = os.path.dirname(__file__)
111
- ## sample model loading code:
112
- #self.checkpoint_path: Path = os.path.join(self.base_path, "your checkpoint filename/path")
113
- #self.model: Callable = <load_your_model_here from checkpoint or folder>
114
-
115
- #Add relevant model type decorator to the method below (see docs/model_types for ref.)
116
- def get_predictions(self, input_data, **kwargs):
117
- """
118
- Main model inference method.
119
-
120
- Args:
121
- -----
122
- input_data: A single input data item to predict on.
123
- Input data can be an image or text, etc depending on the model type.
124
-
125
- Returns:
126
- --------
127
- One of the clarifai.models.model_serving.models.output types. Refer to the README/docs
128
- """
129
- # Delete/Comment out line below and add your inference code
130
- raise NotImplementedError()
131
- ```
132
-
133
- - `__init__()` used for one-time loading of inference time artifacts such as models, tokenizers, etc that are frequently called during inference to improve inference speed.
134
-
135
- - `get_predictions()` takes an input data item whose type depends on the task the model solves, & returns predictions for an input data item.
136
-
137
- `get_predictions()` should return any of the output types defined under [output](docs/output.md) and the predict function MUST be decorated with a task corresponding [model type decorator](docs/model_types.md). The model type decorators are responsible for passing input request batches for prediction and formatting the resultant predictions into triton inference responses.
138
-
139
- Additional methods can be added to this script's `InferenceModel` class by the user as deemed necessary for their model inference provided they are invoked inside `get_predictions()` if used at inference time.
140
-
141
- ## Next steps
142
-
143
- - [Model types docs](docs/model_types.md)
144
- - [Model Output types docs](docs/output.md)
145
- - [Dependencies](docs/dependencies.md)
146
- - [Examples](examples/)
147
- - [Custom Configs](docs/custom_config.md/)
148
-
149
- ## Prerequisites
150
-
151
- * For deployment to Clarifai, you need a [Clarifai account](https://clarifai.com/signup).
152
-
153
- ## Testing
154
-
155
- * Please see https://github.com/Clarifai/clarifai-python/blob/master/clarifai/models/model_serving/models/test.py
@@ -1,33 +0,0 @@
1
- ## Custom Triton Configurations
2
-
3
- The commandline triton model repository generation utils do work with default values for the various triton configurations but a few of these config values can be modified to suit different task specific needs.
4
-
5
- * For vision models for instance, different input shapes for the `Height (H)` and `Width (W)` are supported and can be set via the commandline too.i.e.
6
- ```console
7
- $ clarifai-model-upload-init --model_name <Your model name> \
8
- --model_type <select model type from available ones> \
9
- --image_shape "H, W"
10
- --repo_dir <directory in which to create your model repository>
11
- ```
12
- `H` and `W` each have a maximum value of 1024.
13
- `--image_shape` accepts both `"H, W"` and `"[H, W]"` format input.
14
-
15
-
16
- ## Generating the triton model repository without the commandline
17
-
18
- The triton model repository can be generated via a python script specifying the same values as required in the commandline. Below is a sample of how the code would be structured with `visual_classifier`.
19
-
20
- ```python
21
- from clarifai.models.model_serving.model_config import get_model_config, ModelTypes, TritonModelConfig
22
- from clarifai.models.model_serving.pb_model_repository import TritonModelRepository
23
-
24
- model_type = ModelTypes.visual_classifier
25
- model_config: TritonModelConfig = get_model_config(model_type).make_triton_model_config(
26
- model_name="<model_name>",
27
- model_version="1",
28
- image_shape=<[H,W]>, # 0 < [H,W] <= 1024
29
- )
30
-
31
- triton_repo = TritonModelRepository(model_config)
32
- triton_repo.build_repository("<dir>")
33
- ```
@@ -1,11 +0,0 @@
1
- ## Inference Execution Environments
2
-
3
- Each model built for inference with triton requires certain dependencies & dependency versions be installed for successful inference execution.
4
- An execution environment is created for each model to be deployed on Clarifai and all necessary dependencies as listed in the `requirements.txt` file are installed there.
5
-
6
- ## Supported python and torch versions
7
-
8
- Currently, models must use python 3.8 (any 3.8.x). Supported torch versions are 1.13.1 and 2.0.1.
9
- If your model depends on torch, torch must be listed in your requirements.txt file (even if it is
10
- already a dependency of another package). An appropriate supported torch version will be selected
11
- based on your requirements.txt.
@@ -1,134 +0,0 @@
1
- ## Inference parameters
2
-
3
- When making a prediction, you may need to change some parameters to adjust the result. Those parameters will be passed through `parameters()` of a request in the triton python model.
4
-
5
- In order to send it to `**kwargs` of `get_predictions` in `inference.py`, you can define some parameters and they will be visible and adjustable on Clarifai model view.
6
-
7
- This document helps you to create your inference parameters so they can be visible and easily adjustable on the Clarifai platform. The defined parameters will be sent as a `json` file when you use the `clarifai-upload-model` cli.
8
-
9
- ### JSON file structure:
10
- The file contains a list of objects, each having 4 fields:
11
- * `path` (str): name of your parameter, it must be valid as python variable
12
- * `field_type` (int): the parameter data type is one of {1,2,21,3}, it means {boolean, string, encrypted_string, number} respectively. `Number` means `int` or `float`. `Encrypted_string` is a string that can be used to store your secrets, like an API key. The API will not return the values for this as plaintext.
13
- * `default_value`: a default value of the parameter.
14
- * `description` (str): short sentence describes what the parameter does
15
-
16
- An example of 4 parameters:
17
- ```json
18
- [
19
- {
20
- "path": "boolean_var",
21
- "field_type": 1,
22
- "default_value": true,
23
- "description": "a boolean variable"
24
- },
25
- {
26
- "path": "string_var",
27
- "field_type": 2,
28
- "default_value": "string_1",
29
- "description": "a string variable"
30
- },
31
- {
32
- "path": "number_var",
33
- "field_type": 3,
34
- "default_value": 9.9,
35
- "description": "a float number variable"
36
- },
37
- {
38
- "path": "secret_string_var",
39
- "field_type": 21,
40
- "default_value": "API_KEY",
41
- "description": "a string variable contains secret like API key"
42
- },
43
- ]
44
- ```
45
-
46
- ### Generate JSON file
47
- 1. Manually create the file based on above structure
48
- 2. By code:
49
-
50
- #### 2.1. Fully setup
51
- ```python
52
- from clarifai.models.model_serving.model_config.inference_parameter import InferParamManager, InferParam, InferParamType
53
-
54
- params = [
55
- InferParam(
56
- path="boolean_var",
57
- field_type=InferParamType.BOOL,
58
- default_value=True,
59
- description="a boolean variable"
60
- ),
61
- InferParam(
62
- path="string_var",
63
- field_type=InferParamType.STRING,
64
- default_value="string_1",
65
- description="a string variable"
66
- ),
67
- InferParam(
68
- path="number_var",
69
- field_type=InferParamType.NUMBER,
70
- default_value=9.9,
71
- description="a float number variable"
72
- ),
73
- InferParam(
74
- path="secret_string_var",
75
- field_type=InferParamType.ENCRYPTED_STRING,
76
- default_value="API_KEY",
77
- description="a string variable contains secret like API key"
78
- ),
79
- ]
80
-
81
- ipm = InferParamManager(params=params)
82
- ipm.export("your_file.json")
83
- ```
84
-
85
- ##### 2.2. Shorten
86
- `NOTE`: in this way `description` field will be set as empty aka "".
87
- *You need to modify* `description` in order to be able to upload the settings to Clarifai.
88
-
89
- `NOTE`: in this way `ENCRYPTED_STRING` type must be defined with "_" prefix
90
-
91
- ```python
92
- params = dict(boolean_var=True, string_var="string_1", number_var=9.9, _secret_string_var="YOUR_KEY")
93
- ipm = InferParamManager.from_kwargs(**params)
94
- ipm.export("your_file.json")
95
-
96
- ```
97
-
98
- 3. In `test.py`. You can define your parameters like `2.2. Shorten` in the `inference_parameters` attribute of `CustomTestInferenceModel`; the file will be generated when you run the test. Keep in mind to change `description`
99
-
100
- ### Usage
101
- Your defined parameters will be passed through `kwargs` of `InferenceModel.get_predictions` method
102
- in `inference.py`
103
- ```python
104
- class InferenceModel:
105
- def __init__():
106
- # initialization
107
- self.model = YourModel()
108
-
109
- @some_wrapper_function
110
- def get_predictions(self, input_data, **kwargs):
111
- # `kwargs` contains your inference parameters
112
-
113
- # get a value from kwargs
114
- number_var = kwargs.get("number_var", 9.9)
115
-
116
- # pass everything to a function
117
- output = self.model.predict(input_data, **kwargs)
118
-
119
- return SomeOutputType(output)
120
-
121
- ```
122
-
123
- in `test.py`
124
- ```python
125
- class CustomTestInferenceModel:
126
- inference_parameters = "" # input a path of json file from `2.1` or a dict from `2.2`
127
-
128
- ...
129
-
130
- def test_something(self):
131
- input = ...
132
- output = self.triton_get_predictions(input, number_var=1, string_var="test", _secret="KEY")
133
- self.assert(...)
134
- ```
@@ -1,20 +0,0 @@
1
- ## Clarifai Model Types
2
-
3
- Models on the clarifai platform are deployed using the [Triton Inference Server Python Backend](https://github.com/triton-inference-server/python_backend) to allow for pre and post processing of data to and from the model.
4
-
5
- Inputs into the models are passed as numpy arrays and the predictions are similarly returned as numpy arrays.
6
- The predictions from user defined models in the [inference script](../README.md#the-inference-script) file have to match certain formats and shapes for the models to be upload compatible.
7
-
8
- Clarifai [model types](../models/model_types.py) are decorator functions that are responsible for passing input batch requests to user defined inference models to get predictions and format the resultant predictions into Triton Inference responses that are sent by the server for each client inference request.
9
-
10
- ## Supported Model Types Wrapper Functions:
11
-
12
- - visual_detector
13
- - visual_classifier
14
- - text_classifier
15
- - text_to_text
16
- - text_embedder
17
- - text_to_image
18
- - visual_embedder
19
- - visual_segmenter
20
- - multimodal_embedder
@@ -1,28 +0,0 @@
1
- ## Clarifai Model Prediction Output Formats.
2
-
3
- Different models return different types of predictions and Clarifai output dataclasses aim at standardizing the output formats per model type for compatibility with the Clarifai API.
4
-
5
- Each machine learning modality supported by the Clarifai API has a predefined dataclass output format with all attributes being of numpy ndarray type.
6
-
7
- ## Supported Formats
8
-
9
- Usage:
10
- ```python
11
- from clarifai.models.model_serving.models.output import VisualDetectorOutput
12
- ```
13
- | Output Type (dataclass) | Attributes | Attribute Data Type| Attribute Shapes | Description |
14
- | --- | --- | --- | --- | --- |
15
- | [VisualDetectorOutput](../models/output.py) | `predicted_bboxes` | float32 | [-1, 4] | A 2D detected bounding boxes array of any length with each element array having a length of exactly 4. All bbox coordinates MUST be normalized between 0 & 1. |
16
- | | `predicted_labels` | int32 | [-1, 1] | A 2D detected labels array of length equal to that of predicted_bboxes with each element array having a length of exactly 1.
17
- | | `predicted_scores` | float32 | [-1, 1] | A 2D detection scores array of length equal to that of predicted_bboxes & predicted_labels with each element array having a length of exactly 1.
18
- | | | | | |
19
- | [ClassifierOutput](../models/output.py) | `predicted_scores` | float32 | [-1] | The softmax of the model's predictions. The index of each predicted probability as returned by the model must correspond to the label index in the labels.txt file |
20
- | | | | | |
21
- | [TextOutput](../models/output.py) | `predicted_text` | string | [1] | Predicted text from a model |
22
- | | | | | |
23
- | [EmbeddingOutput](../models/output.py) | `embedding_vector` | float32 | [-1] | The embedding vector (image or text embedding) returned by a model |
24
- | | | | | |
25
- | [MasksOutput](../models/output.py) | `predicted_mask` | int64 | [-1, -1] | The model predicted image mask. The predicted class indices must be assigned to the corresponding image pixels in the mask where that class is predicted by the model. |
26
- | | | | | |
27
- | [ImageOutput](../models/output.py) | `image` | uint8 | [-1, -1, 3] | The model predicted/generated image |
28
- | | | | | |
@@ -1,7 +0,0 @@
1
- ## Clarifai Model Upload Examples
2
-
3
- A collection of pre-built triton models for different tasks.
4
- To run inference locally using any of the examples here, you need to have the [Triton Inference Server](https://github.com/triton-inference-server/server/blob/main/docs/customization_guide/build.md#building-with-docker) installed.
5
-
6
- Additionally some models may require other files such as checkpoints to be downloaded before testing and/or deployment to Clarifai as they are omitted here due to github file size limits.
7
- See the Readme files under each model to see if there are any additional files required and where to place them.
@@ -1,9 +0,0 @@
1
- ## Image Classification Triton Model Examples
2
-
3
- These can be used on the fly with minimal or no changes to test deploy image classification models to the Clarifai platform. See the required files section for each model below.
4
-
5
- * ### [VIT Age Classifier](./age_vit/)
6
-
7
- Required files to run tests locally:
8
-
9
- * Download the [model checkpoint from huggingface](https://huggingface.co/nateraw/vit-age-classifier/tree/main) and store it under `age_vit/1/vit-age-classifier/`
@@ -1,11 +0,0 @@
1
- ---
2
- tags:
3
- - image-classification
4
- - pytorch
5
- datasets:
6
- - fairface
7
- ---
8
-
9
- ## ViT For Age Classification
10
-
11
- A vision transformer finetuned to classify the age of a given person's face.
@@ -1,42 +0,0 @@
1
- {
2
- "_name_or_path": "google/vit-base-patch16-224-in21k",
3
- "architectures": [
4
- "ViTForImageClassification"
5
- ],
6
- "attention_probs_dropout_prob": 0.0,
7
- "hidden_act": "gelu",
8
- "hidden_dropout_prob": 0.0,
9
- "hidden_size": 768,
10
- "id2label": {
11
- "0": "0-2",
12
- "1": "3-9",
13
- "2": "10-19",
14
- "3": "20-29",
15
- "4": "30-39",
16
- "5": "40-49",
17
- "6": "50-59",
18
- "7": "60-69",
19
- "8": "more than 70"
20
- },
21
- "image_size": 224,
22
- "initializer_range": 0.02,
23
- "intermediate_size": 3072,
24
- "label2id": {
25
- "0-2": 0,
26
- "3-9": 1,
27
- "10-19": 2,
28
- "20-29": 3,
29
- "30-39": 4,
30
- "40-49": 5,
31
- "50-59": 6,
32
- "60-69": 7,
33
- "more than 70": 8
34
- },
35
- "layer_norm_eps": 1e-12,
36
- "model_type": "vit",
37
- "num_attention_heads": 12,
38
- "num_channels": 3,
39
- "num_hidden_layers": 12,
40
- "patch_size": 16,
41
- "transformers_version": "4.5.0.dev0"
42
- }
@@ -1,15 +0,0 @@
1
- {
2
- "do_normalize": true,
3
- "do_resize": true,
4
- "image_mean": [
5
- 0.5,
6
- 0.5,
7
- 0.5
8
- ],
9
- "image_std": [
10
- 0.5,
11
- 0.5,
12
- 0.5
13
- ],
14
- "size": 224
15
- }
@@ -1,23 +0,0 @@
1
- name: "age_vit"
2
- max_batch_size: 1
3
- input {
4
- name: "image"
5
- data_type: TYPE_UINT8
6
- dims: -1
7
- dims: -1
8
- dims: 3
9
- }
10
- output {
11
- name: "softmax_predictions"
12
- data_type: TYPE_FP32
13
- dims: -1
14
- label_filename: "labels.txt"
15
- }
16
- instance_group {
17
- count: 1
18
- kind: KIND_GPU
19
- }
20
- dynamic_batching {
21
- max_queue_delay_microseconds: 500
22
- }
23
- backend: "python"
@@ -1,9 +0,0 @@
1
- 0-2
2
- 3-9
3
- 10-19
4
- 20-29
5
- 30-39
6
- 40-49
7
- 50-59
8
- 60-69
9
- more than 70
@@ -1,7 +0,0 @@
1
- clarifai>9.5.3 # for model upload features
2
- tritonclient[all]
3
- torch==1.13.1
4
- transformers==4.30.2
5
- scipy==1.10.1
6
- sentencepiece==0.1.99
7
- protobuf<4.21.3
@@ -1,9 +0,0 @@
1
- ## Text Classification Triton Model Examples
2
-
3
- These can be used on the fly with minimal or no changes to test deploy text classification models to the Clarifai platform. See the required files section for each model below.
4
-
5
- * ### [XLM-Roberta Tweet Sentiment Classifier](./xlm-roberta/)
6
-
7
- Required files to run tests locally:
8
-
9
- * Download the [model checkpoint & sentencepiece bpe model from huggingface](https://huggingface.co/cardiffnlp/twitter-xlm-roberta-base-sentiment/tree/main) and store it under `xlm-roberta/1/twitter-xlm-roberta-base-sentiment/`
@@ -1,12 +0,0 @@
1
- ---
2
- language: multilingual
3
- ---
4
-
5
-
6
- ## Twitter xlm-roberta-base for sentiment analysis
7
-
8
- This is a multilingual XLM-roBERTa-base model trained on ~198M tweets and finetuned for sentiment analysis. The sentiment fine-tuning was done on 8 languages (Ar, En, Fr, De, Hi, It, Sp, Pt) but it can be used for more languages (see paper for details).
9
-
10
- - Paper: [XLM-T: A Multilingual Language Model Toolkit for Twitter](https://arxiv.org/abs/2104.12250).
11
- - Official Github Repository: [XLM-T official repository](https://github.com/cardiffnlp/xlm-t).
12
- - Sentiment Classes: Negative, Neutral, Positive
@@ -1,34 +0,0 @@
1
- {
2
- "_name_or_path": "/home/jupyter/misc/tweeteval/TweetEval_models/xlm-twitter/local-twitter-xlm-roberta-base-sentiment/",
3
- "architectures": [
4
- "XLMRobertaForSequenceClassification"
5
- ],
6
- "attention_probs_dropout_prob": 0.1,
7
- "bos_token_id": 0,
8
- "eos_token_id": 2,
9
- "gradient_checkpointing": false,
10
- "hidden_act": "gelu",
11
- "hidden_dropout_prob": 0.1,
12
- "hidden_size": 768,
13
- "id2label": {
14
- "0": "negative",
15
- "1": "neutral",
16
- "2": "positive"
17
- },
18
- "initializer_range": 0.02,
19
- "intermediate_size": 3072,
20
- "label2id": {
21
- "negative": 0,
22
- "neutral": 1,
23
- "positive": 2
24
- },
25
- "layer_norm_eps": 1e-05,
26
- "max_position_embeddings": 514,
27
- "model_type": "xlm-roberta",
28
- "num_attention_heads": 12,
29
- "num_hidden_layers": 12,
30
- "output_past": true,
31
- "pad_token_id": 1,
32
- "type_vocab_size": 1,
33
- "vocab_size": 250002
34
- }
@@ -1 +0,0 @@
1
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
@@ -1,21 +0,0 @@
1
- name: "xlm-roberta"
2
- max_batch_size: 1
3
- input {
4
- name: "text"
5
- data_type: TYPE_STRING
6
- dims: 1
7
- }
8
- output {
9
- name: "softmax_predictions"
10
- data_type: TYPE_FP32
11
- dims: -1
12
- label_filename: "labels.txt"
13
- }
14
- instance_group {
15
- count: 1
16
- kind: KIND_GPU
17
- }
18
- dynamic_batching {
19
- max_queue_delay_microseconds: 500
20
- }
21
- backend: "python"
@@ -1,3 +0,0 @@
1
- Negative
2
- Neutral
3
- Positive