clarifai-9.10.1-py3-none-any.whl → clarifai-9.10.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (323)
  1. clarifai/client/__init__.py +3 -2
  2. clarifai/client/app.py +39 -23
  3. clarifai/client/base.py +6 -6
  4. clarifai/client/dataset.py +113 -55
  5. clarifai/client/input.py +47 -55
  6. clarifai/client/model.py +27 -25
  7. clarifai/client/module.py +13 -11
  8. clarifai/client/runner.py +5 -3
  9. clarifai/client/search.py +29 -10
  10. clarifai/client/user.py +14 -8
  11. clarifai/client/workflow.py +22 -20
  12. clarifai/constants/dataset.py +22 -0
  13. clarifai/datasets/upload/base.py +9 -7
  14. clarifai/datasets/upload/features.py +3 -3
  15. clarifai/datasets/upload/image.py +49 -50
  16. clarifai/datasets/upload/loaders/coco_captions.py +26 -80
  17. clarifai/datasets/upload/loaders/coco_detection.py +56 -115
  18. clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
  19. clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
  20. clarifai/datasets/upload/loaders/xview_detection.py +3 -3
  21. clarifai/datasets/upload/text.py +16 -16
  22. clarifai/datasets/upload/utils.py +196 -21
  23. clarifai/utils/misc.py +21 -0
  24. clarifai/versions.py +1 -1
  25. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
  26. clarifai-9.10.3.dist-info/RECORD +96 -0
  27. clarifai-9.10.3.dist-info/top_level.txt +1 -0
  28. clarifai/auth/__init__.py +0 -6
  29. clarifai/auth/helper.py +0 -367
  30. clarifai/auth/register.py +0 -23
  31. clarifai/auth/stub.py +0 -127
  32. clarifai/datasets/upload/examples/README.md +0 -31
  33. clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
  34. clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  35. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  36. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  37. clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  38. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  39. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  40. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  41. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  42. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  43. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  44. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  45. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  46. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  47. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  48. clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  49. clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  50. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  51. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  52. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  53. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  54. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  55. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  56. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  57. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  58. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  59. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  60. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  61. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  62. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  63. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  64. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  65. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  66. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  67. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  68. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  69. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  70. clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
  71. clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  72. clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  73. clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  74. clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  75. clarifai/datasets/upload/loaders/README.md +0 -49
  76. clarifai/models/model_serving/README.md +0 -155
  77. clarifai/models/model_serving/docs/custom_config.md +0 -33
  78. clarifai/models/model_serving/docs/dependencies.md +0 -11
  79. clarifai/models/model_serving/docs/inference_parameters.md +0 -134
  80. clarifai/models/model_serving/docs/model_types.md +0 -20
  81. clarifai/models/model_serving/docs/output.md +0 -28
  82. clarifai/models/model_serving/examples/README.md +0 -7
  83. clarifai/models/model_serving/examples/image_classification/README.md +0 -9
  84. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  85. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  86. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  87. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  88. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  89. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  90. clarifai/models/model_serving/examples/text_classification/README.md +0 -9
  91. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  92. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  93. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  94. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  95. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  96. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  97. clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
  98. clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
  99. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  100. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  101. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  102. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  104. clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
  105. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  106. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  107. clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
  108. clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  109. clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  110. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  111. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
  112. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  113. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  114. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
  115. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  116. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  117. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  118. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  119. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  120. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  121. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  122. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  123. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  124. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  125. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  126. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  127. clarifai/modules/README.md +0 -5
  128. clarifai/modules/style.css +0 -217
  129. clarifai-9.10.1.dist-info/RECORD +0 -386
  130. clarifai-9.10.1.dist-info/top_level.txt +0 -2
  131. clarifai_utils/__init__.py +0 -0
  132. clarifai_utils/auth/__init__.py +0 -6
  133. clarifai_utils/auth/helper.py +0 -367
  134. clarifai_utils/auth/register.py +0 -23
  135. clarifai_utils/auth/stub.py +0 -127
  136. clarifai_utils/cli.py +0 -0
  137. clarifai_utils/client/__init__.py +0 -16
  138. clarifai_utils/client/app.py +0 -684
  139. clarifai_utils/client/auth/__init__.py +0 -4
  140. clarifai_utils/client/auth/helper.py +0 -367
  141. clarifai_utils/client/auth/register.py +0 -23
  142. clarifai_utils/client/auth/stub.py +0 -127
  143. clarifai_utils/client/base.py +0 -131
  144. clarifai_utils/client/dataset.py +0 -442
  145. clarifai_utils/client/input.py +0 -892
  146. clarifai_utils/client/lister.py +0 -54
  147. clarifai_utils/client/model.py +0 -575
  148. clarifai_utils/client/module.py +0 -94
  149. clarifai_utils/client/runner.py +0 -161
  150. clarifai_utils/client/search.py +0 -239
  151. clarifai_utils/client/user.py +0 -253
  152. clarifai_utils/client/workflow.py +0 -223
  153. clarifai_utils/constants/model.py +0 -4
  154. clarifai_utils/constants/search.py +0 -2
  155. clarifai_utils/datasets/__init__.py +0 -0
  156. clarifai_utils/datasets/export/__init__.py +0 -0
  157. clarifai_utils/datasets/export/inputs_annotations.py +0 -222
  158. clarifai_utils/datasets/upload/__init__.py +0 -0
  159. clarifai_utils/datasets/upload/base.py +0 -66
  160. clarifai_utils/datasets/upload/examples/README.md +0 -31
  161. clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
  162. clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  163. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  164. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  165. clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  166. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  167. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  168. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  169. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  170. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  171. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  172. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  173. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  174. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  175. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  176. clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  177. clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  178. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  179. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  180. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  181. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  182. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  183. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  184. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  185. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  186. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  187. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  188. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  189. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  190. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  191. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  192. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  193. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  194. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  195. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  196. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  197. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  198. clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
  199. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  200. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  201. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  202. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  203. clarifai_utils/datasets/upload/features.py +0 -44
  204. clarifai_utils/datasets/upload/image.py +0 -165
  205. clarifai_utils/datasets/upload/loaders/README.md +0 -49
  206. clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
  207. clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
  208. clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
  209. clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
  210. clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
  211. clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
  212. clarifai_utils/datasets/upload/text.py +0 -53
  213. clarifai_utils/datasets/upload/utils.py +0 -63
  214. clarifai_utils/errors.py +0 -89
  215. clarifai_utils/models/__init__.py +0 -0
  216. clarifai_utils/models/api.py +0 -283
  217. clarifai_utils/models/model_serving/README.md +0 -155
  218. clarifai_utils/models/model_serving/__init__.py +0 -12
  219. clarifai_utils/models/model_serving/cli/__init__.py +0 -12
  220. clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
  221. clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
  222. clarifai_utils/models/model_serving/cli/repository.py +0 -87
  223. clarifai_utils/models/model_serving/constants.py +0 -1
  224. clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
  225. clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
  226. clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
  227. clarifai_utils/models/model_serving/docs/model_types.md +0 -20
  228. clarifai_utils/models/model_serving/docs/output.md +0 -28
  229. clarifai_utils/models/model_serving/examples/README.md +0 -7
  230. clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
  231. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  232. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
  233. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
  234. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  235. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  236. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  237. clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  238. clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  239. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  240. clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
  241. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  242. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
  243. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
  244. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  245. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  246. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  247. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  248. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  249. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  250. clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
  251. clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
  252. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  253. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  254. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  255. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  256. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  257. clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
  258. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  259. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
  260. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
  261. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  262. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  263. clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
  264. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
  265. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
  266. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  267. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  268. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  269. clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
  270. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  271. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
  272. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
  273. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  274. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  275. clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
  276. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  277. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
  278. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
  279. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  280. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  281. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  282. clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
  283. clarifai_utils/models/model_serving/model_config/config.py +0 -302
  284. clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
  285. clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  286. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  287. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  288. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  289. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  290. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  291. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  292. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  293. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  294. clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
  295. clarifai_utils/models/model_serving/models/__init__.py +0 -12
  296. clarifai_utils/models/model_serving/models/default_test.py +0 -275
  297. clarifai_utils/models/model_serving/models/inference.py +0 -42
  298. clarifai_utils/models/model_serving/models/model_types.py +0 -265
  299. clarifai_utils/models/model_serving/models/output.py +0 -124
  300. clarifai_utils/models/model_serving/models/pb_model.py +0 -74
  301. clarifai_utils/models/model_serving/models/test.py +0 -64
  302. clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
  303. clarifai_utils/modules/README.md +0 -5
  304. clarifai_utils/modules/__init__.py +0 -0
  305. clarifai_utils/modules/css.py +0 -60
  306. clarifai_utils/modules/pages.py +0 -42
  307. clarifai_utils/modules/style.css +0 -217
  308. clarifai_utils/runners/__init__.py +0 -0
  309. clarifai_utils/runners/example.py +0 -33
  310. clarifai_utils/schema/search.py +0 -69
  311. clarifai_utils/urls/helper.py +0 -103
  312. clarifai_utils/utils/__init__.py +0 -0
  313. clarifai_utils/utils/logging.py +0 -90
  314. clarifai_utils/utils/misc.py +0 -33
  315. clarifai_utils/utils/model_train.py +0 -157
  316. clarifai_utils/versions.py +0 -6
  317. clarifai_utils/workflows/__init__.py +0 -0
  318. clarifai_utils/workflows/export.py +0 -68
  319. clarifai_utils/workflows/utils.py +0 -59
  320. clarifai_utils/workflows/validate.py +0 -67
  321. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
  322. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
  323. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt
@@ -1,7 +0,0 @@
- clarifai>9.5.3 # for model upload features
- tritonclient[all]
- torch==1.13.1
- transformers==4.30.2
- scipy==1.10.1
- sentencepiece==0.1.99
- protobuf<4.21.3
clarifai/models/model_serving/examples/text_embedding/README.md
@@ -1,9 +0,0 @@
- ## Text Embedding Triton Model Examples
-
- These can be used on the fly with minimal or no changes to test deploy text embedding models to the Clarifai platform. See the required files section for each model below.
-
- * ### [Instructor-xl](https://huggingface.co/hkunlp/instructor-xl)
-
- Requirements to run tests locally:
-
- * Download/Clone the [huggingface model](https://huggingface.co/hkunlp/instructor-xl) into the **instructor-xl/1/** directory then start the triton server.
clarifai/models/model_serving/examples/text_to_image/README.md
@@ -1,9 +0,0 @@
- ## Text to Image Triton Model Examples
-
- These can be used on the fly with minimal or no changes to test deploy text to image models to the Clarifai platform. See the required files section for each model below.
-
- * ### [sd-v1.5 (Stable-Diffusion-v1.5)](./sd-v1.5/)
-
- Requirements to run tests locally:
-
- * Download/Clone the [huggingface model](https://huggingface.co/runwayml/stable-diffusion-v1-5) into the **sd-v1.5/1/** directory then start the triton server.
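Note: the weights the deleted README pointed at can also be fetched programmatically instead of cloned by hand. A minimal sketch, assuming huggingface_hub is installed; the local_dir layout is taken from the README and inference.py below:

# Hypothetical fetch of the Stable Diffusion weights the removed README referenced.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="runwayml/stable-diffusion-v1-5",
    local_dir="sd-v1.5/1/stable-diffusion-v1-5",  # layout assumed from the README
)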
clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py
@@ -1,52 +0,0 @@
- # This file contains boilerplate code to allow users write their model
- # inference code that will then interact with the Triton Inference Server
- # Python backend to serve end user requests.
- # The module name, module path, class name & get_predictions() method names MUST be maintained as is
- # but other methods may be added within the class as deemed fit provided
- # they are invoked within the main get_predictions() inference method
- # if they play a role in any step of model inference
- """User model inference script."""
-
- import os
- from pathlib import Path
-
- import numpy as np
- import torch
- from diffusers import StableDiffusionPipeline
-
- from clarifai.models.model_serving.models.model_types import text_to_image
- from clarifai.models.model_serving.models.output import ImageOutput
-
-
- class InferenceModel:
-   """User model inference class."""
-
-   def __init__(self) -> None:
-     """
-     Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
-     in this method so they are loaded only once for faster inference.
-     """
-     self.base_path: Path = os.path.dirname(__file__)
-     self.huggingface_model_path = os.path.join(self.base_path, "stable-diffusion-v1-5")
-     self.device = "cuda" if torch.cuda.is_available() else "cpu"
-     self.pipeline = StableDiffusionPipeline.from_pretrained(
-         self.huggingface_model_path, torch_dtype=torch.float16)
-     self.pipeline = self.pipeline.to(self.device)
-
-   @text_to_image
-   def get_predictions(self, input_data):
-     """
-     Main model inference method.
-
-     Args:
-     -----
-     input_data: A single input data item to predict on.
-       Input data can be an image or text, etc depending on the model type.
-
-     Returns:
-     --------
-     One of the clarifai.models.model_serving.models.output types. Refer to the README/docs
-     """
-     out_image = self.pipeline(input_data).images[0]
-     out_image = np.asarray(out_image)
-     return ImageOutput(image=out_image)
clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py
@@ -1,60 +0,0 @@
- # Copyright 2023 Clarifai, Inc.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Triton inference server Python Backend Model."""
-
- import os
- import sys
-
- try:
-   import triton_python_backend_utils as pb_utils
- except ModuleNotFoundError:
-   pass
- from google.protobuf import text_format
- from tritonclient.grpc.model_config_pb2 import ModelConfig
-
-
- class TritonPythonModel:
-   """
-   Triton Python BE Model.
-   """
-
-   def initialize(self, args):
-     """
-     Triton server init.
-     """
-     args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
-     sys.path.append(os.path.dirname(__file__))
-     from inference import InferenceModel
-
-     self.inference_obj = InferenceModel()
-
-     # Read input_name from config file
-     self.config_msg = ModelConfig()
-     with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
-       cfg = f.read()
-     text_format.Merge(cfg, self.config_msg)
-     self.input_name = [inp.name for inp in self.config_msg.input][0]
-
-   def execute(self, requests):
-     """
-     Serve model inference requests.
-     """
-     responses = []
-
-     for request in requests:
-       in_batch = pb_utils.get_input_tensor_by_name(request, self.input_name)
-       in_batch = in_batch.as_numpy()
-       inference_response = self.inference_obj.get_predictions(in_batch)
-       responses.append(inference_response)
-
-     return responses
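Note that execute() appends whatever get_predictions returns straight into responses, so the @text_to_image wrapper (defined in the likewise-removed models/model_types.py) must convert the ImageOutput into a Triton response object. A rough sketch of that conversion under the standard Triton Python-backend API; this is a plausible reconstruction, not the wrapper's actual code:

# Sketch only: plausible ImageOutput -> Triton response conversion.
# pb_utils is importable only inside a running Triton Python backend.
import triton_python_backend_utils as pb_utils

def to_triton_response(image_output):
  # "image" matches the output tensor name declared in config.pbtxt below.
  tensor = pb_utils.Tensor("image", image_output.image)
  return pb_utils.InferenceResponse(output_tensors=[tensor])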
clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt
@@ -1,22 +0,0 @@
- name: "sd-v1.5"
- max_batch_size: 1
- input {
-   name: "text"
-   data_type: TYPE_STRING
-   dims: 1
- }
- output {
-   name: "image"
-   data_type: TYPE_UINT8
-   dims: -1
-   dims: -1
-   dims: 3
- }
- instance_group {
-   count: 1
-   kind: KIND_GPU
- }
- dynamic_batching {
-   max_queue_delay_microseconds: 500
- }
- backend: "python"
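Once a server is running with this repository, the declared "text" input and "image" output can be exercised with the standard Triton gRPC client. A minimal sketch, assuming a server listening on localhost:8001; TYPE_STRING tensors are sent as BYTES, and the prompt text is illustrative:

# Hedged client-side sketch for the sd-v1.5 config above.
import numpy as np
import tritonclient.grpc as grpcclient

client = grpcclient.InferenceServerClient(url="localhost:8001")
# Shape is [batch, 1]: max_batch_size is 1 and the input declares dims: 1.
text = grpcclient.InferInput("text", [1, 1], "BYTES")
text.set_data_from_numpy(np.array([["a photo of an astronaut"]], dtype=object))
result = client.infer(model_name="sd-v1.5", inputs=[text])
image = result.as_numpy("image")  # uint8 array, shape [1, H, W, 3]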
clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt
@@ -1,6 +0,0 @@
- clarifai>9.5.3
- tritonclient[all]
- torch==1.13.1
- transformers==4.30.2
- Pillow==10.0.0
- diffusers==0.19.0
clarifai/models/model_serving/examples/text_to_text/README.md
@@ -1,10 +0,0 @@
- ## Text-to-Text Triton Model Examples
-
- These can be used on the fly with minimal or no changes to test deploy all models that take a text input and yield a text output prediction e.g. text generation, summarization and translation models to the Clarifai platform. See the required files section for each model below.
-
- * ### [Bart-paper2slides-summarizer](https://huggingface.co/com3dian/Bart-large-paper2slides-summarizer)
-
- Requirements to run tests locally:
-
- * Download/Clone the [huggingface model](https://huggingface.co/com3dian/Bart-large-paper2slides-summarizer) and store it under the **bart-summarize/1/** directory.
- * Rename the downloaded folder to **bart-large-summarizer** OR change the **self.huggingface_model_path** attribute in the [inference.py script](./bart-summarize/1/inference.py) to match the folder name
clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt
@@ -1,20 +0,0 @@
- name: "bart-summarize"
- max_batch_size: 1
- input {
-   name: "text"
-   data_type: TYPE_STRING
-   dims: 1
- }
- output {
-   name: "text"
-   data_type: TYPE_STRING
-   dims: 1
- }
- instance_group {
-   count: 1
-   kind: KIND_GPU
- }
- dynamic_batching {
-   max_queue_delay_microseconds: 500
- }
- backend: "python"
clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt
@@ -1,4 +0,0 @@
- clarifai>9.5.3
- tritonclient[all]
- torch==1.13.1
- transformers==4.30.2
clarifai/models/model_serving/examples/visual_detection/README.md
@@ -1,11 +0,0 @@
- ## Visual Detection Triton Model Examples
-
- These can be used on the fly with minimal or no changes to test deploy visual detection models to the Clarifai platform. See the required files section for each model below.
-
- * ### [Yolov5x](./yolov5x/)
-
- Required files (not included here due to upload size limits):
-
- * Download the yolov5x folder from above.
- * Download the `Yolov5 repo` and the `yolov5-x checkpoint` and store them under the `1/` directory of the yolov5x folder.
- * zip and test deploy to your Clarifai app
clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt
@@ -1,36 +0,0 @@
- name: "yolov5_test"
- max_batch_size: 1
- input {
-   name: "image"
-   data_type: TYPE_UINT8
-   dims: -1
-   dims: -1
-   dims: 3
- }
- output {
-   name: "predicted_bboxes"
-   data_type: TYPE_FP32
-   dims: -1
-   dims: 4
- }
- output {
-   name: "predicted_labels"
-   data_type: TYPE_INT32
-   dims: -1
-   dims: 1
-   label_filename: "labels.txt"
- }
- output {
-   name: "predicted_scores"
-   data_type: TYPE_FP32
-   dims: -1
-   dims: 1
- }
- instance_group {
-   count: 1
-   kind: KIND_GPU
- }
- dynamic_batching {
-   max_queue_delay_microseconds: 500
- }
- backend: "python"
clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt
@@ -1,80 +0,0 @@
- person
- bicycle
- car
- motorcycle
- airplane
- bus
- train
- truck
- boat
- traffic-light
- fire-hydrant
- stop-sign
- parking-meter
- bench
- bird
- cat
- dog
- horse
- sheep
- cow
- elephant
- bear
- zebra
- giraffe
- backpack
- umbrella
- handbag
- tie
- suitcase
- frisbee
- skis
- snowboard
- sports-ball
- kite
- baseball-bat
- baseball-glove
- skateboard
- surfboard
- tennis-racket
- bottle
- wine-glass
- cup
- fork
- knife
- spoon
- bowl
- banana
- apple
- sandwich
- orange
- broccoli
- carrot
- hot-dog
- pizza
- donut
- cake
- chair
- couch
- potted-plant
- bed
- dining-table
- toilet
- tv
- laptop
- mouse
- remote
- keyboard
- cell-phone
- microwave
- oven
- toaster
- sink
- refrigerator
- book
- clock
- vase
- scissors
- teddy-bear
- hair-drier
- toothbrush
clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt
@@ -1,12 +0,0 @@
- # YOLOv5 requirements
- tritonclient[all]
- clarifai>9.5.3 # for model upload features
- matplotlib>=3.2.2
- opencv-python>=4.1.1
- Pillow>=7.1.2
- PyYAML>=5.3.1
- torch>=1.7.0,<2.0
- torchvision>=0.8.1
- protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012
- pandas>=1.1.4
- seaborn>=0.11.0
clarifai/models/model_serving/examples/visual_embedding/README.md
@@ -1,9 +0,0 @@
- ## Visual Embedding Triton Model Examples
-
- These can be used on the fly with minimal or no changes to test deploy visual embedding models to the Clarifai platform. See the required files section for each model below.
-
- * ### [vit-base](./vit-base/)
-
- Requirements to run tests locally:
-
- * Download/Clone the [huggingface model](https://huggingface.co/google/vit-base-patch16-224) into the **vit-base/1/** directory then start the triton server.
clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt
@@ -1,22 +0,0 @@
- name: "vit-base"
- max_batch_size: 1
- input {
-   name: "image"
-   data_type: TYPE_UINT8
-   dims: -1
-   dims: -1
-   dims: 3
- }
- output {
-   name: "embeddings"
-   data_type: TYPE_FP32
-   dims: -1
- }
- instance_group {
-   count: 1
-   kind: KIND_GPU
- }
- dynamic_batching {
-   max_queue_delay_microseconds: 500
- }
- backend: "python"
clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt
@@ -1,5 +0,0 @@
- clarifai>9.5.3
- tritonclient[all]
- torch==1.13.1
- transformers==4.30.2
- Pillow==10.0.0
clarifai/models/model_serving/examples/visual_segmentation/README.md
@@ -1,9 +0,0 @@
- ## Visual Segmentation Triton Model Examples
-
- These can be used on the fly with minimal or no changes to test deploy visual segmentation models to the Clarifai platform. See the required files section for each model below.
-
- * ### [segformer-b2](./segformer-b2/)
-
- Requirements to run tests locally:
-
- * Download/Clone the [huggingface model](https://huggingface.co/mattmdjaga/segformer_b2_clothes) into the **segformer-b2/1/** directory then start the triton server.
clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt
@@ -1,24 +0,0 @@
- name: "segformer-b2"
- max_batch_size: 1
- input {
-   name: "image"
-   data_type: TYPE_UINT8
-   dims: -1
-   dims: -1
-   dims: 3
- }
- output {
-   name: "predicted_mask"
-   data_type: TYPE_INT64
-   dims: -1
-   dims: -1
-   label_filename: "labels.txt"
- }
- instance_group {
-   count: 1
-   kind: KIND_GPU
- }
- dynamic_batching {
-   max_queue_delay_microseconds: 500
- }
- backend: "python"
clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt
@@ -1,18 +0,0 @@
- background
- hat
- hair
- sunglass
- upper-clothes
- skirt
- pants
- dress
- belt
- left-shoe
- right-shoe
- face
- left-leg
- right-leg
- left-arm
- right-arm
- bag
- scarf
clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt
@@ -1,5 +0,0 @@
- torch==1.13.1
- clarifai>9.5.3
- tritonclient[all]
- transformers==4.30.2
- Pillow==10.0.0
clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml
@@ -1,24 +0,0 @@
- triton:
-   input:
-     - name: image
-       data_type: TYPE_UINT8
-       dims: [-1, -1, 3]
-       optional: true
-     - name: text
-       data_type: TYPE_STRING
-       dims: [1]
-       optional: true
-   output:
-     - name: embeddings
-       data_type: TYPE_FP32
-       dims: [-1]
-       labels: false
- inference:
-   wrap_func: multimodal_embedder
-   return_type: EmbeddingOutput
- field_maps:
-   input_fields_map:
-     image: image
-     text: text
-   output_fields_map:
-     embeddings: embeddings
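These per-model-type YAML files drove how the serving package generated config.pbtxt and wired wrapper functions to output types. A sketch of reading one, assuming the top-level triton / inference / field_maps layout reconstructed above and a standard PyYAML install:

# Illustrative reader for the model-type YAML schema shown above.
import yaml

with open("multimodal-embedder.yaml") as f:
  cfg = yaml.safe_load(f)

input_names = [inp["name"] for inp in cfg["triton"]["input"]]  # ['image', 'text']
wrap_func = cfg["inference"]["wrap_func"]                      # 'multimodal_embedder'
out_map = cfg["field_maps"]["output_fields_map"]               # {'embeddings': 'embeddings'}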
clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml
@@ -1,18 +0,0 @@
- triton:
-   input:
-     - name: text
-       data_type: TYPE_STRING
-       dims: [1]
-   output:
-     - name: softmax_predictions
-       data_type: TYPE_FP32
-       dims: [-1]
-       labels: true
- inference:
-   wrap_func: text_classifier
-   return_type: ClassifierOutput
- field_maps:
-   input_fields_map:
-     text: text
-   output_fields_map:
-     concepts: softmax_predictions
clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml
@@ -1,18 +0,0 @@
- triton:
-   input:
-     - name: text
-       data_type: TYPE_STRING
-       dims: [1]
-   output:
-     - name: embeddings
-       data_type: TYPE_FP32
-       dims: [-1]
-       labels: false
- inference:
-   wrap_func: text_embedder
-   return_type: EmbeddingOutput
- field_maps:
-   input_fields_map:
-     text: text
-   output_fields_map:
-     embeddings: embeddings
clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml
@@ -1,18 +0,0 @@
- triton:
-   input:
-     - name: text
-       data_type: TYPE_STRING
-       dims: [1]
-   output:
-     - name: image
-       data_type: TYPE_UINT8
-       dims: [-1, -1, 3]
-       labels: false
- inference:
-   wrap_func: text_to_image
-   return_type: ImageOutput
- field_maps:
-   input_fields_map:
-     text: text
-   output_fields_map:
-     image: image
clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml
@@ -1,18 +0,0 @@
- triton:
-   input:
-     - name: text
-       data_type: TYPE_STRING
-       dims: [1]
-   output:
-     - name: text
-       data_type: TYPE_STRING
-       dims: [1]
-       labels: false
- inference:
-   wrap_func: text_to_text
-   return_type: TextOutput
- field_maps:
-   input_fields_map:
-     text: text
-   output_fields_map:
-     text: text
clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml
@@ -1,18 +0,0 @@
- triton:
-   input:
-     - name: image
-       data_type: TYPE_UINT8
-       dims: [-1, -1, 3]
-   output:
-     - name: softmax_predictions
-       data_type: TYPE_FP32
-       dims: [-1]
-       labels: true
- inference:
-   wrap_func: visual_classifier
-   return_type: ClassifierOutput
- field_maps:
-   input_fields_map:
-     image: image
-   output_fields_map:
-     concepts: softmax_predictions
clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml
@@ -1,28 +0,0 @@
- triton:
-   input:
-     - name: image
-       data_type: TYPE_UINT8
-       dims: [-1, -1, 3]
-   output:
-     - name: predicted_bboxes
-       data_type: TYPE_FP32
-       dims: [-1, 4]
-       labels: false
-     - name: predicted_labels
-       data_type: TYPE_INT32
-       dims: [-1, 1]
-       labels: true
-     - name: predicted_scores
-       data_type: TYPE_FP32
-       dims: [-1, 1]
-       labels: false
- inference:
-   wrap_func: visual_detector
-   return_type: VisualDetectorOutput
- field_maps:
-   input_fields_map:
-     image: image
-   output_fields_map:
-     "regions[...].region_info.bounding_box": "predicted_bboxes"
-     "regions[...].data.concepts[...].id": "predicted_labels"
-     "regions[...].data.concepts[...].value": "predicted_scores"
clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml
@@ -1,18 +0,0 @@
- triton:
-   input:
-     - name: image
-       data_type: TYPE_UINT8
-       dims: [-1, -1, 3]
-   output:
-     - name: embeddings
-       data_type: TYPE_FP32
-       dims: [-1]
-       labels: false
- inference:
-   wrap_func: visual_embedder
-   return_type: EmbeddingOutput
- field_maps:
-   input_fields_map:
-     image: image
-   output_fields_map:
-     embeddings: embeddings
clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml
@@ -1,18 +0,0 @@
- triton:
-   input:
-     - name: image
-       data_type: TYPE_UINT8
-       dims: [-1, -1, 3]
-   output:
-     - name: predicted_mask
-       data_type: TYPE_INT64
-       dims: [-1, -1]
-       labels: true
- inference:
-   wrap_func: visual_segmenter
-   return_type: MasksOutput
- field_maps:
-   input_fields_map:
-     image: image
-   output_fields_map:
-     "regions[...].region_info.mask,regions[...].data.concepts": "predicted_mask"
clarifai/modules/README.md
@@ -1,5 +0,0 @@
- # Module Utils
-
- Additional helper functions for creating Clarifai Modules should be placed here so that they can be reused across modules.
-
- This should still not import streamlit as we want to keep clarifai-python-utils lightweight. If you find we need utilities for streamlit itself we should start a new repo for that. Please contact support@clarifai.com to do so.