clarifai 9.10.1__py3-none-any.whl → 9.10.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (323)
  1. clarifai/client/__init__.py +3 -2
  2. clarifai/client/app.py +39 -23
  3. clarifai/client/base.py +6 -6
  4. clarifai/client/dataset.py +113 -55
  5. clarifai/client/input.py +47 -55
  6. clarifai/client/model.py +27 -25
  7. clarifai/client/module.py +13 -11
  8. clarifai/client/runner.py +5 -3
  9. clarifai/client/search.py +29 -10
  10. clarifai/client/user.py +14 -8
  11. clarifai/client/workflow.py +22 -20
  12. clarifai/constants/dataset.py +22 -0
  13. clarifai/datasets/upload/base.py +9 -7
  14. clarifai/datasets/upload/features.py +3 -3
  15. clarifai/datasets/upload/image.py +49 -50
  16. clarifai/datasets/upload/loaders/coco_captions.py +26 -80
  17. clarifai/datasets/upload/loaders/coco_detection.py +56 -115
  18. clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
  19. clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
  20. clarifai/datasets/upload/loaders/xview_detection.py +3 -3
  21. clarifai/datasets/upload/text.py +16 -16
  22. clarifai/datasets/upload/utils.py +196 -21
  23. clarifai/utils/misc.py +21 -0
  24. clarifai/versions.py +1 -1
  25. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
  26. clarifai-9.10.3.dist-info/RECORD +96 -0
  27. clarifai-9.10.3.dist-info/top_level.txt +1 -0
  28. clarifai/auth/__init__.py +0 -6
  29. clarifai/auth/helper.py +0 -367
  30. clarifai/auth/register.py +0 -23
  31. clarifai/auth/stub.py +0 -127
  32. clarifai/datasets/upload/examples/README.md +0 -31
  33. clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
  34. clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  35. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  36. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  37. clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  38. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  39. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  40. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  41. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  42. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  43. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  44. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  45. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  46. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  47. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  48. clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  49. clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  50. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  51. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  52. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  53. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  54. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  55. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  56. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  57. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  58. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  59. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  60. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  61. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  62. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  63. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  64. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  65. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  66. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  67. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  68. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  69. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  70. clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
  71. clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  72. clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  73. clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  74. clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  75. clarifai/datasets/upload/loaders/README.md +0 -49
  76. clarifai/models/model_serving/README.md +0 -155
  77. clarifai/models/model_serving/docs/custom_config.md +0 -33
  78. clarifai/models/model_serving/docs/dependencies.md +0 -11
  79. clarifai/models/model_serving/docs/inference_parameters.md +0 -134
  80. clarifai/models/model_serving/docs/model_types.md +0 -20
  81. clarifai/models/model_serving/docs/output.md +0 -28
  82. clarifai/models/model_serving/examples/README.md +0 -7
  83. clarifai/models/model_serving/examples/image_classification/README.md +0 -9
  84. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  85. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  86. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  87. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  88. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  89. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  90. clarifai/models/model_serving/examples/text_classification/README.md +0 -9
  91. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  92. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  93. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  94. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  95. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  96. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  97. clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
  98. clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
  99. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  100. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  101. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  102. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  104. clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
  105. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  106. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  107. clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
  108. clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  109. clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  110. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  111. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
  112. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  113. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  114. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
  115. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  116. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  117. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  118. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  119. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  120. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  121. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  122. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  123. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  124. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  125. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  126. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  127. clarifai/modules/README.md +0 -5
  128. clarifai/modules/style.css +0 -217
  129. clarifai-9.10.1.dist-info/RECORD +0 -386
  130. clarifai-9.10.1.dist-info/top_level.txt +0 -2
  131. clarifai_utils/__init__.py +0 -0
  132. clarifai_utils/auth/__init__.py +0 -6
  133. clarifai_utils/auth/helper.py +0 -367
  134. clarifai_utils/auth/register.py +0 -23
  135. clarifai_utils/auth/stub.py +0 -127
  136. clarifai_utils/cli.py +0 -0
  137. clarifai_utils/client/__init__.py +0 -16
  138. clarifai_utils/client/app.py +0 -684
  139. clarifai_utils/client/auth/__init__.py +0 -4
  140. clarifai_utils/client/auth/helper.py +0 -367
  141. clarifai_utils/client/auth/register.py +0 -23
  142. clarifai_utils/client/auth/stub.py +0 -127
  143. clarifai_utils/client/base.py +0 -131
  144. clarifai_utils/client/dataset.py +0 -442
  145. clarifai_utils/client/input.py +0 -892
  146. clarifai_utils/client/lister.py +0 -54
  147. clarifai_utils/client/model.py +0 -575
  148. clarifai_utils/client/module.py +0 -94
  149. clarifai_utils/client/runner.py +0 -161
  150. clarifai_utils/client/search.py +0 -239
  151. clarifai_utils/client/user.py +0 -253
  152. clarifai_utils/client/workflow.py +0 -223
  153. clarifai_utils/constants/model.py +0 -4
  154. clarifai_utils/constants/search.py +0 -2
  155. clarifai_utils/datasets/__init__.py +0 -0
  156. clarifai_utils/datasets/export/__init__.py +0 -0
  157. clarifai_utils/datasets/export/inputs_annotations.py +0 -222
  158. clarifai_utils/datasets/upload/__init__.py +0 -0
  159. clarifai_utils/datasets/upload/base.py +0 -66
  160. clarifai_utils/datasets/upload/examples/README.md +0 -31
  161. clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
  162. clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  163. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  164. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  165. clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  166. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  167. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  168. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  169. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  170. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  171. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  172. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  173. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  174. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  175. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  176. clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  177. clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  178. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  179. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  180. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  181. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  182. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  183. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  184. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  185. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  186. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  187. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  188. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  189. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  190. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  191. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  192. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  193. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  194. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  195. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  196. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  197. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  198. clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
  199. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  200. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  201. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  202. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  203. clarifai_utils/datasets/upload/features.py +0 -44
  204. clarifai_utils/datasets/upload/image.py +0 -165
  205. clarifai_utils/datasets/upload/loaders/README.md +0 -49
  206. clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
  207. clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
  208. clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
  209. clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
  210. clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
  211. clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
  212. clarifai_utils/datasets/upload/text.py +0 -53
  213. clarifai_utils/datasets/upload/utils.py +0 -63
  214. clarifai_utils/errors.py +0 -89
  215. clarifai_utils/models/__init__.py +0 -0
  216. clarifai_utils/models/api.py +0 -283
  217. clarifai_utils/models/model_serving/README.md +0 -155
  218. clarifai_utils/models/model_serving/__init__.py +0 -12
  219. clarifai_utils/models/model_serving/cli/__init__.py +0 -12
  220. clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
  221. clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
  222. clarifai_utils/models/model_serving/cli/repository.py +0 -87
  223. clarifai_utils/models/model_serving/constants.py +0 -1
  224. clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
  225. clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
  226. clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
  227. clarifai_utils/models/model_serving/docs/model_types.md +0 -20
  228. clarifai_utils/models/model_serving/docs/output.md +0 -28
  229. clarifai_utils/models/model_serving/examples/README.md +0 -7
  230. clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
  231. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  232. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
  233. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
  234. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  235. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  236. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  237. clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  238. clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  239. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  240. clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
  241. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  242. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
  243. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
  244. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  245. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  246. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  247. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  248. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  249. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  250. clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
  251. clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
  252. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  253. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  254. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  255. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  256. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  257. clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
  258. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  259. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
  260. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
  261. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  262. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  263. clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
  264. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
  265. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
  266. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  267. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  268. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  269. clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
  270. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  271. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
  272. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
  273. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  274. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  275. clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
  276. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  277. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
  278. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
  279. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  280. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  281. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  282. clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
  283. clarifai_utils/models/model_serving/model_config/config.py +0 -302
  284. clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
  285. clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  286. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  287. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  288. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  289. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  290. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  291. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  292. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  293. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  294. clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
  295. clarifai_utils/models/model_serving/models/__init__.py +0 -12
  296. clarifai_utils/models/model_serving/models/default_test.py +0 -275
  297. clarifai_utils/models/model_serving/models/inference.py +0 -42
  298. clarifai_utils/models/model_serving/models/model_types.py +0 -265
  299. clarifai_utils/models/model_serving/models/output.py +0 -124
  300. clarifai_utils/models/model_serving/models/pb_model.py +0 -74
  301. clarifai_utils/models/model_serving/models/test.py +0 -64
  302. clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
  303. clarifai_utils/modules/README.md +0 -5
  304. clarifai_utils/modules/__init__.py +0 -0
  305. clarifai_utils/modules/css.py +0 -60
  306. clarifai_utils/modules/pages.py +0 -42
  307. clarifai_utils/modules/style.css +0 -217
  308. clarifai_utils/runners/__init__.py +0 -0
  309. clarifai_utils/runners/example.py +0 -33
  310. clarifai_utils/schema/search.py +0 -69
  311. clarifai_utils/urls/helper.py +0 -103
  312. clarifai_utils/utils/__init__.py +0 -0
  313. clarifai_utils/utils/logging.py +0 -90
  314. clarifai_utils/utils/misc.py +0 -33
  315. clarifai_utils/utils/model_train.py +0 -157
  316. clarifai_utils/versions.py +0 -6
  317. clarifai_utils/workflows/__init__.py +0 -0
  318. clarifai_utils/workflows/export.py +0 -68
  319. clarifai_utils/workflows/utils.py +0 -59
  320. clarifai_utils/workflows/validate.py +0 -67
  321. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
  322. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
  323. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
@@ -1,134 +0,0 @@
1
- # Copyright 2023 Clarifai, Inc.
2
- # Licensed under the Apache License, Version 2.0 (the "License");
3
- # you may not use this file except in compliance with the License.
4
- # You may obtain a copy of the License at
5
- #
6
- # http://www.apache.org/licenses/LICENSE-2.0
7
- #
8
- # Unless required by applicable law or agreed to in writing, software
9
- # distributed under the License is distributed on an "AS IS" BASIS,
10
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
- # See the License for the specific language governing permissions and
12
- # limitations under the License.
13
- """
14
- Parse & Serialize TritonModelConfig objects into proto format.
15
- """
16
-
17
- import os
18
- from pathlib import Path
19
- from typing import Type
20
-
21
- from google.protobuf.text_format import MessageToString
22
- from tritonclient.grpc import model_config_pb2
23
-
24
- from .config import TritonModelConfig
25
-
26
-
27
- class Serializer:
28
- """
29
- Serialize TritonModelConfig type object.
30
- """
31
-
32
- def __init__(self, model_config: Type[TritonModelConfig]) -> None:
33
- self.model_config = model_config #python dataclass config
34
- self.config_proto = model_config_pb2.ModelConfig() #holds parsed python config
35
-
36
- self._set_all_fields()
37
-
38
- def _set_input(self) -> None:
39
- """
40
- Parse InputConfig object to proto.
41
- """
42
- for in_field in self.model_config.input:
43
- input_config = self.config_proto.input.add()
44
- for key, value in in_field.__dict__.items():
45
- try:
46
- setattr(input_config, key, value)
47
- except AttributeError:
48
- field = getattr(input_config, key)
49
- if isinstance(value, list):
50
- field.extend(value)
51
- else:
52
- field.extend([value])
53
- return
54
-
55
- def _set_output(self) -> None:
56
- """
57
- Parse OutputConfig object to proto.
58
- """
59
- # loop over output dataclass list
60
- for out_field in self.model_config.output:
61
- output_config = self.config_proto.output.add()
62
- for key, value in out_field.__dict__.items():
63
- try:
64
- setattr(output_config, key, value)
65
- except AttributeError: #Proto Repeated Field assignment not allowed
66
- field = getattr(output_config, key)
67
- if isinstance(value, list):
68
- field.extend(value)
69
- else:
70
- field.extend([value])
71
- return
72
-
73
- def _set_instance_group(self) -> None:
74
- """
75
- Parse triton model instance group settings to proto.
76
- """
77
- instance = self.config_proto.instance_group.add()
78
- for field_name, value in self.model_config.instance_group.__dict__.items():
79
- try:
80
- setattr(instance, field_name, value)
81
- except AttributeError:
82
- continue
83
- return
84
-
85
- def _set_batch_info(self) -> model_config_pb2.ModelDynamicBatching:
86
- """
87
- Parse triton model dynamic batching settings to proto.
88
- """
89
- dbatch_msg = model_config_pb2.ModelDynamicBatching()
90
- for key, value in self.model_config.dynamic_batching.__dict__.items():
91
- try:
92
- setattr(dbatch_msg, key, value)
93
- except AttributeError: #Proto Repeated Field assignment not allowed
94
- field = getattr(dbatch_msg, key)
95
- if isinstance(value, list):
96
- field.extend(value)
97
- else:
98
- field.extend([value])
99
-
100
- return dbatch_msg
101
-
102
- def _set_all_fields(self) -> None:
103
- """
104
- Set all config fields.
105
- """
106
- self.config_proto.name = self.model_config.model_name
107
- self.config_proto.backend = self.model_config.backend
108
- self.config_proto.max_batch_size = self.model_config.max_batch_size
109
- self._set_input()
110
- self._set_output()
111
- self._set_instance_group()
112
- dynamic_batch_msg = self._set_batch_info()
113
- self.config_proto.dynamic_batching.CopyFrom(dynamic_batch_msg)
114
-
115
- @property
116
- def get_config(self) -> model_config_pb2.ModelConfig:
117
- """
118
- Return model config proto.
119
- """
120
- return self.config_proto
121
-
122
- def to_file(self, save_dir: Path) -> None:
123
- """
124
- Serialize all triton config parameters and save output
125
- to file.
126
- Args:
127
- -----
128
- save_dir: Directory where to save resultant config.pbtxt file.
129
- Defaults to the current working dir.
130
- """
131
- msg_string = MessageToString(self.config_proto)
132
-
133
- with open(os.path.join(save_dir, "config.pbtxt"), "w") as cfile:
134
- cfile.write(msg_string)
@@ -1,12 +0,0 @@
1
- # Copyright 2023 Clarifai, Inc.
2
- # Licensed under the Apache License, Version 2.0 (the "License");
3
- # you may not use this file except in compliance with the License.
4
- # You may obtain a copy of the License at
5
- #
6
- # http://www.apache.org/licenses/LICENSE-2.0
7
- #
8
- # Unless required by applicable law or agreed to in writing, software
9
- # distributed under the License is distributed on an "AS IS" BASIS,
10
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
- # See the License for the specific language governing permissions and
12
- # limitations under the License.
@@ -1,275 +0,0 @@
1
- import dataclasses
2
- import inspect
3
- import logging
4
- import os
5
- import unittest
6
- from typing import Any, Dict, Union
7
-
8
- import numpy as np
9
-
10
- from ..model_config import ModelTypes
11
- from ..model_config.config import get_model_config
12
- from ..model_config.inference_parameter import InferParamManager
13
- from .output import (ClassifierOutput, EmbeddingOutput, ImageOutput, MasksOutput, TextOutput,
14
- VisualDetectorOutput)
15
-
16
- PREDEFINED_TEXTS = ["Photo of a cat", "A cat is playing around"]
17
-
18
- PREDEFINED_IMAGES = [
19
- np.zeros((100, 100, 3), dtype='uint8'), #black
20
- np.ones((100, 100, 3), dtype='uint8') * 255, #white
21
- np.random.uniform(0, 255, (100, 100, 3)).astype('uint8') #noise
22
- ]
23
-
24
-
25
- class DefaultTestInferenceModel(unittest.TestCase):
26
- """
27
- This file contains test cases:
28
- * Test triton config of current model vs default config
29
- * Test if labels.txt is valid for specific model types
30
- * Test inference with simple inputs
31
- ...
32
- """
33
- __test__ = False
34
-
35
- def triton_get_predictions(self, input_data, **kwargs):
36
- """Call InferenceModel.get_predictions method
37
-
38
- Args:
39
- input_data (Union[np.ndarray, str]):
40
- if model receives image or vector then type is `np.ndarray`, otherwise `string`
41
-
42
- Returns:
43
- One of types in models.output
44
- """
45
- _kwargs = self.inference_parameters.validate(**kwargs)
46
- return inspect.unwrap(self.triton_python_model.inference_obj.get_predictions)(
47
- self.triton_python_model.inference_obj, input_data, **_kwargs)
48
-
49
- def _get_preprocess(self, input):
50
- """ preprocess if input is image """
51
- if "image" in input.name:
52
- h, w, _ = input.dims
53
- if h > -1 and w > -1:
54
- import cv2
55
-
56
- def _f(x):
57
- logging.info(f"Preprocess reshape image => {(w, h, 3)}")
58
- return cv2.resize(x, (w, h))
59
-
60
- return _f
61
-
62
- return lambda x: x
63
-
64
- def intitialize(self,
65
- model_type: str,
66
- repo_version_dir: str,
67
- is_instance_kind_gpu: bool = True,
68
- inference_parameters: Union[str, Dict[str, Any]] = ""):
69
- import sys
70
- sys.path.append(repo_version_dir)
71
- self.model_type = model_type
72
- self.is_instance_kind_gpu = is_instance_kind_gpu
73
- logging.info(self.model_type)
74
-
75
- # load inference parameters
76
- if isinstance(inference_parameters, str):
77
- self.inference_parameters = InferParamManager(json_path=inference_parameters)
78
- else:
79
- self.inference_parameters = InferParamManager.from_kwargs(**inference_parameters)
80
- exported_file_path = os.path.join(repo_version_dir, "inference_parameters.json")
81
- logging.info(f"Export inference parameters to `{exported_file_path}` when loading from dict")
82
- self.inference_parameters.export(exported_file_path)
83
-
84
- # Construct TritonPythonModel object
85
- from model import TritonPythonModel
86
- self.triton_python_model = TritonPythonModel()
87
- self.triton_python_model.initialize(
88
- dict(
89
- model_repository=os.path.join(repo_version_dir, ".."),
90
- model_instance_kind="GPU" if self.is_instance_kind_gpu else "cpu"))
91
- # Get default config of model and model_type
92
- self.default_triton_model_config = get_model_config(self.model_type).make_triton_model_config(
93
- model_name=self.model_type, model_version="1", image_shape=[-1, -1])
94
- # Get current model config
95
- self.triton_model_config = self.triton_python_model.config_msg
96
- self.input_name_to_config = {each.name: each
97
- for each in self.triton_model_config.input} # name: input
98
- self.preprocess = {
99
- k: self._get_preprocess(input)
100
- for k, input in self.input_name_to_config.items()
101
- }
102
- # load labels
103
- self._required_label_model_types = [
104
- ModelTypes.visual_detector, ModelTypes.visual_classifier, ModelTypes.text_classifier,
105
- ModelTypes.visual_segmenter
106
- ]
107
- self._output_text_models = [ModelTypes.text_to_text]
108
- self.labels = []
109
- if self.model_type in self._required_label_model_types:
110
- with open(os.path.join(repo_version_dir, "../labels.txt"), 'r') as fp:
111
- labels = fp.readlines()
112
- if labels:
113
- self.labels = [line for line in labels if line]
114
-
115
- def test_triton_config(self):
116
- """ test Triton config"""
117
- # check if input names are still matched
118
- default_input_names = [each.name for each in self.default_triton_model_config.input]
119
- current_input_names = [each.name for each in self.triton_model_config.input]
120
- default_input_names.sort()
121
- current_input_names.sort()
122
- self.assertEqual(current_input_names, default_input_names,
123
- "input name of current model vs generated model must be matched "
124
- f"{current_input_names} != {default_input_names}")
125
- # check if output names are still matched
126
- default_output_names = [each.name for each in self.default_triton_model_config.output]
127
- current_output_names = [each.name for each in self.triton_model_config.output]
128
- default_output_names.sort()
129
- current_output_names.sort()
130
- self.assertEqual(current_output_names, default_output_names,
131
- "output name of current model vs generated model must be matched "
132
- f"{current_output_names} not in {default_output_names}")
133
-
134
- def test_having_labels(self):
135
- if self.model_type in self._required_label_model_types:
136
- self.assertTrue(
137
- len(self.labels),
138
- f"`labels.txt` is empty!. Model type `{self.model_type}` requires input labels in `labels.txt`"
139
- )
140
-
141
- def test_inference_with_predefined_inputs(self):
142
- """ Test Inference with predefined inputs """
143
-
144
- def _is_valid_logit(x: np.array):
145
- return np.all(0 <= x) and np.all(x <= 1)
146
-
147
- def _is_non_negative(x: np.array):
148
- return np.all(x >= 0)
149
-
150
- def _is_integer(x):
151
- return np.all(np.equal(np.mod(x, 1), 0))
152
-
153
- if len(self.input_name_to_config) == 1:
154
- if "image" in self.preprocess:
155
- inputs = [self.preprocess["image"](inp) for inp in PREDEFINED_IMAGES]
156
- else:
157
- inputs = PREDEFINED_TEXTS
158
- outputs = [self.triton_get_predictions(inp) for inp in inputs]
159
-
160
- # Test for specific model type:
161
- # 1. length of output array vs config
162
- # 2. type of outputs
163
- # 3. test range value, shape and dtype of output
164
-
165
- for inp, output in zip(inputs, outputs):
166
-
167
- field = dataclasses.fields(output)[0].name
168
- if self.model_type not in self._output_text_models:
169
- self.assertEqual(
170
- len(self.triton_model_config.output[0].dims),
171
- len(getattr(output, field).shape),
172
- "Length of 'dims' of config and output must be matched, but get "
173
- f"Config {len(self.triton_model_config.output[0].dims)} != Output {len(getattr(output, field).shape)}"
174
- )
175
-
176
- if self.model_type == ModelTypes.visual_detector:
177
- logging.info(output.predicted_labels)
178
- self.assertEqual(
179
- type(output), VisualDetectorOutput,
180
- f"Output type must be `VisualDetectorOutput`, but got {type(output)}")
181
- self.assertTrue(
182
- _is_valid_logit(output.predicted_scores),
183
- "`predicted_scores` must be in range [0, 1]")
184
- self.assertTrue(
185
- _is_non_negative(output.predicted_bboxes), "`predicted_bboxes` must be >= 0")
186
- self.assertTrue(
187
- np.all(0 <= output.predicted_labels) and
188
- np.all(output.predicted_labels < len(self.labels)),
189
- f"`predicted_labels` must be in [0, {len(self.labels) - 1}]")
190
- self.assertTrue(
191
- _is_integer(output.predicted_labels), "`predicted_labels` must be integer")
192
-
193
- elif self.model_type == ModelTypes.visual_classifier:
194
- self.assertEqual(
195
- type(output), ClassifierOutput,
196
- f"Output type must be `ClassifierOutput`, but got {type(output)}")
197
- self.assertTrue(
198
- _is_valid_logit(output.predicted_scores),
199
- "`predicted_scores` must be in range [0, 1]")
200
- if self.labels:
201
- self.assertEqual(
202
- len(output.predicted_scores),
203
- len(self.labels),
204
- f"`predicted_labels` must equal to {len(self.labels)}, however got {len(output.predicted_scores)}"
205
- )
206
-
207
- elif self.model_type == ModelTypes.text_classifier:
208
- self.assertEqual(
209
- type(output), ClassifierOutput,
210
- f"Output type must be `ClassifierOutput`, but got {type(output)}")
211
- self.assertTrue(
212
- _is_valid_logit(output.predicted_scores),
213
- "`predicted_scores` must be in range [0, 1]")
214
- if self.labels:
215
- self.assertEqual(
216
- len(output.predicted_scores),
217
- len(self.labels),
218
- f"`predicted_labels` must equal to {len(self.labels)}, however got {len(output.predicted_scores)}"
219
- )
220
-
221
- elif self.model_type == ModelTypes.text_embedder:
222
- self.assertEqual(
223
- type(output), EmbeddingOutput,
224
- f"Output type must be `EmbeddingOutput`, but got {type(output)}")
225
- self.assertNotEqual(output.embedding_vector.shape, [])
226
-
227
- elif self.model_type == ModelTypes.text_to_text:
228
- self.assertEqual(
229
- type(output), TextOutput,
230
- f"Output type must be `TextOutput`, but got {type(output)}")
231
-
232
- elif self.model_type == ModelTypes.text_to_image:
233
- self.assertEqual(
234
- type(output), ImageOutput,
235
- f"Output type must be `ImageOutput`, but got {type(output)}")
236
- self.assertTrue(_is_non_negative(output.image), "`image` elements must be >= 0")
237
-
238
- elif self.model_type == ModelTypes.visual_embedder:
239
- self.assertEqual(
240
- type(output), EmbeddingOutput,
241
- f"Output type must be `EmbeddingOutput`, but got {type(output)}")
242
- self.assertNotEqual(output.embedding_vector.shape, [])
243
-
244
- elif self.model_type == ModelTypes.visual_segmenter:
245
- self.assertEqual(
246
- type(output), MasksOutput,
247
- f"Output type must be `MasksOutput`, but got {type(output)}")
248
- self.assertTrue(_is_integer(output.predicted_mask), "`predicted_mask` must be integer")
249
- if self.labels:
250
- self.assertTrue(
251
- np.all(0 <= output.predicted_mask) and
252
- np.all(output.predicted_mask < len(self.labels)),
253
- f"`predicted_mask` must be in [0, {len(self.labels) - 1}]")
254
-
255
- elif len(self.input_name_to_config) == 2:
256
- from itertools import zip_longest
257
- if self.model_type == ModelTypes.multimodal_embedder:
258
- input_images = [self.preprocess["image"](inp) for inp in PREDEFINED_IMAGES]
259
- input_texts = PREDEFINED_TEXTS
260
-
261
- def _assert(input_data):
262
- for group in zip_longest(*input_data.values()):
263
- _input = dict(zip(input_data, group))
264
- output = self.triton_get_predictions(input_data=_input)
265
- self.assertEqual(
266
- type(output), EmbeddingOutput,
267
- f"Output type must be `EmbeddingOutput`, but got {type(output)}")
268
- self.assertNotEqual(output.embedding_vector.shape, [])
269
-
270
- _assert(dict(image=input_images, text=[]))
271
- _assert(dict(image=[], text=input_texts))
272
-
273
-
274
- if __name__ == '__main__':
275
- unittest.main()
@@ -1,42 +0,0 @@
1
- # This file contains boilerplate code to allow users write their model
2
- # inference code that will then interact with the Triton Inference Server
3
- # Python backend to serve end user requests.
4
- # The module name, module path, class name & get_predictions() method names MUST be maintained as is
5
- # but other methods may be added within the class as deemed fit provided
6
- # they are invoked within the main get_predictions() inference method
7
- # if they play a role in any step of model inference
8
- """User model inference script."""
9
-
10
- import os
11
- from pathlib import Path
12
-
13
-
14
- class InferenceModel:
15
- """User model inference class."""
16
-
17
- def __init__(self) -> None:
18
- """
19
- Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
20
- in this method so they are loaded only once for faster inference.
21
- """
22
- self.base_path: Path = os.path.dirname(__file__)
23
- ## sample model loading code:
24
- #self.checkpoint_path: Path = os.path.join(self.base_path, "your checkpoint filename/path")
25
- #self.model: Callable = <load_your_model_here from checkpoint or folder>
26
-
27
- #Add relevant model type decorator to the method below (see docs/model_types for ref.)
28
- def get_predictions(self, input_data, **kwargs):
29
- """
30
- Main model inference method.
31
-
32
- Args:
33
- -----
34
- input_data: A single input data item to predict on.
35
- Input data can be an image or text, etc depending on the model type.
36
-
37
- Returns:
38
- --------
39
- One of the clarifai.models.model_serving.models.output types. Refer to the README/docs
40
- """
41
- # Delete/Comment out line below and add your inference code
42
- raise NotImplementedError()