clarifai-9.10.2-py3-none-any.whl → clarifai-9.10.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (323)
  1. clarifai/client/__init__.py +3 -2
  2. clarifai/client/app.py +39 -23
  3. clarifai/client/base.py +6 -6
  4. clarifai/client/dataset.py +113 -55
  5. clarifai/client/input.py +47 -55
  6. clarifai/client/model.py +27 -25
  7. clarifai/client/module.py +13 -11
  8. clarifai/client/runner.py +5 -3
  9. clarifai/client/search.py +7 -3
  10. clarifai/client/user.py +14 -8
  11. clarifai/client/workflow.py +22 -20
  12. clarifai/constants/dataset.py +22 -0
  13. clarifai/datasets/upload/base.py +9 -7
  14. clarifai/datasets/upload/features.py +3 -3
  15. clarifai/datasets/upload/image.py +49 -50
  16. clarifai/datasets/upload/loaders/coco_captions.py +26 -80
  17. clarifai/datasets/upload/loaders/coco_detection.py +56 -115
  18. clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
  19. clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
  20. clarifai/datasets/upload/loaders/xview_detection.py +3 -3
  21. clarifai/datasets/upload/text.py +16 -16
  22. clarifai/datasets/upload/utils.py +196 -21
  23. clarifai/utils/misc.py +21 -0
  24. clarifai/versions.py +1 -1
  25. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
  26. clarifai-9.10.3.dist-info/RECORD +96 -0
  27. clarifai-9.10.3.dist-info/top_level.txt +1 -0
  28. clarifai/auth/__init__.py +0 -6
  29. clarifai/auth/helper.py +0 -367
  30. clarifai/auth/register.py +0 -23
  31. clarifai/auth/stub.py +0 -127
  32. clarifai/datasets/upload/examples/README.md +0 -31
  33. clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
  34. clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  35. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  36. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  37. clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  38. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  39. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  40. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  41. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  42. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  43. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  44. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  45. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  46. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  47. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  48. clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  49. clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  50. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  51. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  52. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  53. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  54. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  55. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  56. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  57. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  58. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  59. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  60. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  61. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  62. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  63. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  64. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  65. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  66. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  67. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  68. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  69. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  70. clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
  71. clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  72. clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  73. clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  74. clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  75. clarifai/datasets/upload/loaders/README.md +0 -49
  76. clarifai/models/model_serving/README.md +0 -155
  77. clarifai/models/model_serving/docs/custom_config.md +0 -33
  78. clarifai/models/model_serving/docs/dependencies.md +0 -11
  79. clarifai/models/model_serving/docs/inference_parameters.md +0 -134
  80. clarifai/models/model_serving/docs/model_types.md +0 -20
  81. clarifai/models/model_serving/docs/output.md +0 -28
  82. clarifai/models/model_serving/examples/README.md +0 -7
  83. clarifai/models/model_serving/examples/image_classification/README.md +0 -9
  84. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  85. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  86. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  87. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  88. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  89. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  90. clarifai/models/model_serving/examples/text_classification/README.md +0 -9
  91. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  92. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  93. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  94. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  95. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  96. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  97. clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
  98. clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
  99. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  100. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  101. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  102. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  104. clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
  105. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  106. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  107. clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
  108. clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  109. clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  110. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  111. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
  112. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  113. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  114. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
  115. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  116. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  117. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  118. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  119. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  120. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  121. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  122. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  123. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  124. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  125. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  126. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  127. clarifai/modules/README.md +0 -5
  128. clarifai/modules/style.css +0 -217
  129. clarifai-9.10.2.dist-info/RECORD +0 -386
  130. clarifai-9.10.2.dist-info/top_level.txt +0 -2
  131. clarifai_utils/__init__.py +0 -0
  132. clarifai_utils/auth/__init__.py +0 -6
  133. clarifai_utils/auth/helper.py +0 -367
  134. clarifai_utils/auth/register.py +0 -23
  135. clarifai_utils/auth/stub.py +0 -127
  136. clarifai_utils/cli.py +0 -0
  137. clarifai_utils/client/__init__.py +0 -16
  138. clarifai_utils/client/app.py +0 -684
  139. clarifai_utils/client/auth/__init__.py +0 -4
  140. clarifai_utils/client/auth/helper.py +0 -367
  141. clarifai_utils/client/auth/register.py +0 -23
  142. clarifai_utils/client/auth/stub.py +0 -127
  143. clarifai_utils/client/base.py +0 -131
  144. clarifai_utils/client/dataset.py +0 -442
  145. clarifai_utils/client/input.py +0 -892
  146. clarifai_utils/client/lister.py +0 -54
  147. clarifai_utils/client/model.py +0 -575
  148. clarifai_utils/client/module.py +0 -94
  149. clarifai_utils/client/runner.py +0 -161
  150. clarifai_utils/client/search.py +0 -254
  151. clarifai_utils/client/user.py +0 -253
  152. clarifai_utils/client/workflow.py +0 -223
  153. clarifai_utils/constants/model.py +0 -4
  154. clarifai_utils/constants/search.py +0 -2
  155. clarifai_utils/datasets/__init__.py +0 -0
  156. clarifai_utils/datasets/export/__init__.py +0 -0
  157. clarifai_utils/datasets/export/inputs_annotations.py +0 -222
  158. clarifai_utils/datasets/upload/__init__.py +0 -0
  159. clarifai_utils/datasets/upload/base.py +0 -66
  160. clarifai_utils/datasets/upload/examples/README.md +0 -31
  161. clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
  162. clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  163. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  164. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  165. clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  166. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  167. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  168. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  169. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  170. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  171. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  172. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  173. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  174. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  175. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  176. clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  177. clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  178. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  179. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  180. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  181. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  182. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  183. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  184. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  185. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  186. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  187. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  188. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  189. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  190. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  191. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  192. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  193. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  194. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  195. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  196. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  197. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  198. clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
  199. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  200. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  201. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  202. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  203. clarifai_utils/datasets/upload/features.py +0 -44
  204. clarifai_utils/datasets/upload/image.py +0 -165
  205. clarifai_utils/datasets/upload/loaders/README.md +0 -49
  206. clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
  207. clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
  208. clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
  209. clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
  210. clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
  211. clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
  212. clarifai_utils/datasets/upload/text.py +0 -53
  213. clarifai_utils/datasets/upload/utils.py +0 -63
  214. clarifai_utils/errors.py +0 -89
  215. clarifai_utils/models/__init__.py +0 -0
  216. clarifai_utils/models/api.py +0 -283
  217. clarifai_utils/models/model_serving/README.md +0 -155
  218. clarifai_utils/models/model_serving/__init__.py +0 -12
  219. clarifai_utils/models/model_serving/cli/__init__.py +0 -12
  220. clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
  221. clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
  222. clarifai_utils/models/model_serving/cli/repository.py +0 -87
  223. clarifai_utils/models/model_serving/constants.py +0 -1
  224. clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
  225. clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
  226. clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
  227. clarifai_utils/models/model_serving/docs/model_types.md +0 -20
  228. clarifai_utils/models/model_serving/docs/output.md +0 -28
  229. clarifai_utils/models/model_serving/examples/README.md +0 -7
  230. clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
  231. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  232. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
  233. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
  234. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  235. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  236. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  237. clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  238. clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  239. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  240. clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
  241. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  242. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
  243. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
  244. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  245. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  246. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  247. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  248. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  249. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  250. clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
  251. clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
  252. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  253. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  254. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  255. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  256. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  257. clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
  258. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  259. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
  260. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
  261. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  262. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  263. clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
  264. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
  265. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
  266. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  267. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  268. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  269. clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
  270. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  271. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
  272. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
  273. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  274. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  275. clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
  276. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  277. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
  278. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
  279. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  280. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  281. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  282. clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
  283. clarifai_utils/models/model_serving/model_config/config.py +0 -302
  284. clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
  285. clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  286. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  287. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  288. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  289. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  290. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  291. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  292. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  293. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  294. clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
  295. clarifai_utils/models/model_serving/models/__init__.py +0 -12
  296. clarifai_utils/models/model_serving/models/default_test.py +0 -275
  297. clarifai_utils/models/model_serving/models/inference.py +0 -42
  298. clarifai_utils/models/model_serving/models/model_types.py +0 -265
  299. clarifai_utils/models/model_serving/models/output.py +0 -124
  300. clarifai_utils/models/model_serving/models/pb_model.py +0 -74
  301. clarifai_utils/models/model_serving/models/test.py +0 -64
  302. clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
  303. clarifai_utils/modules/README.md +0 -5
  304. clarifai_utils/modules/__init__.py +0 -0
  305. clarifai_utils/modules/css.py +0 -60
  306. clarifai_utils/modules/pages.py +0 -42
  307. clarifai_utils/modules/style.css +0 -217
  308. clarifai_utils/runners/__init__.py +0 -0
  309. clarifai_utils/runners/example.py +0 -33
  310. clarifai_utils/schema/search.py +0 -69
  311. clarifai_utils/urls/helper.py +0 -103
  312. clarifai_utils/utils/__init__.py +0 -0
  313. clarifai_utils/utils/logging.py +0 -90
  314. clarifai_utils/utils/misc.py +0 -33
  315. clarifai_utils/utils/model_train.py +0 -157
  316. clarifai_utils/versions.py +0 -6
  317. clarifai_utils/workflows/__init__.py +0 -0
  318. clarifai_utils/workflows/export.py +0 -68
  319. clarifai_utils/workflows/utils.py +0 -59
  320. clarifai_utils/workflows/validate.py +0 -67
  321. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
  322. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
  323. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
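The most significant change in this release is visible in the file list above: every module under the duplicated clarifai_utils/ tree is deleted (all the +0 -N entries), and top_level.txt shrinks from two packages to one, leaving clarifai as the sole top-level package. As a hedged illustration only (assuming nothing beyond the package removal, and using the import path that the docstrings in the diff below themselves use), migrating any residual code would look like:

    # Hypothetical migration sketch for 9.10.3: the legacy clarifai_utils tree is removed,
    # so any imports of it must switch to the surviving clarifai package.
    # Old (pre-9.10.3): from clarifai_utils.client.input import Inputs
    from clarifai.client.input import Inputs  # import path shown in the SDK's own docstrings

    input_obj = Inputs(user_id="user_id", app_id="app_id")  # placeholder IDs

The diff below shows the deleted clarifai_utils/client/input.py (item 145 in the list, +0 -892).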
clarifai_utils/client/input.py
@@ -1,892 +0,0 @@
- import csv
- import json
- import os
- import time
- import uuid
- from concurrent.futures import ThreadPoolExecutor, as_completed
- from multiprocessing import cpu_count
- from typing import Generator, List, Union
-
- from clarifai_grpc.grpc.api import resources_pb2, service_pb2  # noqa: F401
- from clarifai_grpc.grpc.api.resources_pb2 import Annotation, Audio, Image, Input, Text, Video
- from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
- from google.protobuf.json_format import MessageToDict
- from google.protobuf.struct_pb2 import Struct
- from tqdm import tqdm
-
- from clarifai.client.base import BaseClient
- from clarifai.client.lister import Lister
- from clarifai.errors import UserError
- from clarifai.utils.logging import get_logger
- from clarifai.utils.misc import BackoffIterator, Chunker
-
-
- class Inputs(Lister, BaseClient):
-   """Inputs is a class that provides access to Clarifai API endpoints related to Input information."""
-
-   def __init__(self,
-                user_id: str = "",
-                app_id: str = "",
-                logger_level: str = "INFO",
-                base_url: str = "https://api.clarifai.com",
-                **kwargs):
-     """Initializes an Input object.
-
-     Args:
-         user_id (str): A user ID for authentication.
-         app_id (str): An app ID for the application to interact with.
-         base_url (str): Base API url. Default "https://api.clarifai.com"
-         **kwargs: Additional keyword arguments to be passed to the Input
-     """
-     self.user_id = user_id
-     self.app_id = app_id
-     self.kwargs = {**kwargs}
-     self.input_info = resources_pb2.Input(**self.kwargs)
-     self.logger = get_logger(logger_level=logger_level, name=__name__)
-     BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url)
-     Lister.__init__(self)
-
-   def _get_proto(self,
-                  input_id: str,
-                  dataset_id: Union[str, None],
-                  imagepb: Image = None,
-                  video_pb: Video = None,
-                  audio_pb: Audio = None,
-                  text_pb: Text = None,
-                  geo_info: List = None,
-                  labels: List = None,
-                  metadata: Struct = None) -> Input:
-     """Create input proto for image data type.
-     Args:
-         input_id (str): The input ID for the input to create.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-         imagepb (Image): The image proto to be used for the input.
-         video_pb (Video): The video proto to be used for the input.
-         audio_pb (Audio): The audio proto to be used for the input.
-         text_pb (Text): The text proto to be used for the input.
-         geo_info (list): A list of longitude and latitude for the geo point.
-         labels (list): A list of labels for the input.
-         metadata (Struct): A Struct of metadata for the input.
-     Returns:
-         Input: An Input object for the specified input ID.
-     """
-     assert geo_info is None or isinstance(
-         geo_info, list), "geo_info must be a list of longitude and latitude"
-     assert labels is None or isinstance(labels, list), "labels must be a list of strings"
-     assert metadata is None or isinstance(metadata, Struct), "metadata must be a Struct"
-     geo_pb = resources_pb2.Geo(geo_point=resources_pb2.GeoPoint(
-         longitude=geo_info[0], latitude=geo_info[1])) if geo_info else None
-     concepts = [
-         resources_pb2.Concept(
-             id=f"id-{''.join(_label.split(' '))}", name=_label, value=1.)\
-         for _label in labels
-     ] if labels else None
-
-     if dataset_id:
-       return resources_pb2.Input(
-           id=input_id,
-           dataset_ids=[dataset_id],
-           data=resources_pb2.Data(
-               image=imagepb,
-               video=video_pb,
-               audio=audio_pb,
-               text=text_pb,
-               geo=geo_pb,
-               concepts=concepts,
-               metadata=metadata))
-
-     return resources_pb2.Input(
-         id=input_id,
-         data=resources_pb2.Data(
-             image=imagepb,
-             video=video_pb,
-             audio=audio_pb,
-             text=text_pb,
-             geo=geo_pb,
-             concepts=concepts,
-             metadata=metadata))
-
-   def get_input_from_url(self,
-                          input_id: str,
-                          image_url: str = None,
-                          video_url: str = None,
-                          audio_url: str = None,
-                          text_url: str = None,
-                          dataset_id: str = None,
-                          **kwargs) -> Input:
-     """Create input proto from url.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         image_url (str): The url for the image.
-         video_url (str): The url for the video.
-         audio_url (str): The url for the audio.
-         text_url (str): The url for the text.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-
-     Returns:
-         Input: An Input object for the specified input ID.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_proto = input_obj.get_input_from_url(input_id = 'demo', image_url='https://samples.clarifai.com/metro-north.jpg')
-     """
-     if not any((image_url, video_url, audio_url, text_url)):
-       raise ValueError(
-           "At least one of image_url, video_url, audio_url, text_url must be provided.")
-     image_pb = resources_pb2.Image(url=image_url) if image_url else None
-     video_pb = resources_pb2.Video(url=video_url) if video_url else None
-     audio_pb = resources_pb2.Audio(url=audio_url) if audio_url else None
-     text_pb = resources_pb2.Text(url=text_url) if text_url else None
-     return self._get_proto(
-         input_id=input_id,
-         dataset_id=dataset_id,
-         imagepb=image_pb,
-         video_pb=video_pb,
-         audio_pb=audio_pb,
-         text_pb=text_pb,
-         **kwargs)
-
-   def get_input_from_file(self,
-                           input_id: str,
-                           image_file: str = None,
-                           video_file: str = None,
-                           audio_file: str = None,
-                           text_file: str = None,
-                           dataset_id: str = None,
-                           **kwargs) -> Input:
-     """Create input proto from files.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         image_file (str): The file_path for the image.
-         video_file (str): The file_path for the video.
-         audio_file (str): The file_path for the audio.
-         text_file (str): The file_path for the text.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-
-     Returns:
-         Input: An Input object for the specified input ID.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_proto = input_obj.get_input_from_file(input_id = 'demo', video_file='file_path')
-     """
-     if not any((image_file, video_file, audio_file, text_file)):
-       raise ValueError(
-           "At least one of image_file, video_file, audio_file, text_file must be provided.")
-     image_pb = resources_pb2.Image(base64=open(image_file, 'rb').read()) if image_file else None
-     video_pb = resources_pb2.Video(base64=open(video_file, 'rb').read()) if video_file else None
-     audio_pb = resources_pb2.Audio(base64=open(audio_file, 'rb').read()) if audio_file else None
-     text_pb = resources_pb2.Text(raw=open(text_file, 'rb').read()) if text_file else None
-     return self._get_proto(
-         input_id=input_id,
-         dataset_id=dataset_id,
-         imagepb=image_pb,
-         video_pb=video_pb,
-         audio_pb=audio_pb,
-         text_pb=text_pb,
-         **kwargs)
-
-   def get_input_from_bytes(self,
-                            input_id: str,
-                            image_bytes: bytes = None,
-                            video_bytes: bytes = None,
-                            audio_bytes: bytes = None,
-                            text_bytes: bytes = None,
-                            dataset_id: str = None,
-                            **kwargs) -> Input:
-     """Create input proto from bytes.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         image_bytes (str): The bytes for the image.
-         video_bytes (str): The bytes for the video.
-         audio_bytes (str): The bytes for the audio.
-         text_bytes (str): The bytes for the text.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-
-     Returns:
-         Input: An Input object for the specified input ID.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> image = open('demo.jpg', 'rb').read()
-         >>> video = open('demo.mp4', 'rb').read()
-         >>> input_proto = input_obj.get_input_from_bytes(input_id = 'demo',image_bytes =image, video_bytes=video)
-     """
-     if not any((image_bytes, video_bytes, audio_bytes, text_bytes)):
-       raise ValueError(
-           "At least one of image_bytes, video_bytes, audio_bytes, text_bytes must be provided.")
-     image_pb = resources_pb2.Image(base64=image_bytes) if image_bytes else None
-     video_pb = resources_pb2.Video(base64=video_bytes) if video_bytes else None
-     audio_pb = resources_pb2.Audio(base64=audio_bytes) if audio_bytes else None
-     text_pb = resources_pb2.Text(raw=text_bytes) if text_bytes else None
-     return self._get_proto(
-         input_id=input_id,
-         dataset_id=dataset_id,
-         imagepb=image_pb,
-         video_pb=video_pb,
-         audio_pb=audio_pb,
-         text_pb=text_pb,
-         **kwargs)
-
-   def get_image_inputs_from_folder(self,
-                                    folder_path: str,
-                                    dataset_id: str = None,
-                                    labels: bool = False) -> List[Input]:  #image specific
-     """Create input protos for image data type from folder.
-
-     Args:
-         folder_path (str): Path to the folder containing images.
-
-     Returns:
-         list of Input: A list of Input objects for the specified folder.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_protos = input_obj.get_image_inputs_from_folder(folder_path='demo_folder')
-     """
-     input_protos = []
-     labels = [folder_path.split('/')[-1]] if labels else None
-     for filename in os.listdir(folder_path):
-       if filename.split('.')[-1] not in ['jpg', 'jpeg', 'png', 'tiff', 'webp']:
-         continue
-       input_id = filename.split('.')[0]
-       image_pb = resources_pb2.Image(base64=open(os.path.join(folder_path, filename), 'rb').read())
-       input_protos.append(
-           self._get_proto(
-               input_id=input_id, dataset_id=dataset_id, imagepb=image_pb, labels=labels))
-     return input_protos
-
-   def get_text_input(self, input_id: str, raw_text: str, dataset_id: str = None,
-                      **kwargs) -> Text:  #text specific
-     """Create input proto for text data type from rawtext.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         raw_text (str): The raw text input.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-         **kwargs: Additional keyword arguments to be passed to the Input
-
-     Returns:
-         Text: An Input object for the specified input ID.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_protos = input_obj.get_text_input(input_id = 'demo', raw_text = 'This is a test')
-     """
-     text_pb = resources_pb2.Text(raw=raw_text)
-     return self._get_proto(input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, **kwargs)
-
-   def get_inputs_from_csv(self,
-                           csv_path: str,
-                           input_type: str = 'text',
-                           csv_type: str = 'raw',
-                           dataset_id: str = None,
-                           labels: str = True) -> List[Text]:
-     """Create input protos from csv.
-
-     Args:
-         csv_path (str): Path to the csv file.
-         input_type (str): Type of input. Options: 'text', 'image', 'video', 'audio'.
-         csv_type (str): Type of csv file. Options: 'raw', 'url', 'file_path'.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-         labels (str): True if csv file has labels column.
-
-     Returns:
-         inputs: List of inputs
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_protos = input_obj.get_inputs_from_csv(csv_path='filepath', input_type='text', csv_type='raw')
-     """
-     input_protos = []
-     with open(csv_path) as _file:
-       reader = csv.DictReader(_file, delimiter=',', quotechar='"')
-       columns = reader.fieldnames
-       for column in columns:
-         if column not in ['inputid', 'input', 'concepts', 'metadata', 'geopoints']:
-           raise UserError(
-               "CSV file may have 'inputid', 'input', 'concepts', 'metadata', 'geopoints' columns. Does not support '{}' column".
-               format(column))
-       for id, input in enumerate(reader):
-         if labels:
-           labels_list = input['concepts'].split(',')
-           labels = labels_list if len(input['concepts']) > 0 else None
-         else:
-           labels = None
-
-         if 'metadata' in columns:
-           if len(input['metadata']) > 0:
-             metadata_str = input['metadata'].replace("'", '"')
-             try:
-               metadata_dict = json.loads(metadata_str)
-             except json.decoder.JSONDecodeError:
-               raise UserError("metadata column in CSV file should be a valid json")
-             metadata = Struct()
-             metadata.update(metadata_dict)
-           else:
-             metadata = None
-         else:
-           metadata = None
-
-         if 'geopoints' in columns:
-           if len(input['geopoints']) > 0:
-             geo_points = input['geopoints'].split(',')
-             geo_points = [float(geo_point) for geo_point in geo_points]
-             geo_info = geo_points if len(geo_points) == 2 else UserError(
-                 "geopoints column in CSV file should have longitude,latitude")
-           else:
-             geo_info = None
-         else:
-           geo_info = None
-
-         input_id = input['inputid'] if 'inputid' in columns else uuid.uuid4().hex
-         text = input['input'] if input_type == 'text' else None
-         image = input['input'] if input_type == 'image' else None
-         video = input['input'] if input_type == 'video' else None
-         audio = input['input'] if input_type == 'audio' else None
-
-         if csv_type == 'raw':
-           input_protos.append(
-               self.get_text_input(
-                   input_id=input_id,
-                   raw_text=text,
-                   dataset_id=dataset_id,
-                   labels=labels,
-                   metadata=metadata,
-                   geo_info=geo_info))
-         elif csv_type == 'url':
-           input_protos.append(
-               self.get_input_from_url(
-                   input_id=input_id,
-                   image_url=image,
-                   text_url=text,
-                   audio_url=audio,
-                   video_url=video,
-                   dataset_id=dataset_id,
-                   labels=labels,
-                   metadata=metadata,
-                   geo_info=geo_info))
-         else:
-           input_protos.append(
-               self.get_input_from_file(
-                   input_id=input_id,
-                   image_file=image,
-                   text_file=text,
-                   audio_file=audio,
-                   video_file=video,
-                   dataset_id=dataset_id,
-                   labels=labels,
-                   metadata=metadata,
-                   geo_info=geo_info))
-
-     return input_protos
-
-   def get_text_inputs_from_folder(self,
-                                   folder_path: str,
-                                   dataset_id: str = None,
-                                   labels: bool = False) -> List[Text]:  #text specific
-     """Create input protos for text data type from folder.
-
-     Args:
-         folder_path (str): Path to the folder containing text.
-
-     Returns:
-         list of Input: A list of Input objects for the specified folder.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_protos = input_obj.get_text_inputs_from_folder(folder_path='demo_folder')
-     """
-     input_protos = []
-     labels = [folder_path.split('/')[-1]] if labels else None
-     for filename in os.listdir(folder_path):
-       if filename.split('.')[-1] != 'txt':
-         continue
-       input_id = filename.split('.')[0]
-       text_pb = resources_pb2.Text(raw=open(os.path.join(folder_path, filename), 'rb').read())
-       input_protos.append(
-           self._get_proto(
-               input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, labels=labels))
-     return input_protos
-
-   def get_annotation_proto(self, input_id: str, label: str, annotations: List) -> Annotation:
-     """Create an annotation proto for each bounding box, label input pair.
-
-     Args:
-         input_id (str): The input ID for the annotation to create.
-         label (str): annotation label
-         annotations (List): a list of a single bbox's coordinates. # Annotations ordering: [xmin, ymin, xmax, ymax]
-
-     Returns:
-         An annotation object for the specified input ID.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_obj.get_annotation_proto(input_id='demo', label='demo', annotations=[x_min, y_min, x_max, y_max])
-     """
-     if not isinstance(annotations, list):
-       raise UserError("annotations must be a list of bbox cooridnates")
-     input_annot_proto = resources_pb2.Annotation(
-         input_id=input_id,
-         data=resources_pb2.Data(regions=[
-             resources_pb2.Region(
-                 region_info=resources_pb2.RegionInfo(bounding_box=resources_pb2.BoundingBox(
-                     # Annotations ordering: [xmin, ymin, xmax, ymax]
-                     # top_row must be less than bottom row
-                     # left_col must be less than right col
-                     top_row=annotations[1],  #y_min
-                     left_col=annotations[0],  #x_min
-                     bottom_row=annotations[3],  #y_max
-                     right_col=annotations[2]  #x_max
-                 )),
-                 data=resources_pb2.Data(concepts=[
-                     resources_pb2.Concept(
-                         id=f"id-{''.join(label.split(' '))}", name=label, value=1.)
-                 ]))
-         ]))
-
-     return input_annot_proto
-
-   def get_mask_proto(self, input_id: str, label: str, polygons: List[List[float]]) -> Annotation:
-     """Create an annotation proto for each polygon box, label input pair.
-
-     Args:
-         input_id (str): The input ID for the annotation to create.
-         label (str): annotation label
-         polygons (List): Polygon x,y points iterable
-
-     Returns:
-         An annotation object for the specified input ID.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs()
-         >>> input_obj.get_mask_proto(input_id='demo', label='demo', polygons=[[[x,y],...,[x,y]],...])
-     """
-     if not isinstance(polygons, list):
-       raise UserError("polygons must be a list of points")
-     input_mask_proto = resources_pb2.Annotation(
-         input_id=input_id,
-         data=resources_pb2.Data(regions=[
-             resources_pb2.Region(
-                 region_info=resources_pb2.RegionInfo(polygon=resources_pb2.Polygon(
-                     points=[
-                         resources_pb2.Point(
-                             row=_point[1],  # row is y point
-                             col=_point[0],  # col is x point
-                             visibility="VISIBLE") for _point in polygons
-                     ])),
-                 data=resources_pb2.Data(concepts=[
-                     resources_pb2.Concept(
-                         id=f"id-{''.join(label.split(' '))}", name=label, value=1.)
-                 ]))
-         ]))
-
-     return input_mask_proto
-
-   def upload_from_url(self,
-                       input_id: str,
-                       image_url: str = None,
-                       video_url: str = None,
-                       audio_url: str = None,
-                       text_url: str = None,
-                       dataset_id: str = None,
-                       **kwargs) -> str:
-     """Upload input from url.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         image_url (str): The url for the image.
-         video_url (str): The url for the video.
-         audio_url (str): The url for the audio.
-         text_url (str): The url for the text.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-
-     Returns:
-         input_job_id: job id for the upload request.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-         >>> input_obj.upload_from_url(input_id='demo', image_url='https://samples.clarifai.com/metro-north.jpg')
-     """
-     input_pb = self.get_input_from_url(input_id, image_url, video_url, audio_url, text_url,
-                                        dataset_id, **kwargs)
-     return self.upload_inputs([input_pb])
-
-   def upload_from_file(self,
-                        input_id: str,
-                        image_file: str = None,
-                        video_file: str = None,
-                        audio_file: str = None,
-                        text_file: str = None,
-                        dataset_id: str = None,
-                        **kwargs) -> str:
-     """Upload input from file.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         image_file (str): The file for the image.
-         video_file (str): The file for the video.
-         audio_file (str): The file for the audio.
-         text_file (str): The file for the text.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-
-     Returns:
-         input_job_id: job id for the upload request.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-         >>> input_obj.upload_from_file(input_id='demo', audio_file='demo.mp3')
-     """
-     input_pb = self.get_input_from_file(input_id, image_file, video_file, audio_file, text_file,
-                                         dataset_id, **kwargs)
-     return self.upload_inputs([input_pb])
-
-   def upload_from_bytes(self,
-                         input_id: str,
-                         image_bytes: bytes = None,
-                         video_bytes: bytes = None,
-                         audio_bytes: bytes = None,
-                         text_bytes: bytes = None,
-                         dataset_id: str = None,
-                         **kwargs) -> str:
-     """Upload input from bytes.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         image_bytes (str): The bytes for the image.
-         video_bytes (str): The bytes for the video.
-         audio_bytes (str): The bytes for the audio.
-         text_bytes (str): The bytes for the text.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-
-     Returns:
-         input_job_id: job id for the upload request.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-         >>> image = open('demo.jpg', 'rb').read()
-         >>> input_obj.upload_from_bytes(input_id='demo', image_bytes=image)
-     """
-     input_pb = self.get_input_from_bytes(input_id, image_bytes, video_bytes, audio_bytes,
-                                          text_bytes, dataset_id, **kwargs)
-     return self.upload_inputs([input_pb])
-
-   def upload_text(self, input_id: str, raw_text: str, dataset_id: str = None,
-                   **kwargs) -> str:  #text specific
-     """Upload text from raw text.
-
-     Args:
-         input_id (str): The input ID for the input to create.
-         raw_text (str): The raw text.
-         dataset_id (str): The dataset ID for the dataset to add the input to.
-
-     Returns:
-         input_job_id (str): job id for the upload request.
-
-     Example:
-         >>> from clarifai.client.input import Inputs
-         >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-         >>> input_obj.upload_text(input_id = 'demo', raw_text = 'This is a test')
-     """
-     input_pb = self._get_proto(
-         input_id=input_id,
-         dataset_id=dataset_id,
-         text_pb=resources_pb2.Text(raw=raw_text),
-         **kwargs)
-     return self.upload_inputs([input_pb])
-
-   def upload_inputs(self, inputs: List[Input], show_log: bool = True) -> str:
-     """Upload list of input objects to the app.
-
-     Args:
-         inputs (list): List of input objects to upload.
-         show_log (bool): Show upload status log.
-
-     Returns:
-         input_job_id: job id for the upload request.
-     """
-     if not isinstance(inputs, list):
-       raise UserError("inputs must be a list of Input objects")
-     input_job_id = uuid.uuid4().hex  # generate a unique id for this job
-     request = service_pb2.PostInputsRequest(
-         user_app_id=self.user_app_id, inputs=inputs, inputs_add_job_id=input_job_id)
-     response = self._grpc_request(self.STUB.PostInputs, request)
-     if response.status.code != status_code_pb2.SUCCESS:
-       try:
-         self.logger.warning(response.inputs[0].status)
-       except IndexError:
-         self.logger.warning(response.status)
-     else:
-       if show_log:
-         self.logger.info("\nInputs Uploaded\n%s", response.status)
-
-     return input_job_id
-
-   def upload_annotations(self, batch_annot: List[resources_pb2.Annotation], show_log: bool = True
-                         ) -> Union[List[resources_pb2.Annotation], List[None]]:
-     """Upload image annotations to app.
-
-     Args:
-         batch_annot: annot batch protos
-
-     Returns:
-         retry_upload: failed annot upload
-     """
-     retry_upload = []  # those that fail to upload are stored for retries
-     request = service_pb2.PostAnnotationsRequest(
-         user_app_id=self.user_app_id, annotations=batch_annot)
-     response = self._grpc_request(self.STUB.PostAnnotations, request)
-     if response.status.code != status_code_pb2.SUCCESS:
-       try:
-         self.logger.warning(
-             f"Post annotations failed, status: {response.annotations[0].status.details}")
-       except Exception:
-         self.logger.warning(f"Post annotations failed, status: {response.status.details}")
-       finally:
-         retry_upload.extend(batch_annot)
-     else:
-       if show_log:
-         self.logger.info("\nAnnotations Uploaded\n%s", response.status)
-     return retry_upload
-
-   def _upload_batch(self, inputs: List[Input]) -> List[Input]:
-     """Upload a batch of input objects to the app.
-
-     Args:
-         inputs (List[Input]): List of input objects to upload.
-
-     Returns:
-         input_job_id: job id for the upload request.
-     """
-     input_job_id = self.upload_inputs(inputs, False)
-     self._wait_for_inputs(input_job_id)
-     failed_inputs = self._delete_failed_inputs(inputs)
-
-     return failed_inputs
-
-   def delete_inputs(self, inputs: List[Input]) -> None:
-     """Delete list of input objects from the app.
-
-     Args:
-         input_ids (Input): List of input objects to delete.
-
-     Example:
-         >>> from clarifai.client.user import User
-         >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-         >>> input_obj.delete_inputs(list(input_obj.list_inputs()))
-     """
-     if not isinstance(inputs, list):
-       raise UserError("input_ids must be a list of input ids")
-     inputs_ids = [input.id for input in inputs]
-     request = service_pb2.DeleteInputsRequest(user_app_id=self.user_app_id, ids=inputs_ids)
-     response = self._grpc_request(self.STUB.DeleteInputs, request)
-     if response.status.code != status_code_pb2.SUCCESS:
-       raise Exception(response.status)
-     self.logger.info("\nInputs Deleted\n%s", response.status)
-
-   def list_inputs(self,
-                   dataset_id: str = None,
-                   page_no: int = None,
-                   per_page: int = None,
-                   input_type: str = None) -> Generator[Input, None, None]:
-     """Lists all the inputs for the app.
-
-     Args:
-         dataset_id (str): The dataset ID for the dataset to list inputs from.
-         page_no (int): The page number to list.
-         per_page (int): The number of items per page.
-         input_type (str): The type of input to list. Options: 'image', 'video', 'audio', 'text'.
-
-     Yields:
-         Input: Input objects for the app.
-
-     Example:
-         >>> from clarifai.client.user import User
-         >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-         >>> all_inputs = list(input_obj.list_inputs(input_type='image'))
-
-     Note:
-         Defaults to 16 per page if page_no is specified and per_page is not specified.
-         If both page_no and per_page are None, then lists all the resources.
-     """
-     if input_type not in ['image', 'text', 'video', 'audio', None]:
-       raise UserError('Invalid input type, it should be image,text,audio or video')
-     if dataset_id:
-       request_data = dict(user_app_id=self.user_app_id, dataset_id=dataset_id)
-       all_inputs_info = self.list_pages_generator(
-           self.STUB.ListDatasetInputs,
-           service_pb2.ListDatasetInputsRequest,
-           request_data,
-           per_page=per_page,
-           page_no=page_no)
-     else:
-       request_data = dict(user_app_id=self.user_app_id)
-       all_inputs_info = self.list_pages_generator(
-           self.STUB.ListInputs,
-           service_pb2.ListInputsRequest,
-           request_data,
-           per_page=per_page,
-           page_no=page_no)
-     for input_info in all_inputs_info:
-       input_info['id'] = input_info.pop('dataset_input_id') if dataset_id else input_info.pop(
-           'input_id')
-       if input_type:
-         if input_type not in input_info['data'].keys():
-           continue
-       yield resources_pb2.Input(**input_info)
-
-   def list_annotations(self,
-                        batch_input: List[Input] = None,
-                        page_no: int = None,
-                        per_page: int = None) -> Generator[Annotation, None, None]:
-     """Lists all the annotations for the app.
-
-     Args:
-         batch_input (List[Input]): The input objects to list annotations from.
-         page_no (int): The page number to list.
-         per_page (int): The number of items per page.
-
-     Yields:
-         Annotation: Annotation objects for the app.
-
-     Example:
-         >>> from clarifai.client.user import User
-         >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-         >>> all_inputs = list(input_obj.list_inputs(input_type='image'))
-         >>> all_annotations = list(input_obj.list_annotations(batch_input=all_inputs))
-
-     Note:
-         If batch_input is not given, then lists all the annotations for the app.
-         Defaults to 16 per page if page_no is specified and per_page is not specified.
-         If both page_no and per_page are None, then lists all the resources.
-     """
-     request_data = dict(
-         user_app_id=self.user_app_id,
-         input_ids=[input.id for input in batch_input] if batch_input else None)
-     all_annotations_info = self.list_pages_generator(
-         self.STUB.ListAnnotations,
-         service_pb2.ListAnnotationsRequest,
-         request_data,
-         per_page=per_page,
-         page_no=page_no)
-     for annotations_info in all_annotations_info:
-       annotations_info['id'] = annotations_info.pop('annotation_id')
-       yield Annotation(**annotations_info)
-
-   def _bulk_upload(self, inputs: List[Input], chunk_size: int = 128) -> None:
-     """Uploads process for large number of inputs.
-
-     Args:
-         inputs (List[Input]): input protos
-         chunk_size (int): chunk size for each request
-     """
-     num_workers: int = min(10, cpu_count())  # limit max workers to 10
-     chunk_size = min(128, chunk_size)  # limit max protos in a req
-     chunked_inputs = Chunker(inputs, chunk_size).chunk()
-     with ThreadPoolExecutor(max_workers=num_workers) as executor:
-       with tqdm(total=len(chunked_inputs), desc='Uploading inputs') as progress:
-         # Submit all jobs to the executor and store the returned futures
-         futures = [
-             executor.submit(self._upload_batch, batch_input_ids)
-             for batch_input_ids in chunked_inputs
-         ]
-
-         for job in as_completed(futures):
-           retry_input_proto = job.result()
-           self._retry_uploads(retry_input_proto)
-           progress.update()
-
-   def _wait_for_inputs(self, input_job_id: str) -> bool:
-     """Wait for inputs to be processed. Cancel Job if timeout > 30 minutes.
-
-     Args:
-         input_job_id (str): Upload Input Job ID
-
-     Returns:
-         True if inputs are processed, False otherwise
-     """
-     backoff_iterator = BackoffIterator()
-     max_retries = 10
-     start_time = time.time()
-     while True:
-       request = service_pb2.GetInputsAddJobRequest(user_app_id=self.user_app_id, id=input_job_id)
-       response = self._grpc_request(self.STUB.GetInputsAddJob, request)
-
-       if time.time() - start_time > 60 * 30 or max_retries == 0:  # 30 minutes timeout
-         self._grpc_request(self.STUB.CancelInputsAddJob,
-                            service_pb2.CancelInputsAddJobRequest(
-                                user_app_id=self.user_app_id, id=input_job_id))  #Cancel Job
-         return False
-       if response.status.code != status_code_pb2.SUCCESS:
-         max_retries -= 1
-         self.logger.warning(f"Get input job failed, status: {response.status.details}\n")
-         continue
-       if response.inputs_add_job.progress.in_progress_count == 0 and response.inputs_add_job.progress.pending_count == 0:
-         return True
-       else:
-         time.sleep(next(backoff_iterator))
-
-   def _retry_uploads(self, failed_inputs: List[Input]) -> None:
-     """Retry failed uploads.
-
-     Args:
-         failed_inputs (List[Input]): failed input prots
-     """
-     if failed_inputs:
-       self._upload_batch(failed_inputs)
-
-   def _delete_failed_inputs(self, inputs: List[Input]) -> List[Input]:
-     """Delete failed input ids from clarifai platform dataset.
-
-     Args:
-         inputs (List[Input]): batch input protos
-
-     Returns:
-         failed_inputs: failed inputs
-     """
-     input_ids = [input.id for input in inputs]
-     success_status = status_pb2.Status(code=status_code_pb2.INPUT_DOWNLOAD_SUCCESS)
-     request = service_pb2.ListInputsRequest(
-         ids=input_ids,
-         per_page=len(input_ids),
-         user_app_id=self.user_app_id,
-         status=success_status)
-     response = self._grpc_request(self.STUB.ListInputs, request)
-     response_dict = MessageToDict(response)
-     success_inputs = response_dict.get('inputs', [])
-
-     success_input_ids = [input.get('id') for input in success_inputs]
-     failed_inputs = [input for input in inputs if input.id not in success_input_ids]
-     #delete failed inputs
-     self._grpc_request(self.STUB.DeleteInputs,
-                        service_pb2.DeleteInputsRequest(
-                            user_app_id=self.user_app_id, ids=[input.id
-                                                               for input in failed_inputs]))
-
-     return failed_inputs
-
-   def __getattr__(self, name):
-     return getattr(self.input_info, name)
-
-   def __str__(self):
-     init_params = [param for param in self.kwargs.keys()]
-     attribute_strings = [
-         f"{param}={getattr(self.input_info, param)}" for param in init_params
-         if hasattr(self.input_info, param)
-     ]
-     return f"Input Details: \n{', '.join(attribute_strings)}\n"