clarifai-9.10.1-py3-none-any.whl → clarifai-9.10.3-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (323)
  1. clarifai/client/__init__.py +3 -2
  2. clarifai/client/app.py +39 -23
  3. clarifai/client/base.py +6 -6
  4. clarifai/client/dataset.py +113 -55
  5. clarifai/client/input.py +47 -55
  6. clarifai/client/model.py +27 -25
  7. clarifai/client/module.py +13 -11
  8. clarifai/client/runner.py +5 -3
  9. clarifai/client/search.py +29 -10
  10. clarifai/client/user.py +14 -8
  11. clarifai/client/workflow.py +22 -20
  12. clarifai/constants/dataset.py +22 -0
  13. clarifai/datasets/upload/base.py +9 -7
  14. clarifai/datasets/upload/features.py +3 -3
  15. clarifai/datasets/upload/image.py +49 -50
  16. clarifai/datasets/upload/loaders/coco_captions.py +26 -80
  17. clarifai/datasets/upload/loaders/coco_detection.py +56 -115
  18. clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
  19. clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
  20. clarifai/datasets/upload/loaders/xview_detection.py +3 -3
  21. clarifai/datasets/upload/text.py +16 -16
  22. clarifai/datasets/upload/utils.py +196 -21
  23. clarifai/utils/misc.py +21 -0
  24. clarifai/versions.py +1 -1
  25. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
  26. clarifai-9.10.3.dist-info/RECORD +96 -0
  27. clarifai-9.10.3.dist-info/top_level.txt +1 -0
  28. clarifai/auth/__init__.py +0 -6
  29. clarifai/auth/helper.py +0 -367
  30. clarifai/auth/register.py +0 -23
  31. clarifai/auth/stub.py +0 -127
  32. clarifai/datasets/upload/examples/README.md +0 -31
  33. clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
  34. clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  35. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  36. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  37. clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  38. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  39. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  40. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  41. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  42. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  43. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  44. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  45. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  46. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  47. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  48. clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  49. clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  50. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  51. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  52. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  53. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  54. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  55. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  56. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  57. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  58. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  59. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  60. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  61. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  62. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  63. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  64. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  65. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  66. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  67. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  68. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  69. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  70. clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
  71. clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  72. clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  73. clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  74. clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  75. clarifai/datasets/upload/loaders/README.md +0 -49
  76. clarifai/models/model_serving/README.md +0 -155
  77. clarifai/models/model_serving/docs/custom_config.md +0 -33
  78. clarifai/models/model_serving/docs/dependencies.md +0 -11
  79. clarifai/models/model_serving/docs/inference_parameters.md +0 -134
  80. clarifai/models/model_serving/docs/model_types.md +0 -20
  81. clarifai/models/model_serving/docs/output.md +0 -28
  82. clarifai/models/model_serving/examples/README.md +0 -7
  83. clarifai/models/model_serving/examples/image_classification/README.md +0 -9
  84. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  85. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  86. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  87. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  88. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  89. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  90. clarifai/models/model_serving/examples/text_classification/README.md +0 -9
  91. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  92. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  93. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  94. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  95. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  96. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  97. clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
  98. clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
  99. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  100. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  101. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  102. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  104. clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
  105. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  106. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  107. clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
  108. clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  109. clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  110. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  111. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
  112. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  113. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  114. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
  115. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  116. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  117. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  118. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  119. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  120. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  121. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  122. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  123. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  124. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  125. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  126. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  127. clarifai/modules/README.md +0 -5
  128. clarifai/modules/style.css +0 -217
  129. clarifai-9.10.1.dist-info/RECORD +0 -386
  130. clarifai-9.10.1.dist-info/top_level.txt +0 -2
  131. clarifai_utils/__init__.py +0 -0
  132. clarifai_utils/auth/__init__.py +0 -6
  133. clarifai_utils/auth/helper.py +0 -367
  134. clarifai_utils/auth/register.py +0 -23
  135. clarifai_utils/auth/stub.py +0 -127
  136. clarifai_utils/cli.py +0 -0
  137. clarifai_utils/client/__init__.py +0 -16
  138. clarifai_utils/client/app.py +0 -684
  139. clarifai_utils/client/auth/__init__.py +0 -4
  140. clarifai_utils/client/auth/helper.py +0 -367
  141. clarifai_utils/client/auth/register.py +0 -23
  142. clarifai_utils/client/auth/stub.py +0 -127
  143. clarifai_utils/client/base.py +0 -131
  144. clarifai_utils/client/dataset.py +0 -442
  145. clarifai_utils/client/input.py +0 -892
  146. clarifai_utils/client/lister.py +0 -54
  147. clarifai_utils/client/model.py +0 -575
  148. clarifai_utils/client/module.py +0 -94
  149. clarifai_utils/client/runner.py +0 -161
  150. clarifai_utils/client/search.py +0 -239
  151. clarifai_utils/client/user.py +0 -253
  152. clarifai_utils/client/workflow.py +0 -223
  153. clarifai_utils/constants/model.py +0 -4
  154. clarifai_utils/constants/search.py +0 -2
  155. clarifai_utils/datasets/__init__.py +0 -0
  156. clarifai_utils/datasets/export/__init__.py +0 -0
  157. clarifai_utils/datasets/export/inputs_annotations.py +0 -222
  158. clarifai_utils/datasets/upload/__init__.py +0 -0
  159. clarifai_utils/datasets/upload/base.py +0 -66
  160. clarifai_utils/datasets/upload/examples/README.md +0 -31
  161. clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
  162. clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  163. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  164. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  165. clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  166. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  167. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  168. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  169. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  170. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  171. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  172. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  173. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  174. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  175. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  176. clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  177. clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  178. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  179. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  180. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  181. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  182. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  183. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  184. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  185. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  186. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  187. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  188. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  189. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  190. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  191. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  192. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  193. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  194. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  195. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  196. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  197. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  198. clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
  199. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  200. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  201. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  202. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  203. clarifai_utils/datasets/upload/features.py +0 -44
  204. clarifai_utils/datasets/upload/image.py +0 -165
  205. clarifai_utils/datasets/upload/loaders/README.md +0 -49
  206. clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
  207. clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
  208. clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
  209. clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
  210. clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
  211. clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
  212. clarifai_utils/datasets/upload/text.py +0 -53
  213. clarifai_utils/datasets/upload/utils.py +0 -63
  214. clarifai_utils/errors.py +0 -89
  215. clarifai_utils/models/__init__.py +0 -0
  216. clarifai_utils/models/api.py +0 -283
  217. clarifai_utils/models/model_serving/README.md +0 -155
  218. clarifai_utils/models/model_serving/__init__.py +0 -12
  219. clarifai_utils/models/model_serving/cli/__init__.py +0 -12
  220. clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
  221. clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
  222. clarifai_utils/models/model_serving/cli/repository.py +0 -87
  223. clarifai_utils/models/model_serving/constants.py +0 -1
  224. clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
  225. clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
  226. clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
  227. clarifai_utils/models/model_serving/docs/model_types.md +0 -20
  228. clarifai_utils/models/model_serving/docs/output.md +0 -28
  229. clarifai_utils/models/model_serving/examples/README.md +0 -7
  230. clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
  231. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  232. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
  233. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
  234. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  235. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  236. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  237. clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  238. clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  239. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  240. clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
  241. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  242. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
  243. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
  244. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  245. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  246. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  247. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  248. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  249. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  250. clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
  251. clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
  252. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  253. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  254. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  255. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  256. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  257. clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
  258. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  259. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
  260. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
  261. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  262. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  263. clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
  264. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
  265. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
  266. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  267. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  268. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  269. clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
  270. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  271. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
  272. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
  273. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  274. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  275. clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
  276. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  277. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
  278. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
  279. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  280. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  281. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  282. clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
  283. clarifai_utils/models/model_serving/model_config/config.py +0 -302
  284. clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
  285. clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  286. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  287. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  288. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  289. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  290. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  291. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  292. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  293. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  294. clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
  295. clarifai_utils/models/model_serving/models/__init__.py +0 -12
  296. clarifai_utils/models/model_serving/models/default_test.py +0 -275
  297. clarifai_utils/models/model_serving/models/inference.py +0 -42
  298. clarifai_utils/models/model_serving/models/model_types.py +0 -265
  299. clarifai_utils/models/model_serving/models/output.py +0 -124
  300. clarifai_utils/models/model_serving/models/pb_model.py +0 -74
  301. clarifai_utils/models/model_serving/models/test.py +0 -64
  302. clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
  303. clarifai_utils/modules/README.md +0 -5
  304. clarifai_utils/modules/__init__.py +0 -0
  305. clarifai_utils/modules/css.py +0 -60
  306. clarifai_utils/modules/pages.py +0 -42
  307. clarifai_utils/modules/style.css +0 -217
  308. clarifai_utils/runners/__init__.py +0 -0
  309. clarifai_utils/runners/example.py +0 -33
  310. clarifai_utils/schema/search.py +0 -69
  311. clarifai_utils/urls/helper.py +0 -103
  312. clarifai_utils/utils/__init__.py +0 -0
  313. clarifai_utils/utils/logging.py +0 -90
  314. clarifai_utils/utils/misc.py +0 -33
  315. clarifai_utils/utils/model_train.py +0 -157
  316. clarifai_utils/versions.py +0 -6
  317. clarifai_utils/workflows/__init__.py +0 -0
  318. clarifai_utils/workflows/export.py +0 -68
  319. clarifai_utils/workflows/utils.py +0 -59
  320. clarifai_utils/workflows/validate.py +0 -67
  321. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
  322. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
  323. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
clarifai_utils/datasets/upload/features.py
@@ -1,44 +0,0 @@
- #! dataset output features (output from preprocessing & input to clarifai data proto builders)
- from dataclasses import dataclass
- from typing import List, Optional, Union
-
-
- @dataclass
- class TextFeatures:
-   """Text classification datasets preprocessing output features."""
-   text: str
-   labels: List[Union[str, int]]  # List[str or int] to cater for multi-class tasks
-   id: Optional[int] = None  # text_id
-   metadata: Optional[dict] = None
-
-
- @dataclass
- class VisualClassificationFeatures:
-   """Image classification datasets preprocessing output features."""
-   image_path: str
-   label: Union[str, int]
-   geo_info: Optional[List[float]] = None  # [Longitude, Latitude]
-   id: Optional[int] = None  # image_id
-   metadata: Optional[dict] = None
-
-
- @dataclass
- class VisualDetectionFeatures:
-   """Image Detection datasets preprocessing output features."""
-   image_path: str
-   classes: List[Union[str, int]]
-   bboxes: List[List[float]]
-   geo_info: Optional[List[float]] = None  # [Longitude, Latitude]
-   id: Optional[int] = None  # image_id
-   metadata: Optional[dict] = None
-
-
- @dataclass
- class VisualSegmentationFeatures:
-   """Image Segmentation datasets preprocessing output features."""
-   image_path: str
-   classes: List[Union[str, int]]
-   polygons: List[List[List[float]]]
-   geo_info: Optional[List[float]] = None  # [Longitude, Latitude]
-   id: Optional[int] = None  # image_id
-   metadata: Optional[dict] = None
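
These dataclasses are the contract between a dataset loader's `__getitem__` and Clarifai's proto builders. As a minimal sketch of what a loader item looks like (the path, label, coordinates, and id below are made-up illustration values, not from the package):

```python
from clarifai.datasets.upload.features import VisualClassificationFeatures

# Hypothetical item a classification loader's __getitem__ might return.
item = VisualClassificationFeatures(
    image_path="data/train2017/000000139558.jpg",  # local image path
    label="hamburger",                             # str or int class label
    geo_info=[-73.99, 40.73],                      # optional [Longitude, Latitude]
    id=139558,                                     # optional image_id
    metadata={"split": "train"})                   # optional free-form metadata
```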
clarifai_utils/datasets/upload/image.py
@@ -1,165 +0,0 @@
- import os
- from concurrent.futures import ThreadPoolExecutor
- from typing import Iterator, List, Tuple
-
- from clarifai_grpc.grpc.api import resources_pb2
- from google.protobuf.struct_pb2 import Struct
-
- from .base import ClarifaiDataset
-
-
- class VisualClassificationDataset(ClarifaiDataset):
-
-   def __init__(self, datagen_object: Iterator, dataset_id: str, split: str) -> None:
-     super().__init__(datagen_object, dataset_id, split)
-
-   def _extract_protos(self, batch_input_ids: List[str]
-                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
-     """Create input image and annotation protos for batch of input ids.
-     Args:
-       batch_input_ids: List of input IDs to retrieve the protos for.
-     Returns:
-       input_protos: List of input protos.
-       annotation_protos: List of annotation protos.
-     """
-     input_protos, annotation_protos = [], []
-
-     def process_datagen_item(id):
-       datagen_item = self.datagen_object[id]
-       metadata = Struct()
-       image_path = datagen_item.image_path
-       label = datagen_item.label if isinstance(datagen_item.label,
-                                                list) else [datagen_item.label]  # clarifai concept
-       input_id = f"{self.dataset_id}-{self.split}-{id}" if datagen_item.id is None else f"{self.dataset_id}-{self.split}-{str(datagen_item.id)}"
-       geo_info = datagen_item.geo_info
-       if datagen_item.metadata is not None:
-         metadata.update(datagen_item.metadata)
-       else:
-         metadata.update({"filename": os.path.basename(image_path), "split": self.split})
-
-       self.all_input_ids[id] = input_id
-       input_protos.append(
-           self.input_object.get_input_from_file(
-               input_id=input_id,
-               image_file=image_path,
-               dataset_id=self.dataset_id,
-               labels=label,
-               geo_info=geo_info,
-               metadata=metadata))
-
-     with ThreadPoolExecutor(max_workers=4) as executor:
-       futures = [executor.submit(process_datagen_item, id) for id in batch_input_ids]
-       for job in futures:
-         job.result()
-
-     return input_protos, annotation_protos
-
-
- class VisualDetectionDataset(ClarifaiDataset):
-   """Visual detection dataset proto class."""
-
-   def __init__(self, datagen_object: Iterator, dataset_id: str, split: str) -> None:
-     super().__init__(datagen_object, dataset_id, split)
-
-   def _extract_protos(self, batch_input_ids: List[int]
-                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
-     """Create input image protos for each data generator item.
-     Args:
-       batch_input_ids: List of input IDs to retrieve the protos for.
-     Returns:
-       input_protos: List of input protos.
-       annotation_protos: List of annotation protos.
-     """
-     input_protos, annotation_protos = [], []
-
-     def process_datagen_item(id):
-       datagen_item = self.datagen_object[id]
-       metadata = Struct()
-       image = datagen_item.image_path
-       labels = datagen_item.classes  # list:[l1,...,ln]
-       bboxes = datagen_item.bboxes  # [[xmin,ymin,xmax,ymax],...,[xmin,ymin,xmax,ymax]]
-       input_id = f"{self.dataset_id}-{self.split}-{id}" if datagen_item.id is None else f"{self.dataset_id}-{self.split}-{str(datagen_item.id)}"
-       if datagen_item.metadata is not None:
-         metadata.update(datagen_item.metadata)
-       else:
-         metadata.update({"filename": os.path.basename(image), "split": self.split})
-       geo_info = datagen_item.geo_info
-
-       self.all_input_ids[id] = input_id
-       input_protos.append(
-           self.input_object.get_input_from_file(
-               input_id=input_id,
-               image_file=image,
-               dataset_id=self.dataset_id,
-               geo_info=geo_info,
-               metadata=metadata))
-       # iter over bboxes and classes
-       # one id could have more than one bbox and label
-       for i in range(len(bboxes)):
-         annotation_protos.append(
-             self.input_object.get_annotation_proto(
-                 input_id=input_id, label=labels[i], annotations=bboxes[i]))
-
-     with ThreadPoolExecutor(max_workers=4) as executor:
-       futures = [executor.submit(process_datagen_item, id) for id in batch_input_ids]
-       for job in futures:
-         job.result()
-
-     return input_protos, annotation_protos
-
-
- class VisualSegmentationDataset(ClarifaiDataset):
-   """Visual segmentation dataset proto class."""
-
-   def __init__(self, datagen_object: Iterator, dataset_id: str, split: str) -> None:
-     super().__init__(datagen_object, dataset_id, split)
-
-   def _extract_protos(self, batch_input_ids: List[str]
-                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
-     """Create input image and annotation protos for batch of input ids.
-     Args:
-       batch_input_ids: List of input IDs to retrieve the protos for.
-     Returns:
-       input_protos: List of input protos.
-       annotation_protos: List of annotation protos.
-     """
-     input_protos, annotation_protos = [], []
-
-     def process_datagen_item(id):
-       datagen_item = self.datagen_object[id]
-       metadata = Struct()
-       image = datagen_item.image_path
-       labels = datagen_item.classes
-       _polygons = datagen_item.polygons  # list of polygons: [[[x,y],...,[x,y]],...]
-       input_id = f"{self.dataset_id}-{self.split}-{id}" if datagen_item.id is None else f"{self.dataset_id}-{self.split}-{str(datagen_item.id)}"
-       if datagen_item.metadata is not None:
-         metadata.update(datagen_item.metadata)
-       else:
-         metadata.update({"filename": os.path.basename(image), "split": self.split})
-       geo_info = datagen_item.geo_info
-
-       self.all_input_ids[id] = input_id
-       input_protos.append(
-           self.input_object.get_input_from_file(
-               input_id=input_id,
-               image_file=image,
-               dataset_id=self.dataset_id,
-               geo_info=geo_info,
-               metadata=metadata))
-
-       ## Iterate over each masked image and create a proto for upload to clarifai
-       ## The length of masks/polygons-list and labels must be equal
-       for i, _polygon in enumerate(_polygons):
-         try:
-           annotation_protos.append(
-               self.input_object.get_mask_proto(
-                   input_id=input_id, label=labels[i], polygons=_polygon))
-         except IndexError:
-           continue
-
-     with ThreadPoolExecutor(max_workers=4) as executor:
-       futures = [executor.submit(process_datagen_item, id) for id in batch_input_ids]
-       for job in futures:
-         job.result()
-
-     return input_protos, annotation_protos
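
All three classes above share one concurrency pattern: a nested worker builds the protos for a single item, and a small `ThreadPoolExecutor` fans the batch out. A stripped-down sketch of that pattern, with `items` and `build_proto` as hypothetical stand-ins for `self.datagen_object` and the proto-building body:

```python
from concurrent.futures import ThreadPoolExecutor

def extract_protos_concurrently(items, build_proto, max_workers=4):
  """Build one proto per loader item across a small thread pool."""
  protos = []

  def process(idx):
    # list.append is atomic under CPython's GIL, which is the same
    # property the _extract_protos implementations above rely on.
    protos.append(build_proto(items[idx]))

  with ThreadPoolExecutor(max_workers=max_workers) as executor:
    futures = [executor.submit(process, i) for i in range(len(items))]
    for future in futures:
      future.result()  # re-raise any exception from the worker

  return protos
```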
clarifai_utils/datasets/upload/loaders/README.md
@@ -1,49 +0,0 @@
- ## Dataset Loaders
-
- A collection of data preprocessing modules for popular public datasets that prepares them for compatible upload into Clarifai user app datasets.
-
- ## Usage
-
- If a dataset module exists in the zoo, the dataset can be uploaded with a short python script (or via the command line) by passing the module name to the `dataset_loader` parameter of the `Dataset` class's `upload_dataset` method, i.e.:
-
- ```python
- from clarifai.client.app import App
-
- app = App(app_id="", user_id="")
- # Create a dataset in Clarifai App
- dataset = app.create_dataset(dataset_id="")
- # execute data upload to Clarifai app dataset
- dataset.upload_dataset(task='visual_segmentation', split="train", dataset_loader='coco_segmentation')
- ```
-
- ## Dataset Loaders
-
- | dataset name | task | module name (.py) | splits |
- | --- | --- | --- | --- |
- | [COCO 2017](https://cocodataset.org/#download) | Detection | `coco_detection` | `train`, `val` |
- | | Segmentation | `coco_segmentation` | `train`, `val` |
- | | Captions | `coco_captions` | `train`, `val` |
- | [xVIEW](http://xviewdataset.org/) | Detection | `xview_detection` | `train` |
- | [ImageNet](https://www.image-net.org/) | Classification | `imagenet_classification` | `train` |
- ## Contributing Modules
-
- A dataloader (preprocessing) module is a python script containing a dataloader class that implements data download (fetching the dataset from a source to a local disk directory) and extraction, along with the dataloader methods.
-
- The class naming convention is `<datasetname>DataLoader`. The class must accept `split` as the only argument in the `__init__` method, and the `__getitem__` method must return one of `VisualClassificationFeatures()`, `VisualDetectionFeatures()`, `VisualSegmentationFeatures()` or `TextFeatures()` as defined in [clarifai/datasets/upload/features.py](../features.py). Other methods can be added as needed, but the class must inherit from the `ClarifaiDataLoader` base class in [clarifai/datasets/upload/base.py](../base.py).
- The existing dataset modules in the zoo can serve as references for development; a minimal example is sketched after this section.
-
- ## Notes
-
- * Dataloaders in the zoo by default first create a `data` directory inside the zoo directory, download the data into it, preprocess it, and finally upload it to a Clarifai app dataset. For instance, with the COCO modules above, the coco2017 dataset is first downloaded into a `data` directory, extracted, preprocessed, and finally uploaded to Clarifai.
-
- * To avoid the scripts re-downloading data you already have locally, create a `data` directory in the loaders directory and move your extracted data there. **Ensure that the extracted folder/file names and file structure MATCH those produced when the downloaded zips are extracted.**
-
- * COCO Format: To reuse the coco modules above on your own coco-format data, first ensure the criteria in the two points above are met. Then pass the relevant coco module name to the `dataset_loader=` parameter in `upload_dataset()`.
-
- * xVIEW Dataset: To upload, register and download the images and labels from [xviewdataset](http://xviewdataset.org/#dataset), then follow the steps above to place the extracted folder in the `data` directory. Finally pass the xview module name to the `dataset_loader=` parameter in `upload_dataset()`.
-
- * ImageNet Dataset: ImageNet should be downloaded and placed in the `data` folder along with the [label mapping file](https://www.kaggle.com/competitions/imagenet-object-localization-challenge/data?select=LOC_synset_mapping.txt):
-
-       <data>/
-       ├── train/
-       ├── LOC_synset_mapping.txt
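
The contributing rules in the README above amount to a small contract: subclass `ClarifaiDataLoader`, take only `split` in `__init__`, and return one of the feature dataclasses from `__getitem__`. A minimal loader under that contract (the `PetsDataLoader` name and the `data/<split>.csv` layout of `image_path,label` rows are invented for illustration; only the base class and feature dataclass come from the package):

```python
import csv

from clarifai.datasets.upload.base import ClarifaiDataLoader
from clarifai.datasets.upload.features import VisualClassificationFeatures

class PetsDataLoader(ClarifaiDataLoader):
  """Hypothetical loader reading data/<split>.csv rows of image_path,label."""

  def __init__(self, split: str = "train"):
    self.split = split
    with open(f"data/{split}.csv") as f:
      self.rows = list(csv.reader(f))

  def __len__(self):
    return len(self.rows)

  def __getitem__(self, idx):
    image_path, label = self.rows[idx]
    return VisualClassificationFeatures(image_path, label, id=idx)
```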
clarifai_utils/datasets/upload/loaders/__init__.py
File without changes
clarifai_utils/datasets/upload/loaders/coco_captions.py
@@ -1,103 +0,0 @@
- #! COCO 2017 image captioning dataset
-
- import os
- import zipfile
- from glob import glob
-
- import requests
- from pycocotools.coco import COCO
- from tqdm import tqdm
-
- from clarifai.datasets.upload.base import ClarifaiDataLoader
-
- from ..features import VisualClassificationFeatures
-
-
- class COCOCaptionsDataLoader(ClarifaiDataLoader):
-   """COCO 2017 Image Captioning Dataset."""
-
-   def __init__(self, split: str = "train"):
-     """Initialize coco captions dataset.
-     Args:
-       split: "train" or "val"
-     Downloads the coco zips in `self.filenames` into the local
-     `self.data_dir` directory on first use.
-     """
-     self.filenames = {
-         "train": "train2017.zip",
-         "val": "val2017.zip",
-         "annotations": "annotations_trainval2017.zip"
-     }
-     self.split = split
-     self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-     self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                  "data")  # data storage directory
-     self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
-
-     self.load_data()
-
-   def coco_download(self, save_dir):
-     """Download coco dataset."""
-     if not os.path.exists(save_dir):
-       os.mkdir(save_dir)
-
-     # check if train, val and annotation dirs exist
-     # so that the coco2017 data isn't downloaded again
-     for key, filename in self.filenames.items():
-       existing_files = glob(f"{save_dir}/{key}*")
-       if existing_files:
-         print(f"{key} dataset already downloaded and extracted")
-         continue
-
-       print("-" * 80)
-       print(f"Downloading {filename}")
-       print("-" * 80)
-
-       if "annotations" in filename:
-         self.url = "http://images.cocodataset.org/annotations/"
-
-       response = requests.get(self.url + filename, stream=True)
-       response.raise_for_status()
-       with open(os.path.join(save_dir, filename), "wb") as _file:
-         for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-           if chunk:
-             _file.write(chunk)
-       print("Data download complete...")
-
-       # extract files
-       zf = zipfile.ZipFile(os.path.join(save_dir, filename))
-       print(f" Extracting {filename} file")
-       zf.extractall(path=save_dir)
-       # delete coco zip
-       print(f" Deleting {filename}")
-       os.remove(path=os.path.join(save_dir, filename))
-
-   def load_data(self):
-     if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-       self.coco_download(self.data_dir)
-       self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "train" in i][0]
-       self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "val" in i][0]
-
-       self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "annotations" in i][0]
-     else:
-       raise Exception(f"`filenames` must be a dict of the 3 coco zip file names: \
-           train, val and annotations. Found {len(self.filenames)} items instead.")
-
-     annot_file = glob(self.extracted_coco_dirs["annotations"] + "/" + f"captions_{self.split}*")[0]
-     coco = COCO(annot_file)
-     annot_ids = coco.getAnnIds()
-     self.annotations = coco.loadAnns(annot_ids)
-
-   def __len__(self):
-     return len(self.annotations)
-
-   def __getitem__(self, idx):
-     annot = self.annotations[idx]
-     image_path = glob(
-         os.path.join(self.extracted_coco_dirs[self.split],
-                      f"{str(annot['image_id']).zfill(12)}*"))[0]
-
-     return VisualClassificationFeatures(image_path, annot["caption"], id=annot["image_id"])
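
Under the loader contract, a quick smoke test is just indexing into the class (names as in the deleted module above; the surviving clarifai/ copy of this loader was also rewritten in this release, so its interface may differ). This triggers the download/extraction on first run:

```python
from clarifai.datasets.upload.loaders.coco_captions import COCOCaptionsDataLoader

loader = COCOCaptionsDataLoader(split="val")  # downloads/extracts coco zips if absent
print(len(loader))                  # number of caption annotations
item = loader[0]                    # a VisualClassificationFeatures instance
print(item.image_path, item.label)  # label carries the caption text
```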
clarifai_utils/datasets/upload/loaders/coco_detection.py
@@ -1,134 +0,0 @@
- #! COCO 2017 detection dataset
-
- import os
- import zipfile
- from glob import glob
-
- import cv2
- import requests
- from pycocotools.coco import COCO
- from tqdm import tqdm
-
- from clarifai.datasets.upload.base import ClarifaiDataLoader
-
- from ..features import VisualDetectionFeatures
-
-
- class COCODetectionDataLoader(ClarifaiDataLoader):
-   """COCO 2017 Image Detection Dataset."""
-
-   def __init__(self, split: str = "train"):
-     """
-     Initialize coco detection dataset.
-     Args:
-       split: "train" or "val"
-     Downloads the coco zips in `self.filenames` into the local
-     `self.data_dir` directory on first use.
-     """
-     self.filenames = {
-         "train": "train2017.zip",
-         "val": "val2017.zip",
-         "annotations": "annotations_trainval2017.zip"
-     }
-     self.split = split
-     self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-     self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                  "data")  # data storage directory
-     self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
-
-     self.load_data()
-
-   def coco_download(self, save_dir):
-     """Download coco dataset."""
-     if not os.path.exists(save_dir):
-       os.mkdir(save_dir)
-
-     # check if train*, val* and annotation* dirs exist
-     # so that the coco2017 data isn't downloaded again
-     for key, filename in self.filenames.items():
-       existing_files = glob(f"{save_dir}/{key}*")
-       if existing_files:
-         print(f"{key} dataset already downloaded and extracted")
-         continue
-
-       print("-" * 80)
-       print(f"Downloading {filename}")
-       print("-" * 80)
-
-       if "annotations" in filename:
-         self.url = "http://images.cocodataset.org/annotations/"
-
-       response = requests.get(self.url + filename, stream=True)
-       response.raise_for_status()
-       with open(os.path.join(save_dir, filename), "wb") as _file:
-         for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-           if chunk:
-             _file.write(chunk)
-       print("Coco data download complete...")
-
-       # extract files
-       zf = zipfile.ZipFile(os.path.join(save_dir, filename))
-       print(f" Extracting {filename} file")
-       zf.extractall(path=save_dir)
-       # delete coco zip
-       print(f" Deleting {filename}")
-       os.remove(path=os.path.join(save_dir, filename))
-
-   def load_data(self):
-     if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-       self.coco_download(self.data_dir)
-       self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "train" in i][0]
-       self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "val" in i][0]
-
-       self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "annotations" in i][0]
-     else:
-       raise Exception(f"`filenames` must be a dict of the 3 coco zip file names: \
-           train, val and annotations. Found {len(self.filenames)} items instead.")
-
-     annot_file = glob(self.extracted_coco_dirs["annotations"] + "/" +\
-         f"instances_{self.split}*")[0]
-     self.coco = COCO(annot_file)
-     categories = self.coco.loadCats(self.coco.getCatIds())
-     self.cat_id_map = {category["id"]: category["name"] for category in categories}
-     self.cat_img_ids = {}
-     for cat_id in list(self.cat_id_map.keys()):
-       self.cat_img_ids[cat_id] = self.coco.getImgIds(catIds=[cat_id])
-
-     img_ids = []
-     for i in list(self.cat_img_ids.values()):
-       img_ids.extend(i)
-
-     self.img_ids = list(set(img_ids))
-
-   def __len__(self):
-     return len(self.img_ids)
-
-   def __getitem__(self, idx):
-     _id = self.img_ids[idx]
-     annots = []  # bboxes
-     class_names = []
-     labels = [cat_id for cat_id in self.cat_img_ids if _id in self.cat_img_ids[cat_id]]
-     image_path = glob(self.extracted_coco_dirs[self.split] + "/" +\
-         f"{str(_id).zfill(12)}*")[0]
-
-     image_height, image_width = cv2.imread(image_path).shape[:2]
-     for cat_id in labels:
-       annot_ids = self.coco.getAnnIds(imgIds=_id, catIds=[cat_id])
-       if len(annot_ids) > 0:
-         img_annotations = self.coco.loadAnns(annot_ids)
-         for ann in img_annotations:
-           class_names.append(self.cat_id_map[cat_id])
-           x_min = ann['bbox'][0] / image_width  # left_col
-           y_min = ann['bbox'][1] / image_height  # top_row
-           x_max = (ann['bbox'][0] + ann['bbox'][2]) / image_width  # right_col
-           y_max = (ann['bbox'][1] + ann['bbox'][3]) / image_height  # bottom_row
-           annots.append([x_min, y_min, x_max, y_max])
-       else:  # no annotations for this image_id-cat_id pair
-         continue
-     assert len(class_names) == len(annots), f"Num classes must match num bbox annotations \
-         for a single image. Found {len(class_names)} classes and {len(annots)} bboxes."
-
-     return VisualDetectionFeatures(image_path, class_names, annots, id=str(_id))
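
The inner loop above converts COCO's pixel `[x, y, width, height]` boxes into the normalized `[x_min, y_min, x_max, y_max]` corners that `VisualDetectionFeatures.bboxes` carries. The same arithmetic in isolation (the helper name is ours, not the package's):

```python
def coco_bbox_to_normalized(bbox, image_width, image_height):
  """Convert a COCO [x, y, w, h] pixel box to normalized corner form."""
  x, y, w, h = bbox
  return [x / image_width,         # x_min (left column)
          y / image_height,        # y_min (top row)
          (x + w) / image_width,   # x_max (right column)
          (y + h) / image_height]  # y_max (bottom row)

# e.g. a 100x50 box at (200, 100) in a 640x480 image:
print(coco_bbox_to_normalized([200, 100, 100, 50], 640, 480))
# -> [0.3125, 0.20833..., 0.46875, 0.3125]
```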
clarifai_utils/datasets/upload/loaders/coco_segmentation.py
@@ -1,166 +0,0 @@
- #! COCO 2017 Image Segmentation dataset
-
- import gc
- import os
- import zipfile
- from functools import reduce
- from glob import glob
-
- import cv2
- import numpy as np
- import requests
- from pycocotools import mask as maskUtils
- from pycocotools.coco import COCO
- from tqdm import tqdm
-
- from clarifai.datasets.upload.base import ClarifaiDataLoader
-
- from ..features import VisualSegmentationFeatures
-
-
- class COCOSegmentationDataLoader(ClarifaiDataLoader):
-   """COCO 2017 Image Segmentation Dataset."""
-
-   def __init__(self, split: str = "train"):
-     """
-     Initialize coco segmentation dataset.
-     Args:
-       split: "train" or "val"
-     Downloads the coco zips in `self.filenames` into the local
-     `self.data_dir` directory on first use.
-     """
-     self.filenames = {
-         "train": "train2017.zip",
-         "val": "val2017.zip",
-         "annotations": "annotations_trainval2017.zip"
-     }
-     self.split = split
-     self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-     self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                  "data")  # data storage dir
-     self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
-
-     self.load_data()
-
-   def coco_download(self, save_dir):
-     """Download coco dataset."""
-     if not os.path.exists(save_dir):
-       os.mkdir(save_dir)
-
-     # check if train, val and annotation dirs exist
-     # so that the coco2017 data isn't downloaded again
-     for key, filename in self.filenames.items():
-       existing_files = glob(f"{save_dir}/{key}*")
-       if existing_files:
-         print(f"{key} dataset already downloaded and extracted")
-         continue
-
-       print("-" * 80)
-       print(f"Downloading {filename}")
-       print("-" * 80)
-
-       if "annotations" in filename:
-         self.url = "http://images.cocodataset.org/annotations/"
-
-       response = requests.get(self.url + filename, stream=True)
-       response.raise_for_status()
-       with open(os.path.join(save_dir, filename), "wb") as _file:
-         for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-           if chunk:
-             _file.write(chunk)
-       print("Coco data download complete...")
-
-       # extract files
-       zf = zipfile.ZipFile(os.path.join(save_dir, filename))
-       print(f" Extracting {filename} file")
-       zf.extractall(path=save_dir)
-       # delete coco zip
-       print(f" Deleting {filename}")
-       os.remove(path=os.path.join(save_dir, filename))
-
-   def load_data(self):
-     """Load coco dataset image ids or filenames."""
-     if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-       self.coco_download(self.data_dir)
-       self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "train" in i][0]
-       self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "val" in i][0]
-
-       self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-           for i in os.listdir(self.data_dir) if "annotations" in i][0]
-     else:
-       raise Exception(f"`filenames` must be a dict of the 3 coco zip file names: \
-           train, val and annotations. Found {len(self.filenames)} items instead.")
-
-     annot_file = glob(self.extracted_coco_dirs["annotations"] + "/" + f"instances_{self.split}*")[
-         0]
-     self.coco = COCO(annot_file)
-     categories = self.coco.loadCats(self.coco.getCatIds())
-     self.cat_id_map = {category["id"]: category["name"] for category in categories}
-     self.cat_img_ids = {}
-     for cat_id in list(self.cat_id_map.keys()):
-       self.cat_img_ids[cat_id] = self.coco.getImgIds(catIds=[cat_id])
-
-     img_ids = set()
-     for i in list(self.cat_img_ids.values()):
-       img_ids.update(i)
-
-     self.img_ids = list(img_ids)
-
-   def __len__(self):
-     return len(self.img_ids)
-
-   def __getitem__(self, idx):
-     """Get image and annotations for a given index."""
-     _id = self.img_ids[idx]
-     annots = []  # polygons
-     class_names = []
-     labels = [cat_id for cat_id in self.cat_img_ids if _id in self.cat_img_ids[cat_id]]
-     image_path = glob(self.extracted_coco_dirs[self.split] + "/" +\
-         f"{str(_id).zfill(12)}*")[0]
-
-     image_height, image_width = cv2.imread(image_path).shape[:2]
-     for cat_id in labels:
-       annot_ids = self.coco.getAnnIds(imgIds=_id, catIds=[cat_id])
-       if len(annot_ids) > 0:
-         img_annotations = self.coco.loadAnns(annot_ids)
-         for ann in img_annotations:
-           # get polygons
-           if isinstance(ann['segmentation'], list):
-             for seg in ann['segmentation']:
-               poly = np.array(seg).reshape((int(len(seg) / 2), 2))
-               poly[:, 0], poly[:, 1] = poly[:, 0] / image_width, poly[:, 1] / image_height
-               annots.append(poly.tolist())  # [[x=col, y=row],...]
-               class_names.append(self.cat_id_map[cat_id])
-           else:  # seg: {"counts":[...]}
-             if isinstance(ann['segmentation']['counts'], list):
-               rle = maskUtils.frPyObjects([ann['segmentation']], image_height, image_width)
-             else:
-               rle = ann['segmentation']
-             mask = maskUtils.decode(rle)  # binary mask
-             # convert mask to polygons and add to annots
-             contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-             polygons = []
-             for cont in contours:
-               if cont.size >= 6:
-                 polygons.append(cont.astype(float).flatten().tolist())
-             # store polygons in (x,y) pairs
-             polygons_flattened = reduce(lambda x, y: x + y, polygons)
-             del polygons
-             del contours
-             del mask
-             gc.collect()
-
-             polygons = np.array(polygons_flattened).reshape((int(len(polygons_flattened) / 2), 2))
-             polygons[:, 0] = polygons[:, 0] / image_width
-             polygons[:, 1] = polygons[:, 1] / image_height
-
-             annots.append(polygons.tolist())  # [[x=col, y=row],...,[x=col, y=row]]
-             class_names.append(self.cat_id_map[cat_id])
-       else:  # no annotations for this image_id-cat_id pair
-         continue
-     assert len(class_names) == len(annots), f"Num classes must match num annotations \
-         for a single image. Found {len(class_names)} classes and {len(annots)} polygons."
-
-     return VisualSegmentationFeatures(image_path, class_names, annots, id=str(_id))
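
For the RLE branch above, the essential pipeline is decode → contour-trace → normalize. A condensed standalone version using the same libraries (note one deliberate difference: the deleted loader flattens every contour into a single combined polygon via `reduce`, whereas this sketch keeps each contour as its own polygon):

```python
import cv2
import numpy as np
from pycocotools import mask as maskUtils

def rle_to_normalized_polygons(segmentation, image_height, image_width):
  """Decode a COCO RLE segmentation into normalized [[x, y], ...] polygons."""
  if isinstance(segmentation['counts'], list):  # uncompressed RLE
    segmentation = maskUtils.frPyObjects([segmentation], image_height, image_width)
  mask = maskUtils.decode(segmentation)  # binary uint8 mask
  if mask.ndim == 3:                     # decode() returns HxWx1 for a list of RLEs
    mask = mask[:, :, 0]
  contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  polygons = []
  for cont in contours:
    if cont.size >= 6:  # at least 3 (x, y) points make a polygon
      poly = cont.astype(float).reshape(-1, 2)
      poly[:, 0] /= image_width   # x = column, normalized to [0, 1]
      poly[:, 1] /= image_height  # y = row, normalized to [0, 1]
      polygons.append(poly.tolist())
  return polygons
```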