clarifai 9.10.2-py3-none-any.whl → 9.10.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (323)
  1. clarifai/client/__init__.py +3 -2
  2. clarifai/client/app.py +39 -23
  3. clarifai/client/base.py +6 -6
  4. clarifai/client/dataset.py +113 -55
  5. clarifai/client/input.py +47 -55
  6. clarifai/client/model.py +27 -25
  7. clarifai/client/module.py +13 -11
  8. clarifai/client/runner.py +5 -3
  9. clarifai/client/search.py +7 -3
  10. clarifai/client/user.py +14 -8
  11. clarifai/client/workflow.py +22 -20
  12. clarifai/constants/dataset.py +22 -0
  13. clarifai/datasets/upload/base.py +9 -7
  14. clarifai/datasets/upload/features.py +3 -3
  15. clarifai/datasets/upload/image.py +49 -50
  16. clarifai/datasets/upload/loaders/coco_captions.py +26 -80
  17. clarifai/datasets/upload/loaders/coco_detection.py +56 -115
  18. clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
  19. clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
  20. clarifai/datasets/upload/loaders/xview_detection.py +3 -3
  21. clarifai/datasets/upload/text.py +16 -16
  22. clarifai/datasets/upload/utils.py +196 -21
  23. clarifai/utils/misc.py +21 -0
  24. clarifai/versions.py +1 -1
  25. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
  26. clarifai-9.10.3.dist-info/RECORD +96 -0
  27. clarifai-9.10.3.dist-info/top_level.txt +1 -0
  28. clarifai/auth/__init__.py +0 -6
  29. clarifai/auth/helper.py +0 -367
  30. clarifai/auth/register.py +0 -23
  31. clarifai/auth/stub.py +0 -127
  32. clarifai/datasets/upload/examples/README.md +0 -31
  33. clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
  34. clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  35. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  36. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  37. clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  38. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  39. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  40. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  41. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  42. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  43. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  44. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  45. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  46. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  47. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  48. clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  49. clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  50. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  51. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  52. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  53. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  54. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  55. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  56. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  57. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  58. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  59. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  60. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  61. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  62. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  63. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  64. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  65. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  66. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  67. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  68. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  69. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  70. clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
  71. clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  72. clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  73. clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  74. clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  75. clarifai/datasets/upload/loaders/README.md +0 -49
  76. clarifai/models/model_serving/README.md +0 -155
  77. clarifai/models/model_serving/docs/custom_config.md +0 -33
  78. clarifai/models/model_serving/docs/dependencies.md +0 -11
  79. clarifai/models/model_serving/docs/inference_parameters.md +0 -134
  80. clarifai/models/model_serving/docs/model_types.md +0 -20
  81. clarifai/models/model_serving/docs/output.md +0 -28
  82. clarifai/models/model_serving/examples/README.md +0 -7
  83. clarifai/models/model_serving/examples/image_classification/README.md +0 -9
  84. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  85. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  86. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  87. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  88. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  89. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  90. clarifai/models/model_serving/examples/text_classification/README.md +0 -9
  91. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  92. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  93. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  94. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  95. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  96. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  97. clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
  98. clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
  99. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  100. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  101. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  102. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  104. clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
  105. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  106. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  107. clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
  108. clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  109. clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  110. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  111. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
  112. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  113. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  114. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
  115. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  116. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  117. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  118. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  119. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  120. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  121. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  122. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  123. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  124. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  125. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  126. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  127. clarifai/modules/README.md +0 -5
  128. clarifai/modules/style.css +0 -217
  129. clarifai-9.10.2.dist-info/RECORD +0 -386
  130. clarifai-9.10.2.dist-info/top_level.txt +0 -2
  131. clarifai_utils/__init__.py +0 -0
  132. clarifai_utils/auth/__init__.py +0 -6
  133. clarifai_utils/auth/helper.py +0 -367
  134. clarifai_utils/auth/register.py +0 -23
  135. clarifai_utils/auth/stub.py +0 -127
  136. clarifai_utils/cli.py +0 -0
  137. clarifai_utils/client/__init__.py +0 -16
  138. clarifai_utils/client/app.py +0 -684
  139. clarifai_utils/client/auth/__init__.py +0 -4
  140. clarifai_utils/client/auth/helper.py +0 -367
  141. clarifai_utils/client/auth/register.py +0 -23
  142. clarifai_utils/client/auth/stub.py +0 -127
  143. clarifai_utils/client/base.py +0 -131
  144. clarifai_utils/client/dataset.py +0 -442
  145. clarifai_utils/client/input.py +0 -892
  146. clarifai_utils/client/lister.py +0 -54
  147. clarifai_utils/client/model.py +0 -575
  148. clarifai_utils/client/module.py +0 -94
  149. clarifai_utils/client/runner.py +0 -161
  150. clarifai_utils/client/search.py +0 -254
  151. clarifai_utils/client/user.py +0 -253
  152. clarifai_utils/client/workflow.py +0 -223
  153. clarifai_utils/constants/model.py +0 -4
  154. clarifai_utils/constants/search.py +0 -2
  155. clarifai_utils/datasets/__init__.py +0 -0
  156. clarifai_utils/datasets/export/__init__.py +0 -0
  157. clarifai_utils/datasets/export/inputs_annotations.py +0 -222
  158. clarifai_utils/datasets/upload/__init__.py +0 -0
  159. clarifai_utils/datasets/upload/base.py +0 -66
  160. clarifai_utils/datasets/upload/examples/README.md +0 -31
  161. clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
  162. clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  163. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  164. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  165. clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  166. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  167. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  168. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  169. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  170. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  171. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  172. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  173. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  174. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  175. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  176. clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  177. clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  178. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  179. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  180. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  181. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  182. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  183. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  184. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  185. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  186. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  187. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  188. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  189. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  190. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  191. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  192. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  193. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  194. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  195. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  196. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  197. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  198. clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
  199. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  200. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  201. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  202. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  203. clarifai_utils/datasets/upload/features.py +0 -44
  204. clarifai_utils/datasets/upload/image.py +0 -165
  205. clarifai_utils/datasets/upload/loaders/README.md +0 -49
  206. clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
  207. clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
  208. clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
  209. clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
  210. clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
  211. clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
  212. clarifai_utils/datasets/upload/text.py +0 -53
  213. clarifai_utils/datasets/upload/utils.py +0 -63
  214. clarifai_utils/errors.py +0 -89
  215. clarifai_utils/models/__init__.py +0 -0
  216. clarifai_utils/models/api.py +0 -283
  217. clarifai_utils/models/model_serving/README.md +0 -155
  218. clarifai_utils/models/model_serving/__init__.py +0 -12
  219. clarifai_utils/models/model_serving/cli/__init__.py +0 -12
  220. clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
  221. clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
  222. clarifai_utils/models/model_serving/cli/repository.py +0 -87
  223. clarifai_utils/models/model_serving/constants.py +0 -1
  224. clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
  225. clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
  226. clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
  227. clarifai_utils/models/model_serving/docs/model_types.md +0 -20
  228. clarifai_utils/models/model_serving/docs/output.md +0 -28
  229. clarifai_utils/models/model_serving/examples/README.md +0 -7
  230. clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
  231. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  232. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
  233. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
  234. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  235. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  236. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  237. clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  238. clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  239. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  240. clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
  241. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  242. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
  243. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
  244. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  245. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  246. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  247. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  248. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  249. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  250. clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
  251. clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
  252. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  253. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  254. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  255. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  256. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  257. clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
  258. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  259. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
  260. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
  261. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  262. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  263. clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
  264. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
  265. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
  266. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  267. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  268. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  269. clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
  270. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  271. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
  272. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
  273. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  274. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  275. clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
  276. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  277. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
  278. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
  279. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  280. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  281. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  282. clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
  283. clarifai_utils/models/model_serving/model_config/config.py +0 -302
  284. clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
  285. clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  286. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  287. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  288. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  289. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  290. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  291. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  292. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  293. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  294. clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
  295. clarifai_utils/models/model_serving/models/__init__.py +0 -12
  296. clarifai_utils/models/model_serving/models/default_test.py +0 -275
  297. clarifai_utils/models/model_serving/models/inference.py +0 -42
  298. clarifai_utils/models/model_serving/models/model_types.py +0 -265
  299. clarifai_utils/models/model_serving/models/output.py +0 -124
  300. clarifai_utils/models/model_serving/models/pb_model.py +0 -74
  301. clarifai_utils/models/model_serving/models/test.py +0 -64
  302. clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
  303. clarifai_utils/modules/README.md +0 -5
  304. clarifai_utils/modules/__init__.py +0 -0
  305. clarifai_utils/modules/css.py +0 -60
  306. clarifai_utils/modules/pages.py +0 -42
  307. clarifai_utils/modules/style.css +0 -217
  308. clarifai_utils/runners/__init__.py +0 -0
  309. clarifai_utils/runners/example.py +0 -33
  310. clarifai_utils/schema/search.py +0 -69
  311. clarifai_utils/urls/helper.py +0 -103
  312. clarifai_utils/utils/__init__.py +0 -0
  313. clarifai_utils/utils/logging.py +0 -90
  314. clarifai_utils/utils/misc.py +0 -33
  315. clarifai_utils/utils/model_train.py +0 -157
  316. clarifai_utils/versions.py +0 -6
  317. clarifai_utils/workflows/__init__.py +0 -0
  318. clarifai_utils/workflows/export.py +0 -68
  319. clarifai_utils/workflows/utils.py +0 -59
  320. clarifai_utils/workflows/validate.py +0 -67
  321. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
  322. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
  323. {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
clarifai/datasets/upload/loaders/coco_detection.py
@@ -1,134 +1,75 @@
-#! COCO 2017 detection dataset
+#! COCO detection dataset
 
 import os
-import zipfile
-from glob import glob
 
-import cv2
-import requests
 from pycocotools.coco import COCO
-from tqdm import tqdm
 
-from clarifai.datasets.upload.base import ClarifaiDataLoader
+from ..base import ClarifaiDataLoader
 
 from ..features import VisualDetectionFeatures
 
 
 class COCODetectionDataLoader(ClarifaiDataLoader):
-  """COCO 2017 Image Detection Dataset."""
 
-  def __init__(self, split: str = "train"):
+  def __init__(self, images_dir, label_filepath):
     """
-    Initialize coco dataset.
-    Args:
-      filenames: the coco zip filenames: Dict[str, str] to be downloaded if download=True,
-      data_dir: the local coco dataset directory.
-      split: "train" or "val"
-    """
-    self.filenames = {
-        "train": "train2017.zip",
-        "val": "val2017.zip",
-        "annotations": "annotations_trainval2017.zip"
-    }
-    self.split = split
-    self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-    self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                 "data")  # data storage directory
-    self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
-
+    Args:
+      images_dir: Directory containing the images.
+      label_filepath: Path to the COCO annotation file.
+    """
+    self.images_dir = images_dir
+    self.label_filepath = label_filepath
+
+    self.map_ids = {}
    self.load_data()
 
-  def coco_download(self, save_dir):
-    """Download coco dataset."""
-    if not os.path.exists(save_dir):
-      os.mkdir(save_dir)
-
-    #check if train*, val* and annotation* dirs exist
-    #so that the coco2017 data isn't downloaded
-    for key, filename in self.filenames.items():
-      existing_files = glob(f"{save_dir}/{key}*")
-      if existing_files:
-        print(f"{key} dataset already downloded and extracted")
-        continue
-
-      print("-" * 80)
-      print(f"Downloading {filename}")
-      print("-" * 80)
-
-      if "annotations" in filename:
-        self.url = "http://images.cocodataset.org/annotations/"
-
-      response = requests.get(self.url + filename, stream=True)
-      response.raise_for_status()
-      with open(os.path.join(save_dir, filename), "wb") as _file:
-        for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-          if chunk:
-            _file.write(chunk)
-      print("Coco data download complete...")
-
-      #extract files
-      zf = zipfile.ZipFile(os.path.join(save_dir, filename))
-      print(f" Extracting {filename} file")
-      zf.extractall(path=save_dir)
-      # Delete coco zip
-      print(f" Deleting {filename}")
-      os.remove(path=os.path.join(save_dir, filename))
-
-  def load_data(self):
-    if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-      self.coco_download(self.data_dir)
-      self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "train" in i][0]
-      self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "val" in i][0]
-
-      self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "annotations" in i][0]
-    else:
-      raise Exception(f"`filenames` must be a dict of atleast 2 coco zip file names; \
-          train, val and annotations. Found {len(self.filenames)} items instead.")
-
-    annot_file = glob(self.extracted_coco_dirs["annotations"] + "/" +\
-        f"instances_{self.split}*")[0]
-    self.coco = COCO(annot_file)
-    categories = self.coco.loadCats(self.coco.getCatIds())
-    self.cat_id_map = {category["id"]: category["name"] for category in categories}
-    self.cat_img_ids = {}
-    for cat_id in list(self.cat_id_map.keys()):
-      self.cat_img_ids[cat_id] = self.coco.getImgIds(catIds=[cat_id])
-
-    img_ids = []
-    for i in list(self.cat_img_ids.values()):
-      img_ids.extend(i)
-
-    self.img_ids = list(set(img_ids))
+  @property
+  def task(self):
+    return "visual_detection"
 
-  def __len__(self):
-    return len(self.img_ids)
+  def load_data(self) -> None:
+    self.coco = COCO(self.label_filepath)
+    self.map_ids = {i: img_id for i, img_id in enumerate(list(self.coco.imgs.keys()))}
 
-  def __getitem__(self, idx):
-    _id = self.img_ids[idx]
+  def __getitem__(self, index: int):
+    value = self.coco.imgs[self.map_ids[index]]
+    image_path = os.path.join(self.images_dir, value['file_name'])
     annots = []  # bboxes
-    class_names = []
-    labels = [i for i in list(filter(lambda x: _id in self.cat_img_ids[x], self.cat_img_ids))]
-    image_path = glob(self.extracted_coco_dirs[self.split]+"/"+\
-        f"{str(_id).zfill(12)}*")[0]
-
-    image_height, image_width = cv2.imread(image_path).shape[:2]
-    for cat_id in labels:
-      annot_ids = self.coco.getAnnIds(imgIds=_id, catIds=[cat_id])
-      if len(annot_ids) > 0:
-        img_annotations = self.coco.loadAnns(annot_ids)
-        for ann in img_annotations:
-          class_names.append(self.cat_id_map[cat_id])
-          x_min = ann['bbox'][0] / image_width  #left_col
-          y_min = ann['bbox'][1] / image_height  #top_row
-          x_max = (ann['bbox'][0] + ann['bbox'][2]) / image_width  #right_col
-          y_max = (ann['bbox'][1] + ann['bbox'][3]) / image_height  #bottom_row
-          annots.append([x_min, y_min, x_max, y_max])
-      else:  # if no annotations for given image_id-cat_id pair
+    concept_ids = []
+
+    input_ann_ids = self.coco.getAnnIds(imgIds=[value['id']])
+    input_anns = self.coco.loadAnns(input_ann_ids)
+
+    for ann in input_anns:
+      # get concept info
+      # note1: concept_name can be human readable
+      # note2: concept_id can only be alphanumeric, up to 32 characters, with no special chars except `-` and `_`
+      concept_name = self.coco.cats[ann['category_id']]['name']
+      concept_id = concept_name.lower().replace(' ', '-')
+
+      # get bbox information
+      # note1: coco bboxes are `[x_min, y_min, width, height]` in pixels
+      # note2: clarifai bboxes are `[x_min, y_min, x_max, y_max]` normalized between 0-1.0
+      coco_bbox = ann['bbox']
+      clarifai_bbox = {
+          'left_col': max(0, coco_bbox[0] / value['width']),
+          'top_row': max(0, coco_bbox[1] / value['height']),
+          'right_col': min(1, (coco_bbox[0] + coco_bbox[2]) / value['width']),
+          'bottom_row': min(1, (coco_bbox[1] + coco_bbox[3]) / value['height'])
+      }
+      if (clarifai_bbox['left_col'] >=
+          clarifai_bbox['right_col']) or (clarifai_bbox['top_row'] >= clarifai_bbox['bottom_row']):
         continue
-    assert len(class_names) == len(annots), f"Num classes must match num bbox annotations\
-        for a single image. Found {len(class_names)} classes and {len(annots)} bboxes."
+      annots.append([
+          clarifai_bbox['left_col'], clarifai_bbox['top_row'], clarifai_bbox['right_col'],
+          clarifai_bbox['bottom_row']
+      ])
+      concept_ids.append(concept_id)
+
+    assert len(concept_ids) == len(annots), f"Num concepts must match num bbox annotations\
+        for a single image. Found {len(concept_ids)} concepts and {len(annots)} bboxes."
 
-    return VisualDetectionFeatures(image_path, class_names, annots, id=str(_id))
+    return VisualDetectionFeatures(image_path, concept_ids, annots, id=str(value['id']))
+
+  def __len__(self):
+    return len(self.coco.imgs)
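
The loader no longer downloads and extracts COCO 2017 itself; the caller points it at a local images directory and annotation file. A minimal usage sketch under that assumption (the paths below are hypothetical):

# Paths are hypothetical; any local COCO-format images/annotations pair works.
from clarifai.datasets.upload.loaders.coco_detection import COCODetectionDataLoader

loader = COCODetectionDataLoader(
    images_dir="datasets/coco/val2017",
    label_filepath="datasets/coco/annotations/instances_val2017.json")

print(loader.task)  # "visual_detection"
print(len(loader))  # one entry per image in the annotation file

# Each item is a VisualDetectionFeatures: the image path, concept ids, and
# bboxes normalized to [x_min, y_min, x_max, y_max] in 0-1.0. For example,
# a COCO bbox [200, 100, 50, 80] on a 640x480 image maps to roughly
# [0.3125, 0.2083, 0.3906, 0.375].
features = loader[0]
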
clarifai/datasets/upload/loaders/coco_segmentation.py
@@ -2,16 +2,12 @@
 
 import gc
 import os
-import zipfile
 from functools import reduce
-from glob import glob
 
 import cv2
 import numpy as np
-import requests
 from pycocotools import mask as maskUtils
 from pycocotools.coco import COCO
-from tqdm import tqdm
 
 from clarifai.datasets.upload.base import ClarifaiDataLoader
 
@@ -19,148 +15,84 @@ from ..features import VisualSegmentationFeatures
 
 
 class COCOSegmentationDataLoader(ClarifaiDataLoader):
-  """COCO 2017 Image Segmentation Dataset."""
+  """COCO Image Segmentation Dataset."""
 
-  def __init__(self, split: str = "train"):
+  def __init__(self, images_dir, label_filepath):
     """
-    Initialize coco dataset.
     Args:
-      filenames: the coco zip filenames: Dict[str, str] to be downloaded if download=True,
-      data_dir: the local coco dataset directory
-      split: "train" or "val"
+      images_dir: Directory containing the images.
+      label_filepath: Path to the COCO annotation file.
     """
-    self.filenames = {
-        "train": "train2017.zip",
-        "val": "val2017.zip",
-        "annotations": "annotations_trainval2017.zip"
-    }
-    self.split = split
-    self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-    self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                 "data")  # data storage dir
-    self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
+    self.images_dir = images_dir
+    self.label_filepath = label_filepath
 
+    self.map_ids = {}
     self.load_data()
 
-  def coco_download(self, save_dir):
-    """Download coco dataset."""
-    if not os.path.exists(save_dir):
-      os.mkdir(save_dir)
-
-    #check if train, val and annotation dirs exist
-    #so that the coco2017 data isn't downloaded
-    for key, filename in self.filenames.items():
-      existing_files = glob(f"{save_dir}/{key}*")
-      if existing_files:
-        print(f"{key} dataset already downloded and extracted")
-        continue
-
-      print("-" * 80)
-      print(f"Downloading {filename}")
-      print("-" * 80)
-
-      if "annotations" in filename:
-        self.url = "http://images.cocodataset.org/annotations/"
-
-      response = requests.get(self.url + filename, stream=True)
-      response.raise_for_status()
-      with open(os.path.join(save_dir, filename), "wb") as _file:
-        for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-          if chunk:
-            _file.write(chunk)
-      print("Coco data download complete...")
-
-      #extract files
-      zf = zipfile.ZipFile(os.path.join(save_dir, filename))
-      print(f" Extracting {filename} file")
-      zf.extractall(path=save_dir)
-      # Delete coco zip
-      print(f" Deleting {filename}")
-      os.remove(path=os.path.join(save_dir, filename))
-
-  def load_data(self):
-    """Load coco dataset image ids or filenames."""
-    if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-      self.coco_download(self.data_dir)
-      self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "train" in i][0]
-      self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "val" in i][0]
-
-      self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "annotations" in i][0]
-    else:
-      raise Exception(f"`filenames` must be a dict of atleast 3 coco zip file names; \
-          train, val and annotations. Found {len(self.filenames)} items instead.")
-
-    annot_file = glob(self.extracted_coco_dirs["annotations"] + "/" + f"instances_{self.split}*")[
-        0]
-    self.coco = COCO(annot_file)
-    categories = self.coco.loadCats(self.coco.getCatIds())
-    self.cat_id_map = {category["id"]: category["name"] for category in categories}
-    self.cat_img_ids = {}
-    for cat_id in list(self.cat_id_map.keys()):
-      self.cat_img_ids[cat_id] = self.coco.getImgIds(catIds=[cat_id])
-
-    img_ids = set()
-    for i in list(self.cat_img_ids.values()):
-      img_ids.update(i)
-
-    self.img_ids = list(img_ids)
+  @property
+  def task(self):
+    return "visual_segmentation"
+
+  def load_data(self) -> None:
+    self.coco = COCO(self.label_filepath)
+    self.map_ids = {i: img_id for i, img_id in enumerate(list(self.coco.imgs.keys()))}
 
   def __len__(self):
-    return len(self.img_ids)
+    return len(self.coco.imgs)
 
-  def __getitem__(self, idx):
+  def __getitem__(self, index):
     """Get image and annotations for a given index."""
-    _id = self.img_ids[idx]
+    value = self.coco.imgs[self.map_ids[index]]
+    image_path = os.path.join(self.images_dir, value['file_name'])
     annots = []  # polygons
-    class_names = []
-    labels = [i for i in list(filter(lambda x: _id in self.cat_img_ids[x], self.cat_img_ids))]
-    image_path = glob(self.extracted_coco_dirs[self.split]+"/"+\
-        f"{str(_id).zfill(12)}*")[0]
-
-    image_height, image_width = cv2.imread(image_path).shape[:2]
-    for cat_id in labels:
-      annot_ids = self.coco.getAnnIds(imgIds=_id, catIds=[cat_id])
-      if len(annot_ids) > 0:
-        img_annotations = self.coco.loadAnns(annot_ids)
-        for ann in img_annotations:
-          # get polygons
-          if isinstance(ann['segmentation'], list):
-            for seg in ann['segmentation']:
-              poly = np.array(seg).reshape((int(len(seg) / 2), 2))
-              poly[:, 0], poly[:, 1] = poly[:, 0] / image_width, poly[:, 1] / image_height
-              annots.append(poly.tolist())  #[[x=col, y=row],...]
-              class_names.append(self.cat_id_map[cat_id])
-          else:  # seg: {"counts":[...]}
-            if isinstance(ann['segmentation']['counts'], list):
-              rle = maskUtils.frPyObjects([ann['segmentation']], image_height, image_width)
-            else:
-              rle = ann['segmentation']
-            mask = maskUtils.decode(rle)  #binary mask
-            #convert mask to polygons and add to annots
-            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-            polygons = []
-            for cont in contours:
-              if cont.size >= 6:
-                polygons.append(cont.astype(float).flatten().tolist())
-            # store polygons in (x,y) pairs
-            polygons_flattened = reduce(lambda x, y: x + y, polygons)
-            del polygons
-            del contours
-            del mask
-            gc.collect()
-
-            polygons = np.array(polygons_flattened).reshape((int(len(polygons_flattened) / 2), 2))
-            polygons[:, 0] = polygons[:, 0] / image_width
-            polygons[:, 1] = polygons[:, 1] / image_height
-
-            annots.append(polygons.tolist())  #[[x=col, y=row],...,[x=col, y=row]]
-            class_names.append(self.cat_id_map[cat_id])
-      else:  # if no annotations for given image_id-cat_id pair
-        continue
-    assert len(class_names) == len(annots), f"Num classes must match num annotations\
-        for a single image. Found {len(class_names)} classes and {len(annots)} polygons."
-
-    return VisualSegmentationFeatures(image_path, class_names, annots, id=str(_id))
+    concept_ids = []
+
+    input_ann_ids = self.coco.getAnnIds(imgIds=[value['id']])
+    input_anns = self.coco.loadAnns(input_ann_ids)
+
+    for ann in input_anns:
+      # get concept info
+      # note1: concept_name can be human readable
+      # note2: concept_id can only be alphanumeric, up to 32 characters, with no special chars except `-` and `_`
+      concept_name = self.coco.cats[ann['category_id']]['name']
+      concept_id = concept_name.lower().replace(' ', '-')
+
+      # get polygons
+      if isinstance(ann['segmentation'], list):
+        poly = np.array(ann['segmentation']).reshape((int(len(ann['segmentation'][0]) / 2),
+                                                      2)).astype(float)
+        poly[:, 0], poly[:, 1] = poly[:, 0] / value['width'], poly[:, 1] / value['height']
+        poly = np.clip(poly, 0, 1)
+        annots.append(poly.tolist())  #[[x=col, y=row],...]
+        concept_ids.append(concept_id)
+      else:  # seg: {"counts":[...]}
+        if isinstance(ann['segmentation']['counts'], list):
+          rle = maskUtils.frPyObjects([ann['segmentation']], value['height'], value['width'])
+        else:
+          rle = ann['segmentation']
+        mask = maskUtils.decode(rle)  #binary mask
+        #convert mask to polygons and add to annots
+        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+        polygons = []
+        for cont in contours:
+          if cont.size >= 6:
+            polygons.append(cont.astype(float).flatten().tolist())
+        # store polygons in (x,y) pairs
+        polygons_flattened = reduce(lambda x, y: x + y, polygons)
+        del polygons
+        del contours
+        del mask
+        gc.collect()
+
+        polygons = np.array(polygons_flattened).reshape((int(len(polygons_flattened) / 2),
+                                                         2)).astype(float)
+        polygons[:, 0] = polygons[:, 0] / value['width']
+        polygons[:, 1] = polygons[:, 1] / value['height']
+        polygons = np.clip(polygons, 0, 1)
+        annots.append(polygons.tolist())  #[[x=col, y=row],...,[x=col, y=row]]
+        concept_ids.append(concept_id)
+
+    assert len(concept_ids) == len(annots), f"Num concepts must match num bbox annotations\
+        for a single image. Found {len(concept_ids)} concepts and {len(annots)} bboxes."
+
+    return VisualSegmentationFeatures(image_path, concept_ids, annots, id=str(value['id']))
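
The segmentation loader keeps the same calling convention; a short sketch with hypothetical paths:

# Same constructor shape as the detection loader; paths are hypothetical.
from clarifai.datasets.upload.loaders.coco_segmentation import COCOSegmentationDataLoader

loader = COCOSegmentationDataLoader(
    images_dir="datasets/coco/val2017",
    label_filepath="datasets/coco/annotations/instances_val2017.json")

print(loader.task)  # "visual_segmentation"
# Items are VisualSegmentationFeatures: the image path, concept ids, and
# polygons as [x, y] pairs normalized (and clipped) to 0-1.0.
features = loader[0]
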
clarifai/datasets/upload/loaders/imagenet_classification.py
@@ -9,7 +9,7 @@ from ..features import VisualClassificationFeatures
 
 class ImageNetDataLoader(ClarifaiDataLoader):
   """ImageNet Dataset."""
 
-  def __init__(self, split: str = "train"):
+  def __init__(self, data_dir, split: str = "train"):
     """
     Initialize dataset params.
     Args:
@@ -17,8 +17,7 @@ class ImageNetDataLoader(ClarifaiDataLoader):
       split: "train" or "test"
     """
     self.split = split
-    self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                 "data")  # data storage directory
+    self.data_dir = data_dir
     self.label_map = dict()
     self.concepts = []
     self.image_paths = []
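
The ImageNet loader follows suit: the hard-coded data/ directory next to the module is replaced by an explicit data_dir argument. A sketch, assuming an already-downloaded ImageNet tree at a hypothetical path:

from clarifai.datasets.upload.loaders.imagenet_classification import ImageNetDataLoader

# data_dir is hypothetical; it should hold the ImageNet images and label mapping.
loader = ImageNetDataLoader(data_dir="datasets/imagenet", split="train")
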
clarifai/datasets/upload/loaders/xview_detection.py
@@ -31,13 +31,13 @@ class xviewDetectionDataLoader(ClarifaiDataLoader):
       'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower'
   ]
 
-  def __init__(self, split: str = "train") -> None:
+  def __init__(self, data_dir) -> None:
     """Initialize and Compress xview dataset.
     Args:
-      split: "train"
+      data_dir: the local dataset directory.
     """
 
-    self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
+    self.data_dir = data_dir
     self.img_dir = os.path.join(self.data_dir, "train_images")
     self.img_comp_dir = os.path.join(self.data_dir, "train_images_comp")
     self.label_file = os.path.join(self.data_dir, "xview_train.geojson")
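
Same change for xView: the split parameter is dropped and the dataset root is passed in. Per the unchanged lines above, that root must contain train_images/ and xview_train.geojson; the path itself is hypothetical:

from clarifai.datasets.upload.loaders.xview_detection import xviewDetectionDataLoader

# Hypothetical root; must contain train_images/ and xview_train.geojson.
loader = xviewDetectionDataLoader(data_dir="datasets/xview")
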
clarifai/datasets/upload/text.py
@@ -1,17 +1,19 @@
 from concurrent.futures import ThreadPoolExecutor
-from typing import Iterator, List, Tuple
+from typing import List, Tuple, Type
 
 from clarifai_grpc.grpc.api import resources_pb2
 from google.protobuf.struct_pb2 import Struct
 
-from .base import ClarifaiDataset
+from clarifai.client.input import Inputs
+
+from .base import ClarifaiDataLoader, ClarifaiDataset
 
 
 class TextClassificationDataset(ClarifaiDataset):
   """Upload text classification datasets to clarifai datasets"""
 
-  def __init__(self, datagen_object: Iterator, dataset_id: str, split: str) -> None:
-    super().__init__(datagen_object, dataset_id, split)
+  def __init__(self, data_generator: Type[ClarifaiDataLoader], dataset_id: str) -> None:
+    super().__init__(data_generator, dataset_id)
 
   def _extract_protos(self, batch_input_ids: List[int]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -24,21 +26,19 @@ class TextClassificationDataset(ClarifaiDataset):
     """
     input_protos, annotation_protos = [], []
 
-    def process_datagen_item(id):
-      datagen_item = self.datagen_object[id]
+    def process_data_item(id):
+      data_item = self.data_generator[id]
       metadata = Struct()
-      text = datagen_item.text
-      labels = datagen_item.labels if isinstance(
-          datagen_item.labels, list) else [datagen_item.labels]  # clarifai concept
-      input_id = f"{self.dataset_id}-{self.split}-{id}" if datagen_item.id is None else f"{self.dataset_id}-{self.split}-{str(datagen_item.id)}"
-      if datagen_item.metadata is not None:
-        metadata.update(datagen_item.metadata)
-      else:
-        metadata.update({"split": self.split})
+      text = data_item.text
+      labels = data_item.labels if isinstance(data_item.labels,
+                                              list) else [data_item.labels]  # clarifai concept
+      input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
+      if data_item.metadata is not None:
+        metadata.update(data_item.metadata)
 
       self.all_input_ids[id] = input_id
       input_protos.append(
-          self.input_object.get_text_input(
+          Inputs.get_text_input(
              input_id=input_id,
              raw_text=text,
              dataset_id=self.dataset_id,
@@ -46,7 +46,7 @@ class TextClassificationDataset(ClarifaiDataset):
              metadata=metadata))
 
     with ThreadPoolExecutor(max_workers=4) as executor:
-      futures = [executor.submit(process_datagen_item, id) for id in batch_input_ids]
+      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
       for job in futures:
        job.result()
 
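
The dataset class now takes a ClarifaiDataLoader (data_generator) instead of a bare iterator, builds protos through the Inputs client, and drops the split segment from generated input IDs. A sketch of the item shape process_data_item consumes; TextItem and InMemoryTextLoader are hypothetical stand-ins for a real loader:

from dataclasses import dataclass
from typing import List, Optional


@dataclass
class TextItem:
  text: str
  labels: List[str]
  id: Optional[str] = None  # when None, the batch index is used in the input id
  metadata: Optional[dict] = None  # no longer defaults to {"split": ...}


class InMemoryTextLoader:
  """Stands in for a ClarifaiDataLoader subclass; a real loader would also
  expose a `task` property like the loaders above."""

  def __init__(self, items):
    self.items = items

  def __getitem__(self, index):
    return self.items[index]

  def __len__(self):
    return len(self.items)


loader = InMemoryTextLoader([TextItem(text="great film", labels=["positive"])])
# With dataset_id "reviews" and no item id, the generated input id is now
# "reviews-0" rather than "reviews-train-0".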