clarifai 9.10.1__py3-none-any.whl → 9.10.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (323)
  1. clarifai/client/__init__.py +3 -2
  2. clarifai/client/app.py +39 -23
  3. clarifai/client/base.py +6 -6
  4. clarifai/client/dataset.py +113 -55
  5. clarifai/client/input.py +47 -55
  6. clarifai/client/model.py +27 -25
  7. clarifai/client/module.py +13 -11
  8. clarifai/client/runner.py +5 -3
  9. clarifai/client/search.py +29 -10
  10. clarifai/client/user.py +14 -8
  11. clarifai/client/workflow.py +22 -20
  12. clarifai/constants/dataset.py +22 -0
  13. clarifai/datasets/upload/base.py +9 -7
  14. clarifai/datasets/upload/features.py +3 -3
  15. clarifai/datasets/upload/image.py +49 -50
  16. clarifai/datasets/upload/loaders/coco_captions.py +26 -80
  17. clarifai/datasets/upload/loaders/coco_detection.py +56 -115
  18. clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
  19. clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
  20. clarifai/datasets/upload/loaders/xview_detection.py +3 -3
  21. clarifai/datasets/upload/text.py +16 -16
  22. clarifai/datasets/upload/utils.py +196 -21
  23. clarifai/utils/misc.py +21 -0
  24. clarifai/versions.py +1 -1
  25. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
  26. clarifai-9.10.3.dist-info/RECORD +96 -0
  27. clarifai-9.10.3.dist-info/top_level.txt +1 -0
  28. clarifai/auth/__init__.py +0 -6
  29. clarifai/auth/helper.py +0 -367
  30. clarifai/auth/register.py +0 -23
  31. clarifai/auth/stub.py +0 -127
  32. clarifai/datasets/upload/examples/README.md +0 -31
  33. clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
  34. clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  35. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  36. clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  37. clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  38. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  39. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  40. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  41. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  42. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  43. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  44. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  45. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  46. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  47. clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  48. clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  49. clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  50. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  51. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  52. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  53. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  54. clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  55. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  56. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  57. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  58. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  59. clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  60. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  61. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  62. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  63. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  64. clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  65. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  66. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  67. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  68. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  69. clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  70. clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
  71. clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  72. clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  73. clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  74. clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  75. clarifai/datasets/upload/loaders/README.md +0 -49
  76. clarifai/models/model_serving/README.md +0 -155
  77. clarifai/models/model_serving/docs/custom_config.md +0 -33
  78. clarifai/models/model_serving/docs/dependencies.md +0 -11
  79. clarifai/models/model_serving/docs/inference_parameters.md +0 -134
  80. clarifai/models/model_serving/docs/model_types.md +0 -20
  81. clarifai/models/model_serving/docs/output.md +0 -28
  82. clarifai/models/model_serving/examples/README.md +0 -7
  83. clarifai/models/model_serving/examples/image_classification/README.md +0 -9
  84. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  85. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  86. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  87. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  88. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  89. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  90. clarifai/models/model_serving/examples/text_classification/README.md +0 -9
  91. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  92. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  93. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  94. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  95. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  96. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  97. clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
  98. clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
  99. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  100. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  101. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  102. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  104. clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
  105. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  106. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  107. clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
  108. clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  109. clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  110. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  111. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
  112. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  113. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  114. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
  115. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  116. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  117. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  118. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  119. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  120. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  121. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  122. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  123. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  124. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  125. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  126. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  127. clarifai/modules/README.md +0 -5
  128. clarifai/modules/style.css +0 -217
  129. clarifai-9.10.1.dist-info/RECORD +0 -386
  130. clarifai-9.10.1.dist-info/top_level.txt +0 -2
  131. clarifai_utils/__init__.py +0 -0
  132. clarifai_utils/auth/__init__.py +0 -6
  133. clarifai_utils/auth/helper.py +0 -367
  134. clarifai_utils/auth/register.py +0 -23
  135. clarifai_utils/auth/stub.py +0 -127
  136. clarifai_utils/cli.py +0 -0
  137. clarifai_utils/client/__init__.py +0 -16
  138. clarifai_utils/client/app.py +0 -684
  139. clarifai_utils/client/auth/__init__.py +0 -4
  140. clarifai_utils/client/auth/helper.py +0 -367
  141. clarifai_utils/client/auth/register.py +0 -23
  142. clarifai_utils/client/auth/stub.py +0 -127
  143. clarifai_utils/client/base.py +0 -131
  144. clarifai_utils/client/dataset.py +0 -442
  145. clarifai_utils/client/input.py +0 -892
  146. clarifai_utils/client/lister.py +0 -54
  147. clarifai_utils/client/model.py +0 -575
  148. clarifai_utils/client/module.py +0 -94
  149. clarifai_utils/client/runner.py +0 -161
  150. clarifai_utils/client/search.py +0 -239
  151. clarifai_utils/client/user.py +0 -253
  152. clarifai_utils/client/workflow.py +0 -223
  153. clarifai_utils/constants/model.py +0 -4
  154. clarifai_utils/constants/search.py +0 -2
  155. clarifai_utils/datasets/__init__.py +0 -0
  156. clarifai_utils/datasets/export/__init__.py +0 -0
  157. clarifai_utils/datasets/export/inputs_annotations.py +0 -222
  158. clarifai_utils/datasets/upload/__init__.py +0 -0
  159. clarifai_utils/datasets/upload/base.py +0 -66
  160. clarifai_utils/datasets/upload/examples/README.md +0 -31
  161. clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
  162. clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
  163. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
  164. clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
  165. clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
  166. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
  167. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
  168. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
  169. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
  170. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
  171. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
  172. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
  173. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
  174. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
  175. clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
  176. clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
  177. clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
  178. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
  179. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
  180. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
  181. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
  182. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
  183. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
  184. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
  185. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
  186. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
  187. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
  188. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
  189. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
  190. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
  191. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
  192. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
  193. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
  194. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
  195. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
  196. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
  197. clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
  198. clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
  199. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
  200. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
  201. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
  202. clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
  203. clarifai_utils/datasets/upload/features.py +0 -44
  204. clarifai_utils/datasets/upload/image.py +0 -165
  205. clarifai_utils/datasets/upload/loaders/README.md +0 -49
  206. clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
  207. clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
  208. clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
  209. clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
  210. clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
  211. clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
  212. clarifai_utils/datasets/upload/text.py +0 -53
  213. clarifai_utils/datasets/upload/utils.py +0 -63
  214. clarifai_utils/errors.py +0 -89
  215. clarifai_utils/models/__init__.py +0 -0
  216. clarifai_utils/models/api.py +0 -283
  217. clarifai_utils/models/model_serving/README.md +0 -155
  218. clarifai_utils/models/model_serving/__init__.py +0 -12
  219. clarifai_utils/models/model_serving/cli/__init__.py +0 -12
  220. clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
  221. clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
  222. clarifai_utils/models/model_serving/cli/repository.py +0 -87
  223. clarifai_utils/models/model_serving/constants.py +0 -1
  224. clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
  225. clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
  226. clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
  227. clarifai_utils/models/model_serving/docs/model_types.md +0 -20
  228. clarifai_utils/models/model_serving/docs/output.md +0 -28
  229. clarifai_utils/models/model_serving/examples/README.md +0 -7
  230. clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
  231. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  232. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
  233. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
  234. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  235. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  236. clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  237. clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  238. clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  239. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  240. clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
  241. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  242. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
  243. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
  244. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  245. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  246. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  247. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  248. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  249. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  250. clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
  251. clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
  252. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  253. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
  254. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
  255. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  256. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  257. clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
  258. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  259. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
  260. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
  261. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  262. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  263. clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
  264. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
  265. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
  266. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
  267. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
  268. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
  269. clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
  270. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  271. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
  272. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
  273. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  274. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  275. clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
  276. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  277. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
  278. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
  279. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  280. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  281. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  282. clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
  283. clarifai_utils/models/model_serving/model_config/config.py +0 -302
  284. clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
  285. clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
  286. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
  287. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
  288. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
  289. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
  290. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
  291. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
  292. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
  293. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
  294. clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
  295. clarifai_utils/models/model_serving/models/__init__.py +0 -12
  296. clarifai_utils/models/model_serving/models/default_test.py +0 -275
  297. clarifai_utils/models/model_serving/models/inference.py +0 -42
  298. clarifai_utils/models/model_serving/models/model_types.py +0 -265
  299. clarifai_utils/models/model_serving/models/output.py +0 -124
  300. clarifai_utils/models/model_serving/models/pb_model.py +0 -74
  301. clarifai_utils/models/model_serving/models/test.py +0 -64
  302. clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
  303. clarifai_utils/modules/README.md +0 -5
  304. clarifai_utils/modules/__init__.py +0 -0
  305. clarifai_utils/modules/css.py +0 -60
  306. clarifai_utils/modules/pages.py +0 -42
  307. clarifai_utils/modules/style.css +0 -217
  308. clarifai_utils/runners/__init__.py +0 -0
  309. clarifai_utils/runners/example.py +0 -33
  310. clarifai_utils/schema/search.py +0 -69
  311. clarifai_utils/urls/helper.py +0 -103
  312. clarifai_utils/utils/__init__.py +0 -0
  313. clarifai_utils/utils/logging.py +0 -90
  314. clarifai_utils/utils/misc.py +0 -33
  315. clarifai_utils/utils/model_train.py +0 -157
  316. clarifai_utils/versions.py +0 -6
  317. clarifai_utils/workflows/__init__.py +0 -0
  318. clarifai_utils/workflows/export.py +0 -68
  319. clarifai_utils/workflows/utils.py +0 -59
  320. clarifai_utils/workflows/validate.py +0 -67
  321. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
  322. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
  323. {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
clarifai/client/input.py CHANGED
@@ -25,10 +25,11 @@ class Inputs(Lister, BaseClient):
  """Inputs is a class that provides access to Clarifai API endpoints related to Input information."""

  def __init__(self,
- user_id: str = "",
- app_id: str = "",
+ user_id: str = None,
+ app_id: str = None,
  logger_level: str = "INFO",
  base_url: str = "https://api.clarifai.com",
+ pat: str = None,
  **kwargs):
  """Initializes an Input object.

@@ -43,11 +44,11 @@ class Inputs(Lister, BaseClient):
  self.kwargs = {**kwargs}
  self.input_info = resources_pb2.Input(**self.kwargs)
  self.logger = get_logger(logger_level=logger_level, name=__name__)
- BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url)
+ BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url, pat=pat)
  Lister.__init__(self)

- def _get_proto(self,
- input_id: str,
+ @staticmethod
+ def _get_proto(input_id: str,
  dataset_id: Union[str, None],
  imagepb: Image = None,
  video_pb: Video = None,
@@ -106,8 +107,8 @@ class Inputs(Lister, BaseClient):
  concepts=concepts,
  metadata=metadata))

- def get_input_from_url(self,
- input_id: str,
+ @staticmethod
+ def get_input_from_url(input_id: str,
  image_url: str = None,
  video_url: str = None,
  audio_url: str = None,
@@ -129,8 +130,7 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_proto = input_obj.get_input_from_url(input_id = 'demo', image_url='https://samples.clarifai.com/metro-north.jpg')
+ >>> input_proto = Inputs.get_input_from_url(input_id = 'demo', image_url='https://samples.clarifai.com/metro-north.jpg')
  """
  if not any((image_url, video_url, audio_url, text_url)):
  raise ValueError(
@@ -139,7 +139,7 @@ class Inputs(Lister, BaseClient):
  video_pb = resources_pb2.Video(url=video_url) if video_url else None
  audio_pb = resources_pb2.Audio(url=audio_url) if audio_url else None
  text_pb = resources_pb2.Text(url=text_url) if text_url else None
- return self._get_proto(
+ return Inputs._get_proto(
  input_id=input_id,
  dataset_id=dataset_id,
  imagepb=image_pb,
@@ -148,8 +148,8 @@ class Inputs(Lister, BaseClient):
  text_pb=text_pb,
  **kwargs)

- def get_input_from_file(self,
- input_id: str,
+ @staticmethod
+ def get_input_from_file(input_id: str,
  image_file: str = None,
  video_file: str = None,
  audio_file: str = None,
@@ -171,8 +171,7 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_proto = input_obj.get_input_from_file(input_id = 'demo', video_file='file_path')
+ >>> input_proto = Inputs.get_input_from_file(input_id = 'demo', video_file='file_path')
  """
  if not any((image_file, video_file, audio_file, text_file)):
  raise ValueError(
@@ -181,7 +180,7 @@ class Inputs(Lister, BaseClient):
  video_pb = resources_pb2.Video(base64=open(video_file, 'rb').read()) if video_file else None
  audio_pb = resources_pb2.Audio(base64=open(audio_file, 'rb').read()) if audio_file else None
  text_pb = resources_pb2.Text(raw=open(text_file, 'rb').read()) if text_file else None
- return self._get_proto(
+ return Inputs._get_proto(
  input_id=input_id,
  dataset_id=dataset_id,
  imagepb=image_pb,
@@ -190,8 +189,8 @@ class Inputs(Lister, BaseClient):
  text_pb=text_pb,
  **kwargs)

- def get_input_from_bytes(self,
- input_id: str,
+ @staticmethod
+ def get_input_from_bytes(input_id: str,
  image_bytes: bytes = None,
  video_bytes: bytes = None,
  audio_bytes: bytes = None,
@@ -213,10 +212,9 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
  >>> image = open('demo.jpg', 'rb').read()
  >>> video = open('demo.mp4', 'rb').read()
- >>> input_proto = input_obj.get_input_from_bytes(input_id = 'demo',image_bytes =image, video_bytes=video)
+ >>> input_proto = Inputs.get_input_from_bytes(input_id = 'demo',image_bytes =image, video_bytes=video)
  """
  if not any((image_bytes, video_bytes, audio_bytes, text_bytes)):
  raise ValueError(
@@ -225,7 +223,7 @@ class Inputs(Lister, BaseClient):
  video_pb = resources_pb2.Video(base64=video_bytes) if video_bytes else None
  audio_pb = resources_pb2.Audio(base64=audio_bytes) if audio_bytes else None
  text_pb = resources_pb2.Text(raw=text_bytes) if text_bytes else None
- return self._get_proto(
+ return Inputs._get_proto(
  input_id=input_id,
  dataset_id=dataset_id,
  imagepb=image_pb,
@@ -234,9 +232,8 @@ class Inputs(Lister, BaseClient):
  text_pb=text_pb,
  **kwargs)

- def get_image_inputs_from_folder(self,
- folder_path: str,
- dataset_id: str = None,
+ @staticmethod
+ def get_image_inputs_from_folder(folder_path: str, dataset_id: str = None,
  labels: bool = False) -> List[Input]: #image specific
  """Create input protos for image data type from folder.

@@ -248,8 +245,7 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_protos = input_obj.get_image_inputs_from_folder(folder_path='demo_folder')
+ >>> input_protos = Inputs.get_image_inputs_from_folder(folder_path='demo_folder')
  """
  input_protos = []
  labels = [folder_path.split('/')[-1]] if labels else None
@@ -259,11 +255,12 @@ class Inputs(Lister, BaseClient):
  input_id = filename.split('.')[0]
  image_pb = resources_pb2.Image(base64=open(os.path.join(folder_path, filename), 'rb').read())
  input_protos.append(
- self._get_proto(
+ Inputs._get_proto(
  input_id=input_id, dataset_id=dataset_id, imagepb=image_pb, labels=labels))
  return input_protos

- def get_text_input(self, input_id: str, raw_text: str, dataset_id: str = None,
+ @staticmethod
+ def get_text_input(input_id: str, raw_text: str, dataset_id: str = None,
  **kwargs) -> Text: #text specific
  """Create input proto for text data type from rawtext.

@@ -278,14 +275,13 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_protos = input_obj.get_text_input(input_id = 'demo', raw_text = 'This is a test')
+ >>> input_protos = Inputs.get_text_input(input_id = 'demo', raw_text = 'This is a test')
  """
  text_pb = resources_pb2.Text(raw=raw_text)
- return self._get_proto(input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, **kwargs)
+ return Inputs._get_proto(input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, **kwargs)

- def get_inputs_from_csv(self,
- csv_path: str,
+ @staticmethod
+ def get_inputs_from_csv(csv_path: str,
  input_type: str = 'text',
  csv_type: str = 'raw',
  dataset_id: str = None,
@@ -304,8 +300,7 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_protos = input_obj.get_inputs_from_csv(csv_path='filepath', input_type='text', csv_type='raw')
+ >>> input_protos = Inputs.get_inputs_from_csv(csv_path='filepath', input_type='text', csv_type='raw')
  """
  input_protos = []
  with open(csv_path) as _file:
@@ -356,7 +351,7 @@ class Inputs(Lister, BaseClient):

  if csv_type == 'raw':
  input_protos.append(
- self.get_text_input(
+ Inputs.get_text_input(
  input_id=input_id,
  raw_text=text,
  dataset_id=dataset_id,
@@ -365,7 +360,7 @@ class Inputs(Lister, BaseClient):
  geo_info=geo_info))
  elif csv_type == 'url':
  input_protos.append(
- self.get_input_from_url(
+ Inputs.get_input_from_url(
  input_id=input_id,
  image_url=image,
  text_url=text,
@@ -377,7 +372,7 @@ class Inputs(Lister, BaseClient):
  geo_info=geo_info))
  else:
  input_protos.append(
- self.get_input_from_file(
+ Inputs.get_input_from_file(
  input_id=input_id,
  image_file=image,
  text_file=text,
@@ -390,9 +385,8 @@ class Inputs(Lister, BaseClient):

  return input_protos

- def get_text_inputs_from_folder(self,
- folder_path: str,
- dataset_id: str = None,
+ @staticmethod
+ def get_text_inputs_from_folder(folder_path: str, dataset_id: str = None,
  labels: bool = False) -> List[Text]: #text specific
  """Create input protos for text data type from folder.

@@ -404,8 +398,7 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_protos = input_obj.get_text_inputs_from_folder(folder_path='demo_folder')
+ >>> input_protos = Inputs.get_text_inputs_from_folder(folder_path='demo_folder')
  """
  input_protos = []
  labels = [folder_path.split('/')[-1]] if labels else None
@@ -415,11 +408,12 @@ class Inputs(Lister, BaseClient):
  input_id = filename.split('.')[0]
  text_pb = resources_pb2.Text(raw=open(os.path.join(folder_path, filename), 'rb').read())
  input_protos.append(
- self._get_proto(
+ Inputs._get_proto(
  input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, labels=labels))
  return input_protos

- def get_annotation_proto(self, input_id: str, label: str, annotations: List) -> Annotation:
+ @staticmethod
+ def get_annotation_proto(input_id: str, label: str, annotations: List) -> Annotation:
  """Create an annotation proto for each bounding box, label input pair.

  Args:
@@ -432,8 +426,7 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_obj.get_annotation_proto(input_id='demo', label='demo', annotations=[x_min, y_min, x_max, y_max])
+ >>> Inputs.get_annotation_proto(input_id='demo', label='demo', annotations=[x_min, y_min, x_max, y_max])
  """
  if not isinstance(annotations, list):
  raise UserError("annotations must be a list of bbox cooridnates")
@@ -458,7 +451,8 @@ class Inputs(Lister, BaseClient):

  return input_annot_proto

- def get_mask_proto(self, input_id: str, label: str, polygons: List[List[float]]) -> Annotation:
+ @staticmethod
+ def get_mask_proto(input_id: str, label: str, polygons: List[List[float]]) -> Annotation:
  """Create an annotation proto for each polygon box, label input pair.

  Args:
@@ -471,8 +465,7 @@ class Inputs(Lister, BaseClient):

  Example:
  >>> from clarifai.client.input import Inputs
- >>> input_obj = Inputs()
- >>> input_obj.get_mask_proto(input_id='demo', label='demo', polygons=[[[x,y],...,[x,y]],...])
+ >>> Inputs.get_mask_proto(input_id='demo', label='demo', polygons=[[[x,y],...,[x,y]],...])
  """
  if not isinstance(polygons, list):
  raise UserError("polygons must be a list of points")
@@ -653,8 +646,7 @@ class Inputs(Lister, BaseClient):
  response = self._grpc_request(self.STUB.PostAnnotations, request)
  if response.status.code != status_code_pb2.SUCCESS:
  try:
- self.logger.warning(
- f"Post annotations failed, status: {response.annotations[0].status.details}")
+ self.logger.warning(f"Post annotations failed, status: {response.annotations[0].status}")
  except Exception:
  self.logger.warning(f"Post annotations failed, status: {response.status.details}")
  finally:
@@ -788,16 +780,16 @@ class Inputs(Lister, BaseClient):
  annotations_info['id'] = annotations_info.pop('annotation_id')
  yield Annotation(**annotations_info)

- def _bulk_upload(self, inputs: List[Input], chunk_size: int = 128) -> None:
+ def _bulk_upload(self, inputs: List[Input], batch_size: int = 128) -> None:
  """Uploads process for large number of inputs.

  Args:
  inputs (List[Input]): input protos
- chunk_size (int): chunk size for each request
+ batch_size (int): batch size for each request
  """
  num_workers: int = min(10, cpu_count()) # limit max workers to 10
- chunk_size = min(128, chunk_size) # limit max protos in a req
- chunked_inputs = Chunker(inputs, chunk_size).chunk()
+ batch_size = min(128, batch_size) # limit max protos in a req
+ chunked_inputs = Chunker(inputs, batch_size).chunk()
  with ThreadPoolExecutor(max_workers=num_workers) as executor:
  with tqdm(total=len(chunked_inputs), desc='Uploading inputs') as progress:
  # Submit all jobs to the executor and store the returned futures
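The input.py changes above turn the proto-building helpers (_get_proto, get_input_from_url, get_input_from_file, get_input_from_bytes, and friends) into static methods and add a pat argument that is forwarded to BaseClient. A minimal sketch of the new usage, assuming a PAT is available in the CLARIFAI_PAT environment variable and that upload_inputs (not shown in this diff) is the upload entry point:

    import os
    from clarifai.client.input import Inputs

    # Protos can now be built without instantiating Inputs (new static-method form).
    proto = Inputs.get_input_from_url(
        input_id='demo', image_url='https://samples.clarifai.com/metro-north.jpg')

    # An authenticated client is still required to talk to the API; pat is the new parameter.
    input_client = Inputs(user_id='user_id', app_id='app_id', pat=os.environ.get('CLARIFAI_PAT'))
    input_client.upload_inputs(inputs=[proto])  # assumed upload method, not part of this diff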
clarifai/client/model.py CHANGED
@@ -27,34 +27,35 @@ class Model(Lister, BaseClient):
  """Model is a class that provides access to Clarifai API endpoints related to Model information."""

  def __init__(self,
- url_init: str = "",
- model_id: str = "",
+ url: str = None,
+ model_id: str = None,
  model_version: Dict = {'id': ""},
  base_url: str = "https://api.clarifai.com",
+ pat: str = None,
  **kwargs):
  """Initializes a Model object.

  Args:
- url_init (str): The URL to initialize the model object.
+ url (str): The URL to initialize the model object.
  model_id (str): The Model ID to interact with.
  model_version (dict): The Model Version to interact with.
  base_url (str): Base API url. Default "https://api.clarifai.com"
+ pat (str): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
  **kwargs: Additional keyword arguments to be passed to the Model.
  """
- if url_init != "" and model_id != "":
- raise UserError("You can only specify one of url_init or model_id.")
- if url_init == "" and model_id == "":
- raise UserError("You must specify one of url_init or model_id.")
- if url_init != "":
- user_id, app_id, _, model_id, model_version_id = ClarifaiUrlHelper.split_clarifai_url(
- url_init)
+ if url and model_id:
+ raise UserError("You can only specify one of url or model_id.")
+ if not url and not model_id:
+ raise UserError("You must specify one of url or model_id.")
+ if url:
+ user_id, app_id, _, model_id, model_version_id = ClarifaiUrlHelper.split_clarifai_url(url)
  model_version = {'id': model_version_id}
  kwargs = {'user_id': user_id, 'app_id': app_id}
  self.kwargs = {**kwargs, 'id': model_id, 'model_version': model_version,}
  self.model_info = resources_pb2.Model(**self.kwargs)
  self.logger = get_logger(logger_level="INFO")
  self.training_params = {}
- BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url)
+ BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url, pat=pat)
  Lister.__init__(self)

  def list_training_templates(self) -> List[str]:
@@ -306,7 +307,7 @@ class Model(Lister, BaseClient):

  Example:
  >>> from clarifai.client.model import Model
- >>> model = Model("model_url")
+ >>> model = Model("url")
  or
  >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
  >>> model_version = model.create_version(description='model_version_description')
@@ -330,7 +331,7 @@ class Model(Lister, BaseClient):
  dict_response = MessageToDict(response, preserving_proto_field_name=True)
  kwargs = self.process_response_keys(dict_response['model'], 'model')

- return Model(base_url=self.base, **kwargs)
+ return Model(base_url=self.base, pat=self.pat, **kwargs)

  def list_versions(self, page_no: int = None,
  per_page: int = None) -> Generator['Model', None, None]:
@@ -345,7 +346,7 @@ class Model(Lister, BaseClient):

  Example:
  >>> from clarifai.client.model import Model
- >>> model = Model("model_url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+ >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
  or
  >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
  >>> all_model_versions = list(model.list_versions())
@@ -375,6 +376,7 @@ class Model(Lister, BaseClient):
  yield Model(
  model_id=self.id,
  base_url=self.base,
+ pat=self.pat,
  **dict(self.kwargs, model_version=model_version_info))

  def predict(self, inputs: List[Input], inference_params: Dict = {}, output_config: Dict = {}):
@@ -432,7 +434,7 @@ class Model(Lister, BaseClient):

  Example:
  >>> from clarifai.client.model import Model
- >>> model = Model("model_url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+ >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
  or
  >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
  >>> model_prediction = model.predict_by_filepath('/path/to/image.jpg', 'image')
@@ -476,13 +478,13 @@ class Model(Lister, BaseClient):
  raise UserError('Invalid bytes.')

  if input_type == "image":
- input_proto = Inputs().get_input_from_bytes("", image_bytes=input_bytes)
+ input_proto = Inputs.get_input_from_bytes("", image_bytes=input_bytes)
  elif input_type == "text":
- input_proto = Inputs().get_input_from_bytes("", text_bytes=input_bytes)
+ input_proto = Inputs.get_input_from_bytes("", text_bytes=input_bytes)
  elif input_type == "video":
- input_proto = Inputs().get_input_from_bytes("", video_bytes=input_bytes)
+ input_proto = Inputs.get_input_from_bytes("", video_bytes=input_bytes)
  elif input_type == "audio":
- input_proto = Inputs().get_input_from_bytes("", audio_bytes=input_bytes)
+ input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)

  return self.predict(
  inputs=[input_proto], inference_params=inference_params, output_config=output_config)
@@ -505,7 +507,7 @@ class Model(Lister, BaseClient):

  Example:
  >>> from clarifai.client.model import Model
- >>> model = Model("model_url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+ >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
  or
  >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
  >>> model_prediction = model.predict_by_url('url', 'image')
@@ -515,13 +517,13 @@ class Model(Lister, BaseClient):
  f"Got input type {input_type} but expected one of image, text, video, audio.")

  if input_type == "image":
- input_proto = Inputs().get_input_from_url("", image_url=url)
+ input_proto = Inputs.get_input_from_url("", image_url=url)
  elif input_type == "text":
- input_proto = Inputs().get_input_from_url("", text_url=url)
+ input_proto = Inputs.get_input_from_url("", text_url=url)
  elif input_type == "video":
- input_proto = Inputs().get_input_from_url("", video_url=url)
+ input_proto = Inputs.get_input_from_url("", video_url=url)
  elif input_type == "audio":
- input_proto = Inputs().get_input_from_url("", audio_url=url)
+ input_proto = Inputs.get_input_from_url("", audio_url=url)

  return self.predict(
  inputs=[input_proto], inference_params=inference_params, output_config=output_config)
@@ -537,8 +539,8 @@ class Model(Lister, BaseClient):
  select_concepts (list[Concept]): The concepts to select.
  sample_ms (int): The number of milliseconds to sample.
  """
+ params = Struct()
  if inference_params is not None:
- params = Struct()
  params.update(inference_params)

  self.model_info.model_version.output_info.CopyFrom(
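With url_init renamed to url and the new pat parameter threaded through __init__, create_version, and list_versions, a Model can be constructed from its Clarifai URL and queried roughly as sketched below (placeholder PAT assumed):

    from clarifai.client.model import Model

    # Construct from a full model URL; the old url_init keyword no longer exists.
    model = Model(
        url='https://clarifai.com/clarifai/main/models/general-image-recognition',
        pat='YOUR_PAT')  # or set the CLARIFAI_PAT environment variable instead

    # predict_by_url now builds its input proto via the Inputs static methods shown above.
    prediction = model.predict_by_url('https://samples.clarifai.com/metro-north.jpg', 'image')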
clarifai/client/module.py CHANGED
@@ -13,34 +13,35 @@ class Module(Lister, BaseClient):
  """Module is a class that provides access to Clarifai API endpoints related to Module information."""

  def __init__(self,
- url_init: str = "",
- module_id: str = "",
+ url: str = None,
+ module_id: str = None,
  module_version: Dict = {'id': ""},
  base_url: str = "https://api.clarifai.com",
+ pat: str = None,
  **kwargs):
  """Initializes a Module object.

  Args:
- url_init (str): The URL to initialize the module object.
+ url (str): The URL to initialize the module object.
  module_id (str): The Module ID to interact with.
  module_version (dict): The Module Version to interact with.
  base_url (str): Base API url. Default "https://api.clarifai.com"
+ pat (str): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
  **kwargs: Additional keyword arguments to be passed to the Module.
  """
- if url_init != "" and module_id != "":
- raise UserError("You can only specify one of url_init or module_id.")
- if url_init == "" and module_id == "":
- raise UserError("You must specify one of url_init or module_id.")
- if url_init != "":
- user_id, app_id, module_id, module_version_id = ClarifaiUrlHelper.split_module_ui_url(
- url_init)
+ if url and module_id:
+ raise UserError("You can only specify one of url or module_id.")
+ if not url and not module_id:
+ raise UserError("You must specify one of url or module_id.")
+ if url:
+ user_id, app_id, module_id, module_version_id = ClarifaiUrlHelper.split_module_ui_url(url)
  module_version = {'id': module_version_id}
  kwargs = {'user_id': user_id, 'app_id': app_id}

  self.kwargs = {**kwargs, 'id': module_id, 'module_version': module_version}
  self.module_info = resources_pb2.Module(**self.kwargs)
  self.logger = get_logger(logger_level="INFO")
- BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url)
+ BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url, pat=pat)
  Lister.__init__(self)

  def list_versions(self, page_no: int = None,
@@ -80,6 +81,7 @@ class Module(Lister, BaseClient):
  yield Module(
  module_id=self.id,
  base_url=self.base,
+ pat=self.pat,
  **dict(self.kwargs, module_version=module_version_info))

  def __getattr__(self, name):
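module.py gets the same treatment: url_init becomes url, and the pat is passed to BaseClient and carried along by list_versions. A brief sketch with placeholder IDs and PAT:

    from clarifai.client.module import Module

    # Either a module URL or a module_id (plus user_id/app_id) may be given, never both.
    module = Module(module_id='module_id', user_id='user_id', app_id='app_id', pat='YOUR_PAT')

    # Each yielded Module carries the same PAT, so follow-up calls stay authenticated.
    for version in module.list_versions():
        print(version)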
clarifai/client/runner.py CHANGED
@@ -32,19 +32,21 @@ class Runner(BaseClient):

  def __init__(self,
  runner_id: str,
- user_id: str = "",
+ user_id: str = None,
  check_runner_exists: bool = True,
  base_url: str = "https://api.clarifai.com",
+ pat: str = None,
  **kwargs) -> None:
  """
  Args:
  runner_id (str): the id of the runner to use. Create the runner in the Clarifai API first
  user_id (str): Clarifai User ID
  base_url (str): Base API url. Default "https://api.clarifai.com"
+ pat (str): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
  """
  user_id = user_id or os.environ.get("CLARIFAI_USER_ID", "")

- if user_id == "":
+ if not user_id:
  raise UserError(
  "Set CLARIFAI_USER_ID as environment variables or pass user_id as input arguments")

@@ -52,7 +54,7 @@ class Runner(BaseClient):
  self.logger = get_logger("INFO", __name__)
  self.kwargs = {**kwargs, 'id': runner_id, 'user_id': user_id}
  self.runner_info = resources_pb2.Runner(**self.kwargs)
- BaseClient.__init__(self, user_id=self.user_id, app_id="", base=base_url)
+ BaseClient.__init__(self, user_id=self.user_id, app_id="", base=base_url, pat=pat)

  # Check that the runner exists.
  if check_runner_exists:
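runner.py gains the same pat plumbing, and user_id now falls back to the CLARIFAI_USER_ID environment variable, with a UserError raised only when neither is provided. A construction sketch with placeholder values:

    import os
    from clarifai.client.runner import Runner

    # user_id may be omitted entirely when CLARIFAI_USER_ID is set in the environment.
    runner = Runner(
        runner_id='my-runner-id',                   # create the runner in the Clarifai API first
        user_id=os.environ.get('CLARIFAI_USER_ID'),
        pat=os.environ.get('CLARIFAI_PAT'),
        check_runner_exists=True)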
clarifai/client/search.py CHANGED
@@ -1,3 +1,4 @@
+ from math import ceil
  from typing import Any, Callable, Dict, Generator

  from clarifai_grpc.grpc.api import resources_pb2, service_pb2
@@ -20,7 +21,9 @@ class Search(Lister, BaseClient):
  user_id,
  app_id,
  top_k: int = DEFAULT_TOP_K,
- metric: str = DEFAULT_SEARCH_METRIC):
+ metric: str = DEFAULT_SEARCH_METRIC,
+ base_url: str = "https://api.clarifai.com",
+ pat: str = None):
  """Initialize the Search object.

  Args:
@@ -28,6 +31,8 @@ class Search(Lister, BaseClient):
  app_id (str): App ID.
  top_k (int, optional): Top K results to retrieve. Defaults to 10.
  metric (str, optional): Similarity metric (either 'cosine' or 'euclidean'). Defaults to 'cosine'.
+ base_url (str, optional): Base API url. Defaults to "https://api.clarifai.com".
+ pat (str, optional): A personal access token for authentication. Can be set as env var CLARIFAI_PAT

  Raises:
  UserError: If the metric is not 'cosine' or 'euclidean'.
@@ -39,11 +44,12 @@ class Search(Lister, BaseClient):
  self.app_id = app_id
  self.metric_distance = dict(cosine="COSINE_DISTANCE", euclidean="EUCLIDEAN_DISTANCE")[metric]
  self.data_proto = resources_pb2.Data()
+ self.top_k = top_k

- self.inputs = Inputs(user_id=self.user_id, app_id=self.app_id)
+ self.inputs = Inputs(user_id=self.user_id, app_id=self.app_id, pat=pat)
  self.rank_filter_schema = get_schema()
- BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id)
- Lister.__init__(self, page_size=top_k)
+ BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url, pat=pat)
+ Lister.__init__(self, page_size=1000)

  def _get_annot_proto(self, **kwargs):
  """Get an Annotation proto message based on keyword arguments.
@@ -118,7 +124,7 @@ class Search(Lister, BaseClient):
  self.data_proto.video.CopyFrom(resources_pb2.Video())
  self.input_proto.data.CopyFrom(self.data_proto)
  elif key == "input_dataset_ids":
- self.input_proto.dataset_ids = value
+ self.input_proto.dataset_ids.extend(value)
  elif key == "input_status_code":
  self.input_proto.status.code = value
  else:
@@ -154,18 +160,31 @@ class Search(Lister, BaseClient):
  Yields:
  response_dict: The next item in the listing.
  """
+ max_pages = ceil(self.top_k / self.default_page_size)
+ total_hits = 0
  page = 1
- request_data['pagination'] = service_pb2.Pagination(page=page, per_page=self.default_page_size)
- while True:
- request_data['pagination'].page = page
+ while (page <= max_pages):
+ if (page == max_pages):
+ per_page = self.top_k - total_hits
+ else:
+ per_page = self.default_page_size
+ request_data['pagination'] = service_pb2.Pagination(page=page, per_page=per_page)
  response = self._grpc_request(endpoint, proto_message(**request_data))
  dict_response = MessageToDict(response, preserving_proto_field_name=True)
  if response.status.code != status_code_pb2.SUCCESS:
- raise Exception(f"Listing failed with response {response!r}")
+ if "page * perPage cannot exceed" in str(response.status.details):
+ msg = (f"Your top_k is set to {self.top_k}. "
+ f"The current pagination settings exceed the limit. Please reach out to "
+ f"support@clarifai.com to request an increase for your use case.\n"
+ f"req_id: {response.status.req_id}")
+ raise UserError(msg)
+ else:
+ raise Exception(f"Listing failed with response {response!r}")

  if 'hits' not in list(dict_response.keys()):
  break
  page += 1
+ total_hits += per_page
  yield response

  def query(self, ranks=[{}], filters=[{}]):
@@ -186,7 +205,7 @@ class Search(Lister, BaseClient):

  Vector search over inputs
  >>> from clarifai.client.search import Search
- >>> search = Search(user_id='user_id', app_id='app_id', top_k=10, metric='cosine')
+ >>> search = Search(user_id='user_id', app_id='app_id', top_k=1, metric='cosine')
  >>> res = search.query(ranks=[{'image_url': 'https://samples.clarifai.com/dog.tiff'}])

  Note: For more detailed search examples, please refer to [examples](https://github.com/Clarifai/examples/tree/main/search).
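search.py now accepts base_url and pat, remembers top_k, and pages through results itself: up to ceil(top_k / default_page_size) requests with a page size of 1000, trimming the final page so no more than top_k hits are requested, and raising UserError when the server's "page * perPage" limit is hit. A usage sketch with a placeholder PAT, assuming each hit exposes score and input fields as in the clarifai_grpc protos:

    from clarifai.client.search import Search

    # top_k now controls how many results the client pages through in total.
    search = Search(user_id='user_id', app_id='app_id', top_k=5, metric='cosine',
                    pat='YOUR_PAT')  # base_url defaults to https://api.clarifai.com

    # query yields one response per page; each response carries the matching hits.
    res = search.query(ranks=[{'image_url': 'https://samples.clarifai.com/dog.tiff'}])
    for page in res:
        for hit in page.hits:
            print(hit.score, hit.input.id)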