datachain 0.6.7__tar.gz → 0.6.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of datachain might be problematic.
Files changed (262)
  1. {datachain-0.6.7 → datachain-0.6.9}/.github/workflows/tests-studio.yml +1 -1
  2. {datachain-0.6.7 → datachain-0.6.9}/.pre-commit-config.yaml +1 -1
  3. {datachain-0.6.7/src/datachain.egg-info → datachain-0.6.9}/PKG-INFO +43 -21
  4. {datachain-0.6.7 → datachain-0.6.9}/README.rst +40 -20
  5. {datachain-0.6.7 → datachain-0.6.9}/pyproject.toml +4 -2
  6. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/__init__.py +2 -1
  7. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/catalog/catalog.py +5 -0
  8. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/cli.py +137 -23
  9. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/fsspec.py +1 -1
  10. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/metastore.py +4 -0
  11. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/dataset.py +5 -0
  12. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/dataset_info.py +3 -0
  13. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/dc.py +26 -6
  14. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/file.py +0 -3
  15. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/meta_formats.py +1 -0
  16. datachain-0.6.9/src/datachain/lib/models/__init__.py +5 -0
  17. datachain-0.6.9/src/datachain/lib/models/bbox.py +45 -0
  18. datachain-0.6.9/src/datachain/lib/models/pose.py +37 -0
  19. datachain-0.6.9/src/datachain/lib/models/yolo.py +39 -0
  20. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/signal_schema.py +1 -1
  21. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/remote/studio.py +12 -2
  22. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/studio.py +18 -6
  23. {datachain-0.6.7 → datachain-0.6.9/src/datachain.egg-info}/PKG-INFO +43 -21
  24. {datachain-0.6.7 → datachain-0.6.9}/src/datachain.egg-info/SOURCES.txt +5 -1
  25. {datachain-0.6.7 → datachain-0.6.9}/src/datachain.egg-info/requires.txt +2 -0
  26. {datachain-0.6.7 → datachain-0.6.9}/tests/conftest.py +28 -0
  27. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_datasets.py +4 -0
  28. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_ls.py +4 -13
  29. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_pull.py +4 -0
  30. {datachain-0.6.7 → datachain-0.6.9}/tests/test_cli_e2e.py +17 -8
  31. {datachain-0.6.7 → datachain-0.6.9}/tests/test_cli_studio.py +64 -18
  32. datachain-0.6.9/tests/unit/lib/test_models.py +50 -0
  33. datachain-0.6.7/docs/assets/flowchart.png +0 -0
  34. {datachain-0.6.7 → datachain-0.6.9}/.cruft.json +0 -0
  35. {datachain-0.6.7 → datachain-0.6.9}/.gitattributes +0 -0
  36. {datachain-0.6.7 → datachain-0.6.9}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
  37. {datachain-0.6.7 → datachain-0.6.9}/.github/ISSUE_TEMPLATE/empty_issue.md +0 -0
  38. {datachain-0.6.7 → datachain-0.6.9}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
  39. {datachain-0.6.7 → datachain-0.6.9}/.github/codecov.yaml +0 -0
  40. {datachain-0.6.7 → datachain-0.6.9}/.github/dependabot.yml +0 -0
  41. {datachain-0.6.7 → datachain-0.6.9}/.github/workflows/benchmarks.yml +0 -0
  42. {datachain-0.6.7 → datachain-0.6.9}/.github/workflows/release.yml +0 -0
  43. {datachain-0.6.7 → datachain-0.6.9}/.github/workflows/tests.yml +0 -0
  44. {datachain-0.6.7 → datachain-0.6.9}/.github/workflows/update-template.yaml +0 -0
  45. {datachain-0.6.7 → datachain-0.6.9}/.gitignore +0 -0
  46. {datachain-0.6.7 → datachain-0.6.9}/CODE_OF_CONDUCT.rst +0 -0
  47. {datachain-0.6.7 → datachain-0.6.9}/CONTRIBUTING.rst +0 -0
  48. {datachain-0.6.7 → datachain-0.6.9}/LICENSE +0 -0
  49. {datachain-0.6.7 → datachain-0.6.9}/docs/assets/captioned_cartoons.png +0 -0
  50. {datachain-0.6.7 → datachain-0.6.9}/docs/assets/datachain-white.svg +0 -0
  51. {datachain-0.6.7 → datachain-0.6.9}/docs/assets/datachain.svg +0 -0
  52. {datachain-0.6.7 → datachain-0.6.9}/docs/index.md +0 -0
  53. {datachain-0.6.7 → datachain-0.6.9}/docs/references/datachain.md +0 -0
  54. {datachain-0.6.7 → datachain-0.6.9}/docs/references/datatype.md +0 -0
  55. {datachain-0.6.7 → datachain-0.6.9}/docs/references/file.md +0 -0
  56. {datachain-0.6.7 → datachain-0.6.9}/docs/references/index.md +0 -0
  57. {datachain-0.6.7 → datachain-0.6.9}/docs/references/sql.md +0 -0
  58. {datachain-0.6.7 → datachain-0.6.9}/docs/references/torch.md +0 -0
  59. {datachain-0.6.7 → datachain-0.6.9}/docs/references/udf.md +0 -0
  60. {datachain-0.6.7 → datachain-0.6.9}/examples/computer_vision/iptc_exif_xmp_lib.py +0 -0
  61. {datachain-0.6.7 → datachain-0.6.9}/examples/computer_vision/llava2_image_desc_lib.py +0 -0
  62. {datachain-0.6.7 → datachain-0.6.9}/examples/computer_vision/openimage-detect.py +0 -0
  63. {datachain-0.6.7 → datachain-0.6.9}/examples/get_started/common_sql_functions.py +0 -0
  64. {datachain-0.6.7 → datachain-0.6.9}/examples/get_started/json-csv-reader.py +0 -0
  65. {datachain-0.6.7 → datachain-0.6.9}/examples/get_started/torch-loader.py +0 -0
  66. {datachain-0.6.7 → datachain-0.6.9}/examples/get_started/udfs/parallel.py +0 -0
  67. {datachain-0.6.7 → datachain-0.6.9}/examples/get_started/udfs/simple.py +0 -0
  68. {datachain-0.6.7 → datachain-0.6.9}/examples/get_started/udfs/stateful.py +0 -0
  69. {datachain-0.6.7 → datachain-0.6.9}/examples/llm_and_nlp/claude-query.py +0 -0
  70. {datachain-0.6.7 → datachain-0.6.9}/examples/llm_and_nlp/hf-dataset-llm-eval.py +0 -0
  71. {datachain-0.6.7 → datachain-0.6.9}/examples/llm_and_nlp/unstructured-embeddings-gen.py +0 -0
  72. {datachain-0.6.7 → datachain-0.6.9}/examples/llm_and_nlp/unstructured-summary-map.py +0 -0
  73. {datachain-0.6.7 → datachain-0.6.9}/examples/multimodal/clip_inference.py +0 -0
  74. {datachain-0.6.7 → datachain-0.6.9}/examples/multimodal/hf_pipeline.py +0 -0
  75. {datachain-0.6.7 → datachain-0.6.9}/examples/multimodal/openai_image_desc_lib.py +0 -0
  76. {datachain-0.6.7 → datachain-0.6.9}/examples/multimodal/wds.py +0 -0
  77. {datachain-0.6.7 → datachain-0.6.9}/examples/multimodal/wds_filtered.py +0 -0
  78. {datachain-0.6.7 → datachain-0.6.9}/mkdocs.yml +0 -0
  79. {datachain-0.6.7 → datachain-0.6.9}/noxfile.py +0 -0
  80. {datachain-0.6.7 → datachain-0.6.9}/overrides/main.html +0 -0
  81. {datachain-0.6.7 → datachain-0.6.9}/setup.cfg +0 -0
  82. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/__main__.py +0 -0
  83. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/asyn.py +0 -0
  84. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/cache.py +0 -0
  85. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/catalog/__init__.py +0 -0
  86. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/catalog/datasource.py +0 -0
  87. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/catalog/loader.py +0 -0
  88. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/cli_utils.py +0 -0
  89. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/__init__.py +0 -0
  90. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/azure.py +0 -0
  91. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/fileslice.py +0 -0
  92. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/gcs.py +0 -0
  93. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/hf.py +0 -0
  94. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/local.py +0 -0
  95. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/s3.py +0 -0
  96. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/config.py +0 -0
  97. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/__init__.py +0 -0
  98. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/db_engine.py +0 -0
  99. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/id_generator.py +0 -0
  100. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/job.py +0 -0
  101. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/schema.py +0 -0
  102. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/serializer.py +0 -0
  103. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/sqlite.py +0 -0
  104. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/warehouse.py +0 -0
  105. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/error.py +0 -0
  106. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/job.py +0 -0
  107. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/__init__.py +0 -0
  108. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/arrow.py +0 -0
  109. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/clip.py +0 -0
  110. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/convert/__init__.py +0 -0
  111. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/convert/flatten.py +0 -0
  112. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/convert/python_to_sql.py +0 -0
  113. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/convert/sql_to_python.py +0 -0
  114. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/convert/unflatten.py +0 -0
  115. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/convert/values_to_tuples.py +0 -0
  116. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/data_model.py +0 -0
  117. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/func/__init__.py +0 -0
  118. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/func/aggregate.py +0 -0
  119. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/func/func.py +0 -0
  120. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/hf.py +0 -0
  121. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/image.py +0 -0
  122. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/listing.py +0 -0
  123. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/listing_info.py +0 -0
  124. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/model_store.py +0 -0
  125. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/pytorch.py +0 -0
  126. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/settings.py +0 -0
  127. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/tar.py +0 -0
  128. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/text.py +0 -0
  129. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/udf.py +0 -0
  130. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/udf_signature.py +0 -0
  131. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/utils.py +0 -0
  132. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/vfile.py +0 -0
  133. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/webdataset.py +0 -0
  134. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/webdataset_laion.py +0 -0
  135. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/listing.py +0 -0
  136. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/node.py +0 -0
  137. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/nodes_fetcher.py +0 -0
  138. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/nodes_thread_pool.py +0 -0
  139. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/progress.py +0 -0
  140. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/py.typed +0 -0
  141. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/__init__.py +0 -0
  142. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/batch.py +0 -0
  143. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/dataset.py +0 -0
  144. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/dispatch.py +0 -0
  145. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/metrics.py +0 -0
  146. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/params.py +0 -0
  147. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/queue.py +0 -0
  148. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/schema.py +0 -0
  149. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/query/session.py +0 -0
  150. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/remote/__init__.py +0 -0
  151. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/__init__.py +0 -0
  152. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/default/__init__.py +0 -0
  153. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/default/base.py +0 -0
  154. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/functions/__init__.py +0 -0
  155. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/functions/aggregate.py +0 -0
  156. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/functions/array.py +0 -0
  157. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/functions/conditional.py +0 -0
  158. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/functions/path.py +0 -0
  159. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/functions/random.py +0 -0
  160. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/functions/string.py +0 -0
  161. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/selectable.py +0 -0
  162. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/sqlite/__init__.py +0 -0
  163. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/sqlite/base.py +0 -0
  164. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/sqlite/types.py +0 -0
  165. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/sqlite/vector.py +0 -0
  166. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/types.py +0 -0
  167. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/sql/utils.py +0 -0
  168. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/telemetry.py +0 -0
  169. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/torch/__init__.py +0 -0
  170. {datachain-0.6.7 → datachain-0.6.9}/src/datachain/utils.py +0 -0
  171. {datachain-0.6.7 → datachain-0.6.9}/src/datachain.egg-info/dependency_links.txt +0 -0
  172. {datachain-0.6.7 → datachain-0.6.9}/src/datachain.egg-info/entry_points.txt +0 -0
  173. {datachain-0.6.7 → datachain-0.6.9}/src/datachain.egg-info/top_level.txt +0 -0
  174. {datachain-0.6.7 → datachain-0.6.9}/tests/__init__.py +0 -0
  175. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/__init__.py +0 -0
  176. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/conftest.py +0 -0
  177. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/datasets/.dvc/.gitignore +0 -0
  178. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/datasets/.dvc/config +0 -0
  179. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/datasets/.gitignore +0 -0
  180. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/datasets/laion-tiny.npz.dvc +0 -0
  181. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/test_datachain.py +0 -0
  182. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/test_ls.py +0 -0
  183. {datachain-0.6.7 → datachain-0.6.9}/tests/benchmarks/test_version.py +0 -0
  184. {datachain-0.6.7 → datachain-0.6.9}/tests/data.py +0 -0
  185. {datachain-0.6.7 → datachain-0.6.9}/tests/examples/__init__.py +0 -0
  186. {datachain-0.6.7 → datachain-0.6.9}/tests/examples/test_examples.py +0 -0
  187. {datachain-0.6.7 → datachain-0.6.9}/tests/examples/test_wds_e2e.py +0 -0
  188. {datachain-0.6.7 → datachain-0.6.9}/tests/examples/wds_data.py +0 -0
  189. {datachain-0.6.7 → datachain-0.6.9}/tests/func/__init__.py +0 -0
  190. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_catalog.py +0 -0
  191. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_client.py +0 -0
  192. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_datachain.py +0 -0
  193. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_dataset_query.py +0 -0
  194. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_feature_pickling.py +0 -0
  195. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_listing.py +0 -0
  196. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_meta_formats.py +0 -0
  197. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_metrics.py +0 -0
  198. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_pytorch.py +0 -0
  199. {datachain-0.6.7 → datachain-0.6.9}/tests/func/test_query.py +0 -0
  200. {datachain-0.6.7 → datachain-0.6.9}/tests/scripts/feature_class.py +0 -0
  201. {datachain-0.6.7 → datachain-0.6.9}/tests/scripts/feature_class_exception.py +0 -0
  202. {datachain-0.6.7 → datachain-0.6.9}/tests/scripts/feature_class_parallel.py +0 -0
  203. {datachain-0.6.7 → datachain-0.6.9}/tests/scripts/feature_class_parallel_data_model.py +0 -0
  204. {datachain-0.6.7 → datachain-0.6.9}/tests/scripts/name_len_slow.py +0 -0
  205. {datachain-0.6.7 → datachain-0.6.9}/tests/test_atomicity.py +0 -0
  206. {datachain-0.6.7 → datachain-0.6.9}/tests/test_query_e2e.py +0 -0
  207. {datachain-0.6.7 → datachain-0.6.9}/tests/test_telemetry.py +0 -0
  208. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/__init__.py +0 -0
  209. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/__init__.py +0 -0
  210. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/conftest.py +0 -0
  211. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_arrow.py +0 -0
  212. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_clip.py +0 -0
  213. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_datachain.py +0 -0
  214. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_datachain_bootstrap.py +0 -0
  215. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_datachain_merge.py +0 -0
  216. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_feature.py +0 -0
  217. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_feature_utils.py +0 -0
  218. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_file.py +0 -0
  219. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_hf.py +0 -0
  220. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_image.py +0 -0
  221. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_listing_info.py +0 -0
  222. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_schema.py +0 -0
  223. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_signal_schema.py +0 -0
  224. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_sql_to_python.py +0 -0
  225. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_text.py +0 -0
  226. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_udf_signature.py +0 -0
  227. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_utils.py +0 -0
  228. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/lib/test_webdataset.py +0 -0
  229. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/__init__.py +0 -0
  230. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/sqlite/__init__.py +0 -0
  231. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/sqlite/test_utils.py +0 -0
  232. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/test_array.py +0 -0
  233. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/test_conditional.py +0 -0
  234. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/test_path.py +0 -0
  235. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/test_random.py +0 -0
  236. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/test_selectable.py +0 -0
  237. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/sql/test_string.py +0 -0
  238. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_asyn.py +0 -0
  239. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_cache.py +0 -0
  240. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_catalog.py +0 -0
  241. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_catalog_loader.py +0 -0
  242. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_cli_parsing.py +0 -0
  243. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_client.py +0 -0
  244. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_client_s3.py +0 -0
  245. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_config.py +0 -0
  246. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_data_storage.py +0 -0
  247. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_database_engine.py +0 -0
  248. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_dataset.py +0 -0
  249. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_dispatch.py +0 -0
  250. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_fileslice.py +0 -0
  251. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_id_generator.py +0 -0
  252. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_listing.py +0 -0
  253. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_metastore.py +0 -0
  254. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_module_exports.py +0 -0
  255. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_query.py +0 -0
  256. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_query_metrics.py +0 -0
  257. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_query_params.py +0 -0
  258. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_serializer.py +0 -0
  259. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_session.py +0 -0
  260. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_utils.py +0 -0
  261. {datachain-0.6.7 → datachain-0.6.9}/tests/unit/test_warehouse.py +0 -0
  262. {datachain-0.6.7 → datachain-0.6.9}/tests/utils.py +0 -0

{datachain-0.6.7 → datachain-0.6.9}/.github/workflows/tests-studio.yml

@@ -32,7 +32,7 @@ jobs:
           POSTGRES_DB: database
           POSTGRES_HOST_AUTH_METHOD: trust
       clickhouse:
-        image: clickhouse/clickhouse-server:24
+        image: clickhouse/clickhouse-server:24.6
         ports:
           - 8123:8123
           - 9010:9000

{datachain-0.6.7 → datachain-0.6.9}/.pre-commit-config.yaml

@@ -24,7 +24,7 @@ repos:
   - id: trailing-whitespace
     exclude: '^LICENSES/'
 - repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: 'v0.7.2'
+  rev: 'v0.7.3'
   hooks:
   - id: ruff
     args: [--fix, --exit-non-zero-on-fix]

{datachain-0.6.7/src/datachain.egg-info → datachain-0.6.9}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: datachain
-Version: 0.6.7
+Version: 0.6.9
 Summary: Wrangle unstructured AI data at scale
 Author-email: Dmitry Petrov <support@dvc.org>
 License: Apache-2.0

@@ -45,6 +45,7 @@ Requires-Dist: huggingface_hub
 Requires-Dist: iterative-telemetry>=0.0.9
 Requires-Dist: platformdirs
 Requires-Dist: dvc-studio-client<1,>=0.21
+Requires-Dist: tabulate
 Provides-Extra: docs
 Requires-Dist: mkdocs>=1.5.2; extra == "docs"
 Requires-Dist: mkdocs-gen-files>=0.5.0; extra == "docs"

@@ -87,6 +88,7 @@ Requires-Dist: types-python-dateutil; extra == "dev"
 Requires-Dist: types-pytz; extra == "dev"
 Requires-Dist: types-PyYAML; extra == "dev"
 Requires-Dist: types-requests; extra == "dev"
+Requires-Dist: types-tabulate; extra == "dev"
 Provides-Extra: examples
 Requires-Dist: datachain[tests]; extra == "examples"
 Requires-Dist: numpy<2,>=1; extra == "examples"

@@ -118,33 +120,41 @@ Requires-Dist: onnx==1.16.1; extra == "examples"
    :target: https://github.com/iterative/datachain/actions/workflows/tests.yml
    :alt: Tests
 
-DataChain is a modern Pythonic data-frame library designed for artificial intelligence.
-It is made to organize your unstructured data into datasets and wrangle it at scale on
-your local machine. Datachain does not abstract or hide the AI models and API calls, but helps to integrate them into the postmodern data stack.
+DataChain is a Python-based AI-data warehouse for transforming and analyzing unstructured
+data like images, audio, videos, text and PDFs. It integrates with external storage
+(e.g., S3) to process data efficiently without data duplication and manages metadata
+in an internal database for easy and efficient querying.
+
+
+Use Cases
+=========
+
+1. **Multimodal Dataset Preparation and Curation**: ideal for organizing and
+   refining data in pre-training, finetuning or LLM evaluating stages.
+2. **GenAI Data Analytics**: Enables advanced analytics for multimodal data and
+   ad-hoc analytics using LLMs.
 
 Key Features
 ============
 
-📂 **Storage as a Source of Truth.**
-   - Process unstructured data without redundant copies from S3, GCP, Azure, and local
-     file systems.
-   - Multimodal data support: images, video, text, PDFs, JSONs, CSVs, parquet.
+📂 **Multimodal Dataset Versioning.**
+   - Version unstructured data without redundant data copies, by supporitng
+     references to S3, GCP, Azure, and local file systems.
+   - Multimodal data support: images, video, text, PDFs, JSONs, CSVs, parquet, etc.
    - Unite files and metadata together into persistent, versioned, columnar datasets.
 
-🐍 **Python-friendly data pipelines.**
-   - Operate on Python objects and object fields.
-   - Built-in parallelization and out-of-memory compute without SQL or Spark.
+🐍 **Python-friendly.**
+   - Operate on Python objects and object fields: float scores, strings, matrixes,
+     LLM response objects.
+   - Run Python code in a high-scale, terabytes size datasets, with built-in
+     parallelization and memory-efficient computing — no SQL or Spark required.
 
 🧠 **Data Enrichment and Processing.**
    - Generate metadata using local AI models and LLM APIs.
-   - Filter, join, and group by metadata. Search by vector embeddings.
+   - Filter, join, and group datasets by metadata. Search by vector embeddings.
+   - High-performance vectorized operations on Python objects: sum, count, avg, etc.
    - Pass datasets to Pytorch and Tensorflow, or export them back into storage.
 
-🚀 **Efficiency.**
-   - Parallelization, out-of-memory workloads and data caching.
-   - Vectorized operations on Python object fields: sum, count, avg, etc.
-   - Optimized vector search.
-
 
 Quick Start
 -----------

@@ -194,7 +204,7 @@ Batch inference with a simple sentiment model using the `transformers` library:
 
    pip install transformers
 
-The code below downloads files the cloud, and applies a user-defined function
+The code below downloads files from the cloud, and applies a user-defined function
 to each one of them. All files with a positive sentiment
 detected are then copied to the local directory.
 

@@ -427,6 +437,19 @@ name suffix, the following code will do it:
     loader = DataLoader(chain, batch_size=1)
 
 
+DataChain Studio Platform
+-------------------------
+
+`DataChain Studio`_ is a proprietary solution for teams that offers:
+
+- **Centralized dataset registry** to manage data, code and dependency
+  dependencies in one place.
+- **Data Lineage** for data sources as well as direvative dataset.
+- **UI for Multimodal Data** like images, videos, and PDFs.
+- **Scalable Compute** to handle large datasets (100M+ files) and in-house
+  AI model inference.
+- **Access control** including SSO and team based collaboration.
+
 Tutorials
 ---------
 

@@ -460,6 +483,5 @@ Community and Support
 .. _Pydantic: https://github.com/pydantic/pydantic
 .. _publicly available: https://radar.kit.edu/radar/en/dataset/FdJmclKpjHzLfExE.ExpBot%2B-%2BA%2Bdataset%2Bof%2B79%2Bdialogs%2Bwith%2Ban%2Bexperimental%2Bcustomer%2Bservice%2Bchatbot
 .. _SQLite: https://www.sqlite.org/
-.. _Getting Started: https://datachain.dvc.ai/
-.. |Flowchart| image:: https://github.com/iterative/datachain/blob/main/docs/assets/flowchart.png?raw=true
-   :alt: DataChain FlowChart
+.. _Getting Started: https://docs.datachain.ai/
+.. _DataChain Studio: https://studio.datachain.ai/

{datachain-0.6.7 → datachain-0.6.9}/README.rst

@@ -19,33 +19,41 @@
    :target: https://github.com/iterative/datachain/actions/workflows/tests.yml
    :alt: Tests
 
-DataChain is a modern Pythonic data-frame library designed for artificial intelligence.
-It is made to organize your unstructured data into datasets and wrangle it at scale on
-your local machine. Datachain does not abstract or hide the AI models and API calls, but helps to integrate them into the postmodern data stack.
+DataChain is a Python-based AI-data warehouse for transforming and analyzing unstructured
+data like images, audio, videos, text and PDFs. It integrates with external storage
+(e.g., S3) to process data efficiently without data duplication and manages metadata
+in an internal database for easy and efficient querying.
+
+
+Use Cases
+=========
+
+1. **Multimodal Dataset Preparation and Curation**: ideal for organizing and
+   refining data in pre-training, finetuning or LLM evaluating stages.
+2. **GenAI Data Analytics**: Enables advanced analytics for multimodal data and
+   ad-hoc analytics using LLMs.
 
 Key Features
 ============
 
-📂 **Storage as a Source of Truth.**
-   - Process unstructured data without redundant copies from S3, GCP, Azure, and local
-     file systems.
-   - Multimodal data support: images, video, text, PDFs, JSONs, CSVs, parquet.
+📂 **Multimodal Dataset Versioning.**
+   - Version unstructured data without redundant data copies, by supporitng
+     references to S3, GCP, Azure, and local file systems.
+   - Multimodal data support: images, video, text, PDFs, JSONs, CSVs, parquet, etc.
    - Unite files and metadata together into persistent, versioned, columnar datasets.
 
-🐍 **Python-friendly data pipelines.**
-   - Operate on Python objects and object fields.
-   - Built-in parallelization and out-of-memory compute without SQL or Spark.
+🐍 **Python-friendly.**
+   - Operate on Python objects and object fields: float scores, strings, matrixes,
+     LLM response objects.
+   - Run Python code in a high-scale, terabytes size datasets, with built-in
+     parallelization and memory-efficient computing — no SQL or Spark required.
 
 🧠 **Data Enrichment and Processing.**
    - Generate metadata using local AI models and LLM APIs.
-   - Filter, join, and group by metadata. Search by vector embeddings.
+   - Filter, join, and group datasets by metadata. Search by vector embeddings.
+   - High-performance vectorized operations on Python objects: sum, count, avg, etc.
   - Pass datasets to Pytorch and Tensorflow, or export them back into storage.
 
-🚀 **Efficiency.**
-   - Parallelization, out-of-memory workloads and data caching.
-   - Vectorized operations on Python object fields: sum, count, avg, etc.
-   - Optimized vector search.
-
 
 Quick Start
 -----------

@@ -95,7 +103,7 @@ Batch inference with a simple sentiment model using the `transformers` library:
 
    pip install transformers
 
-The code below downloads files the cloud, and applies a user-defined function
+The code below downloads files from the cloud, and applies a user-defined function
 to each one of them. All files with a positive sentiment
 detected are then copied to the local directory.
 

@@ -328,6 +336,19 @@ name suffix, the following code will do it:
     loader = DataLoader(chain, batch_size=1)
 
 
+DataChain Studio Platform
+-------------------------
+
+`DataChain Studio`_ is a proprietary solution for teams that offers:
+
+- **Centralized dataset registry** to manage data, code and dependency
+  dependencies in one place.
+- **Data Lineage** for data sources as well as direvative dataset.
+- **UI for Multimodal Data** like images, videos, and PDFs.
+- **Scalable Compute** to handle large datasets (100M+ files) and in-house
+  AI model inference.
+- **Access control** including SSO and team based collaboration.
+
 Tutorials
 ---------
 

@@ -361,6 +382,5 @@ Community and Support
 .. _Pydantic: https://github.com/pydantic/pydantic
 .. _publicly available: https://radar.kit.edu/radar/en/dataset/FdJmclKpjHzLfExE.ExpBot%2B-%2BA%2Bdataset%2Bof%2B79%2Bdialogs%2Bwith%2Ban%2Bexperimental%2Bcustomer%2Bservice%2Bchatbot
 .. _SQLite: https://www.sqlite.org/
-.. _Getting Started: https://datachain.dvc.ai/
-.. |Flowchart| image:: https://github.com/iterative/datachain/blob/main/docs/assets/flowchart.png?raw=true
-   :alt: DataChain FlowChart
+.. _Getting Started: https://docs.datachain.ai/
+.. _DataChain Studio: https://studio.datachain.ai/

{datachain-0.6.7 → datachain-0.6.9}/pyproject.toml

@@ -47,7 +47,8 @@ dependencies = [
     "huggingface_hub",
     "iterative-telemetry>=0.0.9",
     "platformdirs",
-    "dvc-studio-client>=0.21,<1"
+    "dvc-studio-client>=0.21,<1",
+    "tabulate"
 ]
 
 [project.optional-dependencies]

@@ -98,7 +99,8 @@ dev = [
     "types-python-dateutil",
     "types-pytz",
     "types-PyYAML",
-    "types-requests"
+    "types-requests",
+    "types-tabulate"
 ]
 examples = [
     "datachain[tests]",

{datachain-0.6.7 → datachain-0.6.9}/src/datachain/__init__.py

@@ -1,4 +1,4 @@
-from datachain.lib import func
+from datachain.lib import func, models
 from datachain.lib.data_model import DataModel, DataType, is_chain_type
 from datachain.lib.dc import C, Column, DataChain, Sys
 from datachain.lib.file import (

@@ -38,5 +38,6 @@ __all__ = [
     "func",
     "is_chain_type",
     "metrics",
+    "models",
     "param",
 ]
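
The `models` re-export above pairs with the new `src/datachain/lib/models/` package (files 16-19 in the list: `__init__.py`, `bbox.py`, `pose.py`, `yolo.py`). The package contents are not shown in this diff, so the following is only a minimal import sketch of what the change makes reachable:

    # Minimal sketch: 0.6.9 re-exports datachain.lib.models at the package root.
    # The bbox/pose/yolo contents themselves are not part of this diff.
    import datachain
    from datachain.lib import models

    print(datachain.models is models)  # True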

{datachain-0.6.7 → datachain-0.6.9}/src/datachain/catalog/catalog.py

@@ -769,6 +769,7 @@ class Catalog:
         create_rows: Optional[bool] = True,
         validate_version: Optional[bool] = True,
         listing: Optional[bool] = False,
+        uuid: Optional[str] = None,
     ) -> "DatasetRecord":
         """
         Creates new dataset of a specific version.

@@ -816,6 +817,7 @@
             query_script=query_script,
             create_rows_table=create_rows,
             columns=columns,
+            uuid=uuid,
         )
 
     def create_new_dataset_version(

@@ -832,6 +834,7 @@
         script_output="",
         create_rows_table=True,
         job_id: Optional[str] = None,
+        uuid: Optional[str] = None,
     ) -> DatasetRecord:
         """
         Creates dataset version if it doesn't exist.

@@ -855,6 +858,7 @@
             schema=schema,
             job_id=job_id,
             ignore_if_exists=True,
+            uuid=uuid,
         )
 
         if create_rows_table:

@@ -1400,6 +1404,7 @@
             columns=columns,
             feature_schema=remote_dataset_version.feature_schema,
             validate_version=False,
+            uuid=remote_dataset_version.uuid,
         )
 
         # asking remote to export dataset rows table to s3 and to return signed

{datachain-0.6.7 → datachain-0.6.9}/src/datachain/cli.py

@@ -4,18 +4,21 @@ import shlex
 import sys
 import traceback
 from argparse import Action, ArgumentParser, ArgumentTypeError, Namespace
-from collections.abc import Iterable, Iterator, Mapping, Sequence
+from collections.abc import Iterable, Iterator, Sequence
 from importlib.metadata import PackageNotFoundError, version
 from itertools import chain
 from multiprocessing import freeze_support
 from typing import TYPE_CHECKING, Optional, Union
 
 import shtab
+from tabulate import tabulate
 
 from datachain import Session, utils
 from datachain.cli_utils import BooleanOptionalAction, CommaSeparatedArgs, KeyValueArgs
+from datachain.config import Config
+from datachain.error import DataChainError
 from datachain.lib.dc import DataChain
-from datachain.studio import process_studio_cli_args
+from datachain.studio import list_datasets, process_studio_cli_args
 from datachain.telemetry import telemetry
 
 if TYPE_CHECKING:

@@ -416,7 +419,36 @@ def get_parser() -> ArgumentParser:  # noqa: PLR0915
         help="Dataset labels",
     )
 
-    subp.add_parser("ls-datasets", parents=[parent_parser], description="List datasets")
+    datasets_parser = subp.add_parser(
+        "datasets", parents=[parent_parser], description="List datasets"
+    )
+    datasets_parser.add_argument(
+        "--studio",
+        action="store_true",
+        default=False,
+        help="List the files in the Studio",
+    )
+    datasets_parser.add_argument(
+        "-L",
+        "--local",
+        action="store_true",
+        default=False,
+        help="List local files only",
+    )
+    datasets_parser.add_argument(
+        "-a",
+        "--all",
+        action="store_true",
+        default=True,
+        help="List all files including hidden files",
+    )
+    datasets_parser.add_argument(
+        "--team",
+        action="store",
+        default=None,
+        help="The team to list datasets for. By default, it will use team from config.",
+    )
+
     rm_dataset_parser = subp.add_parser(
         "rm-dataset", parents=[parent_parser], description="Removes dataset"
     )

@@ -474,10 +506,30 @@ def get_parser() -> ArgumentParser:  # noqa: PLR0915
         help="List files in the long format",
     )
     parse_ls.add_argument(
-        "--remote",
+        "--studio",
+        action="store_true",
+        default=False,
+        help="List the files in the Studio",
+    )
+    parse_ls.add_argument(
+        "-L",
+        "--local",
+        action="store_true",
+        default=False,
+        help="List local files only",
+    )
+    parse_ls.add_argument(
+        "-a",
+        "--all",
+        action="store_true",
+        default=True,
+        help="List all files including hidden files",
+    )
+    parse_ls.add_argument(
+        "--team",
         action="store",
-        default="",
-        help="Name of remote to use",
+        default=None,
+        help="The team to list datasets for. By default, it will use team from config.",
     )
 
     parse_du = subp.add_parser(

@@ -758,11 +810,12 @@ def format_ls_entry(entry: str) -> str:
 def ls_remote(
     paths: Iterable[str],
     long: bool = False,
+    team: Optional[str] = None,
 ):
     from datachain.node import long_line_str
     from datachain.remote.studio import StudioClient
 
-    client = StudioClient()
+    client = StudioClient(team=team)
     first = True
     for path, response in client.ls(paths):
         if not first:

@@ -789,28 +842,66 @@ def ls_remote(
 def ls(
     sources,
     long: bool = False,
-    remote: str = "",
-    config: Optional[Mapping[str, str]] = None,
+    studio: bool = False,
+    local: bool = False,
+    all: bool = True,
+    team: Optional[str] = None,
     **kwargs,
 ):
-    if config is None:
-        from .config import Config
+    token = Config().read().get("studio", {}).get("token")
+    all, local, studio = _determine_flavors(studio, local, all, token)
 
-        config = Config().get_remote_config(remote=remote)
-    remote_type = config["type"]
-    if remote_type == "local":
+    if all or local:
         ls_local(sources, long=long, **kwargs)
-    else:
-        ls_remote(
-            sources,
-            long=long,
+
+    if (all or studio) and token:
+        ls_remote(sources, long=long, team=team)
+
+
+def datasets(
+    catalog: "Catalog",
+    studio: bool = False,
+    local: bool = False,
+    all: bool = True,
+    team: Optional[str] = None,
+):
+    token = Config().read().get("studio", {}).get("token")
+    all, local, studio = _determine_flavors(studio, local, all, token)
+
+    local_datasets = set(list_datasets_local(catalog)) if all or local else set()
+    studio_datasets = (
+        set(list_datasets(team=team)) if (all or studio) and token else set()
+    )
+
+    rows = [
+        _datasets_tabulate_row(
+            name=name,
+            version=version,
+            both=(all or (local and studio)) and token,
+            local=(name, version) in local_datasets,
+            studio=(name, version) in studio_datasets,
         )
+        for name, version in local_datasets.union(studio_datasets)
+    ]
+
+    print(tabulate(rows, headers="keys"))
 
 
-def ls_datasets(catalog: "Catalog"):
+def list_datasets_local(catalog: "Catalog"):
     for d in catalog.ls_datasets():
         for v in d.versions:
-            print(f"{d.name} (v{v.version})")
+            yield (d.name, v.version)
+
+
+def _datasets_tabulate_row(name, version, both, local, studio):
+    row = {
+        "Name": name,
+        "Version": version,
+    }
+    if both:
+        row["Studio"] = "\u2714" if studio else "\u2716"
+        row["Local"] = "\u2714" if local else "\u2716"
+    return row
 
 
 def rm_dataset(
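
The new `datasets` command renders its output with `tabulate`, the dependency added in `pyproject.toml` above. A self-contained sketch of the formatting (the helper is copied verbatim from this hunk; the dataset names are made up):

    from tabulate import tabulate

    # Copied from the hunk above: one row per (name, version) pair; the
    # Studio/Local presence columns appear only when both sources are listed.
    def _datasets_tabulate_row(name, version, both, local, studio):
        row = {
            "Name": name,
            "Version": version,
        }
        if both:
            row["Studio"] = "\u2714" if studio else "\u2716"
            row["Local"] = "\u2714" if local else "\u2716"
        return row

    # Made-up sample data standing in for list_datasets_local()/list_datasets().
    rows = [
        _datasets_tabulate_row("cats", 1, both=True, local=True, studio=False),
        _datasets_tabulate_row("dogs", 2, both=True, local=True, studio=True),
    ]
    print(tabulate(rows, headers="keys"))
    # Output, approximately:
    # Name      Version  Studio    Local
    # ------  ---------  --------  -------
    # cats            1  ✖         ✔
    # dogs            2  ✔         ✔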

@@ -953,6 +1044,20 @@ def completion(shell: str) -> str:
     )
 
 
+def _determine_flavors(studio: bool, local: bool, all: bool, token: Optional[str]):
+    if studio and not token:
+        raise DataChainError(
+            "Not logged in to Studio. Log in with 'datachain studio login'."
+        )
+
+    if local or studio:
+        all = False
+
+    all = all and not (local or studio)
+
+    return all, local, studio
+
+
 def main(argv: Optional[list[str]] = None) -> int:  # noqa: C901, PLR0912, PLR0915
     # Required for Windows multiprocessing support
     freeze_support()
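
The interplay of `--studio`, `--local`, and `--all` is easiest to see by calling `_determine_flavors` directly. A runnable sketch (function copied from the hunk above, with a plain `Exception` subclass standing in for `datachain.error.DataChainError`):

    from typing import Optional

    class DataChainError(Exception):  # stand-in for datachain.error.DataChainError
        pass

    # Copied from the hunk above: an explicit --local or --studio switches off
    # "all"; --studio additionally requires a saved Studio token.
    def _determine_flavors(studio: bool, local: bool, all: bool, token: Optional[str]):
        if studio and not token:
            raise DataChainError(
                "Not logged in to Studio. Log in with 'datachain studio login'."
            )

        if local or studio:
            all = False

        all = all and not (local or studio)

        return all, local, studio

    print(_determine_flavors(False, False, True, token="tok"))  # (True, False, False)
    print(_determine_flavors(True, False, True, token="tok"))   # (False, False, True)
    print(_determine_flavors(False, True, True, token=None))    # (False, True, False)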

@@ -1032,12 +1137,21 @@ def main(argv: Optional[list[str]] = None) -> int:  # noqa: C901, PLR0912, PLR0915
         ls(
             args.sources,
             long=bool(args.long),
-            remote=args.remote,
+            studio=args.studio,
+            local=args.local,
+            all=args.all,
+            team=args.team,
             update=bool(args.update),
             client_config=client_config,
         )
-    elif args.command == "ls-datasets":
-        ls_datasets(catalog)
+    elif args.command == "datasets":
+        datasets(
+            catalog=catalog,
+            studio=args.studio,
+            local=args.local,
+            all=args.all,
+            team=args.team,
+        )
     elif args.command == "show":
         show(
             catalog,
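
Net effect of the `cli.py` changes: `ls-datasets` becomes `datasets`, and both `ls` and `datasets` gain `--studio`, `-L/--local`, `-a/--all`, and `--team` flags. Since `main()` takes an argv list, the renamed command can also be exercised programmatically; a hedged sketch (assumes a local install, a saved Studio token for `--studio`, and a made-up team name):

    # Equivalent to running `datachain datasets --studio --team my-team`.
    # Without a Studio token this raises DataChainError via _determine_flavors.
    from datachain.cli import main

    exit_code = main(["datasets", "--studio", "--team", "my-team"])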

{datachain-0.6.7 → datachain-0.6.9}/src/datachain/client/fsspec.py

@@ -358,7 +358,7 @@ class Client(ABC):
     ) -> BinaryIO:
         """Open a file, including files in tar archives."""
         if use_cache and (cache_path := self.cache.get_path(file)):
-            return open(cache_path, mode="rb")  # noqa: SIM115
+            return open(cache_path, mode="rb")
         assert not file.location
         return FileWrapper(self.fs.open(self.get_full_path(file.path)), cb)  # type: ignore[return-value]
 

{datachain-0.6.7 → datachain-0.6.9}/src/datachain/data_storage/metastore.py

@@ -138,6 +138,7 @@ class AbstractMetastore(ABC, Serializable):
         size: Optional[int] = None,
         preview: Optional[list[dict]] = None,
         job_id: Optional[str] = None,
+        uuid: Optional[str] = None,
     ) -> DatasetRecord:
         """Creates new dataset version."""
 

@@ -352,6 +353,7 @@ class AbstractDBMetastore(AbstractMetastore):
         """Datasets versions table columns."""
         return [
             Column("id", Integer, primary_key=True),
+            Column("uuid", Text, nullable=False, default=uuid4()),
             Column(
                 "dataset_id",
                 Integer,

@@ -545,6 +547,7 @@ class AbstractDBMetastore(AbstractMetastore):
         size: Optional[int] = None,
         preview: Optional[list[dict]] = None,
         job_id: Optional[str] = None,
+        uuid: Optional[str] = None,
         conn=None,
     ) -> DatasetRecord:
         """Creates new dataset version."""

@@ -555,6 +558,7 @@
 
         query = self._datasets_versions_insert().values(
             dataset_id=dataset.id,
+            uuid=uuid or str(uuid4()),
             version=version,
             status=status,
             feature_schema=json.dumps(feature_schema or {}),
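
The `uuid or str(uuid4())` insert value is the other half of the `uuid` plumbing added in `catalog.py` above: when a dataset is pulled from Studio, `remote_dataset_version.uuid` travels down to this insert and is preserved, while locally created versions supply no UUID and get a fresh one. A tiny sketch of that fallback idiom:

    from typing import Optional
    from uuid import uuid4

    # Same fallback as the insert above: keep a caller-supplied (e.g. remote)
    # UUID, generate a new one only when none is given.
    def version_uuid(uuid: Optional[str] = None) -> str:
        return uuid or str(uuid4())

    print(version_uuid("2f0e2a1c-8f6d-4b7e-9c3a-111111111111"))  # kept as-is
    print(version_uuid())  # freshly generated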

{datachain-0.6.7 → datachain-0.6.9}/src/datachain/dataset.py

@@ -163,6 +163,7 @@ class DatasetStatus:
 @dataclass
 class DatasetVersion:
     id: int
+    uuid: str
     dataset_id: int
     version: int
     status: int

@@ -184,6 +185,7 @@ class DatasetVersion:
     def parse(  # noqa: PLR0913
         cls: type[V],
         id: int,
+        uuid: str,
         dataset_id: int,
         version: int,
         status: int,

@@ -203,6 +205,7 @@
     ):
         return cls(
             id,
+            uuid,
             dataset_id,
             version,
             status,

@@ -306,6 +309,7 @@ class DatasetRecord:
         query_script: str,
         schema: str,
         version_id: int,
+        version_uuid: str,
         version_dataset_id: int,
         version: int,
         version_status: int,

@@ -331,6 +335,7 @@
 
         dataset_version = DatasetVersion.parse(
             version_id,
+            version_uuid,
             version_dataset_id,
             version,
             version_status,

{datachain-0.6.7 → datachain-0.6.9}/src/datachain/lib/dataset_info.py

@@ -1,6 +1,7 @@
 import json
 from datetime import datetime
 from typing import TYPE_CHECKING, Any, Optional, Union
+from uuid import uuid4
 
 from pydantic import Field, field_validator
 

@@ -15,6 +16,7 @@ if TYPE_CHECKING:
 
 class DatasetInfo(DataModel):
     name: str
+    uuid: str = Field(default=str(uuid4()))
     version: int = Field(default=1)
     status: int = Field(default=DatasetStatus.CREATED)
     created_at: datetime = Field(default=TIME_ZERO)

@@ -60,6 +62,7 @@ class DatasetInfo(DataModel):
         job: Optional[Job],
     ) -> "Self":
         return cls(
+            uuid=version.uuid,
             name=dataset.name,
             version=version.version,
             status=version.status,