datachain 0.6.9__py3-none-any.whl → 0.6.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of datachain might be problematic.

datachain/catalog/catalog.py CHANGED

@@ -603,9 +603,10 @@ class Catalog:
        )

        lst = Listing(
+           self.metastore.clone(),
            self.warehouse.clone(),
            Client.get_client(list_uri, self.cache, **self.client_config),
-           self.get_dataset(list_ds_name),
+           dataset_name=list_ds_name,
            object_name=object_name,
        )

@@ -698,9 +699,13 @@ class Catalog:

        client = self.get_client(source, **client_config)
        uri = client.uri
-       st = self.warehouse.clone()
        dataset_name, _, _, _ = DataChain.parse_uri(uri, self.session)
-       listing = Listing(st, client, self.get_dataset(dataset_name))
+       listing = Listing(
+           self.metastore.clone(),
+           self.warehouse.clone(),
+           client,
+           dataset_name=dataset_name,
+       )
        rows = DatasetQuery(
            name=dataset.name, version=ds_version, catalog=self
        ).to_db_records()

@@ -1354,6 +1359,13 @@ class Catalog:
            # we will create new one if it doesn't exist
            pass

+       if dataset and version and dataset.has_version(version):
+           """No need to communicate with Studio at all"""
+           dataset_uri = create_dataset_uri(remote_dataset_name, version)
+           print(f"Local copy of dataset {dataset_uri} already present")
+           _instantiate_dataset()
+           return
+
        remote_dataset = self.get_remote_dataset(remote_dataset_name)
        # if version is not specified in uri, take the latest one
        if not version:

datachain/data_storage/sqlite.py CHANGED

@@ -747,8 +747,12 @@ class SQLiteWarehouse(AbstractWarehouse):

        ids = self.db.execute(select_ids).fetchall()

-       select_q = query.with_only_columns(
-           *[c for c in query.selected_columns if c.name != "sys__id"]
+       select_q = (
+           query.with_only_columns(
+               *[c for c in query.selected_columns if c.name != "sys__id"]
+           )
+           .offset(None)
+           .limit(None)
        )

        for batch in batched_it(ids, 10_000):
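For context on the SQLiteWarehouse change above: the rewritten query chains `.offset(None).limit(None)` onto the column selection. Below is a minimal SQLAlchemy sketch of that behavior (the table and column names are illustrative, not from datachain); passing None clears a previously applied LIMIT/OFFSET, which matters when the same select is re-executed per ID batch.

```python
import sqlalchemy as sa

# Illustrative table with the sys__id column datachain filters out.
t = sa.table("rows", sa.column("sys__id"), sa.column("value"))

# A query that already carries a row window.
q = sa.select(t.c.sys__id, t.c.value).limit(100).offset(50)

# Passing None removes the LIMIT/OFFSET criteria from the statement,
# so the compiled SQL no longer contains a stale row window.
cleared = q.offset(None).limit(None)
print(str(cleared))
```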
datachain/lib/dc.py CHANGED
@@ -642,6 +642,59 @@ class DataChain:
        }
        return chain.gen(**signal_dict)  # type: ignore[misc, arg-type]

+   def explode(
+       self,
+       col: str,
+       model_name: Optional[str] = None,
+       object_name: Optional[str] = None,
+   ) -> "DataChain":
+       """Explodes a column containing JSON objects (dict or str DataChain type) into
+       individual columns based on the schema of the JSON. Schema is inferred from
+       the first row of the column.
+
+       Args:
+           col: the name of the column containing JSON to be exploded.
+           model_name: optional generated model name. By default generates the name
+               automatically.
+           object_name: optional generated object column name. By default generates the
+               name automatically.
+
+       Returns:
+           DataChain: A new DataChain instance with the new set of columns.
+       """
+       import json
+
+       import pyarrow as pa
+
+       from datachain.lib.arrow import schema_to_output
+
+       json_value = next(self.limit(1).collect(col))
+       json_dict = (
+           json.loads(json_value) if isinstance(json_value, str) else json_value
+       )
+
+       if not isinstance(json_dict, dict):
+           raise TypeError(f"Column {col} should be a string or dict type with JSON")
+
+       schema = pa.Table.from_pylist([json_dict]).schema
+       output = schema_to_output(schema, None)
+
+       if not model_name:
+           model_name = f"{col.title()}ExplodedModel"
+
+       model = dict_to_data_model(model_name, output)
+
+       def json_to_model(json_value: Union[str, dict]):
+           json_dict = (
+               json.loads(json_value) if isinstance(json_value, str) else json_value
+           )
+           return model.model_validate(json_dict)
+
+       if not object_name:
+           object_name = f"{col}_expl"
+
+       return self.map(json_to_model, params=col, output={object_name: model})
+
    @classmethod
    def datasets(
        cls,
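A hedged usage sketch of the new DataChain.explode() method added above; the column name and JSON payloads are invented for illustration and are not taken from the package.

```python
from datachain import DataChain

# Illustrative rows: a "meta" column holding JSON strings.
chain = DataChain.from_values(
    meta=[
        '{"width": 1920, "height": 1080, "format": "png"}',
        '{"width": 640, "height": 480, "format": "jpeg"}',
    ]
)

# explode() infers a model from the first row's JSON and maps every row
# into a new "meta_expl" object column with width/height/format fields.
exploded = chain.explode("meta", object_name="meta_expl")
exploded.show()
```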
datachain/lib/models/__init__.py CHANGED

@@ -1,5 +1,6 @@
-from . import yolo
-from .bbox import BBox
+from . import ultralytics
+from .bbox import BBox, OBBox
 from .pose import Pose, Pose3D
+from .segment import Segments

-__all__ = ["BBox", "Pose", "Pose3D", "yolo"]
+__all__ = ["BBox", "OBBox", "Pose", "Pose3D", "Segments", "ultralytics"]
datachain/lib/models/bbox.py CHANGED

@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field

 from datachain.lib.data_model import DataModel

@@ -11,10 +9,7 @@ class BBox(DataModel):

     Attributes:
         title (str): The title of the bounding box.
-        x1 (float): The x-coordinate of the top-left corner of the bounding box.
-        y1 (float): The y-coordinate of the top-left corner of the bounding box.
-        x2 (float): The x-coordinate of the bottom-right corner of the bounding box.
-        y2 (float): The y-coordinate of the bottom-right corner of the bounding box.
+        coords (list[int]): The coordinates of the bounding box.

     The bounding box is defined by two points:
         - (x1, y1): The top-left corner of the box.

@@ -22,24 +17,100 @@ class BBox(DataModel):
     """

     title: str = Field(default="")
-    x1: float = Field(default=0)
-    y1: float = Field(default=0)
-    x2: float = Field(default=0)
-    y2: float = Field(default=0)
+    coords: list[int] = Field(default=None)
+
+    @staticmethod
+    def from_list(coords: list[float], title: str = "") -> "BBox":
+        assert len(coords) == 4, "Bounding box coordinates must be a list of 4 floats."
+        assert all(
+            isinstance(value, (int, float)) for value in coords
+        ), "Bounding box coordinates must be integers or floats."
+        return BBox(
+            title=title,
+            coords=[round(c) for c in coords],
+        )
+
+    @staticmethod
+    def from_dict(coords: dict[str, float], title: str = "") -> "BBox":
+        assert (
+            len(coords) == 4
+        ), "Bounding box coordinates must be a dictionary of 4 floats."
+        assert set(coords) == {
+            "x1",
+            "y1",
+            "x2",
+            "y2",
+        }, "Bounding box coordinates must contain keys with coordinates."
+        assert all(
+            isinstance(value, (int, float)) for value in coords.values()
+        ), "Bounding box coordinates must be integers or floats."
+        return BBox(
+            title=title,
+            coords=[
+                round(coords["x1"]),
+                round(coords["y1"]),
+                round(coords["x2"]),
+                round(coords["y2"]),
+            ],
+        )
+
+
+class OBBox(DataModel):
+    """
+    A data model for representing oriented bounding boxes.
+
+    Attributes:
+        title (str): The title of the oriented bounding box.
+        coords (list[int]): The coordinates of the oriented bounding box.
+
+    The oriented bounding box is defined by four points:
+        - (x1, y1): The first corner of the box.
+        - (x2, y2): The second corner of the box.
+        - (x3, y3): The third corner of the box.
+        - (x4, y4): The fourth corner of the box.
+    """
+
+    title: str = Field(default="")
+    coords: list[int] = Field(default=None)
+
+    @staticmethod
+    def from_list(coords: list[float], title: str = "") -> "OBBox":
+        assert (
+            len(coords) == 8
+        ), "Oriented bounding box coordinates must be a list of 8 floats."
+        assert all(
+            isinstance(value, (int, float)) for value in coords
+        ), "Oriented bounding box coordinates must be integers or floats."
+        return OBBox(
+            title=title,
+            coords=[round(c) for c in coords],
+        )

     @staticmethod
-    def from_xywh(bbox: list[float], title: Optional[str] = None) -> "BBox":
-        """
-        Converts a bounding box in (x, y, width, height) format
-        to a BBox data model instance.
-
-        Args:
-            bbox (list[float]): A bounding box, represented as a list
-                of four floats [x, y, width, height].
-
-        Returns:
-            BBox2D: An instance of the BBox data model.
-        """
-        assert len(bbox) == 4, f"Bounding box must have 4 elements, got f{len(bbox)}"
-        x, y, w, h = bbox
-        return BBox(title=title or "", x1=x, y1=y, x2=x + w, y2=y + h)
+    def from_dict(coords: dict[str, float], title: str = "") -> "OBBox":
+        assert set(coords) == {
+            "x1",
+            "y1",
+            "x2",
+            "y2",
+            "x3",
+            "y3",
+            "x4",
+            "y4",
+        }, "Oriented bounding box coordinates must contain keys with coordinates."
+        assert all(
+            isinstance(value, (int, float)) for value in coords.values()
+        ), "Oriented bounding box coordinates must be integers or floats."
+        return OBBox(
+            title=title,
+            coords=[
+                round(coords["x1"]),
+                round(coords["y1"]),
+                round(coords["x2"]),
+                round(coords["y2"]),
+                round(coords["x3"]),
+                round(coords["y3"]),
+                round(coords["x4"]),
+                round(coords["y4"]),
+            ],
+        )
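For illustration, a short sketch of the new coordinate-list constructors added above; the coordinate values and titles are made up.

```python
from datachain.lib.models import BBox, OBBox

# from_list takes [x1, y1, x2, y2] and rounds the values to ints.
bbox = BBox.from_list([12.4, 8.9, 120.2, 98.7], title="cat")
print(bbox.coords)  # [12, 9, 120, 99]

# from_dict expects exactly the keys x1, y1, x2, y2.
bbox = BBox.from_dict({"x1": 12.4, "y1": 8.9, "x2": 120.2, "y2": 98.7}, title="cat")

# OBBox works the same way with four corners (eight coordinates).
obbox = OBBox.from_list([0, 0, 10, 0, 10, 5, 0, 5], title="card")
```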
datachain/lib/models/pose.py CHANGED

@@ -8,15 +8,48 @@ class Pose(DataModel):
     A data model for representing pose keypoints.

     Attributes:
-        x (list[float]): The x-coordinates of the keypoints.
-        y (list[float]): The y-coordinates of the keypoints.
+        x (list[int]): The x-coordinates of the keypoints.
+        y (list[int]): The y-coordinates of the keypoints.

     The keypoints are represented as lists of x and y coordinates, where each index
     corresponds to a specific body part.
     """

-    x: list[float] = Field(default=None)
-    y: list[float] = Field(default=None)
+    x: list[int] = Field(default=None)
+    y: list[int] = Field(default=None)
+
+    @staticmethod
+    def from_list(points: list[list[float]]) -> "Pose":
+        assert len(points) == 2, "Pose coordinates must be a list of 2 lists."
+        points_x, points_y = points
+        assert (
+            len(points_x) == len(points_y) == 17
+        ), "Pose x and y coordinates must have the same length of 17."
+        assert all(
+            isinstance(value, (int, float)) for value in [*points_x, *points_y]
+        ), "Pose coordinates must be integers or floats."
+        return Pose(
+            x=[round(coord) for coord in points_x],
+            y=[round(coord) for coord in points_y],
+        )
+
+    @staticmethod
+    def from_dict(points: dict[str, list[float]]) -> "Pose":
+        assert set(points) == {
+            "x",
+            "y",
+        }, "Pose coordinates must contain keys 'x' and 'y'."
+        points_x, points_y = points["x"], points["y"]
+        assert (
+            len(points_x) == len(points_y) == 17
+        ), "Pose x and y coordinates must have the same length of 17."
+        assert all(
+            isinstance(value, (int, float)) for value in [*points_x, *points_y]
+        ), "Pose coordinates must be integers or floats."
+        return Pose(
+            x=[round(coord) for coord in points_x],
+            y=[round(coord) for coord in points_y],
+        )


 class Pose3D(DataModel):

@@ -24,14 +57,52 @@ class Pose3D(DataModel):
     A data model for representing 3D pose keypoints.

     Attributes:
-        x (list[float]): The x-coordinates of the keypoints.
-        y (list[float]): The y-coordinates of the keypoints.
+        x (list[int]): The x-coordinates of the keypoints.
+        y (list[int]): The y-coordinates of the keypoints.
         visible (list[float]): The visibility of the keypoints.

     The keypoints are represented as lists of x, y, and visibility values,
     where each index corresponds to a specific body part.
     """

-    x: list[float] = Field(default=None)
-    y: list[float] = Field(default=None)
+    x: list[int] = Field(default=None)
+    y: list[int] = Field(default=None)
     visible: list[float] = Field(default=None)
+
+    @staticmethod
+    def from_list(points: list[list[float]]) -> "Pose3D":
+        assert len(points) == 3, "Pose coordinates must be a list of 3 lists."
+        points_x, points_y, points_v = points
+        assert (
+            len(points_x) == len(points_y) == len(points_v) == 17
+        ), "Pose x, y, and visibility coordinates must have the same length of 17."
+        assert all(
+            isinstance(value, (int, float))
+            for value in [*points_x, *points_y, *points_v]
+        ), "Pose coordinates must be integers or floats."
+        return Pose3D(
+            x=[round(coord) for coord in points_x],
+            y=[round(coord) for coord in points_y],
+            visible=points_v,
+        )
+
+    @staticmethod
+    def from_dict(points: dict[str, list[float]]) -> "Pose3D":
+        assert set(points) == {
+            "x",
+            "y",
+            "visible",
+        }, "Pose coordinates must contain keys 'x', 'y', and 'visible'."
+        points_x, points_y, points_v = points["x"], points["y"], points["visible"]
+        assert (
+            len(points_x) == len(points_y) == len(points_v) == 17
+        ), "Pose x, y, and visibility coordinates must have the same length of 17."
+        assert all(
+            isinstance(value, (int, float))
+            for value in [*points_x, *points_y, *points_v]
+        ), "Pose coordinates must be integers or floats."
+        return Pose3D(
+            x=[round(coord) for coord in points_x],
+            y=[round(coord) for coord in points_y],
+            visible=points_v,
+        )
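Similarly, a small sketch (with random placeholder values) of the 17-keypoint contract that the new Pose constructors assert.

```python
import random

from datachain.lib.models import Pose

# Pose.from_list expects exactly two lists of 17 values each
# (one x and one y per body keypoint) and rounds them to ints.
xs = [random.uniform(0, 640) for _ in range(17)]
ys = [random.uniform(0, 480) for _ in range(17)]

pose = Pose.from_list([xs, ys])
print(len(pose.x), len(pose.y))  # 17 17
```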
datachain/lib/models/segment.py ADDED

@@ -0,0 +1,53 @@
+from pydantic import Field
+
+from datachain.lib.data_model import DataModel
+
+
+class Segments(DataModel):
+    """
+    A data model for representing segments.
+
+    Attributes:
+        title (str): The title of the segments.
+        x (list[int]): The x-coordinates of the segments.
+        y (list[int]): The y-coordinates of the segments.
+
+    The segments are represented as lists of x and y coordinates, where each index
+    corresponds to a specific segment.
+    """
+
+    title: str = Field(default="")
+    x: list[int] = Field(default=None)
+    y: list[int] = Field(default=None)
+
+    @staticmethod
+    def from_list(points: list[list[float]], title: str = "") -> "Segments":
+        assert len(points) == 2, "Segments coordinates must be a list of 2 lists."
+        points_x, points_y = points
+        assert len(points_x) == len(
+            points_y
+        ), "Segments x and y coordinates must have the same length."
+        assert all(
+            isinstance(value, (int, float)) for value in [*points_x, *points_y]
+        ), "Segments coordinates must be integers or floats."
+        return Segments(
+            title=title,
+            x=[round(coord) for coord in points_x],
+            y=[round(coord) for coord in points_y],
+        )
+
+    @staticmethod
+    def from_dict(points: dict[str, list[float]], title: str = "") -> "Segments":
+        assert set(points) == {
+            "x",
+            "y",
+        }, "Segments coordinates must contain keys 'x' and 'y'."
+        points_x, points_y = points["x"], points["y"]
+        assert all(
+            isinstance(value, (int, float)) for value in [*points_x, *points_y]
+        ), "Segments coordinates must be integers or floats."
+        return Segments(
+            title=title,
+            x=[round(coord) for coord in points_x],
+            y=[round(coord) for coord in points_y],
+        )
datachain/lib/models/ultralytics/__init__.py ADDED

@@ -0,0 +1,14 @@
+from .bbox import YoloBBox, YoloBBoxes, YoloOBBox, YoloOBBoxes
+from .pose import YoloPose, YoloPoses
+from .segment import YoloSegment, YoloSegments
+
+__all__ = [
+    "YoloBBox",
+    "YoloBBoxes",
+    "YoloOBBox",
+    "YoloOBBoxes",
+    "YoloPose",
+    "YoloPoses",
+    "YoloSegment",
+    "YoloSegments",
+]
datachain/lib/models/ultralytics/bbox.py ADDED

@@ -0,0 +1,189 @@
+"""
+This module contains the YOLO models.
+
+YOLO stands for "You Only Look Once", a family of object detection models that
+are designed to be fast and accurate. The models are trained to detect objects
+in images by dividing the image into a grid and predicting the bounding boxes
+and class probabilities for each grid cell.
+
+More information about YOLO can be found here:
+- https://pjreddie.com/darknet/yolo/
+- https://docs.ultralytics.com/
+"""
+
+from io import BytesIO
+from typing import TYPE_CHECKING
+
+from PIL import Image
+from pydantic import Field
+
+from datachain.lib.data_model import DataModel
+from datachain.lib.models.bbox import BBox, OBBox
+
+if TYPE_CHECKING:
+    from ultralytics.engine.results import Results
+    from ultralytics.models import YOLO
+
+    from datachain.lib.file import File
+
+
+class YoloBBox(DataModel):
+    """
+    A class representing a bounding box detected by a YOLO model.
+
+    Attributes:
+        cls: The class of the detected object.
+        name: The name of the detected object.
+        confidence: The confidence score of the detection.
+        box: The bounding box of the detected object
+    """
+
+    cls: int = Field(default=-1)
+    name: str = Field(default="")
+    confidence: float = Field(default=0)
+    box: BBox = Field(default=None)
+
+    @staticmethod
+    def from_file(yolo: "YOLO", file: "File") -> "YoloBBox":
+        results = yolo(Image.open(BytesIO(file.read())))
+        if len(results) == 0:
+            return YoloBBox()
+        return YoloBBox.from_result(results[0])
+
+    @staticmethod
+    def from_result(result: "Results") -> "YoloBBox":
+        summary = result.summary()
+        if not summary:
+            return YoloBBox()
+        name = summary[0].get("name", "")
+        box = (
+            BBox.from_dict(summary[0]["box"], title=name)
+            if "box" in summary[0]
+            else BBox()
+        )
+        return YoloBBox(
+            cls=summary[0]["class"],
+            name=name,
+            confidence=summary[0]["confidence"],
+            box=box,
+        )
+
+
+class YoloBBoxes(DataModel):
+    """
+    A class representing a list of bounding boxes detected by a YOLO model.
+
+    Attributes:
+        cls: A list of classes of the detected objects.
+        name: A list of names of the detected objects.
+        confidence: A list of confidence scores of the detections.
+        box: A list of bounding boxes of the detected objects
+    """
+
+    cls: list[int]
+    name: list[str]
+    confidence: list[float]
+    box: list[BBox]
+
+    @staticmethod
+    def from_file(yolo: "YOLO", file: "File") -> "YoloBBoxes":
+        results = yolo(Image.open(BytesIO(file.read())))
+        return YoloBBoxes.from_results(results)
+
+    @staticmethod
+    def from_results(results: list["Results"]) -> "YoloBBoxes":
+        cls, names, confidence, box = [], [], [], []
+        for r in results:
+            for s in r.summary():
+                name = s.get("name", "")
+                cls.append(s["class"])
+                names.append(name)
+                confidence.append(s["confidence"])
+                box.append(BBox.from_dict(s.get("box", {}), title=name))
+        return YoloBBoxes(
+            cls=cls,
+            name=names,
+            confidence=confidence,
+            box=box,
+        )
+
+
+class YoloOBBox(DataModel):
+    """
+    A class representing an oriented bounding box detected by a YOLO model.
+
+    Attributes:
+        cls: The class of the detected object.
+        name: The name of the detected object.
+        confidence: The confidence score of the detection.
+        box: The oriented bounding box of the detected object.
+    """
+
+    cls: int = Field(default=-1)
+    name: str = Field(default="")
+    confidence: float = Field(default=0)
+    box: OBBox = Field(default=None)
+
+    @staticmethod
+    def from_file(yolo: "YOLO", file: "File") -> "YoloOBBox":
+        results = yolo(Image.open(BytesIO(file.read())))
+        if len(results) == 0:
+            return YoloOBBox()
+        return YoloOBBox.from_result(results[0])
+
+    @staticmethod
+    def from_result(result: "Results") -> "YoloOBBox":
+        summary = result.summary()
+        if not summary:
+            return YoloOBBox()
+        name = summary[0].get("name", "")
+        box = (
+            OBBox.from_dict(summary[0]["box"], title=name)
+            if "box" in summary[0]
+            else OBBox()
+        )
+        return YoloOBBox(
+            cls=summary[0]["class"],
+            name=name,
+            confidence=summary[0]["confidence"],
+            box=box,
+        )
+
+
+class YoloOBBoxes(DataModel):
+    """
+    A class representing a list of oriented bounding boxes detected by a YOLO model.
+
+    Attributes:
+        cls: A list of classes of the detected objects.
+        name: A list of names of the detected objects.
+        confidence: A list of confidence scores of the detections.
+        box: A list of oriented bounding boxes of the detected objects.
+    """
+
+    cls: list[int]
+    name: list[str]
+    confidence: list[float]
+    box: list[OBBox]
+
+    @staticmethod
+    def from_file(yolo: "YOLO", file: "File") -> "YoloOBBoxes":
+        results = yolo(Image.open(BytesIO(file.read())))
+        return YoloOBBoxes.from_results(results)
+
+    @staticmethod
+    def from_results(results: list["Results"]) -> "YoloOBBoxes":
+        cls, names, confidence, box = [], [], [], []
+        for r in results:
+            for s in r.summary():
+                name = s.get("name", "")
+                cls.append(s["class"])
+                names.append(name)
+                confidence.append(s["confidence"])
+                box.append(OBBox.from_dict(s.get("box", {}), title=name))
+        return YoloOBBoxes(
+            cls=cls,
+            name=names,
+            confidence=confidence,
+            box=box,
+        )
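A hedged end-to-end sketch of how the new ultralytics helpers could be wired into a chain. It assumes the optional ultralytics package is installed; the weights file, bucket path, and signal/dataset names are placeholders, not anything shipped in this release.

```python
from ultralytics import YOLO

from datachain import DataChain
from datachain.lib.file import File
from datachain.lib.models.ultralytics import YoloBBoxes

yolo = YOLO("yolov8n.pt")  # placeholder weights


def detect(file: File) -> YoloBBoxes:
    # YoloBBoxes.from_file reads the image bytes, runs the model, and
    # flattens the per-image summaries into list fields.
    return YoloBBoxes.from_file(yolo, file)


(
    DataChain.from_storage("s3://bucket/images/")  # placeholder bucket
    .map(boxes=detect)
    .save("image_detections")  # placeholder dataset name
)
```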
datachain/lib/models/ultralytics/pose.py ADDED

@@ -0,0 +1,126 @@
+"""
+This module contains the YOLO models.
+
+YOLO stands for "You Only Look Once", a family of object detection models that
+are designed to be fast and accurate. The models are trained to detect objects
+in images by dividing the image into a grid and predicting the bounding boxes
+and class probabilities for each grid cell.
+
+More information about YOLO can be found here:
+- https://pjreddie.com/darknet/yolo/
+- https://docs.ultralytics.com/
+"""
+
+from typing import TYPE_CHECKING
+
+from pydantic import Field
+
+from datachain.lib.data_model import DataModel
+from datachain.lib.models.bbox import BBox
+from datachain.lib.models.pose import Pose3D
+
+if TYPE_CHECKING:
+    from ultralytics.engine.results import Results
+
+
+class YoloPoseBodyPart:
+    """An enumeration of body parts for YOLO pose keypoints."""
+
+    nose = 0
+    left_eye = 1
+    right_eye = 2
+    left_ear = 3
+    right_ear = 4
+    left_shoulder = 5
+    right_shoulder = 6
+    left_elbow = 7
+    right_elbow = 8
+    left_wrist = 9
+    right_wrist = 10
+    left_hip = 11
+    right_hip = 12
+    left_knee = 13
+    right_knee = 14
+    left_ankle = 15
+    right_ankle = 16
+
+
+class YoloPose(DataModel):
+    """
+    A data model for YOLO pose keypoints.
+
+    Attributes:
+        cls: The class of the pose.
+        name: The name of the pose.
+        confidence: The confidence score of the pose.
+        box: The bounding box of the pose.
+        keypoints: The 3D pose keypoints.
+    """
+
+    cls: int = Field(default=-1)
+    name: str = Field(default="")
+    confidence: float = Field(default=0)
+    box: BBox = Field(default=None)
+    keypoints: Pose3D = Field(default=None)
+
+    @staticmethod
+    def from_result(result: "Results") -> "YoloPose":
+        summary = result.summary()
+        if not summary:
+            return YoloPose()
+        name = summary[0].get("name", "")
+        box = (
+            BBox.from_dict(summary[0]["box"], title=name)
+            if "box" in summary[0]
+            else BBox()
+        )
+        keypoints = (
+            Pose3D.from_dict(summary[0]["keypoints"])
+            if "keypoints" in summary[0]
+            else Pose3D()
+        )
+        return YoloPose(
+            cls=summary[0]["class"],
+            name=name,
+            confidence=summary[0]["confidence"],
+            box=box,
+            keypoints=keypoints,
+        )
+
+
+class YoloPoses(DataModel):
+    """
+    A data model for a list of YOLO pose keypoints.
+
+    Attributes:
+        cls: The classes of the poses.
+        name: The names of the poses.
+        confidence: The confidence scores of the poses.
+        box: The bounding boxes of the poses.
+        keypoints: The 3D pose keypoints of the poses.
+    """
+
+    cls: list[int]
+    name: list[str]
+    confidence: list[float]
+    box: list[BBox]
+    keypoints: list[Pose3D]
+
+    @staticmethod
+    def from_results(results: list["Results"]) -> "YoloPoses":
+        cls, names, confidence, box, keypoints = [], [], [], [], []
+        for r in results:
+            for s in r.summary():
+                name = s.get("name", "")
+                cls.append(s["class"])
+                names.append(name)
+                confidence.append(s["confidence"])
+                box.append(BBox.from_dict(s.get("box", {}), title=name))
+                keypoints.append(Pose3D.from_dict(s.get("keypoints", {})))
+        return YoloPoses(
+            cls=cls,
+            name=names,
+            confidence=confidence,
+            box=box,
+            keypoints=keypoints,
+        )
datachain/lib/models/ultralytics/segment.py ADDED

@@ -0,0 +1,121 @@
+"""
+This module contains the YOLO models.
+
+YOLO stands for "You Only Look Once", a family of object detection models that
+are designed to be fast and accurate. The models are trained to detect objects
+in images by dividing the image into a grid and predicting the bounding boxes
+and class probabilities for each grid cell.
+
+More information about YOLO can be found here:
+- https://pjreddie.com/darknet/yolo/
+- https://docs.ultralytics.com/
+"""
+
+from io import BytesIO
+from typing import TYPE_CHECKING
+
+from PIL import Image
+from pydantic import Field
+
+from datachain.lib.data_model import DataModel
+from datachain.lib.models.bbox import BBox
+from datachain.lib.models.segment import Segments
+
+if TYPE_CHECKING:
+    from ultralytics.engine.results import Results
+    from ultralytics.models import YOLO
+
+    from datachain.lib.file import File
+
+
+class YoloSegment(DataModel):
+    """
+    A data model for a single YOLO segment.
+
+    Attributes:
+        cls (int): The class of the segment.
+        name (str): The name of the segment.
+        confidence (float): The confidence of the segment.
+        box (BBox): The bounding box of the segment.
+        segments (Segments): The segments of the segment.
+    """
+
+    cls: int = Field(default=-1)
+    name: str = Field(default="")
+    confidence: float = Field(default=0)
+    box: BBox = Field(default=None)
+    segments: Segments = Field(default=None)
+
+    @staticmethod
+    def from_file(yolo: "YOLO", file: "File") -> "YoloSegment":
+        results = yolo(Image.open(BytesIO(file.read())))
+        if len(results) == 0:
+            return YoloSegment()
+        return YoloSegment.from_result(results[0])
+
+    @staticmethod
+    def from_result(result: "Results") -> "YoloSegment":
+        summary = result.summary()
+        if not summary:
+            return YoloSegment()
+        name = summary[0].get("name", "")
+        box = (
+            BBox.from_dict(summary[0]["box"], title=name)
+            if "box" in summary[0]
+            else BBox()
+        )
+        segments = (
+            Segments.from_dict(summary[0]["segments"], title=name)
+            if "segments" in summary[0]
+            else Segments()
+        )
+        return YoloSegment(
+            cls=summary[0]["class"],
+            name=summary[0]["name"],
+            confidence=summary[0]["confidence"],
+            box=box,
+            segments=segments,
+        )
+
+
+class YoloSegments(DataModel):
+    """
+    A data model for a list of YOLO segments.
+
+    Attributes:
+        cls (list[int]): The classes of the segments.
+        name (list[str]): The names of the segments.
+        confidence (list[float]): The confidences of the segments.
+        box (list[BBox]): The bounding boxes of the segments.
+        segments (list[Segments]): The segments of the segments.
+    """
+
+    cls: list[int]
+    name: list[str]
+    confidence: list[float]
+    box: list[BBox]
+    segments: list[Segments]
+
+    @staticmethod
+    def from_file(yolo: "YOLO", file: "File") -> "YoloSegments":
+        results = yolo(Image.open(BytesIO(file.read())))
+        return YoloSegments.from_results(results)
+
+    @staticmethod
+    def from_results(results: list["Results"]) -> "YoloSegments":
+        cls, names, confidence, box, segments = [], [], [], [], []
+        for r in results:
+            for s in r.summary():
+                name = s.get("name", "")
+                cls.append(s["class"])
+                names.append(name)
+                confidence.append(s["confidence"])
+                box.append(BBox.from_dict(s.get("box", {}), title=name))
+                segments.append(Segments.from_dict(s.get("segments", {}), title=name))
+        return YoloSegments(
+            cls=cls,
+            name=names,
+            confidence=confidence,
+            box=box,
+            segments=segments,
+        )
datachain/listing.py CHANGED
@@ -1,6 +1,7 @@
 import glob
 import os
 from collections.abc import Iterable, Iterator
+from functools import cached_property
 from itertools import zip_longest
 from typing import TYPE_CHECKING, Optional

@@ -15,28 +16,34 @@ from datachain.utils import suffix_to_number
 if TYPE_CHECKING:
     from datachain.catalog.datasource import DataSource
     from datachain.client import Client
-    from datachain.data_storage import AbstractWarehouse
+    from datachain.data_storage import AbstractMetastore, AbstractWarehouse
     from datachain.dataset import DatasetRecord


 class Listing:
     def __init__(
         self,
+        metastore: "AbstractMetastore",
         warehouse: "AbstractWarehouse",
         client: "Client",
-        dataset: Optional["DatasetRecord"],
+        dataset_name: Optional["str"] = None,
+        dataset_version: Optional[int] = None,
         object_name: str = "file",
     ):
+        self.metastore = metastore
         self.warehouse = warehouse
         self.client = client
-        self.dataset = dataset  # dataset representing bucket listing
+        self.dataset_name = dataset_name  # dataset representing bucket listing
+        self.dataset_version = dataset_version  # dataset representing bucket listing
         self.object_name = object_name

     def clone(self) -> "Listing":
         return self.__class__(
+            self.metastore.clone(),
             self.warehouse.clone(),
             self.client,
-            self.dataset,
+            self.dataset_name,
+            self.dataset_version,
             self.object_name,
         )

@@ -53,12 +60,22 @@
     def uri(self):
         from datachain.lib.listing import listing_uri_from_name

-        return listing_uri_from_name(self.dataset.name)
+        assert self.dataset_name

-    @property
+        return listing_uri_from_name(self.dataset_name)
+
+    @cached_property
+    def dataset(self) -> "DatasetRecord":
+        assert self.dataset_name
+        return self.metastore.get_dataset(self.dataset_name)
+
+    @cached_property
     def dataset_rows(self):
+        dataset = self.dataset
         return self.warehouse.dataset_rows(
-            self.dataset, self.dataset.latest_version, object_name=self.object_name
+            dataset,
+            self.dataset_version or dataset.latest_version,
+            object_name=self.object_name,
         )

     def expand_path(self, path, use_glob=True) -> list[Node]:
datachain/toolkit/__init__.py ADDED

@@ -0,0 +1,3 @@
+from .split import train_test_split
+
+__all__ = ["train_test_split"]
datachain/toolkit/split.py ADDED

@@ -0,0 +1,67 @@
+from datachain import C, DataChain
+
+
+def train_test_split(dc: DataChain, weights: list[float]) -> list[DataChain]:
+    """
+    Splits a DataChain into multiple subsets based on the provided weights.
+
+    This function partitions the rows or items of a DataChain into disjoint subsets,
+    ensuring that the relative sizes of the subsets correspond to the given weights.
+    It is particularly useful for creating training, validation, and test datasets.
+
+    Args:
+        dc (DataChain):
+            The DataChain instance to split.
+        weights (list[float]):
+            A list of weights indicating the relative proportions of the splits.
+            The weights do not need to sum to 1; they will be normalized internally.
+            For example:
+            - `[0.7, 0.3]` corresponds to a 70/30 split;
+            - `[2, 1, 1]` corresponds to a 50/25/25 split.
+
+    Returns:
+        list[DataChain]:
+            A list of DataChain instances, one for each weight in the weights list.
+
+    Examples:
+        Train-test split:
+        ```python
+        from datachain import DataChain
+        from datachain.toolkit import train_test_split
+
+        # Load a DataChain from a storage source (e.g., S3 bucket)
+        dc = DataChain.from_storage("s3://bucket/dir/")
+
+        # Perform a 70/30 train-test split
+        train, test = train_test_split(dc, [0.7, 0.3])
+
+        # Save the resulting splits
+        train.save("dataset_train")
+        test.save("dataset_test")
+        ```
+
+        Train-test-validation split:
+        ```python
+        train, test, val = train_test_split(dc, [0.7, 0.2, 0.1])
+        train.save("dataset_train")
+        test.save("dataset_test")
+        val.save("dataset_val")
+        ```
+
+    Note:
+        The splits are random but deterministic, based on Dataset `sys__rand` field.
+    """
+    if len(weights) < 2:
+        raise ValueError("Weights should have at least two elements")
+    if any(weight < 0 for weight in weights):
+        raise ValueError("Weights should be non-negative")
+
+    weights_normalized = [weight / sum(weights) for weight in weights]
+
+    return [
+        dc.filter(
+            C("sys__rand") % 1000 >= round(sum(weights_normalized[:index]) * 1000),
+            C("sys__rand") % 1000 < round(sum(weights_normalized[: index + 1]) * 1000),
+        )
+        for index, _ in enumerate(weights_normalized)
+    ]
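To make the deterministic split concrete, here is a small worked sketch of the boundary arithmetic that train_test_split applies to sys__rand % 1000 (the weights come from the docstring example; the probe values in the loop are illustrative):

```python
weights = [0.7, 0.3]
normalized = [w / sum(weights) for w in weights]  # [0.7, 0.3]

# Cumulative boundaries scaled to the 0..999 range of sys__rand % 1000.
bounds = [round(sum(normalized[: i + 1]) * 1000) for i in range(len(normalized))]
# bounds == [700, 1000]

# Rows with sys__rand % 1000 in [0, 700) land in the first split,
# rows in [700, 1000) land in the second split.
for rand in (123, 699, 700, 999):
    split = next(i for i, b in enumerate(bounds) if rand < b)
    print(rand, "->", split)
```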
datachain-0.6.9.dist-info/METADATA → datachain-0.6.10.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: datachain
-Version: 0.6.9
+Version: 0.6.10
 Summary: Wrangle unstructured AI data at scale
 Author-email: Dmitry Petrov <support@dvc.org>
 License: Apache-2.0

@@ -82,7 +82,7 @@ Requires-Dist: pytest <9,>=8 ; extra == 'tests'
 Requires-Dist: pytest-sugar >=0.9.6 ; extra == 'tests'
 Requires-Dist: pytest-cov >=4.1.0 ; extra == 'tests'
 Requires-Dist: pytest-mock >=3.12.0 ; extra == 'tests'
-Requires-Dist: pytest-servers[all] >=0.5.7 ; extra == 'tests'
+Requires-Dist: pytest-servers[all] >=0.5.8 ; extra == 'tests'
 Requires-Dist: pytest-benchmark[histogram] ; extra == 'tests'
 Requires-Dist: pytest-xdist >=3.3.1 ; extra == 'tests'
 Requires-Dist: virtualenv ; extra == 'tests'
datachain-0.6.9.dist-info/RECORD → datachain-0.6.10.dist-info/RECORD CHANGED

@@ -8,7 +8,7 @@ datachain/config.py,sha256=g8qbNV0vW2VEKpX-dGZ9pAn0DAz6G2ZFcr7SAV3PoSM,4272
 datachain/dataset.py,sha256=0IN-5y723y-bnFlieKtOFZLCjwX_yplFo3q0DV7LRPw,14821
 datachain/error.py,sha256=bxAAL32lSeMgzsQDEHbGTGORj-mPzzpCRvWDPueJNN4,1092
 datachain/job.py,sha256=Jt4sNutMHJReaGsj3r3scueN5aESLGfhimAa8pUP7Is,1271
-datachain/listing.py,sha256=AV23WZq-k6e2zeeNBhVQP1-2PrwNCYidO0HBDKzpVaA,7152
+datachain/listing.py,sha256=TgKg25ZWAP5enzKgw2_2GUPJVdnQUh6uySHB5SJrUY4,7773
 datachain/node.py,sha256=i7_jC8VcW6W5VYkDszAOu0H-rNBuqXB4UnLEh4wFzjc,5195
 datachain/nodes_fetcher.py,sha256=F-73-h19HHNGtHFBGKk7p3mc0ALm4a9zGnzhtuUjnp4,1107
 datachain/nodes_thread_pool.py,sha256=uPo-xl8zG5m9YgODjPFBpbcqqHjI-dcxH87yAbj_qco,3192

@@ -18,7 +18,7 @@ datachain/studio.py,sha256=6kxF7VxPAbh9D7_Bk8_SghS5OXrwUwSpDaw19eNCTP4,4083
 datachain/telemetry.py,sha256=0A4IOPPp9VlP5pyW9eBfaTK3YhHGzHl7dQudQjUAx9A,994
 datachain/utils.py,sha256=-mSFowjIidJ4_sMXInvNHLn4rK_QnHuIlLuH1_lMGmI,13897
 datachain/catalog/__init__.py,sha256=g2iAAFx_gEIrqshXlhSEbrc8qDaEH11cjU40n3CHDz4,409
-datachain/catalog/catalog.py,sha256=Iwb562grttdGcrNVHCna_n7e884BqwGhQwAgYagBwyg,57347
+datachain/catalog/catalog.py,sha256=J1nUWLI4RYCvvR6fB4neQBtB7V-CTh4PM71irhNmJc4,57817
 datachain/catalog/datasource.py,sha256=D-VWIVDCM10A8sQavLhRXdYSCG7F4o4ifswEF80_NAQ,1412
 datachain/catalog/loader.py,sha256=-6VelNfXUdgUnwInVyA8g86Boxv2xqhTh9xNS-Zlwig,8242
 datachain/client/__init__.py,sha256=T4wiYL9KIM0ZZ_UqIyzV8_ufzYlewmizlV4iymHNluE,86

@@ -36,14 +36,14 @@ datachain/data_storage/job.py,sha256=w-7spowjkOa1P5fUVtJou3OltT0L48P0RYWZ9rSJ9-s
 datachain/data_storage/metastore.py,sha256=5b7o_CSHC2djottebYn-Hq5q0yaSLOKPIRCnaVRvjsU,36056
 datachain/data_storage/schema.py,sha256=scANMQqozita3HjEtq7eupMgh6yYkrZHoXtfuL2RoQg,9879
 datachain/data_storage/serializer.py,sha256=6G2YtOFqqDzJf1KbvZraKGXl2XHZyVml2krunWUum5o,927
-datachain/data_storage/sqlite.py,sha256=wb8xlMJYYyt59wft0psJj587d-AwpNThzIqspVcKnRI,27388
+datachain/data_storage/sqlite.py,sha256=CspRUlYsIcubgzvcQxTACnmcuKESSLZcqCl0dcrtRiA,27471
 datachain/data_storage/warehouse.py,sha256=xwMaR4jBpR13vjG3zrhphH4z2_CFLNj0KPF0LJCXCJ8,30727
 datachain/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 datachain/lib/arrow.py,sha256=-hu9tic79a01SY2UBqkA3U6wUr6tnE3T3q5q_BnO93A,9156
 datachain/lib/clip.py,sha256=lm5CzVi4Cj1jVLEKvERKArb-egb9j1Ls-fwTItT6vlI,6150
 datachain/lib/data_model.py,sha256=dau4AlZBhOFvF7pEKMeqCeRkcFFg5KFvTBWW_2CdH5g,2371
 datachain/lib/dataset_info.py,sha256=q0EW9tj5jXGSD9Lzct9zbH4P1lfIGd_cIWqhnMxv7Q0,2464
-datachain/lib/dc.py,sha256=RQ8p95rzCMRY4ygFecO_hhQ3IgQHmbLXNqhcaINvGcI,85841
+datachain/lib/dc.py,sha256=BmRgCt5fXvBqlFV07KN-nWszueRyCkC7td1x7T4BZ7k,87688
 datachain/lib/file.py,sha256=lHxE1wOGR4QJBQ3AYjhPLwpX72dOi06vkcwA-WSAGlg,14817
 datachain/lib/hf.py,sha256=BW2NPpqxkpPwkSaGlppT8Rbs8zPpyYC-tR6htY08c-0,5817
 datachain/lib/image.py,sha256=AMXYwQsmarZjRbPCZY3M1jDsM2WAB_b3cTY4uOIuXNU,2675

@@ -71,10 +71,14 @@ datachain/lib/convert/values_to_tuples.py,sha256=varRCnSMT_pZmHznrd2Yi05qXLLz_v9
 datachain/lib/func/__init__.py,sha256=wlAKhGV0QDg9y7reSwoUF8Vicfqh_YOUNIXLzxICGz4,403
 datachain/lib/func/aggregate.py,sha256=H1ziFQdaK9zvnxvttfnEzkkyGvEEmMAvmgCsBV6nfm8,10917
 datachain/lib/func/func.py,sha256=HAJZ_tpiRG2R-et7pr0WnoyNZYtpbPn3_HBuL3RQpbU,4800
-datachain/lib/models/__init__.py,sha256=AGvjPbUokJiir3uelTa4XGtNSECkMFc5Xmi_N3AtxPQ,119
-datachain/lib/models/bbox.py,sha256=aiYNhvEcRK3dEN4MBcptmkPKc9kMP16ZQdu7xPk6hek,1555
-datachain/lib/models/pose.py,sha256=peuJPNSiGuTXfCfGIABwv8PGYistvTTBmtf-8X8E_eA,1077
-datachain/lib/models/yolo.py,sha256=eftoJDUa8iOpFTF1EkKVAd5Q-3HRd6X4eCIZ9h5p4nI,972
+datachain/lib/models/__init__.py,sha256=6iwqXWcybyELKdLEe59yUPl8R8ZHDY4lA-xCHVYPdOA,191
+datachain/lib/models/bbox.py,sha256=UJ_64D8TQglX2B_ueseILPoT3cGIWr9McVg0mv2YdmE,3717
+datachain/lib/models/pose.py,sha256=KC-OpLC7-3v6qg4YN6pXlfAgtg88VLQoRc75JCEmbfY,3931
+datachain/lib/models/segment.py,sha256=ergCFnEzLDzaU75p1_KvWgal1LSv4VuFmkWLkRJeaVk,1862
+datachain/lib/models/ultralytics/__init__.py,sha256=g8mgII0k_RJiOG9kd4k_ECfCgDhT_iPh3vCC_5OiDD4,305
+datachain/lib/models/ultralytics/bbox.py,sha256=LAaezAnnugfBiczWZ63NTo65kX2BegR5WGXjQTOTE28,5784
+datachain/lib/models/ultralytics/pose.py,sha256=nMoEeeY_Zi7Iiu7vIo9ZTq8ARUdg_BcZMQIA_WgRNk4,3488
+datachain/lib/models/ultralytics/segment.py,sha256=IHnthsq6uQ6DSdHLK2akbdd0Eq8wW7oaAK6pUG8nxJc,3818
 datachain/query/__init__.py,sha256=7DhEIjAA8uZJfejruAVMZVcGFmvUpffuZJwgRqNwe-c,263
 datachain/query/batch.py,sha256=5fEhORFe7li12SdYddaSK3LyqksMfCHhwN1_A6TfsA4,3485
 datachain/query/dataset.py,sha256=MGArYxioeGvm8w7hQtQAjEI6wsZN_XAoh4-jO4d0U5Q,53926

@@ -103,10 +107,12 @@ datachain/sql/sqlite/__init__.py,sha256=TAdJX0Bg28XdqPO-QwUVKy8rg78cgMileHvMNot7
 datachain/sql/sqlite/base.py,sha256=aHSZVvh4XSVkvZ07h3jMoRlHI4sWD8y3SnmGs9xMG9Y,14375
 datachain/sql/sqlite/types.py,sha256=yzvp0sXSEoEYXs6zaYC_2YubarQoZH-MiUNXcpuEP4s,1573
 datachain/sql/sqlite/vector.py,sha256=ncW4eu2FlJhrP_CIpsvtkUabZlQdl2D5Lgwy_cbfqR0,469
+datachain/toolkit/__init__.py,sha256=eQ58Q5Yf_Fgv1ZG0IO5dpB4jmP90rk8YxUWmPc1M2Bo,68
+datachain/toolkit/split.py,sha256=6FcEJgUsJsUcCqKW5aXuJy4DvbcQ7_dFbsfNPhn8EVg,2377
 datachain/torch/__init__.py,sha256=gIS74PoEPy4TB3X6vx9nLO0Y3sLJzsA8ckn8pRWihJM,579
-datachain-0.6.9.dist-info/LICENSE,sha256=8DnqK5yoPI_E50bEg_zsHKZHY2HqPy4rYN338BHQaRA,11344
-datachain-0.6.9.dist-info/METADATA,sha256=McKhuW43_7Q3iJKxueIYbk-rpYF6rbIKeFinzeeUzMo,18037
-datachain-0.6.9.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
-datachain-0.6.9.dist-info/entry_points.txt,sha256=0GMJS6B_KWq0m3VT98vQI2YZodAMkn4uReZ_okga9R4,49
-datachain-0.6.9.dist-info/top_level.txt,sha256=lZPpdU_2jJABLNIg2kvEOBi8PtsYikbN1OdMLHk8bTg,10
-datachain-0.6.9.dist-info/RECORD,,
+datachain-0.6.10.dist-info/LICENSE,sha256=8DnqK5yoPI_E50bEg_zsHKZHY2HqPy4rYN338BHQaRA,11344
+datachain-0.6.10.dist-info/METADATA,sha256=AgQuuefAhZRIL1jDJWz-q4daqA5ZmnQN8dafqnt01XA,18038
+datachain-0.6.10.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+datachain-0.6.10.dist-info/entry_points.txt,sha256=0GMJS6B_KWq0m3VT98vQI2YZodAMkn4uReZ_okga9R4,49
+datachain-0.6.10.dist-info/top_level.txt,sha256=lZPpdU_2jJABLNIg2kvEOBi8PtsYikbN1OdMLHk8bTg,10
+datachain-0.6.10.dist-info/RECORD,,
datachain/lib/models/yolo.py DELETED

@@ -1,39 +0,0 @@
-"""
-This module contains the YOLO models.
-
-YOLO stands for "You Only Look Once", a family of object detection models that
-are designed to be fast and accurate. The models are trained to detect objects
-in images by dividing the image into a grid and predicting the bounding boxes
-and class probabilities for each grid cell.
-
-More information about YOLO can be found here:
-- https://pjreddie.com/darknet/yolo/
-- https://docs.ultralytics.com/
-"""
-
-
-class PoseBodyPart:
-    """
-    An enumeration of body parts for YOLO pose keypoints.
-
-    More information about the body parts can be found here:
-    https://docs.ultralytics.com/tasks/pose/
-    """
-
-    nose = 0
-    left_eye = 1
-    right_eye = 2
-    left_ear = 3
-    right_ear = 4
-    left_shoulder = 5
-    right_shoulder = 6
-    left_elbow = 7
-    right_elbow = 8
-    left_wrist = 9
-    right_wrist = 10
-    left_hip = 11
-    right_hip = 12
-    left_knee = 13
-    right_knee = 14
-    left_ankle = 15
-    right_ankle = 16