guidellm 0.4.0a18__py3-none-any.whl → 0.4.0a155__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of guidellm has been flagged as potentially problematic by the registry scanner.
Files changed (116)
  1. guidellm/__init__.py +5 -2
  2. guidellm/__main__.py +451 -252
  3. guidellm/backends/__init__.py +33 -0
  4. guidellm/backends/backend.py +110 -0
  5. guidellm/backends/openai.py +355 -0
  6. guidellm/backends/response_handlers.py +455 -0
  7. guidellm/benchmark/__init__.py +53 -39
  8. guidellm/benchmark/benchmarker.py +148 -317
  9. guidellm/benchmark/entrypoints.py +466 -128
  10. guidellm/benchmark/output.py +517 -771
  11. guidellm/benchmark/profile.py +580 -280
  12. guidellm/benchmark/progress.py +568 -549
  13. guidellm/benchmark/scenarios/__init__.py +40 -0
  14. guidellm/benchmark/scenarios/chat.json +6 -0
  15. guidellm/benchmark/scenarios/rag.json +6 -0
  16. guidellm/benchmark/schemas.py +2085 -0
  17. guidellm/data/__init__.py +28 -4
  18. guidellm/data/collators.py +16 -0
  19. guidellm/data/deserializers/__init__.py +53 -0
  20. guidellm/data/deserializers/deserializer.py +109 -0
  21. guidellm/data/deserializers/file.py +222 -0
  22. guidellm/data/deserializers/huggingface.py +94 -0
  23. guidellm/data/deserializers/memory.py +192 -0
  24. guidellm/data/deserializers/synthetic.py +346 -0
  25. guidellm/data/loaders.py +145 -0
  26. guidellm/data/preprocessors/__init__.py +25 -0
  27. guidellm/data/preprocessors/formatters.py +412 -0
  28. guidellm/data/preprocessors/mappers.py +198 -0
  29. guidellm/data/preprocessors/preprocessor.py +29 -0
  30. guidellm/data/processor.py +30 -0
  31. guidellm/data/schemas.py +13 -0
  32. guidellm/data/utils/__init__.py +10 -0
  33. guidellm/data/utils/dataset.py +94 -0
  34. guidellm/data/utils/functions.py +18 -0
  35. guidellm/extras/__init__.py +4 -0
  36. guidellm/extras/audio.py +215 -0
  37. guidellm/extras/vision.py +242 -0
  38. guidellm/logger.py +2 -2
  39. guidellm/mock_server/__init__.py +8 -0
  40. guidellm/mock_server/config.py +84 -0
  41. guidellm/mock_server/handlers/__init__.py +17 -0
  42. guidellm/mock_server/handlers/chat_completions.py +280 -0
  43. guidellm/mock_server/handlers/completions.py +280 -0
  44. guidellm/mock_server/handlers/tokenizer.py +142 -0
  45. guidellm/mock_server/models.py +510 -0
  46. guidellm/mock_server/server.py +168 -0
  47. guidellm/mock_server/utils.py +302 -0
  48. guidellm/preprocess/dataset.py +23 -26
  49. guidellm/presentation/builder.py +2 -2
  50. guidellm/presentation/data_models.py +25 -21
  51. guidellm/presentation/injector.py +2 -3
  52. guidellm/scheduler/__init__.py +65 -26
  53. guidellm/scheduler/constraints.py +1035 -0
  54. guidellm/scheduler/environments.py +252 -0
  55. guidellm/scheduler/scheduler.py +140 -368
  56. guidellm/scheduler/schemas.py +272 -0
  57. guidellm/scheduler/strategies.py +519 -0
  58. guidellm/scheduler/worker.py +391 -420
  59. guidellm/scheduler/worker_group.py +707 -0
  60. guidellm/schemas/__init__.py +31 -0
  61. guidellm/schemas/info.py +159 -0
  62. guidellm/schemas/request.py +216 -0
  63. guidellm/schemas/response.py +119 -0
  64. guidellm/schemas/stats.py +228 -0
  65. guidellm/{config.py → settings.py} +32 -21
  66. guidellm/utils/__init__.py +95 -8
  67. guidellm/utils/auto_importer.py +98 -0
  68. guidellm/utils/cli.py +46 -2
  69. guidellm/utils/console.py +183 -0
  70. guidellm/utils/encoding.py +778 -0
  71. guidellm/utils/functions.py +134 -0
  72. guidellm/utils/hf_datasets.py +1 -2
  73. guidellm/utils/hf_transformers.py +4 -4
  74. guidellm/utils/imports.py +9 -0
  75. guidellm/utils/messaging.py +1118 -0
  76. guidellm/utils/mixins.py +115 -0
  77. guidellm/utils/pydantic_utils.py +411 -0
  78. guidellm/utils/random.py +3 -4
  79. guidellm/utils/registry.py +220 -0
  80. guidellm/utils/singleton.py +133 -0
  81. guidellm/{objects → utils}/statistics.py +341 -247
  82. guidellm/utils/synchronous.py +159 -0
  83. guidellm/utils/text.py +163 -50
  84. guidellm/utils/typing.py +41 -0
  85. guidellm/version.py +1 -1
  86. {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/METADATA +33 -10
  87. guidellm-0.4.0a155.dist-info/RECORD +96 -0
  88. guidellm/backend/__init__.py +0 -23
  89. guidellm/backend/backend.py +0 -259
  90. guidellm/backend/openai.py +0 -705
  91. guidellm/backend/response.py +0 -136
  92. guidellm/benchmark/aggregator.py +0 -760
  93. guidellm/benchmark/benchmark.py +0 -837
  94. guidellm/benchmark/scenario.py +0 -104
  95. guidellm/data/prideandprejudice.txt.gz +0 -0
  96. guidellm/dataset/__init__.py +0 -22
  97. guidellm/dataset/creator.py +0 -213
  98. guidellm/dataset/entrypoints.py +0 -42
  99. guidellm/dataset/file.py +0 -92
  100. guidellm/dataset/hf_datasets.py +0 -62
  101. guidellm/dataset/in_memory.py +0 -132
  102. guidellm/dataset/synthetic.py +0 -287
  103. guidellm/objects/__init__.py +0 -18
  104. guidellm/objects/pydantic.py +0 -89
  105. guidellm/request/__init__.py +0 -18
  106. guidellm/request/loader.py +0 -284
  107. guidellm/request/request.py +0 -79
  108. guidellm/request/types.py +0 -10
  109. guidellm/scheduler/queues.py +0 -25
  110. guidellm/scheduler/result.py +0 -155
  111. guidellm/scheduler/strategy.py +0 -495
  112. guidellm-0.4.0a18.dist-info/RECORD +0 -62
  113. {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/WHEEL +0 -0
  114. {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/entry_points.txt +0 -0
  115. {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/licenses/LICENSE +0 -0
  116. {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/top_level.txt +0 -0
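Note: read together, the renames and removals in the list above amount to a package restructure. The module moves sketched below are inferred from that listing only; it is an illustrative mapping, not an official migration table.

```python
# Module moves implied by the "Files changed" list (old import path -> new path).
# Inferred from the renames/removals above; entry numbers refer to that list.
MODULE_MOVES = {
    "guidellm.config": "guidellm.settings",                      # entry 65
    "guidellm.objects.statistics": "guidellm.utils.statistics",  # entry 81
    "guidellm.backend": "guidellm.backends",                     # entries 3-6 replace 88-91
    "guidellm.dataset": "guidellm.data.deserializers",           # entries 19-24 replace 96-102
}

if __name__ == "__main__":
    for old, new in MODULE_MOVES.items():
        print(f"{old:28} -> {new}")
```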
guidellm/benchmark/scenario.py DELETED
@@ -1,104 +0,0 @@
- from collections.abc import Iterable
- from functools import cache
- from pathlib import Path
- from typing import Annotated, Any, Literal, Optional, TypeVar, Union
-
- from datasets import Dataset, DatasetDict, IterableDataset, IterableDatasetDict
- from pydantic import BeforeValidator, Field, NonNegativeInt, PositiveFloat, PositiveInt
- from transformers.tokenization_utils_base import (  # type: ignore[import]
-     PreTrainedTokenizerBase,
- )
-
- from guidellm.backend.backend import BackendType
- from guidellm.benchmark.profile import ProfileType
- from guidellm.objects.pydantic import StandardBaseModel
- from guidellm.scheduler.strategy import StrategyType
-
- __ALL__ = ["Scenario", "GenerativeTextScenario", "get_builtin_scenarios"]
-
- SCENARIO_DIR = Path(__file__).parent / "scenarios/"
-
-
- @cache
- def get_builtin_scenarios() -> list[str]:
-     """Returns list of builtin scenario names."""
-     return [p.stem for p in SCENARIO_DIR.glob("*.json")]
-
-
- def parse_float_list(value: Union[str, float, list[float]]) -> list[float]:
-     """
-     Parse a comma separated string to a list of float
-     or convert single float list of one or pass float
-     list through.
-     """
-     if isinstance(value, (int, float)):
-         return [value]
-     elif isinstance(value, list):
-         return value
-
-     values = value.split(",") if "," in value else [value]
-
-     try:
-         return [float(val) for val in values]
-     except ValueError as err:
-         raise ValueError(
-             "must be a number or comma-separated list of numbers."
-         ) from err
-
-
- T = TypeVar("T", bound="Scenario")
-
-
- class Scenario(StandardBaseModel):
-     """
-     Parent Scenario class with common options for all benchmarking types.
-     """
-
-     target: str
-
-     @classmethod
-     def from_builtin(cls: type[T], name: str, overrides: Optional[dict] = None) -> T:
-         filename = SCENARIO_DIR / f"{name}.json"
-
-         if not filename.is_file():
-             raise ValueError(f"{name} is not a valid builtin scenario")
-
-         return cls.from_file(filename, overrides)
-
-
- class GenerativeTextScenario(Scenario):
-     """
-     Scenario class for generative text benchmarks.
-     """
-
-     class Config:
-         # NOTE: This prevents errors due to unvalidatable
-         # types like PreTrainedTokenizerBase
-         arbitrary_types_allowed = True
-
-     backend_type: BackendType = "openai_http"
-     backend_args: Optional[dict[str, Any]] = None
-     model: Optional[str] = None
-     processor: Optional[Union[str, Path, PreTrainedTokenizerBase]] = None
-     processor_args: Optional[dict[str, Any]] = None
-     data: Union[
-         str,
-         Path,
-         Iterable[Union[str, dict[str, Any]]],
-         Dataset,
-         DatasetDict,
-         IterableDataset,
-         IterableDatasetDict,
-     ]
-     data_args: Optional[dict[str, Any]] = None
-     data_sampler: Optional[Literal["random"]] = None
-     rate_type: Union[StrategyType, ProfileType]
-     rate: Annotated[
-         Optional[list[PositiveFloat]], BeforeValidator(parse_float_list)
-     ] = None
-     max_seconds: Optional[PositiveFloat] = None
-     max_requests: Optional[PositiveInt] = None
-     warmup_percent: Annotated[Optional[float], Field(gt=0, le=1)] = None
-     cooldown_percent: Annotated[Optional[float], Field(gt=0, le=1)] = None
-     output_sampling: Optional[NonNegativeInt] = None
-     random_seed: int = 42
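For reference, the removed `parse_float_list` helper above backed the `BeforeValidator` on `GenerativeTextScenario.rate`. A minimal sketch of its behavior, reconstructed from the deleted code (not a test shipped with the package):

```python
# 0.4.0a18 layout; this module is removed in 0.4.0a155.
from guidellm.benchmark.scenario import parse_float_list

assert parse_float_list("1.5") == [1.5]                 # single number string
assert parse_float_list("0.5,1,2") == [0.5, 1.0, 2.0]   # comma-separated string
assert parse_float_list(3) == [3]                       # bare number is wrapped
assert parse_float_list([1.0, 2.0]) == [1.0, 2.0]       # lists pass through unchanged
# So a CLI-style rate of "10,20,40" validated into [10.0, 20.0, 40.0] on the
# GenerativeTextScenario.rate field.
```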
guidellm/data/prideandprejudice.txt.gz DELETED (binary file)
guidellm/dataset/__init__.py DELETED
@@ -1,22 +0,0 @@
- from .creator import ColumnInputTypes, DatasetCreator
- from .entrypoints import load_dataset
- from .file import FileDatasetCreator
- from .hf_datasets import HFDatasetsCreator
- from .in_memory import InMemoryDatasetCreator
- from .synthetic import (
-     SyntheticDatasetConfig,
-     SyntheticDatasetCreator,
-     SyntheticTextItemsGenerator,
- )
-
- __all__ = [
-     "ColumnInputTypes",
-     "DatasetCreator",
-     "FileDatasetCreator",
-     "HFDatasetsCreator",
-     "InMemoryDatasetCreator",
-     "SyntheticDatasetConfig",
-     "SyntheticDatasetCreator",
-     "SyntheticTextItemsGenerator",
-     "load_dataset",
- ]
guidellm/dataset/creator.py DELETED
@@ -1,213 +0,0 @@
- from abc import ABC, abstractmethod
- from pathlib import Path
- from typing import Any, Literal, Optional, Union
-
- from datasets import Dataset, DatasetDict, IterableDataset, IterableDatasetDict
- from transformers import PreTrainedTokenizerBase  # type: ignore[import]
-
- __all__ = ["ColumnInputTypes", "DatasetCreator"]
-
- ColumnInputTypes = Literal[
-     "prompt_column",
-     "text_column",
-     "prompt_tokens_count_column",
-     "output_tokens_count_column",
- ]
-
-
- class DatasetCreator(ABC):
-     DEFAULT_SPLITS_TRAIN = [
-         "train",
-         "training",
-         "train_set",
-         "training_set",
-         "train_dataset",
-         "training_dataset",
-         "train_data",
-         "training_data",
-         "pretrain",
-         "pretrain_set",
-         "pretrain_dataset",
-         "pretrain_data",
-         "pretraining",
-     ]
-     DEFAULT_SPLITS_CALIB = [
-         "calibration",
-         "calib",
-         "cal",
-         "calibration_set",
-         "calib_set",
-         "cal_set",
-         "calibration_dataset",
-         "calib_dataset",
-         "cal_set",
-         "calibration_data",
-         "calib_data",
-         "cal_data",
-     ]
-     DEFAULT_SPLITS_VAL = [
-         "validation",
-         "val",
-         "valid",
-         "validation_set",
-         "val_set",
-         "validation_dataset",
-         "val_dataset",
-         "validation_data",
-         "val_data",
-         "dev",
-         "dev_set",
-         "dev_dataset",
-         "dev_data",
-     ]
-     DEFAULT_SPLITS_TEST = [
-         "test",
-         "testing",
-         "test_set",
-         "testing_set",
-         "test_dataset",
-         "testing_dataset",
-         "test_data",
-         "testing_data",
-         "eval",
-         "eval_set",
-         "eval_dataset",
-         "eval_data",
-     ]
-     DEFAULT_SPLITS_DATASET: dict[str, str] = {}
-
-     @classmethod
-     def create(
-         cls,
-         data: Any,
-         data_args: Optional[dict[str, Any]],
-         processor: Optional[Union[str, Path, PreTrainedTokenizerBase]],
-         processor_args: Optional[dict[str, Any]],
-         random_seed: int = 42,
-         split_pref_order: Optional[list[str]] = None,
-     ) -> tuple[Union[Dataset, IterableDataset], dict[ColumnInputTypes, str]]:
-         if not cls.is_supported(data, data_args):
-             raise ValueError(f"Unsupported data type: {type(data)} given for {data}. ")
-
-         split = cls.extract_args_split(data_args)
-         column_mappings = cls.extract_args_column_mappings(data_args)
-         dataset = cls.handle_create(
-             data, data_args, processor, processor_args, random_seed
-         )
-
-         if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
-             dataset = cls.extract_dataset_split(dataset, split, split_pref_order)
-
-         if not isinstance(dataset, (Dataset, IterableDataset)):
-             raise ValueError(
-                 f"Unsupported data type: {type(dataset)} given for {dataset}."
-             )
-
-         return dataset, column_mappings
-
-     @classmethod
-     def extract_args_split(cls, data_args: Optional[dict[str, Any]]) -> str:
-         split = "auto"
-
-         if data_args and "split" in data_args:
-             split = data_args["split"]
-             del data_args["split"]
-
-         return split
-
-     @classmethod
-     def extract_args_column_mappings(
-         cls,
-         data_args: Optional[dict[str, Any]],
-     ) -> dict[ColumnInputTypes, str]:
-         columns: dict[ColumnInputTypes, str] = {}
-
-         if data_args:
-             if "prompt_column" in data_args:
-                 columns["prompt_column"] = data_args["prompt_column"]
-                 del data_args["prompt_column"]
-
-             if "prompt_tokens_count_column" in data_args:
-                 columns["prompt_tokens_count_column"] = data_args[
-                     "prompt_tokens_count_column"
-                 ]
-                 del data_args["prompt_tokens_count_column"]
-
-             if "output_tokens_count_column" in data_args:
-                 columns["output_tokens_count_column"] = data_args[
-                     "output_tokens_count_column"
-                 ]
-                 del data_args["output_tokens_count_column"]
-
-         return columns
-
-     @classmethod
-     def extract_dataset_name(
-         cls, dataset: Union[Dataset, IterableDataset, DatasetDict, IterableDatasetDict]
-     ) -> Optional[str]:
-         if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
-             dataset = dataset[list(dataset.keys())[0]]
-
-         if isinstance(dataset, (Dataset, IterableDataset)):
-             if not hasattr(dataset, "info") or not hasattr(
-                 dataset.info, "dataset_name"
-             ):
-                 return None
-
-             return dataset.info.dataset_name
-
-         raise ValueError(f"Unsupported data type: {type(dataset)} given for {dataset}.")
-
-     @classmethod
-     def extract_dataset_split(
-         cls,
-         dataset: Union[DatasetDict, IterableDatasetDict],
-         specified_split: Union[Literal["auto"], str] = "auto",
-         split_pref_order: Optional[Union[Literal["auto"], list[str]]] = "auto",
-     ) -> Union[Dataset, IterableDataset]:
-         if not isinstance(dataset, (DatasetDict, IterableDatasetDict)):
-             raise ValueError(
-                 f"Unsupported data type: {type(dataset)} given for {dataset}."
-             )
-
-         if specified_split != "auto":
-             if specified_split not in dataset:
-                 raise ValueError(
-                     f"Split {specified_split} not found in dataset {dataset}."
-                 )
-
-             return dataset[specified_split]
-
-         dataset_name = cls.extract_dataset_name(dataset)
-
-         if dataset_name and dataset_name in cls.DEFAULT_SPLITS_DATASET:
-             return dataset[cls.DEFAULT_SPLITS_DATASET[dataset_name]]
-
-         if split_pref_order == "auto":
-             split_pref_order = [
-                 *cls.DEFAULT_SPLITS_TEST,
-                 *cls.DEFAULT_SPLITS_VAL,
-                 *cls.DEFAULT_SPLITS_CALIB,
-                 *cls.DEFAULT_SPLITS_TRAIN,
-             ]
-
-         for test_split in split_pref_order or []:
-             if test_split in dataset:
-                 return dataset[test_split]
-
-         return dataset[list(dataset.keys())[0]]
-
-     @classmethod
-     @abstractmethod
-     def is_supported(cls, data: Any, data_args: Optional[dict[str, Any]]) -> bool: ...
-
-     @classmethod
-     @abstractmethod
-     def handle_create(
-         cls,
-         data: Any,
-         data_args: Optional[dict[str, Any]],
-         processor: Optional[Union[str, Path, PreTrainedTokenizerBase]],
-         processor_args: Optional[dict[str, Any]],
-         random_seed: int,
-     ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: ...
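The split-resolution logic in the removed `DatasetCreator.extract_dataset_split` is easy to misread. A small sketch of its behavior, assuming guidellm 0.4.0a18 is installed (the tiny `DatasetDict` below is made up for illustration):

```python
from datasets import Dataset, DatasetDict

# 0.4.0a18 layout; guidellm/dataset/ no longer exists in 0.4.0a155.
from guidellm.dataset.creator import DatasetCreator

splits = DatasetDict({
    "train": Dataset.from_dict({"text": ["a", "b"]}),
    "validation": Dataset.from_dict({"text": ["c"]}),
})

# "auto" walks DEFAULT_SPLITS_TEST -> _VAL -> _CALIB -> _TRAIN and picks the
# first match, so "validation" wins over "train" here.
auto_pick = DatasetCreator.extract_dataset_split(splits, specified_split="auto")

# An explicit split always wins, and a missing one raises ValueError.
train = DatasetCreator.extract_dataset_split(splits, specified_split="train")
```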
guidellm/dataset/entrypoints.py DELETED
@@ -1,42 +0,0 @@
- from pathlib import Path
- from typing import Any, Optional, Union
-
- from datasets import Dataset, IterableDataset
- from transformers import PreTrainedTokenizerBase  # type: ignore[import]
-
- from guidellm.dataset.creator import ColumnInputTypes
- from guidellm.dataset.file import FileDatasetCreator
- from guidellm.dataset.hf_datasets import HFDatasetsCreator
- from guidellm.dataset.in_memory import InMemoryDatasetCreator
- from guidellm.dataset.synthetic import SyntheticDatasetCreator
-
- __all__ = ["load_dataset"]
-
-
- def load_dataset(
-     data: Any,
-     data_args: Optional[dict[str, Any]],
-     processor: Optional[Union[str, Path, PreTrainedTokenizerBase]],
-     processor_args: Optional[dict[str, Any]],
-     random_seed: int = 42,
-     split_pref_order: Optional[list[str]] = None,
- ) -> tuple[Union[Dataset, IterableDataset], dict[ColumnInputTypes, str]]:
-     creators = [
-         InMemoryDatasetCreator,
-         SyntheticDatasetCreator,
-         FileDatasetCreator,
-         HFDatasetsCreator,
-     ]
-
-     for creator in creators:
-         if creator.is_supported(data, data_args):
-             return creator.create(
-                 data,
-                 data_args,
-                 processor,
-                 processor_args,
-                 random_seed,
-                 split_pref_order,
-             )
-
-     raise ValueError(f"Unsupported data type: {type(data)} given for {data}. ")
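The deleted `load_dataset` entrypoint was the single dispatch point for all data sources: creators are tried in the order InMemory → Synthetic → File → HFDatasets, and the first whose `is_supported` returns True handles the data. A hedged usage sketch against the 0.4.0a18 layout; the file path is a placeholder:

```python
# 0.4.0a18 layout; replaced by guidellm.data.deserializers in 0.4.0a155.
from guidellm.dataset import load_dataset

dataset, column_mappings = load_dataset(
    data="prompts.jsonl",       # placeholder path; a .jsonl file falls to FileDatasetCreator
    data_args={"split": "train", "prompt_column": "prompt"},
    processor=None,             # tokenizer is unused for plain file loading
    processor_args=None,
    random_seed=42,
)
# "split" and "*_column" keys are popped from data_args before loading;
# column_mappings comes back as {"prompt_column": "prompt"}.
```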
guidellm/dataset/file.py DELETED
@@ -1,92 +0,0 @@
- from pathlib import Path
- from typing import Any, Optional, Union
-
- import pandas as pd  # type: ignore[import]
- from datasets import (
-     Dataset,
-     DatasetDict,
-     IterableDataset,
-     IterableDatasetDict,
-     load_dataset,
- )
- from transformers import PreTrainedTokenizerBase  # type: ignore[import]
-
- from guidellm.dataset.creator import DatasetCreator
-
- __all__ = ["FileDatasetCreator"]
-
-
- class FileDatasetCreator(DatasetCreator):
-     SUPPORTED_TYPES = {
-         ".txt",
-         ".text",
-         ".csv",
-         ".json",
-         ".jsonl",
-         ".parquet",
-         ".arrow",
-         ".hdf5",
-         ".tar",
-     }
-
-     @classmethod
-     def is_supported(cls, data: Any, data_args: Optional[dict[str, Any]]) -> bool:  # noqa: ARG003
-         if isinstance(data, (str, Path)) and (path := Path(data)).exists():
-             # local folder or py file, assume supported
-             return path.suffix.lower() in cls.SUPPORTED_TYPES
-
-         return False
-
-     @classmethod
-     def handle_create(
-         cls,
-         data: Any,
-         data_args: Optional[dict[str, Any]],
-         processor: Optional[Union[str, Path, PreTrainedTokenizerBase]],  # noqa: ARG003
-         processor_args: Optional[dict[str, Any]],  # noqa: ARG003
-         random_seed: int,  # noqa: ARG003
-     ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
-         if not isinstance(data, (str, Path)):
-             raise ValueError(f"Unsupported data type: {type(data)} given for {data}. ")
-
-         path = Path(data)
-         if not path.exists():
-             raise FileNotFoundError(f"File not found: {path}")
-
-         if not path.is_file():
-             raise ValueError(f"Unsupported data type: {path} given for {path}. ")
-
-         if path.suffix.lower() not in cls.SUPPORTED_TYPES:
-             raise ValueError(f"Unsupported file type: {path.suffix} given for {path}. ")
-
-         return cls.load_dataset(path, data_args)
-
-     @classmethod
-     def load_dataset(
-         cls, path: Path, data_args: Optional[dict[str, Any]]
-     ) -> Union[Dataset, IterableDataset]:
-         if path.suffix.lower() in {".txt", ".text"}:
-             with path.open("r") as file:
-                 items = file.readlines()
-
-             dataset = Dataset.from_dict({"text": items}, **(data_args or {}))
-         elif path.suffix.lower() == ".csv":
-             dataset = load_dataset("csv", data_files=str(path), **(data_args or {}))
-         elif path.suffix.lower() in {".json", ".jsonl"}:
-             dataset = load_dataset("json", data_files=str(path), **(data_args or {}))
-         elif path.suffix.lower() == ".parquet":
-             dataset = load_dataset("parquet", data_files=str(path), **(data_args or {}))
-         elif path.suffix.lower() == ".arrow":
-             dataset = load_dataset("arrow", data_files=str(path), **(data_args or {}))
-         elif path.suffix.lower() == ".hdf5":
-             dataset = Dataset.from_pandas(pd.read_hdf(str(path)), **(data_args or {}))
-         elif path.suffix.lower() == ".db":
-             dataset = Dataset.from_sql(con=str(path), **(data_args or {}))
-         elif path.suffix.lower() == ".tar":
-             dataset = load_dataset(
-                 "webdataset", data_files=str(path), **(data_args or {})
-             )
-         else:
-             raise ValueError(f"Unsupported file type: {path.suffix} given for {path}. ")
-
-         return dataset
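The suffix dispatch in the removed `FileDatasetCreator` determined which Hugging Face `datasets` builder handled a file. A short sketch of that mapping against the 0.4.0a18 layout; the sample path is hypothetical:

```python
from pathlib import Path

# 0.4.0a18 layout; see guidellm/data/deserializers/file.py for the replacement.
from guidellm.dataset.file import FileDatasetCreator

assert Path("prompts.csv").suffix.lower() in FileDatasetCreator.SUPPORTED_TYPES

# .txt/.text  -> Dataset.from_dict({"text": lines})
# .csv/.json/.jsonl/.parquet/.arrow/.tar -> datasets.load_dataset(<builder>, data_files=path)
# .hdf5       -> Dataset.from_pandas(pd.read_hdf(path))
# Note: ".db" had a branch in load_dataset() but was absent from SUPPORTED_TYPES,
# so handle_create() rejected SQLite files before that branch could run.
```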
guidellm/dataset/hf_datasets.py DELETED
@@ -1,62 +0,0 @@
- from pathlib import Path
- from typing import Any, Optional, Union
-
- from datasets import (
-     Dataset,
-     DatasetDict,
-     IterableDataset,
-     IterableDatasetDict,
-     get_dataset_config_info,
-     load_dataset,
- )
- from transformers import PreTrainedTokenizerBase  # type: ignore[import]
-
- from guidellm.dataset.creator import DatasetCreator
-
- __all__ = ["HFDatasetsCreator"]
-
-
- class HFDatasetsCreator(DatasetCreator):
-     @classmethod
-     def is_supported(cls, data: Any, data_args: Optional[dict[str, Any]]) -> bool:  # noqa: ARG003
-         if isinstance(
-             data, (Dataset, DatasetDict, IterableDataset, IterableDatasetDict)
-         ):
-             # base type is supported
-             return True
-
-         if isinstance(data, (str, Path)) and (path := Path(data)).exists():
-             # local folder or py file, assume supported
-             return path.is_dir() or path.suffix == ".py"
-
-         if isinstance(data, (str, Path)):
-             try:
-                 # try to load dataset
-                 return get_dataset_config_info(data) is not None
-             except Exception:  # noqa: BLE001, S110
-                 pass
-
-         return False
-
-     @classmethod
-     def handle_create(
-         cls,
-         data: Any,
-         data_args: Optional[dict[str, Any]],
-         processor: Optional[Union[str, Path, PreTrainedTokenizerBase]],  # noqa: ARG003
-         processor_args: Optional[dict[str, Any]],  # noqa: ARG003
-         random_seed: int,  # noqa: ARG003
-     ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
-         if isinstance(data, (str, Path)):
-             data = load_dataset(data, **(data_args or {}))
-         elif data_args:
-             raise ValueError(
-                 f"data_args should not be provided when data is a {type(data)}"
-             )
-
-         if isinstance(
-             data, (Dataset, DatasetDict, IterableDataset, IterableDatasetDict)
-         ):
-             return data
-
-         raise ValueError(f"Unsupported data type: {type(data)} given for {data}. ")
guidellm/dataset/in_memory.py DELETED
@@ -1,132 +0,0 @@
- from collections.abc import Iterable
- from pathlib import Path
- from typing import Any, Optional, Union
-
- from datasets import (
-     Dataset,
-     DatasetDict,
-     IterableDataset,
-     IterableDatasetDict,
- )
- from transformers import PreTrainedTokenizerBase  # type: ignore[import]
-
- from guidellm.dataset.creator import DatasetCreator
-
- __all__ = ["InMemoryDatasetCreator"]
-
-
- class InMemoryDatasetCreator(DatasetCreator):
-     @classmethod
-     def is_supported(cls, data: Any, data_args: Optional[dict[str, Any]]) -> bool:  # noqa: ARG003
-         return isinstance(data, Iterable) and not isinstance(data, str)
-
-     @classmethod
-     def handle_create(
-         cls,
-         data: Any,
-         data_args: Optional[dict[str, Any]],
-         processor: Optional[Union[str, Path, PreTrainedTokenizerBase]],  # noqa: ARG003
-         processor_args: Optional[dict[str, Any]],  # noqa: ARG003
-         random_seed: int,  # noqa: ARG003
-     ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
-         if not isinstance(data, Iterable):
-             raise TypeError(
-                 f"Unsupported data format. Expected Iterable[Any], got {type(data)}"
-             )
-
-         if not data:
-             raise ValueError("Data is empty")
-
-         if isinstance(data, dict):
-             # assume data is a dictionary of columns and values: {"c1": ["i1", "i2"]}
-             data_dict = cls.format_data_dict(data)
-         elif isinstance(data[0], dict):  # type: ignore[index]
-             # assume data is a list of dictionaries: [{"c1": "i1"}, {"c1": "i2"}]
-             data_dict = cls.format_data_iterable_dicts(data)
-         else:
-             # assume data is a list of items with no columns: ["i1", "i2"]
-             data_dict = cls.format_data_iterable_values(data)
-
-         return Dataset.from_dict(data_dict, **(data_args or {}))
-
-     @classmethod
-     def format_data_dict(cls, data: dict[Any, Any]) -> dict[str, Any]:
-         if not isinstance(data, dict):
-             raise TypeError(
-                 f"Unsupported data format. Expected Dict[str, Iterable[Any]], "
-                 f"got {type(data)}"
-             )
-
-         if not all(
-             isinstance(key, str) and isinstance(val, Iterable)
-             for key, val in data.items()
-         ):
-             raise TypeError(
-                 "Unsupported data format. Expected Dict[str, Iterable[Any]], "
-                 f"got {type(data)}"
-             )
-
-         samples = len(list(data.values())[0])
-         if not all(len(val) == samples for val in data.values()):
-             raise ValueError(
-                 "Unsupported data format. Not all columns have the same number samples "
-                 f"for {data}"
-             )
-
-         return data
-
-     @classmethod
-     def format_data_iterable_dicts(
-         cls, data: Iterable[dict[Any, Any]]
-     ) -> dict[str, Any]:
-         if not isinstance(data, Iterable):
-             raise TypeError(
-                 f"Unsupported data format. Expected Iterable[Dict[str, Any]], "
-                 f"got {type(data)}"
-             )
-
-         if not all(isinstance(item, dict) for item in data):
-             raise TypeError(
-                 f"Unsupported data format. Expected Iterable[Dict[str, Any]], "
-                 f"got {type(data)}"
-             )
-
-         if not all(isinstance(key, str) for key in data[0]):  # type: ignore[index]
-             raise TypeError(
-                 "Unsupported data format. Expected Dict[str, Any], "
-                 f"but one of the items had a non string column for {data}"
-             )
-
-         columns = list(data[0].keys())  # type: ignore[index]
-         if not all(
-             len(item) == len(columns) and all(key in item for key in columns)
-             for item in data
-         ):
-             raise ValueError(
-                 "Unsupported data format. Not all items have the same columns "
-                 f"for {data}"
-             )
-
-         data_dict: dict[str, Any] = {key: [] for key in columns}
-         for item in data:
-             for key, value in item.items():
-                 data_dict[key].append(value)
-
-         return data_dict
-
-     @classmethod
-     def format_data_iterable_values(cls, data: Iterable[Any]) -> dict[str, Any]:
-         if not isinstance(data, Iterable):
-             raise TypeError(
-                 f"Unsupported data format. Expected Iterable[Iterable[Any]], "
-                 f"got {type(data)}"
-             )
-
-         first_item = next(iter(data), None)
-         first_type = type(first_item)
-         if not all(isinstance(item, first_type) for item in data):
-             raise TypeError(
-                 f"Unsupported data format. Not all types are the same for {data}"
-             )
-
-         return {"data": list(data)}