data-designer 0.3.8rc1-py3-none-any.whl → 0.4.0-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- data_designer/cli/commands/__init__.py +1 -1
- data_designer/interface/__init__.py +21 -1
- data_designer/{_version.py → interface/_version.py} +2 -2
- data_designer/interface/data_designer.py +8 -11
- {data_designer-0.3.8rc1.dist-info → data_designer-0.4.0.dist-info}/METADATA +10 -42
- data_designer-0.4.0.dist-info/RECORD +39 -0
- data_designer/__init__.py +0 -17
- data_designer/config/__init__.py +0 -2
- data_designer/config/analysis/__init__.py +0 -2
- data_designer/config/analysis/column_profilers.py +0 -159
- data_designer/config/analysis/column_statistics.py +0 -421
- data_designer/config/analysis/dataset_profiler.py +0 -84
- data_designer/config/analysis/utils/errors.py +0 -10
- data_designer/config/analysis/utils/reporting.py +0 -192
- data_designer/config/base.py +0 -69
- data_designer/config/column_configs.py +0 -470
- data_designer/config/column_types.py +0 -141
- data_designer/config/config_builder.py +0 -595
- data_designer/config/data_designer_config.py +0 -40
- data_designer/config/dataset_builders.py +0 -13
- data_designer/config/dataset_metadata.py +0 -18
- data_designer/config/default_model_settings.py +0 -121
- data_designer/config/errors.py +0 -24
- data_designer/config/exports.py +0 -145
- data_designer/config/interface.py +0 -55
- data_designer/config/models.py +0 -455
- data_designer/config/preview_results.py +0 -41
- data_designer/config/processors.py +0 -148
- data_designer/config/run_config.py +0 -48
- data_designer/config/sampler_constraints.py +0 -52
- data_designer/config/sampler_params.py +0 -639
- data_designer/config/seed.py +0 -116
- data_designer/config/seed_source.py +0 -84
- data_designer/config/seed_source_types.py +0 -19
- data_designer/config/utils/code_lang.py +0 -82
- data_designer/config/utils/constants.py +0 -363
- data_designer/config/utils/errors.py +0 -21
- data_designer/config/utils/info.py +0 -94
- data_designer/config/utils/io_helpers.py +0 -258
- data_designer/config/utils/misc.py +0 -78
- data_designer/config/utils/numerical_helpers.py +0 -30
- data_designer/config/utils/type_helpers.py +0 -106
- data_designer/config/utils/visualization.py +0 -482
- data_designer/config/validator_params.py +0 -94
- data_designer/engine/__init__.py +0 -2
- data_designer/engine/analysis/column_profilers/base.py +0 -49
- data_designer/engine/analysis/column_profilers/judge_score_profiler.py +0 -153
- data_designer/engine/analysis/column_profilers/registry.py +0 -22
- data_designer/engine/analysis/column_statistics.py +0 -145
- data_designer/engine/analysis/dataset_profiler.py +0 -149
- data_designer/engine/analysis/errors.py +0 -9
- data_designer/engine/analysis/utils/column_statistics_calculations.py +0 -234
- data_designer/engine/analysis/utils/judge_score_processing.py +0 -132
- data_designer/engine/column_generators/__init__.py +0 -2
- data_designer/engine/column_generators/generators/__init__.py +0 -2
- data_designer/engine/column_generators/generators/base.py +0 -122
- data_designer/engine/column_generators/generators/embedding.py +0 -35
- data_designer/engine/column_generators/generators/expression.py +0 -55
- data_designer/engine/column_generators/generators/llm_completion.py +0 -113
- data_designer/engine/column_generators/generators/samplers.py +0 -69
- data_designer/engine/column_generators/generators/seed_dataset.py +0 -144
- data_designer/engine/column_generators/generators/validation.py +0 -140
- data_designer/engine/column_generators/registry.py +0 -60
- data_designer/engine/column_generators/utils/errors.py +0 -15
- data_designer/engine/column_generators/utils/generator_classification.py +0 -43
- data_designer/engine/column_generators/utils/judge_score_factory.py +0 -58
- data_designer/engine/column_generators/utils/prompt_renderer.py +0 -100
- data_designer/engine/compiler.py +0 -97
- data_designer/engine/configurable_task.py +0 -71
- data_designer/engine/dataset_builders/artifact_storage.py +0 -283
- data_designer/engine/dataset_builders/column_wise_builder.py +0 -338
- data_designer/engine/dataset_builders/errors.py +0 -15
- data_designer/engine/dataset_builders/multi_column_configs.py +0 -46
- data_designer/engine/dataset_builders/utils/__init__.py +0 -2
- data_designer/engine/dataset_builders/utils/concurrency.py +0 -215
- data_designer/engine/dataset_builders/utils/config_compiler.py +0 -62
- data_designer/engine/dataset_builders/utils/dag.py +0 -62
- data_designer/engine/dataset_builders/utils/dataset_batch_manager.py +0 -200
- data_designer/engine/dataset_builders/utils/errors.py +0 -15
- data_designer/engine/errors.py +0 -51
- data_designer/engine/model_provider.py +0 -77
- data_designer/engine/models/__init__.py +0 -2
- data_designer/engine/models/errors.py +0 -300
- data_designer/engine/models/facade.py +0 -287
- data_designer/engine/models/factory.py +0 -42
- data_designer/engine/models/litellm_overrides.py +0 -179
- data_designer/engine/models/parsers/__init__.py +0 -2
- data_designer/engine/models/parsers/errors.py +0 -34
- data_designer/engine/models/parsers/parser.py +0 -235
- data_designer/engine/models/parsers/postprocessors.py +0 -93
- data_designer/engine/models/parsers/tag_parsers.py +0 -62
- data_designer/engine/models/parsers/types.py +0 -84
- data_designer/engine/models/recipes/base.py +0 -81
- data_designer/engine/models/recipes/response_recipes.py +0 -293
- data_designer/engine/models/registry.py +0 -146
- data_designer/engine/models/telemetry.py +0 -359
- data_designer/engine/models/usage.py +0 -73
- data_designer/engine/models/utils.py +0 -38
- data_designer/engine/processing/ginja/__init__.py +0 -2
- data_designer/engine/processing/ginja/ast.py +0 -65
- data_designer/engine/processing/ginja/environment.py +0 -463
- data_designer/engine/processing/ginja/exceptions.py +0 -56
- data_designer/engine/processing/ginja/record.py +0 -32
- data_designer/engine/processing/gsonschema/__init__.py +0 -2
- data_designer/engine/processing/gsonschema/exceptions.py +0 -15
- data_designer/engine/processing/gsonschema/schema_transformers.py +0 -83
- data_designer/engine/processing/gsonschema/types.py +0 -10
- data_designer/engine/processing/gsonschema/validators.py +0 -202
- data_designer/engine/processing/processors/base.py +0 -13
- data_designer/engine/processing/processors/drop_columns.py +0 -42
- data_designer/engine/processing/processors/registry.py +0 -25
- data_designer/engine/processing/processors/schema_transform.py +0 -49
- data_designer/engine/processing/utils.py +0 -169
- data_designer/engine/registry/base.py +0 -99
- data_designer/engine/registry/data_designer_registry.py +0 -39
- data_designer/engine/registry/errors.py +0 -12
- data_designer/engine/resources/managed_dataset_generator.py +0 -39
- data_designer/engine/resources/managed_dataset_repository.py +0 -197
- data_designer/engine/resources/managed_storage.py +0 -65
- data_designer/engine/resources/resource_provider.py +0 -77
- data_designer/engine/resources/seed_reader.py +0 -154
- data_designer/engine/sampling_gen/column.py +0 -91
- data_designer/engine/sampling_gen/constraints.py +0 -100
- data_designer/engine/sampling_gen/data_sources/base.py +0 -217
- data_designer/engine/sampling_gen/data_sources/errors.py +0 -12
- data_designer/engine/sampling_gen/data_sources/sources.py +0 -347
- data_designer/engine/sampling_gen/entities/__init__.py +0 -2
- data_designer/engine/sampling_gen/entities/assets/zip_area_code_map.parquet +0 -0
- data_designer/engine/sampling_gen/entities/dataset_based_person_fields.py +0 -86
- data_designer/engine/sampling_gen/entities/email_address_utils.py +0 -171
- data_designer/engine/sampling_gen/entities/errors.py +0 -10
- data_designer/engine/sampling_gen/entities/national_id_utils.py +0 -102
- data_designer/engine/sampling_gen/entities/person.py +0 -144
- data_designer/engine/sampling_gen/entities/phone_number.py +0 -128
- data_designer/engine/sampling_gen/errors.py +0 -26
- data_designer/engine/sampling_gen/generator.py +0 -122
- data_designer/engine/sampling_gen/jinja_utils.py +0 -64
- data_designer/engine/sampling_gen/people_gen.py +0 -199
- data_designer/engine/sampling_gen/person_constants.py +0 -56
- data_designer/engine/sampling_gen/schema.py +0 -147
- data_designer/engine/sampling_gen/schema_builder.py +0 -61
- data_designer/engine/sampling_gen/utils.py +0 -46
- data_designer/engine/secret_resolver.py +0 -82
- data_designer/engine/validation.py +0 -367
- data_designer/engine/validators/__init__.py +0 -19
- data_designer/engine/validators/base.py +0 -38
- data_designer/engine/validators/local_callable.py +0 -39
- data_designer/engine/validators/python.py +0 -254
- data_designer/engine/validators/remote.py +0 -89
- data_designer/engine/validators/sql.py +0 -65
- data_designer/errors.py +0 -7
- data_designer/essentials/__init__.py +0 -33
- data_designer/lazy_heavy_imports.py +0 -54
- data_designer/logging.py +0 -163
- data_designer/plugin_manager.py +0 -78
- data_designer/plugins/__init__.py +0 -8
- data_designer/plugins/errors.py +0 -15
- data_designer/plugins/plugin.py +0 -141
- data_designer/plugins/registry.py +0 -88
- data_designer/plugins/testing/__init__.py +0 -10
- data_designer/plugins/testing/stubs.py +0 -116
- data_designer/plugins/testing/utils.py +0 -20
- data_designer-0.3.8rc1.dist-info/RECORD +0 -196
- data_designer-0.3.8rc1.dist-info/licenses/LICENSE +0 -201
- {data_designer-0.3.8rc1.dist-info → data_designer-0.4.0.dist-info}/WHEEL +0 -0
- {data_designer-0.3.8rc1.dist-info → data_designer-0.4.0.dist-info}/entry_points.txt +0 -0
--- a/data_designer/engine/dataset_builders/column_wise_builder.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import annotations
-
-import functools
-import importlib.metadata
-import json
-import logging
-import time
-import uuid
-from pathlib import Path
-from typing import TYPE_CHECKING, Callable
-
-from data_designer.config.column_types import ColumnConfigT
-from data_designer.config.config_builder import BuilderConfig
-from data_designer.config.data_designer_config import DataDesignerConfig
-from data_designer.config.dataset_builders import BuildStage
-from data_designer.config.processors import (
-    DropColumnsProcessorConfig,
-    ProcessorConfig,
-    ProcessorType,
-)
-from data_designer.engine.column_generators.generators.base import (
-    ColumnGenerator,
-    ColumnGeneratorWithModel,
-    GenerationStrategy,
-)
-from data_designer.engine.column_generators.utils.generator_classification import column_type_is_model_generated
-from data_designer.engine.compiler import compile_data_designer_config
-from data_designer.engine.dataset_builders.artifact_storage import SDG_CONFIG_FILENAME, ArtifactStorage
-from data_designer.engine.dataset_builders.errors import DatasetGenerationError, DatasetProcessingError
-from data_designer.engine.dataset_builders.multi_column_configs import MultiColumnConfig
-from data_designer.engine.dataset_builders.utils.concurrency import (
-    MAX_CONCURRENCY_PER_NON_LLM_GENERATOR,
-    ConcurrentThreadExecutor,
-)
-from data_designer.engine.dataset_builders.utils.config_compiler import compile_dataset_builder_column_configs
-from data_designer.engine.dataset_builders.utils.dataset_batch_manager import DatasetBatchManager
-from data_designer.engine.models.telemetry import InferenceEvent, NemoSourceEnum, TaskStatusEnum, TelemetryHandler
-from data_designer.engine.processing.processors.base import Processor
-from data_designer.engine.processing.processors.drop_columns import DropColumnsProcessor
-from data_designer.engine.registry.data_designer_registry import DataDesignerRegistry
-from data_designer.engine.resources.resource_provider import ResourceProvider
-from data_designer.lazy_heavy_imports import pd
-
-if TYPE_CHECKING:
-    import pandas as pd
-
-    from data_designer.engine.column_generators.generators.base import ColumnGeneratorWithModelRegistry
-    from data_designer.engine.models.usage import ModelUsageStats
-
-logger = logging.getLogger(__name__)
-
-_CLIENT_VERSION: str = importlib.metadata.version("data_designer")
-
-
-class ColumnWiseDatasetBuilder:
-    def __init__(
-        self,
-        data_designer_config: DataDesignerConfig,
-        resource_provider: ResourceProvider,
-        registry: DataDesignerRegistry | None = None,
-    ):
-        self.batch_manager = DatasetBatchManager(resource_provider.artifact_storage)
-        self._resource_provider = resource_provider
-        self._records_to_drop: set[int] = set()
-        self._registry = registry or DataDesignerRegistry()
-
-        self._data_designer_config = compile_data_designer_config(data_designer_config, resource_provider)
-        self._column_configs = compile_dataset_builder_column_configs(self._data_designer_config)
-        self._processors: dict[BuildStage, list[Processor]] = self._initialize_processors(
-            self._data_designer_config.processors or []
-        )
-        self._validate_column_configs()
-
-    @property
-    def artifact_storage(self) -> ArtifactStorage:
-        return self._resource_provider.artifact_storage
-
-    @functools.cached_property
-    def single_column_configs(self) -> list[ColumnConfigT]:
-        configs = []
-        for config in self._column_configs:
-            if isinstance(config, MultiColumnConfig):
-                configs.extend(config.columns)
-            else:
-                configs.append(config)
-        return configs
-
-    @functools.cached_property
-    def llm_generated_column_configs(self) -> list[ColumnConfigT]:
-        return [config for config in self.single_column_configs if column_type_is_model_generated(config.column_type)]
-
-    def build(
-        self,
-        *,
-        num_records: int,
-        on_batch_complete: Callable[[Path], None] | None = None,
-    ) -> Path:
-        self._run_model_health_check_if_needed()
-        self._write_builder_config()
-        generators = self._initialize_generators()
-        start_time = time.perf_counter()
-        group_id = uuid.uuid4().hex
-
-        buffer_size = self._resource_provider.run_config.buffer_size
-        self.batch_manager.start(num_records=num_records, buffer_size=buffer_size)
-        for batch_idx in range(self.batch_manager.num_batches):
-            logger.info(f"⏳ Processing batch {batch_idx + 1} of {self.batch_manager.num_batches}")
-            self._run_batch(generators, batch_mode="batch", group_id=group_id)
-            df_batch = self._run_processors(
-                stage=BuildStage.POST_BATCH,
-                dataframe=self.batch_manager.get_current_batch(as_dataframe=True),
-                current_batch_number=batch_idx,
-            )
-            self._write_processed_batch(df_batch)
-            self.batch_manager.finish_batch(on_batch_complete)
-        self.batch_manager.finish()
-
-        model_usage_stats = self._resource_provider.model_registry.get_model_usage_stats(
-            time.perf_counter() - start_time
-        )
-        logger.info(f"📊 Model usage summary:\n{json.dumps(model_usage_stats, indent=4)}")
-
-        return self.artifact_storage.final_dataset_path
-
-    def build_preview(self, *, num_records: int) -> pd.DataFrame:
-        self._run_model_health_check_if_needed()
-
-        generators = self._initialize_generators()
-        group_id = uuid.uuid4().hex
-        start_time = time.perf_counter()
-        self.batch_manager.start(num_records=num_records, buffer_size=num_records)
-        self._run_batch(generators, batch_mode="preview", save_partial_results=False, group_id=group_id)
-        dataset = self.batch_manager.get_current_batch(as_dataframe=True)
-        self.batch_manager.reset()
-
-        model_usage_stats = self._resource_provider.model_registry.get_model_usage_stats(
-            time.perf_counter() - start_time
-        )
-        logger.info(f"📊 Model usage summary:\n{json.dumps(model_usage_stats, indent=4)}")
-
-        return dataset
-
-    def process_preview(self, dataset: pd.DataFrame) -> pd.DataFrame:
-        return self._run_processors(
-            stage=BuildStage.POST_BATCH,
-            dataframe=dataset.copy(),
-            current_batch_number=None,  # preview mode does not have a batch number
-        )
-
-    def _initialize_generators(self) -> list[ColumnGenerator]:
-        return [
-            self._registry.column_generators.get_for_config_type(type(config))(
-                config=config, resource_provider=self._resource_provider
-            )
-            for config in self._column_configs
-        ]
-
-    def _write_builder_config(self) -> None:
-        self.artifact_storage.mkdir_if_needed(self.artifact_storage.base_dataset_path)
-        BuilderConfig(data_designer=self._data_designer_config).to_json(
-            self.artifact_storage.base_dataset_path / SDG_CONFIG_FILENAME
-        )
-
-    def _run_batch(
-        self, generators: list[ColumnGenerator], *, batch_mode: str, save_partial_results: bool = True, group_id: str
-    ) -> None:
-        pre_batch_snapshot = self._resource_provider.model_registry.get_model_usage_snapshot()
-        for generator in generators:
-            generator.log_pre_generation()
-            try:
-                generation_strategy = generator.get_generation_strategy()
-                if generator.can_generate_from_scratch and self.batch_manager.buffer_is_empty:
-                    self._run_from_scratch_column_generator(generator)
-                elif generation_strategy == GenerationStrategy.CELL_BY_CELL:
-                    self._run_cell_by_cell_generator(generator)
-                elif generation_strategy == GenerationStrategy.FULL_COLUMN:
-                    self._run_full_column_generator(generator)
-                else:
-                    logger.error(f"❌ Unknown generation strategy: {generation_strategy}")
-                    raise DatasetGenerationError(f"🛑 Unknown generation strategy: {generation_strategy}")
-                if save_partial_results:
-                    self.batch_manager.write()
-            except Exception as e:
-                column_error_str = (
-                    f"columns {generator.config.column_names}"
-                    if hasattr(generator.config, "column_names")
-                    else f"column {generator.config.name!r}"
-                )
-                raise DatasetGenerationError(f"🛑 Failed to process {column_error_str}:\n{e}")
-
-        try:
-            usage_deltas = self._resource_provider.model_registry.get_usage_deltas(pre_batch_snapshot)
-            self._emit_batch_inference_events(batch_mode, usage_deltas, group_id)
-        except Exception:
-            pass
-
-    def _run_from_scratch_column_generator(self, generator: ColumnGenerator) -> None:
-        df = generator.generate_from_scratch(self.batch_manager.num_records_batch)
-        self.batch_manager.add_records(df.to_dict(orient="records"))
-
-    def _run_cell_by_cell_generator(self, generator: ColumnGenerator) -> None:
-        max_workers = MAX_CONCURRENCY_PER_NON_LLM_GENERATOR
-        if isinstance(generator, ColumnGeneratorWithModel):
-            max_workers = generator.inference_parameters.max_parallel_requests
-        self._fan_out_with_threads(generator, max_workers=max_workers)
-
-    def _run_full_column_generator(self, generator: ColumnGenerator) -> None:
-        df = generator.generate(self.batch_manager.get_current_batch(as_dataframe=True))
-        self.batch_manager.update_records(df.to_dict(orient="records"))
-
-    def _run_model_health_check_if_needed(self) -> bool:
-        if any(column_type_is_model_generated(config.column_type) for config in self.single_column_configs):
-            self._resource_provider.model_registry.run_health_check(
-                list(set(config.model_alias for config in self.llm_generated_column_configs))
-            )
-
-    def _fan_out_with_threads(self, generator: ColumnGeneratorWithModelRegistry, max_workers: int) -> None:
-        if generator.get_generation_strategy() != GenerationStrategy.CELL_BY_CELL:
-            raise DatasetGenerationError(
-                f"Generator {generator.name} is not a {GenerationStrategy.CELL_BY_CELL} "
-                "generator so concurrency through threads is not supported."
-            )
-
-        logger.info(
-            f"🐙 Processing {generator.config.column_type} column '{generator.config.name}' "
-            f"with {max_workers} concurrent workers"
-        )
-        settings = self._resource_provider.run_config
-        with ConcurrentThreadExecutor(
-            max_workers=max_workers,
-            column_name=generator.config.name,
-            result_callback=self._worker_result_callback,
-            error_callback=self._worker_error_callback,
-            shutdown_error_rate=settings.shutdown_error_rate,
-            shutdown_error_window=settings.shutdown_error_window,
-            disable_early_shutdown=settings.disable_early_shutdown,
-        ) as executor:
-            for i, record in self.batch_manager.iter_current_batch():
-                executor.submit(lambda record: generator.generate(record), record, context={"index": i})
-
-        if len(self._records_to_drop) > 0:
-            self.batch_manager.drop_records(self._records_to_drop)
-            self._records_to_drop.clear()
-
-    def _write_processed_batch(self, dataframe: pd.DataFrame) -> None:
-        self.batch_manager.update_records(dataframe.to_dict(orient="records"))
-        self.batch_manager.write()
-
-    def _validate_column_configs(self) -> None:
-        if len(self._column_configs) == 0:
-            raise DatasetGenerationError("🛑 No column configs provided.")
-
-        if not self._registry.column_generators.get_for_config_type(
-            type(self._column_configs[0])
-        ).can_generate_from_scratch:
-            raise DatasetGenerationError("🛑 The first column config must be a from-scratch column generator.")
-
-    def _initialize_processors(self, processor_configs: list[ProcessorConfig]) -> dict[BuildStage, list[Processor]]:
-        # Check columns marked for drop
-        columns_to_drop = [config.name for config in self.single_column_configs if config.drop]
-
-        processors: dict[BuildStage, list[Processor]] = {stage: [] for stage in BuildStage}
-        for config in processor_configs:
-            processors[config.build_stage].append(
-                self._registry.processors.get_for_config_type(type(config))(
-                    config=config,
-                    resource_provider=self._resource_provider,
-                )
-            )
-
-            # Manually included "drop columns" processor takes precedence (can e.g., pick stages other than post-batch)
-            if config.processor_type == ProcessorType.DROP_COLUMNS:
-                for column in config.column_names:
-                    if column in columns_to_drop:
-                        columns_to_drop.remove(column)
-
-        # If there are still columns marked for drop, add the "drop columns" processor to drop them
-        if len(columns_to_drop) > 0:
-            processors[BuildStage.POST_BATCH].append(  # as post-batch by default
-                DropColumnsProcessor(
-                    config=DropColumnsProcessorConfig(
-                        name="default_drop_columns_processor",
-                        column_names=columns_to_drop,
-                        build_stage=BuildStage.POST_BATCH,
-                    ),
-                    resource_provider=self._resource_provider,
-                )
-            )
-
-        return processors
-
-    def _run_processors(
-        self, stage: BuildStage, dataframe: pd.DataFrame, current_batch_number: int | None = None
-    ) -> pd.DataFrame:
-        for processor in self._processors[stage]:
-            try:
-                dataframe = processor.process(dataframe, current_batch_number=current_batch_number)
-            except Exception as e:
-                raise DatasetProcessingError(
-                    f"🛑 Failed to process dataset with processor {processor.name} in stage {stage}: {e}"
-                ) from e
-        return dataframe
-
-    def _worker_error_callback(self, exc: Exception, *, context: dict | None = None) -> None:
-        """If a worker fails, we can handle the exception here."""
-        logger.warning(
-            f"⚠️ Generation for record at index {context['index']} failed. "
-            f"Will omit this record from the dataset.\n{exc}"
-        )
-        self._records_to_drop.add(context["index"])
-
-    def _worker_result_callback(self, result: dict, *, context: dict | None = None) -> None:
-        self.batch_manager.update_record(context["index"], result)
-
-    def _emit_batch_inference_events(
-        self, batch_mode: str, usage_deltas: dict[str, ModelUsageStats], group_id: str
-    ) -> None:
-        if not usage_deltas:
-            return
-
-        events = [
-            InferenceEvent(
-                nemo_source=NemoSourceEnum.DATADESIGNER,
-                task=batch_mode,
-                task_status=TaskStatusEnum.SUCCESS,
-                model=model_name,
-                input_tokens=delta.token_usage.input_tokens,
-                output_tokens=delta.token_usage.output_tokens,
-            )
-            for model_name, delta in usage_deltas.items()
-        ]
-
-        with TelemetryHandler(source_client_version=_CLIENT_VERSION, session_id=group_id) as telemetry_handler:
-            for event in events:
-                telemetry_handler.enqueue(event)
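
For orientation, the removed `_run_batch` above dispatches each column generator one of three ways: a from-scratch generator seeds the empty batch buffer, and every later generator runs either record-by-record or over the whole column. A minimal standalone sketch of that dispatch order (the toy `Generator` type is illustrative, not a data-designer class):

    # Toy dispatch loop mirroring _run_batch's ordering (illustrative only).
    from dataclasses import dataclass
    from typing import Callable

    @dataclass
    class Generator:
        strategy: str  # "from_scratch", "cell_by_cell", or "full_column"
        fn: Callable

    def run_batch(generators: list[Generator], num_records: int) -> list[dict]:
        records: list[dict] = []
        for gen in generators:
            if gen.strategy == "from_scratch" and not records:
                records = gen.fn(num_records)  # seed the empty buffer
            elif gen.strategy == "cell_by_cell":
                records = [gen.fn(r) for r in records]  # one call per record
            elif gen.strategy == "full_column":
                records = gen.fn(records)  # one call for the whole batch
            else:
                raise ValueError(f"Unknown generation strategy: {gen.strategy}")
        return records

    rows = run_batch(
        [
            Generator("from_scratch", lambda n: [{"i": i} for i in range(n)]),
            Generator("cell_by_cell", lambda r: {**r, "double": r["i"] * 2}),
        ],
        num_records=3,
    )

This is why `_validate_column_configs` above insists that the first compiled config map to a from-scratch generator: nothing else can populate an empty buffer.
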
--- a/data_designer/engine/dataset_builders/errors.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import annotations
-
-from data_designer.engine.errors import DataDesignerError
-
-
-class ArtifactStorageError(DataDesignerError): ...
-
-
-class DatasetGenerationError(DataDesignerError): ...
-
-
-class DatasetProcessingError(DataDesignerError): ...
--- a/data_designer/engine/dataset_builders/multi_column_configs.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import annotations
-
-from abc import ABC
-from typing import TypeAlias
-
-from pydantic import Field, field_validator
-
-from data_designer.config.base import ConfigBase
-from data_designer.config.column_configs import SamplerColumnConfig, SeedDatasetColumnConfig, SingleColumnConfig
-from data_designer.config.column_types import ColumnConfigT, DataDesignerColumnType
-from data_designer.config.sampler_constraints import ColumnConstraintT
-from data_designer.config.seed import SeedConfig
-
-
-class MultiColumnConfig(ConfigBase, ABC):
-    columns: list[SingleColumnConfig] = Field(..., min_length=1)
-
-    @property
-    def column_names(self) -> list[str]:
-        return [c.name for c in self.columns]
-
-    @property
-    def column_type(self) -> DataDesignerColumnType:
-        return self.columns[0].column_type
-
-    @field_validator("columns", mode="after")
-    def validate_column_types_are_the_same(cls, v: list[SingleColumnConfig]) -> list[SingleColumnConfig]:
-        if len(set([c.column_type for c in v])) != 1:
-            raise ValueError("All column types must be of the same type")
-        return v
-
-
-class SamplerMultiColumnConfig(MultiColumnConfig):
-    columns: list[SamplerColumnConfig]
-    constraints: list[ColumnConstraintT] = []
-    max_rejections_factor: int = 5
-
-
-class SeedDatasetMultiColumnConfig(SeedConfig, MultiColumnConfig):
-    columns: list[SeedDatasetColumnConfig]
-
-
-DatasetBuilderColumnConfigT: TypeAlias = ColumnConfigT | SeedDatasetMultiColumnConfig | SamplerMultiColumnConfig
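
The `field_validator` above guarantees that a multi-column config never mixes column types, which is what lets `column_type` safely read `self.columns[0]`. A standalone pydantic sketch of the same pattern, using toy `Column`/`MultiColumn` models rather than the data-designer classes:

    # Toy pydantic models demonstrating the same-type validator pattern above.
    from pydantic import BaseModel, Field, field_validator

    class Column(BaseModel):
        name: str
        column_type: str

    class MultiColumn(BaseModel):
        columns: list[Column] = Field(..., min_length=1)

        @field_validator("columns", mode="after")
        @classmethod
        def validate_same_type(cls, v: list[Column]) -> list[Column]:
            if len({c.column_type for c in v}) != 1:
                raise ValueError("All column types must be of the same type")
            return v

    MultiColumn(columns=[Column(name="a", column_type="sampler"),
                         Column(name="b", column_type="sampler")])  # passes
    # A mix of "sampler" and "seed_dataset" columns would raise a ValidationError.
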
--- a/data_designer/engine/dataset_builders/utils/concurrency.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import annotations
-
-import contextvars
-import json
-import logging
-from concurrent.futures import Future, ThreadPoolExecutor
-from threading import Lock, Semaphore
-from typing import Any, Protocol
-
-from pydantic import BaseModel, Field
-
-from data_designer.engine.errors import DataDesignerRuntimeError, ErrorTrap
-
-logger = logging.getLogger(__name__)
-
-# Constants
-MAX_CONCURRENCY_PER_NON_LLM_GENERATOR = 4
-
-
-class ExecutorResults(BaseModel):
-    failure_threshold: float = 0.0  # Error rate threshold
-    completed_count: int = 0  # How many tasks/jobs completed
-    success_count: int = 0  # How many tasks/jobs were successful
-    early_shutdown: bool = False  # Did we shutdown early due to errors?
-    error_trap: ErrorTrap = Field(default_factory=ErrorTrap)
-
-    @property
-    def summary(self) -> dict:
-        summary = self.model_dump(exclude={"error_trap"})
-        summary |= self.error_trap.model_dump()
-        return summary
-
-    def get_error_rate(self, window: int) -> float:
-        # We don't start actually tracking until our minimum window size is met
-        if self.completed_count < window:
-            return 0.0
-        return self.error_trap.error_count / max(1, self.completed_count)
-
-    def is_error_rate_exceeded(self, window: int) -> bool:
-        return self.get_error_rate(window) >= self.failure_threshold
-
-
-class CallbackWithContext(Protocol):
-    """Executor callback functions must accept a context kw argument."""
-
-    def __call__(self, result: Any, *, context: dict | None = None) -> Any: ...
-
-
-class ErrorCallbackWithContext(Protocol):
-    """Error callbacks take the Exception instance and context."""
-
-    def __call__(self, exc: Exception, *, context: dict | None = None) -> Any: ...
-
-
-class ConcurrentThreadExecutor:
-    """
-    Interface for executing multiple concurrent tasks with error rate monitoring.
-
-    This interface should be used exclusively as
-    a context manager. New tasks can be submitted to the executor using the `submit`
-    method. This submit method functions similarly to the
-    submit method of a ThreadPoolExecutor.
-
-    The underlying queue of tasks is bounded by the `max_workers`
-    parameter. This means that only `max_workers` number of
-    tasks can be queued up for execution. As tasks complete,
-    if there are errors, those are tracked and counted. If
-    a certain error rate is exceeded, the executor will shutdown
-    early. All queued and running tasks will complete.
-
-    The reason we bound the underlying task queue is to ensure that when
-    a certain error threshold is met there aren't an unbounded
-    number of tasks that need to complete. Generally speaking,
-    tasks should not be sitting in the queue for long at all since
-    the queue size == `max_workers`. The side effect of this is that
-    the `submit()` method will block, however this should not matter
-    because upstream Tasks need to wait for all jobs to complete
-    before the Task can be considered complete.
-
-    ContextVars from the main parent thread are automatically propagated
-    to all child threads.
-
-    When a task is completed, the user provided `result_callback`
-    function will be called with the task result as the only argument.
-    """
-
-    def __init__(
-        self,
-        *,
-        max_workers: int,
-        column_name: str,
-        result_callback: CallbackWithContext | None = None,
-        error_callback: ErrorCallbackWithContext | None = None,
-        shutdown_error_rate: float = 0.50,
-        shutdown_error_window: int = 10,
-        disable_early_shutdown: bool = False,
-    ):
-        self._executor = None
-        self._column_name = column_name
-        self._max_workers = max_workers
-        self._lock = Lock()
-        self._semaphore = Semaphore(self._max_workers)
-        self._result_callback = result_callback
-        self._error_callback = error_callback
-        self._shutdown_error_rate = shutdown_error_rate
-        self._shutdown_window_size = shutdown_error_window
-        self._disable_early_shutdown = disable_early_shutdown
-        self._results = ExecutorResults(failure_threshold=shutdown_error_rate)
-
-    @property
-    def results(self) -> ExecutorResults:
-        return self._results
-
-    @property
-    def max_workers(self) -> int:
-        return self._max_workers
-
-    @property
-    def shutdown_error_rate(self) -> float:
-        return self._shutdown_error_rate
-
-    @property
-    def shutdown_window_size(self) -> int:
-        return self._shutdown_window_size
-
-    @property
-    def semaphore(self) -> Semaphore:
-        return self._semaphore
-
-    def __enter__(self) -> ConcurrentThreadExecutor:
-        self._executor = ThreadPoolExecutor(
-            max_workers=self._max_workers,
-            thread_name_prefix="ConcurrentThreadExecutor",
-            initializer=_set_worker_contextvars,
-            initargs=(contextvars.copy_context(),),
-        )
-        return self
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        self._shutdown_executor()
-        if not self._disable_early_shutdown and self._results.early_shutdown is True:
-            self._raise_task_error()
-
-    def _shutdown_executor(self) -> None:
-        if self._executor is not None:
-            self._executor.shutdown()
-
-    def _raise_task_error(self):
-        raise DataDesignerRuntimeError(
-            "\n".join(
-                [
-                    " |-- Data generation was terminated early due to error rate exceeding threshold.",
-                    f" |-- The summary of encountered errors is: \n{json.dumps(self._results.summary, indent=4)}",
-                ]
-            )
-        )
-
-    def submit(self, fn, *args, context: dict | None = None, **kwargs) -> None:
-        if self._executor is None:
-            raise RuntimeError("Executor is not initialized, this class should be used as a context manager.")
-
-        if not self._disable_early_shutdown and self._results.early_shutdown:
-            self._shutdown_executor()
-            self._raise_task_error()
-
-        def _handle_future(future: Future) -> None:
-            try:
-                result = future.result()
-                if self._result_callback is not None:
-                    self._result_callback(result, context=context)
-                with self._lock:
-                    self._results.completed_count += 1
-                    self._results.success_count += 1
-            except Exception as err:
-                with self._lock:
-                    self._results.completed_count += 1
-                    self._results.error_trap.handle_error(err)
-                    if not self._disable_early_shutdown and self._results.is_error_rate_exceeded(
-                        self._shutdown_window_size
-                    ):
-                        # Signal to shutdown early on the next submission (if received).
-                        # We cannot trigger shutdown from within this thread as it can
-                        # cause a deadlock.
-                        if not self._results.early_shutdown:
-                            self._results.early_shutdown = True
-                if self._error_callback is not None:
-                    self._error_callback(err, context=context)
-            finally:
-                self._semaphore.release()
-
-        try:
-            self._semaphore.acquire()
-            future = self._executor.submit(fn, *args, **kwargs)
-            future.add_done_callback(_handle_future)
-        except Exception as err:
-            # If we get here, the pool is shutting down (likely due to early termination from errors)
-            # We'll re-raise a custom error that can be handled at the call-site and the summary
-            # can also be inspected.
-            self._semaphore.release()
-            is_shutdown_error = isinstance(err, RuntimeError) and (
-                "after shutdown" in str(err) or "Pool shutdown" in str(err)
-            )
-            if not is_shutdown_error:
-                raise err
-            if self._disable_early_shutdown:
-                raise err
-            self._raise_task_error()
-
-
-def _set_worker_contextvars(context: contextvars.Context):
-    for var, value in context.items():
-        var.set(value)
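
The docstring above spells out the executor's contract: submission is bounded by a semaphore sized to `max_workers`, callbacks receive a `context` kwarg, and once the error rate crosses the threshold the executor shuts down early. A minimal usage sketch against the 0.3.8rc1 module, mirroring how `_fan_out_with_threads` drove it (the work function and callbacks are illustrative, not data-designer code):

    from data_designer.engine.dataset_builders.utils.concurrency import ConcurrentThreadExecutor

    def on_result(result, *, context=None):
        print(f"record {context['index']} -> {result}")

    def on_error(exc, *, context=None):
        print(f"record {context['index']} failed: {exc}")

    with ConcurrentThreadExecutor(
        max_workers=4,
        column_name="demo_column",
        result_callback=on_result,
        error_callback=on_error,
        shutdown_error_rate=0.5,   # shut down once >= 50% of tasks have failed...
        shutdown_error_window=10,  # ...and at least 10 tasks have completed
    ) as executor:
        for i, record in enumerate([{"x": 1}, {"x": 2}, {"x": 3}]):
            # submit() blocks while max_workers tasks are already in flight
            executor.submit(lambda r: {**r, "y": r["x"] * 2}, record, context={"index": i})
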
--- a/data_designer/engine/dataset_builders/utils/config_compiler.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import annotations
-
-from data_designer.config.column_types import DataDesignerColumnType
-from data_designer.config.data_designer_config import DataDesignerConfig
-from data_designer.config.processors import ProcessorConfig
-from data_designer.engine.dataset_builders.multi_column_configs import (
-    DatasetBuilderColumnConfigT,
-    SamplerMultiColumnConfig,
-    SeedDatasetMultiColumnConfig,
-)
-from data_designer.engine.dataset_builders.utils.dag import topologically_sort_column_configs
-from data_designer.engine.dataset_builders.utils.errors import ConfigCompilationError
-
-
-def compile_dataset_builder_column_configs(config: DataDesignerConfig) -> list[DatasetBuilderColumnConfigT]:
-    seed_column_configs = []
-    sampler_column_configs = []
-    generated_column_configs = []
-
-    for column_config in topologically_sort_column_configs(config.columns):
-        if column_config.column_type == DataDesignerColumnType.SEED_DATASET:
-            seed_column_configs.append(column_config)
-        elif column_config.column_type == DataDesignerColumnType.SAMPLER:
-            sampler_column_configs.append(column_config)
-        else:
-            generated_column_configs.append(column_config)
-
-    compiled_column_configs = []
-
-    if len(seed_column_configs) > 0:
-        if config.seed_config is None:
-            raise ConfigCompilationError("🛑 Seed column configs require a seed configuration.")
-        compiled_column_configs.append(
-            SeedDatasetMultiColumnConfig(
-                columns=seed_column_configs,
-                source=config.seed_config.source,
-                sampling_strategy=config.seed_config.sampling_strategy,
-                selection_strategy=config.seed_config.selection_strategy,
-            )
-        )
-
-    if len(sampler_column_configs) > 0:
-        compiled_column_configs.append(
-            SamplerMultiColumnConfig(
-                columns=sampler_column_configs,
-                constraints=config.constraints or [],
-            )
-        )
-
-    if len(generated_column_configs) > 0:
-        compiled_column_configs.extend(generated_column_configs)
-
-    return compiled_column_configs
-
-
-def compile_dataset_builder_processor_configs(
-    config: DataDesignerConfig,
-) -> list[ProcessorConfig]:
-    return config.processors or []
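
`compile_dataset_builder_column_configs` topologically sorts the columns, then folds all seed-dataset columns into one multi-column config and all sampler columns into another, emitting those groups ahead of the remaining generated columns so the builder always starts with a from-scratch generator. A standalone sketch of just that grouping step, with toy dict configs in place of the data-designer types:

    # Toy version of the grouping above: seed columns first, then samplers,
    # then everything else, preserving topological order within each group.
    def compile_columns(columns: list[dict]) -> list[dict]:
        groups: dict[str, list[dict]] = {"seed_dataset": [], "sampler": [], "generated": []}
        for col in columns:  # assume `columns` is already topologically sorted
            groups.get(col["type"], groups["generated"]).append(col)

        compiled: list[dict] = []
        if groups["seed_dataset"]:
            compiled.append({"multi": "seed_dataset", "columns": groups["seed_dataset"]})
        if groups["sampler"]:
            compiled.append({"multi": "sampler", "columns": groups["sampler"]})
        compiled.extend(groups["generated"])
        return compiled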