sdg-hub 0.1.0a4__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- sdg_hub/_version.py +2 -2
- sdg_hub/blocks/__init__.py +35 -5
- sdg_hub/blocks/block.py +58 -16
- sdg_hub/blocks/llmblock.py +121 -193
- sdg_hub/blocks/utilblocks.py +500 -43
- sdg_hub/checkpointer.py +139 -0
- sdg_hub/configs/annotations/detailed_annotations.yaml +28 -0
- sdg_hub/configs/annotations/simple_annotations.yaml +9 -0
- sdg_hub/configs/knowledge/atomic_facts.yaml +1 -0
- sdg_hub/configs/knowledge/detailed_summary.yaml +1 -0
- sdg_hub/configs/knowledge/extractive_summary.yaml +1 -0
- sdg_hub/configs/knowledge/generate_questions.yaml +82 -0
- sdg_hub/configs/knowledge/generate_responses.yaml +86 -0
- sdg_hub/configs/skills/contexts.yaml +18 -11
- sdg_hub/configs/skills/evaluate_freeform_pair.yaml +79 -12
- sdg_hub/configs/skills/evaluate_freeform_questions.yaml +60 -28
- sdg_hub/configs/skills/evaluate_grounded_pair.yaml +95 -30
- sdg_hub/configs/skills/freeform_questions.yaml +21 -16
- sdg_hub/configs/skills/freeform_responses.yaml +19 -25
- sdg_hub/configs/skills/router.yaml +53 -6
- sdg_hub/flow.py +351 -21
- sdg_hub/flow_runner.py +216 -0
- sdg_hub/flows/generation/knowledge/synth_knowledge1.5.yaml +26 -9
- sdg_hub/flows/generation/skills/{agentic_improve_skill.yaml → improve_responses.yaml} +26 -31
- sdg_hub/flows/generation/skills/synth_skills.yaml +4 -4
- sdg_hub/pipeline.py +67 -12
- sdg_hub/prompts.py +21 -0
- sdg_hub/sdg.py +128 -86
- sdg_hub/utils/config_validation.py +91 -0
- sdg_hub/utils/validation_result.py +10 -0
- sdg_hub-0.1.1.dist-info/METADATA +190 -0
- sdg_hub-0.1.1.dist-info/RECORD +86 -0
- {sdg_hub-0.1.0a4.dist-info → sdg_hub-0.1.1.dist-info}/WHEEL +1 -1
- sdg_hub/blocks/filterblock.py +0 -76
- sdg_hub/blocks/iterblock.py +0 -31
- sdg_hub/blocks/rmblocks.py +0 -194
- sdg_hub/configs/annotations/simple.yaml +0 -10
- sdg_hub/configs/knowledge/data_recipe/default_recipe.yaml +0 -3
- sdg_hub/configs/skills/data_recipe/default_recipe.yaml +0 -6
- sdg_hub/flows/annotation/emotion/detailed_description.yaml +0 -19
- sdg_hub/flows/annotation/emotion/detailed_description_icl.yaml +0 -19
- sdg_hub/flows/annotation/emotion/simple.yaml +0 -19
- sdg_hub/utils/chunking.py +0 -73
- sdg_hub/utils/docprocessor.py +0 -357
- sdg_hub/utils/parse_and_convert.py +0 -392
- sdg_hub-0.1.0a4.dist-info/METADATA +0 -309
- sdg_hub-0.1.0a4.dist-info/RECORD +0 -90
- /sdg_hub/configs/{knowledge/data_recipe → reasoning}/__init__.py +0 -0
- /sdg_hub/configs/skills/{_G_.yaml → icl_examples/STEM.yaml} +0 -0
- /sdg_hub/configs/skills/{data_recipe → icl_examples}/__init__.py +0 -0
- /sdg_hub/configs/skills/{_A_.yaml → icl_examples/coding.yaml} +0 -0
- /sdg_hub/configs/skills/{_B_.yaml → icl_examples/extraction.yaml} +0 -0
- /sdg_hub/configs/skills/{_C_.yaml → icl_examples/humanities.yaml} +0 -0
- /sdg_hub/configs/skills/{_D_.yaml → icl_examples/math.yaml} +0 -0
- /sdg_hub/configs/skills/{_E_.yaml → icl_examples/reasoning.yaml} +0 -0
- /sdg_hub/configs/skills/{_F_.yaml → icl_examples/roleplay.yaml} +0 -0
- /sdg_hub/configs/skills/{_H_.yaml → icl_examples/writing.yaml} +0 -0
- {sdg_hub-0.1.0a4.dist-info → sdg_hub-0.1.1.dist-info}/licenses/LICENSE +0 -0
- {sdg_hub-0.1.0a4.dist-info → sdg_hub-0.1.1.dist-info}/top_level.txt +0 -0
sdg_hub/utils/parse_and_convert.py (removed)
@@ -1,392 +0,0 @@
# SPDX-License-Identifier: Apache-2.0

# Standard
from enum import Enum
from typing import Any
import json
import os
import random
import re
import uuid

# Third Party
from datasets import Dataset
import yaml

# First Party
# pylint: disable=ungrouped-imports
from sdg_hub import utils
from sdg_hub.logger_config import setup_logger
from .datautils import safe_concatenate_datasets

logger = setup_logger(__name__)


class TaxonomyType(Enum):
    KNOWLEDGE = "knowledge"
    SKILL = "skill"


def _unescape(s):
    return bytes(s, "utf-8").decode("utf-8").strip()


# This is a hack because the simple workflow returns a q/a pair as a single output.
# We could possibly try to ask for them separately, but it would cost twice the inference
# API calls. All of this is because the smallest models we use on small environments
# for testing and demos weren't good enough to follow the strict formatting instructions used
# in the full pipeline.
def _get_question(synth_example: dict):
    if "question" in synth_example:
        return synth_example["question"]

    if not synth_example.get("output"):
        raise utils.GenerateException(
            f"Error: output not found in synth_example: {synth_example}"
        )

    parts = synth_example["output"].split("?", 1)
    if len(parts) != 2:
        logger.warning(f"Failed to split generated q&a: {synth_example['output']}")
    return parts[0].strip() + "?" if len(parts) == 2 else ""


# This is also a hack. See the comment above _get_question.
def _get_response(synth_example: dict):
    if "response" in synth_example:
        return synth_example["response"]

    if "output" not in synth_example:
        raise utils.GenerateException(
            f"Error: output not found in synth_example: {synth_example}"
        )

    parts = synth_example["output"].split("?", 1)
    if len(parts) != 2:
        logger.warning(f"Failed to split generated q&a: {synth_example['output']}")
    return parts[1].strip() if len(parts) == 2 else parts[0].strip()


def _convert_to_hack_fmt(sample: dict, sys_prompt: str):
    """
    Convert a sample dictionary to contain 'system', 'user', and 'assistant' columns.

    Note: We should remove this function in the future when we resolve this issue and
    standardize the format to messages.
    """
    # Create user query message
    user_query = _unescape(_get_question(sample))
    response = _unescape(_get_response(sample))
    if "context" in sample:
        user_query = f"{sample['context']}\n\n{user_query}"

    sample["id"] = str(uuid.uuid4())
    sample["system"] = sys_prompt
    sample["user"] = user_query
    sample["assistant"] = response

    return sample


def _convert_to_messages(sample: dict, sys_prompt: str):
    """
    Convert a sample dictionary to contain 'messages'
    and 'metadata' columns required for training.
    """
    # Create user query message
    user_query = _unescape(_get_question(sample))
    response = _unescape(_get_response(sample))

    sample["id"] = str(uuid.uuid4())
    sample["messages"] = [
        {"content": sys_prompt, "role": "system"},
        {"content": user_query, "role": "user"},
        {"content": response, "role": "assistant"},
    ]

    return sample


def create_auxiliary_dataset(generated_dataset: Dataset):
    if "dataset_type" not in generated_dataset.column_names:
        return None

    # get module path of the current file
    module_dir = os.path.dirname(os.path.abspath(__file__))
    aux_inst_path = os.path.join(module_dir, "../configs/knowledge/auxilary_instructions.yaml")
    if os.path.isfile(
        aux_inst_path
    ):
        with open(aux_inst_path, "r", encoding="utf-8") as fp:
            auxiliary_inst = yaml.safe_load(fp)
    else:
        logger.error(f"auxiliary instructions file not found at {aux_inst_path}")
        return None
    auxiliary_ds = generated_dataset.filter(
        lambda x: x["dataset_type"] != "base_document"
    )
    unique_document_auxiliary = auxiliary_ds.to_pandas().drop_duplicates(
        subset=["document"]
    )
    unique_document_auxiliary = Dataset.from_pandas(unique_document_auxiliary)
    unique_document_auxiliary = unique_document_auxiliary.remove_columns(
        [
            col
            for col in unique_document_auxiliary.column_names
            if col
            not in [
                "raw_document",
                "document_outline",
                "domain",
                "dataset_type",
                "document",
            ]
        ]
    )
    unique_document_auxiliary = unique_document_auxiliary.rename_columns(
        {"raw_document": "context", "document": "response"}
    )

    def __create_auxiliary_ds(rec):
        instruction = random.choice(auxiliary_inst[rec["dataset_type"]])
        messages = [
            {"role": "user", "content": f"{rec['context']}\n\n{instruction}"},
            {"role": "assistant", "content": rec["response"]},
        ]
        metadata = json.dumps(
            {
                "dataset_type": rec["dataset_type"],
                "raw_document": rec["context"],
                "dataset": f"document_{rec['dataset_type']}",
                "domain": rec["domain"],
            }
        )
        return {"messages": messages, "metadata": metadata, "id": str(uuid.uuid4())}

    unique_document_auxiliary = unique_document_auxiliary.map(
        __create_auxiliary_ds, remove_columns=unique_document_auxiliary.column_names
    )
    return unique_document_auxiliary


def generate_knowledge_qa_dataset(
    generated_dataset: Dataset, keep_context_separate=False
):
    def __create_qa_row(rec):
        context = rec["document"]
        instruction = rec["question"]
        response = rec["response"]
        metadata = {
            "sdg_document": rec["document"],
            "domain": rec["domain"],
            "dataset": "document_knowledge_qa",
        }
        if "raw_document" in rec and "dataset_type" in rec:
            metadata.update(
                {
                    "raw_document": rec["raw_document"],
                    "dataset_type": rec["dataset_type"],
                }
            )
        metadata = json.dumps(metadata)
        if keep_context_separate:
            messages = [
                {"role": "user", "content": f"{instruction}"},
                {"role": "assistant", "content": response},
            ]
            return {
                "messages": messages,
                "metadata": metadata,
                "id": str(uuid.uuid4()),
                "context": context,
            }
        else:
            messages = [
                {"role": "user", "content": f"{context}\n\n{instruction}"},
                {"role": "assistant", "content": response},
            ]

            return {"messages": messages, "metadata": metadata, "id": str(uuid.uuid4())}

    knowledge_ds = generated_dataset.map(
        __create_qa_row, remove_columns=generated_dataset.column_names
    )
    return knowledge_ds


def build_raft_dataset(ds: Dataset, p, num_doc_in_context=4):
    all_context = list(set(ds["context"]))

    def _pick_documents(rec, p):
        answer_document = rec["context"]
        selected_docs = [e for e in all_context if e != answer_document]
        if len(selected_docs) > 0:
            if len(selected_docs) < num_doc_in_context:
                logger.info(
                    f"Number of unique document is {len(selected_docs)} which is less than {num_doc_in_context}. Using all the documents in the RAFT context"
                )
            if random.uniform(0, 1) < p:
                # golden/answer + distractor documents
                docs = (
                    random.sample(selected_docs, k=num_doc_in_context-1) + [answer_document]
                    if len(selected_docs) >= (num_doc_in_context-1)
                    else selected_docs + [answer_document]
                )
            else:
                # distractor documents
                docs = (
                    random.sample(selected_docs, k=num_doc_in_context)
                    if len(selected_docs) >= num_doc_in_context
                    else selected_docs
                )
        else:
            logger.info("Only 1 unique document found. Turning off RAFT styling")
            docs = [answer_document]

        random.shuffle(docs)

        docs = "\n".join(([f"Document:\n{e}\n\n" for idx, e in enumerate(docs)]))
        user_idx, user_msg = [
            (idx, rec_msg)
            for idx, rec_msg in enumerate(rec["messages"])
            if rec_msg["role"] == "user"
        ][0]
        user_inst = user_msg["content"]
        rec["messages"][user_idx]["content"] = f"{docs}\n\n{user_inst}"
        rec["messages"] = rec["messages"]
        metadata = json.loads(rec["metadata"])
        metadata["dataset"] += f"_raft_p{p}"
        rec["metadata"] = json.dumps(metadata)
        return rec

    ds = ds.map(_pick_documents, fn_kwargs={"p": p}, remove_columns=["context"])
    return ds


def _conv_pretrain(rec):
    rec["messages"] = [
        {
            "role": "pretraining",
            "content": f"<|user|>\n{rec['messages'][0]['content']}\n<|assistant|>\n{rec['messages'][1]['content']}",
        }
    ]
    return rec


def create_knowledge_regular_ds(generated_dataset: Dataset):
    # Phase 1.0
    knowledge_ds = generate_knowledge_qa_dataset(
        generated_dataset, keep_context_separate=True
    )
    knowledge_ds = build_raft_dataset(knowledge_ds, p=0.4)

    auxiliary_dataset = create_auxiliary_dataset(generated_dataset)
    if auxiliary_dataset is not None:
        transformed_data = safe_concatenate_datasets([knowledge_ds, auxiliary_dataset])
    else:
        transformed_data = knowledge_ds
    return transformed_data


def create_knowledge_pretraining_ds(generated_dataset: Dataset):
    # Phase 0.7
    knowledge_ds = generate_knowledge_qa_dataset(
        generated_dataset, keep_context_separate=False
    )
    knowledge_ds = knowledge_ds.map(_conv_pretrain)

    auxiliary_dataset = create_auxiliary_dataset(generated_dataset)
    if auxiliary_dataset is not None:
        auxiliary_dataset = auxiliary_dataset.map(_conv_pretrain)
        transformed_data = safe_concatenate_datasets([knowledge_ds, auxiliary_dataset])
    else:
        transformed_data = knowledge_ds
    return transformed_data


def post_process_mcq(ds: Dataset, is_mmlu_eval: bool = False) -> Dataset:
    """Filters out badly generated data, adds dataset type column

    Args:
        ds (Dataset): mcq generated dataset from mmmlu pipeline
        is_mmlu_eval (bool, optional): _description_. Defaults to False.

    Returns:
        Dataset: Hf Dataset with new column, filtered dataset
    """
    ds = ds.filter(lambda x: ")" in x["mmlubench_answer"])
    ds = ds.filter(lambda x: "A)" in x["mmlubench_question"])
    ds = ds.add_column("dataset_type", ["mcq_qa"] * ds.num_rows)
    if is_mmlu_eval:
        return format_mmlu_style(ds)
    return ds


def extract_options(text: str) -> list[Any]:
    """regex to extract options from mcq

    Args:
        text (str): question with options/mcq choices

    Returns:
        list[Any]: options under question that match the pattern.
    """
    # Use a regular expression to find patterns and capture the text after the letter and parenthesis
    pattern = r"\b[A-Z]\) (.+)"
    matches = re.findall(pattern, text)
    return matches


def format_mmlu_style(ds: Dataset) -> Dataset:
    """Format the dataset according to lm-harness mmlu requirement.

    Args:
        ds (Dataset): input dataset

    Returns:
        Dataset: formated hf dataset
    """
    ds = ds.map(
        lambda x: {"answer": x["mmlubench_answer"][: x["mmlubench_answer"].index(")")]}
    )
    ds = ds.map(lambda x: {"choices": extract_options(x["mmlubench_question"])})
    ds = ds.map(
        lambda x: {
            "question": x["mmlubench_question"][
                : x["mmlubench_question"].index("A)")
            ].strip()
        }
    )
    ds = ds.rename_columns({"domain": "subject"})
    ds = ds.filter(lambda x: x["choices"])
    ds = ds.filter(lambda x: len(x["choices"]) == 4)
    ds = ds.filter(lambda x: x["answer"] in ["A", "B", "C", "D"])
    ds = ds.class_encode_column("answer")
    return ds


def create_mmlu_evaluation_dataset(generate_mcq_dataset: Dataset) -> Dataset:
    """Filter, format and return mcq dataset that is compatible with lm-harness for doing mmlu-style evaluation

    Args:
        generate_mcq_dataset (Dataset): sdg generated mcq dataset
    Returns:
        Dataset: MMLU MCQ datast
    """
    mmlu_dataset = post_process_mcq(generate_mcq_dataset, is_mmlu_eval=True)
    return mmlu_dataset


def create_mmlu_evaluation_yaml(task_name, eval_data_file_path, yaml_file_path):
    """
    Prepare Task Yaml that will be used in lm_eval_harness to evaluate knowledge using mmlu style metric
    """
    task_yaml = {
        "task": task_name,
        "dataset_kwargs": {"data_files": {"test": eval_data_file_path}},
        "include": "_default_mmlu_pr_template_yaml",
        "group": "mmlu_pr",
    }
    with open(yaml_file_path, "w", encoding="utf-8") as yaml_file:
        yaml.dump(task_yaml, yaml_file, default_flow_style=False)
sdg_hub-0.1.0a4.dist-info/METADATA (removed)
@@ -1,309 +0,0 @@
Metadata-Version: 2.4
Name: sdg_hub
Version: 0.1.0a4
Summary: Synthetic Data Generation
Author-email: Red Hat AI Innovation <abhandwa@redhat.com>
License: Apache-2.0
Project-URL: homepage, https://ai-innovation.team/
Project-URL: source, https://github.com/Red-Hat-AI-Innovation-Team/sdg_hub
Project-URL: issues, https://github.com/Red-Hat-AI-Innovation-Team/sdg_hub/issues
Classifier: Development Status :: 3 - Alpha
Classifier: Environment :: Console
Classifier: License :: OSI Approved :: Apache Software License
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: POSIX :: Linux
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: Implementation :: CPython
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: click<9.0.0,>=8.1.7
Requires-Dist: datasets<4.0.0,>=2.18.0
Requires-Dist: httpx<1.0.0,>=0.25.0
Requires-Dist: jinja2
Requires-Dist: langchain-text-splitters
Requires-Dist: openai<2.0.0,>=1.13.3
Requires-Dist: rich
Requires-Dist: tenacity!=8.4.0,>=8.3.0
Requires-Dist: tqdm<5.0.0,>=4.66.2
Dynamic: license-file

# sdg_hub: Synthetic Data Generation Toolkit for LLMs

[](https://github.com/Red-Hat-AI-Innovation-Team/sdg_hub/actions/workflows/test.yml)
[](https://codecov.io/gh/Red-Hat-AI-Innovation-Team/sdg_hub)

sdg_hub is a modular, scalable, and efficient solution for creating synthetic data generation workflows in a "no-code" manner. At its core, this framework is designed to simplify data creation for LLMs, allowing users to chain computational units and build powerful pipelines for generating data and processing tasks.


## Installation

Latest release from PyPI

```sh
pip install sdg-hub
```

Latest main branch
```sh
pip install git+https://github.com/Red-Hat-AI-Innovation-Team/sdg_hub.git
```

## Core Design Principles

The framework is built around the following principles:

1. **Modular Design**: Highly composable blocks form the building units of the framework, allowing users to build workflows effortlessly.
2. **No-Code Workflow Creation**: Specify workflows using simple YAML configuration files.
3. **Scalability and Performance**: Optimized for handling large-scale workflows with millions of records.

---

## Framework Architecture

*(framework architecture diagram)*

### Blocks: The Fundamental Unit

At the heart of the framework is the **Block**. Each block is a self-contained computational unit that performs specific tasks, such as:

- Making LLM calls
- Performing data transformations
- Applying filters

Blocks are designed to be:
- **Modular**: Reusable across multiple pipelines.
- **Composable**: Easily chained together to create workflows.

These blocks are implemented in the [src/sdg_hub/blocks](src/sdg_hub/blocks) directory.

### Prompts

Prompts are at the core of how LLMs are instructed within SDG Hub. Each `LLMBlock` is associated with a prompt configuration file written in YAML, allowing users to define the exact behavior of the language model, including system instructions, generation principles, and output formatting.

#### Prompt YAML Structure

A typical prompt YAML file looks like this:

```yaml
system: You are a helpful assistant that can summarize text.
introduction: Give me a short summary of the text.
principles:
  - Do not add any new information.
  - Do not miss any key points from the provided text.
examples:
  - input: Red Hat announced the acquisition of Neural Magic...
    output: Red Hat acquired Neural Magic to enhance its AI optimization capabilities.
generation: Here is the document to summarize: {{document}}
```

#### Key Fields
* `system`: A high-level instruction that sets the persona or behavior of the model.
* `introduction`: Optional introduction to set context for the user.
* `principles`: A list of guiding constraints or rules the model should follow during generation.
* `examples`: Few-shot examples (optional) to guide output format or tone.
* `generation`: The actual template used to generate the model input. This supports variable injection using {{variable_name}}.
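To make the `{{variable_name}}` injection concrete, here is a minimal sketch that loads such a prompt file with PyYAML and renders its `generation` template with Jinja2 (both are declared dependencies). It is illustrative only: the file path and sample row are assumptions, and `LLMBlock` performs its own rendering internally, whose exact details may differ.

```python
# Illustrative sketch only -- not the LLMBlock internals.
# Assumes a prompt file like the example above saved at prompts/summarization.yaml.
import yaml
from jinja2 import Template

with open("prompts/summarization.yaml", "r", encoding="utf-8") as fp:
    prompt_cfg = yaml.safe_load(fp)

# A dataset row supplies the template variables (here: `document`).
row = {"document": "Red Hat announced the acquisition of Neural Magic..."}
user_prompt = Template(prompt_cfg["generation"]).render(**row)

print(prompt_cfg["system"])   # system persona
print(user_prompt)            # rendered generation prompt
```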
### YAML-Based Workflow: The Flow

The YAML configuration file, known as the **Flow**, is central to defining data generation workflows in the SDG Framework. A Flow describes how blocks and pipelines are orchestrated to process and generate data efficiently. By leveraging YAML, users can create highly customizable and modular workflows without writing any code.

#### Key Features of a Flow

1. **Modular Design**:
   - Flows are composed of blocks, which can be chained together into pipelines.
   - Each block performs a specific task, such as generating, filtering, or transforming data.

2. **Reusability**:
   - Blocks and configurations defined in a Flow can be reused across different workflows.
   - YAML makes it easy to tweak or extend workflows without significant changes.

3. **Ease of Configuration**:
   - Users can specify block types, configurations, and data processing details in a simple and intuitive manner.


## Hello World Example

Let’s say you have a document and want to generate a concise summary using an LLM. Here’s how simple that is in sdg\_hub:

```yaml
- block_type: LLMBlock
  block_config:
    block_name: gen_summary
    config_path: prompts/summarization.yaml
    model_id: meta-llama/Llama-3.3-70B-Instruct
    output_cols:
      - summary
  gen_kwargs:
    max_tokens: 512
```

Want to go further? Add another block to extract keywords from the summary:

```yaml
- block_type: LLMBlock
  block_config:
    block_name: gen_keywords
    config_path: prompts/keywords.yaml
    model_id: meta-llama/Llama-3.3-70B-Instruct
    output_cols:
      - keywords
  gen_kwargs:
    max_tokens: 64
```

Just like that, you’ve built a multi-step LLM workflow using nothing but YAML.
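Because a flow is plain YAML, it can also be inspected programmatically before it is run. The sketch below relies only on the structure shown above (a list of entries with `block_type` and `block_config`); the file name is an assumption, and executing the flow end to end is left to sdg_hub's own flow runner.

```python
# Illustrative sketch only: list the blocks defined in a flow file.
# Assumes the two-block flow above is saved as flows/summarize_then_keywords.yaml.
import yaml

with open("flows/summarize_then_keywords.yaml", "r", encoding="utf-8") as fp:
    flow = yaml.safe_load(fp)

for block in flow:
    cfg = block["block_config"]
    print(f"{block['block_type']}: {cfg['block_name']} -> {', '.join(cfg['output_cols'])}")
```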
## Available Blocks

The SDG Framework provides a rich set of blocks for different data processing needs. Here's a comprehensive overview of the available blocks and when to use them:

### Base Block Class

The framework is built around the abstract `Block` class, which serves as the foundation for all other blocks:

- **Purpose**: Provides core functionality and interface for all blocks
- **Key Features**:
  - Template validation for input data
  - Configuration loading from YAML files
  - Standardized block initialization
  - Common interface for all blocks
- **Core Methods**:
  - `_validate`: Validates input data against templates
  - `_load_config`: Loads configuration from YAML files
  - `generate`: Abstract method for block execution

All blocks inherit from this base class, ensuring consistent behavior and interface across the framework.
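As a rough sketch of that contract, a custom block boils down to some configuration plus a `generate` method that maps a dataset to a dataset. The class below only mirrors the interface described above; it deliberately does not subclass the real `Block` base class, whose constructor and helper signatures may differ.

```python
# Rough sketch of the block contract described above -- not the actual base class.
from datasets import Dataset


class UppercaseBlock:
    """Toy block-shaped unit: writes an upper-cased copy of one column."""

    def __init__(self, block_name: str, source_col: str, target_col: str) -> None:
        self.block_name = block_name
        self.source_col = source_col
        self.target_col = target_col

    def generate(self, samples: Dataset) -> Dataset:
        # Map over the dataset, adding the new column while keeping the rest.
        return samples.map(lambda row: {self.target_col: row[self.source_col].upper()})
```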
### LLM Blocks

1. **LLMBlock**
   - **Purpose**: Generate text using language models
   - **Use Cases**:
     - Generating questions, responses, or any text content
     - Single-prompt generation with structured outputs
   - **Features**:
     - Supports batched processing
     - Configurable output parsing
     - Template-based prompt generation

2. **ConditionalLLMBlock**
   - **Purpose**: Generate text based on conditional logic
   - **Use Cases**:
     - Different prompt templates based on input conditions
     - Multi-path text generation workflows
   - **Features**:
     - Multiple config paths for different conditions
     - Dynamic prompt selection

3. **LLMLogProbBlock**
   - **Purpose**: Generate text with log probabilities
   - **Use Cases**:
     - Analyzing model confidence
     - Quality scoring of generations
   - **Features**:
     - Returns top-k log probabilities
     - JSON-formatted output

4. **LLMMessagesBlock**
   - **Purpose**: Chat-based text generation
   - **Use Cases**:
     - Multi-turn conversations
     - Chat-based interactions
   - **Features**:
     - Supports message history
     - Chat completion API

### Filtering and Processing Blocks

1. **FilterByValueBlock**
   - **Purpose**: Filter datasets based on column values
   - **Use Cases**:
     - Removing unwanted samples
     - Data cleaning
     - Quality filtering
   - **Features**:
     - Multiple filter operations
     - Type conversion support
     - Parallel processing

2. **IterBlock**
   - **Purpose**: Iterative processing of data
   - **Use Cases**:
     - Multiple generation attempts
     - Iterative refinement
   - **Features**:
     - Configurable number of iterations
     - Nested block execution

### Utility Blocks

1. **SamplePopulatorBlock**
   - **Purpose**: Populate samples with configuration data
   - **Use Cases**:
     - Adding metadata
     - Configuration injection

2. **SelectorBlock**
   - **Purpose**: Select data based on mapping
   - **Use Cases**:
     - Conditional data selection
     - Data routing

3. **CombineColumnsBlock**
   - **Purpose**: Merge multiple columns
   - **Use Cases**:
     - Text concatenation
     - Feature combination

4. **FlattenColumnsBlock**
   - **Purpose**: Convert wide to long format
   - **Use Cases**:
     - Data reshaping
     - Variable-value pairs

5. **DuplicateColumns**
   - **Purpose**: Create column copies
   - **Use Cases**:
     - Data preservation
     - Multiple processing paths

6. **RenameColumns**
   - **Purpose**: Rename dataset columns
   - **Use Cases**:
     - Standardizing column names
     - Data reorganization

7. **SetToMajorityValue**
   - **Purpose**: Replace values with majority
   - **Use Cases**:
     - Data normalization
     - Outlier handling

---
### Dataflow and Storage

- **Data Representation**: Dataflow between blocks and pipelines is handled using **Hugging Face Datasets**, which are based on Arrow tables. This provides:
  - Native parallelization capabilities (e.g., maps, filters).
  - Support for efficient data transformations.

- **Data Checkpoints**: Intermediate caches of generated data. Checkpoints allow users to:
  - Resume workflows from the last successful state if interrupted.
  - Improve reliability for long-running workflows.
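The snippet below sketches the checkpoint idea with plain Hugging Face Datasets calls. It is illustrative only: the 0.1.1 release adds a dedicated checkpointer (`sdg_hub/checkpointer.py`, new in this diff) with its own behavior, and the directory layout and column name here are assumptions.

```python
# Illustrative sketch of resuming from an intermediate cache -- not sdg_hub's checkpointer.
import os

from datasets import Dataset, load_from_disk

CHECKPOINT_DIR = "checkpoints/gen_summary"

if os.path.isdir(CHECKPOINT_DIR):
    ds = load_from_disk(CHECKPOINT_DIR)  # resume from the last saved state
else:
    ds = Dataset.from_dict({"document": ["Some document text to summarize."]})
    # ... run the expensive generation step here ...
    ds.save_to_disk(CHECKPOINT_DIR)      # cache the intermediate result
```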
## Examples

For sample use cases and implementation examples, please refer to the [examples](examples) directory. This directory contains various examples demonstrating different workflows and use cases of the SDG Framework.