sdg-hub 0.1.4__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- sdg_hub/__init__.py +28 -1
- sdg_hub/_version.py +2 -2
- sdg_hub/core/__init__.py +22 -0
- sdg_hub/core/blocks/__init__.py +58 -0
- sdg_hub/core/blocks/base.py +313 -0
- sdg_hub/core/blocks/deprecated_blocks/__init__.py +29 -0
- sdg_hub/core/blocks/deprecated_blocks/combine_columns.py +93 -0
- sdg_hub/core/blocks/deprecated_blocks/duplicate_columns.py +88 -0
- sdg_hub/core/blocks/deprecated_blocks/filter_by_value.py +103 -0
- sdg_hub/core/blocks/deprecated_blocks/flatten_columns.py +94 -0
- sdg_hub/core/blocks/deprecated_blocks/llmblock.py +479 -0
- sdg_hub/core/blocks/deprecated_blocks/rename_columns.py +88 -0
- sdg_hub/core/blocks/deprecated_blocks/sample_populator.py +58 -0
- sdg_hub/core/blocks/deprecated_blocks/selector.py +97 -0
- sdg_hub/core/blocks/deprecated_blocks/set_to_majority_value.py +88 -0
- sdg_hub/core/blocks/evaluation/__init__.py +9 -0
- sdg_hub/core/blocks/evaluation/evaluate_faithfulness_block.py +564 -0
- sdg_hub/core/blocks/evaluation/evaluate_relevancy_block.py +564 -0
- sdg_hub/core/blocks/evaluation/verify_question_block.py +564 -0
- sdg_hub/core/blocks/filtering/__init__.py +12 -0
- sdg_hub/core/blocks/filtering/column_value_filter.py +188 -0
- sdg_hub/core/blocks/llm/__init__.py +27 -0
- sdg_hub/core/blocks/llm/client_manager.py +398 -0
- sdg_hub/core/blocks/llm/config.py +336 -0
- sdg_hub/core/blocks/llm/error_handler.py +368 -0
- sdg_hub/core/blocks/llm/llm_chat_block.py +542 -0
- sdg_hub/core/blocks/llm/llm_chat_with_parsing_retry_block.py +491 -0
- sdg_hub/core/blocks/llm/prompt_builder_block.py +368 -0
- sdg_hub/core/blocks/llm/text_parser_block.py +357 -0
- sdg_hub/core/blocks/registry.py +331 -0
- sdg_hub/core/blocks/transform/__init__.py +23 -0
- sdg_hub/core/blocks/transform/duplicate_columns.py +88 -0
- sdg_hub/core/blocks/transform/index_based_mapper.py +225 -0
- sdg_hub/core/blocks/transform/melt_columns.py +126 -0
- sdg_hub/core/blocks/transform/rename_columns.py +69 -0
- sdg_hub/core/blocks/transform/text_concat.py +102 -0
- sdg_hub/core/blocks/transform/uniform_col_val_setter.py +101 -0
- sdg_hub/core/flow/__init__.py +20 -0
- sdg_hub/core/flow/base.py +1209 -0
- sdg_hub/core/flow/checkpointer.py +333 -0
- sdg_hub/core/flow/metadata.py +389 -0
- sdg_hub/core/flow/migration.py +198 -0
- sdg_hub/core/flow/registry.py +393 -0
- sdg_hub/core/flow/validation.py +277 -0
- sdg_hub/{utils → core/utils}/__init__.py +7 -4
- sdg_hub/core/utils/datautils.py +63 -0
- sdg_hub/core/utils/error_handling.py +208 -0
- sdg_hub/core/utils/flow_id_words.yaml +231 -0
- sdg_hub/core/utils/flow_identifier.py +94 -0
- sdg_hub/{utils → core/utils}/path_resolution.py +2 -2
- sdg_hub/core/utils/yaml_utils.py +59 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/atomic_facts.yaml +40 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/detailed_summary.yaml +13 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/evaluate_faithfulness.yaml +64 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/evaluate_question.yaml +29 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/evaluate_relevancy.yaml +81 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/extractive_summary.yaml +13 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/flow.yaml +192 -0
- sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/generate_questions_responses.yaml +54 -0
- sdg_hub-0.2.1.dist-info/METADATA +221 -0
- sdg_hub-0.2.1.dist-info/RECORD +68 -0
- sdg_hub/blocks/__init__.py +0 -42
- sdg_hub/blocks/block.py +0 -96
- sdg_hub/blocks/llmblock.py +0 -375
- sdg_hub/blocks/openaichatblock.py +0 -556
- sdg_hub/blocks/utilblocks.py +0 -597
- sdg_hub/checkpointer.py +0 -139
- sdg_hub/configs/annotations/cot_reflection.yaml +0 -34
- sdg_hub/configs/annotations/detailed_annotations.yaml +0 -28
- sdg_hub/configs/annotations/detailed_description.yaml +0 -10
- sdg_hub/configs/annotations/detailed_description_icl.yaml +0 -32
- sdg_hub/configs/annotations/simple_annotations.yaml +0 -9
- sdg_hub/configs/knowledge/__init__.py +0 -0
- sdg_hub/configs/knowledge/atomic_facts.yaml +0 -46
- sdg_hub/configs/knowledge/auxilary_instructions.yaml +0 -35
- sdg_hub/configs/knowledge/detailed_summary.yaml +0 -18
- sdg_hub/configs/knowledge/evaluate_faithfulness.yaml +0 -68
- sdg_hub/configs/knowledge/evaluate_question.yaml +0 -38
- sdg_hub/configs/knowledge/evaluate_relevancy.yaml +0 -84
- sdg_hub/configs/knowledge/extractive_summary.yaml +0 -18
- sdg_hub/configs/knowledge/generate_code_questions_responses.yaml +0 -39
- sdg_hub/configs/knowledge/generate_questions.yaml +0 -82
- sdg_hub/configs/knowledge/generate_questions_responses.yaml +0 -56
- sdg_hub/configs/knowledge/generate_responses.yaml +0 -86
- sdg_hub/configs/knowledge/mcq_generation.yaml +0 -83
- sdg_hub/configs/knowledge/router.yaml +0 -12
- sdg_hub/configs/knowledge/simple_generate_qa.yaml +0 -34
- sdg_hub/configs/reasoning/__init__.py +0 -0
- sdg_hub/configs/reasoning/dynamic_cot.yaml +0 -40
- sdg_hub/configs/skills/__init__.py +0 -0
- sdg_hub/configs/skills/analyzer.yaml +0 -48
- sdg_hub/configs/skills/annotation.yaml +0 -36
- sdg_hub/configs/skills/contexts.yaml +0 -28
- sdg_hub/configs/skills/critic.yaml +0 -60
- sdg_hub/configs/skills/evaluate_freeform_pair.yaml +0 -111
- sdg_hub/configs/skills/evaluate_freeform_questions.yaml +0 -78
- sdg_hub/configs/skills/evaluate_grounded_pair.yaml +0 -119
- sdg_hub/configs/skills/evaluate_grounded_questions.yaml +0 -51
- sdg_hub/configs/skills/freeform_questions.yaml +0 -34
- sdg_hub/configs/skills/freeform_responses.yaml +0 -39
- sdg_hub/configs/skills/grounded_questions.yaml +0 -38
- sdg_hub/configs/skills/grounded_responses.yaml +0 -59
- sdg_hub/configs/skills/icl_examples/STEM.yaml +0 -56
- sdg_hub/configs/skills/icl_examples/__init__.py +0 -0
- sdg_hub/configs/skills/icl_examples/coding.yaml +0 -97
- sdg_hub/configs/skills/icl_examples/extraction.yaml +0 -36
- sdg_hub/configs/skills/icl_examples/humanities.yaml +0 -71
- sdg_hub/configs/skills/icl_examples/math.yaml +0 -85
- sdg_hub/configs/skills/icl_examples/reasoning.yaml +0 -30
- sdg_hub/configs/skills/icl_examples/roleplay.yaml +0 -45
- sdg_hub/configs/skills/icl_examples/writing.yaml +0 -80
- sdg_hub/configs/skills/judge.yaml +0 -53
- sdg_hub/configs/skills/planner.yaml +0 -67
- sdg_hub/configs/skills/respond.yaml +0 -8
- sdg_hub/configs/skills/revised_responder.yaml +0 -78
- sdg_hub/configs/skills/router.yaml +0 -59
- sdg_hub/configs/skills/simple_generate_qa_freeform.yaml +0 -27
- sdg_hub/configs/skills/simple_generate_qa_grounded.yaml +0 -31
- sdg_hub/flow.py +0 -477
- sdg_hub/flow_runner.py +0 -450
- sdg_hub/flows/generation/knowledge/mmlu_bench.yaml +0 -13
- sdg_hub/flows/generation/knowledge/simple_knowledge.yaml +0 -12
- sdg_hub/flows/generation/knowledge/synth_knowledge.yaml +0 -89
- sdg_hub/flows/generation/knowledge/synth_knowledge1.5.yaml +0 -136
- sdg_hub/flows/generation/skills/improve_responses.yaml +0 -103
- sdg_hub/flows/generation/skills/simple_freeform_skill.yaml +0 -12
- sdg_hub/flows/generation/skills/simple_grounded_skill.yaml +0 -12
- sdg_hub/flows/generation/skills/synth_grounded_skills.yaml +0 -80
- sdg_hub/flows/generation/skills/synth_skills.yaml +0 -59
- sdg_hub/pipeline.py +0 -121
- sdg_hub/prompts.py +0 -80
- sdg_hub/registry.py +0 -122
- sdg_hub/sdg.py +0 -206
- sdg_hub/utils/config_validation.py +0 -91
- sdg_hub/utils/datautils.py +0 -14
- sdg_hub/utils/error_handling.py +0 -94
- sdg_hub/utils/validation_result.py +0 -10
- sdg_hub-0.1.4.dist-info/METADATA +0 -190
- sdg_hub-0.1.4.dist-info/RECORD +0 -89
- sdg_hub/{logger_config.py → core/utils/logger_config.py} +1 -1
- /sdg_hub/{configs/__init__.py → flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/README.md} +0 -0
- /sdg_hub/{configs/annotations → flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab}/__init__.py +0 -0
- {sdg_hub-0.1.4.dist-info → sdg_hub-0.2.1.dist-info}/WHEEL +0 -0
- {sdg_hub-0.1.4.dist-info → sdg_hub-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {sdg_hub-0.1.4.dist-info → sdg_hub-0.2.1.dist-info}/top_level.txt +0 -0
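The listing above amounts to a wholesale repackaging: the flat `sdg_hub.blocks` and `sdg_hub.flow` modules of 0.1.4 are deleted, and 0.2.1 introduces a nested `sdg_hub.core` package that keeps the legacy blocks available under `core/blocks/deprecated_blocks/`. A hedged sketch of what this means for imports (module paths are taken directly from the listing; the diff does not show what each module exports, so no class names are assumed):

```python
# Import-path sketch inferred from the file listing above; module paths mirror
# the listed files. Illustrative only: consult the 0.2.1 package for the
# actual public API.

# sdg-hub 0.1.4 (flat layout, removed in 0.2.1):
#   import sdg_hub.blocks.llmblock   # sdg_hub/blocks/llmblock.py
#   import sdg_hub.flow              # sdg_hub/flow.py

# sdg-hub 0.2.1 (nested core layout):
import sdg_hub.core.blocks.llm.llm_chat_block          # sdg_hub/core/blocks/llm/llm_chat_block.py
import sdg_hub.core.flow.base                          # sdg_hub/core/flow/base.py
import sdg_hub.core.blocks.deprecated_blocks.llmblock  # legacy block kept for migration
```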
--- a/sdg_hub/configs/knowledge/generate_questions.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-# This YAML file defines a prompt template for generating educational Q&A pairs from textbook content.
-# The prompt is designed to create comprehensive, domain-specific questions and answers that
-# effectively teach and reinforce key concepts from educational materials.
-#
-# Structure:
-# - system: Sets the AI's role as a knowledgeable assistant
-# - introduction: Main instruction for creating Q&A pairs from textbook chapters
-# - principles: Detailed guidelines for question formulation and educational value
-# - examples: Example Q&A pairs showing expected format and style
-# - generation: Template for the document to be used for Q&A generation
-#
-# Key Features:
-# - Domain-specific question generation (science, legal, etc.)
-# - Multiple difficulty levels and question types
-# - Self-contained questions without external references
-# - Focus on key concepts and learning objectives
-# - Educational value and teaching effectiveness
-#
-# Question Guidelines:
-# - Must be self-contained and independently answerable
-# - Should cover basic recall to advanced comprehension
-# - Include multiple-choice, short answer, and essay types
-# - Align with chapter learning objectives
-# - Avoid references to specific sections or figures
-#
-# Response Format:
-# - Questions and answers are clearly separated
-# - Each response ends with [End] tag
-# - [UNANSWERABLE] for unsuitable content
-#
-# Usage:
-# This prompt is used to generate educational Q&A pairs that effectively teach
-# and reinforce concepts from textbook chapters while maintaining educational
-# value and accessibility.
-
-system: You are a very knowledgeable AI Assistant that will faithfully assist the user with their task.
-
-introduction: Develop a series of educational questions from a chapter in a {{domain}} textbook.
-
-principles: |
-  The questions should:
-  * Self-contained – understandable without needing to reference tables, figures, or specific text sections.
-  * Focus on the provided example and follow the format and style of the provided examples.
-  * Relevant to the subject – based on the textbook’s domain (e.g., legal, scientific, etc.).
-  * Independently answerable – avoid direct references to theorems, figures, or text numbers.
-  * Varied in difficulty - Make difficult same as the provided examples.
-  * Use same format as the provided examples.
-
-  Strictly follow this format for each question your generate while responding
-
-  [QUESTION]
-  <Insert question here>
-  [END]
-
-
-examples: |
-  Here are some examples of questions:
-
-  [Document]
-  {{icl_document}}
-
-  [QUESTION]
-  {{icl_query_1}}
-  [END]
-
-  [QUESTION]
-  {{icl_query_2}}
-  [END]
-
-  [QUESTION]
-  {{icl_query_3}}
-  [END]
-
-generation: |
-  Here is the document:
-
-  [DOCUMENT]
-  {{document_outline}}
-  {{document}}
-
-start_tags: [""]
-end_tags: [""]
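This template, like the other deleted configs below, is plain YAML whose string fields carry Jinja-style `{{ ... }}` placeholders (`domain`, `icl_*`, `document_outline`, `document`). A minimal rendering sketch, assuming a generic PyYAML + Jinja2 pipeline rather than sdg_hub's actual loader:

```python
# Minimal sketch: load the deleted config and render its sections with Jinja2.
# The loader and section order here are assumptions, not sdg_hub's code path.
import yaml
from jinja2 import Template

with open("generate_questions.yaml") as f:
    cfg = yaml.safe_load(f)

variables = {
    "domain": "science",
    "icl_document": "<example document>",
    "icl_query_1": "<q1>", "icl_query_2": "<q2>", "icl_query_3": "<q3>",
    "document_outline": "<outline>",
    "document": "<document to generate questions from>",
}

# Concatenate the prompt sections in the order the template defines them.
prompt = "\n".join(
    Template(cfg[key]).render(**variables)
    for key in ("introduction", "principles", "examples", "generation")
)
print(cfg["system"])
print(prompt)
```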
--- a/sdg_hub/configs/knowledge/generate_questions_responses.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-system: You are a very knowledgeable AI Assistant that will faithfully assist the user with their task.
-
-introduction: Develop a series of educational question and answer pairs from a chapter in a {{domain}} textbook.
-
-principles: |
-  The questions should:
-  * Be self-contained, not requiring references to tables, figures, or specific sections in the text for understanding.
-  * Focus on teaching and reinforcing the key knowledge and concepts presented in the chapter.
-  * Avoid sections with minimal educational content like index pages or prefaces. In such cases, respond with [UNANSWERABLE].
-  * Be directly relevant to the textbook's domain. For instance, in a science textbook, questions should revolve around scientific terms, definitions, and practical applications, while in a legal textbook, they should cover legal principles, case law, and precedents.
-  * Be formulated to allow for independent answers, avoiding direct references to specific theorems or text sections. For example, rather than asking 'Under what conditions is the fixed point of a function unique according to Theorem 3.1.5?', ask 'How does the Fixed Point Iteration method contribute to understanding function uniqueness?'
-  * Span a range of difficulty levels to accommodate a diverse student audience, from basic understanding to advanced comprehension.
-  * Include a variety of question types such as multiple-choice for basic recall, short answer for deeper understanding, and essay or problem-solving questions to test application and analysis skills.
-  * Align closely with the learning objectives of the textbook or the specific chapter, ensuring that the questions test the fundamental concepts and skills that the chapter aims to impart.
-
-  Strictly follow this format for each question answer pair your generate while responding
-
-  [QUESTION]
-  <Insert question here>
-  [ANSWER]
-  <Insert answer here>
-  [END]
-
-
-  Each question and answer pair should stand alone as a mini-lesson, encapsulating a key concept or idea from the chapter in a way that is accessible and informative without requiring the reader to refer back to the textbook.
-
-examples: |
-  Here are some examples of questions:
-
-  [Document]
-  {{icl_document}}
-
-  [QUESTION]
-  {{icl_query_1}}
-  [ANSWER]
-  {{icl_response_1}}
-  [END]
-
-  [QUESTION]
-  {{icl_query_2}}
-  [ANSWER]
-  {{icl_response_2}}
-  [END]
-
-  [QUESTION]
-  {{icl_query_3}}
-  [ANSWER]
-  {{icl_response_3}}
-  [END]
-
-generation: |
-  Here is the document:
-
-  [DOCUMENT]
-  {{document_outline}}
-  {{document}}
--- a/sdg_hub/configs/knowledge/generate_responses.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-# This YAML file defines a prompt template for generating educational Q&A pairs from textbook content.
-# The prompt is designed to create comprehensive, domain-specific questions and answers that
-# effectively teach and reinforce key concepts from educational materials.
-#
-# Structure:
-# - system: Sets the AI's role as a knowledgeable assistant
-# - introduction: Main instruction for creating Q&A pairs from textbook chapters
-# - principles: Detailed guidelines for question formulation and educational value
-# - examples: Example Q&A pairs showing expected format and style
-# - generation: Template for the document to be used for Q&A generation
-#
-# Key Features:
-# - Domain-specific question generation (science, legal, etc.)
-# - Multiple difficulty levels and question types
-# - Self-contained questions without external references
-# - Focus on key concepts and learning objectives
-# - Educational value and teaching effectiveness
-#
-# Question Guidelines:
-# - Must be self-contained and independently answerable
-# - Should cover basic recall to advanced comprehension
-# - Include multiple-choice, short answer, and essay types
-# - Align with chapter learning objectives
-# - Avoid references to specific sections or figures
-#
-# Response Format:
-# - Questions and answers are clearly separated
-# - Each response ends with [End] tag
-# - [UNANSWERABLE] for unsuitable content
-#
-# Usage:
-# This prompt is used to generate educational Q&A pairs that effectively teach
-# and reinforce concepts from textbook chapters while maintaining educational
-# value and accessibility.
-
-system: You are a very knowledgeable AI Assistant that will faithfully assist the user with their task.
-
-introduction: Answer the question based on the provided document.
-
-principles: |
-  The answers should:
-  * The answer is grounded in the provided document.
-  * Follows the format and style of the provided examples.
-  * Directly answers the question.
-  Strictly follow this format for each question your generate while responding
-
-  [ANSWER]
-  <Insert answer here>
-  [END]
-
-
-examples: |
-  Here are some examples of answers for given questions for a document:
-
-  [Document]
-  {{icl_document}}
-
-  [QUESTION]
-  {{icl_query_1}}
-
-  [ANSWER]
-  {{icl_response_1}}
-  [END]
-
-  [QUESTION]
-  {{icl_query_2}}
-
-  [ANSWER]
-  {{icl_response_2}}
-  [END]
-
-
-generation: |
-  Here is the document:
-
-  [DOCUMENT]
-  {{document_outline}}
-  {{document}}
-
-  [QUESTION]
-  {{question}}
-
-  [ANSWER]
-
-start_tags: [""]
-end_tags: [""]
--- a/sdg_hub/configs/knowledge/mcq_generation.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-system: You are a helpful assistant, that is an expert at generating question and answers based on given guidelines.
-
-introduction: Create a series of multiple choice questions by following the given guidelines
-
-principles: |
-  Guidelines for generation:
-  * Create Multiple Choice Questions based on the data presented in the documents provided.
-  * Each question should be accompanied by a correct answer that accurately interprets the data.
-  * Ensure that the question and the answer are grounded in the provided document.
-  * Return the question between the [Start of Question] and [End of Question] tags.
-  * Return the answer within the [Start of Answer] and [End of Answer] tags.
-
-  Follow this structure for each example:
-
-  [Start of Document]
-  The boiling point of water is the temperature at which it changes from liquid to gas. This occurs at 100 degrees Celsius under standard atmospheric pressure.
-  [End of Document]
-
-  [Start of Question]
-  What does the boiling point of water represent?
-
-  A) Solidification
-  B) Evaporation
-  C) Condensation
-  D) Freezing
-  [End of Question]
-
-  [Start of Answer]
-  B) Evaporation
-  [End of Answer]
-
-examples: |
-
-  Example 1:
-  [Start of Document]
-  Photosynthesis is a process used by plants, algae, and certain bacteria to convert light energy into chemical energy. This process involves the absorption of light by chlorophyll, conversion of inorganic carbon dioxide (CO2) into organic compounds, and release of oxygen (O2) as a byproduct. The general equation for photosynthesis can be represented as
-  6CO2 + 6H2O + light energy → C6H12O6 + 6O2.
-  [Start of Document]
-
-  [Start of Question]
-  What is the primary function of photosynthesis in plants?
-
-  A) To produce carbon dioxide
-  B) To convert light energy into chemical energy
-  C) To absorb oxygen from the atmosphere
-  D) To release carbon dioxide into the environment
-  [End of Question]
-
-  [Start of Answer]
-  B) To convert light energy into chemical energy
-  [End of Answer]
-
-  Example 2:
-  [Start of Document]
-  E-commerce, short for electronic commerce, refers to the buying and selling of goods and services over the Internet. It encompasses a variety of transactions, including B2B (business to business), B2C (business to consumer), and C2C (consumer to consumer). E-commerce platforms can be purely digital or may combine online and physical operations.
-  [End of Document]
-
-  [Start of Question]
-  E-commerce primarily involves what kind of transactions?
-
-  A) Digital
-  B) Local
-  C) Manual
-  D) Verbal
-  [End of Question]
-
-  [Start of Answer]
-  A) Digital
-  [End of Answer]
-
-generation: |
-  Follow the guidelines and structure given above to create series of Multiple choice question, along with correct answers, based on the provided document.
-  * Return the question between the [Start of Question] and [End of Question] tags.
-  * Return the answer within the [Start of Answer] and [End of Answer] tags.
-
-  Here is the document:
-  [Start of Document]
-  {{document_outline}}
-  {{document}}
-  [End of Document]
-
-start_tags: ["[Start of Question]", "[Start of Answer]"]
-end_tags: ["[End of Question]", "[End of Answer]"]
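Unlike the question-generation templates above, mcq_generation.yaml declares non-empty `start_tags`/`end_tags` pairs, which implies the model output is post-processed by extracting the spans between each pair. A hedged sketch of such a parser (the actual sdg_hub parsing code is not part of this diff):

```python
# Illustrative tag-pair extractor for the start_tags/end_tags declared above;
# sdg_hub's real post-processing may differ.
import re

START_TAGS = ["[Start of Question]", "[Start of Answer]"]
END_TAGS = ["[End of Question]", "[End of Answer]"]

def parse_tagged(output: str) -> dict[str, list[str]]:
    """Collect every span found between each (start, end) tag pair."""
    spans: dict[str, list[str]] = {}
    for start, end in zip(START_TAGS, END_TAGS):
        pattern = re.escape(start) + r"(.*?)" + re.escape(end)
        key = start.strip("[]").removeprefix("Start of ").lower()
        spans[key] = [m.strip() for m in re.findall(pattern, output, re.DOTALL)]
    return spans

sample = (
    "[Start of Question]What does 2 + 2 equal?\n"
    "A) 3  B) 4  C) 5  D) 22[End of Question]\n"
    "[Start of Answer]B) 4[End of Answer]"
)
print(parse_tagged(sample))  # -> {'question': [...], 'answer': ['B) 4']}
```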
--- a/sdg_hub/configs/knowledge/simple_generate_qa.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-system: You are a very knowledgeable AI Assistant that will faithfully assist the user with their task.
-
-introduction: Develop a series of educational question and answer pairs from a chapter in a {{domain}} textbook.
-
-principles: |
-  Here are the requirements:
-  1. Try not to repeat the verb for each instruction to maximize diversity.
-  2. The language used for the instruction also should be diverse. For example, you should combine questions with imperative instructions.
-  3. The type of instructions should be similar to provided examples. The generated instruction and the output should be grounded in the provided document.
-  4. A GPT language model should be able to complete the instruction. For example, do not ask the assistant to create any visual or audio output. For another example, do not ask the assistant to wake you up at 5pm or set a reminder because it cannot perform any action.
-  5. The instructions should be in English.
-  6. The instructions should be 1 to 2 sentences long. Either an imperative sentence or a question is permitted.
-  7. The output should be an appropriate response to the input and the instruction. Long outputs are preferable.
-
-examples: |
-  Here are some examples to help you understand the type of questions that are asked for this document:
-
-  {{icl_query_1}}
-  {{icl_response_1}}
-
-  {{icl_query_2}}
-  {{icl_response_2}}
-
-  {{icl_query_3}}
-  {{icl_response_3}}
-
-  Here is the document:
-  {{document}}
-
-generation: |
-  Provide a single question and answer pair based on the document.
-
-start_tags: [""]
-end_tags: [""]
sdg_hub/configs/reasoning/__init__.py: file without changes (empty file, removed in 0.2.1).
--- a/sdg_hub/configs/reasoning/dynamic_cot.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-system: You are an AI assistant that uses dynamic Chain of Thought (CoT), reflection, and verbal reinforcement learning for problem-solving. Your responses must adhere to the following instructions
-
-principles: |
-  1. Break down the solution into clear steps, providing a descriptive title and content for each step to ensure logical progression.
-  2. Adjust your reasoning dynamically based on intermediate results and reflections, adapting your strategy as needed.
-  3. Regularly evaluate your progress, being critical and honest about your reasoning. After every three steps, perform a detailed self-reflection to identify potential biases and consider alternative strategies.
-  4. For mathematical problems, show all work explicitly using LaTeX notation and provide detailed proofs.
-  5. Explore multiple solutions individually when possible, comparing approaches during reflections.
-  6. Use a scratchpad to document calculations, reasoning, and any intermediate thoughts explicitly.
-  7. Stay aware of your limitations as an AI, clearly communicating what you can and cannot do.
-
-examples: |
-  Respond in JSON format, with each response containing the following keys:
-  - current_action: Indicates the current action being taken, chosen from:
-    * think: Engage in thoughtful planning about how to approach or solve the task, considering potential strategies and identifying crucial elements.
-    * reflect: Pause to evaluate and reconsider your reasoning, assessing potential biases or errors.
-    * backtrack: Revert to a previous step and try a different solution path.
-    * generate: Present the final answer if confident.
-    * terminate: Conclude the process if no further action is needed.
-  - title: Describes the focus of the current step.
-  - content: Provides a detailed explanation of the step.
-  - confidence: A number between 0 and 1 representing your confidence in the content produced for the current action.
-  - next_action: Suggests the next action to be taken, chosen from the same set of actions.
-
-  Example of a valid JSON response:
-  ```json
-  {
-    "current_action": "think",
-    "title": "Identifying Key Information",
-    "content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
-    "confidence": 0.8,
-    "next_action": "reflect"
-  }
-  ```
-
-generation: |
-  Your goal is to demonstrate a thorough, adaptive, and self-reflective problem-solving process, emphasizing dynamic thinking and learning from your reasoning.
-
-
-
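The deleted dynamic CoT prompt asks the model to emit one JSON object per reasoning step, with a fixed key set, a closed action vocabulary, and a confidence in [0, 1]. A minimal validator for that schema, assuming only what the template itself spells out:

```python
# Schema check for the JSON step format requested by the prompt above.
# Key names and actions come from the template; the validator is illustrative.
import json

ACTIONS = {"think", "reflect", "backtrack", "generate", "terminate"}
REQUIRED = {"current_action", "title", "content", "confidence", "next_action"}

def validate_step(raw: str) -> dict:
    step = json.loads(raw)
    missing = REQUIRED - step.keys()
    if missing:
        raise ValueError(f"missing keys: {sorted(missing)}")
    if step["current_action"] not in ACTIONS or step["next_action"] not in ACTIONS:
        raise ValueError("unknown action")
    if not 0 <= step["confidence"] <= 1:
        raise ValueError("confidence must be in [0, 1]")
    return step

step = validate_step(
    '{"current_action": "think", "title": "Identifying Key Information", '
    '"content": "...", "confidence": 0.8, "next_action": "reflect"}'
)
print(step["next_action"])  # reflect
```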
sdg_hub/configs/skills/__init__.py: file without changes (empty file, removed in 0.2.1).
--- a/sdg_hub/configs/skills/analyzer.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-system: You are a very knowledgeable AI Assistant that will faithfully assist the user with their task.
-
-introduction: |
-  You will assume the role of an question analyzer. Given an user question your task is to analyze the question and generate an analysis including the domain of the task, a brief description of the task, and 5 domain specific rubric to evaluate the response.
-
-principles: |
-  1. Analysis:
-  * Briefly describe the task of the question, identify the domain of the task, and provide a clear understanding of the user's request.
-  * Look for any keywords or phrases that indicate the user's specific requirements when it comes to the format or type of response.
-  * The analysis should be clear, concise and unambiguous.
-  * Return the analysis between [Start of Analysis] and [End of Analysis] tags.
-
-  2. Rubric:
-  * Generate 3 to 5 domain specific rubric to evaluate the response.
-  * The generated rubric should be clear, concise and unambiguous.
-  * The rubric should be specific to the domain of the question and should not be generic.
-  * The rubric should be actionable and feasible.
-  * The rubric should satisfy all the criteria provided in the question. For instance, input and output format, type of response, etc.
-  * Return the rubric between [Start of Rubric] and [End of Rubric] tags.
-
-  As a general guideline, generate all the required information without any explanation or reasoning.
-
-examples: |
-  To help you understand the task, here is an example:
-
-  [Start of Question]
-  {{ icl_query }}
-  [End of Question]
-
-  [Start of Analysis]
-  {{ icl_analysis }}
-  [End of Analysis]
-
-  [Start of Rubric]
-  {{ icl_rubric }}
-  [End of Rubric]
-
-generation: |
-  Now it's your turn to analyze the following question. Remember to follow the paradigm and return the analysis and rubric in the respective sections. Strictly format the response using the specified tags.
-  * Return the analysis between [Start of Analysis] and [End of Analysis] tags.
-  * Return the rubric between [Start of Rubric] and [End of Rubric] tags.
-
-  [Start of Question]
-  {{ question }}
-  [End of Question]
-
-start_tags: ["[Start of Analysis]", "[Start of Rubric]"]
-end_tags: ["[End of Analysis]", "[End of Rubric]"]
--- a/sdg_hub/configs/skills/annotation.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-system: |
-  {{ system }}
-introduction: |
-  {{ task_description }}
-principles: |
-  {{ principles }}
-examples: |
-  To better assist you with this task, here are some examples:
-  {% if seed_samples is defined %}
-  {% for sample in seed_samples %}
-  [Start of Question]
-  {{ sample.seed_question }}
-  [End of Question]
-
-  [Start of Response]
-  {{ sample.seed_response }}
-  [End of Response]
-  {% endfor %}
-  {% else %}
-  [Start of Question]
-  {{ seed_question }}
-  [End of Question]
-
-  [Start of Response]
-  {{ seed_response }}
-  [End of Response]
-  {% endif %}
-generation: |
-  Remember to follow the principles mentioned above and use the same format as the examples.
-  [Start of Question]
-  {{ question }}
-  [End of Question]
-
-  Generate the response to the question above and return it in between the [Start of Response] and [End of Response] tags.
-start_tags: ["[Start of Response]"]
-end_tags: ["[End of Response]"]
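This is the only deleted config that uses Jinja control flow rather than bare placeholders: the `examples` block loops over `seed_samples` when it is defined and falls back to a single `seed_question`/`seed_response` pair otherwise. A hedged sketch of both branches (the rendering machinery is illustrative, not sdg_hub's loader):

```python
# Demonstrates the two branches of the {% if seed_samples is defined %} block
# above, using a simplified inline template with the same variable names.
from jinja2 import Template

examples_tpl = Template(
    "{% if seed_samples is defined %}{% for sample in seed_samples %}"
    "Q: {{ sample.seed_question }}\nA: {{ sample.seed_response }}\n"
    "{% endfor %}{% else %}"
    "Q: {{ seed_question }}\nA: {{ seed_response }}{% endif %}"
)

# Multi-sample mode: passing a list of seed dicts takes the for-loop branch.
print(examples_tpl.render(seed_samples=[
    {"seed_question": "What is 2 + 2?", "seed_response": "4"},
]))

# Single-sample mode: omitting seed_samples falls through to the else branch.
print(examples_tpl.render(seed_question="Capital of France?", seed_response="Paris"))
```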
--- a/sdg_hub/configs/skills/contexts.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-system: You are a highly capable AI Assistant that specializes in generating high-quality content tailored to specific tasks.
-
-introduction: |
-  Your task is to write a rich, relevant, and well-structured **context** for the following task:
-  Task Description: {{task_description}}
-
-principles: |
-  Please follow these guiding principles when generating the context:
-  * The context should be coherent, informative, and closely aligned with the task description.
-  * Do not include any greetings, explanations, or meta commentary.
-  * Maintain a natural, human-like tone suitable for the domain.
-  * Follow the formatting shown in the example exactly.
-  * Wrap the output between the tags: [Start of Context] and [End of Context].
-
-examples: |
-  To guide you, here is an example of a well-structured context:
-
-  [Start of Context]
-  {{seed_context}}
-  [End of Context]
-
-generation: |
-  Now generate a new context following the same structure and principles.
-  Begin your output with [Start of Context] and end with [End of Context].
-  Do not include any additional text outside these tags.
-
-start_tags: ["[Start of Context]"]
-end_tags: ["[End of Context]"]
--- a/sdg_hub/configs/skills/critic.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-system: |
-  You are a very knowledgeable AI Assistant that will faithfully assist the user with their task.
-
-introduction: |
-  You will assume the role of a critic. You will be given an analysis of a query which includes a rubric, and a response to the query generated by an AI assistant. Your task is to evaluate the response based on the rubric provided.
-
-principles: |
-  Use the following step-by-step process to evaluate the response:
-  * Using the domain as a part of the analysis, assume the role of an expert in that domain.
-  * Understand the task description provided in the analysis.
-  * Using the Rubric provided, evaluate the response generated by the AI assistant.
-  * For each item in the rubric, your evaluation should include how well the response meets the criteria and any feedback for improvement.
-  * Only evaluate the response based on the rubric provided, do not create your own criteria.
-
-examples: |
-  To help you understand the task, here is an example:
-
-  [Start of Query]
-  {{ icl_query }}
-  [End of Query]
-
-  [Start of Response]
-  {{ icl_response }}
-  [End of Response]
-
-  [Start of Analysis]
-  {{ icl_analysis }}
-  [End of Analysis]
-
-  [Start of Rubric]
-  {{ icl_rubric }}
-  [End of Rubric]
-
-  [Start of Critique]
-  {{ icl_critique }}
-  [End of Critique]
-
-generation: |
-  Now it's your turn to analyze the following query.
-
-  [Start of Query]
-  {{ question }}
-  [End of Query]
-
-  [Start of Response]
-  {{ response }}
-  [End of Response]
-
-  [Start of Analysis]
-  {{ analysis }}
-  [End of Analysis]
-
-  [Start of Rubric]
-  {{ rubric }}
-  [End of Rubric]
-
-  Remember to follow the paradigm and return the critique based on the rubric provided, between [Start of Critique] and [End of Critique] tags.
-
-start_tags: ["[Start of Critique]"]
-end_tags: ["[End of Critique]"]