labelrag 0.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,18 @@
1
+ .DS_Store
2
+ .venv/
3
+ venv/
4
+ __pycache__/
5
+ .pytest_cache/
6
+ .ruff_cache/
7
+ .pyright/
8
+ .mypy_cache/
9
+ coverage.xml
10
+ .coverage
11
+ dist/
12
+ build/
13
+ *.egg-info/
14
+ site/
15
+ docs/
16
+ !docs/
17
+ docs/*
18
+ !docs/public_api.md
labelrag-0.0.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 huruilizhen
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,212 @@
1
+ Metadata-Version: 2.4
2
+ Name: labelrag
3
+ Version: 0.0.0
4
+ Summary: A label-driven RAG pipeline built on top of paralabelgen.
5
+ Project-URL: Homepage, https://github.com/HuRuilizhen/labelrag
6
+ Project-URL: Repository, https://github.com/HuRuilizhen/labelrag
7
+ Project-URL: Issues, https://github.com/HuRuilizhen/labelrag/issues
8
+ Author-email: huruilizhen <huruilizhen@gmail.com>
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Keywords: evaluation,labels,llm,paragraphs,rag,retrieval
12
+ Classifier: Development Status :: 2 - Pre-Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
21
+ Requires-Python: >=3.11
22
+ Requires-Dist: paralabelgen==0.2.0
23
+ Provides-Extra: dev
24
+ Requires-Dist: build>=1.2.0; extra == 'dev'
25
+ Requires-Dist: pyright>=1.1.390; extra == 'dev'
26
+ Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
27
+ Requires-Dist: pytest>=8.3.0; extra == 'dev'
28
+ Requires-Dist: ruff>=0.11.0; extra == 'dev'
29
+ Requires-Dist: twine>=6.1.0; extra == 'dev'
30
+ Description-Content-Type: text/markdown
31
+
32
+ # labelrag
33
+
34
+ `labelrag` is a Python library for label-driven retrieval-augmented generation
35
+ pipelines built on top of `paralabelgen`.
36
+
37
+ - PyPI distribution: `labelrag`
38
+ - Python import package: `labelrag`
39
+ - Core dependency target: `paralabelgen==0.2.0`
40
+ - Default extraction path: spaCy via `paralabelgen`
41
+
42
+ ## Install
43
+
44
+ ```bash
45
+ pip install labelrag
46
+ ```
47
+
48
+ If you want to use the default spaCy-backed labeling path, install a compatible
49
+ English pipeline such as:
50
+
51
+ ```bash
52
+ python -m spacy download en_core_web_sm
53
+ ```
54
+
55
+ `en_core_web_sm` is the recommended default model, but you can point the
56
+ underlying `LabelGeneratorConfig` at another installed compatible spaCy
57
+ pipeline.
58
+
59
+ ## Quick Start
60
+
61
+ ### Retrieval-only workflow
62
+
63
+ ```python
64
+ from labelrag import RAGPipeline, RAGPipelineConfig
65
+
66
+ paragraphs = [
67
+ "OpenAI builds language models for developers.",
68
+ "Developers use language models in production systems.",
69
+ "Production systems need monitoring and evaluation tooling.",
70
+ ]
71
+
72
+ pipeline = RAGPipeline(RAGPipelineConfig())
73
+ pipeline.fit(paragraphs)
74
+
75
+ retrieval = pipeline.build_context("How do developers use language models?")
76
+ print(retrieval.prompt_context)
77
+ print(retrieval.metadata)
78
+ ```
79
+
80
+ ### Retrieval plus provider-backed answer generation
81
+
82
+ ```python
83
+ from labelrag import (
84
+ OpenAICompatibleAnswerGenerator,
85
+ OpenAICompatibleConfig,
86
+ RAGPipeline,
87
+ RAGPipelineConfig,
88
+ )
89
+
90
+ paragraphs = [
91
+ "OpenAI builds language models for developers.",
92
+ "Developers use language models in production systems.",
93
+ "Production systems need monitoring and evaluation tooling.",
94
+ ]
95
+
96
+ pipeline = RAGPipeline(RAGPipelineConfig())
97
+ pipeline.fit(paragraphs)
98
+
99
+ generator = OpenAICompatibleAnswerGenerator(
100
+ OpenAICompatibleConfig(
101
+ model="mistral-small-latest",
102
+ api_key_env_var="MISTRAL_API_KEY",
103
+ base_url="https://api.mistral.ai/v1",
104
+ )
105
+ )
106
+
107
+ answer = pipeline.answer_with_generator(
108
+ "How do developers use language models?",
109
+ generator,
110
+ )
111
+ print(answer.answer_text)
112
+ print(answer.metadata)
113
+ ```
114
+
115
+ ## Retrieval Model
116
+
117
+ The current retrieval layer is deterministic and label-driven.
118
+
119
+ - `fit(...)` delegates paragraph analysis to `labelgen.LabelGenerator`
120
+ - `build_context(...)` maps the question into the fitted label space
121
+ - retrieval uses greedy coverage over query label IDs
122
+ - label-free queries can fall back to deterministic concept overlap
123
+ - `require_full_label_coverage=True` suppresses partial retrieval results while
124
+ preserving attempted coverage trace in metadata
125
+
126
+ Tie-break order for greedy retrieval is:
127
+
128
+ 1. larger overlap with remaining query labels
129
+ 2. larger overlap on query concept IDs
130
+ 3. larger total paragraph label count
131
+ 4. lexicographically smaller `paragraph_id`
132
+
133
+ ## OpenAI-Compatible Provider Notes
134
+
135
+ The built-in answer-generation adapter targets a minimal OpenAI-compatible
136
+ chat-completions API surface.
137
+
138
+ It supports:
139
+
140
+ - standard base URLs such as `https://api.openai.com/v1`
141
+ - full endpoint URLs such as `https://api.mistral.ai/v1/chat/completions`
142
+ - API key injection through config or environment variables
143
+ - non-streaming text generation for `answer_with_generator(...)`
144
+
145
+ This adapter is intended to cover providers such as OpenAI, Mistral, and Qwen
146
+ when they expose an OpenAI-compatible endpoint shape.
147
+
148
+ ## Public API
149
+
150
+ The main public entrypoints are:
151
+
152
+ - `RAGPipeline`
153
+ - `RAGPipelineConfig`, `RetrievalConfig`, `PromptConfig`
154
+ - `IndexedParagraph`, `QueryAnalysis`, `RetrievedParagraph`
155
+ - `RetrievalResult`, `RAGAnswerResult`
156
+ - `GeneratedAnswer`, `AnswerGenerator`
157
+ - `OpenAICompatibleAnswerGenerator`, `OpenAICompatibleConfig`
158
+ - convenience re-export: `Paragraph`
159
+
160
+ Detailed API notes are available in [`docs/public_api.md`](docs/public_api.md).
161
+
162
+ ## Examples
163
+
164
+ Runnable examples are available in [`examples/`](examples/):
165
+
166
+ - [`examples/basic_usage.py`](examples/basic_usage.py)
167
+ - [`examples/custom_config.py`](examples/custom_config.py)
168
+ - [`examples/save_and_load.py`](examples/save_and_load.py)
169
+ - [`examples/provider_answer.py`](examples/provider_answer.py)
170
+
171
+ ## Persistence Notes
172
+
173
+ `save(path)` produces a human-inspectable directory containing:
174
+
175
+ - `config.json`
176
+ - `label_generator.json`
177
+ - `corpus_index.json`
178
+ - `fit_result.json`
179
+
180
+ Public guarantee:
181
+
182
+ - a saved and reloaded pipeline should preserve retrieval behavior for the same
183
+ fitted state, question, and config
184
+
185
+ ## Configuration Notes
186
+
187
+ - `RetrievalConfig.max_paragraphs` sets the hard retrieval limit
188
+ - `RetrievalConfig.allow_label_free_fallback` enables deterministic concept
189
+ overlap fallback for label-free queries
190
+ - `RetrievalConfig.require_full_label_coverage` suppresses partial retrieval
191
+ output when not all query labels can be covered
192
+ - `PromptConfig.include_paragraph_ids` includes stable paragraph IDs in the
193
+ rendered prompt context
194
+ - `PromptConfig.include_label_annotations` includes paragraph label annotations
195
+ in rendered prompt context
196
+ - `PromptConfig.max_context_characters` applies a hard cap to rendered context
197
+ length
198
+
199
+ ## Development Checks
200
+
201
+ ```bash
202
+ .venv/bin/ruff check . --fix
203
+ .venv/bin/pyright
204
+ .venv/bin/pytest
205
+ ```
206
+
207
+ ## Release Checks
208
+
209
+ ```bash
210
+ .venv/bin/python -m build
211
+ .venv/bin/python -m twine check dist/*
212
+ ```
@@ -0,0 +1,181 @@
1
+ # labelrag
2
+
3
+ `labelrag` is a Python library for label-driven retrieval-augmented generation
4
+ pipelines built on top of `paralabelgen`.
5
+
6
+ - PyPI distribution: `labelrag`
7
+ - Python import package: `labelrag`
8
+ - Core dependency target: `paralabelgen==0.2.0`
9
+ - Default extraction path: spaCy via `paralabelgen`
10
+
11
+ ## Install
12
+
13
+ ```bash
14
+ pip install labelrag
15
+ ```
16
+
17
+ If you want to use the default spaCy-backed labeling path, install a compatible
18
+ English pipeline such as:
19
+
20
+ ```bash
21
+ python -m spacy download en_core_web_sm
22
+ ```
23
+
24
+ `en_core_web_sm` is the recommended default model, but you can point the
25
+ underlying `LabelGeneratorConfig` at another installed compatible spaCy
26
+ pipeline.
27
+
28
+ ## Quick Start
29
+
30
+ ### Retrieval-only workflow
31
+
32
+ ```python
33
+ from labelrag import RAGPipeline, RAGPipelineConfig
34
+
35
+ paragraphs = [
36
+ "OpenAI builds language models for developers.",
37
+ "Developers use language models in production systems.",
38
+ "Production systems need monitoring and evaluation tooling.",
39
+ ]
40
+
41
+ pipeline = RAGPipeline(RAGPipelineConfig())
42
+ pipeline.fit(paragraphs)
43
+
44
+ retrieval = pipeline.build_context("How do developers use language models?")
45
+ print(retrieval.prompt_context)
46
+ print(retrieval.metadata)
47
+ ```
48
+
49
+ ### Retrieval plus provider-backed answer generation
50
+
51
+ ```python
52
+ from labelrag import (
53
+ OpenAICompatibleAnswerGenerator,
54
+ OpenAICompatibleConfig,
55
+ RAGPipeline,
56
+ RAGPipelineConfig,
57
+ )
58
+
59
+ paragraphs = [
60
+ "OpenAI builds language models for developers.",
61
+ "Developers use language models in production systems.",
62
+ "Production systems need monitoring and evaluation tooling.",
63
+ ]
64
+
65
+ pipeline = RAGPipeline(RAGPipelineConfig())
66
+ pipeline.fit(paragraphs)
67
+
68
+ generator = OpenAICompatibleAnswerGenerator(
69
+ OpenAICompatibleConfig(
70
+ model="mistral-small-latest",
71
+ api_key_env_var="MISTRAL_API_KEY",
72
+ base_url="https://api.mistral.ai/v1",
73
+ )
74
+ )
75
+
76
+ answer = pipeline.answer_with_generator(
77
+ "How do developers use language models?",
78
+ generator,
79
+ )
80
+ print(answer.answer_text)
81
+ print(answer.metadata)
82
+ ```
83
+
84
+ ## Retrieval Model
85
+
86
+ The current retrieval layer is deterministic and label-driven.
87
+
88
+ - `fit(...)` delegates paragraph analysis to `labelgen.LabelGenerator`
89
+ - `build_context(...)` maps the question into the fitted label space
90
+ - retrieval uses greedy coverage over query label IDs
91
+ - label-free queries can fall back to deterministic concept overlap
92
+ - `require_full_label_coverage=True` suppresses partial retrieval results while
93
+ preserving attempted coverage trace in metadata
94
+
95
+ Tie-break order for greedy retrieval is:
96
+
97
+ 1. larger overlap with remaining query labels
98
+ 2. larger overlap on query concept IDs
99
+ 3. larger total paragraph label count
100
+ 4. lexicographically smaller `paragraph_id`
101
+
102
+ ## OpenAI-Compatible Provider Notes
103
+
104
+ The built-in answer-generation adapter targets a minimal OpenAI-compatible
105
+ chat-completions API surface.
106
+
107
+ It supports:
108
+
109
+ - standard base URLs such as `https://api.openai.com/v1`
110
+ - full endpoint URLs such as `https://api.mistral.ai/v1/chat/completions`
111
+ - API key injection through config or environment variables
112
+ - non-streaming text generation for `answer_with_generator(...)`
113
+
114
+ This adapter is intended to cover providers such as OpenAI, Mistral, and Qwen
115
+ when they expose an OpenAI-compatible endpoint shape.
116
+
117
+ ## Public API
118
+
119
+ The main public entrypoints are:
120
+
121
+ - `RAGPipeline`
122
+ - `RAGPipelineConfig`, `RetrievalConfig`, `PromptConfig`
123
+ - `IndexedParagraph`, `QueryAnalysis`, `RetrievedParagraph`
124
+ - `RetrievalResult`, `RAGAnswerResult`
125
+ - `GeneratedAnswer`, `AnswerGenerator`
126
+ - `OpenAICompatibleAnswerGenerator`, `OpenAICompatibleConfig`
127
+ - convenience re-export: `Paragraph`
128
+
129
+ Detailed API notes are available in [`docs/public_api.md`](docs/public_api.md).
130
+
131
+ ## Examples
132
+
133
+ Runnable examples are available in [`examples/`](examples/):
134
+
135
+ - [`examples/basic_usage.py`](examples/basic_usage.py)
136
+ - [`examples/custom_config.py`](examples/custom_config.py)
137
+ - [`examples/save_and_load.py`](examples/save_and_load.py)
138
+ - [`examples/provider_answer.py`](examples/provider_answer.py)
139
+
140
+ ## Persistence Notes
141
+
142
+ `save(path)` produces a human-inspectable directory containing:
143
+
144
+ - `config.json`
145
+ - `label_generator.json`
146
+ - `corpus_index.json`
147
+ - `fit_result.json`
148
+
149
+ Public guarantee:
150
+
151
+ - a saved and reloaded pipeline should preserve retrieval behavior for the same
152
+ fitted state, question, and config
153
+
154
+ ## Configuration Notes
155
+
156
+ - `RetrievalConfig.max_paragraphs` sets the hard retrieval limit
157
+ - `RetrievalConfig.allow_label_free_fallback` enables deterministic concept
158
+ overlap fallback for label-free queries
159
+ - `RetrievalConfig.require_full_label_coverage` suppresses partial retrieval
160
+ output when not all query labels can be covered
161
+ - `PromptConfig.include_paragraph_ids` includes stable paragraph IDs in the
162
+ rendered prompt context
163
+ - `PromptConfig.include_label_annotations` includes paragraph label annotations
164
+ in rendered prompt context
165
+ - `PromptConfig.max_context_characters` applies a hard cap to rendered context
166
+ length
167
+
168
+ ## Development Checks
169
+
170
+ ```bash
171
+ .venv/bin/ruff check . --fix
172
+ .venv/bin/pyright
173
+ .venv/bin/pytest
174
+ ```
175
+
176
+ ## Release Checks
177
+
178
+ ```bash
179
+ .venv/bin/python -m build
180
+ .venv/bin/python -m twine check dist/*
181
+ ```
@@ -0,0 +1,85 @@
1
+ [build-system]
2
+ requires = ["hatchling>=1.27.0"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "labelrag"
7
+ version = "0.0.0"
8
+ description = "A label-driven RAG pipeline built on top of paralabelgen."
9
+ readme = "README.md"
10
+ license = { text = "MIT" }
11
+ authors = [
12
+ { name = "huruilizhen", email = "huruilizhen@gmail.com" },
13
+ ]
14
+ requires-python = ">=3.11"
15
+ dependencies = [
16
+ "paralabelgen==0.2.0",
17
+ ]
18
+ keywords = ["rag", "retrieval", "labels", "evaluation", "paragraphs", "llm"]
19
+ classifiers = [
20
+ "Development Status :: 2 - Pre-Alpha",
21
+ "Intended Audience :: Developers",
22
+ "License :: OSI Approved :: MIT License",
23
+ "Programming Language :: Python :: 3",
24
+ "Programming Language :: Python :: 3.11",
25
+ "Programming Language :: Python :: 3.12",
26
+ "Programming Language :: Python :: 3.13",
27
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
28
+ "Topic :: Software Development :: Libraries :: Python Modules",
29
+ ]
30
+
31
+ [project.urls]
32
+ Homepage = "https://github.com/HuRuilizhen/labelrag"
33
+ Repository = "https://github.com/HuRuilizhen/labelrag"
34
+ Issues = "https://github.com/HuRuilizhen/labelrag/issues"
35
+
36
+ [project.optional-dependencies]
37
+ dev = [
38
+ "build>=1.2.0",
39
+ "pyright>=1.1.390",
40
+ "pytest>=8.3.0",
41
+ "pytest-cov>=6.0.0",
42
+ "ruff>=0.11.0",
43
+ "twine>=6.1.0",
44
+ ]
45
+
46
+ [tool.hatch.build.targets.wheel]
47
+ packages = ["src/labelrag"]
48
+
49
+ [tool.hatch.build.targets.sdist]
50
+ only-include = [
51
+ "/src/labelrag",
52
+ "/README.md",
53
+ "/LICENSE",
54
+ "/pyproject.toml",
55
+ ]
56
+
57
+ [tool.pytest.ini_options]
58
+ minversion = "8.0"
59
+ testpaths = ["tests"]
60
+ addopts = [
61
+ "--strict-config",
62
+ "--strict-markers",
63
+ ]
64
+
65
+ [tool.ruff]
66
+ target-version = "py311"
67
+ line-length = 100
68
+ src = ["src", "tests"]
69
+
70
+ [tool.ruff.lint]
71
+ select = ["E", "F", "I", "B", "UP"]
72
+
73
+ [tool.ruff.format]
74
+ quote-style = "double"
75
+ indent-style = "space"
76
+
77
+ [tool.pyright]
78
+ include = ["src", "tests", "examples"]
79
+ pythonVersion = "3.11"
80
+ typeCheckingMode = "strict"
81
+ # `paralabelgen` does not currently ship type stubs, so this remains an
82
+ # intentional deviation from the sibling `labelgen` repository.
83
+ reportMissingTypeStubs = false
84
+ venvPath = "."
85
+ venv = ".venv"
@@ -0,0 +1,35 @@
1
+ """Public package exports for the `labelrag` package."""
2
+
3
+ from labelgen import Paragraph
4
+
5
+ from labelrag.config import PromptConfig, RAGPipelineConfig, RetrievalConfig
6
+ from labelrag.generation.generator import AnswerGenerator, GeneratedAnswer
7
+ from labelrag.generation.openai_compatible import (
8
+ OpenAICompatibleAnswerGenerator,
9
+ OpenAICompatibleConfig,
10
+ )
11
+ from labelrag.pipeline.rag_pipeline import RAGPipeline
12
+ from labelrag.types import (
13
+ IndexedParagraph,
14
+ QueryAnalysis,
15
+ RAGAnswerResult,
16
+ RetrievalResult,
17
+ RetrievedParagraph,
18
+ )
19
+
20
# Explicit public API of the `labelrag` package; everything not listed here
# is considered internal even if importable.
__all__ = [
    "AnswerGenerator",
    "GeneratedAnswer",
    "IndexedParagraph",
    "OpenAICompatibleAnswerGenerator",
    "OpenAICompatibleConfig",
    "Paragraph",
    "PromptConfig",
    "QueryAnalysis",
    "RAGAnswerResult",
    "RAGPipeline",
    "RAGPipelineConfig",
    "RetrievalConfig",
    "RetrievalResult",
    "RetrievedParagraph",
]
@@ -0,0 +1,33 @@
1
+ """Public configuration models for `labelrag`."""
2
+
3
+ from dataclasses import dataclass, field
4
+
5
+ from labelgen import LabelGeneratorConfig
6
+
7
+
8
@dataclass(slots=True)
class RetrievalConfig:
    """Configuration for paragraph retrieval behavior."""

    # Hard upper bound on the number of paragraphs a single retrieval returns.
    max_paragraphs: int = 8
    # When True, suppress partial results if not every query label can be
    # covered by the retrieved paragraphs.
    require_full_label_coverage: bool = False
    # When True, queries that produce no labels may fall back to a
    # deterministic concept-overlap retrieval path.
    allow_label_free_fallback: bool = True
15
+
16
+
17
@dataclass(slots=True)
class PromptConfig:
    """Configuration for prompt context rendering."""

    # Include each paragraph's stable ID in the rendered prompt context.
    include_paragraph_ids: bool = True
    # Include per-paragraph label annotations in the rendered prompt context.
    include_label_annotations: bool = False
    # Hard cap on rendered context length in characters; None means no cap.
    max_context_characters: int | None = None
24
+
25
+
26
@dataclass(slots=True)
class RAGPipelineConfig:
    """Top-level public configuration for `RAGPipeline`."""

    # Configuration forwarded to the underlying labelgen label generator.
    labelgen: LabelGeneratorConfig = field(default_factory=LabelGeneratorConfig)
    # Retrieval-stage behavior (limits, coverage, fallbacks).
    retrieval: RetrievalConfig = field(default_factory=RetrievalConfig)
    # Prompt-context rendering behavior.
    prompt: PromptConfig = field(default_factory=PromptConfig)
33
+
@@ -0,0 +1,14 @@
1
+ """Generation boundaries for `labelrag`."""
2
+
3
+ from labelrag.generation.generator import AnswerGenerator, GeneratedAnswer
4
+ from labelrag.generation.openai_compatible import (
5
+ OpenAICompatibleAnswerGenerator,
6
+ OpenAICompatibleConfig,
7
+ )
8
+
9
# Public names of the generation subpackage, re-exported for convenience.
__all__ = [
    "AnswerGenerator",
    "GeneratedAnswer",
    "OpenAICompatibleAnswerGenerator",
    "OpenAICompatibleConfig",
]
@@ -0,0 +1,21 @@
1
+ """Generator protocol definitions."""
2
+
3
+ from dataclasses import dataclass, field
4
+ from typing import Any, Protocol
5
+
6
+
7
+ @dataclass(slots=True)
8
+ class GeneratedAnswer:
9
+ """Model-agnostic generated answer payload."""
10
+
11
+ text: str
12
+ metadata: dict[str, Any] = field(default_factory=lambda: {})
13
+
14
+
15
class AnswerGenerator(Protocol):
    """Protocol for injected synchronous answer generation.

    Structural (duck-typed) interface: any object with a matching
    ``generate`` method satisfies it — no inheritance required.
    """

    def generate(self, question: str, context: str) -> GeneratedAnswer:
        """Generate an answer from the provided question and context.

        Args:
            question: The user question to answer.
            context: Rendered retrieval context supplied by the pipeline.

        Returns:
            A ``GeneratedAnswer`` carrying the answer text and metadata.
        """

        ...