sdg-hub 0.7.2__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. sdg_hub/_version.py +2 -2
  2. sdg_hub/core/__init__.py +13 -1
  3. sdg_hub/core/blocks/__init__.py +11 -2
  4. sdg_hub/core/blocks/agent/__init__.py +6 -0
  5. sdg_hub/core/blocks/agent/agent_block.py +397 -0
  6. sdg_hub/core/blocks/base.py +4 -1
  7. sdg_hub/core/blocks/filtering/column_value_filter.py +2 -0
  8. sdg_hub/core/blocks/llm/__init__.py +3 -2
  9. sdg_hub/core/blocks/llm/llm_chat_block.py +2 -0
  10. sdg_hub/core/blocks/llm/{llm_parser_block.py → llm_response_extractor_block.py} +32 -9
  11. sdg_hub/core/blocks/llm/prompt_builder_block.py +2 -0
  12. sdg_hub/core/blocks/llm/text_parser_block.py +2 -0
  13. sdg_hub/core/blocks/transform/duplicate_columns.py +2 -0
  14. sdg_hub/core/blocks/transform/index_based_mapper.py +2 -0
  15. sdg_hub/core/blocks/transform/json_structure_block.py +2 -0
  16. sdg_hub/core/blocks/transform/melt_columns.py +2 -0
  17. sdg_hub/core/blocks/transform/rename_columns.py +12 -0
  18. sdg_hub/core/blocks/transform/text_concat.py +2 -0
  19. sdg_hub/core/blocks/transform/uniform_col_val_setter.py +2 -0
  20. sdg_hub/core/connectors/__init__.py +46 -0
  21. sdg_hub/core/connectors/agent/__init__.py +10 -0
  22. sdg_hub/core/connectors/agent/base.py +233 -0
  23. sdg_hub/core/connectors/agent/langflow.py +151 -0
  24. sdg_hub/core/connectors/base.py +99 -0
  25. sdg_hub/core/connectors/exceptions.py +41 -0
  26. sdg_hub/core/connectors/http/__init__.py +6 -0
  27. sdg_hub/core/connectors/http/client.py +150 -0
  28. sdg_hub/core/connectors/registry.py +112 -0
  29. sdg_hub/core/flow/base.py +7 -31
  30. sdg_hub/core/utils/flow_metrics.py +3 -3
  31. sdg_hub/flows/evaluation/rag/flow.yaml +6 -6
  32. sdg_hub/flows/qa_generation/document_grounded_qa/enhanced_multi_summary_qa/detailed_summary/flow.yaml +4 -4
  33. sdg_hub/flows/qa_generation/document_grounded_qa/enhanced_multi_summary_qa/doc_direct_qa/flow.yaml +3 -3
  34. sdg_hub/flows/qa_generation/document_grounded_qa/enhanced_multi_summary_qa/extractive_summary/flow.yaml +4 -4
  35. sdg_hub/flows/qa_generation/document_grounded_qa/enhanced_multi_summary_qa/key_facts/flow.yaml +2 -2
  36. sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/instructlab/flow.yaml +7 -7
  37. sdg_hub/flows/qa_generation/document_grounded_qa/multi_summary_qa/multilingual/japanese/flow.yaml +7 -7
  38. sdg_hub/flows/text_analysis/structured_insights/flow.yaml +4 -4
  39. {sdg_hub-0.7.2.dist-info → sdg_hub-0.8.0.dist-info}/METADATA +2 -2
  40. {sdg_hub-0.7.2.dist-info → sdg_hub-0.8.0.dist-info}/RECORD +43 -32
  41. {sdg_hub-0.7.2.dist-info → sdg_hub-0.8.0.dist-info}/WHEEL +1 -1
  42. {sdg_hub-0.7.2.dist-info → sdg_hub-0.8.0.dist-info}/licenses/LICENSE +0 -0
  43. {sdg_hub-0.7.2.dist-info → sdg_hub-0.8.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,150 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ """HTTP client with tenacity retry."""
3
+
4
+ from typing import Any, Optional
5
+
6
+ from tenacity import (
7
+ retry,
8
+ retry_if_exception_type,
9
+ stop_after_attempt,
10
+ wait_exponential,
11
+ )
12
+ import httpx
13
+
14
+ from ...utils.logger_config import setup_logger
15
+ from ..exceptions import ConnectorError, ConnectorHTTPError
16
+
17
+ logger = setup_logger(__name__)
18
+
19
+
20
class HttpClient:
    """Thin httpx wrapper that retries transient failures via tenacity.

    Parameters
    ----------
    timeout : float
        Per-request timeout in seconds. Default is 120.0.
    max_retries : int
        Number of retry attempts after the initial try. Default is 3.

    Example
    -------
    >>> client = HttpClient(timeout=60.0, max_retries=3)
    >>> response = await client.post("https://api.example.com", {"key": "value"}, {})
    """

    def __init__(self, timeout: float = 120.0, max_retries: int = 3):
        self.timeout = timeout
        self.max_retries = max_retries

    def _retry_policy(self):
        # Shared tenacity configuration: one initial attempt plus
        # ``max_retries`` retries, exponential backoff capped at 60s,
        # retrying only on timeouts and connection failures.
        return retry(
            stop=stop_after_attempt(self.max_retries + 1),
            wait=wait_exponential(multiplier=1, min=1, max=60),
            retry=retry_if_exception_type((httpx.TimeoutException, httpx.ConnectError)),
            reraise=True,
        )

    async def post(
        self,
        url: str,
        payload: dict[str, Any],
        headers: Optional[dict[str, str]] = None,
    ) -> dict[str, Any]:
        """Send an async POST request, retrying transient failures.

        Parameters
        ----------
        url : str
            The URL to POST to.
        payload : dict
            The JSON payload to send.
        headers : dict, optional
            HTTP headers to include.

        Returns
        -------
        dict
            The JSON response.

        Raises
        ------
        ConnectorError
            If connection or timeout fails after all retries.
        ConnectorHTTPError
            If an HTTP error status is returned.
        """
        headers = headers or {}

        @self._retry_policy()
        async def _attempt() -> dict[str, Any]:
            # A fresh client per attempt so each retry gets a clean connection.
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                logger.debug(f"POST request to {url}")
                response = await client.post(url, json=payload, headers=headers)
                response.raise_for_status()
                return response.json()

        try:
            return await _attempt()
        except httpx.HTTPStatusError as err:
            # Truncate the body so error messages stay readable.
            body = err.response.text[:500] if err.response.text else None
            raise ConnectorHTTPError(url, err.response.status_code, body) from err
        except httpx.TimeoutException as err:
            raise ConnectorError(
                f"Request to '{url}' timed out after {self.timeout}s"
            ) from err
        except httpx.ConnectError as err:
            raise ConnectorError(f"Failed to connect to '{url}': {err}") from err

    def post_sync(
        self,
        url: str,
        payload: dict[str, Any],
        headers: Optional[dict[str, str]] = None,
    ) -> dict[str, Any]:
        """Send a blocking POST request, retrying transient failures.

        Parameters
        ----------
        url : str
            The URL to POST to.
        payload : dict
            The JSON payload to send.
        headers : dict, optional
            HTTP headers to include.

        Returns
        -------
        dict
            The JSON response.

        Raises
        ------
        ConnectorError
            If connection or timeout fails after all retries.
        ConnectorHTTPError
            If an HTTP error status is returned.
        """
        headers = headers or {}

        @self._retry_policy()
        def _attempt() -> dict[str, Any]:
            # A fresh client per attempt so each retry gets a clean connection.
            with httpx.Client(timeout=self.timeout) as client:
                logger.debug(f"POST request to {url}")
                response = client.post(url, json=payload, headers=headers)
                response.raise_for_status()
                return response.json()

        try:
            return _attempt()
        except httpx.HTTPStatusError as err:
            # Truncate the body so error messages stay readable.
            body = err.response.text[:500] if err.response.text else None
            raise ConnectorHTTPError(url, err.response.status_code, body) from err
        except httpx.TimeoutException as err:
            raise ConnectorError(
                f"Request to '{url}' timed out after {self.timeout}s"
            ) from err
        except httpx.ConnectError as err:
            raise ConnectorError(f"Failed to connect to '{url}': {err}") from err
@@ -0,0 +1,112 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ """Registry for connector classes."""
3
+
4
+ import inspect
5
+
6
+ from ..utils.logger_config import setup_logger
7
+ from .exceptions import ConnectorError
8
+
9
+ logger = setup_logger(__name__)
10
+
11
+
12
class ConnectorRegistry:
    """Global registry mapping names to connector classes.

    Simple registry for registering and retrieving connectors by name.

    Example
    -------
    >>> @ConnectorRegistry.register("my_connector")
    ... class MyConnector(BaseConnector):
    ...     pass
    ...
    >>> connector_class = ConnectorRegistry.get("my_connector")
    """

    _connectors: dict[str, type] = {}

    @classmethod
    def register(cls, name: str):
        """Register a connector class.

        Parameters
        ----------
        name : str
            Name under which to register the connector.

        Returns
        -------
        callable
            Decorator function that registers the class.

        Example
        -------
        >>> @ConnectorRegistry.register("langflow")
        ... class LangflowConnector(BaseAgentConnector):
        ...     pass
        """

        def decorator(candidate: type) -> type:
            if not inspect.isclass(candidate):
                raise ConnectorError(f"Expected a class, got {type(candidate)}")

            # Imported lazily to avoid a circular import with .base.
            from .base import BaseConnector

            if not issubclass(candidate, BaseConnector):
                raise ConnectorError(
                    f"Connector class '{candidate.__name__}' "
                    "must inherit from BaseConnector"
                )

            cls._connectors[name] = candidate
            logger.debug(f"Registered connector '{name}' ({candidate.__name__})")
            return candidate

        return decorator

    @classmethod
    def get(cls, name: str) -> type:
        """Look up a connector class by its registered name.

        Parameters
        ----------
        name : str
            Name of the connector to retrieve.

        Returns
        -------
        type
            The connector class.

        Raises
        ------
        ConnectorError
            If the connector is not found.
        """
        try:
            return cls._connectors[name]
        except KeyError:
            known = sorted(cls._connectors.keys())
            message = f"Connector '{name}' not found."
            if known:
                message += f" Available: {', '.join(known)}"
            raise ConnectorError(message) from None

    @classmethod
    def list_all(cls) -> list[str]:
        """Get all registered connector names.

        Returns
        -------
        list[str]
            Sorted list of all connector names.
        """
        return sorted(cls._connectors)

    @classmethod
    def clear(cls) -> None:
        """Clear all registered connectors. Primarily for testing."""
        cls._connectors.clear()
sdg_hub/core/flow/base.py CHANGED
@@ -679,7 +679,7 @@ class Flow(BaseModel):
679
679
  self._block_metrics.append(
680
680
  {
681
681
  "block_name": block.block_name,
682
- "block_type": block.__class__.__name__,
682
+ "block_class": block.__class__.__name__,
683
683
  "execution_time": execution_time,
684
684
  "input_rows": input_rows,
685
685
  "output_rows": output_rows,
@@ -701,7 +701,7 @@ class Flow(BaseModel):
701
701
  self._block_metrics.append(
702
702
  {
703
703
  "block_name": block.block_name,
704
- "block_type": block.__class__.__name__,
704
+ "block_class": block.__class__.__name__,
705
705
  "execution_time": execution_time,
706
706
  "input_rows": input_rows,
707
707
  "output_rows": 0,
@@ -882,38 +882,14 @@ class Flow(BaseModel):
882
882
  )
883
883
 
884
884
  def _detect_llm_blocks(self) -> list[str]:
885
- """Detect LLM blocks in the flow by checking for model-related attribute existence.
886
-
887
- LLM blocks are identified by having model, api_base, or api_key attributes,
888
- regardless of their values (they may be None until set_model_config() is called).
885
+ """Detect blocks with block_type='llm'.
889
886
 
890
887
  Returns
891
888
  -------
892
889
  List[str]
893
- List of block names that have LLM-related attributes.
890
+ List of block names that are LLM blocks.
894
891
  """
895
- llm_blocks = []
896
-
897
- for block in self.blocks:
898
- block_type = block.__class__.__name__
899
- block_name = block.block_name
900
-
901
- # Check by attribute existence (not value) - LLM blocks have these attributes even if None
902
- has_model_attr = hasattr(block, "model")
903
- has_api_base_attr = hasattr(block, "api_base")
904
- has_api_key_attr = hasattr(block, "api_key")
905
-
906
- # A block is considered an LLM block if it has any LLM-related attributes
907
- is_llm_block = has_model_attr or has_api_base_attr or has_api_key_attr
908
-
909
- if is_llm_block:
910
- llm_blocks.append(block_name)
911
- logger.debug(
912
- f"Detected LLM block '{block_name}' ({block_type}): "
913
- f"has_model_attr={has_model_attr}, has_api_base_attr={has_api_base_attr}, has_api_key_attr={has_api_key_attr}"
914
- )
915
-
916
- return llm_blocks
892
+ return [block.block_name for block in self.blocks if block.block_type == "llm"]
917
893
 
918
894
  def is_model_config_required(self) -> bool:
919
895
  """Check if model configuration is required for this flow.
@@ -1152,7 +1128,7 @@ class Flow(BaseModel):
1152
1128
  # Record block execution info
1153
1129
  block_info = {
1154
1130
  "block_name": block.block_name,
1155
- "block_type": block.__class__.__name__,
1131
+ "block_class": block.__class__.__name__,
1156
1132
  "execution_time_seconds": block_execution_time,
1157
1133
  "input_rows": input_rows,
1158
1134
  "output_rows": len(current_dataset),
@@ -1341,7 +1317,7 @@ class Flow(BaseModel):
1341
1317
  "metadata": self.metadata.model_dump(),
1342
1318
  "blocks": [
1343
1319
  {
1344
- "block_type": block.__class__.__name__,
1320
+ "block_class": block.__class__.__name__,
1345
1321
  "block_name": block.block_name,
1346
1322
  "input_cols": getattr(block, "input_cols", None),
1347
1323
  "output_cols": getattr(block, "output_cols", None),
@@ -31,12 +31,12 @@ def aggregate_block_metrics(entries: list[dict[str, Any]]) -> list[dict[str, Any
31
31
  """
32
32
  agg: dict[tuple[str, str], dict[str, Any]] = {}
33
33
  for m in entries:
34
- key = (m.get("block_name"), m.get("block_type"))
34
+ key = (m.get("block_name"), m.get("block_class"))
35
35
  a = agg.setdefault(
36
36
  key,
37
37
  {
38
38
  "block_name": key[0],
39
- "block_type": key[1],
39
+ "block_class": key[1],
40
40
  "execution_time": 0.0,
41
41
  "input_rows": 0,
42
42
  "output_rows": 0,
@@ -138,7 +138,7 @@ def display_metrics_summary(
138
138
 
139
139
  table.add_row(
140
140
  metrics["block_name"],
141
- metrics["block_type"],
141
+ metrics["block_class"],
142
142
  duration,
143
143
  row_change,
144
144
  col_change,
@@ -41,7 +41,7 @@ blocks:
41
41
  max_tokens: 2048
42
42
  temperature: 0.7
43
43
 
44
- - block_type: LLMParserBlock
44
+ - block_type: LLMResponseExtractorBlock
45
45
  block_config:
46
46
  block_name: parse_topic
47
47
  input_cols: topic_response
@@ -73,7 +73,7 @@ blocks:
73
73
  max_tokens: 2048
74
74
  temperature: 0.7
75
75
 
76
- - block_type: LLMParserBlock
76
+ - block_type: LLMResponseExtractorBlock
77
77
  block_config:
78
78
  block_name: parse_question
79
79
  input_cols: question_response
@@ -97,7 +97,7 @@ blocks:
97
97
  max_tokens: 4096
98
98
  temperature: 0.7
99
99
 
100
- - block_type: LLMParserBlock
100
+ - block_type: LLMResponseExtractorBlock
101
101
  block_config:
102
102
  block_name: parse_evolved_question
103
103
  input_cols: evolution_response
@@ -123,7 +123,7 @@ blocks:
123
123
  max_tokens: 4096
124
124
  temperature: 0.2
125
125
 
126
- - block_type: LLMParserBlock
126
+ - block_type: LLMResponseExtractorBlock
127
127
  block_config:
128
128
  block_name: parse_answer
129
129
  input_cols: answer_response
@@ -150,7 +150,7 @@ blocks:
150
150
  max_tokens: 512
151
151
  temperature: 0.0
152
152
 
153
- - block_type: LLMParserBlock
153
+ - block_type: LLMResponseExtractorBlock
154
154
  block_config:
155
155
  block_name: parse_critic_score
156
156
  input_cols: critic_response
@@ -185,7 +185,7 @@ blocks:
185
185
  max_tokens: 4096
186
186
  temperature: 0.0
187
187
 
188
- - block_type: LLMParserBlock
188
+ - block_type: LLMResponseExtractorBlock
189
189
  block_config:
190
190
  block_name: parse_extracted_context
191
191
  input_cols: extraction_response
@@ -60,7 +60,7 @@ blocks:
60
60
  temperature: 0.7
61
61
  n: 50
62
62
  async_mode: true
63
- - block_type: LLMParserBlock
63
+ - block_type: LLMResponseExtractorBlock
64
64
  block_config:
65
65
  block_name: extract_detailed_summary
66
66
  input_cols: raw_summary
@@ -108,7 +108,7 @@ blocks:
108
108
  temperature: 0.7
109
109
  n: 1
110
110
  async_mode: true
111
- - block_type: LLMParserBlock
111
+ - block_type: LLMResponseExtractorBlock
112
112
  block_config:
113
113
  block_name: extract_questions
114
114
  input_cols: question_list
@@ -142,7 +142,7 @@ blocks:
142
142
  temperature: 0.7
143
143
  n: 1
144
144
  async_mode: true
145
- - block_type: LLMParserBlock
145
+ - block_type: LLMResponseExtractorBlock
146
146
  block_config:
147
147
  block_name: extract_answers
148
148
  input_cols: response_dict
@@ -174,7 +174,7 @@ blocks:
174
174
  output_cols: eval_faithful_response_dict
175
175
  n: 1
176
176
  async_mode: true
177
- - block_type: LLMParserBlock
177
+ - block_type: LLMResponseExtractorBlock
178
178
  block_config:
179
179
  block_name: extract_eval_faithful
180
180
  input_cols: eval_faithful_response_dict
@@ -64,7 +64,7 @@ blocks:
64
64
  temperature: 1.0
65
65
  n: 1
66
66
  async_mode: true
67
- - block_type: LLMParserBlock
67
+ - block_type: LLMResponseExtractorBlock
68
68
  block_config:
69
69
  block_name: extract_questions
70
70
  input_cols: question_list
@@ -98,7 +98,7 @@ blocks:
98
98
  temperature: 1.0
99
99
  n: 1
100
100
  async_mode: true
101
- - block_type: LLMParserBlock
101
+ - block_type: LLMResponseExtractorBlock
102
102
  block_config:
103
103
  block_name: extract_answer
104
104
  input_cols: response_dict
@@ -130,7 +130,7 @@ blocks:
130
130
  output_cols: eval_faithful_response_dict
131
131
  n: 1
132
132
  async_mode: true
133
- - block_type: LLMParserBlock
133
+ - block_type: LLMResponseExtractorBlock
134
134
  block_config:
135
135
  block_name: extract_eval_faithful
136
136
  input_cols: eval_faithful_response_dict
@@ -62,7 +62,7 @@ blocks:
62
62
  temperature: 0.7
63
63
  n: 50
64
64
  async_mode: true
65
- - block_type: LLMParserBlock
65
+ - block_type: LLMResponseExtractorBlock
66
66
  block_config:
67
67
  block_name: extract_extractive_summary
68
68
  input_cols: raw_summary
@@ -110,7 +110,7 @@ blocks:
110
110
  temperature: 0.7
111
111
  n: 1
112
112
  async_mode: true
113
- - block_type: LLMParserBlock
113
+ - block_type: LLMResponseExtractorBlock
114
114
  block_config:
115
115
  block_name: extract_questions
116
116
  input_cols: question_list
@@ -144,7 +144,7 @@ blocks:
144
144
  temperature: 0.7
145
145
  n: 1
146
146
  async_mode: true
147
- - block_type: LLMParserBlock
147
+ - block_type: LLMResponseExtractorBlock
148
148
  block_config:
149
149
  block_name: extract_answers
150
150
  input_cols: response_dict
@@ -176,7 +176,7 @@ blocks:
176
176
  output_cols: eval_faithful_response_dict
177
177
  n: 1
178
178
  async_mode: true
179
- - block_type: LLMParserBlock
179
+ - block_type: LLMResponseExtractorBlock
180
180
  block_config:
181
181
  block_name: extract_eval_faithful
182
182
  input_cols: eval_faithful_response_dict
@@ -49,7 +49,7 @@ blocks:
49
49
  temperature: 0.7
50
50
  n: 1
51
51
  async_mode: true
52
- - block_type: LLMParserBlock
52
+ - block_type: LLMResponseExtractorBlock
53
53
  block_config:
54
54
  block_name: extract_atomic_facts
55
55
  input_cols: raw_summary
@@ -98,7 +98,7 @@ blocks:
98
98
  temperature: 0.7
99
99
  n: 1
100
100
  async_mode: true
101
- - block_type: LLMParserBlock
101
+ - block_type: LLMResponseExtractorBlock
102
102
  block_config:
103
103
  block_name: extract_key_fact_qa
104
104
  input_cols: raw_key_fact_qa
@@ -55,7 +55,7 @@ blocks:
55
55
  async_mode: true
56
56
  n: 2
57
57
 
58
- - block_type: LLMParserBlock
58
+ - block_type: LLMResponseExtractorBlock
59
59
  block_config:
60
60
  block_name: detailed_summary
61
61
  input_cols: raw_summary_detailed
@@ -85,7 +85,7 @@ blocks:
85
85
  max_tokens: 2048
86
86
  async_mode: true
87
87
 
88
- - block_type: LLMParserBlock
88
+ - block_type: LLMResponseExtractorBlock
89
89
  block_config:
90
90
  block_name: atomic_facts
91
91
  input_cols: raw_atomic_facts
@@ -114,7 +114,7 @@ blocks:
114
114
  max_tokens: 2048
115
115
  async_mode: true
116
116
 
117
- - block_type: LLMParserBlock
117
+ - block_type: LLMResponseExtractorBlock
118
118
  block_config:
119
119
  block_name: extractive_summary
120
120
  input_cols: raw_summary_extractive
@@ -160,7 +160,7 @@ blocks:
160
160
  max_tokens: 2048
161
161
  async_mode: true
162
162
 
163
- - block_type: LLMParserBlock
163
+ - block_type: LLMResponseExtractorBlock
164
164
  block_config:
165
165
  block_name: get_knowledge_generation
166
166
  input_cols: raw_knowledge_generation
@@ -191,7 +191,7 @@ blocks:
191
191
  n: 1
192
192
  async_mode: true
193
193
 
194
- - block_type: LLMParserBlock
194
+ - block_type: LLMResponseExtractorBlock
195
195
  block_config:
196
196
  block_name: extract_eval_faithful
197
197
  input_cols: eval_faithful_response_dict
@@ -236,7 +236,7 @@ blocks:
236
236
  max_tokens: 2048
237
237
  n: 1
238
238
  async_mode: true
239
- - block_type: LLMParserBlock
239
+ - block_type: LLMResponseExtractorBlock
240
240
  block_config:
241
241
  block_name: extract_eval_relevancy
242
242
  input_cols: eval_relevancy_response_dict
@@ -280,7 +280,7 @@ blocks:
280
280
  max_tokens: 2048
281
281
  n: 1
282
282
  async_mode: true
283
- - block_type: LLMParserBlock
283
+ - block_type: LLMResponseExtractorBlock
284
284
  block_config:
285
285
  block_name: extract_verify_question
286
286
  input_cols: verify_question_response_dict
@@ -57,7 +57,7 @@ blocks:
57
57
  async_mode: true
58
58
  # n: 2
59
59
 
60
- - block_type: LLMParserBlock
60
+ - block_type: LLMResponseExtractorBlock
61
61
  block_config:
62
62
  block_name: detailed_summary
63
63
  input_cols: raw_summary_detailed
@@ -87,7 +87,7 @@ blocks:
87
87
  max_tokens: 2048
88
88
  async_mode: true
89
89
 
90
- - block_type: LLMParserBlock
90
+ - block_type: LLMResponseExtractorBlock
91
91
  block_config:
92
92
  block_name: atomic_facts
93
93
  input_cols: raw_atomic_facts
@@ -116,7 +116,7 @@ blocks:
116
116
  max_tokens: 2048
117
117
  async_mode: true
118
118
 
119
- - block_type: LLMParserBlock
119
+ - block_type: LLMResponseExtractorBlock
120
120
  block_config:
121
121
  block_name: extractive_summary
122
122
  input_cols: raw_summary_extractive
@@ -161,7 +161,7 @@ blocks:
161
161
  max_tokens: 2048
162
162
  async_mode: true
163
163
 
164
- - block_type: LLMParserBlock
164
+ - block_type: LLMResponseExtractorBlock
165
165
  block_config:
166
166
  block_name: get_knowledge_generation
167
167
  input_cols: raw_knowledge_generation
@@ -192,7 +192,7 @@ blocks:
192
192
  n: 1
193
193
  async_mode: true
194
194
 
195
- - block_type: LLMParserBlock
195
+ - block_type: LLMResponseExtractorBlock
196
196
  block_config:
197
197
  block_name: extract_eval_faithful
198
198
  input_cols: eval_faithful_response_dict
@@ -237,7 +237,7 @@ blocks:
237
237
  max_tokens: 2048
238
238
  n: 1
239
239
  async_mode: true
240
- - block_type: LLMParserBlock
240
+ - block_type: LLMResponseExtractorBlock
241
241
  block_config:
242
242
  block_name: extract_eval_relevancy
243
243
  input_cols: eval_relevancy_response_dict
@@ -281,7 +281,7 @@ blocks:
281
281
  max_tokens: 2048
282
282
  n: 1
283
283
  async_mode: true
284
- - block_type: LLMParserBlock
284
+ - block_type: LLMResponseExtractorBlock
285
285
  block_config:
286
286
  block_name: extract_verify_question
287
287
  input_cols: verify_question_response_dict
@@ -49,7 +49,7 @@ blocks:
49
49
  max_tokens: 1024
50
50
  temperature: 0.3
51
51
  async_mode: true
52
- - block_type: "LLMParserBlock"
52
+ - block_type: "LLMResponseExtractorBlock"
53
53
  block_config:
54
54
  block_name: "extract_summary"
55
55
  input_cols: "raw_summary"
@@ -81,7 +81,7 @@ blocks:
81
81
  max_tokens: 512
82
82
  temperature: 0.3
83
83
  async_mode: true
84
- - block_type: "LLMParserBlock"
84
+ - block_type: "LLMResponseExtractorBlock"
85
85
  block_config:
86
86
  block_name: "extract_keywords"
87
87
  input_cols: "raw_keywords"
@@ -113,7 +113,7 @@ blocks:
113
113
  max_tokens: 1024
114
114
  temperature: 0.3
115
115
  async_mode: true
116
- - block_type: "LLMParserBlock"
116
+ - block_type: "LLMResponseExtractorBlock"
117
117
  block_config:
118
118
  block_name: "extract_entities"
119
119
  input_cols: "raw_entities"
@@ -145,7 +145,7 @@ blocks:
145
145
  max_tokens: 256
146
146
  temperature: 0.1
147
147
  async_mode: true
148
- - block_type: "LLMParserBlock"
148
+ - block_type: "LLMResponseExtractorBlock"
149
149
  block_config:
150
150
  block_name: "extract_sentiment"
151
151
  input_cols: "raw_sentiment"