lfx-nightly 0.1.12.dev14__py3-none-any.whl → 0.1.12.dev16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (130)
  1. lfx/base/agents/events.py +40 -29
  2. lfx/base/constants.py +1 -1
  3. lfx/base/data/docling_utils.py +43 -8
  4. lfx/base/data/utils.py +3 -3
  5. lfx/base/knowledge_bases/__init__.py +3 -0
  6. lfx/base/knowledge_bases/knowledge_base_utils.py +137 -0
  7. lfx/base/models/anthropic_constants.py +3 -1
  8. lfx/base/models/model_input_constants.py +1 -1
  9. lfx/base/vectorstores/vector_store_connection_decorator.py +1 -1
  10. lfx/components/agentql/agentql_api.py +1 -1
  11. lfx/components/agents/agent.py +62 -17
  12. lfx/components/agents/mcp_component.py +11 -1
  13. lfx/components/aiml/aiml.py +4 -1
  14. lfx/components/amazon/amazon_bedrock_converse.py +196 -0
  15. lfx/components/amazon/amazon_bedrock_model.py +5 -1
  16. lfx/components/azure/azure_openai.py +1 -1
  17. lfx/components/azure/azure_openai_embeddings.py +1 -1
  18. lfx/components/chroma/chroma.py +4 -2
  19. lfx/components/clickhouse/clickhouse.py +1 -1
  20. lfx/components/confluence/confluence.py +1 -1
  21. lfx/components/crewai/crewai.py +1 -0
  22. lfx/components/crewai/hierarchical_crew.py +1 -0
  23. lfx/components/crewai/hierarchical_task.py +1 -0
  24. lfx/components/crewai/sequential_crew.py +1 -0
  25. lfx/components/crewai/sequential_task.py +1 -0
  26. lfx/components/crewai/sequential_task_agent.py +1 -0
  27. lfx/components/data/api_request.py +13 -3
  28. lfx/components/data/csv_to_data.py +1 -0
  29. lfx/components/data/file.py +71 -25
  30. lfx/components/data/json_to_data.py +1 -0
  31. lfx/components/datastax/astra_db.py +2 -1
  32. lfx/components/datastax/astra_vectorize.py +3 -5
  33. lfx/components/datastax/astradb_tool.py +5 -1
  34. lfx/components/datastax/astradb_vectorstore.py +8 -1
  35. lfx/components/deactivated/chat_litellm_model.py +1 -1
  36. lfx/components/deactivated/metal.py +1 -1
  37. lfx/components/docling/docling_inline.py +23 -9
  38. lfx/components/elastic/elasticsearch.py +1 -1
  39. lfx/components/elastic/opensearch.py +1 -1
  40. lfx/components/embeddings/similarity.py +1 -0
  41. lfx/components/embeddings/text_embedder.py +1 -0
  42. lfx/components/firecrawl/firecrawl_crawl_api.py +1 -1
  43. lfx/components/firecrawl/firecrawl_extract_api.py +1 -1
  44. lfx/components/firecrawl/firecrawl_map_api.py +1 -1
  45. lfx/components/firecrawl/firecrawl_scrape_api.py +1 -1
  46. lfx/components/google/gmail.py +1 -0
  47. lfx/components/google/google_generative_ai_embeddings.py +1 -1
  48. lfx/components/helpers/memory.py +8 -6
  49. lfx/components/helpers/output_parser.py +1 -0
  50. lfx/components/helpers/store_message.py +1 -0
  51. lfx/components/huggingface/huggingface.py +3 -1
  52. lfx/components/huggingface/huggingface_inference_api.py +1 -1
  53. lfx/components/ibm/watsonx.py +1 -1
  54. lfx/components/ibm/watsonx_embeddings.py +1 -1
  55. lfx/components/icosacomputing/combinatorial_reasoner.py +1 -1
  56. lfx/components/input_output/chat.py +0 -27
  57. lfx/components/input_output/chat_output.py +3 -27
  58. lfx/components/knowledge_bases/__init__.py +34 -0
  59. lfx/components/knowledge_bases/ingestion.py +686 -0
  60. lfx/components/knowledge_bases/retrieval.py +256 -0
  61. lfx/components/langchain_utilities/langchain_hub.py +1 -1
  62. lfx/components/langwatch/langwatch.py +1 -1
  63. lfx/components/logic/conditional_router.py +40 -3
  64. lfx/components/logic/data_conditional_router.py +1 -0
  65. lfx/components/logic/flow_tool.py +2 -1
  66. lfx/components/logic/pass_message.py +1 -0
  67. lfx/components/logic/sub_flow.py +2 -1
  68. lfx/components/milvus/milvus.py +1 -1
  69. lfx/components/olivya/olivya.py +1 -1
  70. lfx/components/processing/alter_metadata.py +1 -0
  71. lfx/components/processing/combine_text.py +1 -0
  72. lfx/components/processing/create_data.py +1 -0
  73. lfx/components/processing/data_to_dataframe.py +1 -0
  74. lfx/components/processing/extract_key.py +1 -0
  75. lfx/components/processing/filter_data.py +1 -0
  76. lfx/components/processing/filter_data_values.py +1 -0
  77. lfx/components/processing/json_cleaner.py +1 -0
  78. lfx/components/processing/merge_data.py +1 -0
  79. lfx/components/processing/message_to_data.py +1 -0
  80. lfx/components/processing/parse_data.py +1 -0
  81. lfx/components/processing/parse_dataframe.py +1 -0
  82. lfx/components/processing/parse_json_data.py +1 -0
  83. lfx/components/processing/python_repl_core.py +2 -2
  84. lfx/components/processing/regex.py +1 -0
  85. lfx/components/processing/select_data.py +1 -0
  86. lfx/components/processing/structured_output.py +7 -3
  87. lfx/components/processing/update_data.py +1 -0
  88. lfx/components/prototypes/__init__.py +8 -7
  89. lfx/components/qdrant/qdrant.py +1 -1
  90. lfx/components/redis/redis_chat.py +1 -1
  91. lfx/components/tools/__init__.py +0 -6
  92. lfx/components/tools/calculator.py +2 -1
  93. lfx/components/tools/python_code_structured_tool.py +1 -0
  94. lfx/components/tools/python_repl.py +2 -1
  95. lfx/components/tools/search_api.py +2 -1
  96. lfx/components/tools/serp_api.py +2 -1
  97. lfx/components/tools/tavily_search_tool.py +1 -0
  98. lfx/components/tools/wikidata_api.py +2 -1
  99. lfx/components/tools/wikipedia_api.py +2 -1
  100. lfx/components/tools/yahoo_finance.py +2 -1
  101. lfx/components/twelvelabs/video_embeddings.py +1 -1
  102. lfx/components/upstash/upstash.py +1 -1
  103. lfx/components/vectorstores/astradb_graph.py +8 -1
  104. lfx/components/vectorstores/local_db.py +1 -0
  105. lfx/components/vectorstores/weaviate.py +1 -1
  106. lfx/components/wolframalpha/wolfram_alpha_api.py +1 -1
  107. lfx/components/zep/zep.py +2 -1
  108. lfx/custom/attributes.py +1 -0
  109. lfx/custom/validate.py +1 -1
  110. lfx/graph/graph/base.py +61 -4
  111. lfx/inputs/inputs.py +1 -0
  112. lfx/log/logger.py +31 -11
  113. lfx/schema/message.py +6 -1
  114. lfx/schema/schema.py +4 -0
  115. lfx/services/__init__.py +3 -0
  116. lfx/services/mcp_composer/__init__.py +6 -0
  117. lfx/services/mcp_composer/factory.py +16 -0
  118. lfx/services/mcp_composer/service.py +599 -0
  119. lfx/services/schema.py +1 -0
  120. lfx/services/settings/auth.py +18 -15
  121. lfx/services/settings/base.py +38 -0
  122. lfx/services/settings/constants.py +4 -1
  123. lfx/services/settings/feature_flags.py +0 -1
  124. lfx/template/frontend_node/base.py +2 -0
  125. lfx/utils/image.py +1 -1
  126. {lfx_nightly-0.1.12.dev14.dist-info → lfx_nightly-0.1.12.dev16.dist-info}/METADATA +1 -1
  127. {lfx_nightly-0.1.12.dev14.dist-info → lfx_nightly-0.1.12.dev16.dist-info}/RECORD +129 -121
  128. lfx/components/datastax/astradb.py +0 -1285
  129. {lfx_nightly-0.1.12.dev14.dist-info → lfx_nightly-0.1.12.dev16.dist-info}/WHEEL +0 -0
  130. {lfx_nightly-0.1.12.dev14.dist-info → lfx_nightly-0.1.12.dev16.dist-info}/entry_points.txt +0 -0
lfx/base/agents/events.py CHANGED
@@ -34,7 +34,7 @@ class InputDict(TypedDict):
 
 def _build_agent_input_text_content(agent_input_dict: InputDict) -> str:
     final_input = agent_input_dict.get("input", "")
-    return f"**Input**: {final_input}"
+    return f"{final_input}"
 
 
 def _calculate_duration(start_time: float) -> int:
@@ -90,34 +90,45 @@ def _extract_output_text(output: str | list) -> str:
         return output
     if isinstance(output, list) and len(output) == 0:
         return ""
-    if not isinstance(output, list) or len(output) != 1:
-        msg = f"Output is not a string or list of dictionaries with 'text' key: {output}"
-        raise TypeError(msg)
-
-    item = output[0]
-    if isinstance(item, str):
-        return item
-    if isinstance(item, dict):
-        if "text" in item:
-            return item["text"]
-        # If the item's type is "tool_use", return an empty string.
-        # This likely indicates that "tool_use" outputs are not meant to be displayed as text.
-        if item.get("type") == "tool_use":
-            return ""
-    if isinstance(item, dict):
-        if "text" in item:
-            return item["text"]
-        # If the item's type is "tool_use", return an empty string.
-        # This likely indicates that "tool_use" outputs are not meant to be displayed as text.
-        if item.get("type") == "tool_use":
-            return ""
-        # This is a workaround to deal with function calling by Anthropic
-        # since the same data comes in the tool_output we don't need to stream it here
-        # although it would be nice to
-        if "partial_json" in item:
-            return ""
-    msg = f"Output is not a string or list of dictionaries with 'text' key: {output}"
-    raise TypeError(msg)
+
+    # Handle lists of various lengths and formats
+    if isinstance(output, list):
+        # Handle single item lists
+        if len(output) == 1:
+            item = output[0]
+            if isinstance(item, str):
+                return item
+            if isinstance(item, dict):
+                if "text" in item:
+                    return item["text"] or ""
+                # If the item's type is "tool_use", return an empty string.
+                if item.get("type") == "tool_use":
+                    return ""
+                # Handle items with only 'index' key (from ChatBedrockConverse)
+                if "index" in item and len(item) == 1:
+                    return ""
+                # This is a workaround to deal with function calling by Anthropic
+                if "partial_json" in item:
+                    return ""
+
+        # Handle multiple items - extract text from all text-type items
+        else:
+            text_parts = []
+            for item in output:
+                if isinstance(item, str):
+                    text_parts.append(item)
+                elif isinstance(item, dict):
+                    if "text" in item and item["text"] is not None:
+                        text_parts.append(item["text"])
+                    # Skip tool_use, index-only, and partial_json items
+                    elif (
+                        item.get("type") == "tool_use" or "partial_json" in item or ("index" in item and len(item) == 1)
+                    ):
+                        continue
+            return "".join(text_parts)
+
+    # If we get here, the format is unexpected but try to be graceful
+    return ""
 
 
 async def handle_on_chain_end(
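
The rewrite above makes _extract_output_text tolerant of multi-chunk streaming output instead of raising TypeError for anything other than a single-item list. A minimal behavior sketch with illustrative chunk shapes (the tool_use, index-only, and partial_json cases come from Anthropic and ChatBedrockConverse streams; the exact payloads here are made up):

    # Illustrative calls against the rewritten function above.
    _extract_output_text("hello")                 # -> "hello"
    _extract_output_text([])                      # -> ""
    _extract_output_text([{"text": "partial"}])   # -> "partial"
    _extract_output_text([{"type": "tool_use"}])  # -> "" (tool calls are not streamed as text)
    _extract_output_text([{"index": 0}])          # -> "" (index-only chunk from ChatBedrockConverse)
    _extract_output_text([{"text": "a"}, {"partial_json": "{"}, {"text": "b"}])  # -> "ab"
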
lfx/base/constants.py CHANGED
@@ -43,4 +43,4 @@ FIELD_FORMAT_ATTRIBUTES = [
 ]
 SKIPPED_FIELD_ATTRIBUTES = ["advanced"]
 ORJSON_OPTIONS = orjson.OPT_INDENT_2 | orjson.OPT_SORT_KEYS | orjson.OPT_OMIT_MICROSECONDS
-SKIPPED_COMPONENTS = {"LanguageModelComponent", "File", "FileComponent"}
+SKIPPED_COMPONENTS = {"LanguageModelComponent"}
lfx/base/data/docling_utils.py CHANGED
@@ -16,6 +16,15 @@ if TYPE_CHECKING:
     from langchain_core.language_models.chat_models import BaseChatModel
 
 
+class DoclingDependencyError(Exception):
+    """Custom exception for missing Docling dependencies."""
+
+    def __init__(self, dependency_name: str, install_command: str):
+        self.dependency_name = dependency_name
+        self.install_command = install_command
+        super().__init__(f"{dependency_name} is not correctly installed. {install_command}")
+
+
 def extract_docling_documents(data_inputs: Data | list[Data] | DataFrame, doc_key: str) -> list[DoclingDocument]:
     documents: list[DoclingDocument] = []
     if isinstance(data_inputs, DataFrame):
@@ -249,22 +258,48 @@ def docling_worker(
         logger.debug(f"Processing file {i + 1}/{len(file_paths)}: {file_path}")
 
         try:
-            # Process single file (we can't easily interrupt convert_all)
             single_result = converter.convert_all([file_path])
             results.extend(single_result)
-
-            # Check for shutdown after each file
             check_shutdown()
 
-        except (OSError, ValueError, RuntimeError, ImportError) as file_error:
-            # Handle specific file processing errors
+        except ImportError as import_error:
+            # Simply pass ImportError to main process for handling
+            queue.put(
+                {"error": str(import_error), "error_type": "import_error", "original_exception": "ImportError"}
+            )
+            return
+
+        except (OSError, ValueError, RuntimeError) as file_error:
+            error_msg = str(file_error)
+
+            # Check for specific dependency errors and identify the dependency name
+            dependency_name = None
+            if "ocrmac is not correctly installed" in error_msg:
+                dependency_name = "ocrmac"
+            elif "easyocr" in error_msg and "not installed" in error_msg:
+                dependency_name = "easyocr"
+            elif "tesserocr" in error_msg and "not installed" in error_msg:
+                dependency_name = "tesserocr"
+            elif "rapidocr" in error_msg and "not installed" in error_msg:
+                dependency_name = "rapidocr"
+
+            if dependency_name:
+                queue.put(
+                    {
+                        "error": error_msg,
+                        "error_type": "dependency_error",
+                        "dependency_name": dependency_name,
+                        "original_exception": type(file_error).__name__,
+                    }
+                )
+                return
+
+            # If not a dependency error, log and continue with other files
             logger.error(f"Error processing file {file_path}: {file_error}")
-            # Continue with other files, but check for shutdown
             check_shutdown()
+
         except Exception as file_error:  # noqa: BLE001
-            # Catch any other unexpected errors to prevent worker crash
             logger.error(f"Unexpected error processing file {file_path}: {file_error}")
-            # Continue with other files, but check for shutdown
             check_shutdown()
 
     # Final shutdown check before sending results
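
With this change, docling_worker reports unrecoverable failures to the parent process as tagged dictionaries on its result queue instead of crashing mid-batch. A minimal consumer sketch, assuming a queue shared with the worker and the new DoclingDependencyError; the pip-install hint is an illustrative assumption, not the worker's actual message:

    # Hedged sketch: the "error_type" values match the worker code above.
    result = queue.get()
    if isinstance(result, dict) and "error" in result:
        if result.get("error_type") == "dependency_error":
            name = result["dependency_name"]
            # The install command below is a placeholder assumption.
            raise DoclingDependencyError(name, f"Try: pip install {name}")
        if result.get("error_type") == "import_error":
            raise ImportError(result["error"])
    else:
        converted = result  # normal case: the conversion results
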
lfx/base/data/utils.py CHANGED
@@ -13,17 +13,17 @@ from lfx.schema.data import Data
 # Types of files that can be read simply by file.read()
 # and have 100% to be completely readable
 TEXT_FILE_TYPES = [
+    "csv",
+    "json",
+    "pdf",
     "txt",
     "md",
     "mdx",
-    "csv",
-    "json",
     "yaml",
     "yml",
     "xml",
     "html",
     "htm",
-    "pdf",
     "docx",
     "py",
     "sh",
lfx/base/knowledge_bases/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .knowledge_base_utils import compute_bm25, compute_tfidf, get_knowledge_bases
+
+__all__ = ["compute_bm25", "compute_tfidf", "get_knowledge_bases"]
lfx/base/knowledge_bases/knowledge_base_utils.py ADDED
@@ -0,0 +1,137 @@
+import math
+from collections import Counter
+from pathlib import Path
+from uuid import UUID
+
+from langflow.services.database.models.user.crud import get_user_by_id
+from langflow.services.deps import session_scope
+
+
+def compute_tfidf(documents: list[str], query_terms: list[str]) -> list[float]:
+    """Compute TF-IDF scores for query terms across a collection of documents.
+
+    Args:
+        documents: List of document strings
+        query_terms: List of query terms to score
+
+    Returns:
+        List of TF-IDF scores for each document
+    """
+    # Tokenize documents (simple whitespace splitting)
+    tokenized_docs = [doc.lower().split() for doc in documents]
+    n_docs = len(documents)
+
+    # Calculate document frequency for each term
+    document_frequencies = {}
+    for term in query_terms:
+        document_frequencies[term] = sum(1 for doc in tokenized_docs if term.lower() in doc)
+
+    scores = []
+
+    for doc_tokens in tokenized_docs:
+        doc_score = 0.0
+        doc_length = len(doc_tokens)
+        term_counts = Counter(doc_tokens)
+
+        for term in query_terms:
+            term_lower = term.lower()
+
+            # Term frequency (TF)
+            tf = term_counts[term_lower] / doc_length if doc_length > 0 else 0
+
+            # Inverse document frequency (IDF)
+            idf = math.log(n_docs / document_frequencies[term]) if document_frequencies[term] > 0 else 0
+
+            # TF-IDF score
+            doc_score += tf * idf
+
+        scores.append(doc_score)
+
+    return scores
+
+
+def compute_bm25(documents: list[str], query_terms: list[str], k1: float = 1.2, b: float = 0.75) -> list[float]:
+    """Compute BM25 scores for query terms across a collection of documents.
+
+    Args:
+        documents: List of document strings
+        query_terms: List of query terms to score
+        k1: Controls term frequency scaling (default: 1.2)
+        b: Controls document length normalization (default: 0.75)
+
+    Returns:
+        List of BM25 scores for each document
+    """
+    # Tokenize documents
+    tokenized_docs = [doc.lower().split() for doc in documents]
+    n_docs = len(documents)
+
+    # Calculate average document length
+    avg_doc_length = sum(len(doc) for doc in tokenized_docs) / n_docs if n_docs > 0 else 0
+
+    # Handle edge case where all documents are empty
+    if avg_doc_length == 0:
+        return [0.0] * n_docs
+
+    # Calculate document frequency for each term
+    document_frequencies = {}
+    for term in query_terms:
+        document_frequencies[term] = sum(1 for doc in tokenized_docs if term.lower() in doc)
+
+    scores = []
+
+    for doc_tokens in tokenized_docs:
+        doc_score = 0.0
+        doc_length = len(doc_tokens)
+        term_counts = Counter(doc_tokens)
+
+        for term in query_terms:
+            term_lower = term.lower()
+
+            # Term frequency in document
+            tf = term_counts[term_lower]
+
+            # Inverse document frequency (IDF)
+            # Use standard BM25 IDF formula that ensures non-negative values
+            idf = math.log(n_docs / document_frequencies[term]) if document_frequencies[term] > 0 else 0
+
+            # BM25 score calculation
+            numerator = tf * (k1 + 1)
+            denominator = tf + k1 * (1 - b + b * (doc_length / avg_doc_length))
+
+            # Handle division by zero when tf=0 and k1=0
+            term_score = 0 if denominator == 0 else idf * (numerator / denominator)
+
+            doc_score += term_score
+
+        scores.append(doc_score)
+
+    return scores
+
+
+async def get_knowledge_bases(kb_root: Path, user_id: UUID | str) -> list[str]:
+    """Retrieve a list of available knowledge bases.
+
+    Returns:
+        A list of knowledge base names.
+    """
+    if not kb_root.exists():
+        return []
+
+    # Get the current user
+    async with session_scope() as db:
+        if not user_id:
+            msg = "User ID is required for fetching knowledge bases."
+            raise ValueError(msg)
+        user_id = UUID(user_id) if isinstance(user_id, str) else user_id
+        current_user = await get_user_by_id(db, user_id)
+        if not current_user:
+            msg = f"User with ID {user_id} not found."
+            raise ValueError(msg)
+        kb_user = current_user.username
+    kb_path = kb_root / kb_user
+
+    if not kb_path.exists():
+        return []
+
+    return [str(d.name) for d in kb_path.iterdir() if not d.name.startswith(".") and d.is_dir()]
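
Both scorers take raw document strings plus pre-split query terms and return one score per document in input order, so callers can rank by index. A quick usage sketch (documents and query are illustrative):

    docs = [
        "langflow builds agent flows",
        "bm25 ranks documents by term frequency and length",
        "tf idf weighs rare terms more heavily",
    ]
    query = ["term", "frequency"]

    tfidf_scores = compute_tfidf(docs, query)  # only the second doc matches both terms
    bm25_scores = compute_bm25(docs, query)    # defaults: k1=1.2, b=0.75
    best = max(range(len(docs)), key=lambda i: bm25_scores[i])  # -> 1

Note that both functions use math.log(n_docs / df) for IDF, so a term that appears in every document gets an IDF of zero and contributes nothing to the ranking.
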
lfx/base/models/anthropic_constants.py CHANGED
@@ -8,7 +8,9 @@ ANTHROPIC_MODELS_DETAILED = [
     create_model_metadata(provider="Anthropic", name="claude-3-5-sonnet-latest", icon="Anthropic", tool_calling=True),
     create_model_metadata(provider="Anthropic", name="claude-3-5-haiku-latest", icon="Anthropic", tool_calling=True),
     create_model_metadata(provider="Anthropic", name="claude-3-opus-latest", icon="Anthropic", tool_calling=True),
-    create_model_metadata(provider="Anthropic", name="claude-3-sonnet-20240229", icon="Anthropic", tool_calling=True),
+    create_model_metadata(
+        provider="Anthropic", name="claude-3-sonnet-20240229", icon="Anthropic", tool_calling=True, deprecated=True
+    ),
     # Tool calling unsupported models
     create_model_metadata(provider="Anthropic", name="claude-2.1", icon="Anthropic", tool_calling=False),
     create_model_metadata(provider="Anthropic", name="claude-2.0", icon="Anthropic", tool_calling=False),
lfx/base/models/model_input_constants.py CHANGED
@@ -213,7 +213,7 @@ try:
         "prefix": "",
         "component_class": GroqModel(),
         "icon": GroqModel.icon,
-        "is_active": True,
+        "is_active": False,
     }
 except ImportError:
     pass
lfx/base/vectorstores/vector_store_connection_decorator.py CHANGED
@@ -31,7 +31,7 @@ def vector_store_connection(cls):
         [
             Output(
                 display_name="Vector Store Connection",
-                hidden=True,
+                hidden=False,
                 name="vectorstoreconnection",
                 method="as_vector_store",
                 group_outputs=False,
lfx/components/agentql/agentql_api.py CHANGED
@@ -17,7 +17,7 @@ class AgentQL(Component):
     inputs = [
         SecretStrInput(
             name="api_key",
-            display_name="API Key",
+            display_name="AgentQL API Key",
             required=True,
             password=True,
             info="Your AgentQL API key from dev.agentql.com",
lfx/components/agents/agent.py CHANGED
@@ -9,7 +9,6 @@ from lfx.base.agents.events import ExceptionWithMessageError
 from lfx.base.models.model_input_constants import (
     ALL_PROVIDER_FIELDS,
     MODEL_DYNAMIC_UPDATE_FIELDS,
-    MODEL_PROVIDERS,
     MODEL_PROVIDERS_DICT,
     MODELS_METADATA,
 )
@@ -20,8 +19,8 @@ from lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComp
 from lfx.custom.custom_component.component import get_component_toolkit
 from lfx.custom.utils import update_component_build_config
 from lfx.helpers.base_model import build_model_from_schema
-from lfx.inputs.inputs import TableInput
-from lfx.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output
+from lfx.inputs.inputs import BoolInput
+from lfx.io import DropdownInput, IntInput, MultilineInput, Output, TableInput
 from lfx.log.logger import logger
 from lfx.schema.data import Data
 from lfx.schema.dotdict import dotdict
@@ -34,7 +33,7 @@ def set_advanced_true(component_input):
     return component_input
 
 
-MODEL_PROVIDERS_LIST = ["Anthropic", "Google Generative AI", "Groq", "OpenAI"]
+MODEL_PROVIDERS_LIST = ["Anthropic", "Google Generative AI", "OpenAI"]
 
 
 class AgentComponent(ToolCallingAgentComponent):
@@ -62,12 +61,24 @@ class AgentComponent(ToolCallingAgentComponent):
             name="agent_llm",
             display_name="Model Provider",
             info="The provider of the language model that the agent will use to generate responses.",
-            options=[*MODEL_PROVIDERS_LIST, "Custom"],
+            options=[*MODEL_PROVIDERS_LIST],
             value="OpenAI",
             real_time_refresh=True,
+            refresh_button=False,
             input_types=[],
             options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA]
             + [{"icon": "brain"}],
+            external_options={
+                "fields": {
+                    "data": {
+                        "node": {
+                            "name": "connect_other_models",
+                            "display_name": "Connect other models",
+                            "icon": "CornerDownLeft",
+                        }
+                    }
+                },
+            },
         ),
         *openai_inputs_filtered,
         MultilineInput(
@@ -159,7 +170,6 @@ class AgentComponent(ToolCallingAgentComponent):
     ]
     outputs = [
         Output(name="response", display_name="Response", method="message_response"),
-        Output(name="structured_response", display_name="Structured Response", method="json_response", tool_mode=False),
     ]
 
     async def get_agent_requirements(self):
@@ -228,7 +238,13 @@ class AgentComponent(ToolCallingAgentComponent):
            }
            # Ensure multiple is handled correctly
            if isinstance(processed_field["multiple"], str):
-                processed_field["multiple"] = processed_field["multiple"].lower() in ["true", "1", "t", "y", "yes"]
+                processed_field["multiple"] = processed_field["multiple"].lower() in [
+                    "true",
+                    "1",
+                    "t",
+                    "y",
+                    "yes",
+                ]
            processed_schema.append(processed_field)
        return processed_schema
 
@@ -343,7 +359,12 @@ class AgentComponent(ToolCallingAgentComponent):
                raise
            try:
                result = await self.run_agent(structured_agent)
-            except (ExceptionWithMessageError, ValueError, TypeError, RuntimeError) as e:
+            except (
+                ExceptionWithMessageError,
+                ValueError,
+                TypeError,
+                RuntimeError,
+            ) as e:
                await logger.aerror(f"Error with structured agent result: {e}")
                raise
            # Extract content from structured agent result
@@ -354,7 +375,13 @@ class AgentComponent(ToolCallingAgentComponent):
            else:
                content = str(result)
 
-        except (ExceptionWithMessageError, ValueError, TypeError, NotImplementedError, AttributeError) as e:
+        except (
+            ExceptionWithMessageError,
+            ValueError,
+            TypeError,
+            NotImplementedError,
+            AttributeError,
+        ) as e:
            await logger.aerror(f"Error with structured chat agent: {e}")
            # Fallback to regular agent
            content_str = "No content returned from agent"
@@ -381,7 +408,11 @@ class AgentComponent(ToolCallingAgentComponent):
        # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.
        messages = (
            await MemoryComponent(**self.get_base_args())
-            .set(session_id=self.graph.session_id, order="Ascending", n_messages=self.n_messages)
+            .set(
+                session_id=self.graph.session_id,
+                order="Ascending",
+                n_messages=self.n_messages,
+            )
            .retrieve_messages()
        )
        return [
@@ -488,19 +519,31 @@ class AgentComponent(ToolCallingAgentComponent):
            # Reset input types for agent_llm
            build_config["agent_llm"]["input_types"] = []
            build_config["agent_llm"]["display_name"] = "Model Provider"
-        elif field_value == "Custom":
+        elif field_value == "connect_other_models":
            # Delete all provider fields
            self.delete_fields(build_config, ALL_PROVIDER_FIELDS)
-            # Update with custom component
+            # # Update with custom component
            custom_component = DropdownInput(
                name="agent_llm",
                display_name="Language Model",
-                options=[*sorted(MODEL_PROVIDERS), "Custom"],
-                value="Custom",
+                info="The provider of the language model that the agent will use to generate responses.",
+                options=[*MODEL_PROVIDERS_LIST],
                real_time_refresh=True,
+                refresh_button=False,
                input_types=["LanguageModel"],
-                options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]
-                + [{"icon": "brain"}],
+                placeholder="Awaiting model input.",
+                options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],
+                external_options={
+                    "fields": {
+                        "data": {
+                            "node": {
+                                "name": "connect_other_models",
+                                "display_name": "Connect other models",
+                                "icon": "CornerDownLeft",
+                            },
+                        }
+                    },
+                },
            )
            build_config.update({"agent_llm": custom_component.to_dict()})
            # Update input types for all fields
@@ -551,7 +594,9 @@ class AgentComponent(ToolCallingAgentComponent):
        # TODO: Agent Description Depreciated Feature to be removed
        description = f"{agent_description}{tools_names}"
        tools = component_toolkit(component=self).get_tools(
-            tool_name="Call_Agent", tool_description=description, callbacks=self.get_langchain_callbacks()
+            tool_name="Call_Agent",
+            tool_description=description,
+            callbacks=self.get_langchain_callbacks(),
        )
        if hasattr(self, "tools_metadata"):
            tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)
lfx/components/agents/mcp_component.py CHANGED
@@ -275,7 +275,17 @@ class MCPToolsComponent(ComponentWithCache):
 
         # To avoid unnecessary updates, only proceed if the server has actually changed
         if (_last_selected_server in (current_server_name, "")) and build_config["tool"]["show"]:
-            return build_config
+            if current_server_name:
+                servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
+                if isinstance(servers_cache, dict):
+                    cached = servers_cache.get(current_server_name)
+                    if cached is not None and cached.get("tool_names"):
+                        cached_tools = cached["tool_names"]
+                        current_tools = build_config["tool"]["options"]
+                        if current_tools == cached_tools:
+                            return build_config
+            else:
+                return build_config
 
         # Determine if "Tool Mode" is active by checking if the tool dropdown is hidden.
         is_in_tool_mode = build_config["tools_metadata"]["show"]
lfx/components/aiml/aiml.py CHANGED
@@ -56,7 +56,10 @@ class AIMLModelComponent(LCModelComponent):
             required=True,
         ),
         SliderInput(
-            name="temperature", display_name="Temperature", value=0.1, range_spec=RangeSpec(min=0, max=2, step=0.01)
+            name="temperature",
+            display_name="Temperature",
+            value=0.1,
+            range_spec=RangeSpec(min=0, max=2, step=0.01),
         ),
     ]
 