lfx-nightly 0.2.0.dev0__py3-none-any.whl → 0.2.0.dev26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +13 -1
- lfx/base/agents/altk_base_agent.py +380 -0
- lfx/base/agents/altk_tool_wrappers.py +565 -0
- lfx/base/agents/events.py +2 -1
- lfx/base/composio/composio_base.py +159 -224
- lfx/base/data/base_file.py +88 -21
- lfx/base/data/storage_utils.py +192 -0
- lfx/base/data/utils.py +178 -14
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/models/groq_constants.py +74 -58
- lfx/base/models/groq_model_discovery.py +265 -0
- lfx/base/models/model.py +1 -1
- lfx/base/models/model_utils.py +100 -0
- lfx/base/models/openai_constants.py +7 -0
- lfx/base/models/watsonx_constants.py +32 -8
- lfx/base/tools/run_flow.py +601 -129
- lfx/cli/commands.py +6 -3
- lfx/cli/common.py +2 -2
- lfx/cli/run.py +1 -1
- lfx/cli/script_loader.py +53 -11
- lfx/components/Notion/create_page.py +1 -1
- lfx/components/Notion/list_database_properties.py +1 -1
- lfx/components/Notion/list_pages.py +1 -1
- lfx/components/Notion/list_users.py +1 -1
- lfx/components/Notion/page_content_viewer.py +1 -1
- lfx/components/Notion/search.py +1 -1
- lfx/components/Notion/update_page_property.py +1 -1
- lfx/components/__init__.py +19 -5
- lfx/components/{agents → altk}/__init__.py +5 -9
- lfx/components/altk/altk_agent.py +193 -0
- lfx/components/apify/apify_actor.py +1 -1
- lfx/components/composio/__init__.py +70 -18
- lfx/components/composio/apollo_composio.py +11 -0
- lfx/components/composio/bitbucket_composio.py +11 -0
- lfx/components/composio/canva_composio.py +11 -0
- lfx/components/composio/coda_composio.py +11 -0
- lfx/components/composio/composio_api.py +10 -0
- lfx/components/composio/discord_composio.py +1 -1
- lfx/components/composio/elevenlabs_composio.py +11 -0
- lfx/components/composio/exa_composio.py +11 -0
- lfx/components/composio/firecrawl_composio.py +11 -0
- lfx/components/composio/fireflies_composio.py +11 -0
- lfx/components/composio/gmail_composio.py +1 -1
- lfx/components/composio/googlebigquery_composio.py +11 -0
- lfx/components/composio/googlecalendar_composio.py +1 -1
- lfx/components/composio/googledocs_composio.py +1 -1
- lfx/components/composio/googlemeet_composio.py +1 -1
- lfx/components/composio/googlesheets_composio.py +1 -1
- lfx/components/composio/googletasks_composio.py +1 -1
- lfx/components/composio/heygen_composio.py +11 -0
- lfx/components/composio/mem0_composio.py +11 -0
- lfx/components/composio/peopledatalabs_composio.py +11 -0
- lfx/components/composio/perplexityai_composio.py +11 -0
- lfx/components/composio/serpapi_composio.py +11 -0
- lfx/components/composio/slack_composio.py +3 -574
- lfx/components/composio/slackbot_composio.py +1 -1
- lfx/components/composio/snowflake_composio.py +11 -0
- lfx/components/composio/tavily_composio.py +11 -0
- lfx/components/composio/youtube_composio.py +2 -2
- lfx/components/cuga/__init__.py +34 -0
- lfx/components/cuga/cuga_agent.py +730 -0
- lfx/components/data/__init__.py +78 -28
- lfx/components/data_source/__init__.py +58 -0
- lfx/components/{data → data_source}/api_request.py +26 -3
- lfx/components/{data → data_source}/csv_to_data.py +15 -10
- lfx/components/{data → data_source}/json_to_data.py +15 -8
- lfx/components/{data → data_source}/news_search.py +1 -1
- lfx/components/{data → data_source}/rss.py +1 -1
- lfx/components/{data → data_source}/sql_executor.py +1 -1
- lfx/components/{data → data_source}/url.py +1 -1
- lfx/components/{data → data_source}/web_search.py +1 -1
- lfx/components/datastax/astradb_cql.py +1 -1
- lfx/components/datastax/astradb_graph.py +1 -1
- lfx/components/datastax/astradb_tool.py +1 -1
- lfx/components/datastax/astradb_vectorstore.py +1 -1
- lfx/components/datastax/hcd.py +1 -1
- lfx/components/deactivated/json_document_builder.py +1 -1
- lfx/components/docling/__init__.py +0 -3
- lfx/components/elastic/elasticsearch.py +1 -1
- lfx/components/elastic/opensearch_multimodal.py +1575 -0
- lfx/components/files_and_knowledge/__init__.py +47 -0
- lfx/components/{data → files_and_knowledge}/directory.py +1 -1
- lfx/components/{data → files_and_knowledge}/file.py +246 -18
- lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +2 -2
- lfx/components/{data → files_and_knowledge}/save_file.py +142 -22
- lfx/components/flow_controls/__init__.py +58 -0
- lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
- lfx/components/{logic → flow_controls}/loop.py +43 -9
- lfx/components/flow_controls/run_flow.py +108 -0
- lfx/components/glean/glean_search_api.py +1 -1
- lfx/components/groq/groq.py +35 -28
- lfx/components/helpers/__init__.py +102 -0
- lfx/components/input_output/__init__.py +3 -1
- lfx/components/input_output/chat.py +4 -3
- lfx/components/input_output/chat_output.py +4 -4
- lfx/components/input_output/text.py +1 -1
- lfx/components/input_output/text_output.py +1 -1
- lfx/components/{data → input_output}/webhook.py +1 -1
- lfx/components/knowledge_bases/__init__.py +59 -4
- lfx/components/langchain_utilities/character.py +1 -1
- lfx/components/langchain_utilities/csv_agent.py +84 -16
- lfx/components/langchain_utilities/json_agent.py +67 -12
- lfx/components/langchain_utilities/language_recursive.py +1 -1
- lfx/components/llm_operations/__init__.py +46 -0
- lfx/components/{processing → llm_operations}/batch_run.py +1 -1
- lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
- lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
- lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
- lfx/components/{processing → llm_operations}/structured_output.py +1 -1
- lfx/components/logic/__init__.py +126 -0
- lfx/components/mem0/mem0_chat_memory.py +11 -0
- lfx/components/models/__init__.py +64 -9
- lfx/components/models_and_agents/__init__.py +49 -0
- lfx/components/{agents → models_and_agents}/agent.py +2 -2
- lfx/components/models_and_agents/embedding_model.py +423 -0
- lfx/components/models_and_agents/language_model.py +398 -0
- lfx/components/{agents → models_and_agents}/mcp_component.py +53 -44
- lfx/components/{helpers → models_and_agents}/memory.py +1 -1
- lfx/components/nvidia/system_assist.py +1 -1
- lfx/components/olivya/olivya.py +1 -1
- lfx/components/ollama/ollama.py +17 -3
- lfx/components/processing/__init__.py +9 -57
- lfx/components/processing/converter.py +1 -1
- lfx/components/processing/dataframe_operations.py +1 -1
- lfx/components/processing/parse_json_data.py +2 -2
- lfx/components/processing/parser.py +1 -1
- lfx/components/processing/split_text.py +1 -1
- lfx/components/qdrant/qdrant.py +1 -1
- lfx/components/redis/redis.py +1 -1
- lfx/components/twelvelabs/split_video.py +10 -0
- lfx/components/twelvelabs/video_file.py +12 -0
- lfx/components/utilities/__init__.py +43 -0
- lfx/components/{helpers → utilities}/calculator_core.py +1 -1
- lfx/components/{helpers → utilities}/current_date.py +1 -1
- lfx/components/{processing → utilities}/python_repl_core.py +1 -1
- lfx/components/vectorstores/local_db.py +9 -0
- lfx/components/youtube/youtube_transcripts.py +118 -30
- lfx/custom/custom_component/component.py +57 -1
- lfx/custom/custom_component/custom_component.py +68 -6
- lfx/graph/edge/base.py +43 -20
- lfx/graph/graph/base.py +4 -1
- lfx/graph/state/model.py +15 -2
- lfx/graph/utils.py +6 -0
- lfx/graph/vertex/base.py +4 -1
- lfx/graph/vertex/param_handler.py +10 -7
- lfx/helpers/__init__.py +12 -0
- lfx/helpers/flow.py +117 -0
- lfx/inputs/input_mixin.py +24 -1
- lfx/inputs/inputs.py +13 -1
- lfx/interface/components.py +161 -83
- lfx/log/logger.py +5 -3
- lfx/services/database/__init__.py +5 -0
- lfx/services/database/service.py +25 -0
- lfx/services/deps.py +87 -22
- lfx/services/manager.py +19 -6
- lfx/services/mcp_composer/service.py +998 -157
- lfx/services/session.py +5 -0
- lfx/services/settings/base.py +51 -7
- lfx/services/settings/constants.py +8 -0
- lfx/services/storage/local.py +76 -46
- lfx/services/storage/service.py +152 -29
- lfx/template/field/base.py +3 -0
- lfx/utils/ssrf_protection.py +384 -0
- lfx/utils/validate_cloud.py +26 -0
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/METADATA +38 -22
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/RECORD +182 -150
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/WHEEL +1 -1
- lfx/components/agents/altk_agent.py +0 -366
- lfx/components/agents/cuga_agent.py +0 -1013
- lfx/components/docling/docling_remote_vlm.py +0 -284
- lfx/components/logic/run_flow.py +0 -71
- lfx/components/models/embedding_model.py +0 -195
- lfx/components/models/language_model.py +0 -144
- /lfx/components/{data → data_source}/mock_data.py +0 -0
- /lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +0 -0
- /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
- /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
- /lfx/components/{logic → flow_controls}/listen.py +0 -0
- /lfx/components/{logic → flow_controls}/notify.py +0 -0
- /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
- /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
- /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
- /lfx/components/{helpers → processing}/create_list.py +0 -0
- /lfx/components/{helpers → processing}/output_parser.py +0 -0
- /lfx/components/{helpers → processing}/store_message.py +0 -0
- /lfx/components/{helpers → utilities}/id_generator.py +0 -0
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/entry_points.txt +0 -0
lfx/base/embeddings/embeddings_class.py
ADDED

@@ -0,0 +1,113 @@
+"""Extended embeddings class with available models metadata."""
+
+from langchain_core.embeddings import Embeddings
+
+
+class EmbeddingsWithModels(Embeddings):
+    """Extended Embeddings class that includes available models with dedicated instances.
+
+    This class inherits from LangChain Embeddings and provides a mapping of model names
+    to their dedicated embedding instances, enabling multi-model support without the need
+    for dynamic model switching.
+
+    Attributes:
+        embeddings: The primary LangChain Embeddings instance (used as fallback).
+        available_models: Dict mapping model names to their dedicated Embeddings instances.
+            Each model has its own pre-configured instance with specific parameters.
+    """
+
+    def __init__(
+        self,
+        embeddings: Embeddings,
+        available_models: dict[str, Embeddings] | None = None,
+    ):
+        """Initialize the EmbeddingsWithModels wrapper.
+
+        Args:
+            embeddings: The primary LangChain Embeddings instance (used as default/fallback).
+            available_models: Dict mapping model names to dedicated Embeddings instances.
+                Each value should be a fully configured Embeddings object ready to use.
+                Defaults to empty dict if not provided.
+        """
+        super().__init__()
+        self.embeddings = embeddings
+        self.available_models = available_models if available_models is not None else {}
+
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
+        """Embed search docs by delegating to the underlying embeddings instance.
+
+        Args:
+            texts: List of text to embed.
+
+        Returns:
+            List of embeddings.
+        """
+        return self.embeddings.embed_documents(texts)
+
+    def embed_query(self, text: str) -> list[float]:
+        """Embed query text by delegating to the underlying embeddings instance.
+
+        Args:
+            text: Text to embed.
+
+        Returns:
+            Embedding.
+        """
+        return self.embeddings.embed_query(text)
+
+    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
+        """Asynchronously embed search docs.
+
+        Args:
+            texts: List of text to embed.
+
+        Returns:
+            List of embeddings.
+        """
+        return await self.embeddings.aembed_documents(texts)
+
+    async def aembed_query(self, text: str) -> list[float]:
+        """Asynchronously embed query text.
+
+        Args:
+            text: Text to embed.
+
+        Returns:
+            Embedding.
+        """
+        return await self.embeddings.aembed_query(text)
+
+    def __call__(self, *args, **kwargs):
+        """Make the class callable by delegating to the underlying embeddings instance.
+
+        This handles cases where the embeddings object is used as a callable.
+
+        Args:
+            *args: Positional arguments to pass to the underlying embeddings instance.
+            **kwargs: Keyword arguments to pass to the underlying embeddings instance.
+
+        Returns:
+            The result of calling the underlying embeddings instance.
+        """
+        if callable(self.embeddings):
+            return self.embeddings(*args, **kwargs)
+        msg = f"'{type(self.embeddings).__name__}' object is not callable"
+        raise TypeError(msg)
+
+    def __getattr__(self, name: str):
+        """Delegate attribute access to the underlying embeddings instance.
+
+        This ensures full compatibility with any additional methods or attributes
+        that the underlying embeddings instance might have.
+
+        Args:
+            name: The attribute name to access.
+
+        Returns:
+            The attribute from the underlying embeddings instance.
+        """
+        return getattr(self.embeddings, name)
+
+    def __repr__(self) -> str:
+        """Return string representation of the wrapper."""
+        return f"EmbeddingsWithModels(embeddings={self.embeddings!r}, available_models={self.available_models!r})"
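The new EmbeddingsWithModels is a pure delegation wrapper, so it can stand in anywhere a LangChain Embeddings is expected. A minimal usage sketch, using FakeEmbeddings from langchain_core as a stand-in backend (the model names below are illustrative, not part of this diff):

# Sketch only: FakeEmbeddings and the model names are illustrative stand-ins.
from langchain_core.embeddings import FakeEmbeddings

from lfx.base.embeddings.embeddings_class import EmbeddingsWithModels

wrapper = EmbeddingsWithModels(
    embeddings=FakeEmbeddings(size=8),  # primary/fallback instance
    available_models={
        "text-embedding-small": FakeEmbeddings(size=8),
        "text-embedding-large": FakeEmbeddings(size=16),
    },
)

# Standard Embeddings calls delegate to the primary instance.
vectors = wrapper.embed_documents(["hello", "world"])

# Callers that know a specific model use its dedicated, pre-configured instance.
large = wrapper.available_models["text-embedding-large"].embed_query("hello")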
lfx/base/models/groq_constants.py
CHANGED

@@ -1,87 +1,91 @@
 from .model_metadata import create_model_metadata
 
-# Unified model metadata
+# Unified model metadata
+#
+# NOTE: This file serves as a FALLBACK when the dynamic model discovery system
+# (groq_model_discovery.py) cannot fetch fresh data from the Groq API.
+#
+# The dynamic system is the PRIMARY source and will:
+#   - Fetch available models directly from Groq API
+#   - Test each model for tool calling support automatically
+#   - Cache results for 24 hours
+#   - Always provide up-to-date model lists
+#
+# This fallback list should contain:
+#   - Minimal set of stable production models
+#   - Deprecated models for backwards compatibility
+#   - Non-LLM models (audio, TTS) marked as not_supported
+#
+# Last manually updated: 2025-01-06
+#
 GROQ_MODELS_DETAILED = [
-    #
-
-
-    ),
-
-
-
-    create_model_metadata(  #
-        provider="Groq", name="
-    ),
-    create_model_metadata(  # Meta
-        provider="Groq", name="llama-guard-3-8b", icon="Groq"
+    # ===== FALLBACK PRODUCTION MODELS =====
+    # These are stable models that are very unlikely to be removed
+    create_model_metadata(provider="Groq", name="llama-3.1-8b-instant", icon="Groq", tool_calling=True),
+    create_model_metadata(provider="Groq", name="llama-3.3-70b-versatile", icon="Groq", tool_calling=True),
+    # ===== DEPRECATED MODELS =====
+    # Keep these for backwards compatibility - users may have flows using them
+    # These will appear in the list but show as deprecated in the UI
+    create_model_metadata(  # Google - Removed
+        provider="Groq", name="gemma2-9b-it", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
-    ),
-    create_model_metadata(  # Meta
-        provider="Groq", name="llama3-8b-8192", icon="Groq"
-    ),
-    # Preview Models - For evaluation purposes only
-    create_model_metadata(  # Meta
-        provider="Groq", name="meta-llama/llama-4-scout-17b-16e-instruct", icon="Groq", tool_calling=True, preview=True
+    create_model_metadata(  # Google
+        provider="Groq", name="gemma-7b-it", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  # Meta
-        provider="Groq",
-        name="meta-llama/llama-4-maverick-17b-128e-instruct",
-        icon="Groq",
-        tool_calling=True,
-        preview=True,
+    create_model_metadata(  # Meta - Removed
+        provider="Groq", name="llama3-70b-8192", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Meta - Removed
+        provider="Groq", name="llama3-8b-8192", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Meta - Removed, replaced by llama-guard-4-12b
+        provider="Groq", name="llama-guard-3-8b", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Meta - Removed
+        provider="Groq", name="llama-3.2-1b-preview", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Meta - Removed
+        provider="Groq", name="llama-3.2-3b-preview", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Meta - Removed
+        provider="Groq", name="llama-3.2-11b-vision-preview", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  # Meta
-        provider="Groq", name="llama-3.
+    create_model_metadata(  # Meta - Removed
+        provider="Groq", name="llama-3.2-90b-vision-preview", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  # Meta
-        provider="Groq", name="llama-3.
+    create_model_metadata(  # Meta - Removed
+        provider="Groq", name="llama-3.3-70b-specdec", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Alibaba - Removed, replaced by qwen/qwen3-32b
+        provider="Groq", name="qwen-qwq-32b", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Alibaba - Removed
+        provider="Groq", name="qwen-2.5-coder-32b", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # Alibaba - Removed
+        provider="Groq", name="qwen-2.5-32b", icon="Groq", deprecated=True
     ),
-    create_model_metadata(  #
-        provider="Groq", name="
+    create_model_metadata(  # DeepSeek - Removed
+        provider="Groq", name="deepseek-r1-distill-qwen-32b", icon="Groq", deprecated=True
     ),
-    #
-
-        provider="Groq", name="gemma-7b-it", icon="Groq", tool_calling=True, deprecated=True
+    create_model_metadata(  # DeepSeek - Removed
+        provider="Groq", name="deepseek-r1-distill-llama-70b", icon="Groq", deprecated=True
     ),
     create_model_metadata(  # Groq
-        provider="Groq", name="llama3-groq-70b-8192-tool-use-preview", icon="Groq",
+        provider="Groq", name="llama3-groq-70b-8192-tool-use-preview", icon="Groq", deprecated=True
     ),
     create_model_metadata(  # Groq
-        provider="Groq", name="llama3-groq-8b-8192-tool-use-preview", icon="Groq",
+        provider="Groq", name="llama3-groq-8b-8192-tool-use-preview", icon="Groq", deprecated=True
     ),
     create_model_metadata(  # Meta
-        provider="Groq", name="llama-3.1-70b-versatile", icon="Groq",
+        provider="Groq", name="llama-3.1-70b-versatile", icon="Groq", deprecated=True
     ),
     create_model_metadata(  # Mistral
-        provider="Groq", name="mixtral-8x7b-32768", icon="Groq",
+        provider="Groq", name="mixtral-8x7b-32768", icon="Groq", deprecated=True
     ),
-    #
+    # ===== UNSUPPORTED MODELS =====
+    # Audio/TTS/Guard models that should not appear in LLM model lists
+    # The dynamic system automatically filters these out
     create_model_metadata(  # Mistral
         provider="Groq", name="mistral-saba-24b", icon="Groq", not_supported=True
     ),

@@ -100,6 +104,18 @@ GROQ_MODELS_DETAILED = [
     create_model_metadata(  # Hugging Face
         provider="Groq", name="distil-whisper-large-v3-en", icon="Groq", not_supported=True
     ),
+    create_model_metadata(  # Meta
+        provider="Groq", name="meta-llama/llama-guard-4-12b", icon="Groq", not_supported=True
+    ),
+    create_model_metadata(  # Meta
+        provider="Groq", name="meta-llama/llama-prompt-guard-2-86m", icon="Groq", not_supported=True
+    ),
+    create_model_metadata(  # Meta
+        provider="Groq", name="meta-llama/llama-prompt-guard-2-22m", icon="Groq", not_supported=True
+    ),
+    create_model_metadata(  # OpenAI
+        provider="Groq", name="openai/gpt-oss-safeguard-20b", icon="Groq", not_supported=True
+    ),
 ]
 
 # Generate backwards-compatible lists from the metadata
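For orientation, a sketch of how a fallback list like this can be consumed, assuming each entry returned by create_model_metadata behaves like a plain dict (the actual list-generation code in groq_constants.py is not shown in this diff):

# Sketch: deriving flat lists from GROQ_MODELS_DETAILED. Assumes dict-like
# entries; the package's real "backwards-compatible lists" code may differ.
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED

usable = [m for m in GROQ_MODELS_DETAILED if not m.get("not_supported")]
active_names = [m["name"] for m in usable if not m.get("deprecated")]
tool_calling_names = [m["name"] for m in usable if m.get("tool_calling")]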
lfx/base/models/groq_model_discovery.py
ADDED

@@ -0,0 +1,265 @@
+"""Dynamic Groq model discovery and tool calling detection.
+
+This module fetches available models directly from the Groq API
+and tests their tool calling capabilities programmatically,
+eliminating the need for manual metadata updates.
+"""
+
+import json
+from datetime import datetime, timedelta, timezone
+from pathlib import Path
+from typing import Any
+
+import requests
+
+from lfx.log.logger import logger
+
+
+class GroqModelDiscovery:
+    """Discovers and caches Groq model capabilities dynamically."""
+
+    # Cache file location - use local cache directory within models
+    CACHE_FILE = Path(__file__).parent / ".cache" / "groq_models_cache.json"
+    CACHE_DURATION = timedelta(hours=24)  # Refresh cache every 24 hours
+
+    # Models to skip from LLM list (audio, TTS, guards)
+    SKIP_PATTERNS = ["whisper", "tts", "guard", "safeguard", "prompt-guard", "saba"]
+
+    def __init__(self, api_key: str | None = None, base_url: str = "https://api.groq.com"):
+        """Initialize discovery with optional API key for testing.
+
+        Args:
+            api_key: Groq API key. If None, only cached data will be used.
+            base_url: Groq API base URL
+        """
+        self.api_key = api_key
+        self.base_url = base_url
+
+    def get_models(self, *, force_refresh: bool = False) -> dict[str, dict[str, Any]]:
+        """Get available models with their capabilities.
+
+        Args:
+            force_refresh: If True, bypass cache and fetch fresh data
+
+        Returns:
+            Dictionary mapping model IDs to their metadata:
+            {
+                "model-id": {
+                    "name": "model-id",
+                    "provider": "Provider Name",
+                    "tool_calling": True/False,
+                    "preview": True/False,
+                    "not_supported": True/False,  # for non-LLM models
+                    "last_tested": "2025-01-06T10:30:00"
+                }
+            }
+        """
+        # Try to load from cache first
+        if not force_refresh:
+            cached = self._load_cache()
+            if cached:
+                logger.info("Using cached Groq model metadata")
+                return cached
+
+        # Fetch fresh data from API
+        if not self.api_key:
+            logger.warning("No API key provided, using minimal fallback list")
+            return self._get_fallback_models()
+
+        try:
+            models_metadata = {}
+
+            # Step 1: Get list of available models
+            available_models = self._fetch_available_models()
+            logger.info(f"Found {len(available_models)} models from Groq API")
+
+            # Step 2: Categorize models
+            llm_models = []
+            non_llm_models = []
+
+            for model_id in available_models:
+                if any(pattern in model_id.lower() for pattern in self.SKIP_PATTERNS):
+                    non_llm_models.append(model_id)
+                else:
+                    llm_models.append(model_id)
+
+            # Step 3: Test LLM models for tool calling
+            logger.info(f"Testing {len(llm_models)} LLM models for tool calling support...")
+            for model_id in llm_models:
+                supports_tools = self._test_tool_calling(model_id)
+                models_metadata[model_id] = {
+                    "name": model_id,
+                    "provider": self._get_provider_name(model_id),
+                    "tool_calling": supports_tools,
+                    "preview": "preview" in model_id.lower() or "/" in model_id,
+                    "last_tested": datetime.now(timezone.utc).isoformat(),
+                }
+                logger.debug(f"{model_id}: tool_calling={supports_tools}")
+
+            # Step 4: Add non-LLM models as unsupported
+            for model_id in non_llm_models:
+                models_metadata[model_id] = {
+                    "name": model_id,
+                    "provider": self._get_provider_name(model_id),
+                    "not_supported": True,
+                    "last_tested": datetime.now(timezone.utc).isoformat(),
+                }
+
+            # Save to cache
+            self._save_cache(models_metadata)
+
+        except (requests.RequestException, KeyError, ValueError, ImportError) as e:
+            logger.exception(f"Error discovering models: {e}")
+            return self._get_fallback_models()
+        else:
+            return models_metadata
+
+    def _fetch_available_models(self) -> list[str]:
+        """Fetch list of available models from Groq API."""
+        url = f"{self.base_url}/openai/v1/models"
+        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
+
+        response = requests.get(url, headers=headers, timeout=10)
+        response.raise_for_status()
+
+        model_list = response.json()
+        # Use direct access to raise KeyError if 'data' is missing
+        return [model["id"] for model in model_list["data"]]
+
+    def _test_tool_calling(self, model_id: str) -> bool:
+        """Test if a model supports tool calling.
+
+        Args:
+            model_id: The model ID to test
+
+        Returns:
+            True if model supports tool calling, False otherwise
+        """
+        try:
+            import groq
+
+            client = groq.Groq(api_key=self.api_key)
+
+            # Simple tool definition
+            tools = [
+                {
+                    "type": "function",
+                    "function": {
+                        "name": "test_tool",
+                        "description": "A test tool",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {"x": {"type": "string"}},
+                            "required": ["x"],
+                        },
+                    },
+                }
+            ]
+
+            messages = [{"role": "user", "content": "test"}]
+
+            # Try to make a request with tools
+            client.chat.completions.create(
+                model=model_id, messages=messages, tools=tools, tool_choice="auto", max_tokens=10
+            )
+
+        except (ImportError, AttributeError, TypeError, ValueError, RuntimeError, KeyError) as e:
+            error_msg = str(e).lower()
+            # If error mentions tool calling, model doesn't support it
+            if "tool" in error_msg:
+                return False
+            # Other errors might be rate limits, etc - be conservative
+            logger.warning(f"Error testing {model_id}: {e}")
+            return False
+        else:
+            return True
+
+    def _get_provider_name(self, model_id: str) -> str:
+        """Extract provider name from model ID."""
+        if "/" in model_id:
+            provider_map = {
+                "meta-llama": "Meta",
+                "openai": "OpenAI",
+                "groq": "Groq",
+                "moonshotai": "Moonshot AI",
+                "qwen": "Alibaba Cloud",
+            }
+            prefix = model_id.split("/")[0]
+            return provider_map.get(prefix, prefix.title())
+
+        # Common patterns
+        if model_id.startswith("llama"):
+            return "Meta"
+        if model_id.startswith("qwen"):
+            return "Alibaba Cloud"
+        if model_id.startswith("allam"):
+            return "SDAIA"
+
+        return "Groq"
+
+    def _load_cache(self) -> dict[str, dict] | None:
+        """Load cached model metadata if it exists and is fresh."""
+        if not self.CACHE_FILE.exists():
+            return None
+
+        try:
+            with self.CACHE_FILE.open() as f:
+                cache_data = json.load(f)
+
+            # Check cache age
+            cache_time = datetime.fromisoformat(cache_data["cached_at"])
+            if datetime.now(timezone.utc) - cache_time > self.CACHE_DURATION:
+                logger.info("Cache expired, will fetch fresh data")
+                return None
+
+            return cache_data["models"]
+
+        except (json.JSONDecodeError, KeyError, ValueError) as e:
+            logger.warning(f"Invalid cache file: {e}")
+            return None
+
+    def _save_cache(self, models_metadata: dict[str, dict]) -> None:
+        """Save model metadata to cache."""
+        try:
+            cache_data = {"cached_at": datetime.now(timezone.utc).isoformat(), "models": models_metadata}
+
+            self.CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
+            with self.CACHE_FILE.open("w") as f:
+                json.dump(cache_data, f, indent=2)
+
+            logger.info(f"Cached {len(models_metadata)} models to {self.CACHE_FILE}")
+
+        except (OSError, TypeError, ValueError) as e:
+            logger.warning(f"Failed to save cache: {e}")
+
+    def _get_fallback_models(self) -> dict[str, dict]:
+        """Return minimal fallback list when API is unavailable."""
+        return {
+            "llama-3.1-8b-instant": {
+                "name": "llama-3.1-8b-instant",
+                "provider": "Meta",
+                "tool_calling": True,
+                "preview": False,
+            },
+            "llama-3.3-70b-versatile": {
+                "name": "llama-3.3-70b-versatile",
+                "provider": "Meta",
+                "tool_calling": True,
+                "preview": False,
+            },
+        }
+
+
+# Convenience function for use in other modules
+def get_groq_models(api_key: str | None = None, *, force_refresh: bool = False) -> dict[str, dict]:
+    """Get Groq models with their capabilities.
+
+    Args:
+        api_key: Optional API key for testing. If None, uses cached data.
+        force_refresh: If True, bypass cache and fetch fresh data.
+
+    Returns:
+        Dictionary of model metadata
+    """
+    discovery = GroqModelDiscovery(api_key=api_key)
+    return discovery.get_models(force_refresh=force_refresh)
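A short sketch of how a component might call the module's convenience function; reading the key from the GROQ_API_KEY environment variable is an assumption for illustration:

# Sketch: consuming the discovery module. GROQ_API_KEY is an assumed env var.
import os

from lfx.base.models.groq_model_discovery import get_groq_models

models = get_groq_models(api_key=os.environ.get("GROQ_API_KEY"))
llm_ids = [mid for mid, meta in models.items() if not meta.get("not_supported")]
tool_ready = [mid for mid in llm_ids if models[mid].get("tool_calling")]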
lfx/base/models/model.py
CHANGED

@@ -296,7 +296,7 @@ class LCModelComponent(Component):
             )
             model_message.properties.source = self._build_source(self._id, self.display_name, self)
             lf_message = await self.send_message(model_message)
-            result = lf_message.text
+            result = lf_message.text or ""
         else:
             message = await runnable.ainvoke(inputs)
             result = message.content if hasattr(message, "content") else message
lfx/base/models/model_utils.py
CHANGED

@@ -1,3 +1,14 @@
+import asyncio
+from urllib.parse import urljoin
+
+import httpx
+
+from lfx.log.logger import logger
+from lfx.utils.util import transform_localhost_url
+
+HTTP_STATUS_OK = 200
+
+
 def get_model_name(llm, display_name: str | None = "Custom"):
     attributes_to_check = ["model_name", "model", "model_id", "deployment_name"]
 

@@ -6,3 +17,92 @@ def get_model_name(llm, display_name: str | None = "Custom"):
 
     # If no matching attribute is found, return the class name as a fallback
     return model_name if model_name is not None else display_name
+
+
+async def is_valid_ollama_url(url: str) -> bool:
+    """Check if the provided URL is a valid Ollama API endpoint."""
+    try:
+        url = transform_localhost_url(url)
+        if not url:
+            return False
+        # Strip /v1 suffix if present, as Ollama API endpoints are at root level
+        url = url.rstrip("/").removesuffix("/v1")
+        if not url.endswith("/"):
+            url = url + "/"
+        async with httpx.AsyncClient() as client:
+            return (await client.get(url=urljoin(url, "api/tags"))).status_code == HTTP_STATUS_OK
+    except httpx.RequestError:
+        logger.debug(f"Invalid Ollama URL: {url}")
+        return False
+
+
+async def get_ollama_models(
+    base_url_value: str, desired_capability: str, json_models_key: str, json_name_key: str, json_capabilities_key: str
+) -> list[str]:
+    """Fetch available completion models from the Ollama API.
+
+    Filters out embedding models and only returns models with completion capability.
+
+    Args:
+        base_url_value (str): The base URL of the Ollama API.
+        desired_capability (str): The desired capability of the model.
+        json_models_key (str): The key in the JSON response that contains the models.
+        json_name_key (str): The key in the JSON response that contains the model names.
+        json_capabilities_key (str): The key in the JSON response that contains the model capabilities.
+
+    Returns:
+        list[str]: A sorted list of model names that support completion.
+
+    Raises:
+        ValueError: If there is an issue with the API request or response.
+    """
+    try:
+        # Strip /v1 suffix if present, as Ollama API endpoints are at root level
+        base_url = base_url_value.rstrip("/").removesuffix("/v1")
+        if not base_url.endswith("/"):
+            base_url = base_url + "/"
+        base_url = transform_localhost_url(base_url)
+
+        # Ollama REST API to return models
+        tags_url = urljoin(base_url, "api/tags")
+
+        # Ollama REST API to return model capabilities
+        show_url = urljoin(base_url, "api/show")
+        tags_response = None
+
+        async with httpx.AsyncClient() as client:
+            # Fetch available models
+            tags_response = await client.get(url=tags_url)
+            tags_response.raise_for_status()
+            models = tags_response.json()
+            if asyncio.iscoroutine(models):
+                models = await models
+            await logger.adebug(f"Available models: {models}")
+
+            # Filter models that are NOT embedding models
+            model_ids = []
+            for model in models.get(json_models_key, []):
+                model_name = model.get(json_name_key)
+                if not model_name:
+                    continue
+                await logger.adebug(f"Checking model: {model_name}")
+
+                payload = {"model": model_name}
+                show_response = await client.post(url=show_url, json=payload)
+                show_response.raise_for_status()
+                json_data = show_response.json()
+                if asyncio.iscoroutine(json_data):
+                    json_data = await json_data
+
+                capabilities = json_data.get(json_capabilities_key, [])
+                await logger.adebug(f"Model: {model_name}, Capabilities: {capabilities}")
+
+                if desired_capability in capabilities:
+                    model_ids.append(model_name)
+
+        return sorted(model_ids)
+
+    except (httpx.RequestError, ValueError) as e:
+        msg = "Could not get model names from Ollama."
+        await logger.aexception(msg)
+        raise ValueError(msg) from e