lfx-nightly 0.2.0.dev41__py3-none-any.whl → 0.3.0.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/__main__.py +137 -6
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +10 -6
- lfx/base/agents/altk_base_agent.py +5 -3
- lfx/base/agents/altk_tool_wrappers.py +1 -1
- lfx/base/agents/events.py +1 -1
- lfx/base/agents/utils.py +4 -0
- lfx/base/composio/composio_base.py +78 -41
- lfx/base/data/cloud_storage_utils.py +156 -0
- lfx/base/data/docling_utils.py +130 -55
- lfx/base/datastax/astradb_base.py +75 -64
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/models/__init__.py +11 -1
- lfx/base/models/google_generative_ai_constants.py +33 -9
- lfx/base/models/model_metadata.py +6 -0
- lfx/base/models/ollama_constants.py +196 -30
- lfx/base/models/openai_constants.py +37 -10
- lfx/base/models/unified_models.py +1123 -0
- lfx/base/models/watsonx_constants.py +43 -4
- lfx/base/prompts/api_utils.py +40 -5
- lfx/base/tools/component_tool.py +2 -9
- lfx/cli/__init__.py +10 -2
- lfx/cli/commands.py +3 -0
- lfx/cli/run.py +65 -409
- lfx/cli/script_loader.py +18 -7
- lfx/cli/validation.py +6 -3
- lfx/components/__init__.py +0 -3
- lfx/components/composio/github_composio.py +1 -1
- lfx/components/cuga/cuga_agent.py +39 -27
- lfx/components/data_source/api_request.py +4 -2
- lfx/components/datastax/astradb_assistant_manager.py +4 -2
- lfx/components/docling/__init__.py +45 -11
- lfx/components/docling/docling_inline.py +39 -49
- lfx/components/docling/docling_remote.py +1 -0
- lfx/components/elastic/opensearch_multimodal.py +1733 -0
- lfx/components/files_and_knowledge/file.py +384 -36
- lfx/components/files_and_knowledge/ingestion.py +8 -0
- lfx/components/files_and_knowledge/retrieval.py +10 -0
- lfx/components/files_and_knowledge/save_file.py +91 -88
- lfx/components/langchain_utilities/ibm_granite_handler.py +211 -0
- lfx/components/langchain_utilities/tool_calling.py +37 -6
- lfx/components/llm_operations/batch_run.py +64 -18
- lfx/components/llm_operations/lambda_filter.py +213 -101
- lfx/components/llm_operations/llm_conditional_router.py +39 -7
- lfx/components/llm_operations/structured_output.py +38 -12
- lfx/components/models/__init__.py +16 -74
- lfx/components/models_and_agents/agent.py +51 -203
- lfx/components/models_and_agents/embedding_model.py +171 -255
- lfx/components/models_and_agents/language_model.py +54 -318
- lfx/components/models_and_agents/mcp_component.py +96 -10
- lfx/components/models_and_agents/prompt.py +105 -18
- lfx/components/ollama/ollama_embeddings.py +111 -29
- lfx/components/openai/openai_chat_model.py +1 -1
- lfx/components/processing/text_operations.py +580 -0
- lfx/components/vllm/__init__.py +37 -0
- lfx/components/vllm/vllm.py +141 -0
- lfx/components/vllm/vllm_embeddings.py +110 -0
- lfx/custom/custom_component/component.py +65 -10
- lfx/custom/custom_component/custom_component.py +8 -6
- lfx/events/observability/__init__.py +0 -0
- lfx/events/observability/lifecycle_events.py +111 -0
- lfx/field_typing/__init__.py +57 -58
- lfx/graph/graph/base.py +40 -1
- lfx/graph/utils.py +109 -30
- lfx/graph/vertex/base.py +75 -23
- lfx/graph/vertex/vertex_types.py +0 -5
- lfx/inputs/__init__.py +2 -0
- lfx/inputs/input_mixin.py +55 -0
- lfx/inputs/inputs.py +120 -0
- lfx/interface/components.py +24 -7
- lfx/interface/initialize/loading.py +42 -12
- lfx/io/__init__.py +2 -0
- lfx/run/__init__.py +5 -0
- lfx/run/base.py +464 -0
- lfx/schema/__init__.py +50 -0
- lfx/schema/data.py +1 -1
- lfx/schema/image.py +26 -7
- lfx/schema/message.py +104 -11
- lfx/schema/workflow.py +171 -0
- lfx/services/deps.py +12 -0
- lfx/services/interfaces.py +43 -1
- lfx/services/mcp_composer/service.py +7 -1
- lfx/services/schema.py +1 -0
- lfx/services/settings/auth.py +95 -4
- lfx/services/settings/base.py +11 -1
- lfx/services/settings/constants.py +2 -0
- lfx/services/settings/utils.py +82 -0
- lfx/services/storage/local.py +13 -8
- lfx/services/transaction/__init__.py +5 -0
- lfx/services/transaction/service.py +35 -0
- lfx/tests/unit/components/__init__.py +0 -0
- lfx/utils/constants.py +2 -0
- lfx/utils/mustache_security.py +79 -0
- lfx/utils/validate_cloud.py +81 -3
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/METADATA +7 -2
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/RECORD +98 -80
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/entry_points.txt +0 -0
lfx/components/models_and_agents/prompt.py (+105 -18):

```diff
@@ -1,22 +1,36 @@
+from typing import Any
+
 from lfx.base.prompts.api_utils import process_prompt_template
 from lfx.custom.custom_component.component import Component
+from lfx.inputs.input_mixin import FieldTypes
 from lfx.inputs.inputs import DefaultPromptField
-from lfx.io import MessageTextInput, Output, PromptInput
+from lfx.io import BoolInput, MessageTextInput, Output, PromptInput
+from lfx.log.logger import logger
+from lfx.schema.dotdict import dotdict
 from lfx.schema.message import Message
 from lfx.template.utils import update_template_values
+from lfx.utils.mustache_security import validate_mustache_template
 
 
 class PromptComponent(Component):
     display_name: str = "Prompt Template"
     description: str = "Create a prompt template with dynamic variables."
     documentation: str = "https://docs.langflow.org/components-prompts"
-    icon = "
+    icon = "prompts"
     trace_type = "prompt"
     name = "Prompt Template"
     priority = 0  # Set priority to 0 to make it appear first
 
     inputs = [
         PromptInput(name="template", display_name="Template"),
+        BoolInput(
+            name="use_double_brackets",
+            display_name="Use Double Brackets",
+            value=False,
+            advanced=True,
+            info="Use {{variable}} syntax instead of {variable}.",
+            real_time_refresh=True,
+        ),
         MessageTextInput(
             name="tool_placeholder",
             display_name="Tool Placeholder",
@@ -30,34 +44,107 @@ class PromptComponent(Component):
         Output(display_name="Prompt", name="prompt", method="build_prompt"),
     ]
 
+    def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:
+        """Update the template field type based on the selected mode."""
+        if field_name == "use_double_brackets":
+            # Change the template field type based on mode
+            is_mustache = field_value is True
+            if is_mustache:
+                build_config["template"]["type"] = FieldTypes.MUSTACHE_PROMPT.value
+            else:
+                build_config["template"]["type"] = FieldTypes.PROMPT.value
+
+            # Re-process the template to update variables when mode changes
+            template_value = build_config.get("template", {}).get("value", "")
+            if template_value:
+                # Ensure custom_fields is properly initialized
+                if "custom_fields" not in build_config:
+                    build_config["custom_fields"] = {}
+
+                # Clean up fields from the OLD mode before processing with NEW mode
+                # This ensures we don't keep fields with wrong syntax even if validation fails
+                old_custom_fields = build_config["custom_fields"].get("template", [])
+                for old_field in list(old_custom_fields):
+                    # Remove the field from custom_fields and template
+                    if old_field in old_custom_fields:
+                        old_custom_fields.remove(old_field)
+                        build_config.pop(old_field, None)
+
+                # Try to process template with new mode to add new variables
+                # If validation fails, at least we cleaned up old fields
+                try:
+                    # Validate mustache templates for security
+                    if is_mustache:
+                        validate_mustache_template(template_value)
+
+                    # Re-process template with new mode to add new variables
+                    _ = process_prompt_template(
+                        template=template_value,
+                        name="template",
+                        custom_fields=build_config["custom_fields"],
+                        frontend_node_template=build_config,
+                        is_mustache=is_mustache,
+                    )
+                except ValueError as e:
+                    # If validation fails, we still updated the mode and cleaned old fields
+                    # User will see error when they try to save
+                    logger.debug(f"Template validation failed during mode switch: {e}")
+        return build_config
+
     async def build_prompt(self) -> Message:
-        prompt = await Message.from_template_and_variables(**self._attributes)
+        use_double_brackets = self.use_double_brackets if hasattr(self, "use_double_brackets") else False
+        template_format = "mustache" if use_double_brackets else "f-string"
+        prompt = await Message.from_template_and_variables(template_format=template_format, **self._attributes)
         self.status = prompt.text
         return prompt
 
     def _update_template(self, frontend_node: dict):
         prompt_template = frontend_node["template"]["template"]["value"]
-        custom_fields = frontend_node["custom_fields"]
-        frontend_node_template = frontend_node["template"]
-        _ = process_prompt_template(
-            template=prompt_template,
-            name="template",
-            custom_fields=custom_fields,
-            frontend_node_template=frontend_node_template,
-        )
+        use_double_brackets = frontend_node["template"].get("use_double_brackets", {}).get("value", False)
+        is_mustache = use_double_brackets is True
+
+        try:
+            # Validate mustache templates for security
+            if is_mustache:
+                validate_mustache_template(prompt_template)
+
+            custom_fields = frontend_node["custom_fields"]
+            frontend_node_template = frontend_node["template"]
+            _ = process_prompt_template(
+                template=prompt_template,
+                name="template",
+                custom_fields=custom_fields,
+                frontend_node_template=frontend_node_template,
+                is_mustache=is_mustache,
+            )
+        except ValueError as e:
+            # If validation fails, don't add variables but allow component to be created
+            logger.debug(f"Template validation failed in _update_template: {e}")
         return frontend_node
 
     async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):
         """This function is called after the code validation is done."""
         frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)
         template = frontend_node["template"]["template"]["value"]
-        # Kept it duplicated for backwards compatibility
-        _ = process_prompt_template(
-            template=template,
-            name="template",
-            custom_fields=frontend_node["custom_fields"],
-            frontend_node_template=frontend_node["template"],
-        )
+        use_double_brackets = frontend_node["template"].get("use_double_brackets", {}).get("value", False)
+        is_mustache = use_double_brackets is True
+
+        try:
+            # Validate mustache templates for security
+            if is_mustache:
+                validate_mustache_template(template)
+
+            # Kept it duplicated for backwards compatibility
+            _ = process_prompt_template(
+                template=template,
+                name="template",
+                custom_fields=frontend_node["custom_fields"],
+                frontend_node_template=frontend_node["template"],
+                is_mustache=is_mustache,
+            )
+        except ValueError as e:
+            # If validation fails, don't add variables but allow component to be updated
+            logger.debug(f"Template validation failed in update_frontend_node: {e}")
         # Now that template is updated, we need to grab any values that were set in the current_frontend_node
         # and update the frontend_node with those values
         update_template_values(new_template=frontend_node, previous_template=current_frontend_node["template"])
```
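The change above gives the Prompt Template component two placeholder syntaxes for the same template: the default f-string-style `{variable}`, and mustache-style `{{variable}}` when `use_double_brackets` is enabled, with rendering routed through `Message.from_template_and_variables(template_format=...)`. As a minimal standalone sketch of why the toggle matters — this is not lfx's rendering path, just an illustration — double brackets let a template carry literal braces, such as inline JSON, without escaping:

```python
import re


def render_f_string(template: str, **variables) -> str:
    # Default mode: {variable} placeholders via str.format.
    # Literal braces must be escaped by doubling them, so templates
    # that embed JSON are awkward to write.
    return template.format(**variables)


def render_double_brackets(template: str, **variables) -> str:
    # Double-bracket mode: only {{name}} tokens are substituted; all
    # other braces pass through, so inline JSON survives untouched.
    return re.sub(
        r"\{\{(\w+)\}\}",
        lambda m: str(variables.get(m.group(1), m.group(0))),
        template,
    )


print(render_f_string("Summarize {topic}.", topic="the 0.3.0 changes"))
print(render_double_brackets('Reply as {"summary": "{{topic}}"}.', topic="the 0.3.0 changes"))
```

In f-string mode that last template raises a `KeyError` unless every literal brace is doubled; mustache mode sidesteps the escaping entirely, which is presumably what the new `MUSTACHE_PROMPT` field type and `validate_mustache_template` check exist to support safely.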
lfx/components/ollama/ollama_embeddings.py (+111 -29):

```diff
@@ -1,3 +1,4 @@
+import asyncio
 from typing import Any
 from urllib.parse import urljoin
 
@@ -5,9 +6,9 @@ import httpx
 from langchain_ollama import OllamaEmbeddings
 
 from lfx.base.models.model import LCModelComponent
-from lfx.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS
 from lfx.field_typing import Embeddings
-from lfx.io import DropdownInput, MessageTextInput, Output
+from lfx.io import DropdownInput, MessageTextInput, Output, SecretStrInput
+from lfx.log.logger import logger
 from lfx.utils.util import transform_localhost_url
 
 HTTP_STATUS_OK = 200
@@ -20,6 +21,12 @@ class OllamaEmbeddingsComponent(LCModelComponent):
     icon = "Ollama"
     name = "OllamaEmbeddings"
 
+    # Define constants for JSON keys
+    JSON_MODELS_KEY = "models"
+    JSON_NAME_KEY = "name"
+    JSON_CAPABILITIES_KEY = "capabilities"
+    EMBEDDING_CAPABILITY = "embedding"
+
     inputs = [
         DropdownInput(
             name="model_name",
@@ -34,8 +41,19 @@ class OllamaEmbeddingsComponent(LCModelComponent):
         MessageTextInput(
             name="base_url",
             display_name="Ollama Base URL",
-            [deleted line not captured in this extract]
+            info="Endpoint of the Ollama API. Defaults to http://localhost:11434.",
+            value="http://localhost:11434",
             required=True,
+            real_time_refresh=True,
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="Ollama API Key",
+            info="Your Ollama API key.",
+            value=None,
+            required=False,
+            real_time_refresh=True,
+            advanced=True,
         ),
     ]
 
@@ -43,25 +61,58 @@ class OllamaEmbeddingsComponent(LCModelComponent):
         Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
     ]
 
+    @property
+    def headers(self) -> dict[str, str] | None:
+        """Get the headers for the Ollama API."""
+        if self.api_key and self.api_key.strip():
+            return {"Authorization": f"Bearer {self.api_key}"}
+        return None
+
     def build_embeddings(self) -> Embeddings:
         transformed_base_url = transform_localhost_url(self.base_url)
+
+        # Strip /v1 suffix if present
+        if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
+            transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
+            logger.warning(
+                "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
+                "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
+                "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
+                "Learn more at https://docs.ollama.com/openai#openai-compatibility"
+            )
+
+        llm_params = {
+            "model": self.model_name,
+            "base_url": transformed_base_url,
+        }
+
+        if self.headers:
+            llm_params["client_kwargs"] = {"headers": self.headers}
+
         try:
-            output = OllamaEmbeddings(
+            output = OllamaEmbeddings(**llm_params)
         except Exception as e:
             msg = (
-                "Unable to connect to the Ollama API. "
-                "Please verify the base URL, ensure the relevant Ollama model is pulled, and try again."
+                "Unable to connect to the Ollama API. "
+                "Please verify the base URL, ensure the relevant Ollama model is pulled, and try again."
             )
             raise ValueError(msg) from e
         return output
 
-    async def update_build_config(self, build_config: dict,
+    async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):
         if field_name in {"base_url", "model_name"} and not await self.is_valid_ollama_url(self.base_url):
             msg = "Ollama is not running on the provided base URL. Please start Ollama and try again."
             raise ValueError(msg)
-        if field_name in {"model_name", "base_url"
-            if
-        [deleted line not captured in this extract]
+        if field_name in {"model_name", "base_url"}:
+            # Use field_value if base_url is being updated, otherwise use self.base_url
+            base_url_to_check = field_value if field_name == "base_url" else self.base_url
+            # Fallback to self.base_url if field_value is None or empty
+            if not base_url_to_check and field_name == "base_url":
+                base_url_to_check = self.base_url
+            logger.warning(f"Fetching Ollama models from updated URL: {base_url_to_check}")
+
+            if base_url_to_check and await self.is_valid_ollama_url(base_url_to_check):
+                build_config["model_name"]["options"] = await self.get_model(base_url_to_check)
             else:
                 build_config["model_name"]["options"] = []
 
```
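Three spots in this component now normalize the base URL the same way (strip a trailing `/v1`, ensure a trailing slash) and optionally attach a bearer token. Factored into a standalone sketch for readability — the helper names here are ours, not part of the component:

```python
import httpx


def normalize_ollama_base_url(base_url: str) -> str:
    # Drop an OpenAI-compat '/v1' suffix: the component talks to the
    # native Ollama API, whose endpoints live at the server root.
    url = base_url.rstrip("/").removesuffix("/v1")
    # A trailing slash matters for urljoin(base, "api/tags"): without it,
    # a base served under a subpath would lose its last path segment.
    return url if url.endswith("/") else url + "/"


def auth_headers(api_key: str | None) -> dict[str, str] | None:
    # None means httpx sends no Authorization header at all, which is
    # what a local, unauthenticated Ollama install expects.
    return {"Authorization": f"Bearer {api_key}"} if api_key and api_key.strip() else None


base = normalize_ollama_base_url("http://localhost:11434/v1")
response = httpx.get(base + "api/tags", headers=auth_headers(None), timeout=5.0)
print(response.status_code)  # 200 when Ollama is running locally
```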
```diff
@@ -69,26 +120,49 @@ class OllamaEmbeddingsComponent(LCModelComponent):
 
     async def get_model(self, base_url_value: str) -> list[str]:
         """Get the model names from Ollama."""
-        model_ids = []
         try:
-        [2 deleted lines not captured in this extract]
+            # Strip /v1 suffix if present, as Ollama API endpoints are at root level
+            base_url = base_url_value.rstrip("/").removesuffix("/v1")
+            if not base_url.endswith("/"):
+                base_url = base_url + "/"
+            base_url = transform_localhost_url(base_url)
+
+            # Ollama REST API to return models
+            tags_url = urljoin(base_url, "api/tags")
+
+            # Ollama REST API to return model capabilities
+            show_url = urljoin(base_url, "api/show")
+
             async with httpx.AsyncClient() as client:
-        [15 deleted lines not captured in this extract]
+                headers = self.headers
+                # Fetch available models
+                tags_response = await client.get(url=tags_url, headers=headers)
+                tags_response.raise_for_status()
+                models = tags_response.json()
+                if asyncio.iscoroutine(models):
+                    models = await models
+                await logger.adebug(f"Available models: {models}")
+
+                # Filter models that are embedding models
+                model_ids = []
+                for model in models[self.JSON_MODELS_KEY]:
+                    model_name = model[self.JSON_NAME_KEY]
+                    await logger.adebug(f"Checking model: {model_name}")
+
+                    payload = {"model": model_name}
+                    show_response = await client.post(url=show_url, json=payload, headers=headers)
+                    show_response.raise_for_status()
+                    json_data = show_response.json()
+                    if asyncio.iscoroutine(json_data):
+                        json_data = await json_data
+
+                    capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])
+                    await logger.adebug(f"Model: {model_name}, Capabilities: {capabilities}")
+
+                    if self.EMBEDDING_CAPABILITY in capabilities:
+                        model_ids.append(model_name)
+
+        except (httpx.RequestError, ValueError) as e:
             msg = "Could not get model names from Ollama."
             raise ValueError(msg) from e
 
@@ -98,6 +172,14 @@ class OllamaEmbeddingsComponent(LCModelComponent):
         try:
             async with httpx.AsyncClient() as client:
                 url = transform_localhost_url(url)
-                [deleted line not captured in this extract]
+                if not url:
+                    return False
+                # Strip /v1 suffix if present, as Ollama API endpoints are at root level
+                url = url.rstrip("/").removesuffix("/v1")
+                if not url.endswith("/"):
+                    url = url + "/"
+                return (
+                    await client.get(url=urljoin(url, "api/tags"), headers=self.headers)
+                ).status_code == HTTP_STATUS_OK
         except httpx.RequestError:
             return False
```
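The rewrite above replaces the old hardcoded `OLLAMA_EMBEDDING_MODELS` list with runtime capability detection: `GET /api/tags` enumerates installed models, then one `POST /api/show` per model reports its capabilities. A condensed sketch of the same two-call flow (httpx's `Response.json()` is synchronous, so the `asyncio.iscoroutine` guards from the diff are omitted here):

```python
import asyncio

import httpx


async def list_embedding_models(base_url: str, headers: dict[str, str] | None = None) -> list[str]:
    # Same two-call flow as the component: /api/tags lists installed
    # models, /api/show reports per-model capabilities.
    async with httpx.AsyncClient(base_url=base_url, headers=headers, timeout=10.0) as client:
        tags_response = await client.get("/api/tags")
        tags_response.raise_for_status()
        names = [model["name"] for model in tags_response.json().get("models", [])]

        embedding_models = []
        for name in names:
            show_response = await client.post("/api/show", json={"model": name})
            show_response.raise_for_status()
            if "embedding" in show_response.json().get("capabilities", []):
                embedding_models.append(name)
        return embedding_models


if __name__ == "__main__":
    print(asyncio.run(list_embedding_models("http://localhost:11434")))
```

Note the cost this design accepts: refreshing the model dropdown issues one `/api/show` request per installed model.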
lfx/components/openai/openai_chat_model.py (+1 -1):

```diff
@@ -98,7 +98,7 @@ class OpenAIModelComponent(LCModelComponent):
         # Handle api_key - it can be string or SecretStr
         api_key_value = None
         if self.api_key:
-            logger.debug(f"API key type: {type(self.api_key)}, value: {self.api_key
+            logger.debug(f"API key type: {type(self.api_key)}, value: {'***' if self.api_key else None}")
             if isinstance(self.api_key, SecretStr):
                 api_key_value = self.api_key.get_secret_value()
             else:
```