lfx-nightly 0.2.1.dev7__py3-none-any.whl → 0.3.0.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. lfx/__main__.py +137 -6
  2. lfx/_assets/component_index.json +1 -1
  3. lfx/base/agents/agent.py +9 -5
  4. lfx/base/agents/altk_base_agent.py +5 -3
  5. lfx/base/agents/events.py +1 -1
  6. lfx/base/models/unified_models.py +1 -1
  7. lfx/base/models/watsonx_constants.py +10 -7
  8. lfx/base/prompts/api_utils.py +40 -5
  9. lfx/cli/__init__.py +10 -2
  10. lfx/cli/script_loader.py +5 -4
  11. lfx/cli/validation.py +6 -3
  12. lfx/components/datastax/astradb_assistant_manager.py +4 -2
  13. lfx/components/docling/docling_remote.py +1 -0
  14. lfx/components/langchain_utilities/ibm_granite_handler.py +211 -0
  15. lfx/components/langchain_utilities/tool_calling.py +24 -1
  16. lfx/components/llm_operations/lambda_filter.py +182 -97
  17. lfx/components/models_and_agents/mcp_component.py +38 -1
  18. lfx/components/models_and_agents/prompt.py +105 -18
  19. lfx/components/ollama/ollama_embeddings.py +109 -28
  20. lfx/components/processing/text_operations.py +580 -0
  21. lfx/custom/custom_component/component.py +65 -10
  22. lfx/events/observability/__init__.py +0 -0
  23. lfx/events/observability/lifecycle_events.py +111 -0
  24. lfx/field_typing/__init__.py +57 -58
  25. lfx/graph/graph/base.py +36 -0
  26. lfx/graph/utils.py +45 -12
  27. lfx/graph/vertex/base.py +71 -22
  28. lfx/graph/vertex/vertex_types.py +0 -5
  29. lfx/inputs/input_mixin.py +1 -0
  30. lfx/inputs/inputs.py +5 -0
  31. lfx/interface/components.py +24 -7
  32. lfx/run/base.py +47 -77
  33. lfx/schema/__init__.py +50 -0
  34. lfx/schema/message.py +85 -8
  35. lfx/schema/workflow.py +171 -0
  36. lfx/services/deps.py +12 -0
  37. lfx/services/interfaces.py +43 -1
  38. lfx/services/schema.py +1 -0
  39. lfx/services/settings/auth.py +95 -4
  40. lfx/services/settings/base.py +4 -0
  41. lfx/services/settings/utils.py +82 -0
  42. lfx/services/transaction/__init__.py +5 -0
  43. lfx/services/transaction/service.py +35 -0
  44. lfx/tests/unit/components/__init__.py +0 -0
  45. lfx/utils/constants.py +1 -0
  46. lfx/utils/mustache_security.py +79 -0
  47. lfx/utils/validate_cloud.py +67 -0
  48. {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/METADATA +3 -1
  49. {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/RECORD +51 -42
  50. {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/WHEEL +0 -0
  51. {lfx_nightly-0.2.1.dev7.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/entry_points.txt +0 -0
@@ -1,3 +1,4 @@
1
+ import asyncio
1
2
  from typing import Any
2
3
  from urllib.parse import urljoin
3
4
 
@@ -5,9 +6,9 @@ import httpx
5
6
  from langchain_ollama import OllamaEmbeddings
6
7
 
7
8
  from lfx.base.models.model import LCModelComponent
8
- from lfx.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS
9
9
  from lfx.field_typing import Embeddings
10
- from lfx.io import DropdownInput, MessageTextInput, Output
10
+ from lfx.io import DropdownInput, MessageTextInput, Output, SecretStrInput
11
+ from lfx.log.logger import logger
11
12
  from lfx.utils.util import transform_localhost_url
12
13
 
13
14
  HTTP_STATUS_OK = 200
@@ -20,6 +21,12 @@ class OllamaEmbeddingsComponent(LCModelComponent):
20
21
  icon = "Ollama"
21
22
  name = "OllamaEmbeddings"
22
23
 
24
+ # Define constants for JSON keys
25
+ JSON_MODELS_KEY = "models"
26
+ JSON_NAME_KEY = "name"
27
+ JSON_CAPABILITIES_KEY = "capabilities"
28
+ EMBEDDING_CAPABILITY = "embedding"
29
+
23
30
  inputs = [
24
31
  DropdownInput(
25
32
  name="model_name",
@@ -37,6 +44,16 @@ class OllamaEmbeddingsComponent(LCModelComponent):
37
44
  info="Endpoint of the Ollama API. Defaults to http://localhost:11434.",
38
45
  value="http://localhost:11434",
39
46
  required=True,
47
+ real_time_refresh=True,
48
+ ),
49
+ SecretStrInput(
50
+ name="api_key",
51
+ display_name="Ollama API Key",
52
+ info="Your Ollama API key.",
53
+ value=None,
54
+ required=False,
55
+ real_time_refresh=True,
56
+ advanced=True,
40
57
  ),
41
58
  ]
42
59
 
@@ -44,25 +61,58 @@ class OllamaEmbeddingsComponent(LCModelComponent):
44
61
  Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
45
62
  ]
46
63
 
64
+ @property
65
+ def headers(self) -> dict[str, str] | None:
66
+ """Get the headers for the Ollama API."""
67
+ if self.api_key and self.api_key.strip():
68
+ return {"Authorization": f"Bearer {self.api_key}"}
69
+ return None
70
+
47
71
  def build_embeddings(self) -> Embeddings:
48
72
  transformed_base_url = transform_localhost_url(self.base_url)
73
+
74
+ # Strip /v1 suffix if present
75
+ if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
76
+ transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
77
+ logger.warning(
78
+ "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
79
+ "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
80
+ "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
81
+ "Learn more at https://docs.ollama.com/openai#openai-compatibility"
82
+ )
83
+
84
+ llm_params = {
85
+ "model": self.model_name,
86
+ "base_url": transformed_base_url,
87
+ }
88
+
89
+ if self.headers:
90
+ llm_params["client_kwargs"] = {"headers": self.headers}
91
+
49
92
  try:
50
- output = OllamaEmbeddings(model=self.model_name, base_url=transformed_base_url)
93
+ output = OllamaEmbeddings(**llm_params)
51
94
  except Exception as e:
52
95
  msg = (
53
- "Unable to connect to the Ollama API. ",
54
- "Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.",
96
+ "Unable to connect to the Ollama API. "
97
+ "Please verify the base URL, ensure the relevant Ollama model is pulled, and try again."
55
98
  )
56
99
  raise ValueError(msg) from e
57
100
  return output
58
101
 
59
- async def update_build_config(self, build_config: dict, _field_value: Any, field_name: str | None = None):
102
+ async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):
60
103
  if field_name in {"base_url", "model_name"} and not await self.is_valid_ollama_url(self.base_url):
61
104
  msg = "Ollama is not running on the provided base URL. Please start Ollama and try again."
62
105
  raise ValueError(msg)
63
- if field_name in {"model_name", "base_url", "tool_model_enabled"}:
64
- if await self.is_valid_ollama_url(self.base_url):
65
- build_config["model_name"]["options"] = await self.get_model(self.base_url)
106
+ if field_name in {"model_name", "base_url"}:
107
+ # Use field_value if base_url is being updated, otherwise use self.base_url
108
+ base_url_to_check = field_value if field_name == "base_url" else self.base_url
109
+ # Fallback to self.base_url if field_value is None or empty
110
+ if not base_url_to_check and field_name == "base_url":
111
+ base_url_to_check = self.base_url
112
+ logger.warning(f"Fetching Ollama models from updated URL: {base_url_to_check}")
113
+
114
+ if base_url_to_check and await self.is_valid_ollama_url(base_url_to_check):
115
+ build_config["model_name"]["options"] = await self.get_model(base_url_to_check)
66
116
  else:
67
117
  build_config["model_name"]["options"] = []
68
118
 
@@ -70,26 +120,49 @@ class OllamaEmbeddingsComponent(LCModelComponent):
70
120
 
71
121
  async def get_model(self, base_url_value: str) -> list[str]:
72
122
  """Get the model names from Ollama."""
73
- model_ids = []
74
123
  try:
75
- base_url_value = transform_localhost_url(base_url_value)
76
- url = urljoin(base_url_value, "/api/tags")
124
+ # Strip /v1 suffix if present, as Ollama API endpoints are at root level
125
+ base_url = base_url_value.rstrip("/").removesuffix("/v1")
126
+ if not base_url.endswith("/"):
127
+ base_url = base_url + "/"
128
+ base_url = transform_localhost_url(base_url)
129
+
130
+ # Ollama REST API to return models
131
+ tags_url = urljoin(base_url, "api/tags")
132
+
133
+ # Ollama REST API to return model capabilities
134
+ show_url = urljoin(base_url, "api/show")
135
+
77
136
  async with httpx.AsyncClient() as client:
78
- response = await client.get(url)
79
- response.raise_for_status()
80
- data = response.json()
81
-
82
- model_ids = [model["name"] for model in data.get("models", [])]
83
- # this to ensure that not embedding models are included.
84
- # not even the base models since models can have 1b 2b etc
85
- # handles cases when embeddings models have tags like :latest - etc.
86
- model_ids = [
87
- model
88
- for model in model_ids
89
- if any(model.startswith(f"{embedding_model}") for embedding_model in OLLAMA_EMBEDDING_MODELS)
90
- ]
91
-
92
- except (ImportError, ValueError, httpx.RequestError) as e:
137
+ headers = self.headers
138
+ # Fetch available models
139
+ tags_response = await client.get(url=tags_url, headers=headers)
140
+ tags_response.raise_for_status()
141
+ models = tags_response.json()
142
+ if asyncio.iscoroutine(models):
143
+ models = await models
144
+ await logger.adebug(f"Available models: {models}")
145
+
146
+ # Filter models that are embedding models
147
+ model_ids = []
148
+ for model in models[self.JSON_MODELS_KEY]:
149
+ model_name = model[self.JSON_NAME_KEY]
150
+ await logger.adebug(f"Checking model: {model_name}")
151
+
152
+ payload = {"model": model_name}
153
+ show_response = await client.post(url=show_url, json=payload, headers=headers)
154
+ show_response.raise_for_status()
155
+ json_data = show_response.json()
156
+ if asyncio.iscoroutine(json_data):
157
+ json_data = await json_data
158
+
159
+ capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])
160
+ await logger.adebug(f"Model: {model_name}, Capabilities: {capabilities}")
161
+
162
+ if self.EMBEDDING_CAPABILITY in capabilities:
163
+ model_ids.append(model_name)
164
+
165
+ except (httpx.RequestError, ValueError) as e:
93
166
  msg = "Could not get model names from Ollama."
94
167
  raise ValueError(msg) from e
95
168
 
@@ -99,6 +172,14 @@ class OllamaEmbeddingsComponent(LCModelComponent):
99
172
  try:
100
173
  async with httpx.AsyncClient() as client:
101
174
  url = transform_localhost_url(url)
102
- return (await client.get(f"{url}/api/tags")).status_code == HTTP_STATUS_OK
175
+ if not url:
176
+ return False
177
+ # Strip /v1 suffix if present, as Ollama API endpoints are at root level
178
+ url = url.rstrip("/").removesuffix("/v1")
179
+ if not url.endswith("/"):
180
+ url = url + "/"
181
+ return (
182
+ await client.get(url=urljoin(url, "api/tags"), headers=self.headers)
183
+ ).status_code == HTTP_STATUS_OK
103
184
  except httpx.RequestError:
104
185
  return False