lfx-nightly 0.2.0.dev41__py3-none-any.whl → 0.2.1.dev7__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +1 -1
- lfx/base/agents/altk_tool_wrappers.py +1 -1
- lfx/base/agents/utils.py +4 -0
- lfx/base/composio/composio_base.py +78 -41
- lfx/base/data/cloud_storage_utils.py +156 -0
- lfx/base/data/docling_utils.py +130 -55
- lfx/base/datastax/astradb_base.py +75 -64
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/models/__init__.py +11 -1
- lfx/base/models/google_generative_ai_constants.py +33 -9
- lfx/base/models/model_metadata.py +6 -0
- lfx/base/models/ollama_constants.py +196 -30
- lfx/base/models/openai_constants.py +37 -10
- lfx/base/models/unified_models.py +1123 -0
- lfx/base/models/watsonx_constants.py +36 -0
- lfx/base/tools/component_tool.py +2 -9
- lfx/cli/commands.py +3 -0
- lfx/cli/run.py +65 -409
- lfx/cli/script_loader.py +13 -3
- lfx/components/__init__.py +0 -3
- lfx/components/composio/github_composio.py +1 -1
- lfx/components/cuga/cuga_agent.py +39 -27
- lfx/components/data_source/api_request.py +4 -2
- lfx/components/docling/__init__.py +45 -11
- lfx/components/docling/docling_inline.py +39 -49
- lfx/components/elastic/opensearch_multimodal.py +1733 -0
- lfx/components/files_and_knowledge/file.py +384 -36
- lfx/components/files_and_knowledge/ingestion.py +8 -0
- lfx/components/files_and_knowledge/retrieval.py +10 -0
- lfx/components/files_and_knowledge/save_file.py +91 -88
- lfx/components/langchain_utilities/tool_calling.py +14 -6
- lfx/components/llm_operations/batch_run.py +64 -18
- lfx/components/llm_operations/lambda_filter.py +33 -6
- lfx/components/llm_operations/llm_conditional_router.py +39 -7
- lfx/components/llm_operations/structured_output.py +38 -12
- lfx/components/models/__init__.py +16 -74
- lfx/components/models_and_agents/agent.py +51 -203
- lfx/components/models_and_agents/embedding_model.py +171 -255
- lfx/components/models_and_agents/language_model.py +54 -318
- lfx/components/models_and_agents/mcp_component.py +58 -9
- lfx/components/ollama/ollama_embeddings.py +2 -1
- lfx/components/openai/openai_chat_model.py +1 -1
- lfx/components/vllm/__init__.py +37 -0
- lfx/components/vllm/vllm.py +141 -0
- lfx/components/vllm/vllm_embeddings.py +110 -0
- lfx/custom/custom_component/custom_component.py +8 -6
- lfx/graph/graph/base.py +4 -1
- lfx/graph/utils.py +64 -18
- lfx/graph/vertex/base.py +4 -1
- lfx/inputs/__init__.py +2 -0
- lfx/inputs/input_mixin.py +54 -0
- lfx/inputs/inputs.py +115 -0
- lfx/interface/initialize/loading.py +42 -12
- lfx/io/__init__.py +2 -0
- lfx/run/__init__.py +5 -0
- lfx/run/base.py +494 -0
- lfx/schema/data.py +1 -1
- lfx/schema/image.py +26 -7
- lfx/schema/message.py +19 -3
- lfx/services/mcp_composer/service.py +7 -1
- lfx/services/settings/base.py +7 -1
- lfx/services/settings/constants.py +2 -0
- lfx/services/storage/local.py +13 -8
- lfx/utils/constants.py +1 -0
- lfx/utils/validate_cloud.py +14 -3
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/METADATA +5 -2
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/RECORD +70 -61
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/entry_points.txt +0 -0
lfx/base/datastax/astradb_base.py
CHANGED

@@ -187,34 +187,38 @@ class AstraDBBaseComponent(Component):
     @classmethod
     def map_cloud_providers(cls, token: str, environment: str | None = None) -> dict[str, dict[str, Any]]:
        """Fetch all available cloud providers and regions."""
-        # Get the admin object
-        client = DataAPIClient(environment=cls.get_environment(environment))
-        admin_client = client.get_admin(token=token)
-
-        # Get the list of available regions
-        available_regions = admin_client.find_available_regions(only_org_enabled_regions=True)
+        try:
+            # Get the admin object
+            client = DataAPIClient(environment=cls.get_environment(environment))
+            admin_client = client.get_admin(token=token)
 
-        provider_mapping: dict[str, dict[str, str]] = {
-            "AWS": {"name": "Amazon Web Services", "id": "aws"},
-            "GCP": {"name": "Google Cloud Platform", "id": "gcp"},
-            "Azure": {"name": "Microsoft Azure", "id": "azure"},
-        }
+            # Get the list of available regions
+            available_regions = admin_client.find_available_regions(only_org_enabled_regions=True)
 
-        result: dict[str, dict[str, Any]] = {}
-        for region_info in available_regions:
-            cloud_provider = region_info.cloud_provider
-            region = region_info.name
+            provider_mapping: dict[str, dict[str, str]] = {
+                "AWS": {"name": "Amazon Web Services", "id": "aws"},
+                "GCP": {"name": "Google Cloud Platform", "id": "gcp"},
+                "Azure": {"name": "Microsoft Azure", "id": "azure"},
+            }
 
-            if cloud_provider in provider_mapping:
-                provider_name = provider_mapping[cloud_provider]["name"]
-                provider_id = provider_mapping[cloud_provider]["id"]
+            result: dict[str, dict[str, Any]] = {}
+            for region_info in available_regions:
+                cloud_provider = region_info.cloud_provider
+                region = region_info.name
 
-                if provider_name not in result:
-                    result[provider_name] = {"id": provider_id, "regions": []}
+                if cloud_provider in provider_mapping:
+                    provider_name = provider_mapping[cloud_provider]["name"]
+                    provider_id = provider_mapping[cloud_provider]["id"]
 
-                result[provider_name]["regions"].append(region)
+                    if provider_name not in result:
+                        result[provider_name] = {"id": provider_id, "regions": []}
 
-        return result
+                    result[provider_name]["regions"].append(region)
+        except Exception as e:  # noqa: BLE001
+            logger.debug("Error fetching cloud providers: %s", e)
+            return {}
+        else:
+            return result
 
     @classmethod
     def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):
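Net effect of this hunk: `map_cloud_providers` now fails soft, logging at debug level and returning an empty dict instead of raising when the Astra admin API cannot be reached. A minimal sketch of what a caller sees; the token and printed fields below are illustrative placeholders, not values from the diff:

    # Hedged sketch of the documented return shape only; the token is a placeholder.
    providers = AstraDBBaseComponent.map_cloud_providers(token="AstraCS:<placeholder>")
    # Success: {"Amazon Web Services": {"id": "aws", "regions": [...]}, ...}
    # Any failure (bad token, network error): {} ; callers must tolerate the empty dict.
    for provider_name, info in providers.items():
        print(provider_name, info["id"], len(info["regions"]))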
@@ -327,48 +331,52 @@ class AstraDBBaseComponent(Component):
 
     @classmethod
     def get_database_list_static(cls, token: str, environment: str | None = None):
-        environment = cls.get_environment(environment)
-        client = DataAPIClient(environment=environment)
-
-        # Get the admin object
-        admin_client = client.get_admin(token=token)
+        try:
+            environment = cls.get_environment(environment)
+            client = DataAPIClient(environment=environment)
 
-        # Get the list of databases
-        db_list = admin_client.list_databases()
+            # Get the admin object
+            admin_client = client.get_admin(token=token)
 
-        # Generate the api endpoint for each database
-        db_info_dict = {}
-        for db in db_list:
-            try:
-                # Get the API endpoint for the database
-                api_endpoints = [db_reg.api_endpoint for db_reg in db.regions]
+            # Get the list of databases
+            db_list = admin_client.list_databases()
 
-                # Get the number of collections
+            # Generate the api endpoint for each database
+            db_info_dict = {}
+            for db in db_list:
                 try:
+                    # Get the API endpoint for the database
+                    api_endpoints = [db_reg.api_endpoint for db_reg in db.regions]
+
+                    # Get the number of collections
+                    try:
+                        # Get the number of collections in the database
+                        num_collections = len(
+                            client.get_database(
+                                api_endpoints[0],
+                                token=token,
+                            ).list_collection_names()
+                        )
+                    except Exception:  # noqa: BLE001
+                        if db.status != "PENDING":
+                            continue
+                        num_collections = 0
+
+                    # Add the database to the dictionary
+                    db_info_dict[db.name] = {
+                        "api_endpoints": api_endpoints,
+                        "keyspaces": db.keyspaces,
+                        "collections": num_collections,
+                        "status": db.status if db.status != "ACTIVE" else None,
+                        "org_id": db.org_id if db.org_id else None,
+                    }
+                except Exception as e:  # noqa: BLE001
+                    logger.debug("Failed to get metadata for database %s: %s", db.name, e)
+        except Exception as e:  # noqa: BLE001
+            logger.debug("Error fetching database list: %s", e)
+            return {}
+        else:
+            return db_info_dict
 
     def get_database_list(self):
         return self.get_database_list_static(
@@ -467,6 +475,9 @@ class AstraDBBaseComponent(Component):
 
     def _initialize_database_options(self):
         try:
+            db_list = self.get_database_list()
+            if not db_list:
+                return []
             return [
                 {
                     "name": name,
@@ -476,11 +487,11 @@ class AstraDBBaseComponent(Component):
                     "keyspaces": info["keyspaces"],
                     "org_id": info["org_id"],
                 }
-                for name, info in
+                for name, info in db_list.items()
             ]
-        except Exception as e:
+        except Exception as e:  # noqa: BLE001
+            logger.debug("Error fetching database options: %s", e)
+            return []
 
     @classmethod
     def get_provider_icon(cls, collection=None, provider_name: str | None = None) -> str:
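Taken together, these hunks make the database-discovery path fail soft end to end: `get_database_list_static` returns `{}` on any Astra API error, and `_initialize_database_options` short-circuits that empty dict (and catches its own exceptions) into an empty options list. A small sketch of the combined behavior, using a placeholder token; the flow is simplified from the diff:

    # Hedged sketch of the new failure semantics.
    db_list = AstraDBBaseComponent.get_database_list_static(token="AstraCS:<placeholder>")
    options = [{"name": name, **info} for name, info in db_list.items()] if db_list else []
    # Nothing raises toward the UI; an unreachable API simply yields no options.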
lfx/base/embeddings/embeddings_class.py
ADDED

@@ -0,0 +1,113 @@
+"""Extended embeddings class with available models metadata."""
+
+from langchain_core.embeddings import Embeddings
+
+
+class EmbeddingsWithModels(Embeddings):
+    """Extended Embeddings class that includes available models with dedicated instances.
+
+    This class inherits from LangChain Embeddings and provides a mapping of model names
+    to their dedicated embedding instances, enabling multi-model support without the need
+    for dynamic model switching.
+
+    Attributes:
+        embeddings: The primary LangChain Embeddings instance (used as fallback).
+        available_models: Dict mapping model names to their dedicated Embeddings instances.
+            Each model has its own pre-configured instance with specific parameters.
+    """
+
+    def __init__(
+        self,
+        embeddings: Embeddings,
+        available_models: dict[str, Embeddings] | None = None,
+    ):
+        """Initialize the EmbeddingsWithModels wrapper.
+
+        Args:
+            embeddings: The primary LangChain Embeddings instance (used as default/fallback).
+            available_models: Dict mapping model names to dedicated Embeddings instances.
+                Each value should be a fully configured Embeddings object ready to use.
+                Defaults to empty dict if not provided.
+        """
+        super().__init__()
+        self.embeddings = embeddings
+        self.available_models = available_models if available_models is not None else {}
+
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
+        """Embed search docs by delegating to the underlying embeddings instance.
+
+        Args:
+            texts: List of text to embed.
+
+        Returns:
+            List of embeddings.
+        """
+        return self.embeddings.embed_documents(texts)
+
+    def embed_query(self, text: str) -> list[float]:
+        """Embed query text by delegating to the underlying embeddings instance.
+
+        Args:
+            text: Text to embed.
+
+        Returns:
+            Embedding.
+        """
+        return self.embeddings.embed_query(text)
+
+    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
+        """Asynchronously embed search docs.
+
+        Args:
+            texts: List of text to embed.
+
+        Returns:
+            List of embeddings.
+        """
+        return await self.embeddings.aembed_documents(texts)
+
+    async def aembed_query(self, text: str) -> list[float]:
+        """Asynchronously embed query text.
+
+        Args:
+            text: Text to embed.
+
+        Returns:
+            Embedding.
+        """
+        return await self.embeddings.aembed_query(text)
+
+    def __call__(self, *args, **kwargs):
+        """Make the class callable by delegating to the underlying embeddings instance.
+
+        This handles cases where the embeddings object is used as a callable.
+
+        Args:
+            *args: Positional arguments to pass to the underlying embeddings instance.
+            **kwargs: Keyword arguments to pass to the underlying embeddings instance.
+
+        Returns:
+            The result of calling the underlying embeddings instance.
+        """
+        if callable(self.embeddings):
+            return self.embeddings(*args, **kwargs)
+        msg = f"'{type(self.embeddings).__name__}' object is not callable"
+        raise TypeError(msg)
+
+    def __getattr__(self, name: str):
+        """Delegate attribute access to the underlying embeddings instance.
+
+        This ensures full compatibility with any additional methods or attributes
+        that the underlying embeddings instance might have.
+
+        Args:
+            name: The attribute name to access.
+
+        Returns:
+            The attribute from the underlying embeddings instance.
+        """
+        return getattr(self.embeddings, name)
+
+    def __repr__(self) -> str:
+        """Return string representation of the wrapper."""
+        return f"EmbeddingsWithModels(embeddings={self.embeddings!r}, available_models={self.available_models!r})"
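Because every method delegates to the primary instance (including `__getattr__` for anything not defined on the wrapper), existing call sites can treat an `EmbeddingsWithModels` exactly like the `Embeddings` it wraps. A usage sketch; `OpenAIEmbeddings` and the model names are illustrative choices, not something this diff prescribes:

    from langchain_openai import OpenAIEmbeddings  # assumed provider, for illustration only

    from lfx.base.embeddings.embeddings_class import EmbeddingsWithModels

    small = OpenAIEmbeddings(model="text-embedding-3-small")
    large = OpenAIEmbeddings(model="text-embedding-3-large")
    wrapper = EmbeddingsWithModels(
        embeddings=small,  # primary/fallback instance
        available_models={"text-embedding-3-small": small, "text-embedding-3-large": large},
    )

    vec = wrapper.embed_query("hello")  # delegates to the primary instance
    alt = wrapper.available_models["text-embedding-3-large"].embed_query("hello")

One design note: Python only consults `__getattr__` for attributes that normal lookup fails to find, so the wrapper's own `embeddings` and `available_models` attributes stay intact while everything else passes through to the wrapped instance.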
lfx/base/models/__init__.py
CHANGED

@@ -1,3 +1,13 @@
 from .model import LCModelComponent
+from .unified_models import (
+    get_model_provider_variable_mapping,
+    get_model_providers,
+    get_unified_models_detailed,
+)
 
-__all__ = [
+__all__ = [
+    "LCModelComponent",
+    "get_model_provider_variable_mapping",
+    "get_model_providers",
+    "get_unified_models_detailed",
+]
lfx/base/models/google_generative_ai_constants.py
CHANGED

@@ -2,29 +2,53 @@ from .model_metadata import create_model_metadata
 
 # Unified model metadata - single source of truth
 GOOGLE_GENERATIVE_AI_MODELS_DETAILED = [
-    # GEMINI 1.5
+    # GEMINI 1.5 (stable)
     create_model_metadata(
-        provider="Google Generative AI",
+        provider="Google Generative AI",
+        name="gemini-1.5-pro",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
     ),
     create_model_metadata(
-        provider="Google Generative AI",
+        provider="Google Generative AI",
+        name="gemini-1.5-flash",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
     ),
     create_model_metadata(
         provider="Google Generative AI", name="gemini-1.5-flash-8b", icon="GoogleGenerativeAI", tool_calling=True
     ),
-    # GEMINI 2.
+    # GEMINI 2.0 (stable)
     create_model_metadata(
-        provider="Google Generative AI",
+        provider="Google Generative AI",
+        name="gemini-2.0-flash-lite",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
     ),
+    # GEMINI 2.5 (future/not yet released)
     create_model_metadata(
-        provider="Google Generative AI",
+        provider="Google Generative AI",
+        name="gemini-2.5-pro",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
+        preview=True,
+        not_supported=True,
     ),
     create_model_metadata(
-        provider="Google Generative AI",
+        provider="Google Generative AI",
+        name="gemini-2.5-flash",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
+        preview=True,
+        not_supported=True,
     ),
-    # GEMINI 2.0
     create_model_metadata(
-        provider="Google Generative AI",
+        provider="Google Generative AI",
+        name="gemini-2.5-flash-lite",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
+        preview=True,
+        not_supported=True,
     ),
     # PREVIEW
     create_model_metadata(
lfx/base/models/model_metadata.py
CHANGED

@@ -13,6 +13,8 @@ class ModelMetadata(TypedDict, total=False):
     preview: bool  # Whether model is in preview/beta (defaults to False)
     not_supported: bool  # Whether model is not supported or deprecated (defaults to False)
     deprecated: bool  # Whether model is deprecated (defaults to False)
+    default: bool  # Whether model is a default/recommended option (defaults to False)
+    model_type: str  # Type of model (defaults to "llm" or "embeddings")
 
 
 def create_model_metadata(

@@ -26,6 +28,8 @@ def create_model_metadata(
     preview: bool = False,
     not_supported: bool = False,
     deprecated: bool = False,
+    default: bool = False,
+    model_type: str = "llm",
 ) -> ModelMetadata:
     """Helper function to create ModelMetadata with explicit defaults."""
     return ModelMetadata(

@@ -38,4 +42,6 @@ def create_model_metadata(
         preview=preview,
         not_supported=not_supported,
         deprecated=deprecated,
+        default=default,
+        model_type=model_type,
     )
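The two new fields are additive and keep existing call sites valid, since both have defaults (`default=False`, `model_type="llm"`). A sketch of an entry that exercises them; the model chosen is one that does appear elsewhere in this diff:

    from lfx.base.models.model_metadata import create_model_metadata

    entry = create_model_metadata(
        provider="Ollama",
        name="nomic-embed-text",
        icon="Ollama",
        model_type="embeddings",  # new field; "llm" if omitted
        default=True,             # new field; flags a default/recommended option
    )
    assert entry["model_type"] == "embeddings"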
lfx/base/models/ollama_constants.py
CHANGED

@@ -1,3 +1,186 @@
+from .model_metadata import create_model_metadata
+
+# Unified model metadata - single source of truth
+OLLAMA_MODELS_DETAILED = [
+    # Tool Calling Models
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3.3",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwq",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3.2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3.1",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen2.5",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen2.5-coder",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral-nemo",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mixtral",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="command-r",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="command-r-plus",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral-large",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="smollm2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="hermes3",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="athene-v2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral-small",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="nemotron-mini",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="nemotron",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3-groq-tool-use",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="granite3-dense",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="granite3.1-dense",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="aya-expanse",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="granite3-moe",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="firefunction-v2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="cogito",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="gpt-oss:20b",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen3-vl:4b",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+]
+
+# Filter lists based on metadata properties
+OLLAMA_TOOL_MODELS_BASE = [
+    metadata["name"]
+    for metadata in OLLAMA_MODELS_DETAILED
+    if metadata.get("tool_calling", False) and not metadata.get("not_supported", False)
+]
+
+# Embedding models - following OpenAI's pattern of keeping these as a simple list
 # https://ollama.com/search?c=embedding
 OLLAMA_EMBEDDING_MODELS = [
     "nomic-embed-text",

@@ -10,37 +193,19 @@ OLLAMA_EMBEDDING_MODELS = [
     "granite-embedding",
     "jina-embeddings-v2-base-en",
 ]
-# https://ollama.com/search?c=tools
-OLLAMA_TOOL_MODELS_BASE = [
-    "llama3.3",
-    "qwq",
-    "llama3.2",
-    "llama3.1",
-    "mistral",
-    "qwen2",
-    "qwen2.5",
-    "qwen2.5-coder",
-    "mistral-nemo",
-    "mixtral",
-    "command-r",
-    "command-r-plus",
-    "mistral-large",
-    "smollm2",
-    "hermes3",
-    "athene-v2",
-    "mistral-small",
-    "nemotron-mini",
-    "nemotron",
-    "llama3-groq-tool-use",
-    "granite3-dense",
-    "granite3.1-dense",
-    "aya-expanse",
-    "granite3-moe",
-    "firefunction-v2",
-    "cogito",
-]
 
+# Embedding models as detailed metadata
+OLLAMA_EMBEDDING_MODELS_DETAILED = [
+    create_model_metadata(
+        provider="Ollama",
+        name=name,
+        icon="Ollama",
+        model_type="embeddings",
+    )
+    for name in OLLAMA_EMBEDDING_MODELS
+]
 
+# Connection URLs
 URL_LIST = [
     "http://localhost:11434",
     "http://host.docker.internal:11434",

@@ -48,5 +213,6 @@ URL_LIST = [
     "http://0.0.0.0:11434",
 ]
 
-
+# Backwards compatibility
+OLLAMA_MODEL_NAMES = OLLAMA_TOOL_MODELS_BASE
 DEFAULT_OLLAMA_API_URL = "https://ollama.com"