lfx-nightly 0.2.0.dev26__py3-none-any.whl → 0.2.1.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +9 -4
  3. lfx/base/agents/altk_base_agent.py +16 -3
  4. lfx/base/agents/altk_tool_wrappers.py +1 -1
  5. lfx/base/agents/utils.py +4 -0
  6. lfx/base/composio/composio_base.py +78 -41
  7. lfx/base/data/base_file.py +14 -4
  8. lfx/base/data/cloud_storage_utils.py +156 -0
  9. lfx/base/data/docling_utils.py +191 -65
  10. lfx/base/data/storage_utils.py +109 -0
  11. lfx/base/datastax/astradb_base.py +75 -64
  12. lfx/base/mcp/util.py +2 -2
  13. lfx/base/models/__init__.py +11 -1
  14. lfx/base/models/anthropic_constants.py +21 -12
  15. lfx/base/models/google_generative_ai_constants.py +33 -9
  16. lfx/base/models/model_metadata.py +6 -0
  17. lfx/base/models/ollama_constants.py +196 -30
  18. lfx/base/models/openai_constants.py +37 -10
  19. lfx/base/models/unified_models.py +1123 -0
  20. lfx/base/models/watsonx_constants.py +36 -0
  21. lfx/base/tools/component_tool.py +2 -9
  22. lfx/cli/commands.py +6 -1
  23. lfx/cli/run.py +65 -409
  24. lfx/cli/script_loader.py +13 -3
  25. lfx/components/__init__.py +0 -3
  26. lfx/components/composio/github_composio.py +1 -1
  27. lfx/components/cuga/cuga_agent.py +39 -27
  28. lfx/components/data_source/api_request.py +4 -2
  29. lfx/components/docling/__init__.py +45 -11
  30. lfx/components/docling/chunk_docling_document.py +3 -1
  31. lfx/components/docling/docling_inline.py +39 -49
  32. lfx/components/docling/export_docling_document.py +3 -1
  33. lfx/components/elastic/opensearch_multimodal.py +215 -57
  34. lfx/components/files_and_knowledge/file.py +439 -39
  35. lfx/components/files_and_knowledge/ingestion.py +8 -0
  36. lfx/components/files_and_knowledge/retrieval.py +10 -0
  37. lfx/components/files_and_knowledge/save_file.py +123 -53
  38. lfx/components/ibm/watsonx.py +7 -1
  39. lfx/components/input_output/chat_output.py +7 -1
  40. lfx/components/langchain_utilities/tool_calling.py +14 -6
  41. lfx/components/llm_operations/batch_run.py +80 -25
  42. lfx/components/llm_operations/lambda_filter.py +33 -6
  43. lfx/components/llm_operations/llm_conditional_router.py +39 -7
  44. lfx/components/llm_operations/structured_output.py +38 -12
  45. lfx/components/models/__init__.py +16 -74
  46. lfx/components/models_and_agents/agent.py +51 -201
  47. lfx/components/models_and_agents/embedding_model.py +185 -339
  48. lfx/components/models_and_agents/language_model.py +54 -318
  49. lfx/components/models_and_agents/mcp_component.py +58 -9
  50. lfx/components/ollama/ollama.py +9 -4
  51. lfx/components/ollama/ollama_embeddings.py +2 -1
  52. lfx/components/openai/openai_chat_model.py +1 -1
  53. lfx/components/processing/__init__.py +0 -3
  54. lfx/components/vllm/__init__.py +37 -0
  55. lfx/components/vllm/vllm.py +141 -0
  56. lfx/components/vllm/vllm_embeddings.py +110 -0
  57. lfx/custom/custom_component/custom_component.py +8 -6
  58. lfx/custom/directory_reader/directory_reader.py +5 -2
  59. lfx/graph/utils.py +64 -18
  60. lfx/inputs/__init__.py +2 -0
  61. lfx/inputs/input_mixin.py +54 -0
  62. lfx/inputs/inputs.py +115 -0
  63. lfx/interface/initialize/loading.py +42 -12
  64. lfx/io/__init__.py +2 -0
  65. lfx/run/__init__.py +5 -0
  66. lfx/run/base.py +494 -0
  67. lfx/schema/data.py +1 -1
  68. lfx/schema/image.py +28 -19
  69. lfx/schema/message.py +19 -3
  70. lfx/services/interfaces.py +5 -0
  71. lfx/services/manager.py +5 -4
  72. lfx/services/mcp_composer/service.py +45 -13
  73. lfx/services/settings/auth.py +18 -11
  74. lfx/services/settings/base.py +12 -24
  75. lfx/services/settings/constants.py +2 -0
  76. lfx/services/storage/local.py +37 -0
  77. lfx/services/storage/service.py +19 -0
  78. lfx/utils/constants.py +1 -0
  79. lfx/utils/image.py +29 -11
  80. lfx/utils/validate_cloud.py +14 -3
  81. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/METADATA +5 -2
  82. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/RECORD +84 -78
  83. lfx/components/processing/dataframe_to_toolset.py +0 -259
  84. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/WHEEL +0 -0
  85. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/entry_points.txt +0 -0
lfx/base/models/anthropic_constants.py
@@ -2,32 +2,41 @@ from .model_metadata import create_model_metadata
 
 ANTHROPIC_MODELS_DETAILED = [
     # Tool calling supported models
+    create_model_metadata(provider="Anthropic", name="claude-opus-4-5-20251101", icon="Anthropic", tool_calling=True),
+    create_model_metadata(provider="Anthropic", name="claude-haiku-4-5-20251001", icon="Anthropic", tool_calling=True),
     create_model_metadata(provider="Anthropic", name="claude-sonnet-4-5-20250929", icon="Anthropic", tool_calling=True),
     create_model_metadata(provider="Anthropic", name="claude-opus-4-1-20250805", icon="Anthropic", tool_calling=True),
     create_model_metadata(provider="Anthropic", name="claude-opus-4-20250514", icon="Anthropic", tool_calling=True),
     create_model_metadata(provider="Anthropic", name="claude-sonnet-4-20250514", icon="Anthropic", tool_calling=True),
-    create_model_metadata(provider="Anthropic", name="claude-3-7-sonnet-latest", icon="Anthropic", tool_calling=True),
-    create_model_metadata(provider="Anthropic", name="claude-3-5-sonnet-latest", icon="Anthropic", tool_calling=True),
-    create_model_metadata(provider="Anthropic", name="claude-3-5-haiku-latest", icon="Anthropic", tool_calling=True),
-    create_model_metadata(provider="Anthropic", name="claude-3-opus-latest", icon="Anthropic", tool_calling=True),
+    create_model_metadata(provider="Anthropic", name="claude-3-5-haiku-20241022", icon="Anthropic", tool_calling=True),
+    create_model_metadata(provider="Anthropic", name="claude-3-haiku-20240307", icon="Anthropic", tool_calling=True),
+    # Deprecated models
+    create_model_metadata(
+        provider="Anthropic", name="claude-3-7-sonnet-latest", icon="Anthropic", tool_calling=True, deprecated=True
+    ),
+    create_model_metadata(
+        provider="Anthropic", name="claude-3-5-sonnet-latest", icon="Anthropic", tool_calling=True, deprecated=True
+    ),
+    create_model_metadata(
+        provider="Anthropic", name="claude-3-5-haiku-latest", icon="Anthropic", tool_calling=True, deprecated=True
+    ),
+    create_model_metadata(
+        provider="Anthropic", name="claude-3-opus-latest", icon="Anthropic", tool_calling=True, deprecated=True
+    ),
     create_model_metadata(
         provider="Anthropic", name="claude-3-sonnet-20240229", icon="Anthropic", tool_calling=True, deprecated=True
     ),
-    # Tool calling unsupported models
-    create_model_metadata(provider="Anthropic", name="claude-2.1", icon="Anthropic", tool_calling=False),
-    create_model_metadata(provider="Anthropic", name="claude-2.0", icon="Anthropic", tool_calling=False),
-    # Deprecated models
     create_model_metadata(
-        provider="Anthropic", name="claude-3-5-sonnet-20240620", icon="Anthropic", tool_calling=True, deprecated=True
+        provider="Anthropic", name="claude-2.1", icon="Anthropic", tool_calling=False, deprecated=True
     ),
     create_model_metadata(
-        provider="Anthropic", name="claude-3-5-sonnet-20241022", icon="Anthropic", tool_calling=True, deprecated=True
+        provider="Anthropic", name="claude-2.0", icon="Anthropic", tool_calling=False, deprecated=True
     ),
     create_model_metadata(
-        provider="Anthropic", name="claude-3-5-haiku-20241022", icon="Anthropic", tool_calling=True, deprecated=True
+        provider="Anthropic", name="claude-3-5-sonnet-20240620", icon="Anthropic", tool_calling=True, deprecated=True
     ),
     create_model_metadata(
-        provider="Anthropic", name="claude-3-haiku-20240307", icon="Anthropic", tool_calling=True, deprecated=True
+        provider="Anthropic", name="claude-3-5-sonnet-20241022", icon="Anthropic", tool_calling=True, deprecated=True
     ),
 ]
 
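With every legacy Claude entry now flagged deprecated=True, callers no longer need a hard-coded allow-list; they can filter the metadata directly, mirroring the derived-list pattern this release introduces for Ollama below. A minimal sketch (the helper name active_anthropic_model_names is illustrative, not part of the package):

    from lfx.base.models.anthropic_constants import ANTHROPIC_MODELS_DETAILED

    def active_anthropic_model_names() -> list[str]:
        # Keep only entries that are neither deprecated nor flagged as unsupported.
        return [
            m["name"]
            for m in ANTHROPIC_MODELS_DETAILED
            if not m.get("deprecated", False) and not m.get("not_supported", False)
        ]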
lfx/base/models/google_generative_ai_constants.py
@@ -2,29 +2,53 @@ from .model_metadata import create_model_metadata
 
 # Unified model metadata - single source of truth
 GOOGLE_GENERATIVE_AI_MODELS_DETAILED = [
-    # GEMINI 1.5
+    # GEMINI 1.5 (stable)
     create_model_metadata(
-        provider="Google Generative AI", name="gemini-1.5-pro", icon="GoogleGenerativeAI", tool_calling=True
+        provider="Google Generative AI",
+        name="gemini-1.5-pro",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
     ),
     create_model_metadata(
-        provider="Google Generative AI", name="gemini-1.5-flash", icon="GoogleGenerativeAI", tool_calling=True
+        provider="Google Generative AI",
+        name="gemini-1.5-flash",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
     ),
     create_model_metadata(
         provider="Google Generative AI", name="gemini-1.5-flash-8b", icon="GoogleGenerativeAI", tool_calling=True
     ),
-    # GEMINI 2.5
+    # GEMINI 2.0 (stable)
     create_model_metadata(
-        provider="Google Generative AI", name="gemini-2.5-pro", icon="GoogleGenerativeAI", tool_calling=True
+        provider="Google Generative AI",
+        name="gemini-2.0-flash-lite",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
     ),
+    # GEMINI 2.5 (future/not yet released)
     create_model_metadata(
-        provider="Google Generative AI", name="gemini-2.5-flash", icon="GoogleGenerativeAI", tool_calling=True
+        provider="Google Generative AI",
+        name="gemini-2.5-pro",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
+        preview=True,
+        not_supported=True,
     ),
     create_model_metadata(
-        provider="Google Generative AI", name="gemini-2.5-flash-lite", icon="GoogleGenerativeAI", tool_calling=True
+        provider="Google Generative AI",
+        name="gemini-2.5-flash",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
+        preview=True,
+        not_supported=True,
     ),
-    # GEMINI 2.0
     create_model_metadata(
-        provider="Google Generative AI", name="gemini-2.0-flash-lite", icon="GoogleGenerativeAI", tool_calling=True
+        provider="Google Generative AI",
+        name="gemini-2.5-flash-lite",
+        icon="GoogleGenerativeAI",
+        tool_calling=True,
+        preview=True,
+        not_supported=True,
     ),
     # PREVIEW
     create_model_metadata(
lfx/base/models/model_metadata.py
@@ -13,6 +13,8 @@ class ModelMetadata(TypedDict, total=False):
     preview: bool  # Whether model is in preview/beta (defaults to False)
     not_supported: bool  # Whether model is not supported or deprecated (defaults to False)
     deprecated: bool  # Whether model is deprecated (defaults to False)
+    default: bool  # Whether model is a default/recommended option (defaults to False)
+    model_type: str  # Type of model (defaults to "llm" or "embeddings")
 
 
 def create_model_metadata(
@@ -26,6 +28,8 @@ def create_model_metadata(
     preview: bool = False,
     not_supported: bool = False,
     deprecated: bool = False,
+    default: bool = False,
+    model_type: str = "llm",
 ) -> ModelMetadata:
     """Helper function to create ModelMetadata with explicit defaults."""
     return ModelMetadata(
@@ -38,4 +42,6 @@
         preview=preview,
         not_supported=not_supported,
         deprecated=deprecated,
+        default=default,
+        model_type=model_type,
     )
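Both new fields default to values that reproduce the previous behaviour (default=False, model_type="llm"), so existing call sites are unaffected. A small sketch of an entry that uses them (the model name here is illustrative):

    from lfx.base.models.model_metadata import create_model_metadata

    # model_type distinguishes embedding entries from the default "llm" type;
    # default=True marks the entry as a recommended pick for UIs that honour the flag.
    example = create_model_metadata(
        provider="OpenAI",
        name="text-embedding-3-small",
        icon="OpenAI",
        model_type="embeddings",
        default=True,
    )
    assert example["model_type"] == "embeddings"
    assert example["default"] is True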
lfx/base/models/ollama_constants.py
@@ -1,3 +1,186 @@
+from .model_metadata import create_model_metadata
+
+# Unified model metadata - single source of truth
+OLLAMA_MODELS_DETAILED = [
+    # Tool Calling Models
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3.3",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwq",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3.2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3.1",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen2.5",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen2.5-coder",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral-nemo",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mixtral",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="command-r",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="command-r-plus",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral-large",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="smollm2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="hermes3",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="athene-v2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="mistral-small",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="nemotron-mini",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="nemotron",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="llama3-groq-tool-use",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="granite3-dense",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="granite3.1-dense",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="aya-expanse",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="granite3-moe",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="firefunction-v2",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="cogito",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="gpt-oss:20b",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+    create_model_metadata(
+        provider="Ollama",
+        name="qwen3-vl:4b",
+        icon="Ollama",
+        tool_calling=True,
+    ),
+]
+
+# Filter lists based on metadata properties
+OLLAMA_TOOL_MODELS_BASE = [
+    metadata["name"]
+    for metadata in OLLAMA_MODELS_DETAILED
+    if metadata.get("tool_calling", False) and not metadata.get("not_supported", False)
+]
+
+# Embedding models - following OpenAI's pattern of keeping these as a simple list
 # https://ollama.com/search?c=embedding
 OLLAMA_EMBEDDING_MODELS = [
     "nomic-embed-text",
@@ -10,37 +193,19 @@ OLLAMA_EMBEDDING_MODELS = [
     "granite-embedding",
     "jina-embeddings-v2-base-en",
 ]
-# https://ollama.com/search?c=tools
-OLLAMA_TOOL_MODELS_BASE = [
-    "llama3.3",
-    "qwq",
-    "llama3.2",
-    "llama3.1",
-    "mistral",
-    "qwen2",
-    "qwen2.5",
-    "qwen2.5-coder",
-    "mistral-nemo",
-    "mixtral",
-    "command-r",
-    "command-r-plus",
-    "mistral-large",
-    "smollm2",
-    "hermes3",
-    "athene-v2",
-    "mistral-small",
-    "nemotron-mini",
-    "nemotron",
-    "llama3-groq-tool-use",
-    "granite3-dense",
-    "granite3.1-dense",
-    "aya-expanse",
-    "granite3-moe",
-    "firefunction-v2",
-    "cogito",
-]
 
+# Embedding models as detailed metadata
+OLLAMA_EMBEDDING_MODELS_DETAILED = [
+    create_model_metadata(
+        provider="Ollama",
+        name=name,
+        icon="Ollama",
+        model_type="embeddings",
+    )
+    for name in OLLAMA_EMBEDDING_MODELS
+]
 
+# Connection URLs
 URL_LIST = [
     "http://localhost:11434",
     "http://host.docker.internal:11434",
@@ -48,5 +213,6 @@ URL_LIST = [
     "http://0.0.0.0:11434",
 ]
 
-
+# Backwards compatibility
+OLLAMA_MODEL_NAMES = OLLAMA_TOOL_MODELS_BASE
 DEFAULT_OLLAMA_API_URL = "https://ollama.com"
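Since OLLAMA_TOOL_MODELS_BASE is now derived from OLLAMA_MODELS_DETAILED and OLLAMA_MODEL_NAMES simply aliases it, the flat name list stays in lockstep with the metadata. An illustrative check of that relationship (not a test shipped with the package):

    from lfx.base.models.ollama_constants import (
        OLLAMA_MODEL_NAMES,
        OLLAMA_MODELS_DETAILED,
        OLLAMA_TOOL_MODELS_BASE,
    )

    # The backwards-compatibility name is the same list object, not a copy.
    assert OLLAMA_MODEL_NAMES is OLLAMA_TOOL_MODELS_BASE
    # Every supported tool-calling entry in the metadata shows up in the flat list.
    assert all(
        m["name"] in OLLAMA_TOOL_MODELS_BASE
        for m in OLLAMA_MODELS_DETAILED
        if m.get("tool_calling", False) and not m.get("not_supported", False)
    )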
lfx/base/models/openai_constants.py
@@ -41,27 +41,43 @@ OPENAI_MODELS_DETAILED = [
     # Regular OpenAI Models
     create_model_metadata(provider="OpenAI", name="gpt-4o-mini", icon="OpenAI", tool_calling=True),
     create_model_metadata(provider="OpenAI", name="gpt-4o", icon="OpenAI", tool_calling=True),
-    create_model_metadata(provider="OpenAI", name="gpt-4.1", icon="OpenAI", tool_calling=True),
-    create_model_metadata(provider="OpenAI", name="gpt-4.1-mini", icon="OpenAI", tool_calling=True),
-    create_model_metadata(provider="OpenAI", name="gpt-4.1-nano", icon="OpenAI", tool_calling=True),
+    create_model_metadata(
+        provider="OpenAI", name="gpt-4.1", icon="OpenAI", tool_calling=True, preview=True, not_supported=True
+    ),
+    create_model_metadata(
+        provider="OpenAI", name="gpt-4.1-mini", icon="OpenAI", tool_calling=True, preview=True, not_supported=True
+    ),
+    create_model_metadata(
+        provider="OpenAI", name="gpt-4.1-nano", icon="OpenAI", tool_calling=True, preview=True, not_supported=True
+    ),
     create_model_metadata(
         provider="OpenAI", name="gpt-4.5-preview", icon="OpenAI", tool_calling=True, preview=True, not_supported=True
     ),
     create_model_metadata(provider="OpenAI", name="gpt-4-turbo", icon="OpenAI", tool_calling=True),
     create_model_metadata(
-        provider="OpenAI", name="gpt-4-turbo-preview", icon="OpenAI", tool_calling=True, preview=True
+        provider="OpenAI", name="gpt-4-turbo-preview", icon="OpenAI", tool_calling=True, preview=True, deprecated=True
     ),
     create_model_metadata(provider="OpenAI", name="gpt-4", icon="OpenAI", tool_calling=True),
-    create_model_metadata(provider="OpenAI", name="gpt-3.5-turbo", icon="OpenAI", tool_calling=True),
+    create_model_metadata(provider="OpenAI", name="gpt-3.5-turbo", icon="OpenAI", tool_calling=True, deprecated=True),
     # Reasoning Models
     create_model_metadata(provider="OpenAI", name="o1", icon="OpenAI", reasoning=True),
     create_model_metadata(provider="OpenAI", name="o1-mini", icon="OpenAI", reasoning=True, not_supported=True),
     create_model_metadata(provider="OpenAI", name="o1-pro", icon="OpenAI", reasoning=True, not_supported=True),
-    create_model_metadata(provider="OpenAI", name="o3-mini", icon="OpenAI", reasoning=True),
-    create_model_metadata(provider="OpenAI", name="o3", icon="OpenAI", reasoning=True),
-    create_model_metadata(provider="OpenAI", name="o3-pro", icon="OpenAI", reasoning=True),
-    create_model_metadata(provider="OpenAI", name="o4-mini", icon="OpenAI", reasoning=True),
-    create_model_metadata(provider="OpenAI", name="o4-mini-high", icon="OpenAI", reasoning=True),
+    create_model_metadata(
+        provider="OpenAI", name="o3-mini", icon="OpenAI", reasoning=True, preview=True, not_supported=True
+    ),
+    create_model_metadata(
+        provider="OpenAI", name="o3", icon="OpenAI", reasoning=True, preview=True, not_supported=True
+    ),
+    create_model_metadata(
+        provider="OpenAI", name="o3-pro", icon="OpenAI", reasoning=True, preview=True, not_supported=True
+    ),
+    create_model_metadata(
+        provider="OpenAI", name="o4-mini", icon="OpenAI", reasoning=True, preview=True, not_supported=True
+    ),
+    create_model_metadata(
+        provider="OpenAI", name="o4-mini-high", icon="OpenAI", reasoning=True, preview=True, not_supported=True
+    ),
     # Search Models
     create_model_metadata(
         provider="OpenAI",
@@ -124,6 +140,17 @@ OPENAI_EMBEDDING_MODEL_NAMES = [
     "text-embedding-ada-002",
 ]
 
+# Embedding models as detailed metadata
+OPENAI_EMBEDDING_MODELS_DETAILED = [
+    create_model_metadata(
+        provider="OpenAI",
+        name=name,
+        icon="OpenAI",
+        model_type="embeddings",
+    )
+    for name in OPENAI_EMBEDDING_MODEL_NAMES
+]
+
 # Backwards compatibility
 MODEL_NAMES = OPENAI_CHAT_MODEL_NAMES
 OPENAI_MODEL_NAMES = OPENAI_CHAT_MODEL_NAMES
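Both OpenAI and Ollama now publish *_EMBEDDING_MODELS_DETAILED alongside the chat metadata, so a consumer can merge the lists and split them by the new model_type field. A sketch under that assumption (the grouping helper is illustrative, not an API of the package):

    from collections import defaultdict

    from lfx.base.models.openai_constants import (
        OPENAI_EMBEDDING_MODELS_DETAILED,
        OPENAI_MODELS_DETAILED,
    )

    def group_by_model_type(entries):
        # Bucket metadata entries by model_type; entries created without the field
        # resolve to the "llm" default set by create_model_metadata.
        grouped = defaultdict(list)
        for entry in entries:
            grouped[entry.get("model_type", "llm")].append(entry["name"])
        return dict(grouped)

    catalog = group_by_model_type(OPENAI_MODELS_DETAILED + OPENAI_EMBEDDING_MODELS_DETAILED)
    # catalog["embeddings"] lists the embedding names; chat and reasoning models land under "llm".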