waldiez 0.5.2__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (79)
  1. waldiez/_version.py +1 -1
  2. waldiez/cli.py +5 -27
  3. waldiez/exporter.py +0 -13
  4. waldiez/exporting/agent/exporter.py +38 -0
  5. waldiez/exporting/agent/extras/__init__.py +2 -0
  6. waldiez/exporting/agent/extras/doc_agent_extras.py +366 -0
  7. waldiez/exporting/agent/extras/group_member_extras.py +3 -2
  8. waldiez/exporting/agent/processor.py +113 -15
  9. waldiez/exporting/chats/processor.py +2 -21
  10. waldiez/exporting/chats/utils/common.py +66 -1
  11. waldiez/exporting/chats/utils/group.py +6 -3
  12. waldiez/exporting/chats/utils/nested.py +1 -1
  13. waldiez/exporting/chats/utils/sequential.py +25 -9
  14. waldiez/exporting/chats/utils/single.py +8 -6
  15. waldiez/exporting/core/context.py +0 -12
  16. waldiez/exporting/core/extras/agent_extras/standard_extras.py +3 -1
  17. waldiez/exporting/core/extras/base.py +20 -17
  18. waldiez/exporting/core/extras/path_resolver.py +39 -41
  19. waldiez/exporting/core/extras/serializer.py +16 -1
  20. waldiez/exporting/core/protocols.py +17 -0
  21. waldiez/exporting/core/types.py +6 -9
  22. waldiez/exporting/flow/execution_generator.py +56 -21
  23. waldiez/exporting/flow/exporter.py +1 -4
  24. waldiez/exporting/flow/factory.py +0 -9
  25. waldiez/exporting/flow/file_generator.py +6 -0
  26. waldiez/exporting/flow/orchestrator.py +27 -21
  27. waldiez/exporting/flow/utils/__init__.py +0 -2
  28. waldiez/exporting/flow/utils/common.py +15 -96
  29. waldiez/exporting/flow/utils/importing.py +4 -0
  30. waldiez/io/mqtt.py +33 -14
  31. waldiez/io/redis.py +18 -13
  32. waldiez/io/structured.py +9 -4
  33. waldiez/io/utils.py +32 -0
  34. waldiez/io/ws.py +8 -2
  35. waldiez/models/__init__.py +6 -0
  36. waldiez/models/agents/__init__.py +8 -0
  37. waldiez/models/agents/agent/agent.py +136 -38
  38. waldiez/models/agents/agent/agent_type.py +3 -2
  39. waldiez/models/agents/agents.py +10 -0
  40. waldiez/models/agents/doc_agent/__init__.py +13 -0
  41. waldiez/models/agents/doc_agent/doc_agent.py +126 -0
  42. waldiez/models/agents/doc_agent/doc_agent_data.py +149 -0
  43. waldiez/models/agents/doc_agent/rag_query_engine.py +127 -0
  44. waldiez/models/chat/chat_message.py +1 -1
  45. waldiez/models/flow/flow.py +13 -2
  46. waldiez/models/model/__init__.py +2 -2
  47. waldiez/models/model/_aws.py +75 -0
  48. waldiez/models/model/_llm.py +516 -0
  49. waldiez/models/model/_price.py +30 -0
  50. waldiez/models/model/model.py +45 -2
  51. waldiez/models/model/model_data.py +2 -83
  52. waldiez/models/tool/predefined/_duckduckgo.py +123 -0
  53. waldiez/models/tool/predefined/_google.py +31 -9
  54. waldiez/models/tool/predefined/_perplexity.py +161 -0
  55. waldiez/models/tool/predefined/_searxng.py +152 -0
  56. waldiez/models/tool/predefined/_tavily.py +46 -9
  57. waldiez/models/tool/predefined/_wikipedia.py +26 -6
  58. waldiez/models/tool/predefined/_youtube.py +36 -8
  59. waldiez/models/tool/predefined/registry.py +6 -0
  60. waldiez/models/waldiez.py +12 -0
  61. waldiez/runner.py +184 -382
  62. waldiez/running/__init__.py +2 -4
  63. waldiez/running/base_runner.py +136 -118
  64. waldiez/running/environment.py +61 -17
  65. waldiez/running/post_run.py +70 -14
  66. waldiez/running/pre_run.py +42 -0
  67. waldiez/running/protocol.py +42 -48
  68. waldiez/running/run_results.py +5 -5
  69. waldiez/running/standard_runner.py +429 -0
  70. waldiez/running/timeline_processor.py +1166 -0
  71. waldiez/utils/version.py +12 -1
  72. {waldiez-0.5.2.dist-info → waldiez-0.5.4.dist-info}/METADATA +61 -63
  73. {waldiez-0.5.2.dist-info → waldiez-0.5.4.dist-info}/RECORD +77 -66
  74. waldiez/running/import_runner.py +0 -424
  75. waldiez/running/subprocess_runner.py +0 -100
  76. {waldiez-0.5.2.dist-info → waldiez-0.5.4.dist-info}/WHEEL +0 -0
  77. {waldiez-0.5.2.dist-info → waldiez-0.5.4.dist-info}/entry_points.txt +0 -0
  78. {waldiez-0.5.2.dist-info → waldiez-0.5.4.dist-info}/licenses/LICENSE +0 -0
  79. {waldiez-0.5.2.dist-info → waldiez-0.5.4.dist-info}/licenses/NOTICE.md +0 -0
waldiez/models/chat/chat_message.py

@@ -159,7 +159,7 @@ class WaldiezChatMessage(WaldiezBase):
         )
         content = self.content
         if self.type == "rag_message_generator":
-            if self.use_carryover:
+            if not self.use_carryover:
                 content = get_last_carryover_method_content(
                     text_content=self.content or "",
                 )
waldiez/models/flow/flow.py

@@ -148,6 +148,17 @@ class WaldiezFlow(WaldiezBase):
         """
         return self.data.is_async
 
+    @property
+    def is_group_chat(self) -> bool:
+        """Check if the flow is a group chat.
+
+        Returns
+        -------
+        bool
+            True if the flow is a group chat, False otherwise.
+        """
+        return self._is_group_chat
+
     @property
     def cache_seed(self) -> Optional[int]:
         """Check if the flow has caching disabled.
@@ -409,13 +420,13 @@ class WaldiezFlow(WaldiezBase):
         # - "text" or "none" => no need to create a group manager on ag2
         # - "function/method" => create a group manager and a group chat on ag2
         # in the first case, the chat would be:
-        # result, context, last_agent = initiate_group_chat(
+        # results = run_group_chat(
        #     pattern=pattern,
        #     messages=...,
        #     max_rounds=10
        # )
        # in the second case, the chat would be:
-        # user.initiate_chat(manager, ...)
+        # user.run(manager, ...)
         user_agent: Optional[WaldiezAgent] = None
         to_root_manager: Optional[WaldiezChat] = None
         root_manager: WaldiezGroupManager = self.get_root_group_manager()
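
The comment rewrite in the second hunk tracks ag2's move away from `initiate_group_chat` / `initiate_chat` toward the run-based API. As a hedged sketch of the two call shapes the comments describe; the import path and the `pattern`, `user`, and `manager` objects are illustrative placeholders, not definitions from this diff:

    # Sketch only: call shapes mirror the updated comments above.
    # The import location is an assumption; check the ag2 docs for your version.
    from autogen.agentchat import run_group_chat  # hypothetical import path

    # First case ("text" or "none"): run the group chat from a pattern object,
    # with no explicit group manager created by the exported code.
    results = run_group_chat(
        pattern=pattern,  # pattern built elsewhere in the exported flow
        messages="Start the task",
        max_rounds=10,
    )

    # Second case ("function"/"method"): a user agent drives the group manager.
    results = user.run(manager, message="Start the task")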
waldiez/models/model/__init__.py

@@ -2,13 +2,13 @@
 # Copyright (c) 2024 - 2025 Waldiez and contributors.
 """Waldiez model."""
 
+from ._aws import WaldiezModelAWS
+from ._price import WaldiezModelPrice
 from .extra_requirements import get_models_extra_requirements
 from .model import DEFAULT_BASE_URLS, MODEL_NEEDS_BASE_URL, WaldiezModel
 from .model_data import (
     WaldiezModelAPIType,
-    WaldiezModelAWS,
     WaldiezModelData,
-    WaldiezModelPrice,
 )
 
 __all__ = [
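
Because `waldiez/models/model/__init__.py` re-imports the relocated classes, the public import path should be unchanged; only the defining modules moved out of `model_data.py` into the new `_aws.py` and `_price.py`. A quick sanity check:

    # Both names still resolve from the package, as before the split.
    from waldiez.models.model import WaldiezModelAWS, WaldiezModelPrice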
waldiez/models/model/_aws.py (new file)

@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: Apache-2.0.
+# Copyright (c) 2024 - 2025 Waldiez and contributors.
+# flake8: noqa: E501
+"""Waldiez Model AWS model."""
+
+from typing import Optional
+
+from pydantic import Field
+from typing_extensions import Annotated
+
+from ..common import WaldiezBase
+
+
+class WaldiezModelAWS(WaldiezBase):
+    """AWS related parameters.
+
+    Attributes
+    ----------
+    region : Optional[str]
+        The AWS region, by default None.
+    access_key : Optional[str]
+        The AWS access key, by default None.
+    secret_key : Optional[str]
+        The AWS secret access key, by default None.
+    session_token : Optional[str]
+        The AWS session token, by default None.
+    profile_name : Optional[str]
+        The AWS profile name, by default None.
+    """
+
+    region: Annotated[
+        Optional[str],
+        Field(
+            None,
+            alias="region",
+            title="Region",
+            description="The AWS region",
+        ),
+    ] = None
+    access_key: Annotated[
+        Optional[str],
+        Field(
+            None,
+            alias="accessKey",
+            title="Access Key",
+            description="The AWS access key",
+        ),
+    ] = None
+    secret_key: Annotated[
+        Optional[str],
+        Field(
+            None,
+            alias="secretKey",
+            title="Secret Key",
+            description="The AWS secret key",
+        ),
+    ] = None
+    session_token: Annotated[
+        Optional[str],
+        Field(
+            None,
+            alias="sessionToken",
+            title="Session Token",
+            description="The AWS session token",
+        ),
+    ] = None
+    profile_name: Annotated[
+        Optional[str],
+        Field(
+            None,
+            alias="profileName",
+            title="Profile Name",
+            description="The AWS Profile name to use",
+        ),
+    ] = None
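
All five fields are optional with camelCase aliases, so the model can be populated directly from a `.waldiez` JSON payload while exposing snake_case attributes in Python. A minimal sketch (assuming `WaldiezBase` keeps pydantic's default of validating by alias):

    from waldiez.models.model import WaldiezModelAWS

    # Keys use the aliases, as they would appear in a flow file.
    aws = WaldiezModelAWS(
        region="us-east-1",
        accessKey="AKIA...",  # placeholder credential
        secretKey="...",      # placeholder credential
        profileName="default",
    )
    print(aws.access_key)    # read back via the snake_case attribute
    print(aws.profile_name)  # "default"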
waldiez/models/model/_llm.py (new file)

@@ -0,0 +1,516 @@
+# SPDX-License-Identifier: Apache-2.0.
+# Copyright (c) 2024 - 2025 Waldiez and contributors.
+"""LLM related utilities for Waldiez models."""
+# pylint: disable=too-complex,too-many-return-statements
+# flake8: noqa: C901
+
+# Links:
+# https://docs.llamaindex.ai/en/stable/api_reference/llms/
+#
+# https://docs.llamaindex.ai/en/stable/examples/llm/openai/
+# https://docs.llamaindex.ai/en/stable/examples/llm/anthropic/
+# https://docs.llamaindex.ai/en/stable/examples/llm/azure_openai/
+# https://docs.llamaindex.ai/en/stable/examples/llm/bedrock/
+# https://docs.llamaindex.ai/en/stable/examples/llm/bedrock_converse/
+# https://docs.llamaindex.ai/en/stable/examples/llm/cohere/
+# https://docs.llamaindex.ai/en/stable/examples/llm/deepseek/
+# https://docs.llamaindex.ai/en/stable/examples/llm/gemini/
+# https://docs.llamaindex.ai/en/stable/examples/llm/google_genai/
+# https://docs.llamaindex.ai/en/stable/examples/llm/groq/
+# https://docs.llamaindex.ai/en/stable/examples/llm/mistralai/
+# https://docs.llamaindex.ai/en/stable/examples/llm/nvidia_nim/
+# https://docs.llamaindex.ai/en/stable/examples/llm/nvidia/
+# https://docs.llamaindex.ai/en/stable/examples/llm/together/
+# https://docs.llamaindex.ai/en/stable/api_reference/llms/openai_like/
+#
+
+import os
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from .model import WaldiezModel
+
+
+def get_llm_requirements(
+    model: "WaldiezModel",
+    ag2_version: str,
+) -> set[str]:
+    """Get the LLM requirements for the model.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM requirements for.
+    ag2_version : str
+        The version of AG2 to use for the requirements.
+
+    Returns
+    -------
+    set[str]
+        The set of LLM requirements for the model.
+    """
+    requirements: set[str] = {
+        "llama-index",
+        "llama-index-core",
+        f"ag2[rag]=={ag2_version}",
+    }
+    match model.data.api_type:
+        case "openai":
+            requirements.add("llama-index-llms-openai")
+        case "anthropic":
+            requirements.add("llama-index-llms-anthropic")
+        case "azure":
+            requirements.add("llama-index-llms-azure-openai")
+        case "bedrock":
+            requirements.add("llama-index-llms-bedrock-converse")
+        case "cohere":
+            requirements.add("llama-index-llms-openai")
+            requirements.add("llama-index-llms-cohere")
+        case "deepseek":
+            requirements.add("llama-index-llms-deepseek")
+        case "google":
+            # | "gemini"
+            requirements.add("llama-index-llms-google-genai")
+            requirements.add("llama-index-llms-gemini")
+        case "groq":
+            requirements.add("llama-index-llms-groq")
+        case "mistral":
+            requirements.add("llama-index-llms-mistralai")
+        case "nim":
+            requirements.update(
+                {
+                    "llama-index-llms-nvidia",
+                    "llama-index-readers-file",
+                    "llama-index-embeddings-nvidia",
+                    "llama-index-postprocessor-nvidia-rerank",
+                }
+            )
+        case "together":
+            requirements.add("llama-index-llms-together")
+        case "other":
+            # openai compatible LLMs
+            requirements.add("llama-index-llms-openai-like")
+
+    return requirements
+
+
+def get_llm_imports(model: "WaldiezModel") -> set[str]:
+    """Get the LLM import statements for the model.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM import statements for.
+
+    Returns
+    -------
+    set[str]
+        The LLM import statements for the model.
+
+    Raises
+    ------
+    ValueError
+        If the model's API type is unsupported.
+    """
+    match model.data.api_type:
+        case "openai":
+            return {"from llama_index.llms.openai import OpenAI"}
+        case "anthropic":
+            return {
+                "from llama_index.llms.anthropic import Anthropic",
+                "from llama_index.core import Settings",
+            }
+        case "azure":
+            return {"from llama_index.llms.azure_openai import AzureOpenAI"}
+        case "bedrock":
+            return {
+                "from llama_index.llms.bedrock_converse import BedrockConverse"
+            }
+        case "cohere":
+            return {"from llama_index.llms.cohere import Cohere"}
+        case "deepseek":
+            return {"from llama_index.llms.deepseek import DeepSeek"}
+        case "google":
+            return {"from llama_index.llms.gemini import Gemini"}
+        case "groq":
+            return {"from llama_index.llms.groq import Groq"}
+        case "mistral":
+            return {"from llama_index.llms.mistralai import MistralAI"}
+        case "nim":
+            return {
+                "from llama_index.core import Settings",
+                "from llama_index.llms.nvidia import NVIDIA",
+                "from llama_index.embeddings.nvidia import NVIDIAEmbedding",
+            }
+        case "together":
+            return {"from llama_index.llms.together import TogetherLLM"}
+        case "other":
+            return {"from llama_index.llms.openai_like import OpenAILike"}
+        case _:  # pragma: no cover
+            raise ValueError(f"Unsupported API type: {model.data.api_type}")
+
+
+def get_llm_arg(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the LLM argument for the model.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+
+    Raises
+    ------
+    ValueError
+        If the model's API type is unsupported.
+    """
+    match model.data.api_type:
+        case "openai":
+            return do_openai_llm(model)
+        case "anthropic":
+            return do_anthropic_llm(model)
+        case "azure":
+            return do_azure_llm(model)
+        case "bedrock":
+            return do_bedrock_llm(model)
+        case "cohere":
+            return do_cohere_llm(model)
+        case "deepseek":
+            return do_deepseek_llm(model)
+        case "google":
+            return do_google_llm(model)
+        case "groq":
+            return do_groq_llm(model)
+        case "mistral":
+            return do_mistral_llm(model)
+        case "nim":
+            return do_nim_llm(model)
+        case "together":
+            return do_together_llm(model)
+        case "other":
+            return do_other_llm(model)
+        case _:  # pragma: no cover
+            raise ValueError(f"Unsupported API type: {model.data.api_type}")
+
+
+def do_openai_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the OpenAI LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    temperature = model.data.temperature or 0.0
+    arg = f'OpenAI(model="{model.name}", temperature={temperature})'
+    before = ""
+    return arg, before
+
+
+def do_anthropic_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Anthropic LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    # from llama_index.llms.anthropic import Anthropic
+    # from llama_index.core import Settings
+
+    # tokenizer = Anthropic().tokenizer
+    # Settings.tokenizer = tokenizer
+    # # otherwise it will lookup ANTHROPIC_API_KEY from your env variable
+    # # llm = Anthropic(api_key="<api_key>")
+    # llm = Anthropic(model="claude-sonnet-4-0")
+    arg = f'Anthropic(model="{model.name}")'
+    before = (
+        "_tokenizer = Anthropic().tokenizer\nSettings.tokenizer = _tokenizer\n"
+    )
+    return arg, before
+
+
+def do_azure_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Azure OpenAI LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    #
+    # llm = AzureOpenAI(
+    #     engine="simon-llm", model="gpt-35-turbo-16k", temperature=0.0
+    # )
+    # Alternatively, you can also skip setting environment variables,
+    # and pass the parameters in directly via constructor.
+    # llm = AzureOpenAI(
+    #     engine="my-custom-llm",
+    #     model="gpt-35-turbo-16k",
+    #     temperature=0.0,
+    #     azure_endpoint="https://<your-resource-name>.openai.azure.com/",
+    #     api_key="<your-api-key>",
+    #     api_version="2023-07-01-preview",
+    # )
+    engine = model.data.extras.get("engine", model.name)
+    temperature = model.data.temperature or 0.0
+    arg = (
+        f"AzureOpenAI(\n"
+        f'    engine="{engine}",\n'
+        f'    model="{model.name}",\n'
+        f"    temperature={temperature},\n"
+    )
+    if model.data.base_url:
+        arg += f'    azure_endpoint="{model.data.base_url}",\n'
+    if model.data.api_version:
+        arg += f'    api_version="{model.data.api_version}",\n'
+    arg += ")"
+    before = ""
+    return arg, before
+
+
+def do_bedrock_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Bedrock LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    # llm = BedrockConverse(
+    #     model="anthropic.claude-3-haiku-20240307-v1:0",
+    #     profile_name=profile_name,
+    # )
+    # llm = BedrockConverse(
+    #     model="us.amazon.nova-lite-v1:0",
+    #     aws_access_key_id="AWS Access Key ID to use",
+    #     aws_secret_access_key="AWS Secret Access Key to use",
+    #     aws_session_token="AWS Session Token to use",
+    #     region_name="AWS Region to use, eg. us-east-1",
+    # )
+    if not model.data.aws:
+        # try to get what we can from env
+        profile_name = os.getenv("AWS_PROFILE_NAME", "")
+        aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID", "")
+        aws_region = os.getenv("AWS_REGION", "")
+    else:
+        profile_name = model.data.aws.profile_name or ""
+        aws_access_key_id = model.data.aws.access_key or ""
+        aws_region = model.data.aws.region or ""
+    arg = f'BedrockConverse(\n    model="{model.name}",\n'
+    if profile_name:
+        arg += f'    profile_name="{profile_name}",\n'
+    if aws_access_key_id:
+        arg += f'    aws_access_key_id="{aws_access_key_id}",\n'
+    if aws_region:
+        arg += f'    region_name="{aws_region}",\n'
+    arg += ")"
+    before = ""
+    return arg, before
+
+
+def do_cohere_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Cohere LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    arg = f'Cohere(\n    model="{model.name}",\n'
+    if model.data.api_key:
+        arg += f'    api_key="{model.data.api_key}",\n'
+    if model.data.base_url:
+        arg += f'    base_url="{model.data.base_url}",\n'
+    if model.data.temperature is not None:
+        arg += f"    temperature={model.data.temperature},\n"
+    arg += ")"
+    before = ""
+    return arg, before
+
+
+def do_deepseek_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the DeepSeek LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    arg = f'DeepSeek(model="{model.name}")'
+    before = ""
+    return arg, before
+
+
+def do_google_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Google LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    arg = f'Gemini(model="{model.name}")'
+    before = ""
+    return arg, before
+
+
+def do_groq_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Groq LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    arg = f'Groq(model="{model.name}")'
+    before = ""
+    return arg, before
+
+
+def do_mistral_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Mistral LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    arg = f'MistralAI(model="{model.name}")'
+    before = ""
+    return arg, before
+
+
+def do_nim_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the NVIDIA NIM LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    # NVIDIA's default embeddings only embed the first 512 tokens so we've set
+    # our chunk size to 500 to maximize the accuracy of our embeddings.
+    # Settings.text_splitter = SentenceSplitter(chunk_size=500)
+    # We set our embedding model to NVIDIA's default.
+    # If a chunk exceeds the number of tokens the model can encode,
+    # the default is to throw an error, so we set truncate="END" to
+    # instead discard tokens that go over the limit
+    # (hopefully not many because of our chunk size above).
+    before = (
+        "Settings.text_splitter = SentenceSplitter(chunk_size=500)\n"
+        "Settings.embed_model = "
+        'NVIDIAEmbedding(model="NV-Embed-QA", truncate="END")\n'
+    )
+    arg = f'NVIDIA(model="{model.name}")'
+    return arg, before
+
+
+def do_together_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the Together LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    arg = f'TogetherLLM(model="{model.name}")'
+    before = ""
+    return arg, before
+
+
+def do_other_llm(model: "WaldiezModel") -> tuple[str, str]:
+    """Get the OpenAI-like LLM argument and any content before it.
+
+    Parameters
+    ----------
+    model : WaldiezModel
+        The model to get the LLM argument for.
+
+    Returns
+    -------
+    tuple[str, str]
+        A tuple containing the LLM argument string and any content before it.
+    """
+    # llm = OpenAILike(
+    #     model="my model",
+    #     api_base="https://hostname.com/v1",
+    #     api_key="fake",
+    #     context_window=128000,
+    #     is_chat_model=True,
+    #     is_function_calling_model=False,
+    # )
+
+    arg = (
+        "OpenAILike(\n"
+        f'    model="{model.name}",\n'
+        f'    api_base="{model.data.base_url}",\n'
+    )
+    if not model.data.api_key:
+        arg += '    api_key="na",\n'
+    if model.data.extras:
+        for key, value in model.data.extras.items():
+            arg += f'    {key}="{value}",\n'
+    arg += ")"
+    # if model.data.price:
+    before = ""
+    return arg, before
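
Taken together, the three helpers split the work of emitting llama-index code: `get_llm_requirements` contributes pip requirements, `get_llm_imports` contributes import lines, and `get_llm_arg` returns the constructor expression plus any setup statements that must run first (the `before` string, e.g. the `Settings` tweaks for Anthropic and NIM). A hedged sketch of how an exporter might stitch them into one snippet; `render_llm_snippet` is an illustrative helper, not part of this module:

    from waldiez.models.model._llm import (
        get_llm_arg,
        get_llm_imports,
        get_llm_requirements,
    )

    def render_llm_snippet(model, ag2_version: str) -> str:
        """Assemble a copy-pastable snippet for one model."""
        requirements = sorted(get_llm_requirements(model, ag2_version))
        imports = sorted(get_llm_imports(model))
        arg, before = get_llm_arg(model)
        lines = ["# pip install " + " ".join(requirements)]
        lines += imports
        if before:  # setup code that must precede the constructor
            lines.append(before.rstrip("\n"))
        lines.append(f"llm = {arg}")
        return "\n".join(lines)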
waldiez/models/model/_price.py (new file)

@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: Apache-2.0.
+# Copyright (c) 2024 - 2025 Waldiez and contributors.
+# flake8: noqa: E501
+"""Waldiez Model Price."""
+
+from typing import Optional
+
+from pydantic import Field
+from typing_extensions import Annotated
+
+from ..common import WaldiezBase
+
+
+class WaldiezModelPrice(WaldiezBase):
+    """Model Price.
+
+    Attributes
+    ----------
+    prompt_price_per_1k : float
+        The prompt price per 1k tokens.
+    completion_token_price_per_1k : float
+        The completion token price per 1k tokens.
+    """
+
+    prompt_price_per_1k: Annotated[
+        Optional[float], Field(None, alias="promptPricePer1k")
+    ]
+    completion_token_price_per_1k: Annotated[
+        Optional[float], Field(None, alias="completionTokenPricePer1k")
+    ]
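
Both price fields are optional floats behind camelCase aliases, matching the convention used by `WaldiezModelAWS` above. A minimal sketch of round-tripping the aliased form (serialization behavior assumed from the pydantic `Field` definitions):

    from waldiez.models.model import WaldiezModelPrice

    price = WaldiezModelPrice(
        promptPricePer1k=0.0005,
        completionTokenPricePer1k=0.0015,
    )
    # Dump with aliases, as the values would be stored in a flow file;
    # expected: {'promptPricePer1k': 0.0005, 'completionTokenPricePer1k': 0.0015}
    print(price.model_dump(by_alias=True))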