ag2 0.9.9__py3-none-any.whl → 0.9.10__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective registries, and is provided for informational purposes only.

This version of ag2 has been flagged as a potentially problematic release.
Files changed (88)
  1. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/METADATA +232 -210
  2. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/RECORD +88 -80
  3. autogen/_website/generate_mkdocs.py +3 -3
  4. autogen/_website/notebook_processor.py +1 -1
  5. autogen/_website/utils.py +1 -1
  6. autogen/agentchat/assistant_agent.py +15 -15
  7. autogen/agentchat/chat.py +52 -40
  8. autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
  9. autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
  10. autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
  11. autogen/agentchat/contrib/capabilities/transforms.py +1 -1
  12. autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
  13. autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
  14. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
  15. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
  16. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
  17. autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
  18. autogen/agentchat/contrib/rag/query_engine.py +11 -11
  19. autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
  20. autogen/agentchat/contrib/swarm_agent.py +3 -2
  21. autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
  22. autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
  23. autogen/agentchat/contrib/web_surfer.py +1 -1
  24. autogen/agentchat/conversable_agent.py +184 -80
  25. autogen/agentchat/group/context_expression.py +21 -21
  26. autogen/agentchat/group/handoffs.py +11 -11
  27. autogen/agentchat/group/multi_agent_chat.py +3 -2
  28. autogen/agentchat/group/on_condition.py +11 -11
  29. autogen/agentchat/group/safeguards/__init__.py +21 -0
  30. autogen/agentchat/group/safeguards/api.py +224 -0
  31. autogen/agentchat/group/safeguards/enforcer.py +1064 -0
  32. autogen/agentchat/group/safeguards/events.py +119 -0
  33. autogen/agentchat/group/safeguards/validator.py +435 -0
  34. autogen/agentchat/groupchat.py +58 -17
  35. autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
  36. autogen/agentchat/realtime/experimental/function_observer.py +2 -3
  37. autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
  38. autogen/agentchat/realtime/experimental/realtime_swarm.py +21 -10
  39. autogen/agentchat/user_proxy_agent.py +55 -53
  40. autogen/agents/experimental/document_agent/document_agent.py +1 -10
  41. autogen/agents/experimental/document_agent/parser_utils.py +5 -1
  42. autogen/browser_utils.py +4 -4
  43. autogen/cache/abstract_cache_base.py +2 -6
  44. autogen/cache/disk_cache.py +1 -6
  45. autogen/cache/in_memory_cache.py +2 -6
  46. autogen/cache/redis_cache.py +1 -5
  47. autogen/coding/__init__.py +10 -2
  48. autogen/coding/base.py +2 -1
  49. autogen/coding/docker_commandline_code_executor.py +1 -6
  50. autogen/coding/factory.py +9 -0
  51. autogen/coding/jupyter/docker_jupyter_server.py +1 -7
  52. autogen/coding/jupyter/jupyter_client.py +2 -9
  53. autogen/coding/jupyter/jupyter_code_executor.py +2 -7
  54. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  55. autogen/coding/local_commandline_code_executor.py +0 -65
  56. autogen/coding/yepcode_code_executor.py +197 -0
  57. autogen/environments/docker_python_environment.py +3 -3
  58. autogen/environments/system_python_environment.py +5 -5
  59. autogen/environments/venv_python_environment.py +5 -5
  60. autogen/events/agent_events.py +1 -1
  61. autogen/events/client_events.py +1 -1
  62. autogen/fast_depends/utils.py +10 -0
  63. autogen/graph_utils.py +5 -7
  64. autogen/import_utils.py +3 -1
  65. autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
  66. autogen/io/processors/console_event_processor.py +8 -3
  67. autogen/llm_config/config.py +168 -91
  68. autogen/llm_config/entry.py +38 -26
  69. autogen/llm_config/types.py +35 -0
  70. autogen/llm_config/utils.py +223 -0
  71. autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
  72. autogen/messages/agent_messages.py +1 -1
  73. autogen/messages/client_messages.py +1 -1
  74. autogen/oai/__init__.py +8 -1
  75. autogen/oai/client.py +10 -3
  76. autogen/oai/client_utils.py +1 -1
  77. autogen/oai/cohere.py +4 -4
  78. autogen/oai/gemini.py +4 -6
  79. autogen/oai/gemini_types.py +1 -0
  80. autogen/oai/openai_utils.py +44 -115
  81. autogen/tools/dependency_injection.py +4 -8
  82. autogen/tools/experimental/reliable/reliable.py +3 -2
  83. autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
  84. autogen/tools/function_utils.py +2 -1
  85. autogen/version.py +1 -1
  86. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/WHEEL +0 -0
  87. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/licenses/LICENSE +0 -0
  88. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/licenses/NOTICE.md +0 -0
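
The hunks shown in detail below come from the `autogen/llm_config` package — `config.py` (+168 −91), `entry.py` (+38 −26), and the new `types.py` (+35 −0) — which carry this release's main API change: positional configuration entries for `LLMConfig`, plus deprecation of the `config_list=`/`**kwargs` constructor forms, the `with llm_config:` context manager, and the `LLMConfig.current`/`.default` properties, all scheduled for removal in 0.11.0.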
autogen/llm_config/config.py

@@ -5,34 +5,20 @@
 import functools
 import json
 import re
+import warnings
 from collections.abc import Iterable
 from contextvars import ContextVar
 from pathlib import Path
 from typing import Annotated, Any, Literal, TypeAlias
 
 from pydantic import BaseModel, ConfigDict, Field
+from typing_extensions import Self, deprecated
+
+from autogen.doc_utils import export_module
 
-from autogen.oai.anthropic import AnthropicEntryDict, AnthropicLLMConfigEntry
-from autogen.oai.bedrock import BedrockEntryDict, BedrockLLMConfigEntry
-from autogen.oai.cerebras import CerebrasEntryDict, CerebrasLLMConfigEntry
-from autogen.oai.client import (
-    AzureOpenAIEntryDict,
-    AzureOpenAILLMConfigEntry,
-    DeepSeekEntyDict,
-    DeepSeekLLMConfigEntry,
-    OpenAIEntryDict,
-    OpenAILLMConfigEntry,
-    OpenAIResponsesLLMConfigEntry,
-)
-from autogen.oai.cohere import CohereEntryDict, CohereLLMConfigEntry
-from autogen.oai.gemini import GeminiEntryDict, GeminiLLMConfigEntry
-from autogen.oai.groq import GroqEntryDict, GroqLLMConfigEntry
-from autogen.oai.mistral import MistralEntryDict, MistralLLMConfigEntry
-from autogen.oai.ollama import OllamaEntryDict, OllamaLLMConfigEntry
-from autogen.oai.together import TogetherEntryDict, TogetherLLMConfigEntry
-
-from ..doc_utils import export_module
 from .entry import ApplicationConfig, LLMConfigEntry
+from .types import ConfigEntries
+from .utils import config_list_from_json, filter_config
 
 
 # Meta class to allow LLMConfig.current and LLMConfig.default to be used as class properties
@@ -41,6 +27,11 @@ class MetaLLMConfig(type):
         pass
 
     @property
+    @deprecated(
+        "`LLMConfig.current / .default` properties are deprecated. "
+        "Pass config object to usage explicitly instead. "
+        "Scheduled for removal in 0.11.0 version."
+    )
     def current(cls) -> "LLMConfig":
         current_llm_config = LLMConfig.get_current_llm_config(llm_config=None)
         if current_llm_config is None:
@@ -48,39 +39,29 @@ class MetaLLMConfig(type):
         return current_llm_config  # type: ignore[return-value]
 
     @property
+    @deprecated(
+        "`LLMConfig.current / .default` properties are deprecated. "
+        "Pass config object to usage explicitly instead. "
+        "Scheduled for removal in 0.11.0 version."
+    )
     def default(cls) -> "LLMConfig":
         return cls.current
 
 
-ConfigItem: TypeAlias = (
-    LLMConfigEntry
-    | AnthropicEntryDict
-    | BedrockEntryDict
-    | CerebrasEntryDict
-    | CohereEntryDict
-    | AzureOpenAIEntryDict
-    | OpenAIEntryDict
-    | DeepSeekEntyDict
-    | MistralEntryDict
-    | GroqEntryDict
-    | OllamaEntryDict
-    | GeminiEntryDict
-    | TogetherEntryDict
-    | dict[str, Any]
-)
+ConfigItem: TypeAlias = LLMConfigEntry | ConfigEntries | dict[str, Any]
 
 
 @export_module("autogen")
 class LLMConfig(metaclass=MetaLLMConfig):
     _current_llm_config: ContextVar["LLMConfig"] = ContextVar("current_llm_config")
+    config_list: list[ConfigEntries]
 
     def __init__(
         self,
-        *,
+        *configs: ConfigItem,
         top_p: float | None = None,
        temperature: float | None = None,
         max_tokens: int | None = None,
-        config_list: Iterable[ConfigItem] | dict[str, Any] = (),
         check_every_ms: int | None = None,
         allow_format_str_template: bool | None = None,
         response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None = None,
@@ -91,11 +72,27 @@ class LLMConfig(metaclass=MetaLLMConfig):
         tools: Iterable[Any] = (),
         functions: Iterable[Any] = (),
         routing_method: Literal["fixed_order", "round_robin"] | None = None,
-        **kwargs: Any,
+        config_list: Annotated[
+            Iterable[ConfigItem] | dict[str, Any],
+            deprecated(
+                "`LLMConfig(config_list=[{'model': ..., 'api_key': ...}, ...])` syntax is deprecated. "
+                "Use `LLMConfig({'api_key': ..., 'model': ...}, ...)` instead. "
+                "Scheduled for removal in 0.11.0 version."
+            ),
+        ] = (),
+        **kwargs: Annotated[
+            Any,
+            deprecated(
+                "`LLMConfig(api_key=..., model=...)` syntax is deprecated. "
+                "Use `LLMConfig({'api_key': ..., 'model': ...})` instead. "
+                "Scheduled for removal in 0.11.0 version."
+            ),
+        ],
     ) -> None:
-        """Initializes the LLMConfig object.
+        r"""Initializes the LLMConfig object.
 
         Args:
+            *configs: A list of LLM configuration entries or dictionaries.
             config_list: A list of LLM configuration entries or dictionaries.
             temperature: The sampling temperature for LLM generation.
             check_every_ms: The interval (in milliseconds) to check for updates
@@ -110,39 +107,80 @@ class LLMConfig(metaclass=MetaLLMConfig):
             max_tokens: The maximum number of tokens to generate.
             top_p: The nucleus sampling probability.
             routing_method: The method used to route requests (e.g., fixed_order, round_robin).
-            **kwargs: Additional keyword arguments for future extensions.
+            **kwargs: Additional keyword arguments for\ future extensions.
 
         Examples:
             ```python
-            # Example 1: create config from `kwargs` options
+            # Example 1: create config from one model dictionary
+            config = LLMConfig({
+                "model": "gpt-5-mini",
+                "api_key": os.environ["OPENAI_API_KEY"],
+            })
+
+            # Example 2: create config from list of dictionaries
             config = LLMConfig(
-                model="gpt-4o-mini",
+                {
+                    "model": "gpt-5-mini",
+                    "api_key": os.environ["OPENAI_API_KEY"],
+                },
+                {
+                    "model": "gpt-4",
+                    "api_key": os.environ["OPENAI_API_KEY"],
+                },
+            )
+
+            # Example 3 (deprecated): create config from `kwargs` options
+            config = LLMConfig(
+                model="gpt-5-mini",
                 api_key=os.environ["OPENAI_API_KEY"],
             )
 
-            # Example 2: create config from `config_list` dictionary
+            # Example 4 (deprecated): create config from `config_list` dictionary
             config = LLMConfig(
                 config_list={
-                    "model": "gpt-4o-mini",
+                    "model": "gpt-5-mini",
                     "api_key": os.environ["OPENAI_API_KEY"],
                 }
             )
 
-            # Example 3: create config from `config_list` list
+            # Example 5 (deprecated): create config from `config_list` list
             config = LLMConfig(
                 config_list=[
                     {
-                        "model": "gpt-4o-mini",
+                        "model": "gpt-5-mini",
                         "api_key": os.environ["OPENAI_API_KEY"],
                     },
                     {
-                        "model": "gpt-4",
+                        "model": "gpt-5",
                         "api_key": os.environ["OPENAI_API_KEY"],
                     },
                 ]
             )
             ```
         """
+        if isinstance(config_list, dict):
+            config_list = [config_list]
+
+        if kwargs:
+            warnings.warn(
+                (
+                    "`LLMConfig(api_key=..., model=...)` syntax is deprecated. "
+                    "Use `LLMConfig({'api_key': ..., 'model': ...})` instead. "
+                    "Scheduled for removal in 0.11.0 version."
+                ),
+                DeprecationWarning,
+            )
+
+        if config_list:
+            warnings.warn(
+                (
+                    "`LLMConfig(config_list=[{'model': ..., 'api_key': ...}, ...])` syntax is deprecated. "
+                    "Use `LLMConfig({'api_key': ..., 'model': ...}, ...)` instead. "
+                    "Scheduled for removal in 0.11.0 version."
+                ),
+                DeprecationWarning,
+            )
+
         app_config = ApplicationConfig(
             max_tokens=max_tokens,
             top_p=top_p,
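
This constructor change is the heart of the release: entries are now passed positionally, while both legacy forms (`config_list=` and bare keyword entries) still work in 0.9.10 but emit `DeprecationWarning` until their scheduled removal in 0.11.0. A minimal sketch of the migration — model names and the placeholder key are illustrative, not prescriptive:

```python
import warnings

from autogen import LLMConfig

# New style: each provider entry is a positional dict; application-level
# options (temperature, max_tokens, ...) remain keyword arguments.
config = LLMConfig(
    {"model": "gpt-5-mini", "api_key": "sk-placeholder"},
    {"model": "gpt-5", "api_key": "sk-placeholder"},
    temperature=0.2,
)

# Legacy style: still accepted, but now triggers the warning added above.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = LLMConfig(config_list=[{"model": "gpt-5-mini", "api_key": "sk-placeholder"}])

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```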
@@ -152,11 +190,7 @@ class LLMConfig(metaclass=MetaLLMConfig):
         application_level_options = app_config.model_dump(exclude_none=True)
 
         final_config_list: list[LLMConfigEntry | dict[str, Any]] = []
-
-        if isinstance(config_list, dict):
-            config_list = [config_list]
-
-        for c in filter(bool, (*config_list, kwargs)):
+        for c in filter(bool, (*configs, *config_list, kwargs)):
             if isinstance(c, LLMConfigEntry):
                 final_config_list.append(c.apply_application_config(app_config))
                 continue
@@ -183,9 +217,55 @@ class LLMConfig(metaclass=MetaLLMConfig):
             routing_method=routing_method,
         )
 
-    # used by BaseModel to create instance variables
+    @classmethod
+    def ensure_config(cls, config: "LLMConfig | ConfigItem | Iterable[ConfigItem]", /) -> "LLMConfig":
+        """Transforms passed objects to LLMConfig object.
+
+        Method to use for `Agent(llm_config={...})` cases.
+
+        >>> LLMConfig.ensure_config(LLMConfig(...))
+        LLMConfig(...)
+
+        >>> LLMConfig.ensure_config(LLMConfigEntry(...))
+        LLMConfig(LLMConfigEntry(...))
+
+        >>> LLMConfig.ensure_config({"model": "gpt-o3"})
+        LLMConfig(OpenAILLMConfigEntry(model="o3"))
+
+        >>> LLMConfig.ensure_config([{"model": "gpt-o3"}, ...])
+        LLMConfig(OpenAILLMConfigEntry(model="o3"), ...)
+
+        >>> (deprecated) LLMConfig.ensure_config({"config_list": [{ "model": "gpt-o3" }, ...]})
+        LLMConfig(OpenAILLMConfigEntry(model="o3"), ...)
+        """
+        if isinstance(config, LLMConfig):
+            return config.copy()
+
+        if isinstance(config, LLMConfigEntry):
+            return LLMConfig(config)
+
+        if isinstance(config, dict):
+            if "config_list" in config:  # backport compatibility
+                return LLMConfig(**config)
+            return LLMConfig(config)
+
+        return LLMConfig(*config)
+
+    @deprecated(
+        "`with llm_config: ...` context manager is deprecated. "
+        "Pass config object to usage explicitly instead. "
+        "Scheduled for removal in 0.11.0 version."
+    )
     def __enter__(self) -> "LLMConfig":
-        # Store previous context and set self as current
+        warnings.warn(
+            (
+                "`with llm_config: ...` context manager is deprecated. "
+                "Pass config object to usage explicitly instead. "
+                "Scheduled for removal in 0.11.0 version."
+            ),
+            DeprecationWarning,
+        )
+
         self._token = LLMConfig._current_llm_config.set(self)
         return self
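
The new `ensure_config` classmethod gives agents a single normalization point for everything `llm_config=` historically accepted. A sketch of the equivalences described in the doctest above (entry contents are illustrative):

```python
from autogen import LLMConfig

entry = {"model": "gpt-5-mini", "api_key": "sk-placeholder"}

a = LLMConfig.ensure_config(entry)                     # bare dict
b = LLMConfig.ensure_config([entry])                   # iterable of dicts
c = LLMConfig.ensure_config(LLMConfig(entry))          # existing config -> copied
d = LLMConfig.ensure_config({"config_list": [entry]})  # legacy shape (warns)

assert a.model_dump() == b.model_dump() == c.model_dump() == d.model_dump()
```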
@@ -193,7 +273,21 @@ class LLMConfig(metaclass=MetaLLMConfig):
         LLMConfig._current_llm_config.reset(self._token)
 
     @classmethod
+    @deprecated(
+        "`LLMConfig.current / .default` properties are deprecated. "
+        "Pass config object to usage explicitly instead. "
+        "Scheduled for removal in 0.11.0 version."
+    )
     def get_current_llm_config(cls, llm_config: "LLMConfig | None" = None) -> "LLMConfig | None":
+        warnings.warn(
+            (
+                "`LLMConfig.current / .default` properties are deprecated. "
+                "Pass config object to usage explicitly instead. "
+                "Scheduled for removal in 0.11.0 version."
+            ),
+            DeprecationWarning,
+        )
+
         if llm_config is not None:
             return llm_config
         try:
@@ -201,15 +295,6 @@ class LLMConfig(metaclass=MetaLLMConfig):
         except LookupError:
             return None
 
-    def _satisfies_criteria(self, value: Any, criteria_values: Any) -> bool:
-        if value is None:
-            return False
-
-        if isinstance(value, list):
-            return bool(set(value) & set(criteria_values))  # Non-empty intersection
-        else:
-            return value in criteria_values
-
     @classmethod
     def from_json(
         cls,
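
With filtering centralized, the private `_satisfies_criteria` helper is redundant: the same membership/intersection logic now lives in `filter_config`, imported at the top of the file from the new `autogen/llm_config/utils.py` (entry 70 in the file list, +223 lines).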
@@ -217,44 +302,47 @@ class LLMConfig(metaclass=MetaLLMConfig):
         env: str | None = None,
         path: str | Path | None = None,
         file_location: str | None = None,
+        filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
         **kwargs: Any,
-    ) -> "LLMConfig":
-        from autogen.oai.openai_utils import config_list_from_json
-
+    ) -> Self:
         if env is None and path is None:
             raise ValueError("Either 'env' or 'path' must be provided")
+
         if env is not None and path is not None:
             raise ValueError("Only one of 'env' or 'path' can be provided")
 
         config_list = config_list_from_json(
-            env_or_file=env if env is not None else str(path), file_location=file_location
+            env_or_file=env if env is not None else str(path),
+            file_location=file_location,
+            filter_dict=filter_dict,
         )
-        return LLMConfig(config_list=config_list, **kwargs)
+
+        return cls(*config_list, **kwargs)
 
     def where(self, *, exclude: bool = False, **kwargs: Any) -> "LLMConfig":
-        from autogen.oai.openai_utils import filter_config
+        filtered_config_list = filter_config(
+            config_list=[c.model_dump() for c in self.config_list],
+            filter_dict=kwargs,
+            exclude=exclude,
+        )
 
-        filtered_config_list = filter_config(config_list=self.config_list, filter_dict=kwargs, exclude=exclude)
         if len(filtered_config_list) == 0:
             raise ValueError(f"No config found that satisfies the filter criteria: {kwargs}")
 
         kwargs = self.model_dump()
-        kwargs["config_list"] = filtered_config_list
+        del kwargs["config_list"]
 
-        return LLMConfig(**kwargs)
+        return LLMConfig(*filtered_config_list, **kwargs)
 
-    # @functools.wraps(BaseModel.model_dump)
     def model_dump(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> dict[str, Any]:
         d = self._model.model_dump(*args, exclude_none=exclude_none, **kwargs)
         return {k: v for k, v in d.items() if not (isinstance(v, list) and len(v) == 0)}
 
-    # @functools.wraps(BaseModel.model_dump_json)
     def model_dump_json(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> str:
         # return self._model.model_dump_json(*args, exclude_none=exclude_none, **kwargs)
         d = self.model_dump(*args, exclude_none=exclude_none, **kwargs)
         return json.dumps(d)
 
-    # @functools.wraps(BaseModel.model_validate)
     def model_validate(self, *args: Any, **kwargs: Any) -> Any:
         return self._model.model_validate(*args, **kwargs)
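
Two practical effects of this hunk: `from_json` can now filter at load time via the new `filter_dict` parameter, and `where` filters the dumped entries before rebuilding the config through the positional constructor. A sketch, assuming an `OAI_CONFIG_LIST` JSON file in the usual format (file name and model names are illustrative):

```python
from autogen import LLMConfig

# Filter while loading: keep only OpenAI-typed entries.
config = LLMConfig.from_json(
    path="OAI_CONFIG_LIST",
    filter_dict={"api_type": ["openai"]},
)

# Filter an existing config; raises ValueError if nothing matches.
mini_only = config.where(model=["gpt-5-mini"])
```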
@@ -318,7 +406,8 @@ class LLMConfig(metaclass=MetaLLMConfig):
         return s
 
     def __copy__(self) -> "LLMConfig":
-        return LLMConfig(**self.model_dump())
+        options = self._model.model_dump(exclude={"config_list"})
+        return LLMConfig(*self._model.config_list, **options)
 
     def __deepcopy__(self, memo: dict[int, Any] | None = None) -> "LLMConfig":
         return self.__copy__()
@@ -359,21 +448,9 @@ class _LLMConfig(ApplicationConfig):
     tools: list[Any]
     functions: list[Any]
 
-    config_list: list[  # type: ignore[valid-type]
+    config_list: list[
         Annotated[
-            AnthropicLLMConfigEntry
-            | CerebrasLLMConfigEntry
-            | BedrockLLMConfigEntry
-            | AzureOpenAILLMConfigEntry
-            | DeepSeekLLMConfigEntry
-            | OpenAILLMConfigEntry
-            | OpenAIResponsesLLMConfigEntry
-            | CohereLLMConfigEntry
-            | GeminiLLMConfigEntry
-            | GroqLLMConfigEntry
-            | MistralLLMConfigEntry
-            | OllamaLLMConfigEntry
-            | TogetherLLMConfigEntry,
+            ConfigEntries,
             Field(discriminator="api_type"),
         ],
     ] = Field(..., min_length=1)
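
`Field(discriminator="api_type")` is pydantic's tagged-union mechanism: the `api_type` literal on each entry class picks which model to validate against, instead of trying every union member in turn. A self-contained sketch of the pattern (toy classes, not the real entries):

```python
from typing import Annotated, Literal

from pydantic import BaseModel, Field, TypeAdapter


class OpenAIEntry(BaseModel):
    api_type: Literal["openai"] = "openai"
    model: str


class AnthropicEntry(BaseModel):
    api_type: Literal["anthropic"] = "anthropic"
    model: str


Entry = Annotated[OpenAIEntry | AnthropicEntry, Field(discriminator="api_type")]

# The api_type tag routes the dict straight to the matching class.
entry = TypeAdapter(Entry).validate_python({"api_type": "anthropic", "model": "claude-sonnet-4"})
assert isinstance(entry, AnthropicEntry)
```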
autogen/llm_config/entry.py

@@ -9,7 +9,7 @@ from typing import Any
 
 from httpx import Client as httpxClient
 from pydantic import BaseModel, ConfigDict, Field, HttpUrl, SecretStr, ValidationInfo, field_serializer, field_validator
-from typing_extensions import Required, TypedDict
+from typing_extensions import Required, Self, TypedDict
 
 from .client import ModelClient
 
@@ -21,9 +21,9 @@ class LLMConfigEntryDict(TypedDict, total=False):
     top_p: float | None
     temperature: float | None
 
-    api_key: SecretStr | None
+    api_key: SecretStr | str | None
     api_version: str | None
-    base_url: HttpUrl | None
+    base_url: HttpUrl | str | None
     voice: str | None
     http_client: httpxClient | None
     model_client_cls: str | None
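
Widening `api_key` and `base_url` to also accept plain `str` aligns the TypedDict with real call sites: users pass raw strings, and pydantic coerces them to `SecretStr`/`HttpUrl` when the entry model validates.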
@@ -33,23 +33,35 @@ class LLMConfigEntryDict(TypedDict, total=False):
 
 
 class ApplicationConfig(BaseModel):
-    max_tokens: int | None = Field(default=None, ge=0)
-    top_p: float | None = Field(default=None, gt=0, lt=1)
-    temperature: float | None = Field(default=None, ge=0, le=1)
-
-    @field_validator("top_p", mode="before")
-    @classmethod
-    def check_top_p(cls, v: float | None, info: ValidationInfo) -> float | None:
-        if v is not None and info.data.get("temperature") is not None:
-            raise ValueError("temperature and top_p cannot be set at the same time.")
-        return v
-
-    @field_validator("temperature", mode="before")
-    @classmethod
-    def check_temperature(cls, v: float | None, info: ValidationInfo) -> float | None:
-        if v is not None and info.data.get("top_p") is not None:
-            raise ValueError("temperature and top_p cannot be set at the same time.")
-        return v
+    max_tokens: int | None = Field(
+        default=None,
+        ge=0,
+        description="The maximum number of tokens to generate before stopping.",
+    )
+
+    top_p: float | None = Field(
+        default=None,
+        ge=0,
+        le=1,
+        description=(
+            "An alternative to sampling with temperature, called nucleus sampling, "
+            "where the model considers the results of the tokens with top_p probability mass."
+            "So 0.1 means only the tokens comprising the top 10% probability mass are considered."
+            "You should either alter `temperature` or `top_p`, but not both."
+        ),
+    )
+
+    temperature: float | None = Field(
+        default=None,
+        ge=0,
+        le=1,
+        description=(
+            "Amount of randomness injected into the response. "
+            "Use `temperature` closer to `0.0` for analytical / multiple choice, and closer to a model's "
+            "maximum `temperature` for creative and generative tasks. "
+            "Note that even with `temperature` of `0.0`, the results will not be fully deterministic."
+        ),
+    )
 
 
 class LLMConfigEntry(ApplicationConfig, ABC):
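
Beyond adding descriptions, this hunk changes behavior in two ways: the `top_p` bounds go from exclusive (`gt=0, lt=1`) to inclusive (`ge=0, le=1`), and the two `field_validator`s that forbade setting `temperature` and `top_p` together are gone, so that combination now merely goes against the descriptions' advice. A sketch of the resulting validation, assuming `ApplicationConfig` is importable from `autogen.llm_config.entry`:

```python
from pydantic import ValidationError

from autogen.llm_config.entry import ApplicationConfig

ApplicationConfig(top_p=0.0)                   # valid now that the bound is inclusive
ApplicationConfig(temperature=0.3, top_p=0.9)  # no longer raises in 0.9.10

try:
    ApplicationConfig(temperature=1.5)         # violates le=1
except ValidationError as exc:
    print(exc.errors()[0]["type"])             # -> "less_than_equal"
```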
@@ -69,13 +81,13 @@ class LLMConfigEntry(ApplicationConfig, ABC):
     # Following field is configuration for pydantic to disallow extra fields
     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
 
-    def apply_application_config(self, application_config: ApplicationConfig) -> "LLMConfigEntry":
+    def apply_application_config(self, application_config: ApplicationConfig) -> Self:
         """Apply application level configurations."""
-        # TODO: should create a new instance instead of mutating current one
-        self.max_tokens = self.max_tokens or application_config.max_tokens
-        self.top_p = self.top_p or application_config.top_p
-        self.temperature = self.temperature or application_config.temperature
-        return self
+        new_entry = self.model_copy()
+        new_entry.max_tokens = new_entry.max_tokens or application_config.max_tokens
+        new_entry.top_p = new_entry.top_p or application_config.top_p
+        new_entry.temperature = new_entry.temperature or application_config.temperature
+        return new_entry
 
     @abstractmethod
     def create_client(self) -> "ModelClient": ...
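
Returning a `model_copy` resolves the TODO in the removed line: applying application-level defaults no longer mutates the entry it was called on. A sketch using `OpenAILLMConfigEntry`, assuming it accepts these fields (values illustrative):

```python
from autogen.llm_config.entry import ApplicationConfig
from autogen.oai.client import OpenAILLMConfigEntry

entry = OpenAILLMConfigEntry(model="gpt-5-mini", api_key="sk-placeholder")
applied = entry.apply_application_config(ApplicationConfig(temperature=0.2))

assert entry.temperature is None   # the original entry is untouched
assert applied.temperature == 0.2  # the copy picked up the application default
```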
autogen/llm_config/types.py (new file)

@@ -0,0 +1,35 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from autogen.oai.anthropic import AnthropicLLMConfigEntry
+from autogen.oai.bedrock import BedrockLLMConfigEntry
+from autogen.oai.cerebras import CerebrasLLMConfigEntry
+from autogen.oai.client import (
+    AzureOpenAILLMConfigEntry,
+    DeepSeekLLMConfigEntry,
+    OpenAILLMConfigEntry,
+    OpenAIResponsesLLMConfigEntry,
+)
+from autogen.oai.cohere import CohereLLMConfigEntry
+from autogen.oai.gemini import GeminiLLMConfigEntry
+from autogen.oai.groq import GroqLLMConfigEntry
+from autogen.oai.mistral import MistralLLMConfigEntry
+from autogen.oai.ollama import OllamaLLMConfigEntry
+from autogen.oai.together import TogetherLLMConfigEntry
+
+ConfigEntries = (
+    AnthropicLLMConfigEntry
+    | CerebrasLLMConfigEntry
+    | BedrockLLMConfigEntry
+    | AzureOpenAILLMConfigEntry
+    | DeepSeekLLMConfigEntry
+    | OpenAILLMConfigEntry
+    | OpenAIResponsesLLMConfigEntry
+    | CohereLLMConfigEntry
+    | GeminiLLMConfigEntry
+    | GroqLLMConfigEntry
+    | MistralLLMConfigEntry
+    | OllamaLLMConfigEntry
+    | TogetherLLMConfigEntry
+)
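
`ConfigEntries` is a plain PEP 604 union of the entry models, so the same name serves both as a type annotation (`config_list: list[ConfigEntries]` in `config.py` above) and, since Python 3.10 accepts unions in `isinstance`, as a runtime check. A sketch (entry fields illustrative):

```python
from autogen.llm_config.types import ConfigEntries
from autogen.oai.client import OpenAILLMConfigEntry

entry = OpenAILLMConfigEntry(model="gpt-5-mini")
assert isinstance(entry, ConfigEntries)  # X | Y unions are valid isinstance targets
```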