camel-ai 0.2.66__py3-none-any.whl → 0.2.68__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; see the registry's advisory page for more details.

Files changed (68):
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +3 -0
  3. camel/configs/qianfan_config.py +85 -0
  4. camel/environments/__init__.py +12 -0
  5. camel/environments/rlcards_env.py +860 -0
  6. camel/interpreters/docker/Dockerfile +2 -5
  7. camel/loaders/firecrawl_reader.py +4 -4
  8. camel/memories/blocks/vectordb_block.py +8 -1
  9. camel/memories/context_creators/score_based.py +123 -19
  10. camel/models/__init__.py +2 -0
  11. camel/models/aiml_model.py +8 -0
  12. camel/models/anthropic_model.py +122 -2
  13. camel/models/aws_bedrock_model.py +8 -0
  14. camel/models/azure_openai_model.py +14 -5
  15. camel/models/base_model.py +4 -0
  16. camel/models/cohere_model.py +9 -2
  17. camel/models/crynux_model.py +8 -0
  18. camel/models/deepseek_model.py +8 -0
  19. camel/models/gemini_model.py +8 -0
  20. camel/models/groq_model.py +8 -0
  21. camel/models/internlm_model.py +8 -0
  22. camel/models/litellm_model.py +5 -0
  23. camel/models/lmstudio_model.py +14 -1
  24. camel/models/mistral_model.py +15 -1
  25. camel/models/model_factory.py +6 -0
  26. camel/models/modelscope_model.py +8 -0
  27. camel/models/moonshot_model.py +8 -0
  28. camel/models/nemotron_model.py +17 -2
  29. camel/models/netmind_model.py +8 -0
  30. camel/models/novita_model.py +8 -0
  31. camel/models/nvidia_model.py +8 -0
  32. camel/models/ollama_model.py +8 -0
  33. camel/models/openai_compatible_model.py +23 -5
  34. camel/models/openai_model.py +21 -4
  35. camel/models/openrouter_model.py +8 -0
  36. camel/models/ppio_model.py +8 -0
  37. camel/models/qianfan_model.py +104 -0
  38. camel/models/qwen_model.py +8 -0
  39. camel/models/reka_model.py +18 -3
  40. camel/models/samba_model.py +17 -3
  41. camel/models/sglang_model.py +20 -5
  42. camel/models/siliconflow_model.py +8 -0
  43. camel/models/stub_model.py +8 -1
  44. camel/models/togetherai_model.py +8 -0
  45. camel/models/vllm_model.py +7 -0
  46. camel/models/volcano_model.py +14 -1
  47. camel/models/watsonx_model.py +4 -1
  48. camel/models/yi_model.py +8 -0
  49. camel/models/zhipuai_model.py +8 -0
  50. camel/societies/workforce/prompts.py +71 -22
  51. camel/societies/workforce/role_playing_worker.py +3 -8
  52. camel/societies/workforce/single_agent_worker.py +37 -9
  53. camel/societies/workforce/task_channel.py +25 -20
  54. camel/societies/workforce/utils.py +104 -14
  55. camel/societies/workforce/worker.py +98 -16
  56. camel/societies/workforce/workforce.py +1289 -101
  57. camel/societies/workforce/workforce_logger.py +613 -0
  58. camel/tasks/task.py +16 -5
  59. camel/toolkits/__init__.py +2 -0
  60. camel/toolkits/code_execution.py +1 -1
  61. camel/toolkits/playwright_mcp_toolkit.py +2 -1
  62. camel/toolkits/pptx_toolkit.py +4 -4
  63. camel/types/enums.py +32 -0
  64. camel/types/unified_model_type.py +5 -0
  65. {camel_ai-0.2.66.dist-info → camel_ai-0.2.68.dist-info}/METADATA +4 -3
  66. {camel_ai-0.2.66.dist-info → camel_ai-0.2.68.dist-info}/RECORD +68 -64
  67. {camel_ai-0.2.66.dist-info → camel_ai-0.2.68.dist-info}/WHEEL +0 -0
  68. {camel_ai-0.2.66.dist-info → camel_ai-0.2.68.dist-info}/licenses/LICENSE +0 -0
@@ -64,6 +64,10 @@ class GeminiModel(OpenAICompatibleModel):
64
64
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
65
65
  environment variable or default to 180 seconds.
66
66
  (default: :obj:`None`)
67
+ max_retries (int, optional): Maximum number of retries for API calls.
68
+ (default: :obj:`3`)
69
+ **kwargs (Any): Additional arguments to pass to the client
70
+ initialization.
67
71
  """
68
72
 
69
73
  @api_keys_required(
@@ -79,6 +83,8 @@ class GeminiModel(OpenAICompatibleModel):
79
83
  url: Optional[str] = None,
80
84
  token_counter: Optional[BaseTokenCounter] = None,
81
85
  timeout: Optional[float] = None,
86
+ max_retries: int = 3,
87
+ **kwargs: Any,
82
88
  ) -> None:
83
89
  if model_config_dict is None:
84
90
  model_config_dict = GeminiConfig().as_dict()
@@ -95,6 +101,8 @@ class GeminiModel(OpenAICompatibleModel):
95
101
  url=url,
96
102
  token_counter=token_counter,
97
103
  timeout=timeout,
104
+ max_retries=max_retries,
105
+ **kwargs,
98
106
  )
99
107
 
100
108
  def _process_messages(self, messages) -> List[OpenAIMessage]:
@@ -45,6 +45,10 @@ class GroqModel(OpenAICompatibleModel):
45
45
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
46
46
  environment variable or default to 180 seconds.
47
47
  (default: :obj:`None`)
48
+ max_retries (int, optional): Maximum number of retries for API calls.
49
+ (default: :obj:`3`)
50
+ **kwargs (Any): Additional arguments to pass to the client
51
+ initialization.
48
52
  """
49
53
 
50
54
  @api_keys_required([("api_key", "GROQ_API_KEY")])
@@ -56,6 +60,8 @@ class GroqModel(OpenAICompatibleModel):
56
60
  url: Optional[str] = None,
57
61
  token_counter: Optional[BaseTokenCounter] = None,
58
62
  timeout: Optional[float] = None,
63
+ max_retries: int = 3,
64
+ **kwargs: Any,
59
65
  ) -> None:
60
66
  if model_config_dict is None:
61
67
  model_config_dict = GroqConfig().as_dict()
@@ -71,6 +77,8 @@ class GroqModel(OpenAICompatibleModel):
71
77
  url=url,
72
78
  token_counter=token_counter,
73
79
  timeout=timeout,
80
+ max_retries=max_retries,
81
+ **kwargs,
74
82
  )
75
83
 
76
84
  def check_model_config(self):
@@ -54,6 +54,10 @@ class InternLMModel(OpenAICompatibleModel):
54
54
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
55
55
  environment variable or default to 180 seconds.
56
56
  (default: :obj:`None`)
57
+ max_retries (int, optional): Maximum number of retries for API calls.
58
+ (default: :obj:`3`)
59
+ **kwargs (Any): Additional arguments to pass to the client
60
+ initialization.
57
61
  """
58
62
 
59
63
  @api_keys_required(
@@ -69,6 +73,8 @@ class InternLMModel(OpenAICompatibleModel):
69
73
  url: Optional[str] = None,
70
74
  token_counter: Optional[BaseTokenCounter] = None,
71
75
  timeout: Optional[float] = None,
76
+ max_retries: int = 3,
77
+ **kwargs: Any,
72
78
  ) -> None:
73
79
  self.model_config = model_config_dict or InternLMConfig().as_dict()
74
80
  api_key = api_key or os.environ.get("INTERNLM_API_KEY")
@@ -84,6 +90,8 @@ class InternLMModel(OpenAICompatibleModel):
84
90
  url=url,
85
91
  token_counter=token_counter,
86
92
  timeout=timeout,
93
+ max_retries=max_retries,
94
+ **kwargs,
87
95
  )
88
96
 
89
97
  async def _arun(
@@ -59,6 +59,8 @@ class LiteLLMModel(BaseModelBackend):
59
59
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
60
60
  environment variable or default to 180 seconds.
61
61
  (default: :obj:`None`)
62
+ **kwargs (Any): Additional arguments to pass to the client
63
+ initialization.
62
64
  """
63
65
 
64
66
  # NOTE: Currently stream mode is not supported.
@@ -72,6 +74,7 @@ class LiteLLMModel(BaseModelBackend):
72
74
  url: Optional[str] = None,
73
75
  token_counter: Optional[BaseTokenCounter] = None,
74
76
  timeout: Optional[float] = None,
77
+ **kwargs: Any,
75
78
  ) -> None:
76
79
  from litellm import completion
77
80
 
@@ -82,6 +85,7 @@ class LiteLLMModel(BaseModelBackend):
82
85
  model_type, model_config_dict, api_key, url, token_counter, timeout
83
86
  )
84
87
  self.client = completion
88
+ self.kwargs = kwargs
85
89
 
86
90
  def _convert_response_from_litellm_to_openai(
87
91
  self, response
@@ -173,6 +177,7 @@ class LiteLLMModel(BaseModelBackend):
173
177
  model=self.model_type,
174
178
  messages=messages,
175
179
  **self.model_config_dict,
180
+ **self.kwargs,
176
181
  )
177
182
  response = self._convert_response_from_litellm_to_openai(response)
178
183
 
@@ -43,6 +43,10 @@ class LMStudioModel(OpenAICompatibleModel):
43
43
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
44
44
  environment variable or default to 180 seconds.
45
45
  (default: :obj:`None`)
46
+ max_retries (int, optional): Maximum number of retries for API calls.
47
+ (default: :obj:`3`)
48
+ **kwargs (Any): Additional arguments to pass to the client
49
+ initialization.
46
50
  """
47
51
 
48
52
  def __init__(
@@ -53,6 +57,8 @@ class LMStudioModel(OpenAICompatibleModel):
53
57
  url: Optional[str] = None,
54
58
  token_counter: Optional[BaseTokenCounter] = None,
55
59
  timeout: Optional[float] = None,
60
+ max_retries: int = 3,
61
+ **kwargs: Any,
56
62
  ) -> None:
57
63
  if model_config_dict is None:
58
64
  model_config_dict = LMStudioConfig().as_dict()
@@ -62,7 +68,14 @@ class LMStudioModel(OpenAICompatibleModel):
62
68
  )
63
69
  timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
64
70
  super().__init__(
65
- model_type, model_config_dict, api_key, url, token_counter, timeout
71
+ model_type,
72
+ model_config_dict,
73
+ api_key,
74
+ url,
75
+ token_counter,
76
+ timeout,
77
+ max_retries=max_retries,
78
+ **kwargs,
66
79
  )
67
80
 
68
81
  def check_model_config(self):
@@ -80,6 +80,10 @@ class MistralModel(BaseModelBackend):
80
80
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
81
81
  environment variable or default to 180 seconds.
82
82
  (default: :obj:`None`)
83
+ max_retries (int, optional): Maximum number of retries
84
+ for API calls. (default: :obj:`3`)
85
+ **kwargs (Any): Additional arguments to pass to the client
86
+ initialization.
83
87
  """
84
88
 
85
89
  @api_keys_required(
@@ -96,6 +100,8 @@ class MistralModel(BaseModelBackend):
96
100
  url: Optional[str] = None,
97
101
  token_counter: Optional[BaseTokenCounter] = None,
98
102
  timeout: Optional[float] = None,
103
+ max_retries: int = 3,
104
+ **kwargs: Any,
99
105
  ) -> None:
100
106
  from mistralai import Mistral
101
107
 
@@ -106,7 +112,14 @@ class MistralModel(BaseModelBackend):
106
112
  url = url or os.environ.get("MISTRAL_API_BASE_URL")
107
113
  timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
108
114
  super().__init__(
109
- model_type, model_config_dict, api_key, url, token_counter, timeout
115
+ model_type,
116
+ model_config_dict,
117
+ api_key,
118
+ url,
119
+ token_counter,
120
+ timeout,
121
+ max_retries,
122
+ **kwargs,
110
123
  )
111
124
  self._client = Mistral(
112
125
  timeout_ms=int(self._timeout * 1000)
@@ -114,6 +127,7 @@ class MistralModel(BaseModelBackend):
114
127
  else None,
115
128
  api_key=self._api_key,
116
129
  server_url=self._url,
130
+ **kwargs,
117
131
  )
118
132
 
119
133
  def _to_openai_response(
@@ -39,6 +39,7 @@ from camel.models.openai_compatible_model import OpenAICompatibleModel
39
39
  from camel.models.openai_model import OpenAIModel
40
40
  from camel.models.openrouter_model import OpenRouterModel
41
41
  from camel.models.ppio_model import PPIOModel
42
+ from camel.models.qianfan_model import QianfanModel
42
43
  from camel.models.qwen_model import QwenModel
43
44
  from camel.models.reka_model import RekaModel
44
45
  from camel.models.samba_model import SambaModel
@@ -98,6 +99,7 @@ class ModelFactory:
98
99
  ModelPlatformType.MODELSCOPE: ModelScopeModel,
99
100
  ModelPlatformType.NOVITA: NovitaModel,
100
101
  ModelPlatformType.WATSONX: WatsonXModel,
102
+ ModelPlatformType.QIANFAN: QianfanModel,
101
103
  ModelPlatformType.CRYNUX: CrynuxModel,
102
104
  }
103
105
 
@@ -110,6 +112,7 @@ class ModelFactory:
110
112
  api_key: Optional[str] = None,
111
113
  url: Optional[str] = None,
112
114
  timeout: Optional[float] = None,
115
+ max_retries: int = 3,
113
116
  **kwargs,
114
117
  ) -> BaseModelBackend:
115
118
  r"""Creates an instance of `BaseModelBackend` of the specified type.
@@ -134,6 +137,8 @@ class ModelFactory:
134
137
  (default: :obj:`None`)
135
138
  timeout (Optional[float], optional): The timeout value in seconds
136
139
  for API calls. (default: :obj:`None`)
140
+ max_retries (int, optional): Maximum number of retries
141
+ for API calls. (default: :obj:`3`)
137
142
  **kwargs: Additional model-specific parameters that will be passed
138
143
  to the model constructor. For example, Azure OpenAI models may
139
144
  require `api_version`, `azure_deployment_name`,
@@ -186,6 +191,7 @@ class ModelFactory:
186
191
  url=url,
187
192
  token_counter=token_counter,
188
193
  timeout=timeout,
194
+ max_retries=max_retries,
189
195
  **kwargs,
190
196
  )
191
197
 
@@ -56,6 +56,10 @@ class ModelScopeModel(OpenAICompatibleModel):
56
56
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
57
57
  environment variable or default to 180 seconds.
58
58
  (default: :obj:`None`)
59
+ max_retries (int, optional): Maximum number of retries for API calls.
60
+ (default: :obj:`3`)
61
+ **kwargs (Any): Additional arguments to pass to the client
62
+ initialization.
59
63
  """
60
64
 
61
65
  @api_keys_required(
@@ -71,6 +75,8 @@ class ModelScopeModel(OpenAICompatibleModel):
71
75
  url: Optional[str] = None,
72
76
  token_counter: Optional[BaseTokenCounter] = None,
73
77
  timeout: Optional[float] = None,
78
+ max_retries: int = 3,
79
+ **kwargs: Any,
74
80
  ) -> None:
75
81
  if model_config_dict is None:
76
82
  model_config_dict = ModelScopeConfig().as_dict()
@@ -87,6 +93,8 @@ class ModelScopeModel(OpenAICompatibleModel):
87
93
  url=url,
88
94
  token_counter=token_counter,
89
95
  timeout=timeout,
96
+ max_retries=max_retries,
97
+ **kwargs,
90
98
  )
91
99
 
92
100
  def _post_handle_response(
@@ -54,6 +54,10 @@ class MoonshotModel(OpenAICompatibleModel):
54
54
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
55
55
  environment variable or default to 180 seconds.
56
56
  (default: :obj:`None`)
57
+ max_retries (int, optional): Maximum number of retries for API calls.
58
+ (default: :obj:`3`)
59
+ **kwargs (Any): Additional arguments to pass to the client
60
+ initialization.
57
61
  """
58
62
 
59
63
  @api_keys_required([("api_key", "MOONSHOT_API_KEY")])
@@ -65,6 +69,8 @@ class MoonshotModel(OpenAICompatibleModel):
65
69
  url: Optional[str] = None,
66
70
  token_counter: Optional[BaseTokenCounter] = None,
67
71
  timeout: Optional[float] = None,
72
+ max_retries: int = 3,
73
+ **kwargs: Any,
68
74
  ) -> None:
69
75
  if model_config_dict is None:
70
76
  model_config_dict = MoonshotConfig().as_dict()
@@ -81,6 +87,8 @@ class MoonshotModel(OpenAICompatibleModel):
81
87
  url=url,
82
88
  token_counter=token_counter,
83
89
  timeout=timeout,
90
+ max_retries=max_retries,
91
+ **kwargs,
84
92
  )
85
93
 
86
94
  async def _arun(
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  import os
15
- from typing import Optional, Union
15
+ from typing import Any, Optional, Union
16
16
 
17
17
  from camel.models.openai_compatible_model import OpenAICompatibleModel
18
18
  from camel.types import ModelType
@@ -36,6 +36,10 @@ class NemotronModel(OpenAICompatibleModel):
36
36
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
37
37
  environment variable or default to 180 seconds.
38
38
  (default: :obj:`None`)
39
+ max_retries (int, optional): Maximum number of retries for API calls.
40
+ (default: :obj:`3`)
41
+ **kwargs (Any): Additional arguments to pass to the client
42
+ initialization.
39
43
 
40
44
  Notes:
41
45
  Nemotron model doesn't support additional model config like OpenAI.
@@ -52,13 +56,24 @@ class NemotronModel(OpenAICompatibleModel):
52
56
  api_key: Optional[str] = None,
53
57
  url: Optional[str] = None,
54
58
  timeout: Optional[float] = None,
59
+ max_retries: int = 3,
60
+ **kwargs: Any,
55
61
  ) -> None:
56
62
  url = url or os.environ.get(
57
63
  "NVIDIA_API_BASE_URL", "https://integrate.api.nvidia.com/v1"
58
64
  )
59
65
  api_key = api_key or os.environ.get("NVIDIA_API_KEY")
60
66
  timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
61
- super().__init__(model_type, {}, api_key, url, None, timeout)
67
+ super().__init__(
68
+ model_type,
69
+ {},
70
+ api_key,
71
+ url,
72
+ None,
73
+ timeout,
74
+ max_retries=max_retries,
75
+ **kwargs,
76
+ )
62
77
 
63
78
  @property
64
79
  def token_counter(self) -> BaseTokenCounter:
@@ -47,6 +47,10 @@ class NetmindModel(OpenAICompatibleModel):
47
47
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
48
48
  environment variable or default to 180 seconds.
49
49
  (default: :obj:`None`)
50
+ max_retries (int, optional): Maximum number of retries for API calls.
51
+ (default: :obj:`3`)
52
+ **kwargs (Any): Additional arguments to pass to the client
53
+ initialization.
50
54
  """
51
55
 
52
56
  @api_keys_required(
@@ -62,6 +66,8 @@ class NetmindModel(OpenAICompatibleModel):
62
66
  url: Optional[str] = None,
63
67
  token_counter: Optional[BaseTokenCounter] = None,
64
68
  timeout: Optional[float] = None,
69
+ max_retries: int = 3,
70
+ **kwargs: Any,
65
71
  ) -> None:
66
72
  if model_config_dict is None:
67
73
  model_config_dict = NetmindConfig().as_dict()
@@ -78,6 +84,8 @@ class NetmindModel(OpenAICompatibleModel):
78
84
  url=url,
79
85
  token_counter=token_counter,
80
86
  timeout=timeout,
87
+ max_retries=max_retries,
88
+ **kwargs,
81
89
  )
82
90
 
83
91
  def check_model_config(self):
@@ -47,6 +47,10 @@ class NovitaModel(OpenAICompatibleModel):
47
47
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
48
48
  environment variable or default to 180 seconds.
49
49
  (default: :obj:`None`)
50
+ max_retries (int, optional): Maximum number of retries for API calls.
51
+ (default: :obj:`3`)
52
+ **kwargs (Any): Additional arguments to pass to the client
53
+ initialization.
50
54
  """
51
55
 
52
56
  @api_keys_required(
@@ -62,6 +66,8 @@ class NovitaModel(OpenAICompatibleModel):
62
66
  url: Optional[str] = None,
63
67
  token_counter: Optional[BaseTokenCounter] = None,
64
68
  timeout: Optional[float] = None,
69
+ max_retries: int = 3,
70
+ **kwargs: Any,
65
71
  ) -> None:
66
72
  if model_config_dict is None:
67
73
  model_config_dict = NovitaConfig().as_dict()
@@ -77,6 +83,8 @@ class NovitaModel(OpenAICompatibleModel):
77
83
  url=url,
78
84
  token_counter=token_counter,
79
85
  timeout=timeout,
86
+ max_retries=max_retries,
87
+ **kwargs,
80
88
  )
81
89
 
82
90
  def check_model_config(self):
@@ -43,6 +43,10 @@ class NvidiaModel(OpenAICompatibleModel):
43
43
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
44
44
  environment variable or default to 180 seconds.
45
45
  (default: :obj:`None`)
46
+ max_retries (int, optional): Maximum number of retries for API calls.
47
+ (default: :obj:`3`)
48
+ **kwargs (Any): Additional arguments to pass to the client
49
+ initialization.
46
50
  """
47
51
 
48
52
  @api_keys_required(
@@ -58,6 +62,8 @@ class NvidiaModel(OpenAICompatibleModel):
58
62
  url: Optional[str] = None,
59
63
  token_counter: Optional[BaseTokenCounter] = None,
60
64
  timeout: Optional[float] = None,
65
+ max_retries: int = 3,
66
+ **kwargs: Any,
61
67
  ) -> None:
62
68
  if model_config_dict is None:
63
69
  model_config_dict = NvidiaConfig().as_dict()
@@ -73,6 +79,8 @@ class NvidiaModel(OpenAICompatibleModel):
73
79
  url=url,
74
80
  token_counter=token_counter,
75
81
  timeout=timeout,
82
+ max_retries=max_retries,
83
+ **kwargs,
76
84
  )
77
85
 
78
86
  def check_model_config(self):
@@ -47,6 +47,10 @@ class OllamaModel(OpenAICompatibleModel):
47
47
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
48
48
  environment variable or default to 180 seconds.
49
49
  (default: :obj:`None`)
50
+ max_retries (int, optional): Maximum number of retries for API calls.
51
+ (default: :obj:`3`)
52
+ **kwargs (Any): Additional arguments to pass to the client
53
+ initialization.
50
54
 
51
55
  References:
52
56
  https://github.com/ollama/ollama/blob/main/docs/openai.md
@@ -60,6 +64,8 @@ class OllamaModel(OpenAICompatibleModel):
60
64
  url: Optional[str] = None,
61
65
  token_counter: Optional[BaseTokenCounter] = None,
62
66
  timeout: Optional[float] = None,
67
+ max_retries: int = 3,
68
+ **kwargs: Any,
63
69
  ) -> None:
64
70
  if model_config_dict is None:
65
71
  model_config_dict = OllamaConfig().as_dict()
@@ -77,6 +83,8 @@ class OllamaModel(OpenAICompatibleModel):
77
83
  url=self._url,
78
84
  token_counter=token_counter,
79
85
  timeout=timeout,
86
+ max_retries=max_retries,
87
+ **kwargs,
80
88
  )
81
89
 
82
90
  def _start_server(self) -> None:
@@ -67,6 +67,11 @@ class OpenAICompatibleModel(BaseModelBackend):
67
67
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
68
68
  environment variable or default to 180 seconds.
69
69
  (default: :obj:`None`)
70
+ max_retries (int, optional): Maximum number of retries for API calls.
71
+ (default: :obj:`3`)
72
+ **kwargs (Any): Additional arguments to pass to the
73
+ OpenAI client initialization. These can include parameters like
74
+ 'organization', 'default_headers', 'http_client', etc.
70
75
  """
71
76
 
72
77
  def __init__(
@@ -77,12 +82,21 @@ class OpenAICompatibleModel(BaseModelBackend):
77
82
  url: Optional[str] = None,
78
83
  token_counter: Optional[BaseTokenCounter] = None,
79
84
  timeout: Optional[float] = None,
85
+ max_retries: int = 3,
86
+ **kwargs: Any,
80
87
  ) -> None:
81
88
  api_key = api_key or os.environ.get("OPENAI_COMPATIBILITY_API_KEY")
82
89
  url = url or os.environ.get("OPENAI_COMPATIBILITY_API_BASE_URL")
83
90
  timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
91
+
84
92
  super().__init__(
85
- model_type, model_config_dict, api_key, url, token_counter, timeout
93
+ model_type,
94
+ model_config_dict,
95
+ api_key,
96
+ url,
97
+ token_counter,
98
+ timeout,
99
+ max_retries,
86
100
  )
87
101
  if is_langfuse_available():
88
102
  from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
@@ -90,28 +104,32 @@ class OpenAICompatibleModel(BaseModelBackend):
90
104
 
91
105
  self._client = LangfuseOpenAI(
92
106
  timeout=self._timeout,
93
- max_retries=3,
107
+ max_retries=max_retries,
94
108
  base_url=self._url,
95
109
  api_key=self._api_key,
110
+ **kwargs,
96
111
  )
97
112
  self._async_client = LangfuseAsyncOpenAI(
98
113
  timeout=self._timeout,
99
- max_retries=3,
114
+ max_retries=max_retries,
100
115
  base_url=self._url,
101
116
  api_key=self._api_key,
117
+ **kwargs,
102
118
  )
103
119
  else:
104
120
  self._client = OpenAI(
105
121
  timeout=self._timeout,
106
- max_retries=3,
122
+ max_retries=max_retries,
107
123
  base_url=self._url,
108
124
  api_key=self._api_key,
125
+ **kwargs,
109
126
  )
110
127
  self._async_client = AsyncOpenAI(
111
128
  timeout=self._timeout,
112
- max_retries=3,
129
+ max_retries=max_retries,
113
130
  base_url=self._url,
114
131
  api_key=self._api_key,
132
+ **kwargs,
115
133
  )
116
134
 
117
135
  @observe()
@@ -76,6 +76,11 @@ class OpenAIModel(BaseModelBackend):
76
76
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
77
77
  environment variable or default to 180 seconds.
78
78
  (default: :obj:`None`)
79
+ max_retries (int, optional): Maximum number of retries for API calls.
80
+ (default: :obj:`3`)
81
+ **kwargs (Any): Additional arguments to pass to the
82
+ OpenAI client initialization. These can include parameters like
83
+ 'organization', 'default_headers', 'http_client', etc.
79
84
  """
80
85
 
81
86
  @api_keys_required(
@@ -91,6 +96,8 @@ class OpenAIModel(BaseModelBackend):
91
96
  url: Optional[str] = None,
92
97
  token_counter: Optional[BaseTokenCounter] = None,
93
98
  timeout: Optional[float] = None,
99
+ max_retries: int = 3,
100
+ **kwargs: Any,
94
101
  ) -> None:
95
102
  if model_config_dict is None:
96
103
  model_config_dict = ChatGPTConfig().as_dict()
@@ -98,6 +105,9 @@ class OpenAIModel(BaseModelBackend):
98
105
  url = url or os.environ.get("OPENAI_API_BASE_URL")
99
106
  timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
100
107
 
108
+ # Store additional client args for later use
109
+ self._max_retries = max_retries
110
+
101
111
  super().__init__(
102
112
  model_type, model_config_dict, api_key, url, token_counter, timeout
103
113
  )
@@ -106,30 +116,37 @@ class OpenAIModel(BaseModelBackend):
106
116
  from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
107
117
  from langfuse.openai import OpenAI as LangfuseOpenAI
108
118
 
119
+ # Create Langfuse client with base parameters and additional
120
+ # arguments
109
121
  self._client = LangfuseOpenAI(
110
122
  timeout=self._timeout,
111
- max_retries=3,
123
+ max_retries=self._max_retries,
112
124
  base_url=self._url,
113
125
  api_key=self._api_key,
126
+ **kwargs,
114
127
  )
115
128
  self._async_client = LangfuseAsyncOpenAI(
116
129
  timeout=self._timeout,
117
- max_retries=3,
130
+ max_retries=self._max_retries,
118
131
  base_url=self._url,
119
132
  api_key=self._api_key,
133
+ **kwargs,
120
134
  )
121
135
  else:
136
+ # Create client with base parameters and additional arguments
122
137
  self._client = OpenAI(
123
138
  timeout=self._timeout,
124
- max_retries=3,
139
+ max_retries=self._max_retries,
125
140
  base_url=self._url,
126
141
  api_key=self._api_key,
142
+ **kwargs,
127
143
  )
128
144
  self._async_client = AsyncOpenAI(
129
145
  timeout=self._timeout,
130
- max_retries=3,
146
+ max_retries=self._max_retries,
131
147
  base_url=self._url,
132
148
  api_key=self._api_key,
149
+ **kwargs,
133
150
  )
134
151
 
135
152
  def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
@@ -46,6 +46,10 @@ class OpenRouterModel(OpenAICompatibleModel):
46
46
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
47
47
  environment variable or default to 180 seconds.
48
48
  (default: :obj:`None`)
49
+ max_retries (int, optional): Maximum number of retries for API calls.
50
+ (default: :obj:`3`)
51
+ **kwargs (Any): Additional arguments to pass to the client
52
+ initialization.
49
53
  """
50
54
 
51
55
  @api_keys_required([("api_key", "OPENROUTER_API_KEY")])
@@ -57,6 +61,8 @@ class OpenRouterModel(OpenAICompatibleModel):
57
61
  url: Optional[str] = None,
58
62
  token_counter: Optional[BaseTokenCounter] = None,
59
63
  timeout: Optional[float] = None,
64
+ max_retries: int = 3,
65
+ **kwargs: Any,
60
66
  ) -> None:
61
67
  if model_config_dict is None:
62
68
  model_config_dict = OpenRouterConfig().as_dict()
@@ -72,6 +78,8 @@ class OpenRouterModel(OpenAICompatibleModel):
72
78
  url=url,
73
79
  token_counter=token_counter,
74
80
  timeout=timeout,
81
+ max_retries=max_retries,
82
+ **kwargs,
75
83
  )
76
84
 
77
85
  def check_model_config(self):
@@ -47,6 +47,10 @@ class PPIOModel(OpenAICompatibleModel):
47
47
  API calls. If not provided, will fall back to the MODEL_TIMEOUT
48
48
  environment variable or default to 180 seconds.
49
49
  (default: :obj:`None`)
50
+ max_retries (int, optional): Maximum number of retries for API calls.
51
+ (default: :obj:`3`)
52
+ **kwargs (Any): Additional arguments to pass to the client
53
+ initialization.
50
54
  """
51
55
 
52
56
  @api_keys_required(
@@ -62,6 +66,8 @@ class PPIOModel(OpenAICompatibleModel):
62
66
  url: Optional[str] = None,
63
67
  token_counter: Optional[BaseTokenCounter] = None,
64
68
  timeout: Optional[float] = None,
69
+ max_retries: int = 3,
70
+ **kwargs: Any,
65
71
  ) -> None:
66
72
  if model_config_dict is None:
67
73
  model_config_dict = PPIOConfig().as_dict()
@@ -77,6 +83,8 @@ class PPIOModel(OpenAICompatibleModel):
77
83
  url=url,
78
84
  token_counter=token_counter,
79
85
  timeout=timeout,
86
+ max_retries=max_retries,
87
+ **kwargs,
80
88
  )
81
89
 
82
90
  def check_model_config(self):