camel-ai 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. See the package's registry page for more details.

Files changed (102)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +326 -115
  3. camel/agents/knowledge_graph_agent.py +4 -6
  4. camel/bots/__init__.py +34 -0
  5. camel/bots/discord_app.py +138 -0
  6. camel/bots/slack/__init__.py +30 -0
  7. camel/bots/slack/models.py +158 -0
  8. camel/bots/slack/slack_app.py +255 -0
  9. camel/bots/telegram_bot.py +82 -0
  10. camel/configs/__init__.py +1 -2
  11. camel/configs/anthropic_config.py +2 -5
  12. camel/configs/base_config.py +6 -6
  13. camel/configs/gemini_config.py +1 -1
  14. camel/configs/groq_config.py +2 -3
  15. camel/configs/ollama_config.py +1 -2
  16. camel/configs/openai_config.py +2 -23
  17. camel/configs/samba_config.py +2 -2
  18. camel/configs/togetherai_config.py +1 -1
  19. camel/configs/vllm_config.py +1 -1
  20. camel/configs/zhipuai_config.py +2 -3
  21. camel/embeddings/openai_embedding.py +2 -2
  22. camel/loaders/__init__.py +2 -0
  23. camel/loaders/chunkr_reader.py +163 -0
  24. camel/loaders/firecrawl_reader.py +13 -45
  25. camel/loaders/unstructured_io.py +65 -29
  26. camel/messages/__init__.py +1 -0
  27. camel/messages/func_message.py +2 -2
  28. camel/models/__init__.py +2 -4
  29. camel/models/anthropic_model.py +32 -26
  30. camel/models/azure_openai_model.py +39 -36
  31. camel/models/base_model.py +31 -20
  32. camel/models/gemini_model.py +37 -29
  33. camel/models/groq_model.py +29 -23
  34. camel/models/litellm_model.py +44 -61
  35. camel/models/mistral_model.py +33 -30
  36. camel/models/model_factory.py +66 -76
  37. camel/models/nemotron_model.py +33 -23
  38. camel/models/ollama_model.py +42 -47
  39. camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +36 -41
  40. camel/models/openai_model.py +48 -29
  41. camel/models/reka_model.py +30 -28
  42. camel/models/samba_model.py +82 -177
  43. camel/models/stub_model.py +2 -2
  44. camel/models/togetherai_model.py +37 -43
  45. camel/models/vllm_model.py +43 -50
  46. camel/models/zhipuai_model.py +33 -27
  47. camel/retrievers/auto_retriever.py +28 -10
  48. camel/retrievers/vector_retriever.py +72 -44
  49. camel/societies/babyagi_playing.py +6 -3
  50. camel/societies/role_playing.py +17 -3
  51. camel/storages/__init__.py +2 -0
  52. camel/storages/graph_storages/__init__.py +2 -0
  53. camel/storages/graph_storages/graph_element.py +3 -5
  54. camel/storages/graph_storages/nebula_graph.py +547 -0
  55. camel/storages/key_value_storages/json.py +6 -1
  56. camel/tasks/task.py +11 -4
  57. camel/tasks/task_prompt.py +4 -0
  58. camel/toolkits/__init__.py +20 -7
  59. camel/toolkits/arxiv_toolkit.py +155 -0
  60. camel/toolkits/ask_news_toolkit.py +653 -0
  61. camel/toolkits/base.py +2 -3
  62. camel/toolkits/code_execution.py +6 -7
  63. camel/toolkits/dalle_toolkit.py +6 -6
  64. camel/toolkits/{openai_function.py → function_tool.py} +34 -11
  65. camel/toolkits/github_toolkit.py +9 -10
  66. camel/toolkits/google_maps_toolkit.py +7 -7
  67. camel/toolkits/google_scholar_toolkit.py +146 -0
  68. camel/toolkits/linkedin_toolkit.py +7 -7
  69. camel/toolkits/math_toolkit.py +8 -8
  70. camel/toolkits/open_api_toolkit.py +5 -5
  71. camel/toolkits/reddit_toolkit.py +7 -7
  72. camel/toolkits/retrieval_toolkit.py +5 -5
  73. camel/toolkits/search_toolkit.py +9 -9
  74. camel/toolkits/slack_toolkit.py +11 -11
  75. camel/toolkits/twitter_toolkit.py +378 -452
  76. camel/toolkits/weather_toolkit.py +6 -6
  77. camel/toolkits/whatsapp_toolkit.py +177 -0
  78. camel/types/__init__.py +6 -1
  79. camel/types/enums.py +43 -85
  80. camel/types/openai_types.py +3 -0
  81. camel/types/unified_model_type.py +104 -0
  82. camel/utils/__init__.py +0 -2
  83. camel/utils/async_func.py +7 -7
  84. camel/utils/commons.py +40 -4
  85. camel/utils/token_counting.py +30 -212
  86. camel/workforce/__init__.py +6 -6
  87. camel/workforce/base.py +9 -5
  88. camel/workforce/prompts.py +179 -0
  89. camel/workforce/role_playing_worker.py +181 -0
  90. camel/workforce/{single_agent_node.py → single_agent_worker.py} +49 -23
  91. camel/workforce/task_channel.py +7 -8
  92. camel/workforce/utils.py +20 -50
  93. camel/workforce/{worker_node.py → worker.py} +15 -12
  94. camel/workforce/workforce.py +456 -19
  95. camel_ai-0.2.3.dist-info/LICENSE +201 -0
  96. {camel_ai-0.2.0.dist-info → camel_ai-0.2.3.dist-info}/METADATA +39 -65
  97. {camel_ai-0.2.0.dist-info → camel_ai-0.2.3.dist-info}/RECORD +98 -86
  98. {camel_ai-0.2.0.dist-info → camel_ai-0.2.3.dist-info}/WHEEL +1 -1
  99. camel/models/open_source_model.py +0 -170
  100. camel/workforce/manager_node.py +0 -299
  101. camel/workforce/role_playing_node.py +0 -168
  102. camel/workforce/workforce_prompt.py +0 -125
@@ -17,54 +17,64 @@ from typing import Any, Dict, List, Optional, Union
17
17
 
18
18
  from openai import OpenAI, Stream
19
19
 
20
- from camel.configs import OLLAMA_API_PARAMS
20
+ from camel.configs import OLLAMA_API_PARAMS, OllamaConfig
21
21
  from camel.messages import OpenAIMessage
22
- from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
22
+ from camel.models import BaseModelBackend
23
+ from camel.types import (
24
+ ChatCompletion,
25
+ ChatCompletionChunk,
26
+ ModelType,
27
+ )
23
28
  from camel.utils import BaseTokenCounter, OpenAITokenCounter
24
29
 
25
30
 
26
- class OllamaModel:
27
- r"""Ollama service interface."""
31
+ class OllamaModel(BaseModelBackend):
32
+ r"""Ollama service interface.
33
+
34
+ Args:
35
+ model_type (Union[ModelType, str]): Model for which a backend is
36
+ created.
37
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
38
+ that will be fed into:obj:`openai.ChatCompletion.create()`.
39
+ If:obj:`None`, :obj:`OllamaConfig().as_dict()` will be used.
40
+ (default: :obj:`None`)
41
+ api_key (Optional[str], optional): The API key for authenticating with
42
+ the model service. Ollama doesn't need API key, it would be
43
+ ignored if set. (default: :obj:`None`)
44
+ url (Optional[str], optional): The url to the model service.
45
+ (default: :obj:`None`)
46
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
47
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
48
+ ModelType.GPT_4O_MINI)` will be used.
49
+ (default: :obj:`None`)
50
+
51
+ References:
52
+ https://github.com/ollama/ollama/blob/main/docs/openai.md
53
+ """
28
54
 
29
55
  def __init__(
30
56
  self,
31
- model_type: str,
32
- model_config_dict: Dict[str, Any],
57
+ model_type: Union[ModelType, str],
58
+ model_config_dict: Optional[Dict[str, Any]] = None,
59
+ api_key: Optional[str] = None,
33
60
  url: Optional[str] = None,
34
61
  token_counter: Optional[BaseTokenCounter] = None,
35
62
  ) -> None:
36
- r"""Constructor for Ollama backend with OpenAI compatibility.
37
-
38
- # Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
39
-
40
- Args:
41
- model_type (str): Model for which a backend is created.
42
- model_config_dict (Dict[str, Any]): A dictionary that will
43
- be fed into openai.ChatCompletion.create().
44
- url (Optional[str]): The url to the model service. (default:
45
- :obj:`"http://localhost:11434/v1"`)
46
- token_counter (Optional[BaseTokenCounter]): Token counter to use
47
- for the model. If not provided, `OpenAITokenCounter(ModelType.
48
- GPT_4O_MINI)` will be used.
49
- """
50
- self.model_type = model_type
51
- self.model_config_dict = model_config_dict
52
- self._url = (
53
- url
54
- or os.environ.get("OLLAMA_BASE_URL")
55
- or "http://localhost:11434/v1"
63
+ if model_config_dict is None:
64
+ model_config_dict = OllamaConfig().as_dict()
65
+ url = url or os.environ.get("OLLAMA_BASE_URL")
66
+ super().__init__(
67
+ model_type, model_config_dict, api_key, url, token_counter
56
68
  )
57
- if not url and not os.environ.get("OLLAMA_BASE_URL"):
69
+ if not self._url:
58
70
  self._start_server()
59
71
  # Use OpenAI client as interface call Ollama
60
72
  self._client = OpenAI(
61
73
  timeout=60,
62
74
  max_retries=3,
75
+ api_key="Set-but-ignored", # required but ignored
63
76
  base_url=self._url,
64
- api_key="ollama", # required but ignored
65
77
  )
66
- self._token_counter = token_counter
67
- self.check_model_config()
68
78
 
69
79
  def _start_server(self) -> None:
70
80
  r"""Starts the Ollama server in a subprocess."""
@@ -74,8 +84,9 @@ class OllamaModel:
74
84
  stdout=subprocess.PIPE,
75
85
  stderr=subprocess.PIPE,
76
86
  )
87
+ self._url = "http://localhost:11434/v1"
77
88
  print(
78
- f"Ollama server started on http://localhost:11434/v1 "
89
+ f"Ollama server started on {self._url} "
79
90
  f"for {self.model_type} model."
80
91
  )
81
92
  except Exception as e:
@@ -131,22 +142,6 @@ class OllamaModel:
131
142
  )
132
143
  return response
133
144
 
134
- @property
135
- def token_limit(self) -> int:
136
- r"""Returns the maximum token limit for the given model.
137
-
138
- Returns:
139
- int: The maximum token limit for the given model.
140
- """
141
- max_tokens = self.model_config_dict.get("max_tokens")
142
- if isinstance(max_tokens, int):
143
- return max_tokens
144
- print(
145
- "Must set `max_tokens` as an integer in `model_config_dict` when"
146
- " setting up the model. Using 4096 as default value."
147
- )
148
- return 4096
149
-
150
145
  @property
151
146
  def stream(self) -> bool:
152
147
  r"""Returns whether the model is in stream mode, which sends partial
@@ -12,51 +12,59 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
+ import os
15
16
  from typing import Any, Dict, List, Optional, Union
16
17
 
17
18
  from openai import OpenAI, Stream
18
19
 
19
20
  from camel.messages import OpenAIMessage
20
- from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
21
+ from camel.models import BaseModelBackend
22
+ from camel.types import (
23
+ ChatCompletion,
24
+ ChatCompletionChunk,
25
+ ModelType,
26
+ )
21
27
  from camel.utils import (
22
28
  BaseTokenCounter,
23
29
  OpenAITokenCounter,
24
30
  )
25
31
 
26
32
 
27
- class OpenAICompatibilityModel:
28
- r"""Constructor for model backend supporting OpenAI compatibility."""
33
+ class OpenAICompatibleModel(BaseModelBackend):
34
+ r"""Constructor for model backend supporting OpenAI compatibility.
35
+
36
+ Args:
37
+ model_type (Union[ModelType, str]): Model for which a backend is
38
+ created.
39
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
40
+ that will be fed into:obj:`openai.ChatCompletion.create()`. If
41
+ :obj:`None`, :obj:`{}` will be used. (default: :obj:`None`)
42
+ api_key (str): The API key for authenticating with the model service.
43
+ url (str): The url to the model service.
44
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
45
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
46
+ ModelType.GPT_4O_MINI)` will be used.
47
+ (default: :obj:`None`)
48
+ """
29
49
 
30
50
  def __init__(
31
51
  self,
32
- model_type: str,
33
- model_config_dict: Dict[str, Any],
34
- api_key: str,
35
- url: str,
52
+ model_type: Union[ModelType, str],
53
+ model_config_dict: Optional[Dict[str, Any]] = None,
54
+ api_key: Optional[str] = None,
55
+ url: Optional[str] = None,
36
56
  token_counter: Optional[BaseTokenCounter] = None,
37
57
  ) -> None:
38
- r"""Constructor for model backend.
39
-
40
- Args:
41
- model_type (str): Model for which a backend is created.
42
- model_config_dict (Dict[str, Any]): A dictionary that will
43
- be fed into openai.ChatCompletion.create().
44
- api_key (str): The API key for authenticating with the
45
- model service. (default: :obj:`None`)
46
- url (str): The url to the model service. (default:
47
- :obj:`None`)
48
- token_counter (Optional[BaseTokenCounter]): Token counter to use
49
- for the model. If not provided, `OpenAITokenCounter(ModelType.
50
- GPT_4O_MINI)` will be used.
51
- """
52
- self.model_type = model_type
53
- self.model_config_dict = model_config_dict
54
- self._token_counter = token_counter
58
+ self.api_key = api_key or os.environ.get("OPENAI_COMPATIBILIY_API_KEY")
59
+ self.url = url or os.environ.get("OPENAI_COMPATIBILIY_API_BASE_URL")
60
+ super().__init__(
61
+ model_type, model_config_dict, api_key, url, token_counter
62
+ )
55
63
  self._client = OpenAI(
56
64
  timeout=60,
57
65
  max_retries=3,
58
- api_key=api_key,
59
- base_url=url,
66
+ api_key=self._api_key,
67
+ base_url=self._url,
60
68
  )
61
69
 
62
70
  def run(
@@ -104,18 +112,5 @@ class OpenAICompatibilityModel:
104
112
  """
105
113
  return self.model_config_dict.get('stream', False)
106
114
 
107
- @property
108
- def token_limit(self) -> int:
109
- r"""Returns the maximum token limit for the given model.
110
-
111
- Returns:
112
- int: The maximum token limit for the given model.
113
- """
114
- max_tokens = self.model_config_dict.get("max_tokens")
115
- if isinstance(max_tokens, int):
116
- return max_tokens
117
- print(
118
- "Must set `max_tokens` as an integer in `model_config_dict` when"
119
- " setting up the model. Using 4096 as default value."
120
- )
121
- return 4096
115
+ def check_model_config(self):
116
+ pass
@@ -12,14 +12,19 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  import os
15
+ import warnings
15
16
  from typing import Any, Dict, List, Optional, Union
16
17
 
17
18
  from openai import OpenAI, Stream
18
19
 
19
- from camel.configs import OPENAI_API_PARAMS
20
+ from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
20
21
  from camel.messages import OpenAIMessage
21
22
  from camel.models import BaseModelBackend
22
- from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
23
+ from camel.types import (
24
+ ChatCompletion,
25
+ ChatCompletionChunk,
26
+ ModelType,
27
+ )
23
28
  from camel.utils import (
24
29
  BaseTokenCounter,
25
30
  OpenAITokenCounter,
@@ -28,36 +33,39 @@ from camel.utils import (
28
33
 
29
34
 
30
35
  class OpenAIModel(BaseModelBackend):
31
- r"""OpenAI API in a unified BaseModelBackend interface."""
36
+ r"""OpenAI API in a unified BaseModelBackend interface.
37
+
38
+ Args:
39
+ model_type (Union[ModelType, str]): Model for which a backend is
40
+ created, one of GPT_* series.
41
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
42
+ that will be fed into:obj:`openai.ChatCompletion.create()`. If
43
+ :obj:`None`, :obj:`ChatGPTConfig().as_dict()` will be used.
44
+ (default: :obj:`None`)
45
+ api_key (Optional[str], optional): The API key for authenticating
46
+ with the OpenAI service. (default: :obj:`None`)
47
+ url (Optional[str], optional): The url to the OpenAI service.
48
+ (default: :obj:`None`)
49
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
50
+ use for the model. If not provided, :obj:`OpenAITokenCounter` will
51
+ be used. (default: :obj:`None`)
52
+ """
32
53
 
33
54
  def __init__(
34
55
  self,
35
- model_type: ModelType,
36
- model_config_dict: Dict[str, Any],
56
+ model_type: Union[ModelType, str],
57
+ model_config_dict: Optional[Dict[str, Any]] = None,
37
58
  api_key: Optional[str] = None,
38
59
  url: Optional[str] = None,
39
60
  token_counter: Optional[BaseTokenCounter] = None,
40
61
  ) -> None:
41
- r"""Constructor for OpenAI backend.
42
-
43
- Args:
44
- model_type (ModelType): Model for which a backend is created,
45
- one of GPT_* series.
46
- model_config_dict (Dict[str, Any]): A dictionary that will
47
- be fed into openai.ChatCompletion.create().
48
- api_key (Optional[str]): The API key for authenticating with the
49
- OpenAI service. (default: :obj:`None`)
50
- url (Optional[str]): The url to the OpenAI service. (default:
51
- :obj:`None`)
52
- token_counter (Optional[BaseTokenCounter]): Token counter to use
53
- for the model. If not provided, `OpenAITokenCounter` will
54
- be used.
55
- """
62
+ if model_config_dict is None:
63
+ model_config_dict = ChatGPTConfig().as_dict()
64
+ api_key = api_key or os.environ.get("OPENAI_API_KEY")
65
+ url = url or os.environ.get("OPENAI_API_BASE_URL")
56
66
  super().__init__(
57
67
  model_type, model_config_dict, api_key, url, token_counter
58
68
  )
59
- self._url = url or os.environ.get("OPENAI_API_BASE_URL")
60
- self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
61
69
  self._client = OpenAI(
62
70
  timeout=60,
63
71
  max_retries=3,
@@ -96,13 +104,23 @@ class OpenAIModel(BaseModelBackend):
96
104
  # o1-preview and o1-mini have Beta limitations
97
105
  # reference: https://platform.openai.com/docs/guides/reasoning
98
106
  if self.model_type in [ModelType.O1_MINI, ModelType.O1_PREVIEW]:
107
+ warnings.warn(
108
+ "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
109
+ "which has certain limitations, reference: "
110
+ "`https://platform.openai.com/docs/guides/reasoning`.",
111
+ UserWarning,
112
+ )
113
+
99
114
  # Remove system message that is not supported in o1 model.
100
115
  messages = [msg for msg in messages if msg.get("role") != "system"]
101
116
 
102
- # Remove unsupported parameters and reset the fixed parameters
103
- del self.model_config_dict["stream"]
104
- del self.model_config_dict["tools"]
105
- del self.model_config_dict["tool_choice"]
117
+ # Check and remove unsupported parameters and reset the fixed
118
+ # parameters
119
+ unsupported_keys = ["stream", "tools", "tool_choice"]
120
+ for key in unsupported_keys:
121
+ if key in self.model_config_dict:
122
+ del self.model_config_dict[key]
123
+
106
124
  self.model_config_dict["temperature"] = 1.0
107
125
  self.model_config_dict["top_p"] = 1.0
108
126
  self.model_config_dict["n"] = 1.0
@@ -111,7 +129,7 @@ class OpenAIModel(BaseModelBackend):
111
129
 
112
130
  response = self._client.chat.completions.create(
113
131
  messages=messages,
114
- model=self.model_type.value,
132
+ model=self.model_type,
115
133
  **self.model_config_dict,
116
134
  )
117
135
  return response
@@ -133,8 +151,9 @@ class OpenAIModel(BaseModelBackend):
133
151
 
134
152
  @property
135
153
  def stream(self) -> bool:
136
- r"""Returns whether the model is in stream mode,
137
- which sends partial results each time.
154
+ r"""Returns whether the model is in stream mode, which sends partial
155
+ results each time.
156
+
138
157
  Returns:
139
158
  bool: Whether the model is in stream mode.
140
159
  """
@@ -11,10 +11,9 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- import os
15
- from typing import TYPE_CHECKING, Any, Dict, List, Optional
14
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
16
15
 
17
- from camel.configs import REKA_API_PARAMS
16
+ from camel.configs import REKA_API_PARAMS, RekaConfig
18
17
  from camel.messages import OpenAIMessage
19
18
  from camel.models import BaseModelBackend
20
19
  from camel.types import ChatCompletion, ModelType
@@ -22,6 +21,7 @@ from camel.utils import (
22
21
  BaseTokenCounter,
23
22
  OpenAITokenCounter,
24
23
  api_keys_required,
24
+ dependencies_required,
25
25
  )
26
26
 
27
27
  if TYPE_CHECKING:
@@ -39,40 +39,42 @@ except (ImportError, AttributeError):
39
39
 
40
40
 
41
41
  class RekaModel(BaseModelBackend):
42
- r"""Reka API in a unified BaseModelBackend interface."""
43
-
42
+ r"""Reka API in a unified BaseModelBackend interface.
43
+
44
+ Args:
45
+ model_type (Union[ModelType, str]): Model for which a backend is
46
+ created, one of REKA_* series.
47
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
48
+ that will be fed into:obj:`Reka.chat.create()`. If :obj:`None`,
49
+ :obj:`RekaConfig().as_dict()` will be used. (default: :obj:`None`)
50
+ api_key (Optional[str], optional): The API key for authenticating with
51
+ the Reka service. (default: :obj:`None`)
52
+ url (Optional[str], optional): The url to the Reka service.
53
+ (default: :obj:`None`)
54
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
55
+ use for the model. If not provided, :obj:`OpenAITokenCounter` will
56
+ be used. (default: :obj:`None`)
57
+ """
58
+
59
+ @dependencies_required('reka')
44
60
  def __init__(
45
61
  self,
46
- model_type: ModelType,
47
- model_config_dict: Dict[str, Any],
62
+ model_type: Union[ModelType, str],
63
+ model_config_dict: Optional[Dict[str, Any]] = None,
48
64
  api_key: Optional[str] = None,
49
65
  url: Optional[str] = None,
50
66
  token_counter: Optional[BaseTokenCounter] = None,
51
67
  ) -> None:
52
- r"""Constructor for Reka backend.
68
+ from reka.client import Reka
53
69
 
54
- Args:
55
- model_type (ModelType): Model for which a backend is created,
56
- one of REKA_* series.
57
- model_config_dict (Dict[str, Any]): A dictionary that will
58
- be fed into `Reka.chat.create`.
59
- api_key (Optional[str]): The API key for authenticating with the
60
- Reka service. (default: :obj:`None`)
61
- url (Optional[str]): The url to the Reka service.
62
- token_counter (Optional[BaseTokenCounter]): Token counter to use
63
- for the model. If not provided, `OpenAITokenCounter` will be
64
- used.
65
- """
70
+ if model_config_dict is None:
71
+ model_config_dict = RekaConfig().as_dict()
72
+ api_key = api_key or os.environ.get("REKA_API_KEY")
73
+ url = url or os.environ.get("REKA_API_BASE_URL")
66
74
  super().__init__(
67
75
  model_type, model_config_dict, api_key, url, token_counter
68
76
  )
69
- self._api_key = api_key or os.environ.get("REKA_API_KEY")
70
- self._url = url or os.environ.get("REKA_SERVER_URL")
71
-
72
- from reka.client import Reka
73
-
74
77
  self._client = Reka(api_key=self._api_key, base_url=self._url)
75
- self._token_counter: Optional[BaseTokenCounter] = None
76
78
 
77
79
  def _convert_reka_to_openai_response(
78
80
  self, response: 'ChatResponse'
@@ -184,7 +186,7 @@ class RekaModel(BaseModelBackend):
184
186
 
185
187
  response = self._client.chat.create(
186
188
  messages=reka_messages,
187
- model=self.model_type.value,
189
+ model=self.model_type,
188
190
  **self.model_config_dict,
189
191
  )
190
192
 
@@ -200,7 +202,7 @@ class RekaModel(BaseModelBackend):
200
202
  prompt_tokens=openai_response.usage.input_tokens, # type: ignore[union-attr]
201
203
  completion=openai_response.choices[0].message.content,
202
204
  completion_tokens=openai_response.usage.output_tokens, # type: ignore[union-attr]
203
- model=self.model_type.value,
205
+ model=self.model_type,
204
206
  )
205
207
  record(llm_event)
206
208