camel-ai 0.1.6.8__py3-none-any.whl → 0.1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
-__version__ = '0.1.6.8'
+__version__ = '0.1.7.0'
 
 __all__ = [
     '__version__',
@@ -815,7 +815,7 @@ class ChatAgent(BaseAgent):
                     # When response has not been stopped
                     # Notice that only the first chunk_dict has the "role"
                     content_dict[index] += delta.content
-                else:
+                elif choice.finish_reason:
                     finish_reasons_dict[index] = choice.finish_reason
         chat_message = BaseMessage(
             role_name=self.role_name,
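
The hunk above replaces a bare `else:` with `elif choice.finish_reason:`, so that while streaming, the finish reason is only recorded on the chunk that actually carries one. A minimal sketch of the pattern, assuming `stream` is an already-created OpenAI chat-completions stream (variable names here are illustrative, not the agent's internals):

```python
content_by_choice: dict[int, str] = {}
finish_reasons: dict[int, str] = {}

for chunk in stream:  # an openai.chat.completions stream created with stream=True
    for choice in chunk.choices:
        if choice.delta and choice.delta.content:
            # Most chunks only carry a content delta; accumulate it.
            content_by_choice[choice.index] = (
                content_by_choice.get(choice.index, "") + choice.delta.content
            )
        elif choice.finish_reason:
            # Only the final chunk for a choice sets finish_reason; a bare
            # `else:` would overwrite it with None on chunks with empty deltas.
            finish_reasons[choice.index] = choice.finish_reason
```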
@@ -998,7 +998,10 @@ class ChatAgent(BaseAgent):
         Returns:
             dict: Usage dictionary.
         """
-        encoding = get_model_encoding(self.model_type.value_for_tiktoken)
+        if isinstance(self.model_type, ModelType):
+            encoding = get_model_encoding(self.model_type.value_for_tiktoken)
+        else:
+            encoding = get_model_encoding("gpt-4o-mini")
         completion_tokens = 0
         for message in output_messages:
             completion_tokens += len(encoding.encode(message.content))
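
The usage helper now falls back to the "gpt-4o-mini" encoding whenever `self.model_type` is not a `ModelType` enum member (for example, a raw model-name string from an open-source backend). A rough sketch of the same fallback idea using tiktoken directly, assuming the model name may be unknown to the tokenizer registry:

```python
import tiktoken


def encoding_for(model_name: str) -> tiktoken.Encoding:
    """Return a tokenizer for model_name, falling back to a known encoding."""
    try:
        return tiktoken.encoding_for_model(model_name)
    except KeyError:
        # Unknown or custom model names: approximate with a common encoding.
        return tiktoken.get_encoding("cl100k_base")


enc = encoding_for("my-custom-model")  # hypothetical non-OpenAI model name
completion_tokens = sum(
    len(enc.encode(msg)) for msg in ["Hello!", "How can I help you today?"]
)
```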
camel/configs/__init__.py CHANGED
@@ -20,7 +20,12 @@ from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
 from .reka_config import REKA_API_PARAMS, RekaConfig
-from .samba_config import SAMBA_API_PARAMS, SambaConfig
+from .samba_config import (
+    SAMBA_FAST_API_PARAMS,
+    SAMBA_VERSE_API_PARAMS,
+    SambaFastAPIConfig,
+    SambaVerseAPIConfig,
+)
 from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
 from .vllm_config import VLLM_API_PARAMS, VLLMConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
@@ -48,8 +53,10 @@ __all__ = [
     'MISTRAL_API_PARAMS',
     'RekaConfig',
     'REKA_API_PARAMS',
-    'SambaConfig',
-    'SAMBA_API_PARAMS',
+    'SambaFastAPIConfig',
+    'SAMBA_FAST_API_PARAMS',
+    'SambaVerseAPIConfig',
+    'SAMBA_VERSE_API_PARAMS',
     'TogetherAIConfig',
     'TOGETHERAI_API_PARAMS',
 ]
@@ -18,24 +18,27 @@ from typing import Any, Dict, Optional, Union
 from camel.configs.base_config import BaseConfig
 
 
-class SambaConfig(BaseConfig):
+class SambaFastAPIConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using the
-    SambaNova API.
+    SambaNova Fast API.
 
     Args:
         max_tokens (Optional[int], optional): the maximum number of tokens to
-            generate, e.g. 100. Defaults to `None`.
+            generate, e.g. 100.
+            (default: :obj:`2048`)
         stop (Optional[Union[str,list[str]]]): Stop generation if this token
             is detected. Or if one of these tokens is detected when providing
-            a string list. Defaults to `None`.
+            a string list.
+            (default: :obj:`None`)
         stream (Optional[bool]): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            Currently SambaNova only support stream mode. Defaults to `True`.
+            Currently SambaNova Fast API only support stream mode.
+            (default: :obj:`True`)
         stream_options (Optional[Dict]): Additional options for streaming.
-            Defaults to `{"include_usage": True}`.
+            (default: :obj:`{"include_usage": True}`)
     """
 
-    max_tokens: Optional[int] = None
+    max_tokens: Optional[int] = 2048
     stop: Optional[Union[str, list[str]]] = None
     stream: Optional[bool] = True
     stream_options: Optional[Dict] = {"include_usage": True}  # noqa: RUF012
@@ -47,4 +50,60 @@ class SambaConfig(BaseConfig):
         return config_dict
 
 
-SAMBA_API_PARAMS = {param for param in SambaConfig().model_fields.keys()}
+SAMBA_FAST_API_PARAMS = {
+    param for param in SambaFastAPIConfig().model_fields.keys()
+}
+
+
+class SambaVerseAPIConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    SambaVerse API.
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.7`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`0.95`)
+        top_k (int, optional): Only sample from the top K options for each
+            subsequent token. Used to remove "long tail" low probability
+            responses.
+            (default: :obj:`50`)
+        max_tokens (Optional[int], optional): The maximum number of tokens to
+            generate, e.g. 100.
+            (default: :obj:`2048`)
+        repetition_penalty (Optional[float], optional): The parameter for
+            repetition penalty. 1.0 means no penalty.
+            (default: :obj:`1.0`)
+        stop (Optional[Union[str,list[str]]]): Stop generation if this token
+            is detected. Or if one of these tokens is detected when providing
+            a string list.
+            (default: :obj:`""`)
+        stream (Optional[bool]): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            Currently SambaVerse API doesn't support stream mode.
+            (default: :obj:`False`)
+    """
+
+    temperature: Optional[float] = 0.7
+    top_p: Optional[float] = 0.95
+    top_k: Optional[int] = 50
+    max_tokens: Optional[int] = 2048
+    repetition_penalty: Optional[float] = 1.0
+    stop: Optional[Union[str, list[str]]] = ""
+    stream: Optional[bool] = False
+
+    def as_dict(self) -> dict[str, Any]:
+        config_dict = super().as_dict()
+        if "tools" in config_dict:
+            del config_dict["tools"]  # SambaNova does not support tool calling
+        return config_dict
+
+
+SAMBA_VERSE_API_PARAMS = {
+    param for param in SambaVerseAPIConfig().model_fields.keys()
+}
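
A short usage sketch for the two new config classes, assuming camel-ai 0.1.7.0 is installed; it mirrors the `as_dict()` behavior shown above, where any `tools` entry is dropped because SambaNova does not support tool calling:

```python
from camel.configs import (
    SAMBA_VERSE_API_PARAMS,
    SambaFastAPIConfig,
    SambaVerseAPIConfig,
)

fast_cfg = SambaFastAPIConfig(max_tokens=1024)        # streaming-only endpoint
verse_cfg = SambaVerseAPIConfig(temperature=0.3, top_k=20)

payload = verse_cfg.as_dict()
assert "tools" not in payload                         # stripped by as_dict()
assert set(payload).issubset(SAMBA_VERSE_API_PARAMS)  # only recognized params
```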
@@ -75,12 +75,19 @@ class OpenAIEmbedding(BaseEmbedding[str]):
             as a list of floating-point numbers.
         """
         # TODO: count tokens
-        response = self.client.embeddings.create(
-            input=objs,
-            model=self.model_type.value,
-            dimensions=self.output_dim,
-            **kwargs,
-        )
+        if self.model_type == EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
+            response = self.client.embeddings.create(
+                input=objs,
+                model=self.model_type.value,
+                **kwargs,
+            )
+        else:
+            response = self.client.embeddings.create(
+                input=objs,
+                model=self.model_type.value,
+                dimensions=self.output_dim,
+                **kwargs,
+            )
         return [data.embedding for data in response.data]
 
     def get_output_dim(self) -> int:
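
The branch above exists because `text-embedding-ada-002` rejects the `dimensions` argument, while the newer `text-embedding-3-*` models accept it. A standalone sketch of the same guard against the raw OpenAI client (assumes `OPENAI_API_KEY` is set; the model names are the standard OpenAI ones, not camel-specific):

```python
from openai import OpenAI

client = OpenAI()


def embed(texts: list[str], model: str, output_dim: int) -> list[list[float]]:
    # ada-002 has a fixed 1536-dim output and errors on `dimensions`,
    # so only pass it for models that support shortened embeddings.
    extra = {} if model == "text-embedding-ada-002" else {"dimensions": output_dim}
    response = client.embeddings.create(input=texts, model=model, **extra)
    return [d.embedding for d in response.data]


vectors = embed(["hello world"], "text-embedding-3-small", output_dim=256)
```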
@@ -17,6 +17,8 @@ from typing import Any, Dict, Optional
 
 from pydantic import BaseModel
 
+from camel.utils import api_keys_required
+
 
 class Firecrawl:
     r"""Firecrawl allows you to turn entire websites into LLM-ready markdown.
@@ -30,6 +32,7 @@ class Firecrawl:
     https://docs.firecrawl.dev/introduction
     """
 
+    @api_keys_required("FIRECRAWL_API_KEY")
     def __init__(
         self,
         api_key: Optional[str] = None,
@@ -211,3 +214,24 @@ class Firecrawl:
             return scrape_result.get("markdown", "")
         except Exception as e:
             raise RuntimeError(f"Failed to perform tidy scrape: {e}")
+
+    def map_site(
+        self, url: str, params: Optional[Dict[str, Any]] = None
+    ) -> list:
+        r"""Map a website to retrieve all accessible URLs.
+
+        Args:
+            url (str): The URL of the site to map.
+            params (Optional[Dict[str, Any]]): Additional parameters for the
+                map request. Defaults to `None`.
+
+        Returns:
+            list: A list containing the URLs found on the site.
+
+        Raises:
+            RuntimeError: If the mapping process fails.
+        """
+        try:
+            return self.app.map_url(url=url, params=params)
+        except Exception as e:
+            raise RuntimeError(f"Failed to map the site: {e}")
@@ -96,8 +96,6 @@
             model_class = MistralModel
         elif model_platform.is_reka and model_type.is_reka:
             model_class = RekaModel
-        elif model_platform.is_samba and model_type.is_samba:
-            model_class = SambaModel
         elif model_type == ModelType.STUB:
             model_class = StubModel
         else:
@@ -117,6 +115,8 @@
             model_class = LiteLLMModel
         elif model_platform.is_openai_compatibility_model:
             model_class = OpenAICompatibilityModel
+        elif model_platform.is_samba:
+            model_class = SambaModel
         elif model_platform.is_together:
             model_class = TogetherAIModel
         return model_class(
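
With this move, `SambaModel` is selected from the model platform alone, so the model type no longer has to be a built-in `ModelType` member that passes an `is_samba` check. A hedged sketch of creating a Samba-backed model through the factory; the enum member name `SAMBA` and the model identifier are assumptions based on the `is_samba` check above, not verified API:

```python
from camel.configs import SambaFastAPIConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType

model = ModelFactory.create(
    model_platform=ModelPlatformType.SAMBA,  # assumed enum member name
    model_type="llama3-8b",                  # hypothetical Samba model id
    model_config_dict=SambaFastAPIConfig().as_dict(),
)
```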
@@ -12,6 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
+import subprocess
 from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
@@ -48,17 +49,38 @@ class OllamaModel:
         """
         self.model_type = model_type
         self.model_config_dict = model_config_dict
-        self._url = url or os.environ.get("OLLAMA_BASE_URL")
-        # Use OpenAI cilent as interface call Ollama
+        self._url = (
+            url
+            or os.environ.get("OLLAMA_BASE_URL")
+            or "http://localhost:11434/v1"
+        )
+        if not url and not os.environ.get("OLLAMA_BASE_URL"):
+            self._start_server()
+        # Use OpenAI client as interface call Ollama
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
-            base_url=self._url or "http://localhost:11434/v1",
+            base_url=self._url,
             api_key="ollama",  # required but ignored
         )
         self._token_counter = token_counter
         self.check_model_config()
 
+    def _start_server(self) -> None:
+        r"""Starts the Ollama server in a subprocess."""
+        try:
+            subprocess.Popen(
+                ["ollama", "server", "--port", "11434"],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+            )
+            print(
+                f"Ollama server started on http://localhost:11434/v1 "
+                f"for {self.model_type} model"
+            )
+        except Exception as e:
+            print(f"Failed to start Ollama server: {e}")
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -111,7 +133,7 @@ class OllamaModel:
 
     @property
     def token_limit(self) -> int:
-        """Returns the maximum token limit for the given model.
+        r"""Returns the maximum token limit for the given model.
 
         Returns:
             int: The maximum token limit for the given model.