camel-ai 0.1.5__py3-none-any.whl → 0.1.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (78) hide show
  1. camel/agents/__init__.py +2 -0
  2. camel/agents/chat_agent.py +217 -36
  3. camel/agents/deductive_reasoner_agent.py +86 -31
  4. camel/agents/knowledge_graph_agent.py +41 -18
  5. camel/agents/role_assignment_agent.py +4 -1
  6. camel/agents/search_agent.py +122 -0
  7. camel/bots/__init__.py +20 -0
  8. camel/bots/discord_bot.py +103 -0
  9. camel/bots/telegram_bot.py +84 -0
  10. camel/configs/__init__.py +3 -0
  11. camel/configs/anthropic_config.py +1 -1
  12. camel/configs/litellm_config.py +113 -0
  13. camel/embeddings/__init__.py +2 -0
  14. camel/embeddings/openai_embedding.py +2 -2
  15. camel/embeddings/sentence_transformers_embeddings.py +6 -5
  16. camel/embeddings/vlm_embedding.py +146 -0
  17. camel/functions/__init__.py +9 -0
  18. camel/functions/open_api_function.py +150 -29
  19. camel/functions/open_api_specs/biztoc/__init__.py +13 -0
  20. camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
  21. camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
  22. camel/functions/open_api_specs/create_qr_code/__init__.py +13 -0
  23. camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
  24. camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
  25. camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
  26. camel/functions/open_api_specs/outschool/__init__.py +13 -0
  27. camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
  28. camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
  29. camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
  30. camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
  31. camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
  32. camel/functions/open_api_specs/security_config.py +21 -0
  33. camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
  34. camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
  35. camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
  36. camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
  37. camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
  38. camel/functions/openai_function.py +3 -1
  39. camel/functions/search_functions.py +104 -171
  40. camel/functions/slack_functions.py +2 -1
  41. camel/human.py +3 -1
  42. camel/loaders/base_io.py +3 -1
  43. camel/loaders/unstructured_io.py +16 -22
  44. camel/messages/base.py +135 -46
  45. camel/models/__init__.py +4 -0
  46. camel/models/anthropic_model.py +20 -14
  47. camel/models/base_model.py +2 -0
  48. camel/models/litellm_model.py +112 -0
  49. camel/models/model_factory.py +8 -1
  50. camel/models/open_source_model.py +1 -0
  51. camel/models/openai_model.py +6 -2
  52. camel/models/zhipuai_model.py +125 -0
  53. camel/prompts/__init__.py +2 -0
  54. camel/prompts/base.py +2 -1
  55. camel/prompts/descripte_video_prompt.py +33 -0
  56. camel/prompts/task_prompt_template.py +9 -3
  57. camel/retrievers/auto_retriever.py +20 -11
  58. camel/retrievers/base.py +4 -2
  59. camel/retrievers/bm25_retriever.py +2 -1
  60. camel/retrievers/cohere_rerank_retriever.py +2 -1
  61. camel/retrievers/vector_retriever.py +10 -4
  62. camel/societies/babyagi_playing.py +2 -1
  63. camel/societies/role_playing.py +2 -1
  64. camel/storages/graph_storages/base.py +1 -0
  65. camel/storages/graph_storages/neo4j_graph.py +5 -3
  66. camel/storages/vectordb_storages/base.py +2 -1
  67. camel/storages/vectordb_storages/milvus.py +5 -2
  68. camel/toolkits/github_toolkit.py +120 -26
  69. camel/types/__init__.py +3 -2
  70. camel/types/enums.py +25 -1
  71. camel/utils/__init__.py +11 -2
  72. camel/utils/commons.py +74 -4
  73. camel/utils/constants.py +26 -0
  74. camel/utils/token_counting.py +58 -5
  75. {camel_ai-0.1.5.dist-info → camel_ai-0.1.5.2.dist-info}/METADATA +29 -13
  76. camel_ai-0.1.5.2.dist-info/RECORD +148 -0
  77. camel_ai-0.1.5.dist-info/RECORD +0 -119
  78. {camel_ai-0.1.5.dist-info → camel_ai-0.1.5.2.dist-info}/WHEEL +0 -0
@@ -0,0 +1,122 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import Any, Optional
15
+
16
+ from camel.agents.chat_agent import ChatAgent
17
+ from camel.messages import BaseMessage
18
+ from camel.prompts import TextPrompt
19
+ from camel.types import ModelType, RoleType
20
+ from camel.utils import create_chunks
21
+
22
+
23
class SearchAgent(ChatAgent):
    r"""An agent that summarizes text based on a query and evaluates the
    relevance of an answer.

    Args:
        model_type (ModelType, optional): The type of model to use for the
            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
        model_config (Any, optional): The configuration for the model.
            (default: :obj:`None`)
    """

    def __init__(
        self,
        model_type: ModelType = ModelType.GPT_3_5_TURBO,
        model_config: Optional[Any] = None,
    ) -> None:
        # A generic assistant persona; the real behavior comes from the
        # task-specific prompts built in the methods below.
        system_message = BaseMessage(
            role_name="Assistant",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You are a helpful assistant.",
        )
        super().__init__(system_message, model_type, model_config)

    def summarize_text(self, text: str, query: str) -> str:
        r"""Summarize the information from the text, based on the query.

        Args:
            text (str): Text to summarize.
            query (str): What information you want.

        Returns:
            str: Strings with information.
        """
        # Clear any previous conversation state so chunks are summarized
        # independently of earlier calls.
        self.reset()

        summary_prompt = TextPrompt(
            '''Gather information from this text that relative to the
            question, but do not directly answer the question.\nquestion:
            {query}\ntext '''
        )
        summary_prompt = summary_prompt.format(query=query)
        # Max length of each chunk
        max_len = 3000
        results = ""
        chunks = create_chunks(text, max_len)
        # Summarize each chunk separately, collecting the partial summaries.
        for i, chunk in enumerate(chunks, start=1):
            prompt = summary_prompt + str(i) + ": " + chunk
            user_msg = BaseMessage.make_user_message(
                role_name="User",
                content=prompt,
            )
            result = self.step(user_msg).msg.content
            results += result + "\n"

        # Final summarization pass: combine the per-chunk summaries into
        # one answer to the original query.
        final_prompt = TextPrompt(
            '''Here are some summarized texts which split from one text. Using
            the information to answer the question. If can't find the answer,
            you must answer "I can not find the answer to the query" and
            explain why.\n Query:\n{query}.\n\nText:\n'''
        )
        final_prompt = final_prompt.format(query=query)
        prompt = final_prompt + results

        user_msg = BaseMessage.make_user_message(
            role_name="User",
            content=prompt,
        )
        response = self.step(user_msg).msg.content

        return response

    def continue_search(self, query: str, answer: str) -> bool:
        r"""Ask whether to continue search or not based on the provided answer.

        Args:
            query (str): The question.
            answer (str): The answer to the question.

        Returns:
            bool: `True` if the user want to continue search, `False`
                otherwise.
        """
        prompt = TextPrompt(
            "Do you think the ANSWER can answer the QUERY? "
            "Use only 'yes' or 'no' to answer.\n"
            "===== QUERY =====\n{query}\n\n"
            "===== ANSWER =====\n{answer}"
        )
        prompt = prompt.format(query=query, answer=answer)
        user_msg = BaseMessage.make_user_message(
            role_name="User",
            content=prompt,
        )
        response = self.step(user_msg).msg.content
        # The model judging "yes" means the answer suffices, so no further
        # search is needed. `response` is already a str; no cast required.
        if "yes" in response.lower():
            return False
        return True
camel/bots/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .discord_bot import DiscordBot
15
+ from .telegram_bot import TelegramBot
16
+
17
+ __all__ = [
18
+ 'DiscordBot',
19
+ 'TelegramBot',
20
+ ]
@@ -0,0 +1,103 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import TYPE_CHECKING, List, Optional
16
+
17
+ from camel.agents import ChatAgent
18
+ from camel.messages import BaseMessage
19
+
20
+ if TYPE_CHECKING:
21
+ from discord import Message
22
+
23
+
24
class DiscordBot:
    r"""Represents a Discord bot that is powered by an agent.

    Attributes:
        chat_agent (ChatAgent): Chat agent that will power the bot.
        channel_ids (List[int], optional): The channel IDs that the bot will
            listen to.
        discord_token (str, optional): The bot token.
    """

    def __init__(
        self,
        chat_agent: ChatAgent,
        channel_ids: Optional[List[int]] = None,
        discord_token: Optional[str] = None,
    ) -> None:
        self.chat_agent = chat_agent
        self.channel_ids = channel_ids
        # Fall back to the environment when no explicit token was passed.
        self.token = discord_token or os.getenv('DISCORD_TOKEN')

        if not self.token:
            raise ValueError(
                "`DISCORD_TOKEN` not found in environment variables. Get it"
                " here: `https://discord.com/developers/applications`."
            )

        # `discord.py` is an optional dependency; import lazily so the rest
        # of the package works without it.
        try:
            import discord
        except ImportError:
            raise ImportError(
                "Please install `discord` first. You can install it by"
                " running `python3 -m pip install -U discord.py`."
            )

        # The message-content intent must be enabled to read message text.
        intents = discord.Intents.default()
        intents.message_content = True
        self.client = discord.Client(intents=intents)

        # Wire our coroutines up as the client's event handlers.
        self.client.event(self.on_ready)
        self.client.event(self.on_message)

    def run(self) -> None:
        r"""Start the Discord bot using its token.

        This method starts the Discord bot by running the client with the
        provided token.
        """
        self.client.run(self.token)  # type: ignore[arg-type]

    async def on_ready(self) -> None:
        r"""This method is called when the bot has successfully connected to
        the Discord server.

        It prints a message indicating that the bot has logged in and displays
        the username of the bot.
        """
        print(f'We have logged in as {self.client.user}')

    async def on_message(self, message: 'Message') -> None:
        r"""Event handler for when a message is received.

        Args:
            message (discord.Message): The message object received.
        """
        # Never respond to the bot's own messages.
        if message.author == self.client.user:
            return

        # Honor the channel allow-list when one was configured.
        if self.channel_ids and message.channel.id not in self.channel_ids:
            return

        # Only reply when the bot is explicitly mentioned.
        if not self.client.user or not self.client.user.mentioned_in(message):
            return

        # Each message starts a fresh conversation with the agent.
        self.chat_agent.reset()

        user_msg = BaseMessage.make_user_message(
            role_name="User", content=message.content
        )
        response = self.chat_agent.step(user_msg)
        await message.channel.send(response.msg.content)
@@ -0,0 +1,84 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import TYPE_CHECKING, Optional
16
+
17
+ from camel.agents import ChatAgent
18
+ from camel.messages import BaseMessage
19
+
20
+ # Conditionally import telebot types only for type checking
21
+ if TYPE_CHECKING:
22
+ from telebot.types import Message # type: ignore[import-untyped]
23
+
24
+
25
class TelegramBot:
    r"""Represents a Telegram bot that is powered by an agent.

    Attributes:
        chat_agent (ChatAgent): Chat agent that will power the bot.
        telegram_token (str, optional): The bot token.
    """

    def __init__(
        self,
        chat_agent: ChatAgent,
        telegram_token: Optional[str] = None,
    ) -> None:
        self.chat_agent = chat_agent

        # Prefer the explicit token; otherwise read it from the environment.
        self.token = telegram_token or os.getenv('TELEGRAM_TOKEN')
        if not self.token:
            raise ValueError(
                "`TELEGRAM_TOKEN` not found in environment variables. "
                "Get it from t.me/BotFather."
            )

        # `telebot` is an optional dependency; import lazily.
        try:
            import telebot  # type: ignore[import-untyped]
        except ImportError:
            raise ImportError(
                "Please install `telegram_bot` first. "
                "You can install it by running "
                "`pip install pyTelegramBotAPI`."
            )
        self.bot = telebot.TeleBot(token=self.token)

        # Route every incoming message to ``on_message``.
        self.bot.message_handler(func=lambda message: True)(self.on_message)

    def run(self) -> None:
        r"""Start the Telegram bot."""
        print("Telegram bot is running...")
        self.bot.infinity_polling()

    def on_message(self, message: 'Message') -> None:
        r"""Handles incoming messages from the user.

        Args:
            message (types.Message): The incoming message object.
        """
        # Each message starts a fresh conversation with the agent.
        self.chat_agent.reset()

        # Ignore non-text updates (photos, stickers, and so on).
        if not message.text:
            return

        prompt_msg = BaseMessage.make_user_message(
            role_name="User", content=message.text
        )
        reply = self.chat_agent.step(prompt_msg)

        self.bot.reply_to(message, reply.msg.content)
camel/configs/__init__.py CHANGED
@@ -13,6 +13,7 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
15
15
  from .base_config import BaseConfig
16
+ from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
16
17
  from .openai_config import (
17
18
  OPENAI_API_PARAMS,
18
19
  ChatGPTConfig,
@@ -26,4 +27,6 @@ __all__ = [
26
27
  'AnthropicConfig',
27
28
  'ANTHROPIC_API_PARAMS',
28
29
  'OpenSourceConfig',
30
+ 'LiteLLMConfig',
31
+ 'LITELLM_API_PARAMS',
29
32
  ]
@@ -15,7 +15,7 @@ from __future__ import annotations
15
15
 
16
16
  from dataclasses import asdict, dataclass
17
17
 
18
- from anthropic._types import NOT_GIVEN, NotGiven
18
+ from anthropic import NOT_GIVEN, NotGiven
19
19
 
20
20
  from camel.configs.base_config import BaseConfig
21
21
 
@@ -0,0 +1,113 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from dataclasses import asdict, dataclass, field
17
+ from typing import List, Optional, Union
18
+
19
+ from camel.configs.base_config import BaseConfig
20
+
21
+
22
@dataclass(frozen=True)
class LiteLLMConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    LiteLLM API.

    Args:
        model (str): The name of the language model to use for text completion.
        messages (List): A list of message objects representing the
            conversation context. (default: [])
        timeout (Optional[Union[float, str]], optional): Request timeout.
            (default: None)
        temperature (Optional[float], optional): Temperature parameter for
            controlling randomness. (default: None)
        top_p (Optional[float], optional): Top-p parameter for nucleus
            sampling. (default: None)
        n (Optional[int], optional): Number of completions to generate.
            (default: None)
        stream (Optional[bool], optional): Whether to return a streaming
            response. (default: None)
        stream_options (Optional[dict], optional): Options for the streaming
            response. (default: None)
        stop (Optional[Union[str, List[str]]], optional): Sequences where the
            API will stop generating further tokens. (default: None)
        max_tokens (Optional[int], optional): Maximum number of tokens to
            generate. (default: None)
        presence_penalty (Optional[float], optional): Penalize new tokens
            based on their existence in the text so far. (default: None)
        frequency_penalty (Optional[float], optional): Penalize new tokens
            based on their frequency in the text so far. (default: None)
        logit_bias (Optional[dict], optional): Modify the probability of
            specific tokens appearing in the completion. (default: {})
        user (Optional[str], optional): A unique identifier representing the
            end-user. (default: None)
        response_format (Optional[dict], optional): Response format
            parameters. (default: None)
        seed (Optional[int], optional): Random seed. (default: None)
        tools (Optional[List], optional): List of tools. (default: [])
        tool_choice (Optional[Union[str, dict]], optional): Tool choice
            parameters. (default: None)
        logprobs (Optional[bool], optional): Whether to return log
            probabilities of the output tokens. (default: None)
        top_logprobs (Optional[int], optional): Number of most likely tokens
            to return at each token position. (default: None)
        deployment_id (Optional[str], optional): Deployment ID. (default: None)
        extra_headers (Optional[dict], optional): Additional headers for the
            request. (default: {})
        base_url (Optional[str], optional): Base URL for the API. (default:
            None)
        api_version (Optional[str], optional): API version. (default: None)
        api_key (Optional[str], optional): API key. (default: None)
        model_list (Optional[list], optional): List of API base, version,
            keys. (default: [])
        mock_response (Optional[str], optional): Mock completion response for
            testing or debugging. (default: None)
        custom_llm_provider (Optional[str], optional): Non-OpenAI LLM
            provider. (default: None)
        max_retries (Optional[int], optional): Maximum number of retries.
            (default: None)
    """

    # Mutable containers use ``default_factory`` so the frozen dataclass does
    # not share one instance across all configs.
    model: str = "gpt-3.5-turbo"
    messages: List = field(default_factory=list)
    timeout: Optional[Union[float, str]] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    n: Optional[int] = None
    stream: Optional[bool] = None
    stream_options: Optional[dict] = None
    stop: Optional[Union[str, List[str]]] = None
    max_tokens: Optional[int] = None
    presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
    logit_bias: Optional[dict] = field(default_factory=dict)
    user: Optional[str] = None
    response_format: Optional[dict] = None
    seed: Optional[int] = None
    tools: Optional[List] = field(default_factory=list)
    tool_choice: Optional[Union[str, dict]] = None
    logprobs: Optional[bool] = None
    top_logprobs: Optional[int] = None
    deployment_id: Optional[str] = None
    extra_headers: Optional[dict] = field(default_factory=dict)
    base_url: Optional[str] = None
    api_version: Optional[str] = None
    api_key: Optional[str] = None
    model_list: Optional[list] = field(default_factory=list)
    mock_response: Optional[str] = None
    custom_llm_provider: Optional[str] = None
    max_retries: Optional[int] = None


# The set of parameter names accepted by the LiteLLM chat-completion API,
# derived from the dataclass fields above. Iterating a dict yields its keys,
# so ``set(asdict(...))`` is equivalent to the comprehension over ``.keys()``.
LITELLM_API_PARAMS = set(asdict(LiteLLMConfig()))
@@ -14,9 +14,11 @@
14
14
  from .base import BaseEmbedding
15
15
  from .openai_embedding import OpenAIEmbedding
16
16
  from .sentence_transformers_embeddings import SentenceTransformerEncoder
17
+ from .vlm_embedding import VisionLanguageEmbedding
17
18
 
18
19
  __all__ = [
19
20
  "BaseEmbedding",
20
21
  "OpenAIEmbedding",
21
22
  "SentenceTransformerEncoder",
23
+ "VisionLanguageEmbedding",
22
24
  ]
@@ -18,7 +18,7 @@ from openai import OpenAI
18
18
 
19
19
  from camel.embeddings.base import BaseEmbedding
20
20
  from camel.types import EmbeddingModelType
21
- from camel.utils import api_key_required
21
+ from camel.utils import model_api_key_required
22
22
 
23
23
 
24
24
  class OpenAIEmbedding(BaseEmbedding[str]):
@@ -46,7 +46,7 @@ class OpenAIEmbedding(BaseEmbedding[str]):
46
46
  self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
47
47
  self.client = OpenAI(timeout=60, max_retries=3, api_key=self._api_key)
48
48
 
49
- @api_key_required
49
+ @model_api_key_required
50
50
  def embed_list(
51
51
  self,
52
52
  objs: List[str],
@@ -11,7 +11,7 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import Any, List, Union
14
+ from typing import Any, List
15
15
 
16
16
  from camel.embeddings.base import BaseEmbedding
17
17
 
@@ -38,17 +38,18 @@ class SentenceTransformerEncoder(BaseEmbedding[str]):
38
38
 
39
39
  def embed_list(
40
40
  self,
41
- objs: Union[str, List[str]],
41
+ objs: List[str],
42
42
  **kwargs: Any,
43
- ) -> list:
43
+ ) -> List[List[float]]:
44
44
  r"""Generates embeddings for the given texts using the model.
45
45
 
46
46
  Args:
47
- objs (str | List[str]): The texts for which to generate the
47
+ objs (List[str]): The texts for which to generate the
48
48
  embeddings.
49
49
 
50
50
  Returns:
51
- list: A list of float representing embeddings.
51
+ List[List[float]]: A list that represents the generated embedding
52
+ as a list of floating-point numbers.
52
53
  """
53
54
  if not objs:
54
55
  raise ValueError("Input text list is empty")
@@ -0,0 +1,146 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import Any, List, Optional, Union
15
+
16
+ from PIL import Image
17
+
18
+ from camel.embeddings import BaseEmbedding
19
+
20
+
21
class VisionLanguageEmbedding(BaseEmbedding[Union[str, Image.Image]]):
    r"""Provides image embedding functionalities using multimodal model.

    Args:
        model_name : The model type to be used for generating embeddings.
            And the default value is: obj:`openai/clip-vit-base-patch32`.

    Raises:
        RuntimeError: If an unsupported model type is specified.
    """

    def __init__(
        self, model_name: str = "openai/clip-vit-base-patch32"
    ) -> None:
        r"""Initializes the: obj: `VisionLanguageEmbedding` class with a
        specified model and return the dimension of embeddings.

        Args:
            model_name (str, optional): The version name of the model to use.
                (default: :obj:`openai/clip-vit-base-patch32`)
        """
        # Lazy import: `transformers` is only needed when this class is used.
        from transformers import AutoModel, AutoProcessor

        try:
            self.model = AutoModel.from_pretrained(model_name)
            self.processor = AutoProcessor.from_pretrained(model_name)
        except Exception as e:
            raise RuntimeError(f"Failed to load model '{model_name}': {e}")

        self.valid_processor_kwargs = []
        self.valid_model_kwargs = []

        # NOTE(review): `_valid_processor_keys` is a private transformers
        # attribute and may not exist for every processor/model pairing;
        # the except branch below deliberately degrades to empty lists.
        try:
            self.valid_processor_kwargs = (
                self.processor.image_processor._valid_processor_keys
            )
            self.valid_model_kwargs = [
                "pixel_values",
                "return_dict",
                "interpolate_pos_encoding",
            ]
        except Exception:
            print("Warning: not typically processor and model structure")
            pass
        # Embedding dimensionality; lazily discovered on first embed call
        # or in `get_output_dim`.
        self.dim: Optional[int] = None

    def embed_list(
        self, objs: List[Union[Image.Image, str]], **kwargs: Any
    ) -> List[List[float]]:
        """Generates embeddings for the given images or texts.

        Args:
            objs (List[Image.Image|str]): The list of images or texts for
                which to generate the embeddings.
            image_processor_kwargs: Extra kwargs passed to the image processor.
            tokenizer_kwargs: Extra kwargs passed to the text tokenizer
                (processor).
            model_kwargs: Extra kwargs passed to the main model.

        Returns:
            List[List[float]]: A list that represents the generated embedding
                as a list of floating-point numbers.

        Raises:
            ValueError: If the input type is not `Image.Image` or `str`.
        """
        if not objs:
            raise ValueError("Input objs list is empty.")

        # Per-component kwargs are passed via `kwargs` sub-dicts and default
        # to empty dicts when absent.
        image_processor_kwargs: Optional[dict] = kwargs.get(
            'image_processor_kwargs', {}
        )
        tokenizer_kwargs: Optional[dict] = kwargs.get('tokenizer_kwargs', {})
        model_kwargs: Optional[dict] = kwargs.get('model_kwargs', {})

        result_list = []
        for obj in objs:
            if isinstance(obj, Image.Image):
                # Image branch: preprocess then project through the vision
                # tower. `squeeze(dim=0)` drops the batch dimension of 1.
                image_input = self.processor(
                    images=obj,
                    return_tensors="pt",
                    padding=True,
                    **image_processor_kwargs,
                )
                image_feature = (
                    self.model.get_image_features(
                        **image_input, **model_kwargs
                    )
                    .squeeze(dim=0)
                    .tolist()
                )
                result_list.append(image_feature)
            elif isinstance(obj, str):
                # Text branch: tokenize then project through the text tower.
                text_input = self.processor(
                    text=obj,
                    return_tensors="pt",
                    padding=True,
                    **tokenizer_kwargs,
                )
                text_feature = (
                    self.model.get_text_features(**text_input, **model_kwargs)
                    .squeeze(dim=0)
                    .tolist()
                )
                result_list.append(text_feature)
            else:
                raise ValueError("Input type is not image nor text.")

        # Record the dimensionality observed on this call.
        self.dim = len(result_list[0])

        # All embeddings in one batch must share the same dimensionality.
        if any(len(result) != self.dim for result in result_list):
            raise ValueError("Dimensionality is not consistent.")

        return result_list

    def get_output_dim(self) -> int:
        r"""Returns the output dimension of the embeddings.

        Returns:
            int: The dimensionality of the embedding for the current model.
        """
        if self.dim is None:
            # Probe with a dummy text input to discover the model's
            # embedding width without requiring an image.
            text = 'dimension'
            inputs = self.processor(text=[text], return_tensors="pt")
            self.dim = self.model.get_text_features(**inputs).shape[1]
        return self.dim