camel-ai 0.1.5.3__tar.gz → 0.1.5.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of camel-ai has been flagged as potentially problematic.

Files changed (158)
  1. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/PKG-INFO +16 -8
  2. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/README.md +6 -2
  3. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/__init__.py +1 -1
  4. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/knowledge_graph_agent.py +4 -1
  5. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/configs/__init__.py +6 -0
  6. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/configs/litellm_config.py +8 -18
  7. camel_ai-0.1.5.5/camel/configs/ollama_config.py +85 -0
  8. camel_ai-0.1.5.5/camel/configs/zhipuai_config.py +78 -0
  9. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/base.py +10 -9
  10. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/openai_embedding.py +27 -14
  11. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/sentence_transformers_embeddings.py +28 -14
  12. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/search_functions.py +5 -14
  13. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/slack_functions.py +5 -7
  14. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/twitter_function.py +3 -8
  15. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/weather_functions.py +3 -8
  16. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/interpreters/__init__.py +2 -0
  17. camel_ai-0.1.5.5/camel/interpreters/docker_interpreter.py +235 -0
  18. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/loaders/__init__.py +2 -0
  19. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/loaders/base_io.py +5 -9
  20. camel_ai-0.1.5.5/camel/loaders/jina_url_reader.py +99 -0
  21. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/loaders/unstructured_io.py +4 -6
  22. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/anthropic_model.py +6 -4
  23. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/litellm_model.py +49 -21
  24. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/model_factory.py +1 -0
  25. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/nemotron_model.py +14 -6
  26. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/ollama_model.py +11 -17
  27. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/openai_audio_models.py +10 -2
  28. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/openai_model.py +4 -3
  29. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/zhipuai_model.py +12 -6
  30. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/retrievers/auto_retriever.py +2 -2
  31. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/retrievers/bm25_retriever.py +3 -8
  32. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/retrievers/cohere_rerank_retriever.py +3 -5
  33. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/__init__.py +2 -0
  34. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/graph_storages/graph_element.py +9 -1
  35. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/graph_storages/neo4j_graph.py +3 -7
  36. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/key_value_storages/__init__.py +2 -0
  37. camel_ai-0.1.5.5/camel/storages/key_value_storages/redis.py +169 -0
  38. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/vectordb_storages/milvus.py +3 -7
  39. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/vectordb_storages/qdrant.py +3 -7
  40. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/toolkits/__init__.py +2 -0
  41. camel_ai-0.1.5.5/camel/toolkits/code_execution.py +69 -0
  42. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/toolkits/github_toolkit.py +5 -9
  43. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/types/enums.py +49 -20
  44. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/utils/__init__.py +2 -2
  45. camel_ai-0.1.5.5/camel/utils/async_func.py +42 -0
  46. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/utils/commons.py +31 -49
  47. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/utils/token_counting.py +40 -1
  48. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/pyproject.toml +23 -9
  49. camel_ai-0.1.5.3/camel/bots/__init__.py +0 -20
  50. camel_ai-0.1.5.3/camel/bots/discord_bot.py +0 -103
  51. camel_ai-0.1.5.3/camel/bots/telegram_bot.py +0 -84
  52. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/__init__.py +0 -0
  53. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/base.py +0 -0
  54. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/chat_agent.py +0 -0
  55. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/critic_agent.py +0 -0
  56. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/deductive_reasoner_agent.py +0 -0
  57. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/embodied_agent.py +0 -0
  58. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/role_assignment_agent.py +0 -0
  59. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/search_agent.py +0 -0
  60. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/task_agent.py +0 -0
  61. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/tool_agents/__init__.py +0 -0
  62. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/tool_agents/base.py +0 -0
  63. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  64. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/configs/anthropic_config.py +0 -0
  65. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/configs/base_config.py +0 -0
  66. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/configs/openai_config.py +0 -0
  67. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/__init__.py +0 -0
  68. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/vlm_embedding.py +0 -0
  69. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/__init__.py +0 -0
  70. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/google_maps_function.py +0 -0
  71. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/math_functions.py +0 -0
  72. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_function.py +0 -0
  73. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/biztoc/__init__.py +0 -0
  74. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/biztoc/ai-plugin.json +0 -0
  75. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/biztoc/openapi.yaml +0 -0
  76. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/coursera/__init__.py +0 -0
  77. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/coursera/openapi.yaml +0 -0
  78. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/create_qr_code/__init__.py +0 -0
  79. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/create_qr_code/openapi.yaml +0 -0
  80. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/klarna/__init__.py +0 -0
  81. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/klarna/openapi.yaml +0 -0
  82. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/nasa_apod/__init__.py +0 -0
  83. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/nasa_apod/openapi.yaml +0 -0
  84. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/outschool/__init__.py +0 -0
  85. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/outschool/ai-plugin.json +0 -0
  86. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/outschool/openapi.yaml +0 -0
  87. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/outschool/paths/__init__.py +0 -0
  88. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/outschool/paths/get_classes.py +0 -0
  89. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/outschool/paths/search_teachers.py +0 -0
  90. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/security_config.py +0 -0
  91. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/speak/__init__.py +0 -0
  92. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/speak/openapi.yaml +0 -0
  93. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/web_scraper/__init__.py +0 -0
  94. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/web_scraper/ai-plugin.json +0 -0
  95. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/web_scraper/openapi.yaml +0 -0
  96. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/web_scraper/paths/__init__.py +0 -0
  97. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/open_api_specs/web_scraper/paths/scraper.py +0 -0
  98. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/openai_function.py +0 -0
  99. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/functions/retrieval_functions.py +0 -0
  100. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/generators.py +0 -0
  101. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/human.py +0 -0
  102. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/interpreters/base.py +0 -0
  103. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/interpreters/internal_python_interpreter.py +0 -0
  104. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/interpreters/interpreter_error.py +0 -0
  105. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/interpreters/subprocess_interpreter.py +0 -0
  106. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/__init__.py +0 -0
  107. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/agent_memories.py +0 -0
  108. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/base.py +0 -0
  109. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/blocks/__init__.py +0 -0
  110. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/blocks/chat_history_block.py +0 -0
  111. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/blocks/vectordb_block.py +0 -0
  112. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/context_creators/__init__.py +0 -0
  113. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/context_creators/score_based.py +0 -0
  114. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/memories/records.py +0 -0
  115. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/messages/__init__.py +0 -0
  116. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/messages/base.py +0 -0
  117. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/messages/func_message.py +0 -0
  118. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/__init__.py +0 -0
  119. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/base_model.py +0 -0
  120. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/open_source_model.py +0 -0
  121. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/models/stub_model.py +0 -0
  122. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/__init__.py +0 -0
  123. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/ai_society.py +0 -0
  124. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/base.py +0 -0
  125. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/code.py +0 -0
  126. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/descripte_video_prompt.py +0 -0
  127. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/evaluation.py +0 -0
  128. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/generate_text_embedding_data.py +0 -0
  129. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/misalignment.py +0 -0
  130. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/object_recognition.py +0 -0
  131. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/prompt_templates.py +0 -0
  132. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/role_description_prompt_template.py +0 -0
  133. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/solution_extraction.py +0 -0
  134. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/task_prompt_template.py +0 -0
  135. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/prompts/translation.py +0 -0
  136. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/responses/__init__.py +0 -0
  137. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/responses/agent_responses.py +0 -0
  138. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/retrievers/__init__.py +0 -0
  139. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/retrievers/base.py +0 -0
  140. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/retrievers/vector_retriever.py +0 -0
  141. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/societies/__init__.py +0 -0
  142. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/societies/babyagi_playing.py +0 -0
  143. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/societies/role_playing.py +0 -0
  144. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/graph_storages/__init__.py +0 -0
  145. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/graph_storages/base.py +0 -0
  146. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/key_value_storages/base.py +0 -0
  147. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/key_value_storages/in_memory.py +0 -0
  148. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/key_value_storages/json.py +0 -0
  149. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/vectordb_storages/__init__.py +0 -0
  150. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/storages/vectordb_storages/base.py +0 -0
  151. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/terminators/__init__.py +0 -0
  152. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/terminators/base.py +0 -0
  153. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/terminators/response_terminator.py +0 -0
  154. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/terminators/token_limit_terminator.py +0 -0
  155. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/toolkits/base.py +0 -0
  156. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/types/__init__.py +0 -0
  157. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/types/openai_types.py +0 -0
  158. {camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/utils/constants.py +0 -0
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/PKG-INFO

@@ -1,12 +1,12 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.3
+Version: 0.1.5.5
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
 Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
 Author: CAMEL-AI.org
-Requires-Python: >=3.8.1,<3.12
+Requires-Python: >=3.9.0,<3.12
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
@@ -16,6 +16,7 @@ Provides-Extra: all
 Provides-Extra: encoders
 Provides-Extra: graph-storages
 Provides-Extra: huggingface-agent
+Provides-Extra: kv-stroages
 Provides-Extra: model-platforms
 Provides-Extra: retrievers
 Provides-Extra: test
@@ -23,7 +24,7 @@ Provides-Extra: tools
 Provides-Extra: vector-databases
 Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
-Requires-Dist: anthropic (>=0.28.0,<0.29.0)
+Requires-Dist: anthropic (>=0.29.0,<0.30.0)
 Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
 Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
 Requires-Dist: colorama (>=0,<1)
@@ -31,13 +32,15 @@ Requires-Dist: curl_cffi (==0.6.2)
 Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: discord.py (>=2.3.2,<3.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
-Requires-Dist: imageio (>=2.34.1,<3.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
 Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
+Requires-Dist: milvus-lite (>=2.4.0,<=2.4.7)
 Requires-Dist: mock (>=5,<6) ; extra == "test"
 Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -59,13 +62,14 @@ Requires-Dist: pytest (>=7,<8) ; extra == "test"
 Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
 Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
 Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
+Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
 Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
-Requires-Dist: sentence-transformers (>=2.2.2,<3.0.0) ; extra == "encoders" or extra == "all"
+Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
 Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
-Requires-Dist: torch (>=1,<2) ; extra == "huggingface-agent" or extra == "all"
+Requires-Dist: torch (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: unstructured[all-docs] (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
 Requires-Dist: wikipedia (>=1,<2) ; extra == "tools" or extra == "all"
@@ -180,13 +184,13 @@ exit
 Install `CAMEL` from source with conda and pip:
 ```sh
 # Create a conda virtual environment
-conda create --name camel python=3.10
+conda create --name camel python=3.9
 
 # Activate CAMEL conda environment
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.3 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -198,6 +202,10 @@ pip install -e .
 pip install -e .[all] # (Optional)
 ```
 
+### From Docker
+
+Detailed guidance can be find [here](https://github.com/camel-ai/camel/blob/master/.container/README.md)
+
 ## Documentation
 
 [CAMEL package documentation pages](https://camel-ai.github.io/camel/).
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/README.md

@@ -104,13 +104,13 @@ exit
 Install `CAMEL` from source with conda and pip:
 ```sh
 # Create a conda virtual environment
-conda create --name camel python=3.10
+conda create --name camel python=3.9
 
 # Activate CAMEL conda environment
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.3 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -122,6 +122,10 @@ pip install -e .
 pip install -e .[all] # (Optional)
 ```
 
+### From Docker
+
+Detailed guidance can be find [here](https://github.com/camel-ai/camel/blob/master/.container/README.md)
+
 ## Documentation
 
 [CAMEL package documentation pages](https://camel-ai.github.io/camel/).
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/__init__.py

@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
-__version__ = '0.1.5'
+__version__ = '0.1.5.5'
 
 __all__ = [
     '__version__',
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/agents/knowledge_graph_agent.py

@@ -13,7 +13,10 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from typing import Optional, Union
 
-from unstructured.documents.elements import Element
+try:
+    from unstructured.documents.elements import Element
+except ImportError:
+    Element = None
 
 from camel.agents import ChatAgent
 from camel.messages import BaseMessage
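The hunk above makes `unstructured` an optional dependency for the knowledge graph agent. A minimal sketch of the same guarded-import pattern (only the import names come from the diff; the helper that reports the missing dependency is a hypothetical illustration):

```python
# Guarded import keeps the module importable when `unstructured` is absent.
try:
    from unstructured.documents.elements import Element
except ImportError:
    Element = None


def require_unstructured() -> None:
    # Hypothetical helper: fail only when the optional feature is used.
    if Element is None:
        raise ImportError(
            "Install `unstructured` to parse elements for the knowledge graph."
        )
```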
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/configs/__init__.py

@@ -14,11 +14,13 @@
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import (
     OPENAI_API_PARAMS,
     ChatGPTConfig,
     OpenSourceConfig,
 )
+from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
 __all__ = [
     'BaseConfig',
@@ -29,4 +31,8 @@ __all__ = [
     'OpenSourceConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
+    'OllamaConfig',
+    'OLLAMA_API_PARAMS',
+    'ZhipuAIConfig',
+    'ZHIPUAI_API_PARAMS',
 ]
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/configs/litellm_config.py

@@ -13,11 +13,14 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations
 
-from dataclasses import asdict, dataclass, field
-from typing import List, Optional, Union
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, List, Optional, Union
 
 from camel.configs.base_config import BaseConfig
 
+if TYPE_CHECKING:
+    from camel.functions import OpenAIFunction
+
 
 @dataclass(frozen=True)
 class LiteLLMConfig(BaseConfig):
@@ -25,9 +28,6 @@ class LiteLLMConfig(BaseConfig):
     LiteLLM API.
 
     Args:
-        model (str): The name of the language model to use for text completion.
-        messages (List): A list of message objects representing the
-            conversation context. (default: [])
         timeout (Optional[Union[float, str]], optional): Request timeout.
             (default: None)
         temperature (Optional[float], optional): Temperature parameter for
@@ -65,12 +65,7 @@ class LiteLLMConfig(BaseConfig):
         deployment_id (Optional[str], optional): Deployment ID. (default: None)
         extra_headers (Optional[dict], optional): Additional headers for the
             request. (default: None)
-        base_url (Optional[str], optional): Base URL for the API. (default:
-            None)
         api_version (Optional[str], optional): API version. (default: None)
-        api_key (Optional[str], optional): API key. (default: None)
-        model_list (Optional[list], optional): List of API base, version,
-            keys. (default: None)
         mock_response (Optional[str], optional): Mock completion response for
             testing or debugging. (default: None)
         custom_llm_provider (Optional[str], optional): Non-OpenAI LLM
@@ -79,8 +74,6 @@ class LiteLLMConfig(BaseConfig):
             (default: None)
     """
 
-    model: str = "gpt-3.5-turbo"
-    messages: List = field(default_factory=list)
     timeout: Optional[Union[float, str]] = None
     temperature: Optional[float] = None
     top_p: Optional[float] = None
@@ -91,20 +84,17 @@ class LiteLLMConfig(BaseConfig):
     max_tokens: Optional[int] = None
     presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
-    logit_bias: Optional[dict] = field(default_factory=dict)
+    logit_bias: Optional[dict] = None
     user: Optional[str] = None
     response_format: Optional[dict] = None
     seed: Optional[int] = None
-    tools: Optional[List] = field(default_factory=list)
+    tools: Optional[list[OpenAIFunction]] = None
     tool_choice: Optional[Union[str, dict]] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
     deployment_id: Optional[str] = None
-    extra_headers: Optional[dict] = field(default_factory=dict)
-    base_url: Optional[str] = None
+    extra_headers: Optional[dict] = None
     api_version: Optional[str] = None
-    api_key: Optional[str] = None
-    model_list: Optional[list] = field(default_factory=list)
     mock_response: Optional[str] = None
     custom_llm_provider: Optional[str] = None
     max_retries: Optional[int] = None
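After these hunks, `LiteLLMConfig` carries only request-shaping parameters; `model`, `messages`, `api_key`, `base_url`, and `model_list` are gone and the remaining optional fields default to `None`. A hedged sketch of turning the config into keyword arguments (field names and `LITELLM_API_PARAMS` come from the diff; filtering unset values before the call is an assumption about the calling convention):

```python
from dataclasses import asdict

from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig

# Only sampling / request parameters live in the config now; the model name
# and credentials are supplied by the model backend, not by this object.
config = LiteLLMConfig(temperature=0.3, max_tokens=512)

# Keep recognised LiteLLM parameters and drop fields that were never set.
request_kwargs = {
    k: v
    for k, v in asdict(config).items()
    if k in LITELLM_API_PARAMS and v is not None
}
print(request_kwargs)  # {'temperature': 0.3, 'max_tokens': 512}
```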
camel_ai-0.1.5.5/camel/configs/ollama_config.py (new file)

@@ -0,0 +1,85 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import Sequence
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+
+@dataclass(frozen=True)
+class OllamaConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility
+
+    Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+    """
+
+    temperature: float = 0.2
+    top_p: float = 1.0
+    stream: bool = False
+    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+    max_tokens: int | NotGiven = NOT_GIVEN
+    presence_penalty: float = 0.0
+    response_format: dict | NotGiven = NOT_GIVEN
+    frequency_penalty: float = 0.0
+
+
+OLLAMA_API_PARAMS = {param for param in asdict(OllamaConfig()).keys()}
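A small usage sketch for the new `OllamaConfig` (class, fields, and `OLLAMA_API_PARAMS` are as defined above; stripping the `NOT_GIVEN` sentinels before sending a request is an assumption about how the config is consumed):

```python
from dataclasses import asdict

from openai._types import NotGiven

from camel.configs import OLLAMA_API_PARAMS, OllamaConfig

config = OllamaConfig(temperature=0.4, max_tokens=256, stop=["###"])

# Keep only parameters the OpenAI-compatible Ollama endpoint understands,
# skipping sentinel values that were never set.
request_kwargs = {
    k: v
    for k, v in asdict(config).items()
    if k in OLLAMA_API_PARAMS and not isinstance(v, NotGiven)
}
print(request_kwargs)
# {'temperature': 0.4, 'top_p': 1.0, 'stream': False, 'stop': ['###'],
#  'max_tokens': 256, 'presence_penalty': 0.0, 'frequency_penalty': 0.0}
```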
camel_ai-0.1.5.5/camel/configs/zhipuai_config.py (new file)

@@ -0,0 +1,78 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, Optional, Sequence
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+if TYPE_CHECKING:
+    from camel.functions import OpenAIFunction
+
+
+@dataclass(frozen=True)
+class ZhipuAIConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility
+
+    Reference: https://open.bigmodel.cn/dev/api#glm-4v
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`0.6`)
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        tools (list[OpenAIFunction], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: float = 0.2
+    top_p: float = 0.6
+    stream: bool = False
+    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+    max_tokens: int | NotGiven = NOT_GIVEN
+    tools: Optional[list[OpenAIFunction]] = None
+    tool_choice: Optional[dict[str, str] | str] = None
+
+
+ZHIPUAI_API_PARAMS = {param for param in asdict(ZhipuAIConfig()).keys()}
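`ZhipuAIConfig` follows the same pattern; a brief hedged sketch (defaults are taken from the fields above, and `tool_choice="auto"` is only an example value):

```python
from camel.configs import ZHIPUAI_API_PARAMS, ZhipuAIConfig

# Frozen dataclass: values are fixed at construction time.
config = ZhipuAIConfig(max_tokens=1024, tool_choice="auto")
print(config.temperature, config.top_p)  # 0.2 0.6 (the documented defaults)
print(sorted(ZHIPUAI_API_PARAMS))
```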
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/base.py

@@ -11,8 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
-from typing import Any, Generic, List, TypeVar
+from typing import Any, Generic, TypeVar
 
 T = TypeVar('T')
 
@@ -23,19 +25,18 @@ class BaseEmbedding(ABC, Generic[T]):
     @abstractmethod
     def embed_list(
         self,
-        objs: List[T],
+        objs: list[T],
         **kwargs: Any,
-    ) -> List[List[float]]:
+    ) -> list[list[float]]:
         r"""Generates embeddings for the given texts.
 
         Args:
-            objs (List[T]): The objects for which to generate the embeddings.
+            objs (list[T]): The objects for which to generate the embeddings.
             **kwargs (Any): Extra kwargs passed to the embedding API.
 
         Returns:
-            List[List[float]]: A list that represents the
-                generated embedding as a list of floating-point numbers or a
-                numpy matrix with embeddings.
+            list[list[float]]: A list that represents the
+                generated embedding as a list of floating-point numbers.
         """
         pass
 
@@ -43,7 +44,7 @@ class BaseEmbedding(ABC, Generic[T]):
         self,
         obj: T,
         **kwargs: Any,
-    ) -> List[float]:
+    ) -> list[float]:
         r"""Generates an embedding for the given text.
 
         Args:
@@ -51,7 +52,7 @@ class BaseEmbedding(ABC, Generic[T]):
             **kwargs (Any): Extra kwargs passed to the embedding API.
 
         Returns:
-            List[float]: A list of floating-point numbers representing the
+            list[float]: A list of floating-point numbers representing the
                 generated embedding.
         """
         return self.embed_list([obj], **kwargs)[0]
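The abstract base now uses built-in generics, and `embed` simply delegates to `embed_list`. A toy backend written against this interface, as a sketch only (the hashing embedder is purely illustrative, and it assumes `embed_list` plus `get_output_dim` are the methods a concrete embedding must provide):

```python
from __future__ import annotations

import hashlib
from typing import Any

from camel.embeddings.base import BaseEmbedding


class HashingEmbedding(BaseEmbedding[str]):
    """Toy embedder: buckets token hashes into a fixed-size vector."""

    def __init__(self, dim: int = 16) -> None:
        self.dim = dim

    def embed_list(self, objs: list[str], **kwargs: Any) -> list[list[float]]:
        vectors = []
        for text in objs:
            vec = [0.0] * self.dim
            for token in text.split():
                digest = hashlib.md5(token.encode()).hexdigest()
                vec[int(digest, 16) % self.dim] += 1.0
            vectors.append(vec)
        return vectors

    def get_output_dim(self) -> int:
        return self.dim


# `embed` is inherited from BaseEmbedding and just calls embed_list([obj]).
print(HashingEmbedding().embed("hello camel"))
```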
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/openai_embedding.py

@@ -11,24 +11,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
 import os
-from typing import Any, List, Optional
+from typing import Any
 
-from openai import OpenAI
+from openai import NOT_GIVEN, NotGiven, OpenAI
 
 from camel.embeddings.base import BaseEmbedding
 from camel.types import EmbeddingModelType
-from camel.utils import model_api_key_required
+from camel.utils import api_keys_required
 
 
 class OpenAIEmbedding(BaseEmbedding[str]):
     r"""Provides text embedding functionalities using OpenAI's models.
 
     Args:
-        model (OpenAiEmbeddingModel, optional): The model type to be used for
-            generating embeddings. (default: :obj:`ModelType.ADA_2`)
-        api_key (Optional[str]): The API key for authenticating with the
+        model_type (EmbeddingModelType, optional): The model type to be
+            used for text embeddings.
+            (default: :obj:`TEXT_EMBEDDING_3_SMALL`)
+        api_key (str, optional): The API key for authenticating with the
             OpenAI service. (default: :obj:`None`)
+        dimensions (int, optional): The text embedding output dimensions.
+            (default: :obj:`NOT_GIVEN`)
 
     Raises:
         RuntimeError: If an unsupported model type is specified.
@@ -36,36 +41,44 @@ class OpenAIEmbedding(BaseEmbedding[str]):
 
     def __init__(
         self,
-        model_type: EmbeddingModelType = EmbeddingModelType.ADA_2,
-        api_key: Optional[str] = None,
+        model_type: EmbeddingModelType = (
+            EmbeddingModelType.TEXT_EMBEDDING_3_SMALL
+        ),
+        api_key: str | None = None,
+        dimensions: int | NotGiven = NOT_GIVEN,
     ) -> None:
         if not model_type.is_openai:
             raise ValueError("Invalid OpenAI embedding model type.")
         self.model_type = model_type
-        self.output_dim = model_type.output_dim
+        if dimensions == NOT_GIVEN:
+            self.output_dim = model_type.output_dim
+        else:
+            assert isinstance(dimensions, int)
+            self.output_dim = dimensions
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
         self.client = OpenAI(timeout=60, max_retries=3, api_key=self._api_key)
 
-    @model_api_key_required
+    @api_keys_required("OPENAI_API_KEY")
     def embed_list(
         self,
-        objs: List[str],
+        objs: list[str],
         **kwargs: Any,
-    ) -> List[List[float]]:
+    ) -> list[list[float]]:
         r"""Generates embeddings for the given texts.
 
         Args:
-            objs (List[str]): The texts for which to generate the embeddings.
+            objs (list[str]): The texts for which to generate the embeddings.
             **kwargs (Any): Extra kwargs passed to the embedding API.
 
         Returns:
-            List[List[float]]: A list that represents the generated embedding
+            list[list[float]]: A list that represents the generated embedding
                 as a list of floating-point numbers.
         """
         # TODO: count tokens
         response = self.client.embeddings.create(
             input=objs,
             model=self.model_type.value,
+            dimensions=self.output_dim,
             **kwargs,
         )
         return [data.embedding for data in response.data]
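With the new `dimensions` argument the output size can be reduced for the `text-embedding-3-*` models. A hedged usage sketch (requires a valid `OPENAI_API_KEY`; the 256-dimension value is only an example):

```python
from camel.embeddings.openai_embedding import OpenAIEmbedding
from camel.types import EmbeddingModelType

# Ask text-embedding-3-small for shortened 256-dimensional vectors.
embedder = OpenAIEmbedding(
    model_type=EmbeddingModelType.TEXT_EMBEDDING_3_SMALL,
    dimensions=256,
)

vectors = embedder.embed_list(["CAMEL agents", "multi-agent systems"])
assert len(vectors[0]) == embedder.output_dim == 256
```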
{camel_ai-0.1.5.3 → camel_ai-0.1.5.5}/camel/embeddings/sentence_transformers_embeddings.py

@@ -11,51 +11,63 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, List
+from __future__ import annotations
+
+from typing import Any
+
+from numpy import ndarray
 
 from camel.embeddings.base import BaseEmbedding
 
 
 class SentenceTransformerEncoder(BaseEmbedding[str]):
-    r"""This class provides functionalities to generate embeddings
-    using a specified model from `Sentence Transformers`.
+    r"""This class provides functionalities to generate text
+    embeddings using `Sentence Transformers`.
 
     References:
         https://www.sbert.net/
     """
 
-    def __init__(self, model_name: str = 'intfloat/e5-large-v2'):
+    def __init__(
+        self,
+        model_name: str = "intfloat/e5-large-v2",
+        **kwargs,
+    ):
         r"""Initializes the: obj: `SentenceTransformerEmbedding` class
         with the specified transformer model.
 
         Args:
             model_name (str, optional): The name of the model to use.
-                Defaults to `intfloat/e5-large-v2`.
+                (default: :obj:`intfloat/e5-large-v2`)
+            **kwargs (optional): Additional arguments of
+                :class:`SentenceTransformer`, such as :obj:`prompts` etc.
         """
         from sentence_transformers import SentenceTransformer
 
-        self.model = SentenceTransformer(model_name)
+        self.model = SentenceTransformer(model_name, **kwargs)
 
     def embed_list(
         self,
-        objs: List[str],
+        objs: list[str],
         **kwargs: Any,
-    ) -> List[List[float]]:
+    ) -> list[list[float]]:
         r"""Generates embeddings for the given texts using the model.
 
         Args:
-            objs (List[str]): The texts for which to generate the
-                embeddings.
+            objs (list[str]): The texts for which to generate the
+                embeddings.
 
         Returns:
-            List[List[float]]: A list that represents the generated embedding
+            list[list[float]]: A list that represents the generated embedding
                 as a list of floating-point numbers.
         """
         if not objs:
             raise ValueError("Input text list is empty")
-        return self.model.encode(
+        embeddings = self.model.encode(
             objs, normalize_embeddings=True, **kwargs
-        ).tolist()
+        )
+        assert isinstance(embeddings, ndarray)
+        return embeddings.tolist()
 
     def get_output_dim(self) -> int:
         r"""Returns the output dimension of the embeddings.
@@ -63,4 +75,6 @@ class SentenceTransformerEncoder(BaseEmbedding[str]):
         Returns:
             int: The dimensionality of the embeddings.
         """
-        return self.model.get_sentence_embedding_dimension()
+        output_dim = self.model.get_sentence_embedding_dimension()
+        assert isinstance(output_dim, int)
+        return output_dim
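A matching sketch for the updated encoder: extra keyword arguments are now forwarded to `SentenceTransformer` (the `device` kwarg here is just an illustrative pass-through; downloading the model requires network access):

```python
from camel.embeddings.sentence_transformers_embeddings import (
    SentenceTransformerEncoder,
)

# Extra kwargs are forwarded verbatim to SentenceTransformer(...).
encoder = SentenceTransformerEncoder(
    model_name="intfloat/e5-large-v2",
    device="cpu",  # illustrative kwarg handled by SentenceTransformer
)

vectors = encoder.embed_list(["query: what is CAMEL?"])
assert len(vectors[0]) == encoder.get_output_dim()
```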