camel-ai 0.1.5.5__tar.gz → 0.1.5.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of camel-ai has been flagged as potentially problematic.

Files changed (174)
  1. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/PKG-INFO +43 -3
  2. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/README.md +36 -1
  3. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/__init__.py +1 -1
  4. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/chat_agent.py +3 -3
  5. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/critic_agent.py +1 -1
  6. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/deductive_reasoner_agent.py +4 -4
  7. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/embodied_agent.py +1 -1
  8. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/knowledge_graph_agent.py +13 -17
  9. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/role_assignment_agent.py +1 -1
  10. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/search_agent.py +4 -5
  11. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/task_agent.py +5 -6
  12. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/__init__.py +15 -0
  13. camel_ai-0.1.5.9/camel/configs/gemini_config.py +98 -0
  14. camel_ai-0.1.5.9/camel/configs/groq_config.py +119 -0
  15. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/litellm_config.py +1 -1
  16. camel_ai-0.1.5.9/camel/configs/mistral_config.py +81 -0
  17. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/ollama_config.py +1 -1
  18. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/openai_config.py +1 -1
  19. camel_ai-0.1.5.9/camel/configs/vllm_config.py +103 -0
  20. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/zhipuai_config.py +1 -1
  21. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/embeddings/__init__.py +2 -0
  22. camel_ai-0.1.5.9/camel/embeddings/mistral_embedding.py +89 -0
  23. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/interpreters/__init__.py +2 -0
  24. camel_ai-0.1.5.9/camel/interpreters/ipython_interpreter.py +167 -0
  25. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/__init__.py +10 -0
  26. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/anthropic_model.py +7 -2
  27. camel_ai-0.1.5.9/camel/models/azure_openai_model.py +152 -0
  28. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/base_model.py +9 -2
  29. camel_ai-0.1.5.9/camel/models/gemini_model.py +215 -0
  30. camel_ai-0.1.5.9/camel/models/groq_model.py +131 -0
  31. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/litellm_model.py +26 -4
  32. camel_ai-0.1.5.9/camel/models/mistral_model.py +169 -0
  33. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/model_factory.py +33 -5
  34. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/ollama_model.py +21 -2
  35. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/open_source_model.py +11 -3
  36. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/openai_model.py +7 -2
  37. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/stub_model.py +4 -4
  38. camel_ai-0.1.5.9/camel/models/vllm_model.py +138 -0
  39. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/zhipuai_model.py +7 -4
  40. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/__init__.py +2 -2
  41. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/task_prompt_template.py +4 -4
  42. camel_ai-0.1.5.5/camel/prompts/descripte_video_prompt.py → camel_ai-0.1.5.9/camel/prompts/video_description_prompt.py +1 -1
  43. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/retrievers/auto_retriever.py +2 -0
  44. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/graph_storages/neo4j_graph.py +5 -0
  45. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/__init__.py +23 -15
  46. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/toolkits/base.py +1 -1
  47. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/toolkits/code_execution.py +1 -1
  48. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/toolkits/github_toolkit.py +3 -2
  49. camel_ai-0.1.5.9/camel/toolkits/google_maps_toolkit.py +367 -0
  50. camel_ai-0.1.5.9/camel/toolkits/math_toolkit.py +79 -0
  51. camel_ai-0.1.5.9/camel/toolkits/open_api_toolkit.py +548 -0
  52. camel_ai-0.1.5.9/camel/toolkits/retrieval_toolkit.py +76 -0
  53. camel_ai-0.1.5.9/camel/toolkits/search_toolkit.py +326 -0
  54. camel_ai-0.1.5.9/camel/toolkits/slack_toolkit.py +308 -0
  55. camel_ai-0.1.5.9/camel/toolkits/twitter_toolkit.py +522 -0
  56. camel_ai-0.1.5.9/camel/toolkits/weather_toolkit.py +173 -0
  57. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/types/enums.py +163 -30
  58. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/utils/__init__.py +4 -0
  59. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/utils/async_func.py +1 -1
  60. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/utils/token_counting.py +182 -40
  61. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/pyproject.toml +23 -3
  62. camel_ai-0.1.5.5/camel/functions/google_maps_function.py +0 -335
  63. camel_ai-0.1.5.5/camel/functions/math_functions.py +0 -61
  64. camel_ai-0.1.5.5/camel/functions/open_api_function.py +0 -508
  65. camel_ai-0.1.5.5/camel/functions/retrieval_functions.py +0 -61
  66. camel_ai-0.1.5.5/camel/functions/search_functions.py +0 -298
  67. camel_ai-0.1.5.5/camel/functions/slack_functions.py +0 -286
  68. camel_ai-0.1.5.5/camel/functions/twitter_function.py +0 -479
  69. camel_ai-0.1.5.5/camel/functions/weather_functions.py +0 -144
  70. camel_ai-0.1.5.5/camel/toolkits/__init__.py +0 -23
  71. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/__init__.py +0 -0
  72. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/base.py +0 -0
  73. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/tool_agents/__init__.py +0 -0
  74. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/tool_agents/base.py +0 -0
  75. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  76. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/anthropic_config.py +0 -0
  77. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/base_config.py +0 -0
  78. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/embeddings/base.py +0 -0
  79. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/embeddings/openai_embedding.py +0 -0
  80. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
  81. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/embeddings/vlm_embedding.py +0 -0
  82. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/generators.py +0 -0
  83. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/human.py +0 -0
  84. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/interpreters/base.py +0 -0
  85. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/interpreters/docker_interpreter.py +0 -0
  86. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/interpreters/internal_python_interpreter.py +0 -0
  87. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/interpreters/interpreter_error.py +0 -0
  88. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/interpreters/subprocess_interpreter.py +0 -0
  89. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/loaders/__init__.py +0 -0
  90. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/loaders/base_io.py +0 -0
  91. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/loaders/jina_url_reader.py +0 -0
  92. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/loaders/unstructured_io.py +0 -0
  93. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/__init__.py +0 -0
  94. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/agent_memories.py +0 -0
  95. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/base.py +0 -0
  96. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/blocks/__init__.py +0 -0
  97. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/blocks/chat_history_block.py +0 -0
  98. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/blocks/vectordb_block.py +0 -0
  99. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/context_creators/__init__.py +0 -0
  100. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/context_creators/score_based.py +0 -0
  101. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/memories/records.py +0 -0
  102. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/messages/__init__.py +0 -0
  103. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/messages/base.py +0 -0
  104. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/messages/func_message.py +0 -0
  105. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/nemotron_model.py +0 -0
  106. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/models/openai_audio_models.py +0 -0
  107. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/ai_society.py +0 -0
  108. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/base.py +0 -0
  109. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/code.py +0 -0
  110. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/evaluation.py +0 -0
  111. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/generate_text_embedding_data.py +0 -0
  112. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/misalignment.py +0 -0
  113. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/object_recognition.py +0 -0
  114. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/prompt_templates.py +0 -0
  115. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/role_description_prompt_template.py +0 -0
  116. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/solution_extraction.py +0 -0
  117. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/prompts/translation.py +0 -0
  118. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/responses/__init__.py +0 -0
  119. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/responses/agent_responses.py +0 -0
  120. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/retrievers/__init__.py +0 -0
  121. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/retrievers/base.py +0 -0
  122. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/retrievers/bm25_retriever.py +0 -0
  123. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/retrievers/cohere_rerank_retriever.py +0 -0
  124. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/retrievers/vector_retriever.py +0 -0
  125. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/societies/__init__.py +0 -0
  126. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/societies/babyagi_playing.py +0 -0
  127. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/societies/role_playing.py +0 -0
  128. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/__init__.py +0 -0
  129. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/graph_storages/__init__.py +0 -0
  130. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/graph_storages/base.py +0 -0
  131. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/graph_storages/graph_element.py +0 -0
  132. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/key_value_storages/__init__.py +0 -0
  133. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/key_value_storages/base.py +0 -0
  134. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/key_value_storages/in_memory.py +0 -0
  135. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/key_value_storages/json.py +0 -0
  136. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/key_value_storages/redis.py +0 -0
  137. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/vectordb_storages/__init__.py +0 -0
  138. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/vectordb_storages/base.py +0 -0
  139. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/vectordb_storages/milvus.py +0 -0
  140. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/storages/vectordb_storages/qdrant.py +0 -0
  141. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/terminators/__init__.py +0 -0
  142. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/terminators/base.py +0 -0
  143. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/terminators/response_terminator.py +0 -0
  144. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/terminators/token_limit_terminator.py +0 -0
  145. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/biztoc/__init__.py +0 -0
  146. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
  147. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
  148. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/coursera/__init__.py +0 -0
  149. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
  150. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
  151. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
  152. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/klarna/__init__.py +0 -0
  153. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
  154. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
  155. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
  156. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/outschool/__init__.py +0 -0
  157. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
  158. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
  159. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
  160. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
  161. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
  162. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/security_config.py +0 -0
  163. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/speak/__init__.py +0 -0
  164. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/speak/openapi.yaml +0 -0
  165. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
  166. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
  167. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
  168. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
  169. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
  170. {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.9/camel/toolkits}/openai_function.py +0 -0
  171. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/types/__init__.py +0 -0
  172. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/types/openai_types.py +0 -0
  173. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/utils/commons.py +0 -0
  174. {camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/utils/constants.py +0 -0
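Beyond the version bump, the biggest structural change in this release is the rename of `camel/functions` to `camel/toolkits` (entries 45, 62-70, and 145-170 above), with the old `*_functions.py` modules reborn as `*_toolkit.py` classes. Below is a minimal sketch of the import change; `OpenAIFunction` and `get_openai_tool_schema()` are taken from the chat_agent.py and groq_config.py diffs further down, while the wrapped `add` function is a hypothetical example of your own code:

```python
# Pre-0.1.5.9 import (now removed):
#   from camel.functions import OpenAIFunction
# 0.1.5.9 import, per the chat_agent.py and litellm_config.py diffs below:
from camel.toolkits import OpenAIFunction


def add(a: int, b: int) -> int:
    r"""Add two numbers and return the sum."""
    return a + b


# Wrap a plain Python function so agents can call it as a tool; the
# schema accessor name appears verbatim in the groq_config.py diff below.
add_tool = OpenAIFunction(add)
print(add_tool.get_openai_tool_schema())
```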
{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.5
+Version: 0.1.5.9
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -36,11 +36,16 @@ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: groq (>=0.5.0,<0.6.0)
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: ipykernel (>=6.0.0,<7.0.0)
 Requires-Dist: jsonschema (>=4,<5)
+Requires-Dist: jupyter_client (>=8.6.2,<9.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
-Requires-Dist: milvus-lite (>=2.4.0,<=2.4.7)
+Requires-Dist: mistral-common (>=1.3.3,<2.0.0) ; extra == "model-platforms" or extra == "all"
+Requires-Dist: mistralai (>=0.4.2,<0.5.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: mock (>=5,<6) ; extra == "test"
 Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -190,7 +195,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.9 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -316,6 +321,41 @@ Please note that the environment variable is session-specific. If you open a new
 print(assistant_response.msg.content)
 ```
 
+## Use Open-Source Models as Backends (ex. using vLLM to set Phi-3 locally)
+- [Install vLLM](https://docs.vllm.ai/en/latest/getting_started/installation.html)
+- After setting up vLLM, start an OpenAI compatible server for example by
+```bash
+python -m vllm.entrypoints.openai.api_server --model microsoft/Phi-3-mini-4k-instruct --api-key vllm --dtype bfloat16
+```
+- Create and run following script (more details please refer to this [example](https://github.com/camel-ai/camel/blob/master/examples/models/vllm_model_example.py))
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+vllm_model = ModelFactory.create(
+    model_platform=ModelPlatformType.VLLM,
+    model_type="microsoft/Phi-3-mini-4k-instruct",
+    url="http://localhost:8000/v1",
+    model_config_dict={"temperature": 0.0},
+    api_key="vllm",
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=vllm_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User",
+    content="Say hi to CAMEL AI",
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
+
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
 |----------------|-----------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
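The dependency hunks above show the new model backends arriving behind the `model-platforms` extra (google-generativeai, mistralai, mistral-common), while `groq` and `ipykernel` land as unconditional dependencies and `milvus-lite` is dropped. A hedged install sketch, using only the extras named in the Requires-Dist lines above:

```bash
# Pull in every optional dependency group (the "all" extra referenced above).
pip install 'camel-ai[all]==0.1.5.9'

# Or only the LLM-backend integrations:
pip install 'camel-ai[model-platforms]==0.1.5.9'
```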

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/README.md
@@ -110,7 +110,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.9 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -236,6 +236,41 @@ Please note that the environment variable is session-specific. If you open a new
 print(assistant_response.msg.content)
 ```
 
+## Use Open-Source Models as Backends (ex. using vLLM to set Phi-3 locally)
+- [Install vLLM](https://docs.vllm.ai/en/latest/getting_started/installation.html)
+- After setting up vLLM, start an OpenAI compatible server for example by
+```bash
+python -m vllm.entrypoints.openai.api_server --model microsoft/Phi-3-mini-4k-instruct --api-key vllm --dtype bfloat16
+```
+- Create and run following script (more details please refer to this [example](https://github.com/camel-ai/camel/blob/master/examples/models/vllm_model_example.py))
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+vllm_model = ModelFactory.create(
+    model_platform=ModelPlatformType.VLLM,
+    model_type="microsoft/Phi-3-mini-4k-instruct",
+    url="http://localhost:8000/v1",
+    model_config_dict={"temperature": 0.0},
+    api_key="vllm",
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=vllm_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User",
+    content="Say hi to CAMEL AI",
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
+
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
 |----------------|-----------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/__init__.py
@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
-__version__ = '0.1.5.5'
+__version__ = '0.1.5.9'
 
 __all__ = [
     '__version__',

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/chat_agent.py
@@ -42,8 +42,8 @@ from camel.utils import get_model_encoding
 if TYPE_CHECKING:
     from openai import Stream
 
-    from camel.functions import OpenAIFunction
     from camel.terminators import ResponseTerminator
+    from camel.toolkits import OpenAIFunction
 
 
 @dataclass(frozen=True)
@@ -82,7 +82,7 @@ class ChatAgent(BaseAgent):
         system_message (BaseMessage): The system message for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         api_key (str, optional): The API key for authenticating with the
             LLM service. Only OpenAI and Anthropic model supported (default:
             :obj:`None`)
@@ -127,7 +127,7 @@ class ChatAgent(BaseAgent):
            if model is not None
            else ModelFactory.create(
                model_platform=ModelPlatformType.OPENAI,
-               model_type=ModelType.GPT_3_5_TURBO,
+               model_type=ModelType.GPT_4O_MINI,
                model_config_dict=ChatGPTConfig().__dict__,
                api_key=self._api_key,
            )

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/critic_agent.py
@@ -33,7 +33,7 @@ class CriticAgent(ChatAgent):
            agent.
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`OpenAIModel` with
-           `GPT_3_5_TURBO`)
+           `GPT_4O_MINI`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`6`)

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/deductive_reasoner_agent.py
@@ -35,7 +35,7 @@ class DeductiveReasonerAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
     """
 
     def __init__(
@@ -126,7 +126,7 @@ $B$.
 - Direct Path Analysis: What are the immediate and direct conditions
 required to move from $A$ to $B$?
 - Intermediate States: Are there states between $A$ and $B$ that must be
-transversed or can be used to make the transition smoother or more
+traversed or can be used to make the transition smoother or more
 efficient? If yes, what is the content?
 - Constraints & Limitations: Identify potential barriers or restrictions
 in moving from $A$ to $B$. These can be external (e.g., environmental
@@ -244,7 +244,7 @@ square brackets)
        print(f"Message content:\n{msg.content}")

        # Extract the conditions from the message
-       condistions_dict = {
+       conditions_dict = {
            f"condition {i}": cdt.replace("<", "")
            .replace(">", "")
            .strip()
@@ -281,7 +281,7 @@ square brackets)
        conditions_and_quality_json: Dict[
            str, Union[List[str], Dict[str, str]]
        ] = {}
-       conditions_and_quality_json["conditions"] = condistions_dict
+       conditions_and_quality_json["conditions"] = conditions_dict
        conditions_and_quality_json["labels"] = labels
        conditions_and_quality_json["evaluate_quality"] = quality


{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/embodied_agent.py
@@ -35,7 +35,7 @@ class EmbodiedAgent(ChatAgent):
         system_message (BaseMessage): The system message for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`None`)

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/knowledge_graph_agent.py
@@ -78,17 +78,16 @@ Expected Output:
 
 Nodes:
 
-Node(id='John', type='Person', properties={'agent_generated'})
-Node(id='XYZ Corporation', type='Organization', properties={'agent_generated'})
-Node(id='New York City', type='Location', properties={'agent_generated'})
+Node(id='John', type='Person')
+Node(id='XYZ Corporation', type='Organization')
+Node(id='New York City', type='Location')
 
 Relationships:
 
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
-Corporation', type='Organization'), type='WorksAt', properties=
-{'agent_generated'})
+Corporation', type='Organization'), type='WorksAt')
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
-type='Location'), type='ResidesIn', properties={'agent_generated'})
+type='Location'), type='ResidesIn')
 
 ===== TASK =====
 Please extracts nodes and relationships from given content and structures them
@@ -116,14 +115,14 @@ class KnowledgeGraphAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
     """
     system_message = BaseMessage(
        role_name="Graphify",
        role_type=RoleType.ASSISTANT,
        meta_dict=None,
        content="Your mission is to transform unstructured content "
-       "intostructured graph data. Extract nodes and relationships with "
+       "into structured graph data. Extract nodes and relationships with "
        "precision, and let the connections unfold. Your graphs will "
        "illuminate the hidden connections within the chaos of "
        "information.",
@@ -211,11 +210,10 @@ class KnowledgeGraphAgent(ChatAgent):
        import re

        # Regular expressions to extract nodes and relationships
-       node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
+       node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
        rel_pattern = (
            r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
-           r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
-           r"properties=\{(.*?)\}\)"
+           r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
        )

        nodes = {}
@@ -223,8 +221,8 @@ class KnowledgeGraphAgent(ChatAgent):

        # Extract nodes
        for match in re.finditer(node_pattern, input_string):
-           id, type, properties = match.groups()
-           properties = eval(properties)
+           id, type = match.groups()
+           properties = {'source': 'agent_created'}
            if id not in nodes:
                node = Node(id, type, properties)
                if self._validate_node(node):
@@ -232,10 +230,8 @@ class KnowledgeGraphAgent(ChatAgent):

        # Extract relationships
        for match in re.finditer(rel_pattern, input_string):
-           subj_id, subj_type, obj_id, obj_type, rel_type, properties_str = (
-               match.groups()
-           )
-           properties = eval(properties_str)
+           subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
+           properties = {'source': 'agent_created'}
            if subj_id in nodes and obj_id in nodes:
                subj = nodes[subj_id]
                obj = nodes[obj_id]
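The knowledge_graph_agent.py hunks above are more than cosmetic: the old parser `eval()`ed a `properties` blob captured straight from the LLM's output, while the new one drops the capture group and assigns a fixed `{'source': 'agent_created'}` dict, closing an arbitrary-code-execution path. A standalone sketch of the new parse, using the node pattern and example output from the hunks above:

```python
import re

# Node pattern copied from the 0.1.5.9 hunk above.
node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"

llm_output = """Nodes:
Node(id='John', type='Person')
Node(id='XYZ Corporation', type='Organization')
"""

for match in re.finditer(node_pattern, llm_output):
    node_id, node_type = match.groups()
    # Fixed, safe default instead of eval() on model-controlled text.
    properties = {'source': 'agent_created'}
    print(node_id, node_type, properties)
```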

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/role_assignment_agent.py
@@ -27,7 +27,7 @@ class RoleAssignmentAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
 
     Attributes:
         role_assignment_prompt (TextPrompt): A prompt for the agent to generate

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/search_agent.py
@@ -26,10 +26,9 @@ class SearchAgent(ChatAgent):
         relevance of an answer.
 
     Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_4O_MINI`)
     """
 
     def __init__(
@@ -76,7 +75,7 @@ class SearchAgent(ChatAgent):
            result = self.step(user_msg).msg.content
            results += result + "\n"

-       # Final summarise
+       # Final summarization
        final_prompt = TextPrompt(
            '''Here are some summarized texts which split from one text. Using
            the information to answer the question. If can't find the answer,

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/agents/task_agent.py
@@ -32,7 +32,7 @@ class TaskSpecifyAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         task_type (TaskType, optional): The type of task for which to generate
             a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
         task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
@@ -100,7 +100,6 @@ class TaskSpecifyAgent(ChatAgent):
 
        if meta_dict is not None:
            task_specify_prompt = task_specify_prompt.format(**meta_dict)
-
        task_msg = BaseMessage.make_user_message(
            role_name="Task Specifier", content=task_specify_prompt
        )
@@ -127,7 +126,7 @@ class TaskPlannerAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
     """
@@ -202,7 +201,7 @@ class TaskCreationAgent(ChatAgent):
             perform the task.
         model (BaseModelBackend, optional): The LLM backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous
@@ -234,7 +233,7 @@ The result must be a numbered list in the format:
 #. Third Task
 
 You can only give me up to {max_task_num} tasks at a time. \
-Each task shoud be concise, concrete and doable for a {role_name}.
+Each task should be concise, concrete and doable for a {role_name}.
 You should make task plan and not ask me questions.
 If you think no new tasks are needed right now, write "No tasks to add."
 Now start to give me new tasks one by one. No more than three tasks.
@@ -313,7 +312,7 @@ class TaskPrioritizationAgent(ChatAgent):
             perform the task.
         model (BaseModelBackend, optional): The LLM backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/__init__.py
@@ -13,13 +13,20 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
+from .gemini_config import (
+    Gemini_API_PARAMS,
+    GeminiConfig,
+)
+from .groq_config import GROQ_API_PARAMS, GroqConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import (
     OPENAI_API_PARAMS,
     ChatGPTConfig,
     OpenSourceConfig,
 )
+from .vllm_config import VLLM_API_PARAMS, VLLMConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
 __all__ = [
@@ -28,6 +35,8 @@ __all__ = [
     'OPENAI_API_PARAMS',
     'AnthropicConfig',
     'ANTHROPIC_API_PARAMS',
+    'GROQ_API_PARAMS',
+    'GroqConfig',
     'OpenSourceConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
@@ -35,4 +44,10 @@ __all__ = [
     'OLLAMA_API_PARAMS',
     'ZhipuAIConfig',
     'ZHIPUAI_API_PARAMS',
+    'GeminiConfig',
+    'Gemini_API_PARAMS',
+    'VLLMConfig',
+    'VLLM_API_PARAMS',
+    'MistralConfig',
+    'MISTRAL_API_PARAMS',
 ]
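After this release, the public surface of `camel.configs` includes four new config families alongside the existing ones; a quick import check based on the `__all__` entries above:

```python
from camel.configs import (
    GROQ_API_PARAMS,
    MISTRAL_API_PARAMS,
    VLLM_API_PARAMS,
    Gemini_API_PARAMS,
    GeminiConfig,
    GroqConfig,
    MistralConfig,
    VLLMConfig,
)

# Each *_API_PARAMS set lists the keyword names its backend accepts.
print(sorted(GROQ_API_PARAMS))
```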

camel_ai-0.1.5.9/camel/configs/gemini_config.py
@@ -0,0 +1,98 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+from collections.abc import Iterable
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, Optional
+
+from camel.configs.base_config import BaseConfig
+
+if TYPE_CHECKING:
+    from google.generativeai.protos import Schema
+    from google.generativeai.types.content_types import (
+        FunctionLibraryType,
+        ToolConfigType,
+    )
+    from google.generativeai.types.helper_types import RequestOptionsType
+    from google.generativeai.types.safety_types import SafetySettingOptions
+
+
+@dataclass(frozen=True)
+class GeminiConfig(BaseConfig):
+    r"""A simple dataclass used to configure the generation parameters of
+    `GenerativeModel.generate_content`.
+
+    Args:
+        candidate_count (int, optional): Number of responses to return.
+        stop_sequences (Iterable[str], optional): The set of character
+            sequences (up to 5) that will stop output generation. If specified
+            the API will stop at the first appearance of a stop sequence.
+            The stop sequence will not be included as part of the response.
+        max_output_tokens (int, optional): The maximum number of tokens to
+            include in a candidate. If unset, this will default to
+            output_token_limit specified in the model's specification.
+        temperature (float, optional): Controls the randomness of the output.
+            Note: The default value varies by model, see the
+            `Model.temperature` attribute of the `Model` returned
+            the `genai.get_model` function. Values can range from [0.0,1.0],
+            inclusive. A value closer to 1.0 will produce responses that are
+            more varied and creative, while a value closer to 0.0 will
+            typically result in more straightforward responses from the model.
+        top_p (int, optional): The maximum cumulative probability of tokens to
+            consider when sampling. The model uses combined Top-k and nucleus
+            sampling. Tokens are sorted based on their assigned probabilities
+            so that only the most likely tokens are considered. Top-k sampling
+            directly limits the maximum number of tokens to consider, while
+            Nucleus sampling limits number of tokens
+            based on the cumulative probability. Note: The default value varies
+            by model, see the `Model.top_p` attribute of the `Model` returned
+            the `genai.get_model` function.
+        top_k (int, optional): The maximum number of tokens to consider when
+            sampling. The model uses combined Top-k and nucleus sampling.Top-k
+            sampling considers the set of `top_k` most probable tokens.
+            Defaults to 40. Note: The default value varies by model, see the
+            `Model.top_k` attribute of the `Model` returned the
+            `genai.get_model` function.
+        response_mime_type (str, optional): Output response mimetype of the
+            generated candidate text. Supported mimetype:
+            `text/plain`: (default) Text output.
+            `application/json`: JSON response in the candidates.
+        response_schema (Schema, optional): Specifies the format of the
+            JSON requested if response_mime_type is `application/json`.
+        safety_settings (SafetySettingOptions, optional):
+            Overrides for the model's safety settings.
+        tools (FunctionLibraryType, optional):
+            `protos.Tools` more info coming soon.
+        tool_config (ToolConfigType, optional):
+            more info coming soon.
+        request_options (RequestOptionsType, optional):
+            Options for the request.
+    """
+
+    candidate_count: Optional[int] = None
+    stop_sequences: Optional[Iterable[str]] = None
+    max_output_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    response_mime_type: Optional[str] = None
+    response_schema: Optional['Schema'] = None
+    safety_settings: Optional['SafetySettingOptions'] = None
+    tools: Optional['FunctionLibraryType'] = None
+    tool_config: Optional['ToolConfigType'] = None
+    request_options: Optional['RequestOptionsType'] = None
+
+
+Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
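A minimal sketch of how the new `GeminiConfig` might be used; the None-filtering step is illustrative rather than the library's own plumbing (the actual backend lives in the new gemini_model.py, not shown in full here):

```python
from dataclasses import asdict

from camel.configs import Gemini_API_PARAMS, GeminiConfig

config = GeminiConfig(temperature=0.2, max_output_tokens=256)

# Unset fields stay None on the frozen dataclass; drop them before
# forwarding to GenerativeModel.generate_content.
generation_kwargs = {k: v for k, v in asdict(config).items() if v is not None}
print(generation_kwargs)             # {'max_output_tokens': 256, 'temperature': 0.2}
print('top_k' in Gemini_API_PARAMS)  # True
```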

camel_ai-0.1.5.9/camel/configs/groq_config.py
@@ -0,0 +1,119 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, Optional, Sequence
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+if TYPE_CHECKING:
+    from camel.toolkits import OpenAIFunction
+
+
+@dataclass(frozen=True)
+class GroqConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility.
+
+    Reference: https://console.groq.com/docs/openai
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`1`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`""`)
+        tools (list[OpenAIFunction], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: float = 0.2  # openai default: 1.0
+    top_p: float = 1.0
+    n: int = 1
+    stream: bool = False
+    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+    max_tokens: int | NotGiven = NOT_GIVEN
+    presence_penalty: float = 0.0
+    response_format: dict | NotGiven = NOT_GIVEN
+    frequency_penalty: float = 0.0
+    user: str = ""
+    tools: Optional[list[OpenAIFunction]] = None
+    tool_choice: Optional[dict[str, str] | str] = "none"
+
+    def __post_init__(self):
+        if self.tools is not None:
+            object.__setattr__(
+                self,
+                'tools',
+                [tool.get_openai_tool_schema() for tool in self.tools],
+            )
+
+
+GROQ_API_PARAMS = {param for param in asdict(GroqConfig()).keys()}
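And a matching sketch for `GroqConfig`; the validation idiom is an assumption about how backends typically use the `*_API_PARAMS` sets, not code taken from the new groq_model.py:

```python
from dataclasses import asdict

from camel.configs import GROQ_API_PARAMS, GroqConfig

config = GroqConfig(temperature=0.0, max_tokens=512)

# A config dict built from the dataclass can only contain accepted
# Groq parameter names, so this check always passes.
model_config_dict = asdict(config)
assert set(model_config_dict) <= GROQ_API_PARAMS
```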

{camel_ai-0.1.5.5 → camel_ai-0.1.5.9}/camel/configs/litellm_config.py
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Union
 from camel.configs.base_config import BaseConfig
 
 if TYPE_CHECKING:
-    from camel.functions import OpenAIFunction
+    from camel.toolkits import OpenAIFunction
 
 
 @dataclass(frozen=True)