camel-ai 0.1.6.6__tar.gz → 0.1.6.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; see the registry's advisory page for more details.

Files changed (195) hide show
  1. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/PKG-INFO +19 -10
  2. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/README.md +15 -6
  3. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/__init__.py +1 -1
  4. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/chat_agent.py +44 -9
  5. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/critic_agent.py +0 -1
  6. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/__init__.py +9 -0
  7. camel_ai-0.1.6.8/camel/configs/reka_config.py +74 -0
  8. camel_ai-0.1.6.8/camel/configs/samba_config.py +50 -0
  9. camel_ai-0.1.6.8/camel/configs/togetherai_config.py +107 -0
  10. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/__init__.py +6 -0
  11. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/groq_model.py +5 -5
  12. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/litellm_model.py +1 -1
  13. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/model_factory.py +12 -0
  14. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/ollama_model.py +6 -4
  15. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/openai_compatibility_model.py +3 -3
  16. camel_ai-0.1.6.8/camel/models/reka_model.py +232 -0
  17. camel_ai-0.1.6.8/camel/models/samba_model.py +291 -0
  18. camel_ai-0.1.6.8/camel/models/togetherai_model.py +148 -0
  19. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/vllm_model.py +7 -5
  20. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/zhipuai_model.py +2 -2
  21. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/retrievers/auto_retriever.py +2 -27
  22. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/societies/babyagi_playing.py +0 -3
  23. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/societies/role_playing.py +18 -2
  24. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/object_storages/amazon_s3.py +12 -10
  25. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/__init__.py +3 -0
  26. camel_ai-0.1.6.8/camel/toolkits/linkedin_toolkit.py +230 -0
  27. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/types/enums.py +64 -6
  28. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/utils/__init__.py +2 -0
  29. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/utils/commons.py +22 -0
  30. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/pyproject.toml +9 -6
  31. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/__init__.py +0 -0
  32. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/base.py +0 -0
  33. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/deductive_reasoner_agent.py +0 -0
  34. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/embodied_agent.py +0 -0
  35. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/knowledge_graph_agent.py +0 -0
  36. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/role_assignment_agent.py +0 -0
  37. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/search_agent.py +0 -0
  38. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/task_agent.py +0 -0
  39. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/tool_agents/__init__.py +0 -0
  40. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/tool_agents/base.py +0 -0
  41. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  42. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/anthropic_config.py +0 -0
  43. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/base_config.py +0 -0
  44. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/gemini_config.py +0 -0
  45. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/groq_config.py +0 -0
  46. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/litellm_config.py +0 -0
  47. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/mistral_config.py +0 -0
  48. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/ollama_config.py +0 -0
  49. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/openai_config.py +0 -0
  50. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/vllm_config.py +0 -0
  51. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/configs/zhipuai_config.py +0 -0
  52. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/embeddings/__init__.py +0 -0
  53. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/embeddings/base.py +0 -0
  54. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/embeddings/mistral_embedding.py +0 -0
  55. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/embeddings/openai_embedding.py +0 -0
  56. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
  57. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/embeddings/vlm_embedding.py +0 -0
  58. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/generators.py +0 -0
  59. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/human.py +0 -0
  60. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/interpreters/__init__.py +0 -0
  61. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/interpreters/base.py +0 -0
  62. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/interpreters/docker_interpreter.py +0 -0
  63. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/interpreters/internal_python_interpreter.py +0 -0
  64. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/interpreters/interpreter_error.py +0 -0
  65. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/interpreters/ipython_interpreter.py +0 -0
  66. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/interpreters/subprocess_interpreter.py +0 -0
  67. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/loaders/__init__.py +0 -0
  68. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/loaders/base_io.py +0 -0
  69. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/loaders/firecrawl_reader.py +0 -0
  70. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/loaders/jina_url_reader.py +0 -0
  71. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/loaders/unstructured_io.py +0 -0
  72. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/__init__.py +0 -0
  73. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/agent_memories.py +0 -0
  74. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/base.py +0 -0
  75. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/blocks/__init__.py +0 -0
  76. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/blocks/chat_history_block.py +0 -0
  77. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/blocks/vectordb_block.py +0 -0
  78. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/context_creators/__init__.py +0 -0
  79. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/context_creators/score_based.py +0 -0
  80. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/memories/records.py +0 -0
  81. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/messages/__init__.py +0 -0
  82. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/messages/base.py +0 -0
  83. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/messages/func_message.py +0 -0
  84. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/anthropic_model.py +0 -0
  85. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/azure_openai_model.py +0 -0
  86. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/base_model.py +0 -0
  87. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/gemini_model.py +0 -0
  88. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/mistral_model.py +0 -0
  89. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/nemotron_model.py +0 -0
  90. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/open_source_model.py +0 -0
  91. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/openai_audio_models.py +0 -0
  92. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/openai_model.py +0 -0
  93. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/models/stub_model.py +0 -0
  94. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/__init__.py +0 -0
  95. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/ai_society.py +0 -0
  96. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/base.py +0 -0
  97. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/code.py +0 -0
  98. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/evaluation.py +0 -0
  99. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/generate_text_embedding_data.py +0 -0
  100. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/image_craft.py +0 -0
  101. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/misalignment.py +0 -0
  102. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/multi_condition_image_craft.py +0 -0
  103. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/object_recognition.py +0 -0
  104. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/prompt_templates.py +0 -0
  105. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/role_description_prompt_template.py +0 -0
  106. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/solution_extraction.py +0 -0
  107. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/task_prompt_template.py +0 -0
  108. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/translation.py +0 -0
  109. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/prompts/video_description_prompt.py +0 -0
  110. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/responses/__init__.py +0 -0
  111. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/responses/agent_responses.py +0 -0
  112. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/retrievers/__init__.py +0 -0
  113. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/retrievers/base.py +0 -0
  114. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/retrievers/bm25_retriever.py +0 -0
  115. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/retrievers/cohere_rerank_retriever.py +0 -0
  116. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/retrievers/vector_retriever.py +0 -0
  117. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/societies/__init__.py +0 -0
  118. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/__init__.py +0 -0
  119. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/graph_storages/__init__.py +0 -0
  120. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/graph_storages/base.py +0 -0
  121. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/graph_storages/graph_element.py +0 -0
  122. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/graph_storages/neo4j_graph.py +0 -0
  123. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/key_value_storages/__init__.py +0 -0
  124. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/key_value_storages/base.py +0 -0
  125. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/key_value_storages/in_memory.py +0 -0
  126. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/key_value_storages/json.py +0 -0
  127. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/key_value_storages/redis.py +0 -0
  128. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/object_storages/__init__.py +0 -0
  129. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/object_storages/azure_blob.py +0 -0
  130. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/object_storages/base.py +0 -0
  131. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/object_storages/google_cloud.py +0 -0
  132. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/vectordb_storages/__init__.py +0 -0
  133. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/vectordb_storages/base.py +0 -0
  134. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/vectordb_storages/milvus.py +0 -0
  135. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/storages/vectordb_storages/qdrant.py +0 -0
  136. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/tasks/__init__.py +0 -0
  137. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/tasks/task.py +0 -0
  138. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/tasks/task_prompt.py +0 -0
  139. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/terminators/__init__.py +0 -0
  140. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/terminators/base.py +0 -0
  141. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/terminators/response_terminator.py +0 -0
  142. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/terminators/token_limit_terminator.py +0 -0
  143. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/base.py +0 -0
  144. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/code_execution.py +0 -0
  145. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/dalle_toolkit.py +0 -0
  146. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/github_toolkit.py +0 -0
  147. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/google_maps_toolkit.py +0 -0
  148. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/math_toolkit.py +0 -0
  149. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/biztoc/__init__.py +0 -0
  150. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/biztoc/ai-plugin.json +0 -0
  151. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/biztoc/openapi.yaml +0 -0
  152. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/coursera/__init__.py +0 -0
  153. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/coursera/openapi.yaml +0 -0
  154. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/create_qr_code/__init__.py +0 -0
  155. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/create_qr_code/openapi.yaml +0 -0
  156. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/klarna/__init__.py +0 -0
  157. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/klarna/openapi.yaml +0 -0
  158. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/nasa_apod/__init__.py +0 -0
  159. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/nasa_apod/openapi.yaml +0 -0
  160. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/outschool/__init__.py +0 -0
  161. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/outschool/ai-plugin.json +0 -0
  162. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/outschool/openapi.yaml +0 -0
  163. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/outschool/paths/__init__.py +0 -0
  164. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/outschool/paths/get_classes.py +0 -0
  165. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +0 -0
  166. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/security_config.py +0 -0
  167. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/speak/__init__.py +0 -0
  168. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/speak/openapi.yaml +0 -0
  169. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/web_scraper/__init__.py +0 -0
  170. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +0 -0
  171. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/web_scraper/openapi.yaml +0 -0
  172. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +0 -0
  173. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +0 -0
  174. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/open_api_toolkit.py +0 -0
  175. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/openai_function.py +0 -0
  176. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/retrieval_toolkit.py +0 -0
  177. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/search_toolkit.py +0 -0
  178. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/slack_toolkit.py +0 -0
  179. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/twitter_toolkit.py +0 -0
  180. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/toolkits/weather_toolkit.py +0 -0
  181. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/types/__init__.py +0 -0
  182. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/types/openai_types.py +0 -0
  183. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/utils/async_func.py +0 -0
  184. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/utils/constants.py +0 -0
  185. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/utils/token_counting.py +0 -0
  186. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/__init__.py +0 -0
  187. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/base.py +0 -0
  188. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/manager_node.py +0 -0
  189. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/role_playing_node.py +0 -0
  190. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/single_agent_node.py +0 -0
  191. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/task_channel.py +0 -0
  192. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/utils.py +0 -0
  193. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/worker_node.py +0 -0
  194. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/workforce.py +0 -0
  195. {camel_ai-0.1.6.6 → camel_ai-0.1.6.8}/camel/workforce/workforce_prompt.py +0 -0
@@ -1,15 +1,14 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: camel-ai
3
- Version: 0.1.6.6
3
+ Version: 0.1.6.8
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Home-page: https://www.camel-ai.org/
6
6
  License: Apache-2.0
7
7
  Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
8
8
  Author: CAMEL-AI.org
9
- Requires-Python: >=3.9.0,<3.12
9
+ Requires-Python: >=3.10.0,<3.12
10
10
  Classifier: License :: OSI Approved :: Apache Software License
11
11
  Classifier: Programming Language :: Python :: 3
12
- Classifier: Programming Language :: Python :: 3.9
13
12
  Classifier: Programming Language :: Python :: 3.10
14
13
  Classifier: Programming Language :: Python :: 3.11
15
14
  Provides-Extra: all
@@ -29,7 +28,7 @@ Requires-Dist: agentops (>=0.3.6,<0.4.0) ; extra == "tools" or extra == "all"
29
28
  Requires-Dist: anthropic (>=0.29.0,<0.30.0)
30
29
  Requires-Dist: azure-storage-blob (>=12.21.0,<13.0.0) ; extra == "object-storages" or extra == "all"
31
30
  Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
32
- Requires-Dist: boto3 (>=1.34.149,<2.0.0) ; extra == "object-storages" or extra == "all"
31
+ Requires-Dist: botocore (>=1.35.3,<2.0.0) ; extra == "object-storages" or extra == "all"
33
32
  Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
34
33
  Requires-Dist: colorama (>=0,<1)
35
34
  Requires-Dist: curl_cffi (==0.6.2)
@@ -75,6 +74,7 @@ Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
75
74
  Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
76
75
  Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
77
76
  Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
77
+ Requires-Dist: reka-api (>=3.0.8,<4.0.0) ; extra == "model-platforms" or extra == "all"
78
78
  Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
79
79
  Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
80
80
  Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
@@ -164,7 +164,7 @@ Some features require extra dependencies:
164
164
 
165
165
  Install `CAMEL` from source with poetry (Recommended):
166
166
  ```sh
167
- # Make sure your python version is later than 3.9
167
+ # Make sure your python version is later than 3.10
168
168
  # You can use pyenv to manage multiple python versions in your system
169
169
 
170
170
  # Clone github repo
@@ -187,22 +187,31 @@ poetry shell
187
187
  poetry install
188
188
 
189
189
  # Install CAMEL with all dependencies
190
- poetry install -E all # (Optional)
190
+ poetry install -E all # (Optional)
191
191
 
192
192
  # Exit the virtual environment
193
193
  exit
194
194
  ```
195
195
 
196
+ > [!TIP]
197
+ > If you encounter errors when running `poetry install`, it may be due to a cache-related problem. You can try running:
198
+ > ```sh
199
+ > poetry install --no-cache
200
+ > ```
201
+
202
+
203
+
204
+
196
205
  Install `CAMEL` from source with conda and pip:
197
206
  ```sh
198
207
  # Create a conda virtual environment
199
- conda create --name camel python=3.9
208
+ conda create --name camel python=3.10
200
209
 
201
210
  # Activate CAMEL conda environment
202
211
  conda activate camel
203
212
 
204
213
  # Clone github repo
205
- git clone -b v0.1.6.6 https://github.com/camel-ai/camel.git
214
+ git clone -b v0.1.6.8 https://github.com/camel-ai/camel.git
206
215
 
207
216
  # Change directory into project directory
208
217
  cd camel
@@ -415,8 +424,8 @@ We appreciate your interest in contributing to our open-source initiative. We pr
415
424
  ## Contact
416
425
  For more information please contact camel.ai.team@gmail.com.
417
426
 
418
- [python-image]: https://img.shields.io/badge/Python-3.9%2B-brightgreen.svg
419
- [python-url]: https://docs.python.org/3.9/
427
+ [python-image]: https://img.shields.io/badge/Python-3.10%2B-brightgreen.svg
428
+ [python-url]: https://docs.python.org/3.10/
420
429
  [pytest-image]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml/badge.svg
421
430
  [pytest-url]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml
422
431
  [docs-image]: https://img.shields.io/badge/Documentation-grey.svg?logo=github
@@ -72,7 +72,7 @@ Some features require extra dependencies:
72
72
 
73
73
  Install `CAMEL` from source with poetry (Recommended):
74
74
  ```sh
75
- # Make sure your python version is later than 3.9
75
+ # Make sure your python version is later than 3.10
76
76
  # You can use pyenv to manage multiple python versions in your system
77
77
 
78
78
  # Clone github repo
@@ -95,22 +95,31 @@ poetry shell
95
95
  poetry install
96
96
 
97
97
  # Install CAMEL with all dependencies
98
- poetry install -E all # (Optional)
98
+ poetry install -E all # (Optional)
99
99
 
100
100
  # Exit the virtual environment
101
101
  exit
102
102
  ```
103
103
 
104
+ > [!TIP]
105
+ > If you encounter errors when running `poetry install`, it may be due to a cache-related problem. You can try running:
106
+ > ```sh
107
+ > poetry install --no-cache
108
+ > ```
109
+
110
+
111
+
112
+
104
113
  Install `CAMEL` from source with conda and pip:
105
114
  ```sh
106
115
  # Create a conda virtual environment
107
- conda create --name camel python=3.9
116
+ conda create --name camel python=3.10
108
117
 
109
118
  # Activate CAMEL conda environment
110
119
  conda activate camel
111
120
 
112
121
  # Clone github repo
113
- git clone -b v0.1.6.6 https://github.com/camel-ai/camel.git
122
+ git clone -b v0.1.6.8 https://github.com/camel-ai/camel.git
114
123
 
115
124
  # Change directory into project directory
116
125
  cd camel
@@ -323,8 +332,8 @@ We appreciate your interest in contributing to our open-source initiative. We pr
323
332
  ## Contact
324
333
  For more information please contact camel.ai.team@gmail.com.
325
334
 
326
- [python-image]: https://img.shields.io/badge/Python-3.9%2B-brightgreen.svg
327
- [python-url]: https://docs.python.org/3.9/
335
+ [python-image]: https://img.shields.io/badge/Python-3.10%2B-brightgreen.svg
336
+ [python-url]: https://docs.python.org/3.10/
328
337
  [pytest-image]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml/badge.svg
329
338
  [pytest-url]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml
330
339
  [docs-image]: https://img.shields.io/badge/Documentation-grey.svg?logo=github
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.1.6.6'
15
+ __version__ = '0.1.6.8'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -14,6 +14,7 @@
14
14
  from __future__ import annotations
15
15
 
16
16
  import json
17
+ import logging
17
18
  from collections import defaultdict
18
19
  from typing import (
19
20
  TYPE_CHECKING,
@@ -61,6 +62,9 @@ if TYPE_CHECKING:
61
62
  from camel.terminators import ResponseTerminator
62
63
  from camel.toolkits import OpenAIFunction
63
64
 
65
+
66
+ logger = logging.getLogger(__name__)
67
+
64
68
  # AgentOps decorator setting
65
69
  try:
66
70
  import os
@@ -112,9 +116,6 @@ class ChatAgent(BaseAgent):
112
116
  model (BaseModelBackend, optional): The model backend to use for
113
117
  generating responses. (default: :obj:`OpenAIModel` with
114
118
  `GPT_4O_MINI`)
115
- api_key (str, optional): The API key for authenticating with the
116
- LLM service. Only OpenAI and Anthropic model supported (default:
117
- :obj:`None`)
118
119
  memory (AgentMemory, optional): The agent memory for managing chat
119
120
  messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
120
121
  (default: :obj:`None`)
@@ -138,7 +139,6 @@ class ChatAgent(BaseAgent):
138
139
  self,
139
140
  system_message: BaseMessage,
140
141
  model: Optional[BaseModelBackend] = None,
141
- api_key: Optional[str] = None,
142
142
  memory: Optional[AgentMemory] = None,
143
143
  message_window_size: Optional[int] = None,
144
144
  token_limit: Optional[int] = None,
@@ -150,7 +150,6 @@ class ChatAgent(BaseAgent):
150
150
  self.system_message = system_message
151
151
  self.role_name: str = system_message.role_name
152
152
  self.role_type: RoleType = system_message.role_type
153
- self._api_key = api_key
154
153
  self.model_backend: BaseModelBackend = (
155
154
  model
156
155
  if model is not None
@@ -158,7 +157,6 @@ class ChatAgent(BaseAgent):
158
157
  model_platform=ModelPlatformType.OPENAI,
159
158
  model_type=ModelType.GPT_4O_MINI,
160
159
  model_config_dict=ChatGPTConfig().as_dict(),
161
- api_key=self._api_key,
162
160
  )
163
161
  )
164
162
  self.output_language: Optional[str] = output_language
@@ -443,10 +441,22 @@ class ChatAgent(BaseAgent):
443
441
  for base_message_item in output_messages:
444
442
  base_message_item.content = str(info['tool_calls'][-1].result)
445
443
 
446
- return ChatAgentResponse(
444
+ chat_agent_response = ChatAgentResponse(
447
445
  msgs=output_messages, terminated=self.terminated, info=info
448
446
  )
449
447
 
448
+ # If the output result is single message, it will be
449
+ # automatically added to the memory.
450
+ if len(chat_agent_response.msgs) == 1:
451
+ self.record_message(chat_agent_response.msg)
452
+ else:
453
+ logger.warning(
454
+ "Multiple messages are available in `ChatAgentResponse`. "
455
+ "Please manually run the `record_message` function to "
456
+ "record the selected message."
457
+ )
458
+ return chat_agent_response
459
+
450
460
  async def step_async(
451
461
  self,
452
462
  input_message: BaseMessage,
@@ -569,10 +579,23 @@ class ChatAgent(BaseAgent):
569
579
  for base_message_item in output_messages:
570
580
  base_message_item.content = str(info['tool_calls'][0].result)
571
581
 
572
- return ChatAgentResponse(
582
+ chat_agent_response = ChatAgentResponse(
573
583
  msgs=output_messages, terminated=self.terminated, info=info
574
584
  )
575
585
 
586
+ # If the output result is single message, it will be
587
+ # automatically added to the memory.
588
+ if len(chat_agent_response.msgs) == 1:
589
+ self.record_message(chat_agent_response.msg)
590
+ else:
591
+ logger.warning(
592
+ "Multiple messages are presented in `chat_agent_response`. "
593
+ "Please manually call the `record_message` function to "
594
+ "record the chosen message."
595
+ )
596
+
597
+ return chat_agent_response
598
+
576
599
  def _add_tools_for_func_call(
577
600
  self,
578
601
  response: ChatCompletion,
@@ -742,7 +765,9 @@ class ChatAgent(BaseAgent):
742
765
  str(choice.finish_reason) for choice in response.choices
743
766
  ]
744
767
  usage = (
745
- response.usage.model_dump() if response.usage is not None else {}
768
+ self._safe_model_dump(response.usage)
769
+ if response.usage is not None
770
+ else {}
746
771
  )
747
772
  return (
748
773
  output_messages,
@@ -751,6 +776,16 @@ class ChatAgent(BaseAgent):
751
776
  response.id,
752
777
  )
753
778
 
779
+ def _safe_model_dump(self, obj):
780
+ # Check if the `model_dump` method exists (Pydantic v2)
781
+ if hasattr(obj, 'model_dump'):
782
+ return obj.model_dump()
783
+ # Fallback to `dict()` method (Pydantic v1)
784
+ elif hasattr(obj, 'dict'):
785
+ return obj.dict()
786
+ else:
787
+ raise TypeError("The object is not a Pydantic model")
788
+
754
789
  def handle_stream_response(
755
790
  self,
756
791
  response: Stream[ChatCompletionChunk],
@@ -123,7 +123,6 @@ class CriticAgent(ChatAgent):
123
123
  raise RuntimeError("Critic step failed.")
124
124
 
125
125
  critic_msg = critic_response.msg
126
- self.record_message(critic_msg)
127
126
  if self.verbose:
128
127
  print_text_animated(
129
128
  self.logger_color + "\n> Critic response: "
@@ -19,6 +19,9 @@ from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
19
19
  from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
20
20
  from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
21
21
  from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
22
+ from .reka_config import REKA_API_PARAMS, RekaConfig
23
+ from .samba_config import SAMBA_API_PARAMS, SambaConfig
24
+ from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
22
25
  from .vllm_config import VLLM_API_PARAMS, VLLMConfig
23
26
  from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
24
27
 
@@ -43,4 +46,10 @@ __all__ = [
43
46
  'VLLM_API_PARAMS',
44
47
  'MistralConfig',
45
48
  'MISTRAL_API_PARAMS',
49
+ 'RekaConfig',
50
+ 'REKA_API_PARAMS',
51
+ 'SambaConfig',
52
+ 'SAMBA_API_PARAMS',
53
+ 'TogetherAIConfig',
54
+ 'TOGETHERAI_API_PARAMS',
46
55
  ]
@@ -0,0 +1,74 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from typing import Any, Optional, Union
17
+
18
+ from camel.configs.base_config import BaseConfig
19
+
20
+
21
+ class RekaConfig(BaseConfig):
22
+ r"""Defines the parameters for generating chat completions using the
23
+ Reka API.
24
+
25
+ Reference: https://docs.reka.ai/api-reference/chat/create
26
+
27
+ Args:
28
+ temperature (Optional[float], optional): the temperature
29
+ to use for sampling, e.g. 0.5.
30
+ top_p (Optional[float], optional): the cumulative probability of
31
+ tokens to generate, e.g. 0.9. Defaults to None.
32
+ top_k (Optional[int], optional): Parameter which forces the model to
33
+ only consider the tokens with the `top_k` highest probabilities at
34
+ the next step. Defaults to 1024.
35
+ max_tokens (Optional[int], optional): the maximum number of tokens to
36
+ generate, e.g. 100. Defaults to None.
37
+ stop (Optional[Union[str,list[str]]]): Stop generation if this token
38
+ is detected. Or if one of these tokens is detected when providing
39
+ a string list.
40
+ seed (Optional[int], optional): the random seed to use for sampling, e.
41
+ g. 42. Defaults to None.
42
+ presence_penalty (float, optional): Number between :obj:`-2.0` and
43
+ :obj:`2.0`. Positive values penalize new tokens based on whether
44
+ they appear in the text so far, increasing the model's likelihood
45
+ to talk about new topics. See more information about frequency and
46
+ presence penalties. (default: :obj:`0.0`)
47
+ frequency_penalty (float, optional): Number between :obj:`-2.0` and
48
+ :obj:`2.0`. Positive values penalize new tokens based on their
49
+ existing frequency in the text so far, decreasing the model's
50
+ likelihood to repeat the same line verbatim. See more information
51
+ about frequency and presence penalties. (default: :obj:`0.0`)
52
+ use_search_engine (Optional[bool]): Whether to consider using search
53
+ engine to complete the request. Note that even if this is set to
54
+ `True`, the model might decide to not use search.
55
+ """
56
+
57
+ temperature: Optional[float] = None
58
+ top_p: Optional[float] = None
59
+ top_k: Optional[int] = None
60
+ max_tokens: Optional[int] = None
61
+ stop: Optional[Union[str, list[str]]] = None
62
+ seed: Optional[int] = None
63
+ frequency_penalty: float = 0.0
64
+ presence_penalty: float = 0.0
65
+ use_search_engine: Optional[bool] = False
66
+
67
+ def as_dict(self) -> dict[str, Any]:
68
+ config_dict = super().as_dict()
69
+ if "tools" in config_dict:
70
+ del config_dict["tools"] # Reka does not support tool calling
71
+ return config_dict
72
+
73
+
74
+ REKA_API_PARAMS = {param for param in RekaConfig().model_fields.keys()}
@@ -0,0 +1,50 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from typing import Any, Dict, Optional, Union
17
+
18
+ from camel.configs.base_config import BaseConfig
19
+
20
+
21
+ class SambaConfig(BaseConfig):
22
+ r"""Defines the parameters for generating chat completions using the
23
+ SambaNova API.
24
+
25
+ Args:
26
+ max_tokens (Optional[int], optional): the maximum number of tokens to
27
+ generate, e.g. 100. Defaults to `None`.
28
+ stop (Optional[Union[str,list[str]]]): Stop generation if this token
29
+ is detected. Or if one of these tokens is detected when providing
30
+ a string list. Defaults to `None`.
31
+ stream (Optional[bool]): If True, partial message deltas will be sent
32
+ as data-only server-sent events as they become available.
33
+ Currently SambaNova only support stream mode. Defaults to `True`.
34
+ stream_options (Optional[Dict]): Additional options for streaming.
35
+ Defaults to `{"include_usage": True}`.
36
+ """
37
+
38
+ max_tokens: Optional[int] = None
39
+ stop: Optional[Union[str, list[str]]] = None
40
+ stream: Optional[bool] = True
41
+ stream_options: Optional[Dict] = {"include_usage": True} # noqa: RUF012
42
+
43
+ def as_dict(self) -> dict[str, Any]:
44
+ config_dict = super().as_dict()
45
+ if "tools" in config_dict:
46
+ del config_dict["tools"] # SambaNova does not support tool calling
47
+ return config_dict
48
+
49
+
50
+ SAMBA_API_PARAMS = {param for param in SambaConfig().model_fields.keys()}
@@ -0,0 +1,107 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from typing import Any, Sequence, Union
17
+
18
+ from openai._types import NOT_GIVEN, NotGiven
19
+ from pydantic import Field
20
+
21
+ from camel.configs.base_config import BaseConfig
22
+
23
+
24
+ class TogetherAIConfig(BaseConfig):
25
+ r"""Defines the parameters for generating chat completions using the
26
+ TogetherAI API.
27
+
28
+ Args:
29
+ temperature (float, optional): Sampling temperature to use, between
30
+ :obj:`0` and :obj:`2`. Higher values make the output more random,
31
+ while lower values make it more focused and deterministic.
32
+ (default: :obj:`0.2`)
33
+ top_p (float, optional): An alternative to sampling with temperature,
34
+ called nucleus sampling, where the model considers the results of
35
+ the tokens with top_p probability mass. So :obj:`0.1` means only
36
+ the tokens comprising the top 10% probability mass are considered.
37
+ (default: :obj:`1.0`)
38
+ n (int, optional): How many chat completion choices to generate for
39
+ each input message. (default: :obj:`1`)
40
+ response_format (object, optional): An object specifying the format
41
+ that the model must output. Compatible with GPT-4 Turbo and all
42
+ GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
43
+ {"type": "json_object"} enables JSON mode, which guarantees the
44
+ message the model generates is valid JSON. Important: when using
45
+ JSON mode, you must also instruct the model to produce JSON
46
+ yourself via a system or user message. Without this, the model
47
+ may generate an unending stream of whitespace until the generation
48
+ reaches the token limit, resulting in a long-running and seemingly
49
+ "stuck" request. Also note that the message content may be
50
+ partially cut off if finish_reason="length", which indicates the
51
+ generation exceeded max_tokens or the conversation exceeded the
52
+ max context length.
53
+ stream (bool, optional): If True, partial message deltas will be sent
54
+ as data-only server-sent events as they become available.
55
+ (default: :obj:`False`)
56
+ stop (str or list, optional): Up to :obj:`4` sequences where the API
57
+ will stop generating further tokens. (default: :obj:`None`)
58
+ max_tokens (int, optional): The maximum number of tokens to generate
59
+ in the chat completion. The total length of input tokens and
60
+ generated tokens is limited by the model's context length.
61
+ (default: :obj:`None`)
62
+ presence_penalty (float, optional): Number between :obj:`-2.0` and
63
+ :obj:`2.0`. Positive values penalize new tokens based on whether
64
+ they appear in the text so far, increasing the model's likelihood
65
+ to talk about new topics. See more information about frequency and
66
+ presence penalties. (default: :obj:`0.0`)
67
+ frequency_penalty (float, optional): Number between :obj:`-2.0` and
68
+ :obj:`2.0`. Positive values penalize new tokens based on their
69
+ existing frequency in the text so far, decreasing the model's
70
+ likelihood to repeat the same line verbatim. See more information
71
+ about frequency and presence penalties. (default: :obj:`0.0`)
72
+ logit_bias (dict, optional): Modify the likelihood of specified tokens
73
+ appearing in the completion. Accepts a json object that maps tokens
74
+ (specified by their token ID in the tokenizer) to an associated
75
+ bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
76
+ is added to the logits generated by the model prior to sampling.
77
+ The exact effect will vary per model, but values between :obj:`-1`
78
+ and :obj:`1` should decrease or increase likelihood of selection;
79
+ values like :obj:`-100` or :obj:`100` should result in a ban or
80
+ exclusive selection of the relevant token. (default: :obj:`{}`)
81
+ user (str, optional): A unique identifier representing your end-user,
82
+ which can help OpenAI to monitor and detect abuse.
83
+ (default: :obj:`""`)
84
+ """
85
+
86
+ temperature: float = 0.2 # openai default: 1.0
87
+ top_p: float = 1.0
88
+ n: int = 1
89
+ stream: bool = False
90
+ stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
91
+ max_tokens: Union[int, NotGiven] = NOT_GIVEN
92
+ presence_penalty: float = 0.0
93
+ response_format: Union[dict, NotGiven] = NOT_GIVEN
94
+ frequency_penalty: float = 0.0
95
+ logit_bias: dict = Field(default_factory=dict)
96
+ user: str = ""
97
+
98
+ def as_dict(self) -> dict[str, Any]:
99
+ config_dict = super().as_dict()
100
+ if "tools" in config_dict:
101
+ del config_dict["tools"] # Currently does not support tool calling
102
+ return config_dict
103
+
104
+
105
+ TOGETHERAI_API_PARAMS = {
106
+ param for param in TogetherAIConfig.model_fields.keys()
107
+ }
@@ -25,7 +25,10 @@ from .open_source_model import OpenSourceModel
25
25
  from .openai_audio_models import OpenAIAudioModels
26
26
  from .openai_compatibility_model import OpenAICompatibilityModel
27
27
  from .openai_model import OpenAIModel
28
+ from .reka_model import RekaModel
29
+ from .samba_model import SambaModel
28
30
  from .stub_model import StubModel
31
+ from .togetherai_model import TogetherAIModel
29
32
  from .vllm_model import VLLMModel
30
33
  from .zhipuai_model import ZhipuAIModel
31
34
 
@@ -47,4 +50,7 @@ __all__ = [
47
50
  'VLLMModel',
48
51
  'GeminiModel',
49
52
  'OpenAICompatibilityModel',
53
+ 'RekaModel',
54
+ 'SambaModel',
55
+ 'TogetherAIModel',
50
56
  ]
@@ -51,21 +51,21 @@ class GroqModel(BaseModelBackend):
51
51
  api_key (Optional[str]): The API key for authenticating with the
52
52
  Groq service. (default: :obj:`None`).
53
53
  url (Optional[str]): The url to the Groq service. (default:
54
- :obj:`None`)
54
+ :obj:`"https://api.groq.com/openai/v1"`)
55
55
  token_counter (Optional[BaseTokenCounter]): Token counter to use
56
56
  for the model. If not provided, `OpenAITokenCounter(ModelType.
57
- GPT_3_5_TURBO)` will be used.
57
+ GPT_4O_MINI)` will be used.
58
58
  """
59
59
  super().__init__(
60
60
  model_type, model_config_dict, api_key, url, token_counter
61
61
  )
62
- self._url = url or "https://api.groq.com/openai/v1"
62
+ self._url = url or os.environ.get("GROQ_API_BASE_URL")
63
63
  self._api_key = api_key or os.environ.get("GROQ_API_KEY")
64
64
  self._client = OpenAI(
65
65
  timeout=60,
66
66
  max_retries=3,
67
67
  api_key=self._api_key,
68
- base_url=self._url,
68
+ base_url=self._url or "https://api.groq.com/openai/v1",
69
69
  )
70
70
  self._token_counter = token_counter
71
71
 
@@ -80,7 +80,7 @@ class GroqModel(BaseModelBackend):
80
80
  # Make sure you have the access to these open-source model in
81
81
  # HuggingFace
82
82
  if not self._token_counter:
83
- self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
83
+ self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
84
84
  return self._token_counter
85
85
 
86
86
  @api_keys_required("GROQ_API_KEY")
@@ -147,7 +147,7 @@ class LiteLLMModel:
147
147
 
148
148
  @property
149
149
  def token_limit(self) -> int:
150
- """Returns the maximum token limit for the given model.
150
+ r"""Returns the maximum token limit for the given model.
151
151
 
152
152
  Returns:
153
153
  int: The maximum token limit for the given model.
@@ -24,7 +24,10 @@ from camel.models.ollama_model import OllamaModel
24
24
  from camel.models.open_source_model import OpenSourceModel
25
25
  from camel.models.openai_compatibility_model import OpenAICompatibilityModel
26
26
  from camel.models.openai_model import OpenAIModel
27
+ from camel.models.reka_model import RekaModel
28
+ from camel.models.samba_model import SambaModel
27
29
  from camel.models.stub_model import StubModel
30
+ from camel.models.togetherai_model import TogetherAIModel
28
31
  from camel.models.vllm_model import VLLMModel
29
32
  from camel.models.zhipuai_model import ZhipuAIModel
30
33
  from camel.types import ModelPlatformType, ModelType
@@ -91,6 +94,10 @@ class ModelFactory:
91
94
  model_class = GeminiModel
92
95
  elif model_platform.is_mistral and model_type.is_mistral:
93
96
  model_class = MistralModel
97
+ elif model_platform.is_reka and model_type.is_reka:
98
+ model_class = RekaModel
99
+ elif model_platform.is_samba and model_type.is_samba:
100
+ model_class = SambaModel
94
101
  elif model_type == ModelType.STUB:
95
102
  model_class = StubModel
96
103
  else:
@@ -110,6 +117,11 @@ class ModelFactory:
110
117
  model_class = LiteLLMModel
111
118
  elif model_platform.is_openai_compatibility_model:
112
119
  model_class = OpenAICompatibilityModel
120
+ elif model_platform.is_together:
121
+ model_class = TogetherAIModel
122
+ return model_class(
123
+ model_type, model_config_dict, api_key, token_counter
124
+ )
113
125
  else:
114
126
  raise ValueError(
115
127
  f"Unknown pair of model platform `{model_platform}` "