camel-ai 0.1.6.6__tar.gz → 0.1.6.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (193)
  1. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/PKG-INFO +2 -2
  2. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/README.md +1 -1
  3. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/__init__.py +1 -1
  4. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/chat_agent.py +0 -6
  5. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/__init__.py +6 -0
  6. camel_ai-0.1.6.7/camel/configs/samba_config.py +50 -0
  7. camel_ai-0.1.6.7/camel/configs/togetherai_config.py +107 -0
  8. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/__init__.py +4 -0
  9. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/groq_model.py +5 -5
  10. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/litellm_model.py +1 -1
  11. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/model_factory.py +9 -0
  12. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/ollama_model.py +6 -4
  13. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/openai_compatibility_model.py +3 -3
  14. camel_ai-0.1.6.7/camel/models/samba_model.py +291 -0
  15. camel_ai-0.1.6.7/camel/models/togetherai_model.py +148 -0
  16. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/vllm_model.py +7 -5
  17. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/zhipuai_model.py +2 -2
  18. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/retrievers/auto_retriever.py +2 -27
  19. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/__init__.py +3 -0
  20. camel_ai-0.1.6.7/camel/toolkits/linkedin_toolkit.py +230 -0
  21. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/types/enums.py +31 -5
  22. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/utils/__init__.py +2 -0
  23. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/utils/commons.py +22 -0
  24. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/pyproject.toml +1 -1
  25. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/__init__.py +0 -0
  26. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/base.py +0 -0
  27. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/critic_agent.py +0 -0
  28. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/deductive_reasoner_agent.py +0 -0
  29. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/embodied_agent.py +0 -0
  30. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/knowledge_graph_agent.py +0 -0
  31. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/role_assignment_agent.py +0 -0
  32. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/search_agent.py +0 -0
  33. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/task_agent.py +0 -0
  34. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/tool_agents/__init__.py +0 -0
  35. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/tool_agents/base.py +0 -0
  36. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  37. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/anthropic_config.py +0 -0
  38. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/base_config.py +0 -0
  39. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/gemini_config.py +0 -0
  40. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/groq_config.py +0 -0
  41. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/litellm_config.py +0 -0
  42. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/mistral_config.py +0 -0
  43. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/ollama_config.py +0 -0
  44. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/openai_config.py +0 -0
  45. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/vllm_config.py +0 -0
  46. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/configs/zhipuai_config.py +0 -0
  47. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/embeddings/__init__.py +0 -0
  48. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/embeddings/base.py +0 -0
  49. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/embeddings/mistral_embedding.py +0 -0
  50. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/embeddings/openai_embedding.py +0 -0
  51. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
  52. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/embeddings/vlm_embedding.py +0 -0
  53. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/generators.py +0 -0
  54. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/human.py +0 -0
  55. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/interpreters/__init__.py +0 -0
  56. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/interpreters/base.py +0 -0
  57. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/interpreters/docker_interpreter.py +0 -0
  58. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/interpreters/internal_python_interpreter.py +0 -0
  59. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/interpreters/interpreter_error.py +0 -0
  60. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/interpreters/ipython_interpreter.py +0 -0
  61. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/interpreters/subprocess_interpreter.py +0 -0
  62. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/loaders/__init__.py +0 -0
  63. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/loaders/base_io.py +0 -0
  64. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/loaders/firecrawl_reader.py +0 -0
  65. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/loaders/jina_url_reader.py +0 -0
  66. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/loaders/unstructured_io.py +0 -0
  67. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/__init__.py +0 -0
  68. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/agent_memories.py +0 -0
  69. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/base.py +0 -0
  70. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/blocks/__init__.py +0 -0
  71. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/blocks/chat_history_block.py +0 -0
  72. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/blocks/vectordb_block.py +0 -0
  73. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/context_creators/__init__.py +0 -0
  74. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/context_creators/score_based.py +0 -0
  75. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/memories/records.py +0 -0
  76. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/messages/__init__.py +0 -0
  77. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/messages/base.py +0 -0
  78. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/messages/func_message.py +0 -0
  79. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/anthropic_model.py +0 -0
  80. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/azure_openai_model.py +0 -0
  81. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/base_model.py +0 -0
  82. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/gemini_model.py +0 -0
  83. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/mistral_model.py +0 -0
  84. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/nemotron_model.py +0 -0
  85. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/open_source_model.py +0 -0
  86. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/openai_audio_models.py +0 -0
  87. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/openai_model.py +0 -0
  88. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/models/stub_model.py +0 -0
  89. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/__init__.py +0 -0
  90. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/ai_society.py +0 -0
  91. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/base.py +0 -0
  92. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/code.py +0 -0
  93. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/evaluation.py +0 -0
  94. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/generate_text_embedding_data.py +0 -0
  95. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/image_craft.py +0 -0
  96. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/misalignment.py +0 -0
  97. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/multi_condition_image_craft.py +0 -0
  98. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/object_recognition.py +0 -0
  99. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/prompt_templates.py +0 -0
  100. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/role_description_prompt_template.py +0 -0
  101. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/solution_extraction.py +0 -0
  102. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/task_prompt_template.py +0 -0
  103. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/translation.py +0 -0
  104. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/prompts/video_description_prompt.py +0 -0
  105. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/responses/__init__.py +0 -0
  106. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/responses/agent_responses.py +0 -0
  107. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/retrievers/__init__.py +0 -0
  108. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/retrievers/base.py +0 -0
  109. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/retrievers/bm25_retriever.py +0 -0
  110. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/retrievers/cohere_rerank_retriever.py +0 -0
  111. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/retrievers/vector_retriever.py +0 -0
  112. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/societies/__init__.py +0 -0
  113. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/societies/babyagi_playing.py +0 -0
  114. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/societies/role_playing.py +0 -0
  115. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/__init__.py +0 -0
  116. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/graph_storages/__init__.py +0 -0
  117. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/graph_storages/base.py +0 -0
  118. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/graph_storages/graph_element.py +0 -0
  119. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/graph_storages/neo4j_graph.py +0 -0
  120. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/key_value_storages/__init__.py +0 -0
  121. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/key_value_storages/base.py +0 -0
  122. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/key_value_storages/in_memory.py +0 -0
  123. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/key_value_storages/json.py +0 -0
  124. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/key_value_storages/redis.py +0 -0
  125. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/object_storages/__init__.py +0 -0
  126. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/object_storages/amazon_s3.py +0 -0
  127. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/object_storages/azure_blob.py +0 -0
  128. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/object_storages/base.py +0 -0
  129. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/object_storages/google_cloud.py +0 -0
  130. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/vectordb_storages/__init__.py +0 -0
  131. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/vectordb_storages/base.py +0 -0
  132. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/vectordb_storages/milvus.py +0 -0
  133. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/storages/vectordb_storages/qdrant.py +0 -0
  134. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/tasks/__init__.py +0 -0
  135. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/tasks/task.py +0 -0
  136. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/tasks/task_prompt.py +0 -0
  137. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/terminators/__init__.py +0 -0
  138. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/terminators/base.py +0 -0
  139. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/terminators/response_terminator.py +0 -0
  140. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/terminators/token_limit_terminator.py +0 -0
  141. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/base.py +0 -0
  142. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/code_execution.py +0 -0
  143. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/dalle_toolkit.py +0 -0
  144. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/github_toolkit.py +0 -0
  145. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/google_maps_toolkit.py +0 -0
  146. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/math_toolkit.py +0 -0
  147. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/biztoc/__init__.py +0 -0
  148. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/biztoc/ai-plugin.json +0 -0
  149. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/biztoc/openapi.yaml +0 -0
  150. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/coursera/__init__.py +0 -0
  151. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/coursera/openapi.yaml +0 -0
  152. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/create_qr_code/__init__.py +0 -0
  153. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/create_qr_code/openapi.yaml +0 -0
  154. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/klarna/__init__.py +0 -0
  155. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/klarna/openapi.yaml +0 -0
  156. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/nasa_apod/__init__.py +0 -0
  157. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/nasa_apod/openapi.yaml +0 -0
  158. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/outschool/__init__.py +0 -0
  159. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/outschool/ai-plugin.json +0 -0
  160. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/outschool/openapi.yaml +0 -0
  161. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/outschool/paths/__init__.py +0 -0
  162. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/outschool/paths/get_classes.py +0 -0
  163. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +0 -0
  164. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/security_config.py +0 -0
  165. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/speak/__init__.py +0 -0
  166. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/speak/openapi.yaml +0 -0
  167. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/web_scraper/__init__.py +0 -0
  168. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +0 -0
  169. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/web_scraper/openapi.yaml +0 -0
  170. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +0 -0
  171. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +0 -0
  172. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/open_api_toolkit.py +0 -0
  173. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/openai_function.py +0 -0
  174. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/retrieval_toolkit.py +0 -0
  175. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/search_toolkit.py +0 -0
  176. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/slack_toolkit.py +0 -0
  177. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/twitter_toolkit.py +0 -0
  178. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/toolkits/weather_toolkit.py +0 -0
  179. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/types/__init__.py +0 -0
  180. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/types/openai_types.py +0 -0
  181. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/utils/async_func.py +0 -0
  182. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/utils/constants.py +0 -0
  183. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/utils/token_counting.py +0 -0
  184. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/__init__.py +0 -0
  185. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/base.py +0 -0
  186. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/manager_node.py +0 -0
  187. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/role_playing_node.py +0 -0
  188. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/single_agent_node.py +0 -0
  189. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/task_channel.py +0 -0
  190. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/utils.py +0 -0
  191. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/worker_node.py +0 -0
  192. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/workforce.py +0 -0
  193. {camel_ai-0.1.6.6 → camel_ai-0.1.6.7}/camel/workforce/workforce_prompt.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: camel-ai
3
- Version: 0.1.6.6
3
+ Version: 0.1.6.7
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Home-page: https://www.camel-ai.org/
6
6
  License: Apache-2.0
@@ -202,7 +202,7 @@ conda create --name camel python=3.9
202
202
  conda activate camel
203
203
 
204
204
  # Clone github repo
205
- git clone -b v0.1.6.6 https://github.com/camel-ai/camel.git
205
+ git clone -b v0.1.6.7 https://github.com/camel-ai/camel.git
206
206
 
207
207
  # Change directory into project directory
208
208
  cd camel
@@ -110,7 +110,7 @@ conda create --name camel python=3.9
110
110
  conda activate camel
111
111
 
112
112
  # Clone github repo
113
- git clone -b v0.1.6.6 https://github.com/camel-ai/camel.git
113
+ git clone -b v0.1.6.7 https://github.com/camel-ai/camel.git
114
114
 
115
115
  # Change directory into project directory
116
116
  cd camel
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.1.6.6'
15
+ __version__ = '0.1.6.7'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -112,9 +112,6 @@ class ChatAgent(BaseAgent):
112
112
  model (BaseModelBackend, optional): The model backend to use for
113
113
  generating responses. (default: :obj:`OpenAIModel` with
114
114
  `GPT_4O_MINI`)
115
- api_key (str, optional): The API key for authenticating with the
116
- LLM service. Only OpenAI and Anthropic model supported (default:
117
- :obj:`None`)
118
115
  memory (AgentMemory, optional): The agent memory for managing chat
119
116
  messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
120
117
  (default: :obj:`None`)
@@ -138,7 +135,6 @@ class ChatAgent(BaseAgent):
138
135
  self,
139
136
  system_message: BaseMessage,
140
137
  model: Optional[BaseModelBackend] = None,
141
- api_key: Optional[str] = None,
142
138
  memory: Optional[AgentMemory] = None,
143
139
  message_window_size: Optional[int] = None,
144
140
  token_limit: Optional[int] = None,
@@ -150,7 +146,6 @@ class ChatAgent(BaseAgent):
150
146
  self.system_message = system_message
151
147
  self.role_name: str = system_message.role_name
152
148
  self.role_type: RoleType = system_message.role_type
153
- self._api_key = api_key
154
149
  self.model_backend: BaseModelBackend = (
155
150
  model
156
151
  if model is not None
@@ -158,7 +153,6 @@ class ChatAgent(BaseAgent):
158
153
  model_platform=ModelPlatformType.OPENAI,
159
154
  model_type=ModelType.GPT_4O_MINI,
160
155
  model_config_dict=ChatGPTConfig().as_dict(),
161
- api_key=self._api_key,
162
156
  )
163
157
  )
164
158
  self.output_language: Optional[str] = output_language
@@ -19,6 +19,8 @@ from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
19
19
  from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
20
20
  from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
21
21
  from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
22
+ from .samba_config import SAMBA_API_PARAMS, SambaConfig
23
+ from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
22
24
  from .vllm_config import VLLM_API_PARAMS, VLLMConfig
23
25
  from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
24
26
 
@@ -43,4 +45,8 @@ __all__ = [
43
45
  'VLLM_API_PARAMS',
44
46
  'MistralConfig',
45
47
  'MISTRAL_API_PARAMS',
48
+ 'SambaConfig',
49
+ 'SAMBA_API_PARAMS',
50
+ 'TogetherAIConfig',
51
+ 'TOGETHERAI_API_PARAMS',
46
52
  ]
@@ -0,0 +1,50 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from typing import Any, Dict, Optional, Union
17
+
18
+ from camel.configs.base_config import BaseConfig
19
+
20
+
21
+ class SambaConfig(BaseConfig):
22
+ r"""Defines the parameters for generating chat completions using the
23
+ SambaNova API.
24
+
25
+ Args:
26
+ max_tokens (Optional[int], optional): the maximum number of tokens to
27
+ generate, e.g. 100. Defaults to `None`.
28
+ stop (Optional[Union[str,list[str]]]): Stop generation if this token
29
+ is detected. Or if one of these tokens is detected when providing
30
+ a string list. Defaults to `None`.
31
+ stream (Optional[bool]): If True, partial message deltas will be sent
32
+ as data-only server-sent events as they become available.
33
+ Currently SambaNova only supports stream mode. Defaults to `True`.
34
+ stream_options (Optional[Dict]): Additional options for streaming.
35
+ Defaults to `{"include_usage": True}`.
36
+ """
37
+
38
+ max_tokens: Optional[int] = None
39
+ stop: Optional[Union[str, list[str]]] = None
40
+ stream: Optional[bool] = True
41
+ stream_options: Optional[Dict] = {"include_usage": True} # noqa: RUF012
42
+
43
+ def as_dict(self) -> dict[str, Any]:
44
+ config_dict = super().as_dict()
45
+ if "tools" in config_dict:
46
+ del config_dict["tools"] # SambaNova does not support tool calling
47
+ return config_dict
48
+
49
+
50
+ SAMBA_API_PARAMS = {param for param in SambaConfig().model_fields.keys()}
@@ -0,0 +1,107 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from typing import Any, Sequence, Union
17
+
18
+ from openai._types import NOT_GIVEN, NotGiven
19
+ from pydantic import Field
20
+
21
+ from camel.configs.base_config import BaseConfig
22
+
23
+
24
+ class TogetherAIConfig(BaseConfig):
25
+ r"""Defines the parameters for generating chat completions using the
26
+ Together AI API.
27
+
28
+ Args:
29
+ temperature (float, optional): Sampling temperature to use, between
30
+ :obj:`0` and :obj:`2`. Higher values make the output more random,
31
+ while lower values make it more focused and deterministic.
32
+ (default: :obj:`0.2`)
33
+ top_p (float, optional): An alternative to sampling with temperature,
34
+ called nucleus sampling, where the model considers the results of
35
+ the tokens with top_p probability mass. So :obj:`0.1` means only
36
+ the tokens comprising the top 10% probability mass are considered.
37
+ (default: :obj:`1.0`)
38
+ n (int, optional): How many chat completion choices to generate for
39
+ each input message. (default: :obj:`1`)
40
+ response_format (object, optional): An object specifying the format
41
+ that the model must output. Compatible with GPT-4 Turbo and all
42
+ GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
43
+ {"type": "json_object"} enables JSON mode, which guarantees the
44
+ message the model generates is valid JSON. Important: when using
45
+ JSON mode, you must also instruct the model to produce JSON
46
+ yourself via a system or user message. Without this, the model
47
+ may generate an unending stream of whitespace until the generation
48
+ reaches the token limit, resulting in a long-running and seemingly
49
+ "stuck" request. Also note that the message content may be
50
+ partially cut off if finish_reason="length", which indicates the
51
+ generation exceeded max_tokens or the conversation exceeded the
52
+ max context length.
53
+ stream (bool, optional): If True, partial message deltas will be sent
54
+ as data-only server-sent events as they become available.
55
+ (default: :obj:`False`)
56
+ stop (str or list, optional): Up to :obj:`4` sequences where the API
57
+ will stop generating further tokens. (default: :obj:`None`)
58
+ max_tokens (int, optional): The maximum number of tokens to generate
59
+ in the chat completion. The total length of input tokens and
60
+ generated tokens is limited by the model's context length.
61
+ (default: :obj:`None`)
62
+ presence_penalty (float, optional): Number between :obj:`-2.0` and
63
+ :obj:`2.0`. Positive values penalize new tokens based on whether
64
+ they appear in the text so far, increasing the model's likelihood
65
+ to talk about new topics. See more information about frequency and
66
+ presence penalties. (default: :obj:`0.0`)
67
+ frequency_penalty (float, optional): Number between :obj:`-2.0` and
68
+ :obj:`2.0`. Positive values penalize new tokens based on their
69
+ existing frequency in the text so far, decreasing the model's
70
+ likelihood to repeat the same line verbatim. See more information
71
+ about frequency and presence penalties. (default: :obj:`0.0`)
72
+ logit_bias (dict, optional): Modify the likelihood of specified tokens
73
+ appearing in the completion. Accepts a json object that maps tokens
74
+ (specified by their token ID in the tokenizer) to an associated
75
+ bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
76
+ is added to the logits generated by the model prior to sampling.
77
+ The exact effect will vary per model, but values between :obj:`-1`
78
+ and :obj:`1` should decrease or increase likelihood of selection;
79
+ values like :obj:`-100` or :obj:`100` should result in a ban or
80
+ exclusive selection of the relevant token. (default: :obj:`{}`)
81
+ user (str, optional): A unique identifier representing your end-user,
82
+ which can help OpenAI to monitor and detect abuse.
83
+ (default: :obj:`""`)
84
+ """
85
+
86
+ temperature: float = 0.2 # openai default: 1.0
87
+ top_p: float = 1.0
88
+ n: int = 1
89
+ stream: bool = False
90
+ stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
91
+ max_tokens: Union[int, NotGiven] = NOT_GIVEN
92
+ presence_penalty: float = 0.0
93
+ response_format: Union[dict, NotGiven] = NOT_GIVEN
94
+ frequency_penalty: float = 0.0
95
+ logit_bias: dict = Field(default_factory=dict)
96
+ user: str = ""
97
+
98
+ def as_dict(self) -> dict[str, Any]:
99
+ config_dict = super().as_dict()
100
+ if "tools" in config_dict:
101
+ del config_dict["tools"] # Currently does not support tool calling
102
+ return config_dict
103
+
104
+
105
+ TOGETHERAI_API_PARAMS = {
106
+ param for param in TogetherAIConfig.model_fields.keys()
107
+ }
@@ -25,7 +25,9 @@ from .open_source_model import OpenSourceModel
25
25
  from .openai_audio_models import OpenAIAudioModels
26
26
  from .openai_compatibility_model import OpenAICompatibilityModel
27
27
  from .openai_model import OpenAIModel
28
+ from .samba_model import SambaModel
28
29
  from .stub_model import StubModel
30
+ from .togetherai_model import TogetherAIModel
29
31
  from .vllm_model import VLLMModel
30
32
  from .zhipuai_model import ZhipuAIModel
31
33
 
@@ -47,4 +49,6 @@ __all__ = [
47
49
  'VLLMModel',
48
50
  'GeminiModel',
49
51
  'OpenAICompatibilityModel',
52
+ 'SambaModel',
53
+ 'TogetherAIModel',
50
54
  ]
@@ -51,21 +51,21 @@ class GroqModel(BaseModelBackend):
51
51
  api_key (Optional[str]): The API key for authenticating with the
52
52
  Groq service. (default: :obj:`None`).
53
53
  url (Optional[str]): The url to the Groq service. (default:
54
- :obj:`None`)
54
+ :obj:`"https://api.groq.com/openai/v1"`)
55
55
  token_counter (Optional[BaseTokenCounter]): Token counter to use
56
56
  for the model. If not provided, `OpenAITokenCounter(ModelType.
57
- GPT_3_5_TURBO)` will be used.
57
+ GPT_4O_MINI)` will be used.
58
58
  """
59
59
  super().__init__(
60
60
  model_type, model_config_dict, api_key, url, token_counter
61
61
  )
62
- self._url = url or "https://api.groq.com/openai/v1"
62
+ self._url = url or os.environ.get("GROQ_API_BASE_URL")
63
63
  self._api_key = api_key or os.environ.get("GROQ_API_KEY")
64
64
  self._client = OpenAI(
65
65
  timeout=60,
66
66
  max_retries=3,
67
67
  api_key=self._api_key,
68
- base_url=self._url,
68
+ base_url=self._url or "https://api.groq.com/openai/v1",
69
69
  )
70
70
  self._token_counter = token_counter
71
71
 
@@ -80,7 +80,7 @@ class GroqModel(BaseModelBackend):
80
80
  # Make sure you have the access to these open-source model in
81
81
  # HuggingFace
82
82
  if not self._token_counter:
83
- self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
83
+ self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
84
84
  return self._token_counter
85
85
 
86
86
  @api_keys_required("GROQ_API_KEY")
@@ -147,7 +147,7 @@ class LiteLLMModel:
147
147
 
148
148
  @property
149
149
  def token_limit(self) -> int:
150
- """Returns the maximum token limit for the given model.
150
+ r"""Returns the maximum token limit for the given model.
151
151
 
152
152
  Returns:
153
153
  int: The maximum token limit for the given model.
@@ -24,7 +24,9 @@ from camel.models.ollama_model import OllamaModel
24
24
  from camel.models.open_source_model import OpenSourceModel
25
25
  from camel.models.openai_compatibility_model import OpenAICompatibilityModel
26
26
  from camel.models.openai_model import OpenAIModel
27
+ from camel.models.samba_model import SambaModel
27
28
  from camel.models.stub_model import StubModel
29
+ from camel.models.togetherai_model import TogetherAIModel
28
30
  from camel.models.vllm_model import VLLMModel
29
31
  from camel.models.zhipuai_model import ZhipuAIModel
30
32
  from camel.types import ModelPlatformType, ModelType
@@ -91,6 +93,8 @@ class ModelFactory:
91
93
  model_class = GeminiModel
92
94
  elif model_platform.is_mistral and model_type.is_mistral:
93
95
  model_class = MistralModel
96
+ elif model_platform.is_samba and model_type.is_samba:
97
+ model_class = SambaModel
94
98
  elif model_type == ModelType.STUB:
95
99
  model_class = StubModel
96
100
  else:
@@ -110,6 +114,11 @@ class ModelFactory:
110
114
  model_class = LiteLLMModel
111
115
  elif model_platform.is_openai_compatibility_model:
112
116
  model_class = OpenAICompatibilityModel
117
+ elif model_platform.is_together:
118
+ model_class = TogetherAIModel
119
+ return model_class(
120
+ model_type, model_config_dict, api_key, token_counter
121
+ )
113
122
  else:
114
123
  raise ValueError(
115
124
  f"Unknown pair of model platform `{model_platform}` "
@@ -11,6 +11,7 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
14
15
  from typing import Any, Dict, List, Optional, Union
15
16
 
16
17
  from openai import OpenAI, Stream
@@ -40,18 +41,19 @@ class OllamaModel:
40
41
  model_config_dict (Dict[str, Any]): A dictionary that will
41
42
  be fed into openai.ChatCompletion.create().
42
43
  url (Optional[str]): The url to the model service. (default:
43
- :obj:`None`)
44
+ :obj:`"http://localhost:11434/v1"`)
44
45
  token_counter (Optional[BaseTokenCounter]): Token counter to use
45
46
  for the model. If not provided, `OpenAITokenCounter(ModelType.
46
- GPT_3_5_TURBO)` will be used.
47
+ GPT_4O_MINI)` will be used.
47
48
  """
48
49
  self.model_type = model_type
49
50
  self.model_config_dict = model_config_dict
51
+ self._url = url or os.environ.get("OLLAMA_BASE_URL")
50
52
  # Use OpenAI client as interface to call Ollama
51
53
  self._client = OpenAI(
52
54
  timeout=60,
53
55
  max_retries=3,
54
- base_url=url,
56
+ base_url=self._url or "http://localhost:11434/v1",
55
57
  api_key="ollama", # required but ignored
56
58
  )
57
59
  self._token_counter = token_counter
@@ -66,7 +68,7 @@ class OllamaModel:
66
68
  tokenization style.
67
69
  """
68
70
  if not self._token_counter:
69
- self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
71
+ self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
70
72
  return self._token_counter
71
73
 
72
74
  def check_model_config(self):
@@ -38,7 +38,7 @@ class OpenAICompatibilityModel:
38
38
  r"""Constructor for model backend.
39
39
 
40
40
  Args:
41
- model_type (ModelType): Model for which a backend is created.
41
+ model_type (str): Model for which a backend is created.
42
42
  model_config_dict (Dict[str, Any]): A dictionary that will
43
43
  be fed into openai.ChatCompletion.create().
44
44
  api_key (str): The API key for authenticating with the
@@ -47,7 +47,7 @@ class OpenAICompatibilityModel:
47
47
  :obj:`None`)
48
48
  token_counter (Optional[BaseTokenCounter]): Token counter to use
49
49
  for the model. If not provided, `OpenAITokenCounter(ModelType.
50
- GPT_3_5_TURBO)` will be used.
50
+ GPT_4O_MINI)` will be used.
51
51
  """
52
52
  self.model_type = model_type
53
53
  self.model_config_dict = model_config_dict
@@ -91,7 +91,7 @@ class OpenAICompatibilityModel:
91
91
  """
92
92
 
93
93
  if not self._token_counter:
94
- self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
94
+ self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
95
95
  return self._token_counter
96
96
 
97
97
  @property