camel-ai 0.2.43__tar.gz → 0.2.45__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- {camel_ai-0.2.43 → camel_ai-0.2.45}/PKG-INFO +5 -5
- {camel_ai-0.2.43 → camel_ai-0.2.45}/README.md +4 -4
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/__init__.py +1 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datasets/few_shot_generator.py +1 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/environments/single_step.py +14 -2
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/extractors/python_strategies.py +14 -5
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/cohere_model.py +32 -4
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/litellm_model.py +11 -4
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/mistral_model.py +14 -2
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/reka_model.py +11 -3
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/togetherai_model.py +106 -31
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/volcano_model.py +7 -2
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/__init__.py +2 -0
- camel_ai-0.2.45/camel/runtime/ubuntu_docker_runtime.py +340 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/audio_analysis_toolkit.py +21 -17
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/browser_toolkit.py +6 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/dalle_toolkit.py +15 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/excel_toolkit.py +14 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/image_analysis_toolkit.py +9 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/mcp_toolkit.py +2 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/networkx_toolkit.py +5 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/openai_agent_toolkit.py +5 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/searxng_toolkit.py +7 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/slack_toolkit.py +15 -2
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/video_analysis_toolkit.py +5 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/weather_toolkit.py +14 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/zapier_toolkit.py +6 -2
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/types/enums.py +16 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/verifiers/base.py +14 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/pyproject.toml +1 -1
- {camel_ai-0.2.43 → camel_ai-0.2.45}/.gitignore +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/LICENSE +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/_types.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/_utils.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/chat_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/critic_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/deductive_reasoner_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/embodied_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/knowledge_graph_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/multi_hop_generator_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/programmed_agent_instruction.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/repo_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/role_assignment_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/search_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/task_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/benchmarks/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/benchmarks/apibank.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/benchmarks/apibench.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/benchmarks/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/benchmarks/gaia.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/benchmarks/nexus.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/benchmarks/ragbench.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/discord/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/discord/discord_app.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/discord/discord_installation.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/discord/discord_store.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/slack/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/slack/models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/slack/slack_app.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/bots/telegram_bot.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/aiml_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/base_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/cohere_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/deepseek_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/gemini_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/groq_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/internlm_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/litellm_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/mistral_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/modelscope_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/moonshot_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/nvidia_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/ollama_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/openai_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/openrouter_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/ppio_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/qwen_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/reka_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/samba_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/sglang_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/siliconflow_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/togetherai_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/vllm_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/yi_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/configs/zhipuai_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/data_collector/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/data_collector/alpaca_collector.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/data_collector/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/data_collector/sharegpt_collector.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/cot_datagen.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/evol_instruct/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/evol_instruct/evol_instruct.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/evol_instruct/scorer.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/evol_instruct/templates.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_improving_cot.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_instruct/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_instruct/filter/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_instruct/filter/filter_function.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_instruct/filter/filter_registry.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_instruct/filter/instruction_filter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_instruct/self_instruct.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/self_instruct/templates.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/source2synth/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/source2synth/data_processor.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/source2synth/models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datagen/source2synth/user_data_processor_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datahubs/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datahubs/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datahubs/huggingface.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datahubs/models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datasets/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datasets/base_generator.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datasets/models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datasets/self_instruct_generator.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/datasets/static_dataset.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/azure_embedding.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/jina_embedding.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/mistral_embedding.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/openai_compatible_embedding.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/openai_embedding.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/together_embedding.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/environments/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/environments/models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/environments/multi_step.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/environments/tic_tac_toe.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/extractors/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/extractors/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/generators.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/human.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/docker/Dockerfile +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/docker_interpreter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/e2b_interpreter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/ipython_interpreter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/apify_reader.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/chunkr_reader.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/crawl4ai_reader.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/firecrawl_reader.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/jina_url_reader.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/mineru_extractor.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/pandas_reader.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/logger.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/agent_memories.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/memories/records.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/conversion/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/conversion/alpaca.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/conversion/conversation_models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/conversion/sharegpt/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/conversion/sharegpt/function_call_formatter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/conversion/sharegpt/hermes/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/messages/func_message.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/_utils.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/aiml_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/anthropic_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/azure_openai_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/base_audio_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/base_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/deepseek_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/fish_audio_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/gemini_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/groq_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/internlm_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/model_factory.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/model_manager.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/modelscope_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/moonshot_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/nemotron_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/nvidia_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/ollama_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/openai_compatible_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/openai_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/openrouter_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/ppio_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/qwen_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/reward/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/reward/base_reward_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/reward/evaluator.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/reward/nemotron_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/reward/skywork_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/samba_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/sglang_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/siliconflow_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/stub_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/vllm_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/yi_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/models/zhipuai_model.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/personas/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/personas/persona.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/personas/persona_hub.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/code.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/image_craft.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/multi_condition_image_craft.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/persona_hub.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/task_prompt_template.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/translation.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/prompts/video_description_prompt.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/py.typed +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/responses/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/retrievers/auto_retriever.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/retrievers/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/retrievers/hybrid_retrival.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/retrievers/vector_retriever.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/api.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/configs.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/docker_runtime.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/llm_guard_runtime.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/remote_http_runtime.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/utils/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/utils/function_risk_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/runtime/utils/ignore_risk_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/schemas/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/schemas/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/schemas/openai_converter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/schemas/outlines_converter.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/babyagi_playing.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/role_playing.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/prompts.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/role_playing_worker.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/single_agent_worker.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/task_channel.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/utils.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/worker.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/societies/workforce/workforce.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/graph_storages/graph_element.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/graph_storages/nebula_graph.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/graph_storages/neo4j_graph.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/key_value_storages/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/key_value_storages/mem0_cloud.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/key_value_storages/redis.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/object_storages/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/object_storages/amazon_s3.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/object_storages/azure_blob.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/object_storages/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/object_storages/google_cloud.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/vectordb_storages/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/vectordb_storages/milvus.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/vectordb_storages/qdrant.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/storages/vectordb_storages/tidb.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/tasks/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/tasks/task.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/tasks/task_prompt.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/terminators/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/arxiv_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/ask_news_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/code_execution.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/dappier_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/data_commons_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/file_write_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/function_tool.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/github_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/google_calendar_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/google_maps_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/google_scholar_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/human_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/linkedin_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/math_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/memory_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/meshy_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/mineru_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/notion_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/security_config.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/open_api_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/openbb_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/page_script.js +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/pubmed_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/reddit_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/retrieval_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/search_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/semantic_scholar_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/stripe_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/sympy_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/terminal_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/thinking_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/twitter_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/video_download_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/toolkits/whatsapp_toolkit.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/types/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/types/agents/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/types/agents/tool_calling_record.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/types/openai_types.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/types/unified_model_type.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/async_func.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/chunker/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/chunker/base.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/chunker/code_chunker.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/chunker/uio_chunker.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/commons.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/constants.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/deduplication.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/mcp.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/response_format.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/utils/token_counting.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/verifiers/__init__.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/verifiers/math_verifier.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/verifiers/models.py +0 -0
- {camel_ai-0.2.43 → camel_ai-0.2.45}/camel/verifiers/python_verifier.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.43
+Version: 0.2.45
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel

@@ -693,10 +693,10 @@ Practical guides and tutorials for implementing specific functionalities in CAME
 ### 3. Model Training & Data Generation
 | Cookbook | Description |
 |:---|:---|
-| **[Data Generation with CAMEL and Finetuning with Unsloth](https://docs.camel-ai.org/cookbooks/
-| **[Data Gen with Real Function Calls and Hermes Format](https://docs.camel-ai.org/cookbooks/
-| **[CoT Data Generation and Upload Data to Huggingface](https://docs.camel-ai.org/cookbooks/
-| **[CoT Data Generation and SFT Qwen with Unsolth](https://docs.camel-ai.org/cookbooks/
+| **[Data Generation with CAMEL and Finetuning with Unsloth](https://docs.camel-ai.org/cookbooks/data_generation/sft_data_generation_and_unsloth_finetuning_Qwen2_5_7B.html)** | Learn how to generate data with CAMEL and fine-tune models effectively with Unsloth. |
+| **[Data Gen with Real Function Calls and Hermes Format](https://docs.camel-ai.org/cookbooks/data_generation/data_gen_with_real_function_calls_and_hermes_format.html)** | Explore how to generate data with real function calls and the Hermes format. |
+| **[CoT Data Generation and Upload Data to Huggingface](https://docs.camel-ai.org/cookbooks/data_generation/distill_math_reasoning_data_from_deepseek_r1.html)** | Uncover how to generate CoT data with CAMEL and seamlessly upload it to Huggingface. |
+| **[CoT Data Generation and SFT Qwen with Unsolth](https://docs.camel-ai.org/cookbooks/data_generation/cot_data_gen_sft_qwen_unsolth_upload_huggingface.html)** | Discover how to generate CoT data using CAMEL and SFT Qwen with Unsolth, and seamlessly upload your data and model to Huggingface. |
 
 ### 4. Multi-Agent Systems & Applications
 | Cookbook | Description |
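Aside from the updated cookbook links, the metadata change is only the version bump. A quick way to confirm which release is installed locally (assuming the package was installed from PyPI under the name `camel-ai`):

```python
# Minimal sketch: read the installed distribution version of camel-ai.
from importlib.metadata import version

print(version("camel-ai"))  # expected to print "0.2.45" after upgrading
```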
README.md

@@ -372,10 +372,10 @@ Practical guides and tutorials for implementing specific functionalities in CAME
 ### 3. Model Training & Data Generation
 | Cookbook | Description |
 |:---|:---|
-| **[Data Generation with CAMEL and Finetuning with Unsloth](https://docs.camel-ai.org/cookbooks/
-| **[Data Gen with Real Function Calls and Hermes Format](https://docs.camel-ai.org/cookbooks/
-| **[CoT Data Generation and Upload Data to Huggingface](https://docs.camel-ai.org/cookbooks/
-| **[CoT Data Generation and SFT Qwen with Unsolth](https://docs.camel-ai.org/cookbooks/
+| **[Data Generation with CAMEL and Finetuning with Unsloth](https://docs.camel-ai.org/cookbooks/data_generation/sft_data_generation_and_unsloth_finetuning_Qwen2_5_7B.html)** | Learn how to generate data with CAMEL and fine-tune models effectively with Unsloth. |
+| **[Data Gen with Real Function Calls and Hermes Format](https://docs.camel-ai.org/cookbooks/data_generation/data_gen_with_real_function_calls_and_hermes_format.html)** | Explore how to generate data with real function calls and the Hermes format. |
+| **[CoT Data Generation and Upload Data to Huggingface](https://docs.camel-ai.org/cookbooks/data_generation/distill_math_reasoning_data_from_deepseek_r1.html)** | Uncover how to generate CoT data with CAMEL and seamlessly upload it to Huggingface. |
+| **[CoT Data Generation and SFT Qwen with Unsolth](https://docs.camel-ai.org/cookbooks/data_generation/cot_data_gen_sft_qwen_unsolth_upload_huggingface.html)** | Discover how to generate CoT data using CAMEL and SFT Qwen with Unsolth, and seamlessly upload your data and model to Huggingface. |
 
 ### 4. Multi-Agent Systems & Applications
 | Cookbook | Description |
camel/environments/single_step.py

@@ -200,7 +200,13 @@ class SingleStepEnv:
         self._states_done = [False] * self.current_batch_size
 
         observations = [
-            Observation(
+            Observation(
+                question=sample.question,
+                context={},
+                metadata=sample.metadata
+                if sample.metadata is not None
+                else {},
+            )
             for sample in self._states
         ]
 

@@ -214,7 +220,13 @@ class SingleStepEnv:
         self._states_done = [False] * batch_size
 
         observations = [
-            Observation(
+            Observation(
+                question=sample.question,
+                context={},
+                metadata=sample.metadata
+                if sample.metadata is not None
+                else {},
+            )
             for sample in self._states
         ]
 
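The functional change in both hunks is that each sample's metadata now reaches the `Observation` instead of being dropped, with an empty dict as the fallback. A small self-contained sketch of that fallback pattern; the `Sample` and `Observation` dataclasses here are illustrative stand-ins, not the real CAMEL types:

```python
# Sketch of the metadata-fallback logic added above, with hypothetical types.
from dataclasses import dataclass, field
from typing import Any, Dict, Optional


@dataclass
class Sample:
    question: str
    metadata: Optional[Dict[str, Any]] = None


@dataclass
class Observation:
    question: str
    context: Dict[str, Any] = field(default_factory=dict)
    metadata: Dict[str, Any] = field(default_factory=dict)


samples = [Sample("2 + 2 = ?", metadata={"difficulty": "easy"}), Sample("3 * 3 = ?")]
observations = [
    Observation(
        question=s.question,
        context={},
        metadata=s.metadata if s.metadata is not None else {},
    )
    for s in samples
]
print(observations[1].metadata)  # {} rather than None
```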
camel/extractors/python_strategies.py

@@ -22,21 +22,30 @@ logger = get_logger(__name__)
 
 
 class BoxedStrategy(BaseExtractorStrategy):
-    r"""Extracts content from \\boxed{} environments."""
+    r"""Extracts content from \\boxed{} and \boxed{} environments."""
 
     async def extract(self, text: str) -> Optional[str]:
-        r"""Extract content from \\boxed{} environments.
+        r"""Extract content from \\boxed{} and \boxed{} environments.
 
         Args:
             text (str): The input text to process.
 
         Returns:
-            Optional[str]: Content inside \\boxed{} if found, else
+            Optional[str]: Content inside \\boxed{} or \boxed{} if found, else
+                None.
         """
         # Find the start of the boxed content
         boxed_pattern = "\\boxed{"
-
-
+        single_backslash_boxed_pattern = "\boxed{"
+
+        if (
+            boxed_pattern not in text
+            and single_backslash_boxed_pattern not in text
+        ):
+            logger.debug(
+                f"Patterns '{boxed_pattern}' or "
+                f"'{single_backslash_boxed_pattern}' not found in text: {text}"
+            )
             return None
 
         start_idx = text.find(boxed_pattern) + len(boxed_pattern)
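Per the hunk above, `BoxedStrategy.extract` is asynchronous and now also recognizes the single-backslash `\boxed{...}` spelling. A rough usage sketch, assuming only the class and signature shown in this diff:

```python
# Sketch only: exercises BoxedStrategy.extract() as shown in the diff above.
import asyncio

from camel.extractors.python_strategies import BoxedStrategy


async def main() -> None:
    strategy = BoxedStrategy()
    answer = await strategy.extract(r"The final answer is \boxed{42}.")
    print(answer)  # expected: "42"


asyncio.run(main())
```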
camel/models/cohere_model.py

@@ -44,7 +44,28 @@ except (ImportError, AttributeError):
 
 
 class CohereModel(BaseModelBackend):
-    r"""Cohere API in a unified BaseModelBackend interface.
+    r"""Cohere API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of Cohere series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`cohere.ClientV2().chat()`. If
+            :obj:`None`, :obj:`CohereConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Cohere service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Cohere service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
 
     @api_keys_required(
         [

@@ -58,6 +79,7 @@ class CohereModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ):
         import cohere
 

@@ -66,11 +88,17 @@ class CohereModel(BaseModelBackend):
 
         api_key = api_key or os.environ.get("COHERE_API_KEY")
         url = url or os.environ.get("COHERE_API_BASE_URL")
+
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self._client = cohere.ClientV2(
+            timeout=self._timeout, api_key=self._api_key
+        )
+        self._async_client = cohere.AsyncClientV2(
+            timeout=self._timeout, api_key=self._api_key
         )
-        self._client = cohere.ClientV2(api_key=self._api_key)
-        self._async_client = cohere.AsyncClientV2(api_key=self._api_key)
 
     def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
         if response.usage and response.usage.tokens:
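The Cohere backend now takes an explicit `timeout` (in seconds) and otherwise falls back to the `MODEL_TIMEOUT` environment variable, defaulting to 180. A hedged construction sketch based on the signature shown above; the model name is illustrative and a real `COHERE_API_KEY` would be needed for actual calls:

```python
# Sketch: pass an explicit timeout to the Cohere backend, or rely on the
# MODEL_TIMEOUT fallback shown in the diff. No request is made here.
import os

from camel.models.cohere_model import CohereModel

os.environ.setdefault("COHERE_API_KEY", "<your-key>")  # placeholder
os.environ.setdefault("MODEL_TIMEOUT", "120")          # used when timeout=None

model = CohereModel(model_type="command-r", timeout=30.0)  # "command-r" is illustrative
```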
camel/models/litellm_model.py

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
 from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel

@@ -33,8 +34,8 @@ class LiteLLMModel(BaseModelBackend):
         model_type (Union[ModelType, str]): Model for which a backend is
             created, such as GPT-3.5-turbo, Claude-2, etc.
         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into:obj:`
-
+            that will be fed into:obj:`completion()`. If:obj:`None`,
+            :obj:`LiteLLMConfig().as_dict()` will be used.
             (default: :obj:`None`)
         api_key (Optional[str], optional): The API key for authenticating with
             the model service. (default: :obj:`None`)

@@ -43,6 +44,10 @@ class LiteLLMModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`LiteLLMTokenCounter` will
             be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     # NOTE: Currently stream mode is not supported.

@@ -55,14 +60,15 @@ class LiteLLMModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         from litellm import completion
 
         if model_config_dict is None:
             model_config_dict = LiteLLMConfig().as_dict()
-
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self.client = completion
 

@@ -127,6 +133,7 @@ class LiteLLMModel(BaseModelBackend):
             ChatCompletion
         """
         response = self.client(
+            timeout=self._timeout,
             api_key=self._api_key,
             base_url=self._url,
             model=self.model_type,
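The LiteLLM backend applies the same fallback and now forwards the resolved value on every `completion()` call (`timeout=self._timeout`). The fallback expression itself is easy to sanity-check in isolation:

```python
# The exact fallback used across these backends:
#   timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
import os

os.environ["MODEL_TIMEOUT"] = "60"


def resolve_timeout(timeout=None):
    return timeout or float(os.environ.get("MODEL_TIMEOUT", 180))


print(resolve_timeout())      # 60.0 (environment fallback)
print(resolve_timeout(15.0))  # 15.0 (explicit value wins)
```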
camel/models/mistral_model.py

@@ -62,6 +62,10 @@ class MistralModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter` will
             be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(

@@ -77,6 +81,7 @@ class MistralModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         from mistralai import Mistral
 

@@ -85,10 +90,17 @@ class MistralModel(BaseModelBackend):
 
         api_key = api_key or os.environ.get("MISTRAL_API_KEY")
         url = url or os.environ.get("MISTRAL_API_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self._client = Mistral(
+            timeout_ms=int(self._timeout)
+            if self._timeout is not None
+            else None,
+            api_key=self._api_key,
+            server_url=self._url,
         )
-        self._client = Mistral(api_key=self._api_key, server_url=self._url)
 
     def _to_openai_response(
         self, response: 'ChatCompletionResponse'
camel/models/reka_model.py

@@ -56,6 +56,10 @@ class RekaModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter` will
             be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(

@@ -71,6 +75,7 @@ class RekaModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         from reka.client import AsyncReka, Reka
 

@@ -78,12 +83,15 @@ class RekaModel(BaseModelBackend):
             model_config_dict = RekaConfig().as_dict()
         api_key = api_key or os.environ.get("REKA_API_KEY")
         url = url or os.environ.get("REKA_API_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self._client = Reka(
+            api_key=self._api_key, base_url=self._url, timeout=self._timeout
         )
-        self._client = Reka(api_key=self._api_key, base_url=self._url)
         self._async_client = AsyncReka(
-            api_key=self._api_key, base_url=self._url
+            api_key=self._api_key, base_url=self._url, timeout=self._timeout
         )
 
     def _convert_reka_to_openai_response(
camel/models/togetherai_model.py

@@ -96,70 +96,145 @@ class TogetherAIModel(BaseModelBackend):
             base_url=self._url,
         )
 
-
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of OpenAI chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
-            Union[ChatCompletion,
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
-                `
+                `Stream[ChatCompletionChunk]` in the stream mode.
         """
-
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
         )
-
+        if response_format:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(messages, tools)
 
-    def
+    async def _arun(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion,
-        r"""Runs inference of OpenAI chat completion.
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
-            Union[ChatCompletion,
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
-                `
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-
-
-
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **
+            **request_config,
         )
-        return response
 
-
-
-
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
 
-
-
-            tokenization style.
-        """
+        if tools:
+            request_config["tools"] = tools
 
-
-
-
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return await self._async_client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
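Functionally, `_run` and `_arun` now dispatch on `response_format`: with a Pydantic schema they go through `beta.chat.completions.parse`, otherwise through plain `chat.completions.create` with any tool schemas merged into the request config. A rough sketch of the structured-output path; the model name and prompt are illustrative, and a Together API key would need to be configured for a real request:

```python
# Sketch only: exercises the new response_format dispatch in TogetherAIModel.
# Model name and message content are illustrative assumptions.
from pydantic import BaseModel

from camel.models.togetherai_model import TogetherAIModel


class Answer(BaseModel):
    value: int


model = TogetherAIModel(model_type="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo")
messages = [{"role": "user", "content": 'Return {"value": 7} as JSON.'}]

# With response_format set, _run() routes to beta.chat.completions.parse();
# without it, a plain ChatCompletion (or a stream) is returned.
completion = model._run(messages, response_format=Answer)
```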
camel/models/volcano_model.py

@@ -41,6 +41,10 @@ class VolcanoModel(OpenAICompatibleModel):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter`
             will be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(

@@ -55,6 +59,7 @@ class VolcanoModel(OpenAICompatibleModel):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = {}

@@ -65,9 +70,9 @@ class VolcanoModel(OpenAICompatibleModel):
             or os.environ.get("VOLCANO_API_BASE_URL")
             or "https://ark.cn-beijing.volces.com/api/v3"
         )
-
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
        )
 
     @property
camel/runtime/__init__.py

@@ -16,6 +16,7 @@ from .configs import TaskConfig
 from .docker_runtime import DockerRuntime
 from .llm_guard_runtime import LLMGuardRuntime
 from .remote_http_runtime import RemoteHttpRuntime
+from .ubuntu_docker_runtime import UbuntuDockerRuntime
 
 # TODO: Add Celery Runtime to support distributed computing,
 # Rate Limiting, Load Balancing, etc.

@@ -26,4 +27,5 @@ __all__ = [
     "RemoteHttpRuntime",
     "LLMGuardRuntime",
     "TaskConfig",
+    "UbuntuDockerRuntime",
 ]
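The new runtime is re-exported from `camel.runtime`, so it can be imported next to the existing runtimes; its constructor and capabilities live in the new `ubuntu_docker_runtime.py` (340 lines) and are not shown in this diff, so only the import is sketched:

```python
# Import sketch based solely on the __init__.py change above.
from camel.runtime import DockerRuntime, RemoteHttpRuntime, UbuntuDockerRuntime

print(UbuntuDockerRuntime.__module__)  # camel.runtime.ubuntu_docker_runtime
```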