camel-ai 0.2.82__py3-none-any.whl → 0.2.83a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +3 -3
- camel/agents/__init__.py +2 -2
- camel/agents/_types.py +2 -2
- camel/agents/_utils.py +2 -2
- camel/agents/base.py +2 -2
- camel/agents/chat_agent.py +765 -541
- camel/agents/critic_agent.py +2 -2
- camel/agents/deductive_reasoner_agent.py +2 -2
- camel/agents/embodied_agent.py +2 -2
- camel/agents/knowledge_graph_agent.py +2 -2
- camel/agents/mcp_agent.py +2 -2
- camel/agents/multi_hop_generator_agent.py +2 -2
- camel/agents/programmed_agent_instruction.py +2 -2
- camel/agents/repo_agent.py +2 -2
- camel/agents/role_assignment_agent.py +2 -2
- camel/agents/search_agent.py +2 -2
- camel/agents/task_agent.py +2 -2
- camel/agents/tool_agents/__init__.py +2 -2
- camel/agents/tool_agents/base.py +2 -2
- camel/agents/tool_agents/hugging_face_tool_agent.py +2 -2
- camel/benchmarks/__init__.py +2 -2
- camel/benchmarks/apibank.py +2 -2
- camel/benchmarks/apibench.py +2 -2
- camel/benchmarks/base.py +2 -2
- camel/benchmarks/browsecomp.py +2 -2
- camel/benchmarks/gaia.py +2 -2
- camel/benchmarks/mock_website/mock_web.py +2 -2
- camel/benchmarks/mock_website/shopping_mall/app.py +2 -2
- camel/benchmarks/nexus.py +2 -2
- camel/benchmarks/ragbench.py +2 -2
- camel/bots/__init__.py +2 -2
- camel/bots/discord/__init__.py +2 -2
- camel/bots/discord/discord_app.py +2 -2
- camel/bots/discord/discord_installation.py +2 -2
- camel/bots/discord/discord_store.py +2 -2
- camel/bots/slack/__init__.py +2 -2
- camel/bots/slack/models.py +2 -2
- camel/bots/slack/slack_app.py +2 -2
- camel/bots/telegram_bot.py +2 -2
- camel/configs/__init__.py +8 -2
- camel/configs/aihubmix_config.py +2 -2
- camel/configs/aiml_config.py +2 -2
- camel/configs/amd_config.py +2 -2
- camel/configs/anthropic_config.py +2 -2
- camel/configs/base_config.py +2 -2
- camel/configs/bedrock_config.py +2 -2
- camel/configs/cerebras_config.py +2 -2
- camel/configs/cohere_config.py +2 -2
- camel/configs/cometapi_config.py +2 -2
- camel/configs/crynux_config.py +2 -2
- camel/configs/deepseek_config.py +2 -2
- camel/configs/function_gemma_config.py +59 -0
- camel/configs/gemini_config.py +2 -2
- camel/configs/groq_config.py +2 -2
- camel/configs/internlm_config.py +2 -2
- camel/configs/litellm_config.py +2 -2
- camel/configs/lmstudio_config.py +2 -2
- camel/configs/minimax_config.py +2 -2
- camel/configs/mistral_config.py +2 -2
- camel/configs/modelscope_config.py +2 -2
- camel/configs/moonshot_config.py +2 -2
- camel/configs/nebius_config.py +2 -2
- camel/configs/netmind_config.py +2 -2
- camel/configs/novita_config.py +2 -2
- camel/configs/nvidia_config.py +2 -2
- camel/configs/ollama_config.py +2 -2
- camel/configs/openai_config.py +2 -2
- camel/configs/openrouter_config.py +2 -2
- camel/configs/ppio_config.py +2 -2
- camel/configs/qianfan_config.py +2 -2
- camel/configs/qwen_config.py +2 -2
- camel/configs/reka_config.py +2 -2
- camel/configs/samba_config.py +2 -2
- camel/configs/sglang_config.py +2 -2
- camel/configs/siliconflow_config.py +2 -2
- camel/configs/togetherai_config.py +2 -2
- camel/configs/vllm_config.py +2 -2
- camel/configs/watsonx_config.py +2 -2
- camel/configs/yi_config.py +2 -2
- camel/configs/zhipuai_config.py +2 -2
- camel/data_collectors/__init__.py +2 -2
- camel/data_collectors/alpaca_collector.py +2 -2
- camel/data_collectors/base.py +2 -2
- camel/data_collectors/sharegpt_collector.py +2 -2
- camel/datagen/__init__.py +2 -2
- camel/datagen/cot_datagen.py +2 -2
- camel/datagen/evol_instruct/__init__.py +2 -2
- camel/datagen/evol_instruct/evol_instruct.py +2 -2
- camel/datagen/evol_instruct/scorer.py +2 -2
- camel/datagen/evol_instruct/templates.py +2 -2
- camel/datagen/self_improving_cot.py +2 -2
- camel/datagen/self_instruct/__init__.py +2 -2
- camel/datagen/self_instruct/filter/__init__.py +2 -2
- camel/datagen/self_instruct/filter/filter_function.py +2 -2
- camel/datagen/self_instruct/filter/filter_registry.py +2 -2
- camel/datagen/self_instruct/filter/instruction_filter.py +2 -2
- camel/datagen/self_instruct/self_instruct.py +2 -2
- camel/datagen/self_instruct/templates.py +2 -2
- camel/datagen/source2synth/__init__.py +2 -2
- camel/datagen/source2synth/data_processor.py +2 -2
- camel/datagen/source2synth/models.py +2 -2
- camel/datagen/source2synth/user_data_processor_config.py +2 -2
- camel/datahubs/__init__.py +2 -2
- camel/datahubs/base.py +2 -2
- camel/datahubs/huggingface.py +2 -2
- camel/datahubs/models.py +2 -2
- camel/datasets/__init__.py +2 -2
- camel/datasets/base_generator.py +2 -2
- camel/datasets/few_shot_generator.py +2 -2
- camel/datasets/models.py +2 -2
- camel/datasets/self_instruct_generator.py +2 -2
- camel/datasets/static_dataset.py +2 -2
- camel/embeddings/__init__.py +2 -2
- camel/embeddings/azure_embedding.py +2 -2
- camel/embeddings/base.py +2 -2
- camel/embeddings/gemini_embedding.py +2 -2
- camel/embeddings/jina_embedding.py +2 -2
- camel/embeddings/mistral_embedding.py +2 -2
- camel/embeddings/openai_compatible_embedding.py +2 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/embeddings/sentence_transformers_embeddings.py +2 -2
- camel/embeddings/together_embedding.py +2 -2
- camel/embeddings/vlm_embedding.py +2 -2
- camel/environments/__init__.py +2 -2
- camel/environments/models.py +2 -2
- camel/environments/multi_step.py +2 -2
- camel/environments/rlcards_env.py +2 -2
- camel/environments/single_step.py +2 -2
- camel/environments/tic_tac_toe.py +2 -2
- camel/extractors/__init__.py +2 -2
- camel/extractors/base.py +2 -2
- camel/extractors/python_strategies.py +2 -2
- camel/generators.py +2 -2
- camel/human.py +2 -2
- camel/interpreters/__init__.py +2 -2
- camel/interpreters/base.py +2 -2
- camel/interpreters/docker_interpreter.py +2 -2
- camel/interpreters/e2b_interpreter.py +2 -2
- camel/interpreters/internal_python_interpreter.py +2 -2
- camel/interpreters/interpreter_error.py +2 -2
- camel/interpreters/ipython_interpreter.py +2 -2
- camel/interpreters/microsandbox_interpreter.py +2 -2
- camel/interpreters/subprocess_interpreter.py +2 -2
- camel/loaders/__init__.py +2 -2
- camel/loaders/apify_reader.py +2 -2
- camel/loaders/base_io.py +2 -2
- camel/loaders/base_loader.py +2 -2
- camel/loaders/chunkr_reader.py +2 -2
- camel/loaders/crawl4ai_reader.py +2 -2
- camel/loaders/firecrawl_reader.py +2 -2
- camel/loaders/jina_url_reader.py +2 -2
- camel/loaders/markitdown.py +2 -2
- camel/loaders/mineru_extractor.py +2 -2
- camel/loaders/mistral_reader.py +2 -2
- camel/loaders/scrapegraph_reader.py +2 -2
- camel/loaders/unstructured_io.py +2 -2
- camel/logger.py +2 -2
- camel/memories/__init__.py +2 -2
- camel/memories/agent_memories.py +2 -2
- camel/memories/base.py +2 -2
- camel/memories/blocks/__init__.py +2 -2
- camel/memories/blocks/chat_history_block.py +2 -2
- camel/memories/blocks/vectordb_block.py +2 -2
- camel/memories/context_creators/__init__.py +2 -2
- camel/memories/context_creators/score_based.py +89 -2
- camel/memories/records.py +2 -2
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +2 -2
- camel/messages/conversion/__init__.py +2 -2
- camel/messages/conversion/alpaca.py +2 -2
- camel/messages/conversion/conversation_models.py +2 -2
- camel/messages/conversion/sharegpt/__init__.py +2 -2
- camel/messages/conversion/sharegpt/function_call_formatter.py +2 -2
- camel/messages/conversion/sharegpt/hermes/__init__.py +2 -2
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +2 -2
- camel/messages/func_message.py +2 -2
- camel/models/__init__.py +4 -2
- camel/models/_utils.py +2 -2
- camel/models/aihubmix_model.py +2 -2
- camel/models/aiml_model.py +2 -2
- camel/models/amd_model.py +2 -2
- camel/models/anthropic_model.py +2 -2
- camel/models/aws_bedrock_model.py +2 -2
- camel/models/azure_openai_model.py +4 -28
- camel/models/base_audio_model.py +2 -2
- camel/models/base_model.py +192 -14
- camel/models/cerebras_model.py +2 -2
- camel/models/cohere_model.py +4 -30
- camel/models/cometapi_model.py +2 -2
- camel/models/crynux_model.py +2 -2
- camel/models/deepseek_model.py +4 -28
- camel/models/fish_audio_model.py +2 -2
- camel/models/function_gemma_model.py +889 -0
- camel/models/gemini_model.py +4 -28
- camel/models/groq_model.py +2 -2
- camel/models/internlm_model.py +2 -2
- camel/models/litellm_model.py +3 -17
- camel/models/lmstudio_model.py +2 -2
- camel/models/minimax_model.py +2 -2
- camel/models/mistral_model.py +4 -30
- camel/models/model_factory.py +4 -2
- camel/models/model_manager.py +2 -2
- camel/models/modelscope_model.py +2 -2
- camel/models/moonshot_model.py +3 -15
- camel/models/nebius_model.py +2 -2
- camel/models/nemotron_model.py +2 -2
- camel/models/netmind_model.py +2 -2
- camel/models/novita_model.py +2 -2
- camel/models/nvidia_model.py +2 -2
- camel/models/ollama_model.py +2 -2
- camel/models/openai_audio_models.py +2 -2
- camel/models/openai_compatible_model.py +4 -28
- camel/models/openai_model.py +4 -43
- camel/models/openrouter_model.py +2 -2
- camel/models/ppio_model.py +2 -2
- camel/models/qianfan_model.py +2 -2
- camel/models/qwen_model.py +2 -2
- camel/models/reka_model.py +4 -30
- camel/models/reward/__init__.py +2 -2
- camel/models/reward/base_reward_model.py +2 -2
- camel/models/reward/evaluator.py +2 -2
- camel/models/reward/nemotron_model.py +2 -2
- camel/models/reward/skywork_model.py +2 -2
- camel/models/samba_model.py +4 -30
- camel/models/sglang_model.py +4 -30
- camel/models/siliconflow_model.py +2 -2
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +2 -2
- camel/models/vllm_model.py +2 -2
- camel/models/volcano_model.py +147 -4
- camel/models/watsonx_model.py +4 -30
- camel/models/yi_model.py +2 -2
- camel/models/zhipuai_model.py +2 -2
- camel/parsers/__init__.py +2 -2
- camel/parsers/mcp_tool_call_parser.py +2 -2
- camel/personas/__init__.py +2 -2
- camel/personas/persona.py +2 -2
- camel/personas/persona_hub.py +2 -2
- camel/prompts/__init__.py +2 -2
- camel/prompts/ai_society.py +2 -2
- camel/prompts/base.py +2 -2
- camel/prompts/code.py +2 -2
- camel/prompts/evaluation.py +2 -2
- camel/prompts/generate_text_embedding_data.py +2 -2
- camel/prompts/image_craft.py +2 -2
- camel/prompts/misalignment.py +2 -2
- camel/prompts/multi_condition_image_craft.py +2 -2
- camel/prompts/object_recognition.py +2 -2
- camel/prompts/persona_hub.py +2 -2
- camel/prompts/prompt_templates.py +2 -2
- camel/prompts/role_description_prompt_template.py +2 -2
- camel/prompts/solution_extraction.py +2 -2
- camel/prompts/task_prompt_template.py +2 -2
- camel/prompts/translation.py +2 -2
- camel/prompts/video_description_prompt.py +2 -2
- camel/responses/__init__.py +2 -2
- camel/responses/agent_responses.py +2 -2
- camel/retrievers/__init__.py +2 -2
- camel/retrievers/auto_retriever.py +2 -2
- camel/retrievers/base.py +2 -2
- camel/retrievers/bm25_retriever.py +2 -2
- camel/retrievers/cohere_rerank_retriever.py +2 -2
- camel/retrievers/hybrid_retrival.py +2 -2
- camel/retrievers/vector_retriever.py +2 -2
- camel/runtimes/__init__.py +2 -2
- camel/runtimes/api.py +2 -2
- camel/runtimes/base.py +2 -2
- camel/runtimes/configs.py +2 -2
- camel/runtimes/daytona_runtime.py +2 -2
- camel/runtimes/docker_runtime.py +2 -2
- camel/runtimes/llm_guard_runtime.py +2 -2
- camel/runtimes/remote_http_runtime.py +2 -2
- camel/runtimes/ubuntu_docker_runtime.py +2 -2
- camel/runtimes/utils/__init__.py +2 -2
- camel/runtimes/utils/function_risk_toolkit.py +2 -2
- camel/runtimes/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/__init__.py +2 -2
- camel/schemas/base.py +2 -2
- camel/schemas/openai_converter.py +2 -2
- camel/schemas/outlines_converter.py +2 -2
- camel/services/agent_openapi_server.py +2 -2
- camel/societies/__init__.py +2 -2
- camel/societies/babyagi_playing.py +2 -2
- camel/societies/role_playing.py +2 -2
- camel/societies/workforce/__init__.py +2 -2
- camel/societies/workforce/base.py +2 -2
- camel/societies/workforce/events.py +4 -2
- camel/societies/workforce/prompts.py +9 -8
- camel/societies/workforce/role_playing_worker.py +2 -2
- camel/societies/workforce/single_agent_worker.py +2 -2
- camel/societies/workforce/structured_output_handler.py +2 -2
- camel/societies/workforce/task_channel.py +2 -2
- camel/societies/workforce/utils.py +2 -2
- camel/societies/workforce/worker.py +2 -2
- camel/societies/workforce/workflow_memory_manager.py +2 -2
- camel/societies/workforce/workforce.py +132 -71
- camel/societies/workforce/workforce_callback.py +2 -2
- camel/societies/workforce/workforce_logger.py +2 -2
- camel/societies/workforce/workforce_metrics.py +2 -2
- camel/storages/__init__.py +2 -2
- camel/storages/graph_storages/__init__.py +2 -2
- camel/storages/graph_storages/base.py +2 -2
- camel/storages/graph_storages/graph_element.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +2 -2
- camel/storages/graph_storages/neo4j_graph.py +2 -2
- camel/storages/key_value_storages/__init__.py +2 -2
- camel/storages/key_value_storages/base.py +2 -2
- camel/storages/key_value_storages/in_memory.py +2 -2
- camel/storages/key_value_storages/json.py +2 -2
- camel/storages/key_value_storages/mem0_cloud.py +2 -2
- camel/storages/key_value_storages/redis.py +2 -2
- camel/storages/object_storages/__init__.py +2 -2
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/base.py +2 -2
- camel/storages/object_storages/google_cloud.py +2 -2
- camel/storages/vectordb_storages/__init__.py +2 -2
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/chroma.py +2 -2
- camel/storages/vectordb_storages/faiss.py +2 -2
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/oceanbase.py +2 -2
- camel/storages/vectordb_storages/pgvector.py +2 -2
- camel/storages/vectordb_storages/qdrant.py +2 -2
- camel/storages/vectordb_storages/surreal.py +2 -2
- camel/storages/vectordb_storages/tidb.py +2 -2
- camel/storages/vectordb_storages/weaviate.py +2 -2
- camel/tasks/__init__.py +2 -2
- camel/tasks/task.py +2 -2
- camel/tasks/task_prompt.py +2 -2
- camel/terminators/__init__.py +2 -2
- camel/terminators/base.py +2 -2
- camel/terminators/response_terminator.py +2 -2
- camel/terminators/token_limit_terminator.py +2 -2
- camel/toolkits/__init__.py +6 -3
- camel/toolkits/aci_toolkit.py +2 -2
- camel/toolkits/arxiv_toolkit.py +2 -2
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/async_browser_toolkit.py +2 -2
- camel/toolkits/audio_analysis_toolkit.py +2 -2
- camel/toolkits/base.py +47 -5
- camel/toolkits/bohrium_toolkit.py +2 -2
- camel/toolkits/browser_toolkit.py +2 -2
- camel/toolkits/browser_toolkit_commons.py +2 -2
- camel/toolkits/code_execution.py +2 -2
- camel/toolkits/context_summarizer_toolkit.py +2 -2
- camel/toolkits/craw4ai_toolkit.py +2 -2
- camel/toolkits/dappier_toolkit.py +2 -2
- camel/toolkits/data_commons_toolkit.py +2 -2
- camel/toolkits/dingtalk.py +2 -2
- camel/toolkits/earth_science_toolkit.py +2 -2
- camel/toolkits/edgeone_pages_mcp_toolkit.py +2 -2
- camel/toolkits/excel_toolkit.py +2 -2
- camel/toolkits/file_toolkit.py +2 -2
- camel/toolkits/function_tool.py +95 -25
- camel/toolkits/github_toolkit.py +2 -2
- camel/toolkits/gmail_toolkit.py +2 -2
- camel/toolkits/google_calendar_toolkit.py +2 -2
- camel/toolkits/google_drive_mcp_toolkit.py +2 -2
- camel/toolkits/google_maps_toolkit.py +2 -2
- camel/toolkits/google_scholar_toolkit.py +2 -2
- camel/toolkits/human_toolkit.py +2 -2
- camel/toolkits/hybrid_browser_toolkit/__init__.py +2 -2
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +2 -2
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +2 -2
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +89 -104
- camel/toolkits/hybrid_browser_toolkit/installer.py +2 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +25 -14
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +6 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +2 -2
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +2 -2
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +2 -2
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +2 -2
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +2 -2
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +2 -2
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2 -2
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +2 -2
- camel/toolkits/image_analysis_toolkit.py +2 -2
- camel/toolkits/image_generation_toolkit.py +2 -2
- camel/toolkits/jina_reranker_toolkit.py +2 -2
- camel/toolkits/klavis_toolkit.py +2 -2
- camel/toolkits/linkedin_toolkit.py +2 -2
- camel/toolkits/markitdown_toolkit.py +2 -2
- camel/toolkits/math_toolkit.py +2 -2
- camel/toolkits/mcp_toolkit.py +2 -2
- camel/toolkits/memory_toolkit.py +2 -2
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/message_agent_toolkit.py +2 -2
- camel/toolkits/message_integration.py +6 -2
- camel/toolkits/microsoft_outlook_mail_toolkit.py +1885 -0
- camel/toolkits/mineru_toolkit.py +2 -2
- camel/toolkits/minimax_mcp_toolkit.py +2 -2
- camel/toolkits/networkx_toolkit.py +2 -2
- camel/toolkits/note_taking_toolkit.py +2 -2
- camel/toolkits/notion_mcp_toolkit.py +2 -2
- camel/toolkits/notion_toolkit.py +2 -2
- camel/toolkits/open_api_specs/biztoc/__init__.py +2 -2
- camel/toolkits/open_api_specs/coursera/__init__.py +2 -2
- camel/toolkits/open_api_specs/create_qr_code/__init__.py +2 -2
- camel/toolkits/open_api_specs/klarna/__init__.py +2 -2
- camel/toolkits/open_api_specs/nasa_apod/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/get_classes.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +2 -2
- camel/toolkits/open_api_specs/security_config.py +2 -2
- camel/toolkits/open_api_specs/speak/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +2 -2
- camel/toolkits/open_api_toolkit.py +2 -2
- camel/toolkits/openbb_toolkit.py +2 -2
- camel/toolkits/origene_mcp_toolkit.py +2 -2
- camel/toolkits/playwright_mcp_toolkit.py +2 -2
- camel/toolkits/pptx_toolkit.py +2 -2
- camel/toolkits/pubmed_toolkit.py +2 -2
- camel/toolkits/pulse_mcp_search_toolkit.py +2 -2
- camel/toolkits/pyautogui_toolkit.py +2 -2
- camel/toolkits/reddit_toolkit.py +2 -2
- camel/toolkits/resend_toolkit.py +2 -2
- camel/toolkits/retrieval_toolkit.py +2 -2
- camel/toolkits/screenshot_toolkit.py +2 -2
- camel/toolkits/search_toolkit.py +70 -13
- camel/toolkits/searxng_toolkit.py +2 -2
- camel/toolkits/semantic_scholar_toolkit.py +2 -2
- camel/toolkits/slack_toolkit.py +2 -2
- camel/toolkits/sql_toolkit.py +2 -2
- camel/toolkits/stripe_toolkit.py +2 -2
- camel/toolkits/sympy_toolkit.py +2 -2
- camel/toolkits/task_planning_toolkit.py +2 -2
- camel/toolkits/terminal_toolkit/__init__.py +2 -2
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +323 -112
- camel/toolkits/terminal_toolkit/utils.py +179 -52
- camel/toolkits/thinking_toolkit.py +2 -2
- camel/toolkits/twitter_toolkit.py +2 -2
- camel/toolkits/vertex_ai_veo_toolkit.py +2 -2
- camel/toolkits/video_analysis_toolkit.py +2 -2
- camel/toolkits/video_download_toolkit.py +2 -2
- camel/toolkits/weather_toolkit.py +2 -2
- camel/toolkits/web_deploy_toolkit.py +2 -2
- camel/toolkits/wechat_official_toolkit.py +2 -2
- camel/toolkits/whatsapp_toolkit.py +2 -2
- camel/toolkits/wolfram_alpha_toolkit.py +2 -2
- camel/toolkits/zapier_toolkit.py +2 -2
- camel/types/__init__.py +2 -2
- camel/types/agents/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +2 -2
- camel/types/enums.py +5 -4
- camel/types/mcp_registries.py +2 -2
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +10 -6
- camel/utils/__init__.py +5 -2
- camel/utils/agent_context.py +41 -0
- camel/utils/async_func.py +2 -2
- camel/utils/chunker/__init__.py +2 -2
- camel/utils/chunker/base.py +2 -2
- camel/utils/chunker/code_chunker.py +2 -2
- camel/utils/chunker/uio_chunker.py +2 -2
- camel/utils/commons.py +2 -2
- camel/utils/constants.py +2 -2
- camel/utils/context_utils.py +2 -2
- camel/utils/deduplication.py +2 -2
- camel/utils/filename.py +2 -2
- camel/utils/langfuse.py +18 -10
- camel/utils/mcp.py +2 -2
- camel/utils/mcp_client.py +2 -2
- camel/utils/message_summarizer.py +2 -2
- camel/utils/response_format.py +2 -2
- camel/utils/token_counting.py +2 -2
- camel/utils/tool_result.py +2 -2
- camel/verifiers/__init__.py +2 -2
- camel/verifiers/base.py +2 -2
- camel/verifiers/math_verifier.py +2 -2
- camel/verifiers/models.py +2 -2
- camel/verifiers/physics_verifier.py +2 -2
- camel/verifiers/python_verifier.py +2 -2
- {camel_ai-0.2.82.dist-info → camel_ai-0.2.83a6.dist-info}/METADATA +34 -29
- camel_ai-0.2.83a6.dist-info/RECORD +511 -0
- camel_ai-0.2.82.dist-info/RECORD +0 -507
- {camel_ai-0.2.82.dist-info → camel_ai-0.2.83a6.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.82.dist-info → camel_ai-0.2.83a6.dist-info}/licenses/LICENSE +0 -0
camel/models/function_gemma_model.py (new file)

@@ -0,0 +1,889 @@
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
import json
import os
import re
import time
import uuid
from typing import Any, Dict, List, Optional, Tuple, Type, Union

import httpx
from pydantic import BaseModel

from camel.configs import FunctionGemmaConfig
from camel.logger import get_logger
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import ChatCompletion, CompletionUsage, ModelType
from camel.utils import (
    BaseTokenCounter,
    OpenAITokenCounter,
    update_current_observation,
)

# conditional observe import based on environment variables
if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
    try:
        from langfuse.decorators import observe
    except ImportError:
        from camel.utils import observe
elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
    try:
        from traceroot import trace as observe  # type: ignore[import]
    except ImportError:
        from camel.utils import observe
else:
    from camel.utils import observe

logger = get_logger(__name__)


class FunctionGemmaModel(BaseModelBackend):
    r"""FunctionGemma model backend for Ollama with custom tool calling format.

    FunctionGemma is a specialized Gemma model fine-tuned for function calling.
    It uses a custom chat template format that differs from OpenAI's format.
    This backend handles conversion between CAMEL's OpenAI-style tool schemas
    and FunctionGemma's native format.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created (e.g., "functiongemma").
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            of configuration options. If :obj:`None`,
            :obj:`FunctionGemmaConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): Not required for local Ollama.
            (default: :obj:`None`)
        url (Optional[str], optional): The URL to the Ollama server.
            (default: :obj:`http://localhost:11434`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4O_MINI)` will be used. (default: :obj:`None`)
        timeout (Optional[float], optional): The timeout value in seconds for
            API calls. If not provided, will fall back to the MODEL_TIMEOUT
            environment variable or default to 180 seconds.
            (default: :obj:`None`)
        max_retries (int, optional): Maximum number of retries for API calls.
            (default: :obj:`3`)
    """

    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
        max_retries: int = 3,
    ) -> None:
        if model_config_dict is None:
            model_config_dict = FunctionGemmaConfig().as_dict()

        url = url or os.environ.get(
            "OLLAMA_API_BASE_URL",
            "http://localhost:11434",
        )
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))

        super().__init__(
            model_type,
            model_config_dict,
            api_key,
            url,
            token_counter,
            timeout,
            max_retries,
        )

        self._client = httpx.Client(timeout=self._timeout)
        self._async_client = httpx.AsyncClient(timeout=self._timeout)

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter

    def _escape_string(self, s: str) -> str:
        r"""Wrap string values in <escape> tags for FunctionGemma format.

        Args:
            s (str): The string to escape.

        Returns:
            str: The escaped string.
        """
        return f"<escape>{s}<escape>"

    def _unescape_string(self, s: str) -> str:
        r"""Remove <escape> tags from string values.

        Args:
            s (str): The string to unescape.

        Returns:
            str: The unescaped string.
        """
        return s.replace("<escape>", "")

    def _type_to_function_gemma(self, json_type: Union[str, List[str]]) -> str:
        r"""Convert JSON schema type to FunctionGemma type (uppercase).

        Args:
            json_type (Union[str, List[str]]): The JSON schema type. Can be a
                string like "string" or a list like ["string", "null"] for
                optional parameters.

        Returns:
            str: The FunctionGemma type.
        """
        if isinstance(json_type, list):
            # handle union types like ["string", "null"]
            # use the first non-null type
            for t in json_type:
                if t != "null":
                    return t.upper()
            return "STRING"  # fallback
        return json_type.upper()

    def _format_parameter_properties(
        self,
        properties: Dict[str, Any],
        required: List[str],
    ) -> str:
        r"""Format parameter properties for FunctionGemma declaration.

        Args:
            properties (Dict[str, Any]): The properties dictionary.
            required (List[str]): List of required parameter names.

        Returns:
            str: Formatted properties string.
        """
        parts = []
        for name, prop in sorted(properties.items()):
            desc = prop.get("description", "")
            prop_type = prop.get("type", "string")

            param_str = (
                f"{name}:{{description:{self._escape_string(desc)},"
                f"type:{self._escape_string(self._type_to_function_gemma(prop_type))}}}"
            )
            parts.append(param_str)

        return ",".join(parts)

    def _convert_tool_to_function_gemma(self, tool: Dict[str, Any]) -> str:
        r"""Convert OpenAI tool schema to FunctionGemma declaration format.

        Args:
            tool (Dict[str, Any]): The OpenAI tool schema.

        Returns:
            str: The FunctionGemma declaration string.
        """
        func = tool.get("function", {})
        name = func.get("name", "")
        description = func.get("description", "")
        params = func.get("parameters", {})

        properties = params.get("properties", {})
        required = params.get("required", [])
        param_type = params.get("type", "object")

        # format properties
        props_str = self._format_parameter_properties(properties, required)

        # format required list
        req_parts = [self._escape_string(r) for r in required]
        req_str = ",".join(req_parts)

        type_escaped = self._escape_string(
            self._type_to_function_gemma(param_type)
        )
        desc_escaped = self._escape_string(description)

        declaration = (
            f"<start_function_declaration>"
            f"declaration:{name}{{description:{desc_escaped},"
            f"parameters:{{properties:{{{props_str}}},"
            f"required:[{req_str}],"
            f"type:{type_escaped}}}}}"
            f"<end_function_declaration>"
        )

        return declaration

    def _format_developer_turn(
        self,
        content: str,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        r"""Format the developer/system turn with function declarations.

        Args:
            content (str): The system message content.
            tools (Optional[List[Dict[str, Any]]]): List of tool schemas.

        Returns:
            str: Formatted developer turn.
        """
        result = "<start_of_turn>developer\n"
        if content:
            result += content
        if tools:
            for tool in tools:
                result += self._convert_tool_to_function_gemma(tool)
        result += "<end_of_turn>\n"
        return result

    def _format_user_turn(self, content: str) -> str:
        r"""Format a user message turn.

        Args:
            content (str): The user message content.

        Returns:
            str: Formatted user turn.
        """
        return f"<start_of_turn>user\n{content}<end_of_turn>\n"

    def _format_model_turn(self, message: OpenAIMessage) -> str:
        r"""Format an assistant/model message turn.

        Args:
            message (OpenAIMessage): The assistant message.

        Returns:
            str: Formatted model turn.
        """
        content = message.get("content", "") or ""
        tool_calls = message.get("tool_calls")

        result = f"<start_of_turn>model\n{content}"

        if tool_calls and isinstance(tool_calls, list):
            for tool_call in tool_calls:
                func = tool_call.get("function", {})
                func_name = func.get("name", "")
                args_raw = func.get("arguments", "{}")
                if isinstance(args_raw, str):
                    args: Dict[str, Any] = json.loads(args_raw)
                else:
                    args = dict(args_raw) if args_raw else {}

                # format arguments
                arg_parts = []
                for key, value in sorted(args.items()):
                    if isinstance(value, str):
                        arg_parts.append(f"{key}:{self._escape_string(value)}")
                    else:
                        arg_parts.append(f"{key}:{json.dumps(value)}")

                args_str = ",".join(arg_parts)
                result += (
                    f"<start_function_call>call:{func_name}{{{args_str}}}"
                    f"<end_function_call>"
                )

        result += "<end_of_turn>\n"
        return result

    def _format_tool_response(self, message: OpenAIMessage) -> str:
        r"""Format a tool response message.

        Args:
            message (OpenAIMessage): The tool response message.

        Returns:
            str: Formatted tool response.
        """
        content = message.get("content", "")
        name = message.get("name", "")

        # try to parse content as json for structured response
        try:
            if not isinstance(content, str):
                content = str(content) if content else ""
            result_data = json.loads(content)
            # check if it's a dict (structured response)
            if isinstance(result_data, dict):
                result_parts = []
                for key, value in sorted(result_data.items()):
                    if isinstance(value, str):
                        result_parts.append(
                            f"{key}:{self._escape_string(value)}"
                        )
                    else:
                        result_parts.append(f"{key}:{json.dumps(value)}")
                result_str = ",".join(result_parts)
            else:
                # scalar value (int, float, bool, list, etc.)
                if isinstance(result_data, str):
                    result_str = f"value:{self._escape_string(result_data)}"
                else:
                    result_str = f"value:{json.dumps(result_data)}"
        except (json.JSONDecodeError, TypeError):
            result_str = f"value:{self._escape_string(str(content))}"

        return (
            f"<start_function_response>response:{name}{{{result_str}}}"
            f"<end_function_response>"
        )

    def _format_messages(
        self,
        messages: List[OpenAIMessage],
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        r"""Format all messages into a FunctionGemma prompt string.

        Args:
            messages (List[OpenAIMessage]): List of messages in OpenAI format.
            tools (Optional[List[Dict[str, Any]]]): List of tool schemas.

        Returns:
            str: Complete formatted prompt.
        """
        prompt = "<bos>"

        # check for system message
        system_content = ""
        start_idx = 0

        if messages and messages[0].get("role") in ["system", "developer"]:
            content = messages[0].get("content", "")
            if isinstance(content, str):
                system_content = content
            elif isinstance(content, list):
                # handle list content
                for item in content:
                    if isinstance(item, dict) and item.get("type") == "text":
                        system_content += item.get("text", "")
            start_idx = 1

        # add developer turn if we have system content or tools
        if system_content or tools:
            prompt += self._format_developer_turn(system_content, tools)

        # process remaining messages
        prev_role = None
        for msg in messages[start_idx:]:
            role = msg.get("role", "")

            if role == "user":
                content = msg.get("content", "")
                if isinstance(content, str):
                    prompt += self._format_user_turn(content)
                elif isinstance(content, list):
                    text_content = ""
                    for item in content:
                        is_text = (
                            isinstance(item, dict)
                            and item.get("type") == "text"
                        )
                        if is_text:
                            text_content += item.get("text", "")
                    prompt += self._format_user_turn(text_content)

            elif role == "assistant":
                prompt += self._format_model_turn(msg)

            elif role == "tool":
                prompt += self._format_tool_response(msg)

            prev_role = role

        # add generation prompt - but not after tool response
        # per FunctionGemma template, model continues after tool response
        if prev_role != "tool":
            prompt += "<start_of_turn>model\n"

        return prompt

    def _extract_function_calls(
        self,
        text: str,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Tuple[str, List[Dict[str, Any]]]:
        r"""Extract function calls from model output.

        Args:
            text (str): The model output text.
            tools (Optional[List[Dict[str, Any]]]): Available tools to infer
                function names when the model outputs malformed calls.

        Returns:
            Tuple[str, List[Dict[str, Any]]]: Tuple of
                (remaining_content, list_of_tool_calls).
        """
        tool_calls = []

        # try standard format first:
        # <start_function_call>call:func_name{args}<end_function_call>
        pattern = (
            r"<start_function_call>call:(\w+)\{([^}]*)\}"
            r"(?:<end_function_call>)?"
        )
        match = re.search(pattern, text)

        if match:
            func_name, args_str = match.groups()
            args = self._parse_function_args(args_str)
            tool_call = {
                "id": f"call_{uuid.uuid4().hex[:8]}",
                "type": "function",
                "function": {
                    "name": func_name,
                    "arguments": json.dumps(args, ensure_ascii=False),
                },
            }
            tool_calls.append(tool_call)
        else:
            # try alternate format the model might produce:
            # <start_function_response>call{args}<end_function_call>
            # or call:func_name{args} without proper tags
            alt_pattern = r"<start_function_\w+>call(?::(\w+))?\{([^}]*)\}"
            alt_match = re.search(alt_pattern, text)

            if alt_match:
                func_name, args_str = alt_match.groups()
                # if function name is missing, try to infer from tools
                if not func_name:
                    func_name = self._infer_function_name(args_str, tools)
                if func_name:
                    args = self._parse_function_args(args_str)
                    tool_call = {
                        "id": f"call_{uuid.uuid4().hex[:8]}",
                        "type": "function",
                        "function": {
                            "name": func_name,
                            "arguments": json.dumps(args, ensure_ascii=False),
                        },
                    }
                    tool_calls.append(tool_call)

        # remove all function call/response blocks from content
        content = re.sub(
            r"<start_function_call>.*?(?:<end_function_call>|$)",
            "",
            text,
            flags=re.DOTALL,
        )
        content = re.sub(
            r"<start_function_response>.*?(?:<end_function_call>|"
            r"<end_function_response>|$)",
            "",
            content,
            flags=re.DOTALL,
        )
        content = content.strip()

        return content, tool_calls

    def _infer_function_name(
        self,
        args_str: str,
        tools: Optional[List[Dict[str, Any]]],
    ) -> Optional[str]:
        r"""Infer the function name from available tools.

        Args:
            args_str (str): The arguments string from the model output.
            tools (Optional[List[Dict[str, Any]]]): Available tools.

        Returns:
            Optional[str]: The inferred function name, or None if not found.
        """
        if not tools:
            return None

        # if only one tool, use it
        if len(tools) == 1:
            func = tools[0].get("function", {})
            return func.get("name")

        # try to match by argument names
        parsed_args = self._parse_function_args(args_str)
        arg_names = set(parsed_args.keys())

        best_match = None
        best_score = 0

        for tool in tools:
            func = tool.get("function", {})
            params = func.get("parameters", {})
            properties = params.get("properties", {})
            tool_arg_names = set(properties.keys())

            # count matching argument names
            score = len(arg_names & tool_arg_names)
            if score > best_score:
                best_score = score
                best_match = func.get("name")

        return best_match

    def _parse_function_args(self, args_str: str) -> Dict[str, Any]:
        r"""Parse function arguments from FunctionGemma format.

        Args:
            args_str (str): The arguments string (e.g., "a:15,b:27").

        Returns:
            Dict[str, Any]: Parsed arguments dictionary.
        """
        args: Dict[str, Any] = {}
        if not args_str:
            return args

        # split by comma, but be careful with escaped strings
        current_key = ""
        current_value = ""
        in_escape = False
        parsing_key = True

        i = 0
        while i < len(args_str):
            char = args_str[i]

            # check for <escape> tag
            if args_str[i : i + 8] == "<escape>":
                in_escape = not in_escape
                i += 8
                continue

            if not in_escape:
                if char == ":" and parsing_key:
                    parsing_key = False
                    i += 1
                    continue
                elif char == "," and not parsing_key:
                    # save current pair
                    args[current_key] = self._parse_value(current_value)
                    current_key = ""
                    current_value = ""
                    parsing_key = True
                    i += 1
                    continue

            if parsing_key:
                current_key += char
            else:
                current_value += char

            i += 1

        # save last pair
        if current_key:
            args[current_key] = self._parse_value(current_value)

        return args

    def _parse_value(self, value: str) -> Any:
        r"""Parse a value string to appropriate Python type.

        Args:
            value (str): The value string.

        Returns:
            Any: Parsed value (int, float, bool, or str).
        """
        value = value.strip()

        # try to parse as number
        try:
            if "." in value:
                return float(value)
            return int(value)
        except ValueError:
            pass

        # check for boolean
        if value.lower() == "true":
            return True
        if value.lower() == "false":
            return False

        # return as string
        return value

    def _to_chat_completion(  # type: ignore[override]
        self,
        response_text: str,
        model: str,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> ChatCompletion:
        r"""Convert parsed response to OpenAI ChatCompletion format.

        Args:
            response_text (str): The model response text.
            model (str): The model name.
            tools (Optional[List[Dict[str, Any]]]): Available tools for
                function name inference.

        Returns:
            ChatCompletion: OpenAI-compatible ChatCompletion object.
        """
        content, tool_calls = self._extract_function_calls(
            response_text, tools
        )

        message: Dict[str, Any] = {
            "role": "assistant",
            "content": content if content else None,
        }

        if tool_calls:
            message["tool_calls"] = tool_calls

        finish_reason = "tool_calls" if tool_calls else "stop"

        choice = dict(
            index=0,
            message=message,
            finish_reason=finish_reason,
        )

        obj = ChatCompletion.construct(
            id=f"chatcmpl-{uuid.uuid4().hex}",
            choices=[choice],
            created=int(time.time()),
            model=model,
            object="chat.completion",
            usage=CompletionUsage(
                prompt_tokens=0,
                completion_tokens=0,
                total_tokens=0,
            ),
        )

        return obj

    def _call_ollama_generate(self, prompt: str) -> str:
        r"""Call Ollama's /api/generate endpoint with raw prompt.

        Args:
            prompt (str): The formatted prompt string.

        Returns:
            str: The model response text.

        Raises:
            RuntimeError: If the API request fails.
        """
        url = f"{self._url}/api/generate"

        options = {}
        if self.model_config_dict.get("temperature") is not None:
            options["temperature"] = self.model_config_dict["temperature"]
        if self.model_config_dict.get("top_p") is not None:
            options["top_p"] = self.model_config_dict["top_p"]
        if self.model_config_dict.get("top_k") is not None:
            options["top_k"] = self.model_config_dict["top_k"]
        if self.model_config_dict.get("num_predict") is not None:
            options["num_predict"] = self.model_config_dict["num_predict"]
        if self.model_config_dict.get("seed") is not None:
            options["seed"] = self.model_config_dict["seed"]

        data = {
            "model": str(self.model_type),
            "prompt": prompt,
            "raw": True,
            "stream": False,
        }

        if options:
            data["options"] = options

        # add default stop sequences if not provided
        stop_sequences = self.model_config_dict.get("stop")
        if stop_sequences:
            data["stop"] = stop_sequences
        else:
            # default stop sequences to prevent repetition
            data["stop"] = [
                "<end_of_turn>",
                "<start_of_turn>",
                "<end_function_call>",
            ]

        try:
            response = self._client.post(url, json=data)
            response.raise_for_status()
            result = response.json()
            return result.get("response", "")
        except httpx.HTTPStatusError as e:
            raise RuntimeError(f"Ollama API request failed: {e}")

    async def _acall_ollama_generate(self, prompt: str) -> str:
        r"""Async call Ollama's /api/generate endpoint with raw prompt.

        Args:
            prompt (str): The formatted prompt string.

        Returns:
            str: The model response text.

        Raises:
            RuntimeError: If the API request fails.
        """
        url = f"{self._url}/api/generate"

        options = {}
        if self.model_config_dict.get("temperature") is not None:
            options["temperature"] = self.model_config_dict["temperature"]
        if self.model_config_dict.get("top_p") is not None:
            options["top_p"] = self.model_config_dict["top_p"]
        if self.model_config_dict.get("top_k") is not None:
            options["top_k"] = self.model_config_dict["top_k"]
        if self.model_config_dict.get("num_predict") is not None:
            options["num_predict"] = self.model_config_dict["num_predict"]
        if self.model_config_dict.get("seed") is not None:
            options["seed"] = self.model_config_dict["seed"]

        data = {
            "model": str(self.model_type),
            "prompt": prompt,
            "raw": True,
            "stream": False,
        }

        if options:
            data["options"] = options

        # add default stop sequences if not provided
        stop_sequences = self.model_config_dict.get("stop")
        if stop_sequences:
            data["stop"] = stop_sequences
        else:
            # default stop sequences to prevent repetition
            data["stop"] = [
                "<end_of_turn>",
                "<start_of_turn>",
                "<end_function_call>",
            ]

        try:
            response = await self._async_client.post(url, json=data)
            response.raise_for_status()
            result = response.json()
            return result.get("response", "")
        except httpx.HTTPStatusError as e:
            raise RuntimeError(f"Ollama API request failed: {e}")

    @observe()
    def _run(
        self,
        messages: List[OpenAIMessage],
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> ChatCompletion:
        r"""Run inference using FunctionGemma via Ollama.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.
            response_format (Optional[Type[BaseModel]]): Not supported for
                FunctionGemma. (default: :obj:`None`)
            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
                use for the request.

        Returns:
            ChatCompletion: The model response in OpenAI ChatCompletion format.
        """
        update_current_observation(
            input={
                "messages": messages,
                "tools": tools,
            },
            model=str(self.model_type),
            model_parameters=self.model_config_dict,
        )

        self._log_and_trace()

        prompt = self._format_messages(messages, tools)
        logger.debug(f"FunctionGemma prompt:\n{prompt}")

        response_text = self._call_ollama_generate(prompt)
        logger.debug(f"FunctionGemma response:\n{response_text}")

        response = self._to_chat_completion(
            response_text, str(self.model_type), tools
        )
        update_current_observation(usage=response.usage)
        return response

    @observe()
    async def _arun(
        self,
        messages: List[OpenAIMessage],
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> ChatCompletion:
        r"""Async run inference using FunctionGemma via Ollama.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.
            response_format (Optional[Type[BaseModel]]): Not supported for
                FunctionGemma. (default: :obj:`None`)
            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
                use for the request.

        Returns:
            ChatCompletion: The model response in OpenAI ChatCompletion format.
        """
        update_current_observation(
            input={
                "messages": messages,
                "tools": tools,
            },
            model=str(self.model_type),
            model_parameters=self.model_config_dict,
        )

        self._log_and_trace()

        prompt = self._format_messages(messages, tools)
        logger.debug(f"FunctionGemma prompt:\n{prompt}")

        response_text = await self._acall_ollama_generate(prompt)
        logger.debug(f"FunctionGemma response:\n{response_text}")

        response = self._to_chat_completion(
            response_text, str(self.model_type), tools
        )
        update_current_observation(usage=response.usage)
        return response

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode.

        FunctionGemma does not currently support streaming.

        Returns:
            bool: Always False for FunctionGemma.
        """
        return False