ragaai-catalyst 2.1.6.4b0__tar.gz → 2.1.7b0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragaai_catalyst-2.1.7b0/.github/workflows/ci.yml +97 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/PKG-INFO +1 -1
- ragaai_catalyst-2.1.7b0/examples/all_llm_provider/all_llm_provider.py +534 -0
- ragaai_catalyst-2.1.7b0/examples/all_llm_provider/config.py +22 -0
- ragaai_catalyst-2.1.7b0/examples/all_llm_provider/test_llm_call_utils.py +128 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/custom_agents/travel_agent/main.py +10 -3
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/custom_agents/travel_agent/tools.py +3 -0
- ragaai_catalyst-2.1.7b0/examples/haystack/news_fetching/README.md +77 -0
- ragaai_catalyst-2.1.7b0/examples/haystack/news_fetching/news_fetching.py +97 -0
- ragaai_catalyst-2.1.7b0/examples/haystack/news_fetching/requirements.txt +2 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/llamaindex_examples/legal_research_rag/legal_rag.py +1 -1
- ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/youtube_summary_agent/README.md +78 -0
- ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/youtube_summary_agent/requirements.txt +6 -0
- ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/youtube_summary_agent/sample.env +7 -0
- ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/youtube_summary_agent/youtube_summary_agent.py +189 -0
- ragaai_catalyst-2.1.7b0/examples/pii_masking_example/llamaindex_agentic_fastapi/app.py +396 -0
- ragaai_catalyst-2.1.7b0/examples/pii_masking_example/llamaindex_agentic_fastapi/app_presidio.py +382 -0
- ragaai_catalyst-2.1.7b0/examples/pii_masking_example/llamaindex_agentic_fastapi/request.py +34 -0
- ragaai_catalyst-2.1.7b0/examples/pii_masking_example/llamaindex_agentic_fastapi/requirements.txt +4 -0
- ragaai_catalyst-2.1.7b0/examples/smolagents/most_upvoted_paper/README.md +65 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/smolagents/most_upvoted_paper/most_upvoted_paper.py +28 -3
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/pyproject.toml +1 -2
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/dataset.py +1 -1
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +26 -1
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +6 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +180 -164
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py +8 -2
- ragaai_catalyst-2.1.7b0/ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +248 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/tracer.py +125 -115
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/upload_traces.py +3 -3
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +1 -1
- ragaai_catalyst-2.1.7b0/ragaai_catalyst/tracers/utils/rag_trace_json_converter.py +243 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/trace_json_converter.py +1 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst.egg-info/PKG-INFO +1 -1
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst.egg-info/SOURCES.txt +62 -17
- ragaai_catalyst-2.1.7b0/test_report_20250407_183101.txt +60 -0
- ragaai_catalyst-2.1.7b0/tests/README.md +83 -0
- ragaai_catalyst-2.1.7b0/tests/environment.yml +389 -0
- ragaai_catalyst-2.1.7b0/tests/examples/all_llm_provider/all_llm_provider.py +562 -0
- ragaai_catalyst-2.1.7b0/tests/examples/all_llm_provider/config.py +25 -0
- ragaai_catalyst-2.1.7b0/tests/examples/all_llm_provider/test_all_llm_provider.py +68 -0
- ragaai_catalyst-2.1.7b0/tests/examples/crewai/scifi_writer/sci_fi_story.md +21 -0
- ragaai_catalyst-2.1.7b0/tests/examples/crewai/scifi_writer/scifi_writer.py +115 -0
- ragaai_catalyst-2.1.7b0/tests/examples/crewai/scifi_writer/test_scifi_writer.py +40 -0
- ragaai_catalyst-2.1.7b0/tests/examples/custom_agents/travel_agent/agents.py +53 -0
- ragaai_catalyst-2.1.7b0/tests/examples/custom_agents/travel_agent/config.py +24 -0
- ragaai_catalyst-2.1.7b0/tests/examples/custom_agents/travel_agent/main.py +125 -0
- ragaai_catalyst-2.1.7b0/tests/examples/custom_agents/travel_agent/test_travel_agent.py +40 -0
- ragaai_catalyst-2.1.7b0/tests/examples/custom_agents/travel_agent/tools.py +105 -0
- ragaai_catalyst-2.1.7b0/tests/examples/haystack/news_fetching/news_fetching.py +110 -0
- ragaai_catalyst-2.1.7b0/tests/examples/haystack/news_fetching/test_news_fetching.py +40 -0
- ragaai_catalyst-2.1.7b0/tests/examples/langchain/medical_rag/data/medical_texts/handbook1.pdf +0 -0
- ragaai_catalyst-2.1.7b0/tests/examples/langchain/medical_rag/data/medical_texts/handbook2.pdf +70 -0
- ragaai_catalyst-2.1.7b0/tests/examples/langchain/medical_rag/data/symptom_disease_map.csv +9 -0
- ragaai_catalyst-2.1.7b0/tests/examples/langchain/medical_rag/diagnosis_agent.py +159 -0
- ragaai_catalyst-2.1.7b0/tests/examples/langchain/medical_rag/test_diagnosis_agent.py +40 -0
- ragaai_catalyst-2.1.7b0/tests/examples/langgraph/personal_research_assistant/research_assistant.py +252 -0
- ragaai_catalyst-2.1.7b0/tests/examples/langgraph/personal_research_assistant/test_research_assistant.py +42 -0
- ragaai_catalyst-2.1.7b0/tests/examples/llamaindex_examples/legal_research_rag/legal_data/cases/ca_overtime_2021.pdf +0 -0
- ragaai_catalyst-2.1.7b0/tests/examples/llamaindex_examples/legal_research_rag/legal_data/cases/fl_ada_2022.pdf +85 -0
- ragaai_catalyst-2.1.7b0/tests/examples/llamaindex_examples/legal_research_rag/legal_data/statutes.csv +3 -0
- ragaai_catalyst-2.1.7b0/tests/examples/llamaindex_examples/legal_research_rag/legal_rag.py +108 -0
- ragaai_catalyst-2.1.7b0/tests/examples/llamaindex_examples/legal_research_rag/test_legal_rag.py +40 -0
- ragaai_catalyst-2.1.7b0/tests/examples/smolagents/most_upvoted_paper/most_upvoted_paper.py +180 -0
- ragaai_catalyst-2.1.7b0/tests/examples/smolagents/most_upvoted_paper/paper.pdf +0 -0
- ragaai_catalyst-2.1.7b0/tests/examples/smolagents/most_upvoted_paper/test_most_upvoted_paper.py +38 -0
- ragaai_catalyst-2.1.7b0/tests/examples/test_utils/get_components.py +29 -0
- ragaai_catalyst-2.1.7b0/tests/examples/test_utils/get_trace_data.py +74 -0
- ragaai_catalyst-2.1.7b0/tests/run_pytest_and_print_and_save_results.py +126 -0
- ragaai_catalyst-2.1.7b0/tests/table_result.png +0 -0
- ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_data/util_synthetic_data_doc.pdf +0 -0
- ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_data/util_synthetic_data_invalid.csv +7 -0
- ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_data/util_synthetic_data_valid.csv +147 -0
- ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_data/util_test_dataset.csv +11 -0
- ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_data/util_test_langchain_tracing.pdf +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/tests/test_catalyst/test_dataset.py +7 -17
- ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_evaluation.py +165 -0
- ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_openinference_langchain.py +562 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/tests/test_catalyst/test_prompt_manager.py +2 -2
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/tests/test_catalyst/test_synthetic_data_generation.py +25 -18
- ragaai_catalyst-2.1.6.4b0/tests/test_catalyst/test_configuration.py → ragaai_catalyst-2.1.7b0/tests/test_catalyst/test_the_configuration.py +3 -3
- ragaai_catalyst-2.1.6.4b0/examples/haystack/rag/rag.py +0 -92
- ragaai_catalyst-2.1.6.4b0/examples/haystack/rag/requirements.txt +0 -3
- ragaai_catalyst-2.1.6.4b0/examples/haystack/rag/sample.env +0 -1
- ragaai_catalyst-2.1.6.4b0/examples/smolagents/most_upvoted_paper/README.md +0 -12
- ragaai_catalyst-2.1.6.4b0/examples/smolagents/rag_using_chromadb/README.md +0 -60
- ragaai_catalyst-2.1.6.4b0/examples/smolagents/rag_using_chromadb/rag_using_chromadb.py +0 -114
- ragaai_catalyst-2.1.6.4b0/examples/smolagents/rag_using_chromadb/requirements.txt +0 -9
- ragaai_catalyst-2.1.6.4b0/examples/smolagents/rag_using_chromadb/sample.env +0 -5
- ragaai_catalyst-2.1.6.4b0/ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +0 -130
- ragaai_catalyst-2.1.6.4b0/tests/examples/langgraph/personal_research_assistant/test_research_assistant.py +0 -203
- ragaai_catalyst-2.1.6.4b0/tests/test_catalyst/test_evaluation.py +0 -503
- ragaai_catalyst-2.1.6.4b0/tests/test_catalyst/test_langchain_tracing.py +0 -175
- ragaai_catalyst-2.1.6.4b0/tests/test_catalyst/test_llm_providers.py +0 -104
- ragaai_catalyst-2.1.6.4b0/tests/test_catalyst/test_redteaming.py +0 -56
- ragaai_catalyst-2.1.6.4b0/tests/test_catalyst/upload_trace_zip_automation.py +0 -582
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/.github/PULL_REQUEST_TEMPLATE.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/.gitignore +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/.gitmodules +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/LICENSE +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/README.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/dataset_management.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/autheticate.gif +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/create_project.gif +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/custom_metrics.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/dataset.gif +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/dataset.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/evaluation.gif +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/evaluation.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/guardrails.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/last_main.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/main.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/projects_new.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/img/trace_comp.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/prompt_management.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/docs/trace_management.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/crewai/scifi_writer/README.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/crewai/scifi_writer/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/crewai/scifi_writer/sample.env +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/crewai/scifi_writer/scifi_writer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/custom_agents/travel_agent/agents.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/custom_agents/travel_agent/config.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langchain/medical_rag/data/medical_texts/handbook1.pdf +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langchain/medical_rag/data/medical_texts/handbook2.pdf +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langchain/medical_rag/data/symptom_disease_map.csv +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langchain/medical_rag/diagnosis_agent.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langchain/medical_rag/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langchain/medical_rag/sample.env +0 -0
- {ragaai_catalyst-2.1.6.4b0/examples/haystack/rag → ragaai_catalyst-2.1.7b0/examples/langgraph/personal_research_assistant}/README.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langgraph/personal_research_assistant/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langgraph/personal_research_assistant/research_assistant.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/langgraph/personal_research_assistant/sample.env +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/llamaindex_examples/legal_research_rag/legal_data/cases/ca_overtime_2021.pdf +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/llamaindex_examples/legal_research_rag/legal_data/cases/fl_ada_2022.pdf +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/llamaindex_examples/legal_research_rag/legal_data/statutes.csv +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/llamaindex_examples/legal_research_rag/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/llamaindex_examples/legal_research_rag/sample.env +0 -0
- {ragaai_catalyst-2.1.6.4b0/examples/openai_agents_sdk → ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/email_data_extraction_agent}/README.md +0 -0
- {ragaai_catalyst-2.1.6.4b0/examples/openai_agents_sdk → ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/email_data_extraction_agent}/data_extraction_email.py +0 -0
- {ragaai_catalyst-2.1.6.4b0/examples/openai_agents_sdk → ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/email_data_extraction_agent}/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0/examples/openai_agents_sdk → ragaai_catalyst-2.1.7b0/examples/openai_agents_sdk/email_data_extraction_agent}/sample.env +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/smolagents/most_upvoted_paper/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/examples/smolagents/most_upvoted_paper/sample.env +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/_version.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/evaluation.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/experiment.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/guard_executor.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/guardrails_manager.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/internal_api_completion.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/prompt_manager.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/proxy_call.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/ragaai_catalyst.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/config/detectors.toml +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/data_generator/scenario_generator.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/data_generator/test_case_generator.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/evaluator.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/llm_generator.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/llm_generator_old.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/red_teaming.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/tests/grok.ipynb +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/tests/stereotype.ipynb +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/upload_result.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/utils/issue_description.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming/utils/rt.png +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/redteaming_old.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/synthetic_data_generation.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/README.md +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/data/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tests/FinancialAnalysisSystem.ipynb +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tests/GameActivityEventPlanner.ipynb +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tests/TravelPlanner.ipynb +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tests/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tests/ai_travel_agent.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tests/unique_decorator_test.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/upload/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/upload/trace_uploader.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/generic.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/distributed.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/exporters/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/exporters/file_span_exporter.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/exporters/raga_exporter.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/instrumentators/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/langchain_callback.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/llamaindex_callback.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/llamaindex_instrumentation.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/convert_llama_instru_callback.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/extraction_logic_llama_index.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/tracers/utils/utils.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst/utils.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst.egg-info/dependency_links.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst.egg-info/requires.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/ragaai_catalyst.egg-info/top_level.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/requirements.txt +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/setup.cfg +0 -0
- /ragaai_catalyst-2.1.6.4b0/examples/langgraph/personal_research_assistant/README.md → /ragaai_catalyst-2.1.7b0/tests/examples/__init__.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/tests/test_catalyst/test_base_tracer_add_metrics.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/tests/test_catalyst/test_base_tracer_metrics.py +0 -0
- {ragaai_catalyst-2.1.6.4b0 → ragaai_catalyst-2.1.7b0}/tests/test_catalyst/test_evaluation_metrics.py +0 -0
@@ -0,0 +1,97 @@
|
|
1
|
+
name: CI Pipeline
|
2
|
+
|
3
|
+
on:
|
4
|
+
push:
|
5
|
+
branches: [ main ]
|
6
|
+
pull_request:
|
7
|
+
branches: [ main ]
|
8
|
+
|
9
|
+
jobs:
|
10
|
+
code-quality:
|
11
|
+
runs-on: ubuntu-latest
|
12
|
+
continue-on-error: true
|
13
|
+
steps:
|
14
|
+
- uses: actions/checkout@v4
|
15
|
+
- name: Set up Python
|
16
|
+
uses: actions/setup-python@v5
|
17
|
+
with:
|
18
|
+
python-version: '3.10'
|
19
|
+
- name: Install dependencies
|
20
|
+
shell: bash
|
21
|
+
run: |
|
22
|
+
if [ "$RUNNER_OS" == "Windows" ]; then
|
23
|
+
python -m pip install --upgrade pip
|
24
|
+
pip install ruff
|
25
|
+
pip install -e ".[dev]"
|
26
|
+
else
|
27
|
+
curl -LsSf https://astral.sh/uv/install.sh | sh
|
28
|
+
export PATH="$HOME/.cargo/bin:$PATH"
|
29
|
+
uv pip install --system ruff
|
30
|
+
uv pip install --system -e ".[dev]"
|
31
|
+
fi
|
32
|
+
- name: Format and lint with Ruff
|
33
|
+
run: |
|
34
|
+
# First run format to fix formatting issues
|
35
|
+
ruff format .
|
36
|
+
# Then run check with auto-fix for fixable issues
|
37
|
+
ruff check --fix .
|
38
|
+
|
39
|
+
test:
|
40
|
+
needs: code-quality
|
41
|
+
continue-on-error: true
|
42
|
+
strategy:
|
43
|
+
fail-fast: false
|
44
|
+
matrix:
|
45
|
+
os: [ubuntu-latest, windows-latest, macos-latest]
|
46
|
+
python-version: ['3.10', '3.11', '3.12', '3.13']
|
47
|
+
runs-on: ${{ matrix.os }}
|
48
|
+
outputs:
|
49
|
+
test_summary: ${{ steps.pytest.outputs.test_summary }}
|
50
|
+
steps:
|
51
|
+
- uses: actions/checkout@v4
|
52
|
+
- name: Set up Python ${{ matrix.python-version }}
|
53
|
+
uses: actions/setup-python@v5
|
54
|
+
with:
|
55
|
+
python-version: ${{ matrix.python-version }}
|
56
|
+
- name: Install dependencies
|
57
|
+
shell: bash
|
58
|
+
run: |
|
59
|
+
if [ "$RUNNER_OS" == "Windows" ]; then
|
60
|
+
python -m pip install --upgrade pip
|
61
|
+
pip install pytest pytest-cov
|
62
|
+
pip install -e ".[dev]"
|
63
|
+
else
|
64
|
+
curl -LsSf https://astral.sh/uv/install.sh | sh
|
65
|
+
export PATH="$HOME/.cargo/bin:$PATH"
|
66
|
+
uv pip install --system pytest pytest-cov
|
67
|
+
uv pip install --system -e ".[dev]"
|
68
|
+
fi
|
69
|
+
- name: Test with pytest
|
70
|
+
id: pytest
|
71
|
+
env:
|
72
|
+
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
73
|
+
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
74
|
+
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
|
75
|
+
AZURE_OPENAI_ENDPOINT: ${{ secrets.AZURE_OPENAI_ENDPOINT }}
|
76
|
+
AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
|
77
|
+
AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
|
78
|
+
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
|
79
|
+
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
|
80
|
+
PROJECT_NAME: ${{ secrets.PROJECT_NAME }}
|
81
|
+
LOCATION: ${{ secrets.LOCATION }}
|
82
|
+
RAGAAI_CATALYST_BASE_URL: ${{ secrets.RAGAAI_CATALYST_BASE_URL }}
|
83
|
+
RAGAAI_CATALYST_ACCESS_KEY: ${{ secrets.RAGAAI_CATALYST_ACCESS_KEY }}
|
84
|
+
RAGAAI_CATALYST_SECRET_KEY: ${{ secrets.RAGAAI_CATALYST_SECRET_KEY }}
|
85
|
+
RAGAAI_PROJECT_NAME: ${{ secrets.RAGAAI_PROJECT_NAME }}
|
86
|
+
RAGAAI_DATASET_NAME: ${{ secrets.RAGAAI_DATASET_NAME }}_$(date +'%Y%m%d%H%M%S')
|
87
|
+
TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
|
88
|
+
SERPERDEV_API_KEY: ${{ secrets.SERPERDEV_API_KEY }}
|
89
|
+
run: |
|
90
|
+
mkdir -p test-results
|
91
|
+
pytest tests/ -v --junitxml=test-results/junit.xml | tee test-output.txt
|
92
|
+
echo "test_summary<<EOF" >> $GITHUB_OUTPUT
|
93
|
+
echo "### Test Results for ${{ matrix.os }} - Python ${{ matrix.python-version }}" >> $GITHUB_OUTPUT
|
94
|
+
echo '```' >> $GITHUB_OUTPUT
|
95
|
+
cat test-output.txt | grep -E "collected|PASSED|FAILED|ERROR|SKIPPED" >> $GITHUB_OUTPUT
|
96
|
+
echo '```' >> $GITHUB_OUTPUT
|
97
|
+
echo "EOF" >> $GITHUB_OUTPUT
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: ragaai_catalyst
|
3
|
-
Version: 2.1.
|
3
|
+
Version: 2.1.7b0
|
4
4
|
Summary: RAGA AI CATALYST
|
5
5
|
Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>, Tushar Kumar <tushar.kumar@raga.ai>
|
6
6
|
Requires-Python: <3.13,>=3.10
|
@@ -0,0 +1,534 @@
|
|
1
|
+
import sys
|
2
|
+
import os
|
3
|
+
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
|
4
|
+
|
5
|
+
from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
|
6
|
+
import vertexai
|
7
|
+
from vertexai.generative_models import GenerativeModel, GenerationConfig
|
8
|
+
import google.generativeai as genai
|
9
|
+
from litellm import completion, acompletion
|
10
|
+
import litellm
|
11
|
+
import anthropic
|
12
|
+
from anthropic import Anthropic, AsyncAnthropic
|
13
|
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
14
|
+
from langchain_google_vertexai import ChatVertexAI
|
15
|
+
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
|
16
|
+
from groq import Groq, AsyncGroq
|
17
|
+
|
18
|
+
from ragaai_catalyst import trace_llm
|
19
|
+
|
20
|
+
from dotenv import load_dotenv
|
21
|
+
load_dotenv()
|
22
|
+
|
23
|
+
# Azure OpenAI setup
|
24
|
+
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
|
25
|
+
azure_api_key = os.getenv("AZURE_OPENAI_API_KEY")
|
26
|
+
azure_api_version = os.getenv("AZURE_OPENAI_API_VERSION", "2024-08-01-preview")
|
27
|
+
|
28
|
+
# Google AI setup
|
29
|
+
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
30
|
+
|
31
|
+
# Vertex AI setup
|
32
|
+
vertexai.init(project="gen-lang-client-0655603261", location="us-central1")
|
33
|
+
|
34
|
+
async def get_llm_response(
    prompt,
    model,
    provider,
    temperature,
    max_tokens,
    async_llm=False,
):
    """
    Main interface for getting responses from various LLM providers.

    Args:
        prompt: user prompt text.
        model: provider-specific model identifier.
        provider: provider key; matched by substring (e.g. "azure",
            "openai_beta", "openai", "chat_google", "google",
            "chat_vertexai", "vertexai", "anthropic", "groq", "litellm").
        temperature: sampling temperature forwarded to the provider.
        max_tokens: completion-token cap forwarded to the provider.
        async_llm: when True, use the provider's async client/helper.

    Returns:
        The response text, or None if the underlying call failed.

    Raises:
        ValueError: if the provider string matches no known provider
            (previously this fell through and returned None silently).
    """
    # Normalize once; the original recomputed provider.lower() per branch.
    # Branch order matters: "openai_beta" before "openai", "chat_google"
    # before "google", "chat_vertexai" before "vertexai" (substring match).
    provider_key = provider.lower()
    if 'azure' in provider_key:
        if async_llm:
            async_azure_openai_client = AsyncAzureOpenAI(azure_endpoint=azure_endpoint, api_key=azure_api_key, api_version=azure_api_version)
            return await _get_async_azure_openai_response(async_azure_openai_client, prompt, model, temperature, max_tokens)
        else:
            azure_openai_client = AzureOpenAI(azure_endpoint=azure_endpoint, api_key=azure_api_key, api_version=azure_api_version)
            return _get_azure_openai_response(azure_openai_client, prompt, model, temperature, max_tokens)
    elif 'openai_beta' in provider_key:
        openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        return _get_openai_beta_response(openai_client, prompt, model, temperature, max_tokens)
    elif 'openai' in provider_key:
        if async_llm:
            async_openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
            return await _get_async_openai_response(async_openai_client, prompt, model, temperature, max_tokens)
        else:
            openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
            return _get_openai_response(openai_client, prompt, model, temperature, max_tokens)
    elif 'chat_google' in provider_key:
        if async_llm:
            return await _get_async_chat_google_generativeai_response(prompt, model, temperature, max_tokens)
        else:
            return _get_chat_google_generativeai_response(prompt, model, temperature, max_tokens)
    elif 'google' in provider_key:
        if async_llm:
            return await _get_async_google_generativeai_response(prompt, model, temperature, max_tokens)
        else:
            return _get_google_generativeai_response(prompt, model, temperature, max_tokens)
    elif 'chat_vertexai' in provider_key:
        if async_llm:
            return await _get_async_chat_vertexai_response(prompt, model, temperature, max_tokens)
        else:
            return _get_chat_vertexai_response(prompt, model, temperature, max_tokens)
    elif 'vertexai' in provider_key:
        if async_llm:
            return await _get_async_vertexai_response(prompt, model, temperature, max_tokens)
        else:
            return _get_vertexai_response(prompt, model, temperature, max_tokens)
    elif 'anthropic' in provider_key:
        if async_llm:
            async_anthropic_client = AsyncAnthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
            return await _get_async_anthropic_response(async_anthropic_client, prompt, model, temperature, max_tokens)
        else:
            anthropic_client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
            return _get_anthropic_response(anthropic_client, prompt, model, temperature, max_tokens)
    elif 'groq' in provider_key:
        if async_llm:
            async_groq_client = AsyncGroq(api_key=os.getenv("GROQ_API_KEY"))
            return await _get_async_groq_response(async_groq_client, prompt, model, temperature, max_tokens)
        else:
            groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
            return _get_groq_response(groq_client, prompt, model, temperature, max_tokens)
    elif 'litellm' in provider_key:
        if async_llm:
            return await _get_async_litellm_response(prompt, model, temperature, max_tokens)
        else:
            return _get_litellm_response(prompt, model, temperature, max_tokens)
    else:
        # Fail loudly on typos instead of silently returning None.
        raise ValueError(f"Unsupported provider: {provider}")
|
101
|
+
|
102
|
+
|
103
|
+
@trace_llm(name="_get_openai_response")
def _get_openai_response(
    openai_client,
    prompt,
    model,
    temperature,
    max_tokens,
):
    """Fetch a chat completion from OpenAI synchronously.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = openai_client.chat.completions.create(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with OpenAI API: {str(err)}")
        return None
|
125
|
+
|
126
|
+
@trace_llm(name="_get_async_openai_response")
async def _get_async_openai_response(
    async_openai_client,
    prompt,
    model,
    temperature,
    max_tokens,
):
    """Fetch a chat completion from OpenAI via the async client.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = await async_openai_client.chat.completions.create(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with async OpenAI API: {str(err)}")
        return None
|
148
|
+
|
149
|
+
@trace_llm(name="_get_openai_beta_response")
def _get_openai_beta_response(
    openai_client,
    prompt,
    model,
    temperature,
    max_tokens
):
    """Run a prompt through the OpenAI Assistants (beta) API.

    Creates a throwaway assistant and thread, posts the prompt, polls the
    run to completion, and returns the newest message's text.

    Returns None when the run does not complete or the API errors — now
    consistent with every sibling helper (the original had no try/except
    and could raise or fall off the end implicitly).
    """
    try:
        assistant = openai_client.beta.assistants.create(model=model)
        thread = openai_client.beta.threads.create()
        # Post the user prompt; the returned message object is not needed.
        openai_client.beta.threads.messages.create(
            thread_id=thread.id,
            role="user",
            content=prompt
        )
        run = openai_client.beta.threads.runs.create_and_poll(
            thread_id=thread.id,
            assistant_id=assistant.id,
            temperature=temperature,
            max_completion_tokens=max_tokens
        )
        if run.status == 'completed':
            messages = openai_client.beta.threads.messages.list(thread_id=thread.id)
            # messages.data[0] is the most recent message (the reply).
            return messages.data[0].content[0].text.value
        print(f"OpenAI beta run ended with status: {run.status}")
        return None
    except Exception as e:
        print(f"Error with OpenAI beta API: {str(e)}")
        return None
|
173
|
+
|
174
|
+
@trace_llm(name="_get_azure_openai_response")
def _get_azure_openai_response(
    azure_openai_client,
    prompt,
    model,
    temperature,
    max_tokens
):
    """Fetch a chat completion from Azure OpenAI.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = azure_openai_client.chat.completions.create(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with Azure OpenAI API: {str(err)}")
        return None
|
196
|
+
|
197
|
+
@trace_llm(name="_get_async_azure_openai_response")
async def _get_async_azure_openai_response(
    async_azure_openai_client,
    prompt,
    model,
    temperature,
    max_tokens
):
    """Fetch a chat completion from Azure OpenAI via the async client.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = await async_azure_openai_client.chat.completions.create(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with async Azure OpenAI API: {str(err)}")
        return None
|
219
|
+
|
220
|
+
@trace_llm(name="_get_litellm_response")
def _get_litellm_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Fetch a completion through LiteLLM's provider-agnostic interface.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = completion(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with LiteLLM: {str(err)}")
        return None
|
241
|
+
|
242
|
+
@trace_llm(name="_get_async_litellm_response")
async def _get_async_litellm_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Fetch a completion through LiteLLM's async interface.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = await acompletion(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with async LiteLLM: {str(err)}")
        return None
|
263
|
+
|
264
|
+
@trace_llm(name="_get_vertexai_response")
def _get_vertexai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate text with a Vertex AI generative model.

    Relies on the module-level vertexai.init() call for project/location.
    Returns the response text, or None if generation fails.
    """
    try:
        # Distinct name avoids shadowing the `model` (name) parameter.
        gen_model = GenerativeModel(model_name=model)
        config = GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        )
        result = gen_model.generate_content(prompt, generation_config=config)
        return result.text
    except Exception as err:
        print(f"Error with VertexAI: {str(err)}")
        return None
|
290
|
+
|
291
|
+
@trace_llm(name="_get_async_vertexai_response")
async def _get_async_vertexai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate text with a Vertex AI generative model asynchronously.

    Returns the response text, or None if generation fails.
    """
    try:
        gen_model = GenerativeModel(model_name=model)
        config = GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        )
        result = await gen_model.generate_content_async(prompt, generation_config=config)
        return result.text
    except Exception as err:
        print(f"Error with async VertexAI: {str(err)}")
        return None
|
316
|
+
|
317
|
+
@trace_llm(name="_get_google_generativeai_response")
def _get_google_generativeai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate text via the google.generativeai SDK.

    Returns the response text, or None if generation fails.
    """
    try:
        gen_model = genai.GenerativeModel(model)
        config = genai.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        )
        result = gen_model.generate_content(prompt, generation_config=config)
        return result.text
    except Exception as err:
        print(f"Error with Google GenerativeAI: {str(err)}")
        return None
|
340
|
+
|
341
|
+
@trace_llm(name="_get_async_google_generativeai_response")
async def _get_async_google_generativeai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate text via the google.generativeai SDK asynchronously.

    Returns the response text, or None if generation fails.
    """
    try:
        gen_model = genai.GenerativeModel(model)
        config = genai.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        )
        result = await gen_model.generate_content_async(prompt, generation_config=config)
        return result.text
    except Exception as err:
        print(f"Error with async Google GenerativeAI: {str(err)}")
        return None
|
364
|
+
|
365
|
+
@trace_llm(name="_get_anthropic_response")
def _get_anthropic_response(
    anthropic_client,
    prompt,
    model,
    temperature,
    max_tokens,
):
    """Fetch a message completion from Anthropic.

    Returns the first content block's text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = anthropic_client.messages.create(**request_kwargs)
        return result.content[0].text
    except Exception as err:
        print(f"Error with Anthropic: {str(err)}")
        return None
|
384
|
+
|
385
|
+
@trace_llm(name="_get_async_anthropic_response")
async def _get_async_anthropic_response(
    async_anthropic_client,
    prompt,
    model,
    temperature,
    max_tokens,
):
    """Fetch a message completion from Anthropic via the async client.

    Returns the first content block's text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = await async_anthropic_client.messages.create(**request_kwargs)
        return result.content[0].text
    except Exception as err:
        print(f"Error with async Anthropic: {str(err)}")
        return None
|
404
|
+
|
405
|
+
@trace_llm(name="_get_chat_google_generativeai_response")
def _get_chat_google_generativeai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate a reply via LangChain's ChatGoogleGenerativeAI wrapper.

    NOTE(review): this calls the private `_generate` method rather than
    the public `invoke` API — preserved as-is; confirm intentional.
    Returns the first generation's text, or None on failure.
    """
    try:
        chat_model = ChatGoogleGenerativeAI(model=model)
        gen_config = dict(
            temperature=temperature,
            max_output_tokens=max_tokens
        )
        chat_result = chat_model._generate(
            [HumanMessage(content=prompt)],
            generation_config=gen_config
        )
        return chat_result.generations[0].text
    except Exception as err:
        print(f"Error with Google GenerativeAI: {str(err)}")
        return None
|
425
|
+
|
426
|
+
@trace_llm(name="_get_async_chat_google_generativeai_response")
async def _get_async_chat_google_generativeai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate a reply via ChatGoogleGenerativeAI asynchronously.

    NOTE(review): calls the private `_agenerate` method rather than the
    public `ainvoke` API — preserved as-is; confirm intentional.
    Returns the first generation's text, or None on failure.
    """
    try:
        chat_model = ChatGoogleGenerativeAI(model=model)
        gen_config = dict(
            temperature=temperature,
            max_output_tokens=max_tokens
        )
        chat_result = await chat_model._agenerate(
            [HumanMessage(content=prompt)],
            generation_config=gen_config
        )
        return chat_result.generations[0].text
    except Exception as err:
        print(f"Error with async Google GenerativeAI: {str(err)}")
        return None
|
446
|
+
|
447
|
+
@trace_llm(name="_get_chat_vertexai_response")
def _get_chat_vertexai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate a reply via LangChain's ChatVertexAI wrapper.

    NOTE(review): calls the private `_generate` method and passes
    `google_api_key` to ChatVertexAI — preserved as-is; confirm both.
    Returns the first generation's text, or None on failure.
    """
    try:
        chat_model = ChatVertexAI(
            model=model,
            google_api_key=os.getenv("GOOGLE_API_KEY")
        )
        gen_config = dict(
            temperature=temperature,
            max_output_tokens=max_tokens
        )
        chat_result = chat_model._generate(
            [HumanMessage(content=prompt)],
            generation_config=gen_config
        )
        return chat_result.generations[0].text
    except Exception as err:
        print(f"Error with VertexAI: {str(err)}")
        return None
|
470
|
+
|
471
|
+
@trace_llm(name="_get_async_chat_vertexai_response")
async def _get_async_chat_vertexai_response(
    prompt,
    model,
    temperature,
    max_tokens
):
    """Generate a reply via ChatVertexAI asynchronously.

    NOTE(review): calls the private `_agenerate` method and passes
    `google_api_key` to ChatVertexAI — preserved as-is; confirm both.
    Returns the first generation's text, or None on failure.
    """
    try:
        chat_model = ChatVertexAI(
            model=model,
            google_api_key=os.getenv("GOOGLE_API_KEY")
        )
        gen_config = dict(
            temperature=temperature,
            max_output_tokens=max_tokens
        )
        chat_result = await chat_model._agenerate(
            [HumanMessage(content=prompt)],
            generation_config=gen_config
        )
        return chat_result.generations[0].text
    except Exception as err:
        print(f"Error with async VertexAI: {str(err)}")
        return None
|
494
|
+
|
495
|
+
@trace_llm(name="_get_groq_response")
def _get_groq_response(
    groq_client,
    prompt,
    model,
    temperature,
    max_tokens
):
    """Fetch a chat completion from Groq.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = groq_client.chat.completions.create(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with Groq: {str(err)}")
        return None
|
514
|
+
|
515
|
+
@trace_llm(name="_get_async_groq_response")
async def _get_async_groq_response(
    async_groq_client,
    prompt,
    model,
    temperature,
    max_tokens
):
    """Fetch a chat completion from Groq via the async client.

    Returns the first choice's message text, or None if the call fails.
    """
    request_kwargs = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        result = await async_groq_client.chat.completions.create(**request_kwargs)
        return result.choices[0].message.content
    except Exception as err:
        print(f"Error with async Groq: {str(err)}")
        return None
|
534
|
+
|
@@ -0,0 +1,22 @@
|
|
1
|
+
from ragaai_catalyst import RagaAICatalyst, init_tracing
|
2
|
+
from ragaai_catalyst.tracers import Tracer
|
3
|
+
import sys
|
4
|
+
import os
|
5
|
+
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
|
6
|
+
|
7
|
+
|
8
|
+
def initialize_tracing():
    """Configure RagaAI Catalyst tracing and return the Tracer.

    Credentials and project/dataset names are read from environment
    variables (CATALYST_ACCESS_KEY, CATALYST_SECRET_KEY,
    CATALYST_BASE_URL, PROJECT_NAME, DATASET_NAME).
    """
    catalyst_client = RagaAICatalyst(
        access_key=os.getenv("CATALYST_ACCESS_KEY"),
        secret_key=os.getenv("CATALYST_SECRET_KEY"),
        base_url=os.getenv("CATALYST_BASE_URL"),
    )

    agentic_tracer = Tracer(
        project_name=os.getenv("PROJECT_NAME"),
        dataset_name=os.getenv("DATASET_NAME"),
        tracer_type="Agentic",
    )

    init_tracing(catalyst=catalyst_client, tracer=agentic_tracer)
    return agentic_tracer
|