ragaai-catalyst 2.1.5b23__tar.gz → 2.1.5b24__tar.gz

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (130)
  1. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/PKG-INFO +1 -1
  2. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/joke_gen_critique_anthropic.py +39 -8
  3. ragaai_catalyst-2.1.5b24/examples/llamaindex_examples/joke_gen_critique_async.py +106 -0
  4. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/travel_agent/config.py +1 -1
  5. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/travel_agent/tools.py +13 -0
  6. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/pyproject.toml +1 -1
  7. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +5 -0
  8. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +8 -15
  9. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +32 -34
  10. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +1 -1
  11. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +5 -0
  12. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +10 -10
  13. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +10 -8
  14. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst.egg-info/PKG-INFO +1 -1
  15. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst.egg-info/SOURCES.txt +1 -0
  16. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  17. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
  18. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/.github/PULL_REQUEST_TEMPLATE.md +0 -0
  19. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/.gitignore +0 -0
  20. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/LICENSE +0 -0
  21. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/README.md +0 -0
  22. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/docs/dataset_management.md +0 -0
  23. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/docs/prompt_management.md +0 -0
  24. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/FinancialAnalysisSystem.ipynb +0 -0
  25. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/TravelPlanner.ipynb +0 -0
  26. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/agentic_rag.py +0 -0
  27. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/custom_tracer_example.py +0 -0
  28. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/customer_support.py +0 -0
  29. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/finance.py +0 -0
  30. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/langgraph_examples/agentic_rag.py +0 -0
  31. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/langgraph_examples/customer_support.py +0 -0
  32. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/langgraph_examples/multi_tool.py +0 -0
  33. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/langgraph_examples/planning_agent.py +0 -0
  34. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/langgraph_multi_tools.py +0 -0
  35. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/function_calling_agent.ipynb +0 -0
  36. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/joke_gen_critique.py +0 -0
  37. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/joke_gen_critique_azureopenai.py +0 -0
  38. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/joke_gen_critique_gemini.py +0 -0
  39. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/joke_gen_critique_litellm.py +0 -0
  40. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/joke_gen_critque_vertex.py +0 -0
  41. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/react_agent.ipynb +0 -0
  42. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/llamaindex_examples/tool_call_agent.py +0 -0
  43. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/planning_agent.py +0 -0
  44. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/prompt_management_litellm.ipynb +0 -0
  45. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/prompt_management_openai.ipynb +0 -0
  46. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/sync_sample_call.py +0 -0
  47. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/travel_agent/agents.py +0 -0
  48. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/examples/travel_agent/main.py +0 -0
  49. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/__init__.py +0 -0
  50. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/_version.py +0 -0
  51. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/dataset.py +0 -0
  52. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/evaluation.py +0 -0
  53. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/experiment.py +0 -0
  54. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/guard_executor.py +0 -0
  55. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/guardrails_manager.py +0 -0
  56. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/internal_api_completion.py +0 -0
  57. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/prompt_manager.py +0 -0
  58. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/proxy_call.py +0 -0
  59. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/ragaai_catalyst.py +0 -0
  60. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/redteaming.py +0 -0
  61. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/synthetic_data_generation.py +0 -0
  62. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/__init__.py +0 -0
  63. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/README.md +0 -0
  64. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/__init__.py +0 -0
  65. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/data/__init__.py +0 -0
  66. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +0 -0
  67. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tests/FinancialAnalysisSystem.ipynb +0 -0
  68. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tests/GameActivityEventPlanner.ipynb +0 -0
  69. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tests/TravelPlanner.ipynb +0 -0
  70. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tests/__init__.py +0 -0
  71. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tests/ai_travel_agent.py +0 -0
  72. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tests/unique_decorator_test.py +0 -0
  73. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/__init__.py +0 -0
  74. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +0 -0
  75. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py +0 -0
  76. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +0 -0
  77. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +0 -0
  78. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/upload/__init__.py +0 -0
  79. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +0 -0
  80. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py +0 -0
  81. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +0 -0
  82. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/__init__.py +0 -0
  83. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py +0 -0
  84. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +0 -0
  85. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py +0 -0
  86. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/generic.py +0 -0
  87. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +0 -0
  88. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +0 -0
  89. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +0 -0
  90. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml +0 -0
  91. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py +0 -0
  92. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +0 -0
  93. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py +0 -0
  94. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +0 -0
  95. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/distributed.py +0 -0
  96. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/exporters/__init__.py +0 -0
  97. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/exporters/file_span_exporter.py +0 -0
  98. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/exporters/raga_exporter.py +0 -0
  99. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/instrumentators/__init__.py +0 -0
  100. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/instrumentators/langchain.py +0 -0
  101. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/instrumentators/llamaindex.py +0 -0
  102. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/instrumentators/openai.py +0 -0
  103. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/langchain_callback.py +0 -0
  104. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/llamaindex_callback.py +0 -0
  105. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/tracer.py +0 -0
  106. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/upload_traces.py +0 -0
  107. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/utils/__init__.py +0 -0
  108. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +0 -0
  109. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +0 -0
  110. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/tracers/utils/utils.py +0 -0
  111. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst/utils.py +0 -0
  112. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst.egg-info/dependency_links.txt +0 -0
  113. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst.egg-info/requires.txt +0 -0
  114. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/ragaai_catalyst.egg-info/top_level.txt +0 -0
  115. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/requirements.txt +0 -0
  116. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/setup.cfg +0 -0
  117. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/autonomous_research_agent/.env.example +0 -0
  118. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/autonomous_research_agent/agents/base_agent.py +0 -0
  119. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/autonomous_research_agent/agents/coordinator.py +0 -0
  120. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/autonomous_research_agent/agents/discovery.py +0 -0
  121. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/autonomous_research_agent/agents/synthesis.py +0 -0
  122. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/autonomous_research_agent/research_script.py +0 -0
  123. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/autonomous_research_agent/utils/llm.py +0 -0
  124. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/test_configuration.py +0 -0
  125. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/test_dataset.py +0 -0
  126. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/test_evaluation.py +0 -0
  127. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/test_llm_providers.py +0 -0
  128. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/test_prompt_manager.py +0 -0
  129. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/test_redteaming.py +0 -0
  130. {ragaai_catalyst-2.1.5b23 → ragaai_catalyst-2.1.5b24}/test/test_catalyst/test_synthetic_data_generation.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ragaai_catalyst
-Version: 2.1.5b23
+Version: 2.1.5b24
 Summary: RAGA AI CATALYST
 Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
 Requires-Python: <3.13,>=3.9
examples/llamaindex_examples/joke_gen_critique_anthropic.py
@@ -1,4 +1,6 @@
-# !pip install llama-index-llms-anthropic
+import sys
+sys.path.append(".")
+
 from llama_index.core.workflow import (
     Event,
     StartEvent,
@@ -10,13 +12,11 @@ from llama_index.core.workflow import (
 from llama_index.llms.anthropic import Anthropic
 from dotenv import load_dotenv
 import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
+
 
 load_dotenv()
-from ragaai_catalyst.tracers import Tracer
-from ragaai_catalyst import RagaAICatalyst, init_tracing
-from ragaai_catalyst import trace_llm
+
+from ragaai_catalyst import RagaAICatalyst, Tracer, init_tracing, trace_llm, current_span
 
 catalyst = RagaAICatalyst(
     access_key=os.getenv("RAGAAI_CATALYST_ACCESS_KEY"),
@@ -41,15 +41,46 @@ class JokeFlow(Workflow):
     llm = Anthropic()
 
     @step
-    #@trace_llm("generate joke")
+    @trace_llm("generate joke")
     async def generate_joke(self, ev: StartEvent) -> JokeEvent:
         topic = ev.topic
         prompt = f"Write your best joke about {topic}."
         response = await self.llm.acomplete(prompt)
+
+        current_span().add_metrics(
+            name="toxicity",
+            score=0.5,
+            reason="some reason"
+        )
+
+        # current_span().execute_metrics(
+        #     name="Hallucination",
+        #     model="gpt-4o-mini",
+        #     provider="openai",
+        #     display_name="Hallucination_display",
+        #     mapping={
+        #         'prompt': prompt,
+        #         'context': "travel agent",
+        #         'response': response
+        #     }
+        # )
+
+        # current_span().execute_metrics(
+        #     name="Hallucination",
+        #     model="gpt-4o-mini",
+        #     provider="openai",
+        #     display_name="Hallucination_display1",
+        #     mapping={
+        #         'prompt': prompt,
+        #         'context': "travel agent",
+        #         'response': response
+        #     }
+        # )
+
         return JokeEvent(joke=str(response))
 
     @step
-    #@trace_llm("criticise joke")
+    @trace_llm("criticise joke")
     async def critique_joke(self, ev: JokeEvent) -> StopEvent:
         joke = ev.joke
         prompt = f"Give a thorough analysis and critique of the following joke: {joke}"
examples/llamaindex_examples/joke_gen_critique_async.py (new file)
@@ -0,0 +1,106 @@
+import sys
+
+from ragaai_catalyst.tracers.distributed import trace_agent
+sys.path.append('.')
+
+import os
+from llama_index.llms.openai import OpenAI
+from llama_index.core.workflow import (
+    Event,
+    StartEvent,
+    StopEvent,
+    Workflow,
+    step,
+)
+
+from ragaai_catalyst import RagaAICatalyst, Tracer, init_tracing, trace_llm, current_span, trace_agent
+
+from dotenv import load_dotenv
+load_dotenv(override=True)
+
+catalyst = RagaAICatalyst(
+    access_key=os.getenv("RAGAAI_CATALYST_ACCESS_KEY"),
+    secret_key=os.getenv("RAGAAI_CATALYST_SECRET_KEY"),
+    base_url=os.getenv("RAGAAI_CATALYST_BASE_URL"),
+)
+
+# Initialize tracer
+tracer = Tracer(
+    project_name="Execute_Metric_Test1",
+    dataset_name="joke_generation_workflow_async1",
+    tracer_type="Agentic",
+)
+
+init_tracing(catalyst=catalyst, tracer=tracer)
+
+class JokeEvent(Event):
+    joke: str
+
+
+class JokeFlow(Workflow):
+    llm = OpenAI()
+
+    @step
+    @trace_agent("generate joke")
+    async def generate_joke(self, ev: StartEvent) -> JokeEvent:
+        topic = ev.topic
+        prompt = f"Write your best joke about {topic}."
+
+        # Get the current span and store its attributes
+        span = current_span()
+        span.add_context(context="joke generation")
+        span.add_metrics(
+            name="toxicity",
+            score=0.4,
+            reasoning="Reason for toxicity",
+        )
+
+        # First execute_metrics call
+        span.execute_metrics(
+            name="Hallucination",
+            display_name="hallucination_generate_joke",
+            provider="openai",
+            model="gpt-4o-mini",
+            mapping={
+                "prompt": prompt,
+                "response": "vnvnvs",
+                "context": "Some Context"
+            }
+        )
+
+        # Perform async operation
+        response = await self.llm.acomplete(prompt)
+
+        # Get fresh span after async operation
+        span.execute_metrics(
+            name="Hallucination",
+            display_name="hallucination_generate_joke_2",
+            provider="openai",
+            model="gpt-4o-mini",
+            mapping={
+                "prompt": prompt,
+                "response": response.text,
+                "context": "Some Context"
+            }
+        )
+
+        return JokeEvent(joke=str(response))
+
+    @step
+    @trace_agent("criticise joke")
+    async def critique_joke(self, ev: JokeEvent) -> StopEvent:
+        joke = ev.joke
+        prompt = f"Give a thorough analysis and critique of the following joke: {joke}"
+        response = await self.llm.acomplete(prompt)
+        return StopEvent(result=str(response))
+
+
+async def main():
+    w = JokeFlow(timeout=60, verbose=False)
+    result = await w.run(topic="climate change")
+    print(str(result))
+
+if __name__ == "__main__":
+    import asyncio
+    with tracer:
+        asyncio.run(main())
examples/travel_agent/config.py
@@ -15,7 +15,7 @@ def initialize_tracing():
     )
 
     tracer = Tracer(
-        project_name="swarnendu-4",
+        project_name="Execute_Metric_Test1",
         dataset_name="travel_agent_dataset",
         tracer_type="Agentic",
     )
examples/travel_agent/tools.py
@@ -34,6 +34,19 @@ def llm_call(prompt, max_tokens=512, model="gpt-4o-mini", name="default"):
        }
    )
 
+    current_span().execute_metrics(
+        name="Hallucination",
+        model="gpt-4o-mini",
+        provider="openai",
+        display_name="Hallucination_display1",
+        mapping={
+            'prompt': "goa to mumbai price",
+            'context': "travel agent",
+            'response': "approximately 10",
+            "gt": "10"
+        }
+    )
+
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
pyproject.toml
@@ -9,7 +9,7 @@ readme = "README.md"
 requires-python = ">=3.9,<3.13"
 # license = {file = "LICENSE"}
 
-version = "2.1.5.b23"
+version = "2.1.5.b24"
 authors = [
     {name = "Kiran Scaria", email = "kiran.scaria@raga.ai"},
     {name = "Kedar Gaikwad", email = "kedar.gaikwad@raga.ai"},
ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py
@@ -9,6 +9,7 @@ import contextvars
 import asyncio
 from ..utils.file_name_tracker import TrackName
 from ..utils.span_attributes import SpanAttributes
+from .base import BaseTracer
 import logging
 
 logger = logging.getLogger(__name__)
@@ -555,6 +556,10 @@ class AgentTracerMixin:
            metrics.append(metric)
 
        # TODO agent_trace execute metric
+       formatted_metrics = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name)
+       if formatted_metrics:
+           metrics.extend(formatted_metrics)
+
        component = {
            "id": kwargs["component_id"],
            "hash_id": kwargs["hash_id"],
ragaai_catalyst/tracers/agentic_tracing/tracers/base.py
@@ -1055,29 +1055,20 @@ class BaseTracer:
        return self.span_attributes_dict[span_name]
 
    @staticmethod
-   def get_formatted_metric(span_attributes_dict, project_id, name, prompt, span_context, response, span_gt):
+   def get_formatted_metric(span_attributes_dict, project_id, name):
        if name in span_attributes_dict:
            local_metrics = span_attributes_dict[name].local_metrics or []
+           local_metrics_results = []
            for metric in local_metrics:
                try:
-                   if metric.get("prompt") is not None:
-                       prompt = metric['prompt']
-                   if metric.get("response") is not None:
-                       response = metric['response']
-                   if metric.get('context') is not None:
-                       span_context = metric['context']
-                   if metric.get('gt') is not None:
-                       span_gt = metric['gt']
-
                    logger.info("calculating the metric, please wait....")
+
+                   mapping = metric.get("mapping", {})
                    result = calculate_metric(project_id=project_id,
                                              metric_name=metric.get("name"),
                                              model=metric.get("model"),
                                              provider=metric.get("provider"),
-                                             prompt=prompt,
-                                             context=span_context,
-                                             response=response,
-                                             expected_response=span_gt
+                                             **mapping
                                              )
 
                    result = result['data']['data'][0]
@@ -1107,9 +1098,11 @@ class BaseTracer:
                        "mappings": [],
                        "config": metric_config
                    }
-                   return formatted_metric
+                   local_metrics_results.append(formatted_metric)
                except ValueError as e:
                    logger.error(f"Validation Error: {e}")
                except Exception as e:
                    logger.error(f"Error executing metric: {e}")
 
+           return local_metrics_results
+
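For orientation, a minimal sketch (not part of the diff) of how callers consume the reworked helper; it mirrors the agent_tracer, llm_tracer and tool_tracer hunks in this release, and the wrapper function name is illustrative only:

# Schematic only: get_formatted_metric now returns a (possibly empty) list,
# so callers extend the span's metrics rather than appending one optional item.
from ragaai_catalyst.tracers.agentic_tracing.tracers.base import BaseTracer

def collect_executed_metrics(span_attributes_dict, project_id, span_name):
    metrics = []
    formatted_metrics = BaseTracer.get_formatted_metric(
        span_attributes_dict,  # SpanAttributes objects keyed by span name
        project_id,
        span_name,             # span whose queued local metrics should be computed
    )
    if formatted_metrics:
        metrics.extend(formatted_metrics)
    return metrics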
ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py
@@ -630,9 +630,9 @@ class LLMTracerMixin:
        #print("Response output: ",response)
 
        # TODO: Execute & Add the User requested metrics here
-       formatted_metric = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name, prompt, span_context, response, span_gt)
-       if formatted_metric is not None:
-           metrics.append(formatted_metric)
+       formatted_metrics = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name)
+       if formatted_metrics:
+           metrics.extend(formatted_metrics)
 
        component = {
            "id": component_id,
@@ -683,38 +683,36 @@ class LLMTracerMixin:
        # return "\n".join(process_content(msg.get("content", "")) for msg in messages if msg.get("content"))
 
    def convert_to_content(self, input_data):
-       if isinstance(input_data, dict):
-           messages = input_data.get("kwargs", {}).get("messages", [])
-       elif isinstance(input_data, list):
-           if len(input_data)>0 and isinstance(input_data[0]['content'],ChatResponse):
-               extracted_messages = []
-
-               for item in input_data:
-                   chat_response = item.get('content')
-                   if hasattr(chat_response, 'message') and hasattr(chat_response.message, 'blocks'):
-                       for block in chat_response.message.blocks:
-                           if hasattr(block, 'text'):
-                               extracted_messages.append(block.text)
-               messages=extracted_messages
-               if isinstance(messages,list):
-                   return "\n".join(messages)
-
-           #messages=[msg["content"] for msg in input_data if isinstance(msg, dict) and "content" in msg]
-           #messages = [msg["content"].message for msg in input_data if isinstance(msg, dict) and "content" in msg and isinstance(msg["content"], ChatResponse)]
+       try:
+           if isinstance(input_data, dict):
+               messages = input_data.get("kwargs", {}).get("messages", [])
+           elif isinstance(input_data, list):
+               if len(input_data)>0 and isinstance(input_data[0]['content'],ChatResponse):
+                   extracted_messages = []
+
+                   for item in input_data:
+                       chat_response = item.get('content')
+                       if hasattr(chat_response, 'message') and hasattr(chat_response.message, 'blocks'):
+                           for block in chat_response.message.blocks:
+                               if hasattr(block, 'text'):
+                                   extracted_messages.append(block.text)
+                   messages=extracted_messages
+                   if isinstance(messages,list):
+                       return "\n".join(messages)
+
+               #messages=[msg["content"] for msg in input_data if isinstance(msg, dict) and "content" in msg]
+               #messages = [msg["content"].message for msg in input_data if isinstance(msg, dict) and "content" in msg and isinstance(msg["content"], ChatResponse)]
+               else:
+                   messages = input_data
+           elif isinstance(input_data,ChatResponse):
+               messages=input_data['content']
            else:
-               messages = input_data
-       elif isinstance(input_data,ChatResponse):
-           messages=input_data['content']
-       else:
-           return ""
-       res=""
-       # try:
-       res="\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
-       # except Exception as e:
-       #     print("Exception occured for: ",e)
-       #     print("Input: ",input_data,"Meeage: ",messages)
-       #     # import sys
-       #     # sys.exit()
+               return ""
+           res=""
+           # try:
+           res="\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
+       except Exception as e:
+           res=str(messages)
        return res
 
    def process_content(content):
ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py
@@ -361,7 +361,7 @@ class AgenticTracing(
 
        # Check if there's an active agent context
        current_agent_id = self.current_agent_id.get()
-       if current_agent_id and component_data["type"] in ["llm", "tool"]:
+       if current_agent_id and component_data["type"] in ["llm", "tool", "custom"]:
            # Add this component as a child of the current agent
            current_children = self.agent_children.get()
            current_children.append(component_data)
ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py
@@ -7,6 +7,7 @@ import functools
 from typing import Optional, Any, Dict, List
 
 from pydantic import tools
+from .base import BaseTracer
 from ..utils.unique_decorator import generate_unique_hash_simple
 import contextvars
 import asyncio
@@ -483,6 +484,10 @@ class ToolTracerMixin:
            metric["name"] = metric_name
            metrics.append(metric)
 
+       formatted_metrics = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name)
+       if formatted_metrics:
+           metrics.extend(formatted_metrics)
+
        start_time = kwargs["start_time"]
        component = {
            "id": kwargs["component_id"],
ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py
@@ -11,7 +11,7 @@ logging_level = (
    else logger.setLevel(logging.INFO)
 )
 
-def calculate_metric(project_id, metric_name, model, provider, prompt, response, context, expected_response=None):
+def calculate_metric(project_id, metric_name, model, provider, **kwargs):
    user_id = "1"
    org_domain = "raga"
 
@@ -41,15 +41,15 @@ def calculate_metric(project_id, metric_name, model, provider, prompt, response,
        "trace_object": {
            "Data": {
                "DocId": "doc-1",
-               "Prompt": prompt,
-               "Response": response,
-               "Context": context,
-               "ExpectedResponse": "",
-               "ExpectedContext": expected_response,
-               "Chat": "",
-               "Instructions": "",
-               "SystemPrompt": "",
-               "Text": ""
+               "Prompt": kwargs.get("prompt"),
+               "Response": kwargs.get("response"),
+               "Context": kwargs.get("context"),
+               "ExpectedResponse": kwargs.get("expected_response"),
+               "ExpectedContext": kwargs.get("expected_context"),
+               "Chat": kwargs.get("chat"),
+               "Instructions": kwargs.get("instructions"),
+               "SystemPrompt": kwargs.get("system_prompt"),
+               "Text": kwargs.get("text")
            },
            "claims": {},
            "last_computed_metrics": {
ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py
@@ -62,6 +62,7 @@ class SpanAttributes:
        self.feedback = feedback
        logger.debug(f"Added feedback: {self.feedback}")
 
+    # TODO: Add validation to check if all the required parameters are present
    def execute_metrics(self, **kwargs: Any):
        name = kwargs.get("name")
        model = kwargs.get("model")
@@ -91,19 +92,20 @@ class SpanAttributes:
        prompt =None
        context = None
        response = None
-       if mapping is not None:
-           prompt = mapping['prompt']
-           context = mapping['context']
-           response = mapping['response']
+       # if mapping is not None:
+       #     prompt = mapping['prompt']
+       #     context = mapping['context']
+       #     response = mapping['response']
        new_metric = {
            "name": metric_name,
            "model": model,
            "provider": provider,
            "project_id": self.project_id,
-           "prompt": prompt,
-           "context": context,
-           "response": response,
-           "displayName": display_name
+           # "prompt": prompt,
+           # "context": context,
+           # "response": response,
+           "displayName": display_name,
+           "mapping": mapping
        }
        self.local_metrics.append(new_metric)
 
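For context, a hedged sketch of the caller-facing shape after this change, condensed from the example scripts elsewhere in this diff (the decorator label, function, and mapping values are illustrative): execute_metrics now stores the whole mapping dict on the span instead of pulling out prompt, context, and response individually, and that mapping is later expanded into calculate_metric(**mapping).

from ragaai_catalyst import current_span, trace_llm

@trace_llm("generate joke")
async def generate(prompt: str) -> str:
    response = "..."  # placeholder for the actual LLM call
    # The mapping keys are recorded as-is on the current span and forwarded
    # as keyword arguments when the queued metric is executed.
    current_span().execute_metrics(
        name="Hallucination",
        model="gpt-4o-mini",
        provider="openai",
        display_name="hallucination_check",
        mapping={
            "prompt": prompt,
            "response": response,
            "context": "Some Context",
        },
    )
    return response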
ragaai_catalyst.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ragaai_catalyst
-Version: 2.1.5b23
+Version: 2.1.5b24
 Summary: RAGA AI CATALYST
 Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
 Requires-Python: <3.13,>=3.9
ragaai_catalyst.egg-info/SOURCES.txt
@@ -26,6 +26,7 @@ examples/langgraph_examples/planning_agent.py
 examples/llamaindex_examples/function_calling_agent.ipynb
 examples/llamaindex_examples/joke_gen_critique.py
 examples/llamaindex_examples/joke_gen_critique_anthropic.py
+examples/llamaindex_examples/joke_gen_critique_async.py
 examples/llamaindex_examples/joke_gen_critique_azureopenai.py
 examples/llamaindex_examples/joke_gen_critique_gemini.py
 examples/llamaindex_examples/joke_gen_critique_litellm.py