ragaai-catalyst 2.1.5b23__py3-none-any.whl → 2.1.5b24__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in the public registry.
@@ -9,6 +9,7 @@ import contextvars
  import asyncio
  from ..utils.file_name_tracker import TrackName
  from ..utils.span_attributes import SpanAttributes
+ from .base import BaseTracer
  import logging

  logger = logging.getLogger(__name__)
@@ -555,6 +556,10 @@ class AgentTracerMixin:
  metrics.append(metric)

  # TODO agent_trace execute metric
+ formatted_metrics = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name)
+ if formatted_metrics:
+ metrics.extend(formatted_metrics)
+
  component = {
  "id": kwargs["component_id"],
  "hash_id": kwargs["hash_id"],
@@ -1055,29 +1055,20 @@ class BaseTracer:
  return self.span_attributes_dict[span_name]

  @staticmethod
- def get_formatted_metric(span_attributes_dict, project_id, name, prompt, span_context, response, span_gt):
+ def get_formatted_metric(span_attributes_dict, project_id, name):
  if name in span_attributes_dict:
  local_metrics = span_attributes_dict[name].local_metrics or []
+ local_metrics_results = []
  for metric in local_metrics:
  try:
- if metric.get("prompt") is not None:
- prompt = metric['prompt']
- if metric.get("response") is not None:
- response = metric['response']
- if metric.get('context') is not None:
- span_context = metric['context']
- if metric.get('gt') is not None:
- span_gt = metric['gt']
-
  logger.info("calculating the metric, please wait....")
+
+ mapping = metric.get("mapping", {})
  result = calculate_metric(project_id=project_id,
  metric_name=metric.get("name"),
  model=metric.get("model"),
  provider=metric.get("provider"),
- prompt=prompt,
- context=span_context,
- response=response,
- expected_response=span_gt
+ **mapping
  )

  result = result['data']['data'][0]
@@ -1107,9 +1098,11 @@ class BaseTracer:
  "mappings": [],
  "config": metric_config
  }
- return formatted_metric
+ local_metrics_results.append(formatted_metric)
  except ValueError as e:
  logger.error(f"Validation Error: {e}")
  except Exception as e:
  logger.error(f"Error executing metric: {e}")

+ return local_metrics_results
+
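Read together, the two BaseTracer hunks above change get_formatted_metric from computing a single metric with explicitly passed prompt/context/response/gt values to iterating every locally registered metric, expanding that metric's own "mapping" dict into calculate_metric, and returning a list of results. Below is a rough, self-contained sketch of that control flow, not the packaged code; calculate_metric is passed in as a stand-in for the library's helper and the result formatting is omitted:

    # Sketch (assumed behaviour): each local metric carries its own "mapping",
    # which is expanded into keyword arguments for calculate_metric; results
    # are accumulated instead of returning after the first metric.
    def get_formatted_metrics_sketch(span_attributes_dict, project_id, name, calculate_metric):
        results = []
        if name not in span_attributes_dict:
            return results
        for metric in span_attributes_dict[name].local_metrics or []:
            mapping = metric.get("mapping", {})  # e.g. {"prompt": ..., "context": ..., "response": ...}
            try:
                results.append(calculate_metric(
                    project_id=project_id,
                    metric_name=metric.get("name"),
                    model=metric.get("model"),
                    provider=metric.get("provider"),
                    **mapping,
                ))
            except Exception as exc:
                # mirror the tracer's log-and-continue handling
                print(f"Error executing metric: {exc}")
        return results

Because the method now returns a list, the agent, LLM, and tool tracer hunks in this release switch from metrics.append(formatted_metric) to metrics.extend(formatted_metrics).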
@@ -630,9 +630,9 @@ class LLMTracerMixin:
  #print("Response output: ",response)

  # TODO: Execute & Add the User requested metrics here
- formatted_metric = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name, prompt, span_context, response, span_gt)
- if formatted_metric is not None:
- metrics.append(formatted_metric)
+ formatted_metrics = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name)
+ if formatted_metrics:
+ metrics.extend(formatted_metrics)

  component = {
  "id": component_id,
@@ -683,38 +683,36 @@ class LLMTracerMixin:
  # return "\n".join(process_content(msg.get("content", "")) for msg in messages if msg.get("content"))

  def convert_to_content(self, input_data):
- if isinstance(input_data, dict):
- messages = input_data.get("kwargs", {}).get("messages", [])
- elif isinstance(input_data, list):
- if len(input_data)>0 and isinstance(input_data[0]['content'],ChatResponse):
- extracted_messages = []
-
- for item in input_data:
- chat_response = item.get('content')
- if hasattr(chat_response, 'message') and hasattr(chat_response.message, 'blocks'):
- for block in chat_response.message.blocks:
- if hasattr(block, 'text'):
- extracted_messages.append(block.text)
- messages=extracted_messages
- if isinstance(messages,list):
- return "\n".join(messages)
-
- #messages=[msg["content"] for msg in input_data if isinstance(msg, dict) and "content" in msg]
- #messages = [msg["content"].message for msg in input_data if isinstance(msg, dict) and "content" in msg and isinstance(msg["content"], ChatResponse)]
+ try:
+ if isinstance(input_data, dict):
+ messages = input_data.get("kwargs", {}).get("messages", [])
+ elif isinstance(input_data, list):
+ if len(input_data)>0 and isinstance(input_data[0]['content'],ChatResponse):
+ extracted_messages = []
+
+ for item in input_data:
+ chat_response = item.get('content')
+ if hasattr(chat_response, 'message') and hasattr(chat_response.message, 'blocks'):
+ for block in chat_response.message.blocks:
+ if hasattr(block, 'text'):
+ extracted_messages.append(block.text)
+ messages=extracted_messages
+ if isinstance(messages,list):
+ return "\n".join(messages)
+
+ #messages=[msg["content"] for msg in input_data if isinstance(msg, dict) and "content" in msg]
+ #messages = [msg["content"].message for msg in input_data if isinstance(msg, dict) and "content" in msg and isinstance(msg["content"], ChatResponse)]
+ else:
+ messages = input_data
+ elif isinstance(input_data,ChatResponse):
+ messages=input_data['content']
  else:
- messages = input_data
- elif isinstance(input_data,ChatResponse):
- messages=input_data['content']
- else:
- return ""
- res=""
- # try:
- res="\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
- # except Exception as e:
- # print("Exception occured for: ",e)
- # print("Input: ",input_data,"Meeage: ",messages)
- # # import sys
- # # sys.exit()
+ return ""
+ res=""
+ # try:
+ res="\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
+ except Exception as e:
+ res=str(messages)
  return res

  def process_content(content):
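The convert_to_content hunk wraps the whole extraction in a try/except, so a payload whose entries cannot be joined no longer raises but degrades to str(messages). A minimal illustration of that fallback, using a hypothetical join_contents helper rather than the method itself:

    def join_contents(messages):
        try:
            return "\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
        except Exception:
            # assumed fallback, matching the added except branch: stringify whatever was collected
            return str(messages)

    print(join_contents([{"content": "hi"}, {"content": "there"}]))  # "hi\nthere"
    print(join_contents(["not-a-dict"]))  # AttributeError is swallowed -> "['not-a-dict']"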
@@ -361,7 +361,7 @@ class AgenticTracing(

  # Check if there's an active agent context
  current_agent_id = self.current_agent_id.get()
- if current_agent_id and component_data["type"] in ["llm", "tool"]:
+ if current_agent_id and component_data["type"] in ["llm", "tool", "custom"]:
  # Add this component as a child of the current agent
  current_children = self.agent_children.get()
  current_children.append(component_data)
@@ -7,6 +7,7 @@ import functools
  from typing import Optional, Any, Dict, List

  from pydantic import tools
+ from .base import BaseTracer
  from ..utils.unique_decorator import generate_unique_hash_simple
  import contextvars
  import asyncio
@@ -483,6 +484,10 @@ class ToolTracerMixin:
  metric["name"] = metric_name
  metrics.append(metric)

+ formatted_metrics = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name)
+ if formatted_metrics:
+ metrics.extend(formatted_metrics)
+
  start_time = kwargs["start_time"]
  component = {
  "id": kwargs["component_id"],
@@ -11,7 +11,7 @@ logging_level = (
  else logger.setLevel(logging.INFO)
  )

- def calculate_metric(project_id, metric_name, model, provider, prompt, response, context, expected_response=None):
+ def calculate_metric(project_id, metric_name, model, provider, **kwargs):
  user_id = "1"
  org_domain = "raga"

@@ -41,15 +41,15 @@ def calculate_metric(project_id, metric_name, model, provider, prompt, response,
  "trace_object": {
  "Data": {
  "DocId": "doc-1",
- "Prompt": prompt,
- "Response": response,
- "Context": context,
- "ExpectedResponse": "",
- "ExpectedContext": expected_response,
- "Chat": "",
- "Instructions": "",
- "SystemPrompt": "",
- "Text": ""
+ "Prompt": kwargs.get("prompt"),
+ "Response": kwargs.get("response"),
+ "Context": kwargs.get("context"),
+ "ExpectedResponse": kwargs.get("expected_response"),
+ "ExpectedContext": kwargs.get("expected_context"),
+ "Chat": kwargs.get("chat"),
+ "Instructions": kwargs.get("instructions"),
+ "SystemPrompt": kwargs.get("system_prompt"),
+ "Text": kwargs.get("text")
  },
  "claims": {},
  "last_computed_metrics": {
@@ -62,6 +62,7 @@ class SpanAttributes:
  self.feedback = feedback
  logger.debug(f"Added feedback: {self.feedback}")

+ # TODO: Add validation to check if all the required parameters are present
  def execute_metrics(self, **kwargs: Any):
  name = kwargs.get("name")
  model = kwargs.get("model")
@@ -91,19 +92,20 @@ class SpanAttributes:
  prompt =None
  context = None
  response = None
- if mapping is not None:
- prompt = mapping['prompt']
- context = mapping['context']
- response = mapping['response']
+ # if mapping is not None:
+ # prompt = mapping['prompt']
+ # context = mapping['context']
+ # response = mapping['response']
  new_metric = {
  "name": metric_name,
  "model": model,
  "provider": provider,
  "project_id": self.project_id,
- "prompt": prompt,
- "context": context,
- "response": response,
- "displayName": display_name
+ # "prompt": prompt,
+ # "context": context,
+ # "response": response,
+ "displayName": display_name,
+ "mapping": mapping
  }
  self.local_metrics.append(new_metric)

@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: ragaai_catalyst
- Version: 2.1.5b23
+ Version: 2.1.5b24
  Summary: RAGA AI CATALYST
  Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
  Requires-Python: <3.13,>=3.9
@@ -29,19 +29,19 @@ ragaai_catalyst/tracers/agentic_tracing/tests/__init__.py,sha256=47DEQpj8HBSa-_T
  ragaai_catalyst/tracers/agentic_tracing/tests/ai_travel_agent.py,sha256=S4rCcKzU_5SB62BYEbNn_1VbbTdG4396N8rdZ3ZNGcE,5654
  ragaai_catalyst/tracers/agentic_tracing/tests/unique_decorator_test.py,sha256=Xk1cLzs-2A3dgyBwRRnCWs7Eubki40FVonwd433hPN8,4805
  ragaai_catalyst/tracers/agentic_tracing/tracers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py,sha256=983uI_vc6XTBtkUIIHs_FcyUa8VBl40d10JmlPzV6iA,26915
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py,sha256=lMhRXfTsnKw_-sibA-LqJNV_nvh284VT-rv6_KF4clA,46621
+ ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py,sha256=kxZPdaSgNY0-ykyvIEF1d6qjXlR7TV81k2Nbc1k0icg,27131
+ ragaai_catalyst/tracers/agentic_tracing/tracers/base.py,sha256=WpeSzPrCwsMyDQrF7juJjTV5nrP3ewR5IiMcwVqHDtg,46095
  ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py,sha256=mR4jCNjsKUPiidJ1pIthoUI5i9KCGGPe3zG5l80FUBo,14060
  ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py,sha256=jnbJqs8ln3YWDYAZ8kqpkU8E8h6HGT1-OspHYrDijIw,48118
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py,sha256=KTXiDhfEcF2q0FJ4rJqLXONC93MWQAJCIW_ZOsOIiuo,18139
+ ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py,sha256=69A33Hyv7j9lCSX9yDTdUJeMOtPE6AD-zO38KXETxGY,48048
+ ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py,sha256=hbyKvuCeL084vlUk5fBeBnWkzPEjmdHJ_HTZtuw5TxM,18149
  ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py,sha256=m8CxYkl7iMiFya_lNwN1ykBc3Pmo-2pR_2HmpptwHWQ,10352
- ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py,sha256=CEbTzedv_AH5zXlChfTGMlQc-8oWATlwrVFb1KcO4c0,21588
+ ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py,sha256=G7IEr0neJWg4w-ffaAhs_Wo3BSJn1VJuECPUc9zCEVA,21804
  ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py,sha256=bhSUhNQCuJXKjgJAXhjKEYjnHMpYN90FSZdR84fNIKU,4614
  ragaai_catalyst/tracers/agentic_tracing/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py,sha256=1MDKXAAPzOEdxFKWWQrRgrmM3kz--DGXSywGXQmR3lQ,6041
  ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py,sha256=HgpMgI-JTWZrizcM7GGUIaAgaZF4aRT3D0dJXVEkblY,4271
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py,sha256=qpUjPDfeRIwKh2x-pQFLXJwT4hFRrYBKvzS1bWcJTOo,2437
+ ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py,sha256=PJMwYSL9TKw0LgP--hghSbosuJF5ju5K7zUltEHfQ34,2561
  ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py,sha256=V9dgYx4DwibPr38Xbk7_SOJk9gONE7xYpb0MPA1oMGI,3943
  ragaai_catalyst/tracers/agentic_tracing/utils/__init__.py,sha256=XdB3X_ufe4RVvGorxSqAiB9dYv4UD7Hvvuw3bsDUppY,60
  ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py,sha256=JyNCbfpW-w4O9CjtemTqmor2Rh1WGpQwhRaDSRmBxw8,689
@@ -51,7 +51,7 @@ ragaai_catalyst/tracers/agentic_tracing/utils/generic.py,sha256=WwXT01xmp8MSr7Ki
  ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py,sha256=vPZ4dn4EHFW0kqd1GyRpsYXbfrRrd0DXCmh-pzsDBNE,1109
  ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py,sha256=gbPqWtJINW8JVlkM41UmF5zGR8oj8Q6g9KQIS3moQYM,20439
  ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json,sha256=2tzGw_cKCTPcfjEm7iGvFE6pTw7gMTPzeBov_MTaXNY,321336
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py,sha256=3zTWcooXLZgbShzQCql2EWWfFkrTK2soTWkMxnzMwnE,4218
+ ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py,sha256=qmODERcFZhc8MX24boFCXkkh6sJ-vZngRHPvxhyWFeE,4347
  ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml,sha256=LvFDivDIE96Zasp-fgDEqUJ5GEQZUawQucR3aOcSUTY,926
  ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py,sha256=H8WNsk4v_5T6OUw4TFOzlDLjQhJwjh1nAMyMAoqMEi4,6946
  ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py,sha256=go7FVnofviATDph-j8sk2juv09CGSRt1Vq4U868Fhd8,2259
@@ -68,8 +68,8 @@ ragaai_catalyst/tracers/utils/__init__.py,sha256=KeMaZtYaTojilpLv65qH08QmpYclfpa
  ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py,sha256=ofrNrxf2b1hpjDh_zeaxiYq86azn1MF3kW8-ViYPEg0,1641
  ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py,sha256=XS2_x2qneqEx9oAighLg-LRiueWcESLwIC2r7eJT-Ww,3117
  ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
- ragaai_catalyst-2.1.5b23.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- ragaai_catalyst-2.1.5b23.dist-info/METADATA,sha256=ETuEVrAepDOTKJUD-h2JcNx9lJnJcTCPLunJVg-QRwc,13874
- ragaai_catalyst-2.1.5b23.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- ragaai_catalyst-2.1.5b23.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
- ragaai_catalyst-2.1.5b23.dist-info/RECORD,,
+ ragaai_catalyst-2.1.5b24.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ ragaai_catalyst-2.1.5b24.dist-info/METADATA,sha256=b_RiCZulVnUEc2NpbCUsN1-eMICHLEqLv4VG_8kWiRI,13874
+ ragaai_catalyst-2.1.5b24.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ ragaai_catalyst-2.1.5b24.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ ragaai_catalyst-2.1.5b24.dist-info/RECORD,,