ragaai-catalyst 2.1.3__py3-none-any.whl → 2.1.4__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +37 -11
- ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +240 -81
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +632 -114
- ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +316 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py +0 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +229 -82
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +214 -59
- ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +16 -14
- ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +147 -28
- ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +88 -2
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +9 -51
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +83 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +26 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +28 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +45 -15
- ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +2520 -2152
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +59 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +23 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +284 -15
- ragaai_catalyst/tracers/llamaindex_callback.py +5 -5
- ragaai_catalyst/tracers/tracer.py +83 -10
- ragaai_catalyst/tracers/upload_traces.py +1 -1
- ragaai_catalyst-2.1.4.dist-info/METADATA +431 -0
- {ragaai_catalyst-2.1.3.dist-info → ragaai_catalyst-2.1.4.dist-info}/RECORD +26 -20
- ragaai_catalyst-2.1.3.dist-info/METADATA +0 -43
- {ragaai_catalyst-2.1.3.dist-info → ragaai_catalyst-2.1.4.dist-info}/WHEEL +0 -0
- {ragaai_catalyst-2.1.3.dist-info → ragaai_catalyst-2.1.4.dist-info}/top_level.txt +0 -0
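The hunks below are from `ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py` (+45 -15): `extract_token_usage` learns to read token counts from JSON response bodies, Vertex AI responses, and Claude 3 message objects, and `calculate_llm_cost` gains a second lookup attempt for `azure`-prefixed model names.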
```diff
@@ -91,9 +91,43 @@ def extract_token_usage(result):
         # Run the coroutine in the current event loop
         result = loop.run_until_complete(result)
 
-    # Handle
+    # Handle text attribute responses (JSON string or Vertex AI)
+    if hasattr(result, "text"):
+        # First try parsing as JSON for OpenAI responses
+        try:
+            import json
+            json_data = json.loads(result.text)
+            if isinstance(json_data, dict) and "usage" in json_data:
+                usage = json_data["usage"]
+                return {
+                    "prompt_tokens": usage.get("prompt_tokens", 0),
+                    "completion_tokens": usage.get("completion_tokens", 0),
+                    "total_tokens": usage.get("total_tokens", 0)
+                }
+        except (json.JSONDecodeError, AttributeError):
+            pass
+
+        # If JSON parsing fails, try Vertex AI format
+        total_tokens = getattr(result, "token_count", 0)
+        if not total_tokens and hasattr(result, "_raw_response"):
+            total_tokens = getattr(result._raw_response, "token_count", 0)
+        if total_tokens:  # Only return if we actually found tokens
+            return {
+                "prompt_tokens": 0,  # Vertex AI doesn't provide this breakdown
+                "completion_tokens": total_tokens,
+                "total_tokens": total_tokens
+            }
+
+    # Handle Claude 3 message format
     if hasattr(result, "usage"):
         usage = result.usage
+        if hasattr(usage, "input_tokens") and hasattr(usage, "output_tokens"):
+            return {
+                "prompt_tokens": usage.input_tokens,
+                "completion_tokens": usage.output_tokens,
+                "total_tokens": usage.input_tokens + usage.output_tokens
+            }
+        # Handle standard OpenAI/Anthropic format
         return {
             "prompt_tokens": getattr(usage, "prompt_tokens", 0),
             "completion_tokens": getattr(usage, "completion_tokens", 0),
```
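The new checks run in a fixed order: a `text` attribute is tried first (JSON body with a `usage` object, then Vertex AI token counts), and only afterwards a `usage` attribute (Claude 3 `input_tokens`/`output_tokens`, then the standard prompt/completion fields). Below is a minimal sketch of that ordering; the mock responses built with `SimpleNamespace` are hypothetical stand-ins for real client objects, and the import path is taken from the file list above.

```python
import json
from types import SimpleNamespace

from ragaai_catalyst.tracers.agentic_tracing.utils.llm_utils import (
    extract_token_usage,
)

# 1) OpenAI-style: .text holds a JSON string with a "usage" object.
openai_like = SimpleNamespace(
    text=json.dumps({"usage": {"prompt_tokens": 12,
                               "completion_tokens": 34,
                               "total_tokens": 46}})
)
print(extract_token_usage(openai_like))
# {'prompt_tokens': 12, 'completion_tokens': 34, 'total_tokens': 46}

# 2) Vertex AI-style: .text is plain text (JSON parsing fails),
#    so tokens come from the token_count attribute instead.
vertex_like = SimpleNamespace(text="plain text", token_count=20)
print(extract_token_usage(vertex_like))
# {'prompt_tokens': 0, 'completion_tokens': 20, 'total_tokens': 20}

# 3) Claude 3-style: no .text, and .usage exposes input_tokens/output_tokens.
claude_like = SimpleNamespace(
    usage=SimpleNamespace(input_tokens=5, output_tokens=7)
)
print(extract_token_usage(claude_like))
# {'prompt_tokens': 5, 'completion_tokens': 7, 'total_tokens': 12}
```

The next hunk removes the old standalone Vertex AI branch, whose logic now lives inside the `text` handling added above.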
```diff
@@ -133,19 +167,6 @@ def extract_token_usage(result):
             "total_tokens": metadata.get("total_tokens", 0)
         }
 
-    # Handle Vertex AI format
-    if hasattr(result, "text"):
-        # For LangChain ChatVertexAI
-        total_tokens = getattr(result, "token_count", 0)
-        if not total_tokens and hasattr(result, "_raw_response"):
-            # Try to get from raw response
-            total_tokens = getattr(result._raw_response, "token_count", 0)
-        return {
-            "prompt_tokens": 0,  # Vertex AI doesn't provide this breakdown
-            "completion_tokens": total_tokens,
-            "total_tokens": total_tokens
-        }
-
     return {
         "prompt_tokens": 0,
         "completion_tokens": 0,
```
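In `calculate_llm_cost`, the model-cost lookup gains a fallback for Azure-hosted models whose names arrive as `azure-<model>` rather than the `azure/<model>` key form used by the cost table: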
```diff
@@ -171,10 +192,19 @@ def calculate_llm_cost(token_usage, model_name, model_costs):
     }
 
     # Get model costs, defaulting to default costs if unknown
-    model_cost = model_costs.get(model_name, {
+    model_cost = model_cost = model_costs.get(model_name, {
         "input_cost_per_token": 0.0,
         "output_cost_per_token": 0.0
     })
+    if model_cost['input_cost_per_token'] == 0.0 and model_cost['output_cost_per_token'] == 0.0:
+        provide_name = model_name.split('-')[0]
+        if provide_name == 'azure':
+            model_name = os.path.join('azure', '-'.join(model_name.split('-')[1:]))
+
+            model_cost = model_costs.get(model_name, {
+                "input_cost_per_token": 0.0,
+                "output_cost_per_token": 0.0
+            })
 
     input_cost = (token_usage.get("prompt_tokens", 0)) * model_cost.get("input_cost_per_token", 0.0)
     output_cost = (token_usage.get("completion_tokens", 0)) * model_cost.get("output_cost_per_token", 0.0)
```
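The fallback fires only when the direct lookup returns the zero-cost default and the first dash-separated segment of the name is `azure`; the key is then rebuilt with `os.path.join`, which yields `azure/<model>` on POSIX systems (on Windows it would yield `azure\<model>` and miss). A standalone sketch of the lookup, with a hypothetical helper name and made-up prices:

```python
import os

def lookup_model_cost(model_costs: dict, model_name: str) -> dict:
    """Hypothetical helper mirroring the fallback added in the hunk above."""
    default = {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}
    model_cost = model_costs.get(model_name, default)
    # Retry when the direct lookup found no pricing and the name is
    # azure-prefixed: 'azure-gpt-4o' -> 'azure/gpt-4o' (POSIX separator).
    if (model_cost["input_cost_per_token"] == 0.0
            and model_cost["output_cost_per_token"] == 0.0
            and model_name.split("-")[0] == "azure"):
        retry_key = os.path.join("azure", "-".join(model_name.split("-")[1:]))
        model_cost = model_costs.get(retry_key, default)
    return model_cost

# Made-up per-token prices for illustration only.
costs = {"azure/gpt-4o": {"input_cost_per_token": 2.5e-06,
                          "output_cost_per_token": 1.0e-05}}
print(lookup_model_cost(costs, "azure-gpt-4o"))
# {'input_cost_per_token': 2.5e-06, 'output_cost_per_token': 1e-05}
```

Names that miss under both keys still fall through to the zero-cost default, as before this change.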