ragaai-catalyst 2.1.6.4b0__py3-none-any.whl → 2.1.7__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- ragaai_catalyst/dataset.py +1 -1
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +26 -1
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +6 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +180 -164
- ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py +20 -2
- ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +169 -50
- ragaai_catalyst/tracers/tracer.py +128 -115
- ragaai_catalyst/tracers/upload_traces.py +3 -3
- ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +1 -1
- ragaai_catalyst/tracers/utils/rag_trace_json_converter.py +243 -0
- ragaai_catalyst/tracers/utils/trace_json_converter.py +1 -0
- {ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/METADATA +1 -1
- {ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/RECORD +16 -15
- {ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/WHEEL +0 -0
- {ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/licenses/LICENSE +0 -0
- {ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/top_level.txt +0 -0
ragaai_catalyst/tracers/utils/rag_trace_json_converter.py

@@ -0,0 +1,243 @@
+import json
+from litellm import model_cost
+import logging
+import os
+import re
+from datetime import datetime
+import tiktoken
+
+logger = logging.getLogger("RagaAICatalyst")
+logging_level = (
+    logger.setLevel(logging.DEBUG) if os.getenv("DEBUG") == "1" else logging.INFO
+)
+
+def rag_trace_json_converter(input_trace, custom_model_cost, trace_id, user_details, tracer_type):
+    trace_aggregate = {}
+
+    def get_prompt(input_trace):
+        if tracer_type == "langchain":
+            for span in input_trace:
+                if span["name"] in ["ChatOpenAI", "ChatAnthropic", "ChatGoogleGenerativeAI"]:
+                    return span["attributes"].get("llm.input_messages.1.message.content")
+
+                elif span["name"] == "LLMChain":
+                    return json.loads(span["attributes"].get("input.value", "{}")).get("question")
+
+                elif span["name"] == "RetrievalQA":
+                    return span["attributes"].get("input.value")
+
+                elif span["name"] == "VectorStoreRetriever":
+                    return span["attributes"].get("input.value")
+
+        return None
+
+    def get_response(input_trace):
+        if tracer_type == "langchain":
+            for span in input_trace:
+                if span["name"] in ["ChatOpenAI", "ChatAnthropic", "ChatGoogleGenerativeAI"]:
+                    return span["attributes"].get("llm.output_messages.0.message.content")
+
+                elif span["name"] == "LLMChain":
+                    return json.loads(span["attributes"].get("output.value", ""))
+
+                elif span["name"] == "RetrievalQA":
+                    return span["attributes"].get("output.value")
+
+        return None
+
+    def get_context(input_trace):
+        if tracer_type == "langchain":
+            for span in input_trace:
+                if span["name"] == "VectorStoreRetriever":
+                    return span["attributes"].get("retrieval.documents.1.document.content")
+        return None
+
+    prompt = get_prompt(input_trace)
+    response = get_response(input_trace)
+    context = get_context(input_trace)
+
+    if tracer_type == "langchain":
+        trace_aggregate["tracer_type"] = "langchain"
+    else:
+        trace_aggregate["tracer_type"] = "llamaindex"
+
+    trace_aggregate['trace_id'] = trace_id
+    trace_aggregate['session_id'] = None
+    trace_aggregate["metadata"] = user_details.get("trace_user_detail", {}).get("metadata")
+
+    #dummy data need to be fetched
+    trace_aggregate["pipeline"] = {
+        'llm_model': 'gpt-4o-mini',
+        'vector_store': 'faiss',
+        'embed_model': 'text-embedding-ada-002'
+    }
+
+    trace_aggregate["data"] = {}
+    trace_aggregate["data"]["prompt"] = prompt
+    trace_aggregate["data"]["response"] = response
+    trace_aggregate["data"]["context"] = context
+
+    if tracer_type == "langchain":
+        additional_metadata = get_additional_metadata(input_trace, custom_model_cost, model_cost, prompt, response)
+    else:
+        additional_metadata = get_additional_metadata(input_trace, custom_model_cost, model_cost)
+
+    trace_aggregate["metadata"].update(additional_metadata)
+    additional_metadata.pop("total_cost")
+    additional_metadata.pop("total_latency")
+    return trace_aggregate, additional_metadata
+
+def get_additional_metadata(spans, custom_model_cost, model_cost_dict, prompt="", response=""):
+    additional_metadata = {}
+    additional_metadata["cost"] = 0.0
+    additional_metadata["tokens"] = {}
+    try:
+        for span in spans:
+            if span["name"] in ["ChatOpenAI", "ChatAnthropic", "ChatGoogleGenerativeAI"]:
+                start_time = datetime.fromisoformat(span.get("start_time", "")[:-1])  # Remove 'Z' and parse
+                end_time = datetime.fromisoformat(span.get("end_time", "")[:-1])  # Remove 'Z' and parse
+                additional_metadata["latency"] = (end_time - start_time).total_seconds()
+                additional_metadata["model_name"] = span["attributes"].get("llm.model_name", "").replace("models/", "")
+                additional_metadata["model"] = additional_metadata["model_name"]
+                try:
+                    additional_metadata["tokens"]["prompt"] = span["attributes"]["llm.token_count.prompt"]
+
+                except:
+                    logger.warning("Warning: prompt token not found. using fallback strategies to get tokens.")
+                    try:
+                        additional_metadata["tokens"]["prompt"] = num_tokens_from_messages(
+                            model=additional_metadata["model_name"],
+                            message=prompt
+                        )
+                    except Exception as e:
+                        logger.warning(f"Failed to count prompt tokens: {str(e)}. Using 'gpt-4o-mini' model count as fallback.")
+                        additional_metadata["tokens"]["prompt"] = num_tokens_from_messages(
+                            model="gpt-4o-mini",
+                            message=prompt
+                        )
+
+                try:
+                    additional_metadata["tokens"]["completion"] = span["attributes"]["llm.token_count.completion"]
+                except:
+                    logger.warning("Warning: completion token not found. using fallback strategies to get tokens.")
+                    try:
+                        additional_metadata["tokens"]["completion"] = num_tokens_from_messages(
+                            model=additional_metadata["model_name"],
+                            message=response
+                        )
+                    except Exception as e:
+                        logger.warning(f"Failed to count completion tokens: {str(e)}. Using 'gpt-4o-mini' model count as fallback.")
+                        additional_metadata["tokens"]["completion"] = num_tokens_from_messages(
+                            model="gpt-4o-mini",
+                            message=response
+                        )
+
+                # Ensure both values are not None before adding
+                prompt_tokens = additional_metadata["tokens"].get("prompt", 0) or 0
+                completion_tokens = additional_metadata["tokens"].get("completion", 0) or 0
+                additional_metadata["tokens"]["total"] = prompt_tokens + completion_tokens
+
+    except Exception as e:
+        logger.error(f"Error getting additional metadata: {str(e)}")
+
+    try:
+        if custom_model_cost.get(additional_metadata.get('model_name')):
+            model_cost_data = custom_model_cost[additional_metadata.get('model_name')]
+        else:
+            model_cost_data = model_cost_dict.get(additional_metadata.get('model_name'))
+
+        # Check if model_cost_data is None
+        if model_cost_data is None:
+            logger.warning(f"No cost data found for model: {additional_metadata.get('model_name')}")
+            # Set default values
+            additional_metadata["cost"] = 0.0
+            additional_metadata["total_cost"] = 0.0
+            additional_metadata["total_latency"] = additional_metadata.get("latency", 0)
+            additional_metadata["prompt_tokens"] = additional_metadata["tokens"].get("prompt", 0) or 0
+            additional_metadata["completion_tokens"] = additional_metadata["tokens"].get("completion", 0) or 0
+        elif 'tokens' in additional_metadata and all(k in additional_metadata['tokens'] for k in ['prompt', 'completion']):
+            # Get input and output costs, defaulting to 0 if not found
+            input_cost_per_token = model_cost_data.get("input_cost_per_token", 0) or 0
+            output_cost_per_token = model_cost_data.get("output_cost_per_token", 0) or 0
+
+            # Get token counts, defaulting to 0 if not found
+            prompt_tokens = additional_metadata["tokens"].get("prompt", 0) or 0
+            completion_tokens = additional_metadata["tokens"].get("completion", 0) or 0
+
+            # Calculate costs
+            prompt_cost = prompt_tokens * input_cost_per_token
+            completion_cost = completion_tokens * output_cost_per_token
+
+            additional_metadata["cost"] = prompt_cost + completion_cost
+            additional_metadata["total_cost"] = additional_metadata["cost"]
+            additional_metadata["total_latency"] = additional_metadata.get("latency", 0)
+            additional_metadata["prompt_tokens"] = prompt_tokens
+            additional_metadata["completion_tokens"] = completion_tokens
+    except Exception as e:
+        logger.warning(f"Error getting model cost data: {str(e)}")
+        # Set default values in case of error
+        additional_metadata["cost"] = 0.0
+        additional_metadata["total_cost"] = 0.0
+        additional_metadata["total_latency"] = additional_metadata.get("latency", 0)
+        additional_metadata["prompt_tokens"] = additional_metadata["tokens"].get("prompt", 0) or 0
+        additional_metadata["completion_tokens"] = additional_metadata["tokens"].get("completion", 0) or 0
+    try:
+        additional_metadata.pop("tokens", None)
+    except Exception as e:
+        logger.error(f"Error removing tokens from additional metadata: {str(e)}")
+
+    return additional_metadata
+
+def num_tokens_from_messages(model, message):
+    # GPT models
+    if re.match(r'^gpt-', model):
+        """Check if the model is any GPT model (pattern: ^gpt-)
+        This matches any model name that starts with 'gpt-'
+        """
+        def num_tokens_from_string(string: str, encoding_name: str) -> int:
+            """Returns the number of tokens in a text string."""
+            encoding = tiktoken.get_encoding(encoding_name)
+            num_tokens = len(encoding.encode(string))
+            return num_tokens
+
+        if re.match(r'^gpt-4o.*', model):
+            """Check for GPT-4 Optimized models (pattern: ^gpt-4o.*)
+            Examples that match:
+            - gpt-4o
+            - gpt-4o-mini
+            - gpt-4o-2024-08-06
+            The .* allows for any characters after 'gpt-4o'
+            """
+            encoding_name = "o200k_base"
+            return num_tokens_from_string(message, encoding_name)
+
+        elif re.match(r'^gpt-(4|3\.5).*', model):
+            """Check for GPT-4 and GPT-3.5 models (pattern: ^gpt-(4|3\.5).*)
+            Uses cl100k_base encoding for GPT-4 and GPT-3.5 models
+            Examples that match:
+            - gpt-4
+            - gpt-4-turbo
+            - gpt-4-2024-08-06
+            - gpt-3.5-turbo
+            - gpt-3.5-turbo-16k
+            """
+            encoding_name = "cl100k_base"
+            return num_tokens_from_string(message, encoding_name)
+
+        else:
+            """Default case for any other GPT models
+            Uses o200k_base encoding as the default tokenizer
+            """
+            return num_tokens_from_string(message, encoding_name="o200k_base")
+
+
+    # Gemini models
+    elif re.match(r'^gemini-', model):
+        GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+        client = genai.Client(api_key=GOOGLE_API_KEY)
+
+        response = client.models.count_tokens(
+            model=model,
+            contents=message,
+        )
+        return response.total_tokens
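For orientation, a minimal sketch of how the new converter might be driven. The spans, trace id, and user_details payload below are invented for illustration (the attribute names mirror the OpenInference-style keys the converter reads above), and it assumes ragaai-catalyst 2.1.7 and its litellm dependency are installed:

# Hypothetical input; values are made up, shapes follow the span attributes read above.
from ragaai_catalyst.tracers.utils.rag_trace_json_converter import rag_trace_json_converter

spans = [
    {
        "name": "VectorStoreRetriever",
        "attributes": {
            "input.value": "What does the tracer capture?",
            "retrieval.documents.1.document.content": "Spans for retrievers and LLM calls.",
        },
    },
    {
        "name": "ChatOpenAI",
        "start_time": "2025-01-01T00:00:00.000000Z",
        "end_time": "2025-01-01T00:00:01.250000Z",
        "attributes": {
            "llm.model_name": "gpt-4o-mini",
            "llm.input_messages.1.message.content": "What does the tracer capture?",
            "llm.output_messages.0.message.content": "Retriever and LLM spans.",
            "llm.token_count.prompt": 12,
            "llm.token_count.completion": 5,
        },
    },
]

trace, extra = rag_trace_json_converter(
    input_trace=spans,
    custom_model_cost={},  # per-model overrides; when empty, litellm's model_cost table is used
    trace_id="trace-123",
    user_details={"trace_user_detail": {"metadata": {"recorded_on": "2025-01-01"}}},
    tracer_type="langchain",
)
print(trace["data"]["prompt"])  # "What does the tracer capture?"
print(extra["prompt_tokens"])   # 12

Two caveats visible in the diff itself: num_tokens_from_messages implicitly returns None for any model matching neither the gpt- nor the gemini- prefix, and its Gemini branch references genai.Client without importing genai in this module (the google-genai SDK would provide it via "from google import genai").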
{ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ragaai_catalyst
-Version: 2.1.6.4b0
+Version: 2.1.7
 Summary: RAGA AI CATALYST
 Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>, Tushar Kumar <tushar.kumar@raga.ai>
 Requires-Python: <3.13,>=3.10
{ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/RECORD

@@ -1,6 +1,6 @@
 ragaai_catalyst/__init__.py,sha256=2wfkucAbb3Bt_p2KHelkg9zBQp4yC4iZanltyieG18w,895
 ragaai_catalyst/_version.py,sha256=JKt9KaVNOMVeGs8ojO6LvIZr7ZkMzNN-gCcvryy4x8E,460
-ragaai_catalyst/dataset.py,sha256=
+ragaai_catalyst/dataset.py,sha256=LefpZDCTkLoimZr0GEGK2awjOjhy8zSU-29EtqLrSG0,29404
 ragaai_catalyst/evaluation.py,sha256=O96CydYVPh3duUmXjY6REIXMOR-tOPixSG-Qhrf636A,22955
 ragaai_catalyst/experiment.py,sha256=8yQo1phCHlpnJ-4CqCaIbLXg_1ZlAuLGI9kqGBl-OTE,18859
 ragaai_catalyst/guard_executor.py,sha256=f2FXQSW17z4-eor61J_mtD0z-xBm9yordq8giB-GN_U,14006
@@ -31,8 +31,8 @@ ragaai_catalyst/tracers/distributed.py,sha256=MwlBwIxCAng-OI-7Ove_rkE1mTLeuW4Jw-
 ragaai_catalyst/tracers/langchain_callback.py,sha256=CB75zzG3-DkYTELj0vI1MOHQTY0MuQJfoHIXz9Cl8S8,34568
 ragaai_catalyst/tracers/llamaindex_callback.py,sha256=ZY0BJrrlz-P9Mg2dX-ZkVKG3gSvzwqBtk7JL_05MiYA,14028
 ragaai_catalyst/tracers/llamaindex_instrumentation.py,sha256=Ys_jLkvVqo12bKgXDmkp4TxJu9HkBATrFE8cIcTYxWw,14329
-ragaai_catalyst/tracers/tracer.py,sha256=
-ragaai_catalyst/tracers/upload_traces.py,sha256=
+ragaai_catalyst/tracers/tracer.py,sha256=YZB3l55EXLog_rrAMb2jc5pvhQ63qtCc9UZcY7Yqmxo,37134
+ragaai_catalyst/tracers/upload_traces.py,sha256=PEE_JhAmOAMKyb-pl4ZoFWhIePxJm1zs93crrk94iEg,5887
 ragaai_catalyst/tracers/agentic_tracing/README.md,sha256=X4QwLb7-Jg7GQMIXj-SerZIgDETfw-7VgYlczOR8ZeQ,4508
 ragaai_catalyst/tracers/agentic_tracing/__init__.py,sha256=yf6SKvOPSpH-9LiKaoLKXwqj5sez8F_5wkOb91yp0oE,260
 ragaai_catalyst/tracers/agentic_tracing/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -45,11 +45,11 @@ ragaai_catalyst/tracers/agentic_tracing/tests/ai_travel_agent.py,sha256=S4rCcKzU
 ragaai_catalyst/tracers/agentic_tracing/tests/unique_decorator_test.py,sha256=Xk1cLzs-2A3dgyBwRRnCWs7Eubki40FVonwd433hPN8,4805
 ragaai_catalyst/tracers/agentic_tracing/tracers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py,sha256=LzbsHvELwBmH8ObFomJRhiQ98b6MEi18irm0DPiplt0,29743
-ragaai_catalyst/tracers/agentic_tracing/tracers/base.py,sha256=
+ragaai_catalyst/tracers/agentic_tracing/tracers/base.py,sha256=wYdGud8JKBnr78fizMGwTdA8qWRMpQyA2Il7tS3rSRU,55823
 ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py,sha256=OBJJjFSvwRjCGNJyqX3yIfC1W05ZN2QUXasCJ4gmCjQ,13930
 ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py,sha256=z-qzmCQCkhyW0aLDUR_rNq4pmxhAaVhNY-kZQsox-Ws,50221
-ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py,sha256=
+ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py,sha256=Va66pvibVSgrr_Wfokog4toxLZw40SqxAmR_NZlWu70,16383
 ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py,sha256=m8CxYkl7iMiFya_lNwN1ykBc3Pmo-2pR_2HmpptwHWQ,10352
 ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py,sha256=xxrliKPfdfbIZRZqMnUewsaTD8_Hv0dbuoBivNZGD4U,21674
 ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py,sha256=bhSUhNQCuJXKjgJAXhjKEYjnHMpYN90FSZdR84fNIKU,4614
@@ -70,25 +70,26 @@ ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json,sha256=2tzGw_cKCT
 ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py,sha256=qmODERcFZhc8MX24boFCXkkh6sJ-vZngRHPvxhyWFeE,4347
 ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml,sha256=LvFDivDIE96Zasp-fgDEqUJ5GEQZUawQucR3aOcSUTY,926
 ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py,sha256=H8WNsk4v_5T6OUw4TFOzlDLjQhJwjh1nAMyMAoqMEi4,6946
-ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py,sha256=
+ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py,sha256=W7Nw-IpugejIoHbCtQiN4Sn4ughLocQ9AUCjuAtOhOo,17258
 ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py,sha256=G027toV-Km20JjKrc-Y_PilQ8ABEKrBvvzgLTnqVg7I,5819
 ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py,sha256=4TeCGsFF26249fV6dJHLTZDrRa93SG9oer4rudoF8Y4,19443
 ragaai_catalyst/tracers/exporters/__init__.py,sha256=wQbaqyeIjVZxYprHCKZ9BeiqxeXYBKjzEgP79LWNxCU,293
-ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py,sha256=
+ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py,sha256=eR_bbWMg_q8g9SzutrOZA24Bptr0BWpauZKVWfiCM1c,5910
 ragaai_catalyst/tracers/exporters/file_span_exporter.py,sha256=RgGteu-NVGprXKkynvyIO5yOjpbtA41R3W_NzCjnkwE,6445
 ragaai_catalyst/tracers/exporters/raga_exporter.py,sha256=6xvjWXyh8XPkHKSLLmAZUQSvwuyY17ov8pv2VdfI0qA,17875
-ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py,sha256=
+ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py,sha256=jfJr836ahLyo2qYrP5Hmdx0XkZt8lEhlKGHIP7PNmGs,11463
 ragaai_catalyst/tracers/instrumentators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ragaai_catalyst/tracers/utils/__init__.py,sha256=KeMaZtYaTojilpLv65qH08QmpYclfpacDA0U3wg6Ybw,64
-ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py,sha256=
+ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py,sha256=e0URuRWCdzpxuBLfL82FOTMjbRuDAkW8aIRi7s7Nocc,1655
 ragaai_catalyst/tracers/utils/convert_llama_instru_callback.py,sha256=8qLo7x4Zsn3dhJfSv9gviB60YXZ2TOsWEouucJmBM0c,1724
 ragaai_catalyst/tracers/utils/extraction_logic_llama_index.py,sha256=ZhPs0YhVtB82-Pq9o1BvCinKE_WPvVxPTEcZjlJbFYM,2371
 ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py,sha256=XS2_x2qneqEx9oAighLg-LRiueWcESLwIC2r7eJT-Ww,3117
 ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json,sha256=C3uwkibJ08C9sOX-54kulZYmJlIpZ-SQpfE6HNGrjbM,343502
-ragaai_catalyst/tracers/utils/trace_json_converter.py,sha256=
+ragaai_catalyst/tracers/utils/rag_trace_json_converter.py,sha256=XSanJ3ChRn_Nqj5mlJhy-jFUCAhkw6lBuhhzUx6Mo9k,11239
+ragaai_catalyst/tracers/utils/trace_json_converter.py,sha256=E0_QfciQMMpCtQYrNB4l8HJhlaFalr5bkMqkVRgQahY,14073
 ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
-ragaai_catalyst-2.1.6.4b0.dist-info/licenses/LICENSE,sha256=
-ragaai_catalyst-2.1.6.4b0.dist-info/METADATA,sha256=
-ragaai_catalyst-2.1.6.4b0.dist-info/WHEEL,sha256=
-ragaai_catalyst-2.1.6.4b0.dist-info/top_level.txt,sha256=
-ragaai_catalyst-2.1.6.4b0.dist-info/RECORD,,
+ragaai_catalyst-2.1.7.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ragaai_catalyst-2.1.7.dist-info/METADATA,sha256=_4zHpKGxANGDv7HlC-pqZ6mZBqdvo4b8vDHyeKaefBY,22137
+ragaai_catalyst-2.1.7.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ragaai_catalyst-2.1.7.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ragaai_catalyst-2.1.7.dist-info/RECORD,,
{ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/WHEEL

File without changes

{ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/licenses/LICENSE

File without changes

{ragaai_catalyst-2.1.6.4b0.dist-info → ragaai_catalyst-2.1.7.dist-info}/top_level.txt

File without changes
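Each RECORD entry has the form path,sha256=<digest>,size, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 hash. A minimal sketch for recomputing one of the digests above (the path is relative to an unpacked 2.1.7 wheel):

import base64
import hashlib

def record_digest(path: str) -> str:
    # Unpadded URL-safe base64 SHA-256, the encoding wheel RECORD files use.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 2.1.7 wheel, record_digest("ragaai_catalyst/dataset.py") should match
# the RECORD entry above: LefpZDCTkLoimZr0GEGK2awjOjhy8zSU-29EtqLrSG0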