ragaai-catalyst 2.1.7.1__py3-none-any.whl → 2.1.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragaai_catalyst/synthetic_data_generation.py +4 -2
- ragaai_catalyst/tracers/utils/rag_trace_json_converter.py +237 -88
- {ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/METADATA +4 -166
- {ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/RECORD +7 -7
- {ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/WHEEL +1 -1
- {ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/licenses/LICENSE +0 -0
- {ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/top_level.txt +0 -0
ragaai_catalyst/synthetic_data_generation.py

@@ -813,8 +813,9 @@ Irrelevant Examples: Any examples that are not relevant to the user's instructio
                 )
             except Exception as e:
                 continue
-
-
+            for example in examples:
+                row_dict['generated_examples'] = example
+                fin_df_list.append(row_dict)
         fin_df = pd.DataFrame(fin_df_list)
         csv_file, csv_ext = os.path.splitext(csv_path)
         if not dst_csv_path:
@@ -824,6 +825,7 @@ Irrelevant Examples: Any examples that are not relevant to the user's instructio
         os.makedirs(dst_dir, exist_ok=True)
         fin_df.to_csv(dst_csv_path)
         logger.info(f'CSV with generated examples saved at {dst_csv_path}')
+        return dst_csv_path
 
 
 # Usage:
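The first hunk flattens the generated examples so that each example becomes its own row before the DataFrame is built; the second makes the function return the destination CSV path instead of ending silently. Below is a minimal sketch of the flattening with invented data. Note that the hunk re-appends the same `row_dict` object on every iteration; a per-example copy is used in this sketch so the rows stay distinct:

```python
import pandas as pd

# Invented inputs mirroring the names used in the hunk above.
row_dict = {"user_instruction": "Summarize the ticket"}
examples = ["Example A", "Example B"]

fin_df_list = []
for example in examples:
    # dict(...) copies row_dict so each row keeps its own example.
    fin_df_list.append(dict(row_dict, generated_examples=example))

fin_df = pd.DataFrame(fin_df_list)
print(fin_df)  # two rows, one per generated example
```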
ragaai_catalyst/tracers/utils/rag_trace_json_converter.py

@@ -14,44 +14,156 @@ logging_level = (
 def rag_trace_json_converter(input_trace, custom_model_cost, trace_id, user_details, tracer_type,user_context):
     trace_aggregate = {}
     def get_prompt(input_trace):
-
-
-
-
-
-
-
+        try:
+            if tracer_type == "langchain":
+                for span in input_trace:
+                    try:
+                        # First check if there's a user message in any of the input messages
+                        attributes = span.get("attributes", {})
+
+                        # Look for user role in any of the input messages
+                        if attributes:
+                            for key, value in attributes.items():
+                                try:
+                                    if key.startswith("llm.input_messages.") and key.endswith(".message.role") and value == "user":
+                                        # Extract the message number
+                                        message_num = key.split(".")[2]
+                                        # Construct the content key
+                                        content_key = f"llm.input_messages.{message_num}.message.content"
+                                        if content_key in attributes:
+                                            return attributes.get(content_key)
+                                except Exception as e:
+                                    logger.warning(f"Error processing attribute key-value pair: {str(e)}")
+                                    continue
 
-
-
+                            for key, value in attributes.items():
+                                try:
+                                    if key.startswith("llm.prompts") and isinstance(value, list):
+                                        human_message = None
+                                        for message in value:
+                                            if isinstance(message, str):
+                                                human_index = message.find("Human:")
+                                                if human_index != -1:
+                                                    human_message = message[human_index:].replace("Human:", "")
+                                                    break
+                                        return human_message if human_message else value
+                                except Exception as e:
+                                    logger.warning(f"Error processing attribute key-value pair for prompt: {str(e)}")
+                                    continue
+                    except Exception as e:
+                        logger.warning(f"Error processing span for prompt extraction: {str(e)}")
+                        continue
 
-
-
+                for span in input_trace:
+                    try:
+                        # If no user message found, check for specific span types
+                        if span["name"] == "LLMChain":
+                            try:
+                                input_value = span["attributes"].get("input.value", "{}")
+                                return json.loads(input_value).get("question", "")
+                            except json.JSONDecodeError:
+                                logger.warning(f"Invalid JSON in LLMChain input.value: {input_value}")
+                                continue
+                        elif span["name"] == "RetrievalQA":
+                            return span["attributes"].get("input.value", "")
+                        elif span["name"] == "VectorStoreRetriever":
+                            return span["attributes"].get("input.value", "")
+                    except Exception as e:
+                        logger.warning(f"Error processing span for fallback prompt extraction: {str(e)}")
+                        continue
 
-
+                # If we've gone through all spans and found nothing
+                logger.warning("No user message found in any span")
+                logger.warning("Returning empty string for prompt.")
+                return ""
+
+            logger.error("Prompt not found in the trace")
+            return None
+        except Exception as e:
+            logger.error(f"Error while extracting prompt from trace: {str(e)}")
+            return None
 
     def get_response(input_trace):
-
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            if tracer_type == "langchain":
+                for span in input_trace:
+                    try:
+                        attributes = span.get("attributes", {})
+                        if attributes:
+                            for key, value in attributes.items():
+                                try:
+                                    if key.startswith("llm.output_messages.") and key.endswith(".message.content"):
+                                        return value
+                                except Exception as e:
+                                    logger.warning(f"Error processing attribute key-value pair for response: {str(e)}")
+                                    continue
+
+                            for key, value in attributes.items():
+                                try:
+                                    if key.startswith("output.value"):
+                                        try:
+                                            output_json = json.loads(value)
+                                            if "generations" in output_json and isinstance(output_json.get("generations"), list) and len(output_json.get("generations")) > 0:
+                                                if isinstance(output_json.get("generations")[0], list) and len(output_json.get("generations")[0]) > 0:
+                                                    first_generation = output_json.get("generations")[0][0]
+                                                    if "text" in first_generation:
+                                                        return first_generation["text"]
+                                        except json.JSONDecodeError:
+                                            logger.warning(f"Invalid JSON in output.value: {value}")
+                                            continue
+                                except Exception as e:
+                                    logger.warning(f"Error processing attribute key-value pair for response: {str(e)}")
+                                    continue
+                    except Exception as e:
+                        logger.warning(f"Error processing span for response extraction: {str(e)}")
+                        continue
+
+                for span in input_trace:
+                    try:
+                        if span["name"] == "LLMChain":
+                            try:
+                                output_value = span["attributes"].get("output.value", "")
+                                if output_value:
+                                    return json.loads(output_value)
+                                return ""
+                            except json.JSONDecodeError:
+                                logger.warning(f"Invalid JSON in LLMChain output.value: {output_value}")
+                                continue
+                        elif span["name"] == "RetrievalQA":
+                            return span["attributes"].get("output.value", "")
+                        elif span["name"] == "VectorStoreRetriever":
+                            return span["attributes"].get("output.value", "")
+                    except Exception as e:
+                        logger.warning(f"Error processing span for fallback response extraction: {str(e)}")
+                        continue
+
+                logger.warning("No response found in any span")
+                return ""
+
+            logger.error("Response not found in the trace")
+            return None
+        except Exception as e:
+            logger.error(f"Error while extracting response from trace: {str(e)}")
+            return None
 
     def get_context(input_trace):
-
-
-
-
-
-
-
+        try:
+            if user_context and user_context.strip():
+                return user_context
+            elif tracer_type == "langchain":
+                for span in input_trace:
+                    try:
+                        if span["name"] == "VectorStoreRetriever":
+                            return span["attributes"].get("retrieval.documents.1.document.content", "")
+                    except Exception as e:
+                        logger.warning(f"Error processing span for context extraction: {str(e)}")
+                        continue
+
+            logger.warning("Context not found in the trace")
+            return ""
+        except Exception as e:
+            logger.error(f"Error while extracting context from trace: {str(e)}")
+            return ""
 
     prompt = get_prompt(input_trace)
     response = get_response(input_trace)
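The rewritten `get_prompt` first scans OpenInference-style flattened span attributes, matching every `llm.input_messages.<n>.message.role` key and, when the role is `user`, reading the sibling `.message.content` key. An illustrative, self-contained sketch of that key pattern (the sample attributes are invented, not taken from a real trace):

```python
# Invented sample of OpenInference-style flattened attributes.
attributes = {
    "llm.input_messages.0.message.role": "system",
    "llm.input_messages.0.message.content": "You are a helpful assistant.",
    "llm.input_messages.1.message.role": "user",
    "llm.input_messages.1.message.content": "What does RAG stand for?",
}

def first_user_message(attributes: dict) -> str | None:
    """Return the content of the first message whose role is 'user'."""
    for key, value in attributes.items():
        if key.startswith("llm.input_messages.") and key.endswith(".message.role") and value == "user":
            message_num = key.split(".")[2]  # the "<n>" segment of the key
            return attributes.get(f"llm.input_messages.{message_num}.message.content")
    return None

print(first_user_message(attributes))  # -> "What does RAG stand for?"
```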
@@ -65,14 +177,8 @@ def rag_trace_json_converter(input_trace, custom_model_cost, trace_id, user_deta
     trace_aggregate['trace_id'] = trace_id
     trace_aggregate['session_id'] = None
     trace_aggregate["metadata"] = user_details.get("trace_user_detail", {}).get("metadata")
+    trace_aggregate["pipeline"] = user_details.get("trace_user_detail", {}).get("pipeline")
 
-    #dummy data need to be fetched
-    trace_aggregate["pipeline"] = {
-        'llm_model': 'gpt-4o-mini',
-        'vector_store': 'faiss',
-        'embed_model': 'text-embedding-ada-002'
-    }
-
     trace_aggregate["data"] = {}
     trace_aggregate["data"]["prompt"] = prompt
     trace_aggregate["data"]["response"] = response
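This hunk drops the hard-coded pipeline stub and reads the pipeline description from the caller-supplied `user_details` instead. A hypothetical payload consistent with the keys read above; the actual schema is defined by the caller and is not shown in this diff (the pipeline values here simply mirror the removed defaults):

```python
# Hypothetical user_details payload; keys mirror the lookups in the hunk.
user_details = {
    "trace_user_detail": {
        "metadata": {"run": "demo"},
        "pipeline": {
            "llm_model": "gpt-4o-mini",
            "vector_store": "faiss",
            "embed_model": "text-embedding-ada-002",
        },
    }
}

trace_aggregate = {}
trace_aggregate["metadata"] = user_details.get("trace_user_detail", {}).get("metadata")
trace_aggregate["pipeline"] = user_details.get("trace_user_detail", {}).get("pipeline")
print(trace_aggregate["pipeline"]["llm_model"])  # -> "gpt-4o-mini"
```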
@@ -95,7 +201,8 @@ def get_additional_metadata(spans, custom_model_cost, model_cost_dict, prompt=""
     additional_metadata["tokens"] = {}
     try:
         for span in spans:
-            if span["name"] in ["ChatOpenAI", "ChatAnthropic", "ChatGoogleGenerativeAI"]:
+            if span["name"] in ["ChatOpenAI", "ChatAnthropic", "ChatGoogleGenerativeAI", "OpenAI", "ChatOpenAI_LangchainOpenAI", "ChatOpenAI_ChatModels",
+                                "ChatVertexAI", "VertexAI", "ChatLiteLLM", "ChatBedrock", "AzureChatOpenAI", "ChatAnthropicVertex"]:
                 start_time = datetime.fromisoformat(span.get("start_time", "")[:-1]) # Remove 'Z' and parse
                 end_time = datetime.fromisoformat(span.get("end_time", "")[:-1]) # Remove 'Z' and parse
                 additional_metadata["latency"] = (end_time - start_time).total_seconds()
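Latency is derived by parsing the span's ISO-8601 timestamps; the trailing `Z` is sliced off because `datetime.fromisoformat` on Python versions before 3.11 rejects it. A minimal sketch with invented timestamps:

```python
from datetime import datetime

# Invented span timestamps in the format the tracer emits.
span = {"start_time": "2024-01-01T00:00:00.000000Z",
        "end_time": "2024-01-01T00:00:02.500000Z"}

start_time = datetime.fromisoformat(span.get("start_time", "")[:-1])  # strip 'Z'
end_time = datetime.fromisoformat(span.get("end_time", "")[:-1])
print((end_time - start_time).total_seconds())  # -> 2.5
```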
@@ -191,55 +298,97 @@ def get_additional_metadata(spans, custom_model_cost, model_cost_dict, prompt=""
     return additional_metadata
 
 def num_tokens_from_messages(model, message):
-
-
-
-
-
-
-
-
-
-
-
-    if re.match(r'^gpt-4o.*', model):
-        """Check for GPT-4 Optimized models (pattern: ^gpt-4o.*)
-        Examples that match:
-        - gpt-4o
-        - gpt-4o-mini
-        - gpt-4o-2024-08-06
-        The .* allows for any characters after 'gpt-4o'
-        """
-        encoding_name = "o200k_base"
-        return num_tokens_from_string(message, encoding_name)
-
-    elif re.match(r'^gpt-(4|3\.5).*', model):
-        """Check for GPT-4 and GPT-3.5 models (pattern: ^gpt-(4|3\.5).*)
-        Uses cl100k_base encoding for GPT-4 and GPT-3.5 models
-        Examples that match:
-        - gpt-4
-        - gpt-4-turbo
-        - gpt-4-2024-08-06
-        - gpt-3.5-turbo
-        - gpt-3.5-turbo-16k
-        """
-        encoding_name = "cl100k_base"
-        return num_tokens_from_string(message, encoding_name)
-
-    else:
-        """Default case for any other GPT models
-        Uses o200k_base encoding as the default tokenizer
+    try:
+        # Handle None or empty message
+        if not message:
+            logger.warning("Empty or None message provided to token counter")
+            return 0
+
+        # GPT models
+        if re.match(r'^gpt-', model):
+            """Check if the model is any GPT model (pattern: ^gpt-)
+            This matches any model name that starts with 'gpt-'
             """
-
-
+            def num_tokens_from_string(string: str, encoding_name: str) -> int:
+                """Returns the number of tokens in a text string."""
+                try:
+                    encoding = tiktoken.get_encoding(encoding_name)
+                    num_tokens = len(encoding.encode(string))
+                    return num_tokens
+                except Exception as e:
+                    logger.warning(f"Error encoding with {encoding_name}: {str(e)}")
+                    # Fallback to a different encoding if the requested one fails
+                    try:
+                        fallback_encoding = tiktoken.get_encoding("cl100k_base")
+                        return len(fallback_encoding.encode(string))
+                    except:
+                        logger.error("Failed to use fallback encoding")
+                        return 0
+
+            if re.match(r'^gpt-4o.*', model):
+                """Check for GPT-4 Optimized models (pattern: ^gpt-4o.*)
+                Examples that match:
+                - gpt-4o
+                - gpt-4o-mini
+                - gpt-4o-2024-08-06
+                The .* allows for any characters after 'gpt-4o'
+                """
+                encoding_name = "o200k_base"
+                return num_tokens_from_string(message, encoding_name)
+
+            elif re.match(r'^gpt-(4|3\.5).*', model):
+                """Check for GPT-4 and GPT-3.5 models (pattern: ^gpt-(4|3\.5).*)
+                Uses cl100k_base encoding for GPT-4 and GPT-3.5 models
+                Examples that match:
+                - gpt-4
+                - gpt-4-turbo
+                - gpt-4-2024-08-06
+                - gpt-3.5-turbo
+                - gpt-3.5-turbo-16k
+                """
+                encoding_name = "cl100k_base"
+                return num_tokens_from_string(message, encoding_name)
+
+            else:
+                """Default case for any other GPT models
+                Uses o200k_base encoding as the default tokenizer
+                """
+                return num_tokens_from_string(message, encoding_name="o200k_base")
+
 
-
-
-
-
+        # Gemini models
+        elif re.match(r'^gemini-', model):
+            try:
+                GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+                if not GOOGLE_API_KEY:
+                    logger.warning("GOOGLE_API_KEY not found in environment variables")
+                    return 0
+
+                import google.generativeai as genai
+                client = genai.Client(api_key=GOOGLE_API_KEY)
 
-
-
-
-
-
+                response = client.models.count_tokens(
+                    model=model,
+                    contents=message,
+                )
+                return response.total_tokens
+            except ImportError:
+                logger.warning("google.generativeai module not found. Install with pip install google-generativeai")
+                return 0
+            except Exception as e:
+                logger.warning(f"Error counting tokens for Gemini model: {str(e)}")
+                return 0
+
+        # Default case for unknown models
+        else:
+            logger.warning(f"Unknown model type: {model}. Using default token counter.")
+            try:
+                # Use cl100k_base as a fallback for unknown models
+                encoding = tiktoken.get_encoding("cl100k_base")
+                return len(encoding.encode(message))
+            except:
+                logger.error("Failed to use fallback encoding for unknown model")
+                return 0
+    except Exception as e:
+        logger.error(f"Unexpected error in token counting: {str(e)}")
+        return 0
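The GPT branch above counts tokens with tiktoken and falls back to `cl100k_base` when the requested encoding cannot be loaded. A standalone sketch of that counting logic (the tiktoken calls match the hunk; the wrapper name `count_tokens` is illustrative):

```python
import tiktoken

def count_tokens(text: str, encoding_name: str = "o200k_base") -> int:
    """Count tokens, falling back to cl100k_base if the encoding fails to load."""
    try:
        encoding = tiktoken.get_encoding(encoding_name)
    except Exception:
        encoding = tiktoken.get_encoding("cl100k_base")  # fallback encoding
    return len(encoding.encode(text))

print(count_tokens("Hello, world!"))                  # o200k_base count
print(count_tokens("Hello, world!", "cl100k_base"))   # cl100k_base count
```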
{ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ragaai_catalyst
-Version: 2.1.7.1
+Version: 2.1.7.2
 Summary: RAGA AI CATALYST
 Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>, Tushar Kumar <tushar.kumar@raga.ai>
 Requires-Python: <=3.13.2,>=3.10
@@ -57,8 +57,6 @@ Dynamic: license-file
 
 RagaAI Catalyst is a comprehensive platform designed to enhance the management and optimization of LLM projects. It offers a wide range of features, including project management, dataset management, evaluation management, trace management, prompt management, synthetic data generation, and guardrail management. These functionalities enable you to efficiently evaluate, and safeguard your LLM applications.
 
-
-
 ## Table of Contents
 
 - [RagaAI Catalyst](#ragaai-catalyst)
@@ -149,20 +147,6 @@ dataset_manager.create_from_csv(
     schema_mapping={'column1': 'schema_element1', 'column2': 'schema_element2'}
 )
 
-# Create a dataset from JSONl
-dataset_manager.create_from_jsonl(
-    jsonl_path='jsonl_path',
-    dataset_name='MyDataset',
-    schema_mapping={'column1': 'schema_element1', 'column2': 'schema_element2'}
-)
-
-# Create a dataset from dataframe
-dataset_manager.create_from_df(
-    df=df,
-    dataset_name='MyDataset',
-    schema_mapping={'column1': 'schema_element1', 'column2': 'schema_element2'}
-)
-
 # Get project schema mapping
 dataset_manager.get_schema_mapping()
 
@@ -189,7 +173,6 @@ evaluation = Evaluation(
 evaluation.list_metrics()
 
 # Add metrics to the experiment
-
 schema_mapping={
     'Query': 'prompt',
     'response': 'response',
@@ -303,159 +286,14 @@ tracer = Tracer(
 ```
 
 ```python
+# Enable auto-instrumentation
 from ragaai_catalyst import init_tracing
 init_tracing(catalyst=catalyst, tracer=tracer)
 ```
 
-#### Agentic Tracing Features
-1- add span level metrics
-
-```python
-current_span().add_metrics(name='Accuracy', score=0.5, reasoning='some reasoning')
-```
-
-2- add trace level metrics
-
-```python
-tracer.add_metrics(name='hallucination_1', score=0.5, reasoning='some reasoning')
-```
-
-3- add gt
-
-```python
-current_span().add_gt("This is the ground truth")
-```
-
-4- add context
-
-```python
-current_span().add_context("This is the context")
-```
-
-5- add span level metric execution
-
-```python
-current_span().execute_metrics(
-    name="Hallucination",
-    model="gpt-4o",
-    provider="openai"
-)
-```
-
-#### Example
-```python
-from ragaai_catalyst import trace_llm, trace_tool, trace_agent, current_span
-
-from openai import OpenAI
-
-
-@trace_llm(name="llm_call", tags=["default_llm_call"])
-def llm_call(prompt, max_tokens=512, model="gpt-4o-mini"):
-    client = OpenAI(api_key=OPENAI_API_KEY)
-    response = client.chat.completions.create(
-        model=model,
-        messages=[{"role": "user", "content": prompt}],
-        max_tokens=max_tokens,
-        temperature=0.85,
-    )
-    # Span level context
-    current_span().add_context("name = span level in summary_agent, context = some span level context")
-
-    # Span level execute metrics
-    current_span().execute_metrics(
-        name="Hallucination",
-        model="gpt-4o",
-        provider="openai"
-    )
-    response_data = response.choices[0].message.content.strip()
-    print('response_data: ', response_data)
-    return response_data
-
-class SummaryAgent:
-    def __init__(self, persona="Summary Agent"):
-        self.persona = persona
-
-    @trace_agent(name="summary_agent")
-    def summarize(self, text):
-        prompt = f"Please summarize this text concisely: {text}"
-
-        # Span level metric
-        current_span().add_metrics(name='Accuracy', score=0.5, reasoning='some reasoning')
-
-        # Span level context
-        current_span().add_context("name = span level in summary_agent, context = some span level context")
-
-        summary = llm_call(prompt)
-        return summary
-
-
-class AnalysisAgent:
-    def __init__(self, persona="Analysis Agent"):
-        self.persona = persona
-        self.summary_agent = SummaryAgent()
-
-    @trace_agent(name="analysis_agent")
-    def analyze(self, text):
-        summary = self.summary_agent.summarize(text)
-
-        prompt = f"Given this summary: {summary}\nProvide a brief analysis of the main points."
-
-        # Span level metric
-        current_span().add_metrics(name='correctness', score=0.5, reasoning='some reasoning')
-        analysis = llm_call(prompt)
-
-        return {
-            "summary": summary,
-            "analysis": analysis
-        }
-
-class RecommendationAgent:
-    def __init__(self, persona="Recommendation Agent"):
-        self.persona = persona
-        self.analysis_agent = AnalysisAgent()
-
-    @trace_agent(name="recommendation_agent", tags=['coordinator_agent'])
-    def recommend(self, text):
-        analysis_result = self.analysis_agent.analyze(text)
-
-        prompt = f"""Given this summary: {analysis_result['summary']}
-        And this analysis: {analysis_result['analysis']}
-        Provide 2-3 actionable recommendations."""
-
-        recommendations = llm_call(prompt)
-
-        return {
-            "summary": analysis_result["summary"],
-            "analysis": analysis_result["analysis"],
-            "recommendations": recommendations
-        }
-#Defining agent tracer
-@trace_agent(name="get_recommendation", tags=['coordinator_agent'])
-def get_recommendation(agent, text):
-    recommendation = agent.recommend(text)
-    return recommendation
-
-def main():
-    text = """
-    Artificial Intelligence has transformed various industries in recent years.
-    From healthcare to finance, AI applications are becoming increasingly prevalent.
-    Machine learning models are being used to predict market trends, diagnose diseases,
-    and automate routine tasks. The impact of AI on society continues to grow,
-    raising both opportunities and challenges for the future.
-    """
-
-    recommendation_agent = RecommendationAgent()
-    result = get_recommendation(recommendation_agent, text)
-
-
-    # Trace level metric
-    tracer.add_metrics(name='hallucination_1', score=0.5, reasoning='some reasoning')
-
-    # Run tracer
-    with tracer:
-        main()
-```
 
+For more detailed information on Trace Management, please refer to the [Agentic Tracing Management documentation](docs/agentic_tracing.md).
+
 
 ### Prompt Management
 
{ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/RECORD

@@ -10,7 +10,7 @@ ragaai_catalyst/prompt_manager.py,sha256=W8ypramzOprrJ7-22d5vkBXIuIQ8v9XAzKDGxKs
 ragaai_catalyst/proxy_call.py,sha256=CHxldeceZUaLU-to_hs_Kf1z_b2vHMssLS_cOBedu78,5499
 ragaai_catalyst/ragaai_catalyst.py,sha256=1FaeK_VZpJLQ1ZqEWpMyI8J8M2MI0abLLLDFWY9W-4A,19580
 ragaai_catalyst/redteaming_old.py,sha256=W2d89Ok8W-C8g7TBM3fDIFLof3q9FuYSr0jcryH2XQo,7097
-ragaai_catalyst/synthetic_data_generation.py,sha256=
+ragaai_catalyst/synthetic_data_generation.py,sha256=7lIWa3nwgW2-FlJrDaGxTN6OE4-dbbhLtKNOBQufhho,37952
 ragaai_catalyst/utils.py,sha256=TlhEFwLyRU690HvANbyoRycR3nQ67lxVUQoUOfTPYQ0,3772
 ragaai_catalyst/redteaming/__init__.py,sha256=TJdvZpaZGFsg9qKONdjTosSVLZGadYFpHG6KE0xapKU,155
 ragaai_catalyst/redteaming/evaluator.py,sha256=C50SAc3RsR7PZnz-VQ7wQfDpiVEb7T3W3KV4Lj0tWYE,4599
@@ -85,11 +85,11 @@ ragaai_catalyst/tracers/utils/convert_llama_instru_callback.py,sha256=8qLo7x4Zsn
 ragaai_catalyst/tracers/utils/extraction_logic_llama_index.py,sha256=ZhPs0YhVtB82-Pq9o1BvCinKE_WPvVxPTEcZjlJbFYM,2371
 ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py,sha256=XS2_x2qneqEx9oAighLg-LRiueWcESLwIC2r7eJT-Ww,3117
 ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json,sha256=C3uwkibJ08C9sOX-54kulZYmJlIpZ-SQpfE6HNGrjbM,343502
-ragaai_catalyst/tracers/utils/rag_trace_json_converter.py,sha256=
+ragaai_catalyst/tracers/utils/rag_trace_json_converter.py,sha256=yq1vp1O94xXIX9IUsns5VqsfIDU83Eqn5XzJEJGh-Bs,20637
 ragaai_catalyst/tracers/utils/trace_json_converter.py,sha256=E0_QfciQMMpCtQYrNB4l8HJhlaFalr5bkMqkVRgQahY,14073
 ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
-ragaai_catalyst-2.1.7.1.dist-info/licenses/LICENSE,sha256=
-ragaai_catalyst-2.1.7.1.dist-info/METADATA,sha256=
-ragaai_catalyst-2.1.7.1.dist-info/WHEEL,sha256=
-ragaai_catalyst-2.1.7.1.dist-info/top_level.txt,sha256=
-ragaai_catalyst-2.1.7.1.dist-info/RECORD,,
+ragaai_catalyst-2.1.7.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ragaai_catalyst-2.1.7.2.dist-info/METADATA,sha256=4idPE4h-NhbrCeTAjM4Ud00KQeI7mCjRCIwwsjdE-2M,17605
+ragaai_catalyst-2.1.7.2.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+ragaai_catalyst-2.1.7.2.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ragaai_catalyst-2.1.7.2.dist-info/RECORD,,
{ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/licenses/LICENSE: file without changes
{ragaai_catalyst-2.1.7.1.dist-info → ragaai_catalyst-2.1.7.2.dist-info}/top_level.txt: file without changes