ragaai-catalyst 2.2.4b5__py3-none-any.whl → 2.2.5b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. ragaai_catalyst/__init__.py +0 -2
  2. ragaai_catalyst/dataset.py +59 -1
  3. ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +5 -285
  4. ragaai_catalyst/tracers/agentic_tracing/utils/__init__.py +0 -2
  5. ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +1 -1
  6. ragaai_catalyst/tracers/exporters/__init__.py +1 -2
  7. ragaai_catalyst/tracers/exporters/file_span_exporter.py +0 -1
  8. ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +23 -1
  9. ragaai_catalyst/tracers/tracer.py +6 -186
  10. {ragaai_catalyst-2.2.4b5.dist-info → ragaai_catalyst-2.2.5b2.dist-info}/METADATA +1 -1
  11. {ragaai_catalyst-2.2.4b5.dist-info → ragaai_catalyst-2.2.5b2.dist-info}/RECORD +14 -45
  12. ragaai_catalyst/experiment.py +0 -486
  13. ragaai_catalyst/tracers/agentic_tracing/tests/FinancialAnalysisSystem.ipynb +0 -536
  14. ragaai_catalyst/tracers/agentic_tracing/tests/GameActivityEventPlanner.ipynb +0 -134
  15. ragaai_catalyst/tracers/agentic_tracing/tests/TravelPlanner.ipynb +0 -563
  16. ragaai_catalyst/tracers/agentic_tracing/tests/__init__.py +0 -0
  17. ragaai_catalyst/tracers/agentic_tracing/tests/ai_travel_agent.py +0 -197
  18. ragaai_catalyst/tracers/agentic_tracing/tests/unique_decorator_test.py +0 -172
  19. ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +0 -687
  20. ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +0 -1319
  21. ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +0 -347
  22. ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py +0 -0
  23. ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +0 -1182
  24. ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +0 -288
  25. ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +0 -557
  26. ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +0 -129
  27. ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +0 -74
  28. ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py +0 -21
  29. ragaai_catalyst/tracers/agentic_tracing/utils/generic.py +0 -32
  30. ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +0 -28
  31. ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +0 -133
  32. ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml +0 -34
  33. ragaai_catalyst/tracers/exporters/raga_exporter.py +0 -467
  34. ragaai_catalyst/tracers/langchain_callback.py +0 -821
  35. ragaai_catalyst/tracers/llamaindex_callback.py +0 -361
  36. ragaai_catalyst/tracers/llamaindex_instrumentation.py +0 -424
  37. ragaai_catalyst/tracers/upload_traces.py +0 -170
  38. ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +0 -62
  39. ragaai_catalyst/tracers/utils/convert_llama_instru_callback.py +0 -69
  40. ragaai_catalyst/tracers/utils/extraction_logic_llama_index.py +0 -74
  41. ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +0 -82
  42. ragaai_catalyst/tracers/utils/rag_trace_json_converter.py +0 -403
  43. {ragaai_catalyst-2.2.4b5.dist-info → ragaai_catalyst-2.2.5b2.dist-info}/WHEEL +0 -0
  44. {ragaai_catalyst-2.2.4b5.dist-info → ragaai_catalyst-2.2.5b2.dist-info}/licenses/LICENSE +0 -0
  45. {ragaai_catalyst-2.2.4b5.dist-info → ragaai_catalyst-2.2.5b2.dist-info}/top_level.txt +0 -0
ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py
@@ -1,74 +0,0 @@
- import logging
- import os
- import requests
-
- from ragaai_catalyst import RagaAICatalyst
-
- logger = logging.getLogger(__name__)
- logging_level = (
-     logger.setLevel(logging.DEBUG)
-     if os.getenv("DEBUG")
-     else logger.setLevel(logging.INFO)
- )
-
-
- def calculate_metric(project_id, metric_name, model, provider, **kwargs):
-     user_id = "1"
-     org_domain = "raga"
-
-     headers = {
-         "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
-         "X-Project-Id": str(project_id),
-         "Content-Type": "application/json"
-     }
-
-     payload = {
-         "data": [
-             {
-                 "metric_name": metric_name,
-                 "metric_config": {
-                     "threshold": {
-                         "isEditable": True,
-                         "lte": 0.3
-                     },
-                     "model": model,
-                     "orgDomain": org_domain,
-                     "provider": provider,
-                     "user_id": user_id,
-                     "job_id": 1,
-                     "metric_name": metric_name,
-                     "request_id": 1
-                 },
-                 "variable_mapping": kwargs,
-                 "trace_object": {
-                     "Data": {
-                         "DocId": "doc-1",
-                         "Prompt": kwargs.get("prompt"),
-                         "Response": kwargs.get("response"),
-                         "Context": kwargs.get("context"),
-                         "ExpectedResponse": kwargs.get("expected_response"),
-                         "ExpectedContext": kwargs.get("expected_context"),
-                         "Chat": kwargs.get("chat"),
-                         "Instructions": kwargs.get("instructions"),
-                         "SystemPrompt": kwargs.get("system_prompt"),
-                         "Text": kwargs.get("text")
-                     },
-                     "claims": {},
-                     "last_computed_metrics": {
-                         metric_name: {
-                         }
-                     }
-                 }
-             }
-         ]
-     }
-
-     try:
-         BASE_URL = RagaAICatalyst.BASE_URL
-         response = requests.post(f"{BASE_URL}/v1/llm/calculate-metric", headers=headers, json=payload, timeout=30)
-         logger.debug(f"Metric calculation response status {response.status_code}")
-         response.raise_for_status()
-         return response.json()
-     except requests.exceptions.RequestException as e:
-         logger.debug(f"Error in calculate-metric api: {e}, payload: {payload}")
-         raise Exception(f"Error in calculate-metric: {e}")
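
For context, the hunk above removes the local-metric upload helper that shipped in 2.2.4b5. A minimal usage sketch against that older API follows; the project id, metric name, model, provider, and prompt/response values are illustrative, not taken from the package, and the call requires RAGAAI_CATALYST_TOKEN to be set.

from ragaai_catalyst.tracers.agentic_tracing.upload.upload_local_metric import calculate_metric

# Illustrative values; calculate_metric forwards extra kwargs (prompt,
# response, context, ...) as the variable mapping and trace data.
result = calculate_metric(
    project_id=42,                    # hypothetical project id
    metric_name="hallucination",      # illustrative metric name
    model="gpt-4o-mini",              # illustrative model
    provider="openai",                # illustrative provider
    prompt="What is the capital of France?",
    response="Paris",
    context="France's capital is Paris.",
)
print(result)
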
ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py
@@ -1,21 +0,0 @@
- import requests
- import logging
-
- logger = logging.getLogger(__name__)
-
- def fetch_analysis_trace(base_url, trace_id):
-     """
-     Fetches the analysis trace data from the server.
-
-     :param base_url: The base URL of the server (e.g., "http://localhost:3000").
-     :param trace_id: The ID of the trace to fetch.
-     :return: The JSON response from the server if successful, otherwise None.
-     """
-     try:
-         url = f"{base_url}/api/analysis_traces/{trace_id}"
-         response = requests.get(url)
-         response.raise_for_status()  # Raise an error for bad responses (4xx, 5xx)
-         return response.json()
-     except requests.exceptions.RequestException as e:
-         logger.error(f"Error fetching analysis trace: {e}")
-         return None
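
A minimal sketch of calling the removed fetch_analysis_trace helper, assuming a local server as in its docstring; the trace id is illustrative.

from ragaai_catalyst.tracers.agentic_tracing.utils.api_utils import fetch_analysis_trace

# Illustrative endpoint and trace id; the helper returns None on any request error.
trace = fetch_analysis_trace("http://localhost:3000", "trace-123")
if trace is None:
    print("Request failed; see the logged error")
else:
    print(trace)
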
ragaai_catalyst/tracers/agentic_tracing/utils/generic.py
@@ -1,32 +0,0 @@
- import os
- import logging
-
-
- def get_db_path():
-     db_filename = "trace_data.db"
-
-     # First, try the package directory
-     package_dir = os.path.dirname(os.path.abspath(__file__))
-     public_dir = os.path.join(package_dir, "..", "ui", "dist")
-     package_db_path = os.path.join(public_dir, db_filename)
-
-     # Ensure the directory exists
-     os.makedirs(os.path.dirname(package_db_path), exist_ok=True)
-
-     if os.path.exists(os.path.dirname(package_db_path)):
-         logging.debug(f"Using package database: {package_db_path}")
-         return f"sqlite:///{package_db_path}"
-
-     # Then, try the local directory
-     local_db_path = os.path.join(os.getcwd(), "agentneo", "ui", "dist", db_filename)
-     if os.path.exists(os.path.dirname(local_db_path)):
-         logging.debug(f"Using local database: {local_db_path}")
-         return f"sqlite:///{local_db_path}"
-
-     # Finally, try the local "/dist" directory
-     local_dist_path = os.path.join(os.getcwd(), "dist", db_filename)
-     if os.path.exists(os.path.dirname(local_dist_path)):
-         logging.debug(f"Using local database: {local_dist_path}")
-         return f"sqlite:///{local_dist_path}"
-
-     return f"sqlite:///{package_db_path}"
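
The removed get_db_path returns a SQLAlchemy-style sqlite URL after probing three candidate locations. A short sketch of consuming that URL, assuming SQLAlchemy downstream (an assumption; the diff does not show the actual caller):

from sqlalchemy import create_engine  # assumption: the URL is consumed via SQLAlchemy

from ragaai_catalyst.tracers.agentic_tracing.utils.generic import get_db_path

db_url = get_db_path()           # e.g. "sqlite:///<package_dir>/../ui/dist/trace_data.db"
engine = create_engine(db_url)   # open the resolved trace database
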
ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py
@@ -1,28 +0,0 @@
- import requests
- import os
- from ....ragaai_catalyst import RagaAICatalyst
- from ....dataset import Dataset
-
- def get_user_trace_metrics(project_name, dataset_name):
-     try:
-         list_datasets = Dataset(project_name=project_name).list_datasets()
-         if not list_datasets:
-             return []
-         elif dataset_name not in list_datasets:
-             return []
-         else:
-             headers = {
-                 "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
-                 "X-Project-Name": project_name,
-             }
-             response = requests.request("GET",
-                 f"{RagaAICatalyst.BASE_URL}/v1/llm/trace/metrics?datasetName={dataset_name}",
-                 headers=headers, timeout=10)
-             if response.status_code != 200:
-                 print(f"Error fetching traces metrics: {response.json()['message']}")
-                 return None
-
-             return response.json()["data"]["columns"]
-     except Exception as e:
-         print(f"Error fetching traces metrics: {e}")
-         return None
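
A minimal usage sketch for the removed get_user_trace_metrics helper; the project and dataset names are hypothetical, and RAGAAI_CATALYST_TOKEN must already be set for the underlying API call.

from ragaai_catalyst.tracers.agentic_tracing.utils.get_user_trace_metrics import get_user_trace_metrics

# Hypothetical project/dataset; returns [] if the dataset is missing, None on API errors.
columns = get_user_trace_metrics("my-project", "my-dataset")
if columns:
    print(columns)
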
ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py
@@ -1,133 +0,0 @@
- import os
- from typing import List, Dict, Any, Optional
- import logging
-
- logger = logging.getLogger(__name__)
- logging_level = (
-     logger.setLevel(logging.DEBUG)
-     if os.getenv("DEBUG")
-     else logger.setLevel(logging.INFO)
- )
-
-
- class SpanAttributes:
-     def __init__(self, name, project_id: Optional[int] = None):
-         self.name = name
-         self.tags = []
-         self.metadata = {}
-         self.metrics = []
-         self.local_metrics = []
-         self.feedback = None
-         self.project_id = project_id
-         self.trace_attributes = ["tags", "metadata", "metrics"]
-         self.gt = None
-         self.context = None
-
-     def add_tags(self, tags: str | List[str]):
-         if isinstance(tags, str):
-             tags = [tags]
-         self.tags.extend(tags)
-         logger.debug(f"Added tags: {tags}")
-
-     def add_metadata(self, metadata):
-         self.metadata.update(metadata)
-         logger.debug(f"Added metadata: {metadata}")
-
-     def add_metrics(
-         self,
-         name: str,
-         score: float | int,
-         reasoning: str = "",
-         cost: float = None,
-         latency: float = None,
-         metadata: Dict[str, Any] = {},
-         config: Dict[str, Any] = {},
-     ):
-         self.metrics.append(
-             {
-                 "name": name,
-                 "score": score,
-                 "reason": reasoning,
-                 "source": "user",
-                 "cost": cost,
-                 "latency": latency,
-                 "metadata": metadata,
-                 "mappings": [],
-                 "config": config,
-             }
-         )
-         logger.debug(f"Added metrics: {self.metrics}")
-
-     def add_feedback(self, feedback: Any):
-         self.feedback = feedback
-         logger.debug(f"Added feedback: {self.feedback}")
-
-     # TODO: Add validation to check if all the required parameters are present
-     def execute_metrics(self, **kwargs: Any):
-         name = kwargs.get("name")
-         model = kwargs.get("model")
-         provider = kwargs.get("provider")
-         display_name = kwargs.get("display_name", None)
-         mapping = kwargs.get("mapping", None)
-
-         if isinstance(name, str):
-             metrics = [{
-                 "name": name
-             }]
-         else:
-             metrics = name if isinstance(name, list) else [name] if isinstance(name, dict) else []
-
-         for metric in metrics:
-             if not isinstance(metric, dict):
-                 logger.error(f"Expected dict, got {type(metric)}")
-                 continue
-
-             if "name" not in metric:
-                 logger.error("Metric must contain 'name'")
-                 continue
-
-             metric_name = metric["name"]
-             if metric_name in self.local_metrics:
-                 count = sum(1 for m in self.local_metrics if m.startswith(metric_name))
-                 metric_name = f"{metric_name}_{count + 1}"
-
-             prompt =None
-             context = None
-             response = None
-             # if mapping is not None:
-             #     prompt = mapping['prompt']
-             #     context = mapping['context']
-             #     response = mapping['response']
-             new_metric = {
-                 "name": metric_name,
-                 "model": model,
-                 "provider": provider,
-                 "project_id": self.project_id,
-                 # "prompt": prompt,
-                 # "context": context,
-                 # "response": response,
-                 "displayName": display_name,
-                 "mapping": mapping
-             }
-             self.local_metrics.append(new_metric)
-
-     def add_gt(self, gt: Any):
-         if not isinstance(gt, (str, int, float, bool, list, dict)):
-             logger.error(f"Unsupported type for gt: {type(gt)}")
-             return
-         if self.gt:
-             logger.warning(f"GT already exists: {self.gt} \n Overwriting...")
-         self.gt = gt
-         logger.debug(f"Added gt: {self.gt}")
-
-     def add_context(self, context: Any):
-         if isinstance(context, str):
-             if not context.strip():
-                 logger.warning("Empty or whitespace-only context string provided")
-             self.context = str(context)
-         else:
-             try:
-                 self.context = str(context)
-             except Exception as e:
-                 logger.warning('Cannot cast the context to string... Skipping')
-         logger.debug(f"Added context: {self.context}")
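
A short sketch of the SpanAttributes API removed above, using illustrative span, tag, and metric values taken only from the method signatures shown in the hunk:

from ragaai_catalyst.tracers.agentic_tracing.utils.span_attributes import SpanAttributes

span = SpanAttributes("retrieval-span", project_id=42)   # illustrative span name / project id
span.add_tags(["rag", "production"])
span.add_metadata({"pipeline": "v2"})
span.add_metrics(name="faithfulness", score=0.87, reasoning="grounded in context")
span.add_gt("Paris")
span.add_context("France's capital is Paris.")
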
ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml
@@ -1,34 +0,0 @@
- # List of all supported LLM method calls
-
- supported_llm_calls = [
-     # OpenAI
-     "OpenAI.chat.completions.create()",
-     "AsyncOpenAI.chat.completions.create()",
-
-     # OpenAI Beta
-     "OpenAI.beta.threads.create()",
-     "OpenAI.beta.threads.messages.create()",
-     "OpenAI.beta.threads.runs.create()",
-
-     # Anthropic
-     "Anthropic.messages.create()",
-     "Anthropic.messages.acreate()",
-
-     # Google VertexAI/PaLM
-     "GenerativeModel.generate_content()",
-     "GenerativeModel.generate_content_async()",
-     "ChatVertexAI._generate()",
-     "ChatVertexAI._agenerate()",
-     "ChatVertexAI.complete()",
-     "ChatVertexAI.acomplete()",
-
-     # Google GenerativeAI
-     "ChatGoogleGenerativeAI._generate()",
-     "ChatGoogleGenerativeAI._agenerate()",
-     "ChatGoogleGenerativeAI.complete()",
-     "ChatGoogleGenerativeAI.acomplete()",
-
-     # LiteLLM
-     "litellm.completion()",
-     "litellm.acompletion()"
- ]
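
The removed TOML file enumerates the LLM call signatures the legacy tracer instrumented. A sketch of reading it with Python 3.11's tomllib; this is an assumption about how such a file could be consumed, not what the tracer actually did:

import tomllib  # stdlib in Python 3.11+; assumption, not the tracer's own loader

with open("supported_llm_provider.toml", "rb") as f:
    supported = set(tomllib.load(f)["supported_llm_calls"])

print("litellm.completion()" in supported)  # True
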