ragaai-catalyst 2.0.5__py3-none-any.whl → 2.1b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,6 +6,7 @@ from .dataset import Dataset
6
6
  from .prompt_manager import PromptManager
7
7
  from .evaluation import Evaluation
8
8
  from .synthetic_data_generation import SyntheticDataGeneration
9
+ from .guardrails_manager import GuardrailsManager
9
10
 
10
11
 
11
- __all__ = ["Experiment", "RagaAICatalyst", "Tracer", "PromptManager", "Evaluation","SyntheticDataGeneration"]
12
+ __all__ = ["Experiment", "RagaAICatalyst", "Tracer", "PromptManager", "Evaluation","SyntheticDataGeneration", "GuardrailsManager"]
@@ -99,82 +99,71 @@ class Dataset:
99
99
  raise
100
100
 
101
101
  def get_schema_mapping(self):
102
- return ["traceid", "prompt", "context", "response", "expected_response", "expected_context", "timestamp", "metadata", "pipeline", "cost", "feedBack", "latency", "sanitized_response", "system_prompt", "traceUri"]
103
-
104
- def create_from_trace(self, dataset_name, filter_list):
105
- """
106
- Creates a new dataset with the given `dataset_name` and `filter_list`.
107
-
108
- Args:
109
- dataset_name (str): The name of the dataset to be created.
110
- filter_list (list): A list of filters to be applied to the dataset.
111
-
112
- Returns:
113
- str: A message indicating the success of the dataset creation and the name of the created dataset.
114
-
115
- Raises:
116
- None
117
-
118
- """
119
-
120
- def request_trace_creation():
121
- headers = {
122
- "Content-Type": "application/json",
123
- "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
124
- "X-Project-Name": self.project_name,
125
- }
126
- json_data = {
127
- "projectName": self.project_name,
128
- "subDatasetName": dataset_name,
129
- "filterList": filter_list,
130
- }
131
- try:
132
- response = requests.post(
133
- f"{Dataset.BASE_URL}/v1/llm/sub-dataset",
134
- headers=headers,
135
- json=json_data,
136
- timeout=Dataset.TIMEOUT,
137
- )
138
- response.raise_for_status()
139
- return response
140
- except requests.exceptions.RequestException as e:
141
- logger.error(f"Failed to create dataset from trace: {e}")
142
- raise
143
-
102
+ headers = {
103
+ "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
104
+ "X-Project-Name": self.project_name,
105
+ }
144
106
  try:
145
- response = request_trace_creation()
146
- response_checker(response, "Dataset.create_dataset")
147
- if response.status_code == 401:
148
- get_token() # Fetch a new token and set it in the environment
149
- response = request_trace_creation() # Retry the request
150
- if response.status_code != 200:
151
- return response.json()["message"]
152
- message = response.json()["message"]
153
- return f"{message} {dataset_name}"
154
- except Exception as e:
155
- logger.error(f"Error in create_from_trace: {e}")
107
+ response = requests.get(
108
+ f"{Dataset.BASE_URL}/v1/llm/schema-elements",
109
+ headers=headers,
110
+ timeout=Dataset.TIMEOUT,
111
+ )
112
+ response.raise_for_status()
113
+ response_data = response.json()["data"]["schemaElements"]
114
+ if not response.json()['success']:
115
+ raise ValueError('Unable to fetch Schema Elements for the CSV')
116
+ return response_data
117
+ except requests.exceptions.RequestException as e:
118
+ logger.error(f"Failed to get CSV schema: {e}")
156
119
  raise
157
120
 
158
121
  ###################### CSV Upload APIs ###################
159
122
 
160
- def get_csv_schema(self):
123
+ def get_dataset_columns(self, dataset_name):
124
+ list_dataset = self.list_datasets()
125
+ if dataset_name not in list_dataset:
126
+ raise ValueError(f"Dataset {dataset_name} does not exists. Please enter a valid dataset name")
127
+
161
128
  headers = {
162
129
  "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
163
130
  "X-Project-Name": self.project_name,
164
131
  }
132
+ headers = {
133
+ 'Content-Type': 'application/json',
134
+ "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
135
+ "X-Project-Id": str(self.project_id),
136
+ }
137
+ json_data = {"size": 12, "page": "0", "projectId": str(self.project_id), "search": ""}
138
+ try:
139
+ response = requests.post(
140
+ f"{Dataset.BASE_URL}/v2/llm/dataset",
141
+ headers=headers,
142
+ json=json_data,
143
+ timeout=Dataset.TIMEOUT,
144
+ )
145
+ response.raise_for_status()
146
+ datasets = response.json()["data"]["content"]
147
+ dataset_id = [dataset["id"] for dataset in datasets if dataset["name"]==dataset_name][0]
148
+ except requests.exceptions.RequestException as e:
149
+ logger.error(f"Failed to list datasets: {e}")
150
+ raise
151
+
165
152
  try:
166
153
  response = requests.get(
167
- f"{Dataset.BASE_URL}/v1/llm/schema-elements",
154
+ f"{Dataset.BASE_URL}/v2/llm/dataset/{dataset_id}?initialCols=0",
168
155
  headers=headers,
169
156
  timeout=Dataset.TIMEOUT,
170
157
  )
171
158
  response.raise_for_status()
172
- response_data = response.json()
173
- if not response_data['success']:
174
- raise ValueError('Unable to fetch Schema Elements for the CSV')
175
- return response_data
159
+ dataset_columns = response.json()["data"]["datasetColumnsResponses"]
160
+ dataset_columns = [item["displayName"] for item in dataset_columns]
161
+ dataset_columns = [data for data in dataset_columns if not data.startswith('_')]
162
+ if not response.json()['success']:
163
+ raise ValueError('Unable to fetch details of for the CSV')
164
+ return dataset_columns
176
165
  except requests.exceptions.RequestException as e:
177
- logger.error(f"Failed to get CSV schema: {e}")
166
+ logger.error(f"Failed to get CSV columns: {e}")
178
167
  raise
179
168
 
180
169
  def create_from_csv(self, csv_path, dataset_name, schema_mapping):
@@ -97,14 +97,45 @@ class Evaluation:
97
97
  logger.error(f"An unexpected error occurred: {e}")
98
98
  return []
99
99
 
100
- def _get_dataset_schema(self):
100
+ def _get_dataset_id_based_on_dataset_type(self, metric_to_evaluate):
101
+ try:
102
+ headers = {
103
+ 'Content-Type': 'application/json',
104
+ "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
105
+ "X-Project-Id": str(self.project_id),
106
+ }
107
+ json_data = {"size": 12, "page": "0", "projectId": str(self.project_id), "search": ""}
108
+ response = requests.post(
109
+ f"{self.base_url}/v2/llm/dataset",
110
+ headers=headers,
111
+ json=json_data,
112
+ timeout=self.timeout,
113
+ )
114
+
115
+ response.raise_for_status()
116
+ datasets_content = response.json()["data"]["content"]
117
+ dataset = [dataset for dataset in datasets_content if dataset["name"]==self.dataset_name][0]
118
+ if (dataset["datasetType"]=="prompt" and metric_to_evaluate=="prompt") or (dataset["datasetType"]=="chat" and metric_to_evaluate=="chat") or dataset["datasetType"]==None:
119
+ return dataset["id"]
120
+ else:
121
+ return dataset["derivedDatasetId"]
122
+ except requests.exceptions.RequestException as e:
123
+ logger.error(f"Failed to retrieve dataset list: {e}")
124
+ raise
125
+
126
+
127
+ def _get_dataset_schema(self, metric_to_evaluate=None):
128
+ #this dataset_id is based on which type of metric_to_evaluate
129
+ data_set_id=self._get_dataset_id_based_on_dataset_type(metric_to_evaluate)
130
+ self.dataset_id=data_set_id
131
+
101
132
  headers = {
102
133
  "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
103
134
  'Content-Type': 'application/json',
104
135
  'X-Project-Id': str(self.project_id),
105
136
  }
106
137
  data = {
107
- "datasetId": str(self.dataset_id),
138
+ "datasetId": str(data_set_id),
108
139
  "fields": [],
109
140
  "rowFilterList": []
110
141
  }
@@ -129,29 +160,9 @@ class Evaluation:
129
160
  logger.error(f"An unexpected error occurred: {e}")
130
161
  return {}
131
162
 
132
- def _get_variablename_from_dataset_schema(self, schemaName, metric_name):
133
- # pdb.set_trace()
134
- # print(schemaName)
135
- dataset_schema = self._get_dataset_schema()
136
- variableName = None
137
- for column in dataset_schema:
138
- columnName = column["columnType"]
139
- displayName = column["displayName"]
140
- # print(columnName, displayName)
141
- if "".join(columnName.split("_")).lower() == schemaName.lower():
142
- variableName = displayName
143
- break
144
- return variableName
145
- # print(variableName)
146
- # if variableName:
147
- # return variableName
148
- # else:
149
- # raise ValueError(f"'{schemaName}' column is required for {metric_name} metric evaluation, but not found in dataset")
150
-
151
-
152
- def _get_variablename_from_user_schema_mapping(self, schemaName, metric_name, schema_mapping):
153
- # pdb.set_trace()
154
- user_dataset_schema = self._get_dataset_schema()
163
+
164
+ def _get_variablename_from_user_schema_mapping(self, schemaName, metric_name, schema_mapping, metric_to_evaluate):
165
+ user_dataset_schema = self._get_dataset_schema(metric_to_evaluate)
155
166
  user_dataset_columns = [item["displayName"] for item in user_dataset_schema]
156
167
  variableName = None
157
168
  for key, val in schema_mapping.items():
@@ -159,7 +170,7 @@ class Evaluation:
159
170
  if key in user_dataset_columns:
160
171
  variableName=key
161
172
  else:
162
- raise ValueError(f"Column '{key}' is not present in {self.dataset_name}")
173
+ raise ValueError(f"Column '{key}' is not present in '{self.dataset_name}' dataset")
163
174
  if variableName:
164
175
  return variableName
165
176
  else:
@@ -172,10 +183,17 @@ class Evaluation:
172
183
  for schema in metrics_schema:
173
184
  if schema["name"]==metric_name:
174
185
  requiredFields = schema["config"]["requiredFields"]
186
+
187
+ #this is added to check if "Chat" column is required for metric evaluation
188
+ required_variables = [_["name"].lower() for _ in requiredFields]
189
+ if "chat" in required_variables:
190
+ metric_to_evaluate = "chat"
191
+ else:
192
+ metric_to_evaluate = "prompt"
193
+
175
194
  for field in requiredFields:
176
195
  schemaName = field["name"]
177
- # variableName = self._get_variablename_from_dataset_schema(schemaName, metric_name)
178
- variableName = self._get_variablename_from_user_schema_mapping(schemaName.lower(), metric_name, schema_mapping)
196
+ variableName = self._get_variablename_from_user_schema_mapping(schemaName.lower(), metric_name, schema_mapping, metric_to_evaluate)
179
197
  mapping.append({"schemaName": schemaName, "variableName": variableName})
180
198
  return mapping
181
199
 
@@ -223,7 +241,6 @@ class Evaluation:
223
241
  return []
224
242
 
225
243
  def _update_base_json(self, metrics):
226
- metric_schema_mapping = {"datasetId":self.dataset_id}
227
244
  metrics_schema_response = self._get_metrics_schema_response()
228
245
  sub_providers = ["openai","azure","gemini","groq"]
229
246
  metricParams = []
@@ -253,6 +270,7 @@ class Evaluation:
253
270
  mappings = self._get_mapping(metric["name"], metrics_schema_response, metric["schema_mapping"])
254
271
  base_json["metricSpec"]["config"]["mappings"] = mappings
255
272
  metricParams.append(base_json)
273
+ metric_schema_mapping = {"datasetId":self.dataset_id}
256
274
  metric_schema_mapping["metricParams"] = metricParams
257
275
  return metric_schema_mapping
258
276
 
@@ -0,0 +1,233 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ from .ragaai_catalyst import RagaAICatalyst
5
+
6
+
7
class GuardrailsManager:
    """Client for managing guardrail deployments of a RagaAI Catalyst project."""

    def __init__(self, project_name):
        """
        Initialize the GuardrailsManager with the given project name.

        :param project_name: The name of the project to manage guardrails for.
        :raises ValueError: If the project does not exist on the platform.
        """
        self.project_name = project_name
        self.timeout = 10            # seconds, applied to every HTTP request
        self.num_projects = 100      # page size used when listing deployments
        self.deployment_name = "NA"  # populated by create_deployment()
        self.deployment_id = "NA"    # populated by create_deployment()
        self.base_url = f"{RagaAICatalyst.BASE_URL}"
        list_projects, project_name_with_id = self._get_project_list()
        if project_name not in list_projects:
            raise ValueError(f"Project '{self.project_name}' does not exist")

        self.project_id = [_["id"] for _ in project_name_with_id if _["name"] == self.project_name][0]

    def _get_project_list(self):
        """
        Retrieve the list of projects and their IDs from the API.

        :return: A tuple (list of project names, list of {"id", "name"} dicts).
        """
        headers = {'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}'}
        response = requests.get(
            f"{self.base_url}/v2/llm/projects?size=12&page=0",
            headers=headers, timeout=self.timeout)
        # Fail fast with a clear HTTPError instead of a KeyError on a bad body.
        response.raise_for_status()
        project_content = response.json()["data"]["content"]
        list_project = [_["name"] for _ in project_content]
        project_name_with_id = [{"id": _["id"], "name": _["name"]} for _ in project_content]
        return list_project, project_name_with_id

    def list_deployment_ids(self):
        """
        List all deployment IDs and their names for the current project.

        :return: A list of dictionaries containing deployment IDs and names.
        """
        headers = {
            'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
            'X-Project-Id': str(self.project_id)
        }
        response = requests.get(
            f"{self.base_url}/guardrail/deployment?size={self.num_projects}&page=0&sort=lastUsedAt,desc",
            headers=headers, timeout=self.timeout)
        response.raise_for_status()
        deployment_ids_content = response.json()["data"]["content"]
        return [{"id": _["id"], "name": _["name"]} for _ in deployment_ids_content]

    def get_deployment(self, deployment_id):
        """
        Get details of a specific deployment ID, including its name and guardrails.

        :param deployment_id: The ID of the deployment to retrieve details for.
        :return: A dict with keys "deployment_name" and "guardrails_list".
        """
        headers = {
            'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
            'X-Project-Id': str(self.project_id)
        }
        response = requests.get(
            f"{self.base_url}/guardrail/deployment/{deployment_id}",
            headers=headers, timeout=self.timeout)
        response.raise_for_status()
        data = response.json()["data"]
        guardrails_list_deployment_id = [{_["type"]: _["name"]} for _ in data["guardrailsResponse"]]
        return {"deployment_name": data["name"], "guardrails_list": guardrails_list_deployment_id}

    def list_guardrails(self):
        """
        List all available guardrails for the current project.

        :return: A list of guardrail names.
        """
        headers = {
            'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
            'X-Project-Id': str(self.project_id)
        }
        response = requests.get(
            f"{self.base_url}/v1/llm/llm-metrics?category=Guardrail",
            headers=headers, timeout=self.timeout)
        response.raise_for_status()
        list_guardrails_content = response.json()["data"]["metrics"]
        return [_["name"] for _ in list_guardrails_content]

    def list_fail_condition(self):
        """
        List all fail conditions for the current project's deployments.

        :return: A list of fail conditions.
        """
        headers = {
            'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
            'X-Project-Id': str(self.project_id)
        }
        response = requests.get(
            f"{self.base_url}/guardrail/deployment/configurations",
            headers=headers, timeout=self.timeout)
        response.raise_for_status()
        return response.json()["data"]

    def create_deployment(self, deployment_name):
        """
        Create a new deployment with the given name and remember its id.

        :param deployment_name: The name of the new deployment.
        :raises ValueError: If a deployment with the given name already exists.
        """
        self.deployment_name = deployment_name
        list_deployment_ids = self.list_deployment_ids()
        list_deployment_names = [_["name"] for _ in list_deployment_ids]
        if deployment_name in list_deployment_names:
            raise ValueError(f"Deployment with '{deployment_name}' already exists, choose a unique name")

        payload = json.dumps({"name": str(deployment_name)})
        headers = {
            'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
            'Content-Type': 'application/json',
            'X-Project-Id': str(self.project_id)
        }
        response = requests.post(
            f"{self.base_url}/guardrail/deployment",
            headers=headers, data=payload, timeout=self.timeout)
        if response.status_code == 409:
            raise ValueError(f"Data with '{deployment_name}' already exists, choose a unique name")
        if response.json()["success"]:
            print(response.json()["message"])
            # Re-fetch to learn the id the platform assigned to the new deployment.
            deployment_ids = self.list_deployment_ids()
            self.deployment_id = [_["id"] for _ in deployment_ids if _["name"] == self.deployment_name][0]
        else:
            print(response)

    def add_guardrails(self, guardrails, guardrails_config=None):
        """
        Add guardrails to the current deployment.

        :param guardrails: A list of guardrail dicts ("name", "type", optional
            "threshold", "isHighRisk", "isActive") to add.
        :param guardrails_config: Optional configuration settings for the guardrails.
        :raises ValueError: If a guardrail name already exists or a type is invalid.
        """
        # Avoid the mutable-default-argument pitfall; treat None as "no config".
        if guardrails_config is None:
            guardrails_config = {}

        # BUG FIX: get_deployment returns a dict, not a tuple. The original
        # tuple-unpacked it, which bound the dict KEYS (two strings) and then
        # crashed calling .values() on single characters.
        deployment = self.get_deployment(self.deployment_id)
        guardrails_type_name_exists = [list(d.values())[0] for d in deployment["guardrails_list"]]
        user_guardrails_name_list = [_["name"] for _ in guardrails]
        for g_name in user_guardrails_name_list:
            if g_name in guardrails_type_name_exists:
                # Quote placement fixed: only the name is quoted, not the message.
                raise ValueError(f"Guardrail with '{g_name}' already exists, choose a unique name")

        # Checking if guardrails type is correct or not
        available_guardrails_list = self.list_guardrails()
        user_guardrails_type_list = [_["type"] for _ in guardrails]
        for g_type in user_guardrails_type_list:
            if g_type not in available_guardrails_list:
                raise ValueError(f"Guardrail type '{g_type}' does not exist, choose a correct type")

        payload = self._get_guardrail_config_payload(guardrails_config)
        payload["guardrails"] = self._get_guardrail_list_payload(guardrails)
        headers = {
            'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
            'Content-Type': 'application/json',
            'X-Project-Id': str(self.project_id)
        }
        response = requests.post(
            f"{self.base_url}/guardrail/deployment/{str(self.deployment_id)}/configure",
            headers=headers, data=json.dumps(payload),
            timeout=self.timeout)  # the original call had no timeout
        if response.json()["success"]:
            print(response.json()["message"])

    def _get_guardrail_config_payload(self, guardrails_config):
        """
        Construct the payload for guardrail configuration.

        :param guardrails_config: Configuration settings for the guardrails.
        :return: A dictionary representing the guardrail configuration payload.
        """
        alternate_response = guardrails_config.get("alternateResponse", "This is the Alternate Response")
        return {
            "isActive": guardrails_config.get("isActive", False),
            "guardrailFailConditions": guardrails_config.get("guardrailFailConditions", ["FAIL"]),
            "deploymentFailCondition": guardrails_config.get("deploymentFailCondition", "ONE_FAIL"),
            "failAction": {
                "action": "ALTERNATE_RESPONSE",
                # json.dumps escapes quotes/backslashes correctly, unlike the
                # hand-built f-string it replaces.
                "args": json.dumps({"alternateResponse": alternate_response})
            },
            "guardrails": []
        }

    def _get_guardrail_list_payload(self, guardrails):
        """
        Construct the payload for a list of guardrails.

        :param guardrails: A list of guardrails to include in the payload.
        :return: A list of dictionaries representing each guardrail's data.
        """
        return [self._get_one_guardrail_data(guardrail) for guardrail in guardrails]

    def _get_one_guardrail_data(self, guardrail):
        """
        Construct the data for a single guardrail.

        :param guardrail: A dictionary containing the guardrail's attributes.
        :return: A dictionary representing the guardrail's data.
        """
        # Tolerate a missing "threshold" key (falls through to the default below).
        threshold = guardrail.get("threshold", {})
        data = {
            "name": guardrail["name"],
            "type": guardrail["type"],
            "isHighRisk": guardrail.get("isHighRisk", False),
            "isActive": guardrail.get("isActive", False),
            "threshold": {}
        }
        # Exactly one bound is honoured, in priority order lte > gte > eq;
        # with no bound given, default to gte 0.0.
        if "lte" in threshold:
            data["threshold"]["lte"] = threshold["lte"]
        elif "gte" in threshold:
            data["threshold"]["gte"] = threshold["gte"]
        elif "eq" in threshold:
            data["threshold"]["eq"] = threshold["eq"]
        else:
            data["threshold"]["gte"] = 0.0
        return data

    def _run(self, **kwargs):
        """
        Execute the guardrail checks with the provided variables.
        """
@@ -0,0 +1,83 @@
1
+ import requests
2
+ import json
3
+ import subprocess
4
+ import logging
5
+ import traceback
6
+ import pandas as pd
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
def api_completion(messages, model_config, kwargs):
    """
    Send a chat-completion request to an internal LLM proxy and parse the
    reply into a DataFrame.

    The model is expected to answer with JSON; if the reply is not valid JSON
    the request is retried, up to 3 attempts in total.

    :param messages: List of chat messages ({"role", "content"} dicts).
    :param model_config: Model settings; may carry 'job_id' and 'log_level'.
    :param kwargs: Expects 'internal_llm_proxy' (proxy URL) and optional 'user_id'.
    :return: pandas.DataFrame built from the model's JSON reply.
    :raises ValueError: On any transport, HTTP, API-error, or repeated-parse failure.
    """
    user_id = kwargs.get('user_id', '1')
    internal_llm_proxy = kwargs.get('internal_llm_proxy', -1)
    job_id = model_config.get('job_id', -1)

    # Loop invariants hoisted out of the retry loop (the original rebuilt the
    # payload and headers on every attempt).
    converted_message = convert_input(messages, model_config, user_id)
    payload = json.dumps(converted_message)
    headers = {
        'Content-Type': 'application/json',
        # 'Wd-PCA-Feature-Key':f'your_feature_key, $(whoami)'
    }

    attempts = 0
    while attempts < 3:
        try:
            # timeout added so a dead proxy cannot hang the caller forever
            response = requests.request("POST", internal_llm_proxy, headers=headers,
                                        data=payload, timeout=60)
            if model_config.get('log_level', '') == 'debug':
                logger.info(f'Model response Job ID {job_id} {response.text}')
            if response.status_code != 200:
                raise ValueError(str(response.text))

            response_json = response.json()
            if "error" in response_json:
                raise ValueError(response_json["error"]["message"])
            result = response_json["choices"][0]["message"]["content"]
            cleaned = result.replace('\n', '')
            try:
                return pd.DataFrame(json.loads(cleaned))
            except json.JSONDecodeError:
                attempts += 1  # retry when the model reply is not valid JSON
                if attempts == 3:
                    raise Exception("Failed to generate a valid response after multiple attempts.")
        except Exception as e:
            # Chain the original cause for debugging instead of discarding it.
            raise ValueError(f"{e}") from e
51
+
52
+
53
def get_username():
    """Return the raw stdout of the `whoami` command for the current process user."""
    completed = subprocess.run(['whoami'], capture_output=True, text=True)
    return completed.stdout
57
+
58
+
59
def convert_input(messages, model_config, user_id):
    """Build the proxy request body: model-config fields plus messages and user id."""
    request_body = {"model": model_config.get('model')}
    request_body.update(model_config)
    request_body["messages"] = messages
    request_body["user_id"] = user_id
    return request_body
67
+
68
+
69
if __name__ == '__main__':
    # Manual smoke test: route a sample chat request through the internal LLM proxy.
    messages = [
        {"role": "system", "content": "you are a poet well versed in shakespeare literature"},
        {"role": "user", "content": "write a poem on pirates and penguins"},
    ]
    kwargs = {"internal_llm_proxy": "http://13.200.11.66:4000/chat/completions", "user_id": 1}
    model_config = {"model": "workday_gateway", "provider": "openai", "max_tokens": 10}
    answer = api_completion(messages, model_config, kwargs)
    print(answer)
@@ -23,7 +23,7 @@ def api_completion(model,messages, api_base='http://127.0.0.1:8000',
23
23
  if model_config.get('log_level','')=='debug':
24
24
  logger.info(f'Model response Job ID {job_id} {response.text}')
25
25
  if response.status_code!=200:
26
- logger.error(f'Error in model response Job ID {job_id}:',str(response.text))
26
+ # logger.error(f'Error in model response Job ID {job_id}:',str(response.text))
27
27
  raise ValueError(str(response.text))
28
28
  except Exception as e:
29
29
  logger.error(f'Error in calling api Job ID {job_id}:',str(e))