rag-sentinel 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rag_sentinel/__init__.py CHANGED
@@ -20,5 +20,5 @@ Author: RAGSentinel Team
20
20
  License: MIT
21
21
  """
22
22
 
23
- __version__ = "0.1.0"
23
+ __version__ = "0.1.2"
24
24
 
rag_sentinel/cli.py CHANGED
@@ -20,7 +20,9 @@ import socket
20
20
  import subprocess
21
21
  import time
22
22
  import argparse
23
+ import configparser
23
24
  from pathlib import Path
25
+ from urllib.parse import urlparse
24
26
 
25
27
 
26
28
  # =============================================================================
@@ -34,6 +36,39 @@ TEMPLATES_DIR = Path(__file__).parent / "templates"
34
36
  # Helper Functions
35
37
  # =============================================================================
36
38
 
39
+ def get_mlflow_host_port():
40
+ """
41
+ Read MLflow tracking_uri from config.ini and parse host and port.
42
+
43
+ Returns:
44
+ tuple: (host, port) - defaults to ("127.0.0.1", 5000) if not configured
45
+ """
46
+ default_host = "127.0.0.1"
47
+ default_port = 5000
48
+
49
+ config_path = Path("config.ini")
50
+ if not config_path.exists():
51
+ return default_host, default_port
52
+
53
+ try:
54
+ ini = configparser.ConfigParser()
55
+ ini.read(config_path)
56
+
57
+ tracking_uri = ini.get("mlflow", "tracking_uri", fallback=None)
58
+ if not tracking_uri:
59
+ return default_host, default_port
60
+
61
+ # Parse the URI (e.g., "http://192.168.1.100:5000")
62
+ parsed = urlparse(tracking_uri)
63
+
64
+ host = parsed.hostname or default_host
65
+ port = parsed.port or default_port
66
+
67
+ return host, port
68
+ except Exception:
69
+ return default_host, default_port
70
+
71
+
37
72
  def is_port_in_use(host, port):
38
73
  """
39
74
  Check if a port is already in use.
@@ -49,7 +84,7 @@ def is_port_in_use(host, port):
49
84
  return s.connect_ex((host, port)) == 0
50
85
 
51
86
 
52
- def start_mlflow_server(host="127.0.0.1", port=5001):
87
+ def start_mlflow_server(host, port):
53
88
  """
54
89
  Start MLflow tracking server as a background process.
55
90
 
@@ -57,8 +92,8 @@ def start_mlflow_server(host="127.0.0.1", port=5001):
57
92
  will skip starting a new instance.
58
93
 
59
94
  Args:
60
- host: The hostname to bind the server to (default: "127.0.0.1")
61
- port: The port number for the server (default: 5001)
95
+ host: The hostname to bind the server to
96
+ port: The port number for the server
62
97
 
63
98
  Returns:
64
99
  subprocess.Popen or None: The server process, or None if already running
@@ -144,8 +179,9 @@ def cmd_run(args):
144
179
 
145
180
  This command:
146
181
  1. Validates that all required config files exist
147
- 2. Starts MLflow server (unless --no-server is specified)
148
- 3. Runs the evaluation using the evaluator module
182
+ 2. Reads MLflow host/port from config.ini
183
+ 3. Starts MLflow server (unless --no-server is specified)
184
+ 4. Runs the evaluation using the evaluator module
149
185
 
150
186
  Args:
151
187
  args: Parsed command-line arguments (includes --no-server flag)
@@ -161,9 +197,10 @@ def cmd_run(args):
161
197
  print("\nRun 'rag-sentinel init' first to create config files.")
162
198
  sys.exit(1)
163
199
 
164
- # Start MLflow server if not disabled
200
+ # Start MLflow server if not disabled (uses host/port from config.ini)
165
201
  if not args.no_server:
166
- start_mlflow_server()
202
+ host, port = get_mlflow_host_port()
203
+ start_mlflow_server(host, port)
167
204
 
168
205
  # Import and run the evaluation
169
206
  from rag_sentinel.evaluator import run_evaluation
rag_sentinel/evaluator.py CHANGED
@@ -15,8 +15,16 @@ import pandas as pd
15
15
  import mlflow
16
16
  from dotenv import load_dotenv
17
17
  from datasets import Dataset
18
- from ragas import evaluate, RunConfig
19
- from ragas.metrics import faithfulness, answer_relevancy, context_precision, answer_correctness
18
+ from ragas import evaluate
19
+ from ragas.run_config import RunConfig
20
+ from ragas.metrics import (
21
+ Faithfulness,
22
+ AnswerRelevancy,
23
+ ContextPrecision,
24
+ AnswerCorrectness,
25
+ )
26
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings, AzureChatOpenAI, AzureOpenAIEmbeddings
27
+ from langchain_ollama import ChatOllama, OllamaEmbeddings
20
28
 
21
29
 
22
30
  # =============================================================================
@@ -24,236 +32,196 @@ from ragas.metrics import faithfulness, answer_relevancy, context_precision, ans
24
32
  # =============================================================================
25
33
 
26
34
 
27
- def resolve_placeholder(value, env_vars, ini_config):
35
+ def load_config(yaml_file='rag_eval_config.yaml'):
28
36
  """
29
- Resolve ${ENV:...} and ${INI:...} placeholders in a string value.
30
-
31
- Args:
32
- value: String that may contain placeholders
33
- env_vars: Dictionary of environment variables
34
- ini_config: ConfigParser object with ini file contents
35
-
36
- Returns:
37
- str: Value with all placeholders resolved
38
- """
39
- if not isinstance(value, str):
40
- return value
41
-
42
- # Resolve ${ENV:VAR_NAME} - reads from environment variables
43
- env_pattern = r'\$\{ENV:([^}]+)\}'
44
- def env_replacer(match):
45
- var_name = match.group(1)
46
- return env_vars.get(var_name, '')
47
- value = re.sub(env_pattern, env_replacer, value)
48
-
49
- # Resolve ${INI:section.key} - reads from config.ini
50
- ini_pattern = r'\$\{INI:([^}]+)\}'
51
- def ini_replacer(match):
52
- path = match.group(1)
53
- parts = path.split('.')
54
- if len(parts) == 2:
55
- section, key = parts
56
- if ini_config.has_option(section, key):
57
- return ini_config.get(section, key)
58
- return ''
59
- value = re.sub(ini_pattern, ini_replacer, value)
60
-
61
- return value
62
-
63
-
64
- def resolve_config(obj, env_vars, ini_config):
65
- """
66
- Recursively resolve all placeholders in a configuration object.
67
-
68
- Args:
69
- obj: Configuration object (dict, list, or str)
70
- env_vars: Dictionary of environment variables
71
- ini_config: ConfigParser object
72
-
73
- Returns:
74
- Configuration object with all placeholders resolved
75
- """
76
- if isinstance(obj, dict):
77
- return {k: resolve_config(v, env_vars, ini_config) for k, v in obj.items()}
78
- elif isinstance(obj, list):
79
- return [resolve_config(item, env_vars, ini_config) for item in obj]
80
- elif isinstance(obj, str):
81
- return resolve_placeholder(obj, env_vars, ini_config)
82
- return obj
83
-
84
-
85
- def load_config():
86
- """
87
- Load and merge configuration from .env, config.ini, and rag_eval_config.yaml.
37
+ Load configuration from YAML file with values resolved from .env and config.ini.
88
38
 
89
39
  Returns:
90
40
  dict: Fully resolved configuration dictionary
91
41
  """
92
- # Load environment variables from .env file
93
42
  load_dotenv('.env')
94
- env_vars = dict(os.environ)
95
-
96
- # Load INI configuration
97
- ini_config = configparser.ConfigParser()
98
- ini_config.read('config.ini')
99
43
 
100
- # Load YAML configuration and resolve all placeholders
101
- with open('rag_eval_config.yaml', 'r') as f:
102
- yaml_config = yaml.safe_load(f)
103
-
104
- return resolve_config(yaml_config, env_vars, ini_config)
44
+ ini = configparser.ConfigParser()
45
+ ini.read('config.ini')
46
+
47
+ def resolve(obj):
48
+ if isinstance(obj, dict):
49
+ return {k: resolve(v) for k, v in obj.items()}
50
+ if isinstance(obj, list):
51
+ return [resolve(i) for i in obj]
52
+ if isinstance(obj, str):
53
+ # Resolve ${ENV:VAR} and ${INI:section.key} placeholders
54
+ result = re.sub(r'\$\{ENV:([^}]+)\}', lambda m: os.getenv(m.group(1), ''), obj)
55
+ result = re.sub(r'\$\{INI:([^.]+)\.([^}]+)\}',
56
+ lambda m: ini.get(m.group(1), m.group(2), fallback=''), result)
57
+ # Convert types
58
+ if result.lower() == 'true': return True
59
+ if result.lower() == 'false': return False
60
+ try:
61
+ if '.' in result: return float(result)
62
+ except ValueError:
63
+ pass
64
+ return result
65
+ return obj
66
+
67
+ with open(yaml_file, 'r') as f:
68
+ return resolve(yaml.safe_load(f))
105
69
 
106
70
 
107
71
  def get_llm(config):
108
- """Initialize LLM based on provider."""
109
- llm_config = config['ragas']['llm']
110
- provider = llm_config['provider'].lower()
72
+ """Initialize LLM based on config."""
73
+ provider = config['ragas']['llm']['provider']
111
74
 
112
- if provider == 'azure':
113
- from langchain_openai import AzureChatOpenAI
75
+ if provider == "azure":
76
+ azure_config = config['ragas']['llm']['azure']
114
77
  return AzureChatOpenAI(
115
- azure_endpoint=llm_config['azure_endpoint'],
116
- api_key=llm_config['api_key'],
117
- api_version=llm_config.get('api_version', '2024-02-15-preview'),
118
- deployment_name=llm_config['model'],
119
- temperature=float(llm_config.get('temperature', 0.0))
78
+ azure_endpoint=azure_config['endpoint'],
79
+ api_key=azure_config['api_key'],
80
+ deployment_name=azure_config['deployment_name'],
81
+ model=azure_config['model'],
82
+ temperature=azure_config['temperature'],
83
+ api_version=azure_config['api_version']
120
84
  )
121
- elif provider == 'openai':
122
- from langchain_openai import ChatOpenAI
85
+ elif provider == "openai":
86
+ openai_config = config['ragas']['llm']['openai']
123
87
  return ChatOpenAI(
124
- api_key=llm_config['api_key'],
125
- model=llm_config['model'],
126
- temperature=float(llm_config.get('temperature', 0.0))
88
+ model=openai_config['model'],
89
+ temperature=openai_config['temperature'],
90
+ api_key=openai_config['api_key']
127
91
  )
128
- elif provider == 'ollama':
129
- from langchain_ollama import ChatOllama
92
+ elif provider == "ollama":
93
+ ollama_config = config['ragas']['llm']['ollama']
130
94
  return ChatOllama(
131
- base_url=llm_config.get('base_url', 'http://localhost:11434'),
132
- model=llm_config['model'],
133
- temperature=float(llm_config.get('temperature', 0.0))
95
+ base_url=ollama_config['base_url'],
96
+ model=ollama_config['model'],
97
+ temperature=ollama_config['temperature']
134
98
  )
135
99
  else:
136
- raise ValueError(f"Unknown LLM provider: {provider}")
100
+ raise ValueError(f"Unsupported LLM provider: {provider}")
137
101
 
138
102
 
139
103
  def get_embeddings(config):
140
- """Initialize embeddings based on provider."""
141
- emb_config = config['ragas']['embeddings']
142
- provider = emb_config['provider'].lower()
104
+ """Initialize embeddings based on config."""
105
+ provider = config['ragas']['embeddings']['provider']
143
106
 
144
- if provider == 'azure':
145
- from langchain_openai import AzureOpenAIEmbeddings
107
+ if provider == "azure":
108
+ azure_config = config['ragas']['embeddings']['azure']
146
109
  return AzureOpenAIEmbeddings(
147
- azure_endpoint=emb_config['azure_endpoint'],
148
- api_key=emb_config['api_key'],
149
- api_version=emb_config.get('api_version', '2024-02-15-preview'),
150
- deployment=emb_config['model']
110
+ azure_endpoint=azure_config['endpoint'],
111
+ api_key=azure_config['api_key'],
112
+ deployment=azure_config['deployment_name'],
113
+ api_version=azure_config['api_version']
151
114
  )
152
- elif provider == 'openai':
153
- from langchain_openai import OpenAIEmbeddings
115
+ elif provider == "openai":
116
+ openai_config = config['ragas']['embeddings']['openai']
154
117
  return OpenAIEmbeddings(
155
- api_key=emb_config['api_key'],
156
- model=emb_config['model']
118
+ model=openai_config['model'],
119
+ api_key=openai_config['api_key']
157
120
  )
158
- elif provider == 'ollama':
159
- from langchain_ollama import OllamaEmbeddings
121
+ elif provider == "ollama":
122
+ ollama_config = config['ragas']['embeddings']['ollama']
160
123
  return OllamaEmbeddings(
161
- base_url=emb_config.get('base_url', 'http://localhost:11434'),
162
- model=emb_config['model']
124
+ base_url=ollama_config['base_url'],
125
+ model=ollama_config['model']
163
126
  )
164
127
  else:
165
- raise ValueError(f"Unknown embeddings provider: {provider}")
128
+ raise ValueError(f"Unsupported embeddings provider: {provider}")
166
129
 
167
130
 
168
131
  def get_metrics(config):
169
- """Get list of Ragas metrics."""
132
+ """Get Ragas metrics based on config."""
170
133
  metric_map = {
171
- 'faithfulness': faithfulness,
172
- 'answer_relevancy': answer_relevancy,
173
- 'context_precision': context_precision,
174
- 'answer_correctness': answer_correctness
134
+ "Faithfulness": Faithfulness(),
135
+ "AnswerRelevancy": AnswerRelevancy(),
136
+ "ContextPrecision": ContextPrecision(),
137
+ "AnswerCorrectness": AnswerCorrectness(),
175
138
  }
176
- return [metric_map[m] for m in config['ragas']['metrics'] if m in metric_map]
177
-
178
139
 
179
- def get_auth_headers_and_cookies(config):
180
- """
181
- Get authentication headers and cookies from backend config.
140
+ metric_names = config['ragas']['metrics']
141
+ return [metric_map[name] for name in metric_names if name in metric_map]
182
142
 
183
- Supports three authentication types:
184
- - cookie: Session cookie authentication
185
- - bearer: Bearer token authentication
186
- - header: Custom header authentication
187
143
 
188
- Args:
189
- config: Full configuration dictionary
144
+ def get_auth_headers_and_cookies(config):
145
+ """Get authentication headers and cookies based on config."""
146
+ auth_config = config['backend']['auth']
147
+ auth_type = auth_config.get('type', 'none')
190
148
 
191
- Returns:
192
- tuple: (headers dict, cookies dict)
193
- """
194
- # Auth config is nested under backend.auth in the YAML
195
- auth_config = config.get('backend', {}).get('auth', {})
196
- auth_type = auth_config.get('type', 'none').lower()
197
149
  headers = {}
198
150
  cookies = {}
199
151
 
200
- if auth_type == 'cookie':
201
- cookie_name = auth_config.get('cookie_name', 'session')
202
- cookie_value = auth_config.get('cookie_value', '')
203
- if cookie_value:
204
- cookies[cookie_name] = cookie_value
205
- elif auth_type == 'bearer':
206
- token = auth_config.get('bearer_token', '')
207
- if token:
208
- headers['Authorization'] = f'Bearer {token}'
209
- elif auth_type == 'header':
210
- header_name = auth_config.get('header_name', '')
211
- header_value = auth_config.get('header_value', '')
212
- if header_name and header_value:
213
- headers[header_name] = header_value
152
+ if auth_type == "cookie":
153
+ cookies[auth_config['cookie_name']] = auth_config['cookie_value']
154
+ elif auth_type == "bearer":
155
+ headers['Authorization'] = f"Bearer {auth_config['bearer_token']}"
156
+ elif auth_type == "header":
157
+ headers[auth_config['header_name']] = auth_config['header_value']
214
158
 
215
159
  return headers, cookies
216
160
 
217
161
 
218
162
  def extract_response_data(response, endpoint_config):
219
163
  """Extract data from API response."""
220
- data = response.json()
221
- response_path = endpoint_config.get('response_path', '')
164
+ if endpoint_config.get('stream', False):
165
+ return "".join(chunk.decode() for chunk in response.iter_content(chunk_size=None))
222
166
 
223
- if response_path:
224
- for key in response_path.split('.'):
225
- if isinstance(data, dict) and key in data:
226
- data = data[key]
227
- elif isinstance(data, list) and key.isdigit():
228
- data = data[int(key)]
229
- else:
230
- return data
231
- return data
167
+ # Try to parse as JSON first
168
+ try:
169
+ data = response.json()
170
+ response_key = endpoint_config.get('response_key')
171
+ if response_key:
172
+ return data.get(response_key)
173
+ return data
174
+ except:
175
+ # If JSON parsing fails, return as plain text
176
+ return response.text
232
177
 
233
178
 
234
179
  def make_api_request(base_url, endpoint_config, query, chat_id, auth_headers, auth_cookies, verify_ssl=True):
235
180
  """Make API request to backend."""
236
- url = base_url.rstrip('/') + endpoint_config['path']
237
- method = endpoint_config.get('method', 'POST').upper()
238
-
239
- body = endpoint_config.get('body', {}).copy()
240
- body['query'] = query
241
- body['chat_id'] = chat_id
242
-
243
- headers = {'Content-Type': 'application/json'}
244
- headers.update(auth_headers)
245
-
246
- if method == 'POST':
247
- response = requests.post(url, json=body, headers=headers, cookies=auth_cookies, verify=verify_ssl)
181
+ url = base_url + endpoint_config['path']
182
+ method = endpoint_config.get('method', 'POST')
183
+
184
+ headers = {**endpoint_config.get('headers', {}), **auth_headers}
185
+
186
+ # Flexible body preparation
187
+ body = {}
188
+ for key, value in endpoint_config.get('body', {}).items():
189
+ if isinstance(value, str) and ("{query}" in value or "{chat_id}" in value):
190
+ body[key] = value.format(query=query, chat_id=chat_id)
191
+ elif key == "chat_id":
192
+ try:
193
+ body[key] = int(chat_id)
194
+ except (ValueError, TypeError):
195
+ body[key] = chat_id
196
+ else:
197
+ body[key] = value
198
+
199
+ if method.upper() == 'POST':
200
+ resp = requests.post(
201
+ url,
202
+ json=body,
203
+ headers=headers,
204
+ cookies=auth_cookies,
205
+ stream=endpoint_config.get('stream', False),
206
+ verify=verify_ssl
207
+ )
208
+ elif method.upper() == 'GET':
209
+ resp = requests.get(
210
+ url,
211
+ params=body,
212
+ headers=headers,
213
+ cookies=auth_cookies,
214
+ verify=verify_ssl
215
+ )
248
216
  else:
249
- response = requests.get(url, params=body, headers=headers, cookies=auth_cookies, verify=verify_ssl)
217
+ raise ValueError(f"Unsupported HTTP method: {method}")
250
218
 
251
- response.raise_for_status()
252
- return response
219
+ resp.raise_for_status()
220
+ return resp
253
221
 
254
222
 
255
223
  def get_context(config, query, chat_id, auth_headers, auth_cookies):
256
- """Get context from backend API."""
224
+ """Retrieve context from backend API."""
257
225
  base_url = config['backend']['base_url']
258
226
  endpoint_config = config['backend']['endpoints']['context']
259
227
  verify_ssl = config['backend'].get('verify_ssl', True)
@@ -261,9 +229,12 @@ def get_context(config, query, chat_id, auth_headers, auth_cookies):
261
229
  response = make_api_request(base_url, endpoint_config, query, chat_id, auth_headers, auth_cookies, verify_ssl)
262
230
  context = extract_response_data(response, endpoint_config)
263
231
 
264
- if isinstance(context, list):
265
- return [str(c) for c in context]
266
- return [str(context)]
232
+ if isinstance(context, str):
233
+ return [context]
234
+ elif isinstance(context, list):
235
+ return context
236
+ else:
237
+ return [str(context)]
267
238
 
268
239
 
269
240
  def get_answer(config, query, chat_id, auth_headers, auth_cookies):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: rag-sentinel
3
- Version: 0.1.2
3
+ Version: 0.1.4
4
4
  Summary: RAG Evaluation Framework using Ragas metrics and MLflow tracking
5
5
  Author: RAGSentinel Team
6
6
  License: MIT
@@ -0,0 +1,12 @@
1
+ rag_sentinel/__init__.py,sha256=KS1u5vgA5NxVHPFebebP2cTgt26nRKlC8dfGPy_Y2kM,672
2
+ rag_sentinel/cli.py,sha256=ed9j_vetSDIuqkr3VjqFMBoRetRwkXFAX3sK3u-UodA,9525
3
+ rag_sentinel/evaluator.py,sha256=6HcVv5quf0Hnxz3UkauuaODFt4xAH9Bo61dBZCxGQSw,12296
4
+ rag_sentinel/templates/.env.template,sha256=FabB1i4pUkU8gdNLRt2D8mgltY4AudClq8rx6vS33xc,1120
5
+ rag_sentinel/templates/config.ini.template,sha256=OeW21j4LXxXnFCPVvOZhdZOq1id0BQLgwS5ruXSrXBQ,1016
6
+ rag_sentinel/templates/rag_eval_config.yaml,sha256=zRPMOngALsbhgQbkKeNvXc8VVxzDJrASpSIpGTpVKlk,3080
7
+ rag_sentinel-0.1.4.dist-info/licenses/LICENSE,sha256=0bRNV4OZXZGaeA4PLR0CZKk1peLyIw977fV9K5jAGws,1074
8
+ rag_sentinel-0.1.4.dist-info/METADATA,sha256=73Q2_ultXj4_DC0203Hrb8_gZsLB2y5c7U9rVqnn_0k,2716
9
+ rag_sentinel-0.1.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
10
+ rag_sentinel-0.1.4.dist-info/entry_points.txt,sha256=ZBQp5JLnfMLjgLX3UdzX2rainIgvqkq36Wtf_VLa9ak,55
11
+ rag_sentinel-0.1.4.dist-info/top_level.txt,sha256=qk1BCc3wrLshA3-jf0Jb4bFlZF-ARIjZfYaqAW1Aq0E,13
12
+ rag_sentinel-0.1.4.dist-info/RECORD,,
@@ -1,12 +0,0 @@
1
- rag_sentinel/__init__.py,sha256=jrVnGWJh60e1fucEP8DuQtFaXQcls8Rsw9tuUfKI87Y,672
2
- rag_sentinel/cli.py,sha256=EacFBn_7TBsP1CPw9MmNUyGcAG-MO7c1UDz2v3wPkz4,8499
3
- rag_sentinel/evaluator.py,sha256=_5fMiUeQVK93C87svEH8WI5MPcH4nfA7FQo8xeh5dTE,13321
4
- rag_sentinel/templates/.env.template,sha256=FabB1i4pUkU8gdNLRt2D8mgltY4AudClq8rx6vS33xc,1120
5
- rag_sentinel/templates/config.ini.template,sha256=OeW21j4LXxXnFCPVvOZhdZOq1id0BQLgwS5ruXSrXBQ,1016
6
- rag_sentinel/templates/rag_eval_config.yaml,sha256=zRPMOngALsbhgQbkKeNvXc8VVxzDJrASpSIpGTpVKlk,3080
7
- rag_sentinel-0.1.2.dist-info/licenses/LICENSE,sha256=0bRNV4OZXZGaeA4PLR0CZKk1peLyIw977fV9K5jAGws,1074
8
- rag_sentinel-0.1.2.dist-info/METADATA,sha256=QQdGA5cyjoSVKpTbO3-qbQWii7aCO28g8JaEDO4k36s,2716
9
- rag_sentinel-0.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
10
- rag_sentinel-0.1.2.dist-info/entry_points.txt,sha256=ZBQp5JLnfMLjgLX3UdzX2rainIgvqkq36Wtf_VLa9ak,55
11
- rag_sentinel-0.1.2.dist-info/top_level.txt,sha256=qk1BCc3wrLshA3-jf0Jb4bFlZF-ARIjZfYaqAW1Aq0E,13
12
- rag_sentinel-0.1.2.dist-info/RECORD,,