datamule 1.2.2__py3-none-any.whl → 1.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. datamule/__init__.py +1 -0
  2. datamule/document/document.py +27 -14
  3. datamule/document/mappings/atsn.py +208 -0
  4. datamule/document/mappings/cfportal.py +346 -0
  5. datamule/document/mappings/d.py +125 -0
  6. datamule/document/mappings/ex102_abs.py +63 -0
  7. datamule/document/mappings/ex99a_sdr.py +1 -0
  8. datamule/document/mappings/ex99c_sdr.py +0 -0
  9. datamule/document/mappings/ex99g_sdr.py +0 -0
  10. datamule/document/mappings/ex99i_sdr.py +0 -0
  11. datamule/document/mappings/information_table.py +35 -0
  12. datamule/document/mappings/nmfp.py +275 -0
  13. datamule/document/mappings/npx.py +85 -0
  14. datamule/document/mappings/onefourtyfour.py +68 -0
  15. datamule/document/mappings/ownership.py +163 -0
  16. datamule/document/mappings/proxy_voting_record.py +17 -0
  17. datamule/document/mappings/sbs.py +0 -0
  18. datamule/document/mappings/sbsef.py +13 -0
  19. datamule/document/mappings/schedule13.py +117 -0
  20. datamule/document/mappings/sdr.py +63 -0
  21. datamule/document/mappings/submission_metadata.py +9 -0
  22. datamule/document/mappings/ta.py +0 -0
  23. datamule/document/mappings/thirteenfhr.py +72 -0
  24. datamule/document/mappings/twentyfivense.py +22 -0
  25. datamule/document/mappings/twentyfourf2nt.py +100 -0
  26. datamule/document/processing.py +170 -42
  27. datamule/document/table.py +60 -5
  28. datamule/helper.py +10 -1
  29. datamule/index.py +8 -10
  30. datamule/portfolio.py +17 -16
  31. datamule/sec/submissions/monitor.py +173 -120
  32. datamule/sec/submissions/textsearch.py +0 -4
  33. datamule/sec/xbrl/streamcompanyfacts.py +1 -1
  34. datamule/seclibrary/downloader.py +2 -2
  35. datamule/submission.py +92 -36
  36. {datamule-1.2.2.dist-info → datamule-1.2.9.dist-info}/METADATA +1 -2
  37. datamule-1.2.9.dist-info/RECORD +62 -0
  38. datamule/sec/rss/monitor.py +0 -416
  39. datamule-1.2.2.dist-info/RECORD +0 -40
  40. /datamule/{sec/rss → document/mappings}/__init__.py +0 -0
  41. {datamule-1.2.2.dist-info → datamule-1.2.9.dist-info}/WHEEL +0 -0
  42. {datamule-1.2.2.dist-info → datamule-1.2.9.dist-info}/top_level.txt +0 -0
datamule/sec/rss/monitor.py (removed)
@@ -1,416 +0,0 @@
- import asyncio
- import time
- from collections import deque
- import aiohttp
- from lxml import etree
- import re
- from tqdm.auto import tqdm
- from ..utils import RetryException, PreciseRateLimiter, RateMonitor, headers
-
- async def start_monitor(data_callback=None, poll_callback=None, submission_type=None, cik=None,
-                         polling_interval=200, requests_per_second=2.0, quiet=True,
-                         known_accession_numbers=None, skip_initial_accession_numbers=None):
-     """
-     Main monitoring loop for SEC filings.
-
-     Parameters:
-         data_callback (callable): Async function to call when new filings are found.
-         poll_callback (callable): Async function to call during polling wait periods.
-         submission_type (str or list): Form type(s) to monitor (e.g., "8-K", "10-Q").
-         cik (str or list): CIK(s) to monitor.
-         polling_interval (int): Polling interval in milliseconds.
-         requests_per_second (float): Maximum requests per second.
-         quiet (bool): Suppress verbose output.
-         known_accession_numbers (list): List of accession numbers to track for ongoing monitoring.
-         skip_initial_accession_numbers (set): Set of accession numbers to skip during initialization
-             (these were already processed by EFTS).
-     """
-     # Initialize rate limiter
-     rate_limiter = PreciseRateLimiter(requests_per_second)
-     rate_monitor = RateMonitor()
-
-     # Initialize tracking set for known accession numbers with a reasonable size
-     active_accession_numbers = deque(maxlen=20000)
-     if known_accession_numbers:
-         active_accession_numbers.extend(known_accession_numbers)
-
-     # Convert skip_initial_accession_numbers to a set if it's not already
-     if skip_initial_accession_numbers is not None and not isinstance(skip_initial_accession_numbers, set):
-         skip_initial_accession_numbers = set(skip_initial_accession_numbers)
-
-     # Convert submission_type to list if it's a string
-     if submission_type and isinstance(submission_type, str):
-         submission_type = [submission_type]
-
-     # Convert CIK to list if it's a string
-     if cik and isinstance(cik, str):
-         cik = [cik]
-
-     # Set up base URL parameters
-     url_params = {
-         'action': 'getcurrent',
-         'owner': 'include',
-         'count': 100,
-         'output': 'atom'
-     }
-
-     if submission_type:
-         url_params['type'] = ','.join(submission_type)
-     if cik:
-         url_params['CIK'] = ','.join(cik)
-
-     # Store first page accession numbers for quick polling
-     first_page_accession_numbers = set()
-
-     # Initialize by loading a batch of the latest filings
-     await initialize_known_filings(
-         url_params,
-         active_accession_numbers,
-         rate_limiter,
-         rate_monitor,
-         quiet,
-         data_callback,
-         skip_initial_accession_numbers
-     )
-
-     # Main polling loop
-     while True:
-         try:
-             # Poll for new filings on the first page
-             new_filings = await poll_for_new_filings(
-                 url_params,
-                 first_page_accession_numbers,
-                 rate_limiter,
-                 rate_monitor,
-                 quiet
-             )
-
-             if new_filings:
-                 # If there are new filings, check if we need to fetch more comprehensive data
-                 if len(new_filings) >= 100: # If the entire first page is new
-                     new_filings = await fetch_comprehensive_filings(
-                         url_params,
-                         set(active_accession_numbers), # Convert to set for faster lookups
-                         rate_limiter,
-                         rate_monitor,
-                         quiet
-                     )
-
-                 # Process new filings and call the data callback
-                 if new_filings and data_callback:
-                     processed_filings = process_filings(new_filings)
-
-                     # Filter out filings we're already tracking
-                     new_processed_filings = [
-                         filing for filing in processed_filings
-                         if filing['accession_number'] not in active_accession_numbers
-                     ]
-
-                     if new_processed_filings:
-                         await data_callback(new_processed_filings)
-
-                     # Add new filings to known accession numbers
-                     for filing in new_processed_filings:
-                         active_accession_numbers.append(filing['accession_number'])
-
-                     if not quiet and new_processed_filings:
-                         print(f"Found {len(new_processed_filings)} new filings.")
-
-             # Call the poll callback if provided
-             if poll_callback:
-                 await poll_callback()
-
-             # Wait for the next polling interval
-             await asyncio.sleep(polling_interval / 1000.0) # Convert milliseconds to seconds
-
-         except RetryException as e:
-             if not quiet:
-                 print(f"Rate limit exceeded. Retrying after {e.retry_after} seconds.")
-             await asyncio.sleep(e.retry_after)
-         except Exception as e:
-             if not quiet:
-                 print(f"Error in monitoring loop: {e}")
-             await asyncio.sleep(polling_interval / 1000.0) # Wait before retrying
-
- async def initialize_known_filings(url_params, active_accession_numbers, rate_limiter,
-                                    rate_monitor, quiet, data_callback=None,
-                                    skip_initial_accession_numbers=None):
-     """Initialize the list of known accession numbers from the SEC feed."""
-     if not quiet:
-         print("Initializing known filings...")
-
-     # Fetch a large batch of filings to initialize
-     all_filings = await fetch_comprehensive_filings(url_params, set(), rate_limiter, rate_monitor, quiet)
-
-     # Process and emit filings if data_callback is provided
-     if data_callback and all_filings:
-         # Filter out filings that are in the skip list (already processed by EFTS)
-         new_filings = []
-         for filing in all_filings:
-             acc_no = extract_accession_number(filing)
-             # Only include filings NOT in the skip list
-             if acc_no and (skip_initial_accession_numbers is None or
-                            acc_no not in skip_initial_accession_numbers):
-                 new_filings.append(filing)
-
-         if new_filings:
-             processed_filings = process_filings(new_filings)
-             if not quiet:
-                 print(f"Emitting {len(processed_filings)} initial filings to data callback...")
-             await data_callback(processed_filings)
-
-     # Add ALL fetched accession numbers to the active tracking list
-     # We track all accession numbers regardless of whether they were in the skip list
-     if not quiet:
-         # Create a single progress bar that stays in place and shows rate
-         with tqdm(total=len(all_filings), desc="Processing filings", unit="filing", ncols=100,
-                   leave=False, position=0,
-                   bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') as pbar:
-             for filing in all_filings:
-                 acc_no = extract_accession_number(filing)
-                 if acc_no:
-                     active_accession_numbers.append(acc_no)
-                 pbar.update(1)
-     else:
-         for filing in all_filings:
-             acc_no = extract_accession_number(filing)
-             if acc_no:
-                 active_accession_numbers.append(acc_no)
-
-     if not quiet:
-         print(f"Initialized with {len(active_accession_numbers)} known filings.")
-
- # The rest of the functions remain the same
- async def poll_for_new_filings(url_params, first_page_accession_numbers, rate_limiter, rate_monitor, quiet):
-     """Poll the first page of SEC filings to check for new ones."""
-     # Create a copy of the URL parameters for the first page
-     page_params = url_params.copy()
-     page_params['start'] = 0
-
-     # Construct the URL
-     url = construct_url(page_params)
-
-     async with aiohttp.ClientSession() as session:
-         async with rate_limiter:
-             if not quiet:
-                 # Use a clear line break before polling message
-                 print(f"Polling {url}")
-
-             async with session.get(url, headers=headers) as response:
-                 if response.status == 429:
-                     retry_after = int(response.headers.get('Retry-After', 601))
-                     raise RetryException(url, retry_after)
-
-                 content = await response.read()
-                 await rate_monitor.add_request(len(content))
-
-                 if response.status != 200:
-                     if not quiet:
-                         print(f"Error {response.status} from SEC API: {content}")
-                     return []
-
-                 # Parse the XML response
-                 root = etree.fromstring(content)
-                 entries = root.xpath("//xmlns:entry", namespaces={"xmlns": "http://www.w3.org/2005/Atom"})
-
-                 # Extract accession numbers from entries
-                 current_accession_numbers = set()
-                 for entry in entries:
-                     acc_no = extract_accession_number(entry)
-                     if acc_no:
-                         current_accession_numbers.add(acc_no)
-
-                 # Check for new accession numbers
-                 if not first_page_accession_numbers:
-                     # First run, just store the current accession numbers
-                     first_page_accession_numbers.update(current_accession_numbers)
-                     return []
-
-                 # Find new accession numbers
-                 new_accession_numbers = current_accession_numbers - first_page_accession_numbers
-
-                 # Update first page accession numbers
-                 first_page_accession_numbers.clear()
-                 first_page_accession_numbers.update(current_accession_numbers)
-
-                 # If there are new accession numbers, return ALL entries with those numbers
-                 if new_accession_numbers:
-                     new_entries = []
-                     for entry in entries:
-                         acc_no = extract_accession_number(entry)
-                         if acc_no and acc_no in new_accession_numbers:
-                             new_entries.append(entry)
-                     return new_entries
-
-                 return []
-
- async def fetch_comprehensive_filings(url_params, known_accession_numbers, rate_limiter, rate_monitor, quiet):
-     """Fetch a comprehensive list of filings, potentially paginating through multiple requests."""
-     all_new_filings = []
-
-     # We'll fetch up to 2000 filings in batches of 100
-     page_range = range(0, 2000, 100)
-
-     # Create a single progress bar that stays in place and shows rate
-     pbar = None
-     if not quiet:
-         # Use a custom format that includes rate (pages/sec)
-         pbar = tqdm(total=len(page_range), desc="Fetching pages", unit="page", ncols=100,
-                     leave=False, position=0,
-                     bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]')
-
-     fetch_status = ""
-     try:
-         for start in page_range:
-             if pbar:
-                 pbar.update(1)
-
-             page_params = url_params.copy()
-             page_params['start'] = start
-
-             url = construct_url(page_params)
-
-             if not quiet:
-                 fetch_status = f"Fetching {url}"
-                 if pbar:
-                     # Add URL to the progress bar but keep it short
-                     pbar.set_postfix_str(fetch_status[:30] + "..." if len(fetch_status) > 30 else fetch_status)
-                     # Ensure the progress bar gets displayed with the current rate
-                     pbar.refresh()
-
-             async with aiohttp.ClientSession() as session:
-                 async with rate_limiter:
-                     async with session.get(url, headers=headers) as response:
-                         if response.status == 429:
-                             retry_after = int(response.headers.get('Retry-After', 601))
-                             raise RetryException(url, retry_after)
-
-                         content = await response.read()
-                         await rate_monitor.add_request(len(content))
-
-                         if response.status != 200:
-                             if not quiet:
-                                 print(f"Error {response.status} from SEC API: {content}")
-                             break
-
-                         # Parse the XML response
-                         root = etree.fromstring(content)
-                         entries = root.xpath("//xmlns:entry", namespaces={"xmlns": "http://www.w3.org/2005/Atom"})
-
-                         if not entries:
-                             # No more entries, stop pagination
-                             break
-
-                         # Check for new filings - collect all entries, not just one per accession number
-                         for entry in entries:
-                             acc_no = extract_accession_number(entry)
-                             if acc_no and acc_no not in known_accession_numbers:
-                                 all_new_filings.append(entry)
-
-                         if len(entries) < 100:
-                             # Less than a full page, no need to continue pagination
-                             break
-     finally:
-         # Always close the progress bar
-         if pbar:
-             pbar.close()
-
-     return all_new_filings
-
- def process_filings(filings):
-     """
-     Process a list of filing entries and return structured data.
-     Combines entries with the same accession number and collects all CIKs.
-     """
-     # Group filings by accession number
-     filing_groups = {}
-
-     for filing in filings:
-         acc_no = extract_accession_number(filing)
-         if not acc_no:
-             continue
-
-         # Get submission type
-         submission_type = extract_submission_type(filing)
-
-         # Get CIK
-         cik = extract_cik(filing)
-
-         # Initialize or update the filing group
-         if acc_no not in filing_groups:
-             filing_groups[acc_no] = {
-                 'accession_number': acc_no,
-                 'submission_type': submission_type,
-                 'ciks': []
-             }
-
-         # Add CIK if it's not already in the list and is valid
-         if cik and cik not in filing_groups[acc_no]['ciks']:
-             filing_groups[acc_no]['ciks'].append(cik)
-
-     # Convert the dictionary to a list of filing dictionaries
-     return list(filing_groups.values())
-
- def extract_accession_number(entry):
-     """Extract the accession number from an entry."""
-     id_element = entry.find(".//xmlns:id", namespaces={"xmlns": "http://www.w3.org/2005/Atom"})
-     if id_element is not None and id_element.text:
-         match = re.search(r'accession-number=(\d+-\d+-\d+)', id_element.text)
-         if match:
-             return match.group(1)
-     return None
-
- def extract_submission_type(entry):
-     """Extract the submission type from an entry."""
-     category_element = entry.find(".//xmlns:category", namespaces={"xmlns": "http://www.w3.org/2005/Atom"})
-     if category_element is not None:
-         return category_element.get('term')
-     return None
-
- def extract_cik(entry):
-     """Extract the CIK from an entry's link URL."""
-     link_element = entry.find(".//xmlns:link", namespaces={"xmlns": "http://www.w3.org/2005/Atom"})
-     if link_element is not None and 'href' in link_element.attrib:
-         href = link_element.get('href')
-         match = re.search(r'/data/(\d+)/', href)
-         if match:
-             return match.group(1)
-     return None
-
- def construct_url(params):
-     """Construct a URL with the given parameters."""
-     base_url = "https://www.sec.gov/cgi-bin/browse-edgar"
-     query_string = "&".join([f"{k}={v}" for k, v in params.items()])
-     return f"{base_url}?{query_string}"
-
- def monitor(data_callback=None, poll_callback=None, submission_type=None, cik=None,
-             polling_interval=200, requests_per_second=2.0, quiet=True,
-             known_accession_numbers=None, skip_initial_accession_numbers=None):
-     """
-     Convenience function to start monitoring SEC filings from the RSS feed.
-
-     Parameters:
-         data_callback (callable): Async function to call when new filings are found.
-             Will be called with a list of dicts containing
-             'accession_number', 'submission_type', and 'ciks'.
-         poll_callback (callable): Async function to call during polling wait periods.
-         submission_type (str or list): Form type(s) to monitor (e.g., "8-K", "10-Q").
-         cik (str or list): CIK(s) to monitor.
-         polling_interval (int): Polling interval in milliseconds.
-         requests_per_second (float): Maximum requests per second.
-         quiet (bool): Suppress verbose output.
-         known_accession_numbers (list): List of accession numbers to track for ongoing monitoring.
-         skip_initial_accession_numbers (set): Set of accession numbers to skip during initialization
-             (already processed by EFTS).
-     """
-     return asyncio.run(start_monitor(
-         data_callback=data_callback,
-         poll_callback=poll_callback,
-         submission_type=submission_type,
-         cik=cik,
-         polling_interval=polling_interval,
-         requests_per_second=requests_per_second,
-         quiet=quiet,
-         known_accession_numbers=known_accession_numbers,
-         skip_initial_accession_numbers=skip_initial_accession_numbers
-     ))
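
For reference, the removed module exposed monitor() as a blocking convenience wrapper around start_monitor(). A minimal usage sketch under the 1.2.2 layout (the import path follows the old file location and the callback name on_new_filings is hypothetical; the dict keys and defaults come from the docstring above, and this call no longer works in 1.2.9):

    from datamule.sec.rss.monitor import monitor  # 1.2.2 path; module removed in 1.2.9

    async def on_new_filings(filings):
        # Each filing dict carries 'accession_number', 'submission_type', and 'ciks'
        for filing in filings:
            print(filing['accession_number'], filing['submission_type'], filing['ciks'])

    # Blocks indefinitely: polls the EDGAR "getcurrent" Atom feed about every 200 ms,
    # throttled to 2 requests per second (the defaults shown in the removed code).
    monitor(
        data_callback=on_new_filings,
        submission_type="8-K",
        quiet=False,
    )
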
datamule-1.2.2.dist-info/RECORD (removed)
@@ -1,40 +0,0 @@
- datamule/__init__.py,sha256=8KioESb9y0Xwy72WuTfsYZnnMFdCrRhSv8DW-kZ4-To,1066
- datamule/config.py,sha256=Y--CVv7JcgrjJkMOSLrvm2S8B9ost6RMSkGviP-MKtg,883
- datamule/helper.py,sha256=xgOVnea-lUlQ5I-U0vYUp0VeKPNZehNhqjJvegA3lYE,3342
- datamule/index.py,sha256=0txvbzPcvY1GsdxA-wGdLzAByxSeE_1VyyBp9mZEQRM,2292
- datamule/package_updater.py,sha256=Z9zaa_y0Z5cknpRn8oPea3gg4kquFHfpfhduKKCZ6NU,958
- datamule/portfolio.py,sha256=so6j2KrkcZOToHIqkANAu3CC4QsfgaUN1zk9CrbRe1E,7225
- datamule/sheet.py,sha256=TvFqK9eAYuVoJ2uWdAlx5EN6vS9lke-aZf7FqtUiDBc,22304
- datamule/submission.py,sha256=tc4-8houjT2gfSK0P7ekowPduT31rj5_zt0axwZUacc,8483
- datamule/document/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/document/document.py,sha256=BRnHPVt-vIT7EZTF-c-Ulv3N33xX9zE02Q9mKXVDeuY,9474
- datamule/document/processing.py,sha256=fw-1OWfbmZhG1R8XpJx_vcGwz3_djmk0FrblHAMPmwc,27476
- datamule/document/table.py,sha256=Sv9jTGiVhnWIY9nHaynUUixwbCrvbLsf0fdOnFR-NCY,10791
- datamule/mapping_dicts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/mapping_dicts/txt_mapping_dicts.py,sha256=DQPrGYbAPQxomRUtt4iiMGrwuF7BHc_LeFBQuYBzU9o,6311
- datamule/mapping_dicts/xml_mapping_dicts.py,sha256=Z22yDVwKYonUfM5foQP00dVDE8EHhhMKp0CLqVKV5OI,438
- datamule/sec/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/sec/utils.py,sha256=JUxwijJiqRMnRJNQzVUamyF5h9ZGc7RnO_zsLOIM73g,2079
- datamule/sec/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/sec/infrastructure/submissions_metadata.py,sha256=f1KarzFSryKm0EV8DCDNsBw5Jv0Tx5aljiGUJkk7DRk,18745
- datamule/sec/rss/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/sec/rss/monitor.py,sha256=6r4EYaSlGu6VYErlj9zXJsIMLVie1cfacSZU-ESfuBI,18231
- datamule/sec/submissions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/sec/submissions/downloader.py,sha256=60wX2Yml1UCuxOtU0xMxqqeyHhrypCmlDQ0jZF-StJo,2665
- datamule/sec/submissions/eftsquery.py,sha256=mSZon8rlW8dxma7M49ZW5V02Fn-ENOdt9TNO6elBrhE,27983
- datamule/sec/submissions/monitor.py,sha256=Im2kgnUehhTgyY2Vq3uk07n4Vkj4PjII_SsRDi8ehAE,5384
- datamule/sec/submissions/streamer.py,sha256=EXyWNCD9N6mZmvm9lFSCFodF19zSQ8jfIbWPZNp0K5Y,11253
- datamule/sec/submissions/textsearch.py,sha256=-a5yIrrxxtaK10IJeywFmXuJmSndYL9VKm4SC4I9JAs,5808
- datamule/sec/xbrl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/sec/xbrl/downloadcompanyfacts.py,sha256=rMWRiCF9ci_gNZMJ9MC2c_PGEd-yEthawQ0CtVwWTjM,3323
- datamule/sec/xbrl/filter_xbrl.py,sha256=g9OT4zrNS0tiUJeBIwbCs_zMisOBkpFnMR3tV4Tr39Q,1316
- datamule/sec/xbrl/streamcompanyfacts.py,sha256=WyJIwuy5mNMXWpx_IkhFzDMe9MOfQ-vNkWl_JzBzFmc,3323
- datamule/sec/xbrl/xbrlmonitor.py,sha256=TKFVfSyyUUfUgFQw4WxEVs4g8Nh-2C0tygNIRmTqW3Y,5848
- datamule/seclibrary/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datamule/seclibrary/bq.py,sha256=C8sb_rpXTvchprrFLcbRar4Qi0XWW25tnv1YsHSS5o4,18025
- datamule/seclibrary/downloader.py,sha256=fJztJ_sEfv2oHHbDff07DRlXLmztXnzt3Yvv5YaZgGk,13718
- datamule/seclibrary/query.py,sha256=qGuursTERRbOGfoDcYcpo4oWkW3PCBW6x1Qf1Puiak4,7352
- datamule-1.2.2.dist-info/METADATA,sha256=QpXbg-4cnRknynj-W4Z2Sc1zKlWan62zEG8OrN2_E-A,512
- datamule-1.2.2.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- datamule-1.2.2.dist-info/top_level.txt,sha256=iOfgmtSMFVyr7JGl_bYSTDry79JbmsG4p8zKq89ktKk,9
- datamule-1.2.2.dist-info/RECORD,,