datamule 1.2.2__py3-none-any.whl → 1.2.9__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (42)
  1. datamule/__init__.py +1 -0
  2. datamule/document/document.py +27 -14
  3. datamule/document/mappings/atsn.py +208 -0
  4. datamule/document/mappings/cfportal.py +346 -0
  5. datamule/document/mappings/d.py +125 -0
  6. datamule/document/mappings/ex102_abs.py +63 -0
  7. datamule/document/mappings/ex99a_sdr.py +1 -0
  8. datamule/document/mappings/ex99c_sdr.py +0 -0
  9. datamule/document/mappings/ex99g_sdr.py +0 -0
  10. datamule/document/mappings/ex99i_sdr.py +0 -0
  11. datamule/document/mappings/information_table.py +35 -0
  12. datamule/document/mappings/nmfp.py +275 -0
  13. datamule/document/mappings/npx.py +85 -0
  14. datamule/document/mappings/onefourtyfour.py +68 -0
  15. datamule/document/mappings/ownership.py +163 -0
  16. datamule/document/mappings/proxy_voting_record.py +17 -0
  17. datamule/document/mappings/sbs.py +0 -0
  18. datamule/document/mappings/sbsef.py +13 -0
  19. datamule/document/mappings/schedule13.py +117 -0
  20. datamule/document/mappings/sdr.py +63 -0
  21. datamule/document/mappings/submission_metadata.py +9 -0
  22. datamule/document/mappings/ta.py +0 -0
  23. datamule/document/mappings/thirteenfhr.py +72 -0
  24. datamule/document/mappings/twentyfivense.py +22 -0
  25. datamule/document/mappings/twentyfourf2nt.py +100 -0
  26. datamule/document/processing.py +170 -42
  27. datamule/document/table.py +60 -5
  28. datamule/helper.py +10 -1
  29. datamule/index.py +8 -10
  30. datamule/portfolio.py +17 -16
  31. datamule/sec/submissions/monitor.py +173 -120
  32. datamule/sec/submissions/textsearch.py +0 -4
  33. datamule/sec/xbrl/streamcompanyfacts.py +1 -1
  34. datamule/seclibrary/downloader.py +2 -2
  35. datamule/submission.py +92 -36
  36. {datamule-1.2.2.dist-info → datamule-1.2.9.dist-info}/METADATA +1 -2
  37. datamule-1.2.9.dist-info/RECORD +62 -0
  38. datamule/sec/rss/monitor.py +0 -416
  39. datamule-1.2.2.dist-info/RECORD +0 -40
  40. /datamule/{sec/rss → document/mappings}/__init__.py +0 -0
  41. {datamule-1.2.2.dist-info → datamule-1.2.9.dist-info}/WHEEL +0 -0
  42. {datamule-1.2.2.dist-info → datamule-1.2.9.dist-info}/top_level.txt +0 -0
datamule/document/processing.py CHANGED
@@ -17,6 +17,17 @@ def process_tabular_data(self):
         tables = process_13fhr(self.data, self.accession)
     elif self.type in ["INFORMATION TABLE"]:
         tables = process_information_table(self.data, self.accession)
+    elif self.type in ["25-NSE", "25-NSE/A"]:
+        tables = process_25nse(self.data, self.accession)
+    # complete mark:
+    elif self.type in ["EX-102"]:
+        tables = process_ex102_abs(self.data, self.accession)
+    elif self.type in ["D","D/A"]:
+        tables = process_d(self.data, self.accession)
+    elif self.type in ["N-PX","N-PX/A"]:
+        tables = process_npx(self.data, self.accession)
+
+
     elif self.type in ["SBSEF","SBSEF/A","SBSEF-V","SBSEF-W"]:
         tables = process_sbsef(self.data, self.accession)
     elif self.type in ["SDR","SDR/A","SDR-W","SDR-A"]:
@@ -33,8 +44,7 @@ def process_tabular_data(self):
         tables = process_144(self.data, self.accession)
     elif self.type in ["24F-2NT", "24F-2NT/A"]:
         tables = process_24f2nt(self.data, self.accession)
-    elif self.type in ["25-NSE", "25-NSE/A"]:
-        tables = process_25nse(self.data, self.accession)
+
     elif self.type in ["ATS-N", "ATS-N/A"]:
         tables = process_ats(self.data, self.accession)
     # elif self.type in ["C","C-W","C-U","C-U-W","C/A","C/A-W",
@@ -42,8 +52,7 @@ def process_tabular_data(self):
     # tables = process_c(self.data, self.accession)
     elif self.type in ["CFPORTAL","CFPORTAL/A","CFPORTAL-W"]:
         tables = process_cfportal(self.data, self.accession)
-    # elif self.type in ["D","D/A"]:
-    # tables = process_d(self.data, self.accession)
+
     # elif self.type in ["MA","MA-A","MA/A","MA-I","MA-I/A","MA-W"]:
     # tables = process_ma(self.data, self.accession)
     # elif self.type in ["N-CEN","N-CEN/A"]:
@@ -53,8 +62,7 @@ def process_tabular_data(self):
     # tables = process_nmfp(self.data, self.accession)
     # elif self.type in ["NPORT-P","NPORT-P/A"]:
     # tables = process_nportp(self.data, self.accession)
-    elif self.type in ["N-PX","N-PX/A"]:
-        tables = process_npx(self.data, self.accession)
+
     # elif self.type in ["TA-1","TA-1/A","TA-W","TA-2","TA-2/A"]:
     # tables = process_ta(self.data, self.accession)
     elif self.type in ["X-17A-5","X-17A-5/A"]:
@@ -66,10 +74,11 @@ def process_tabular_data(self):
         tables = process_reg_a(self.data, self.accession)
     # elif self.type in ["SBSE","SBSE/A","SBSE-A","SBSE-A/A","SBSE-BD","SBSE-BD/A","SBSE-C","SBSE-W","SBSE-CCO-RPT","SBSE-CCO-RPT/A"]:
     # tables = process_sbs(self.data, self.accession)
-    # elif self.type in ["EX-102"]:
-    # tables = process_ex102_abs(self.data, self.accession)
+
     elif self.type == "PROXY VOTING RECORD":
         tables = process_proxy_voting_record(self.data, self.accession)
+    elif self.type == 'submission_metadata':
+        tables = process_submission_metadata(self.content, self.accession)
     else:
         warn(f"Processing for {self.type} is not implemented yet.")
         return []
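With these branches, Form D, EX-102 (ABS-EE), N-PX, 25-NSE, and the synthetic `submission_metadata` type are now routed to dedicated processors instead of falling through to the warning. A hedged usage sketch follows; how `process_tabular_data` is bound to documents and whether a document must be loaded first is assumed here, not shown in this hunk:

```python
from datamule import Portfolio  # assumed top-level export

portfolio = Portfolio("filings")                      # directory of downloaded submissions
for submission in portfolio:                          # Portfolio is iterable (see portfolio.py below)
    for doc in submission.document_type(["D", "N-PX", "EX-102"]):
        tables = doc.process_tabular_data()           # dispatches on doc.type per the branches above
        for table in tables:
            table.write_csv(f"{table.type}.csv")      # Table.write_csv is added in table.py below
```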
@@ -95,6 +104,67 @@ def _flatten_dict(d, parent_key=''):
 
     return items
 
+# flattens in a different way
+def flatten_dict_to_rows(d, parent_key='', sep='_'):
+
+    if isinstance(d, list):
+        # If input is a list, flatten each item and return all rows
+        all_rows = []
+        for item in d:
+            all_rows.extend(flatten_dict_to_rows(item, parent_key, sep))
+        return all_rows
+
+    if not isinstance(d, dict):
+        # If input is a primitive value, return single row
+        return [{parent_key: d}] if parent_key else []
+
+    # Input is a dictionary
+    rows = [{}]
+
+    for k, v in d.items():
+        new_key = f"{parent_key}{sep}{k}" if parent_key else k
+
+        if isinstance(v, dict):
+            # Recursively flatten nested dictionaries
+            nested_rows = flatten_dict_to_rows(v, new_key, sep)
+            # Cross-product with existing rows
+            new_rows = []
+            for row in rows:
+                for nested_row in nested_rows:
+                    combined_row = row.copy()
+                    combined_row.update(nested_row)
+                    new_rows.append(combined_row)
+            rows = new_rows
+
+        elif isinstance(v, list):
+            # Handle lists - create multiple rows
+            if not v:  # Empty list
+                for row in rows:
+                    row[new_key] = ''
+            else:
+                new_rows = []
+                for row in rows:
+                    for list_item in v:
+                        new_row = row.copy()
+                        if isinstance(list_item, dict):
+                            # Recursively flatten dict items in list
+                            nested_rows = flatten_dict_to_rows(list_item, new_key, sep)
+                            for nested_row in nested_rows:
+                                combined_row = new_row.copy()
+                                combined_row.update(nested_row)
+                                new_rows.append(combined_row)
+                        else:
+                            # Primitive value in list
+                            new_row[new_key] = list_item
+                            new_rows.append(new_row)
+                rows = new_rows
+        else:
+            # Handle primitive values
+            for row in rows:
+                row[new_key] = v
+
+    return rows
+
 def process_ownership(data, accession):
     tables = []
     if 'ownershipDocument' not in data:
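Where `_flatten_dict` collapses a nested structure into a single record, `flatten_dict_to_rows` expands list-valued keys into one row per element (a cross-product across lists), which is what the row-oriented tables below rely on. A quick illustration with made-up data, assuming the function is importable from `datamule/document/processing.py`:

```python
from datamule.document.processing import flatten_dict_to_rows  # module path per this diff

payload = {
    "issuerName": "Example Corp",                      # invented values for illustration
    "relatedPersonInfo": [
        {"firstName": "Ada", "relationship": "Director"},
        {"firstName": "Max", "relationship": "Executive Officer"},
    ],
}

rows = flatten_dict_to_rows(payload)
# [{'issuerName': 'Example Corp', 'relatedPersonInfo_firstName': 'Ada', 'relatedPersonInfo_relationship': 'Director'},
#  {'issuerName': 'Example Corp', 'relatedPersonInfo_firstName': 'Max', 'relatedPersonInfo_relationship': 'Executive Officer'}]
```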
@@ -346,33 +416,41 @@ def process_cfportal(data, accession):
 
     return tables
 
-# def process_d(data, accession):
-# tables = []
-# primary_issuer = safe_get(data, ['edgarSubmission', 'primaryIssuer'])
-# if primary_issuer:
-# metadata = Table(_flatten_dict(primary_issuer), 'metadata_d', accession)
-
-# metadata_columns = ['schemaVersion', 'submissionType', 'testOrLive', 'returnCopy', 'contactData', 'notificationAddressList']
-# for col in metadata_columns:
-# col_data = safe_get(data, ['edgarSubmission', col])
-# if col_data:
-# metadata.add_column(col, col_data)
-
-# tables.append(metadata)
-
-# issuer_list = safe_get(data, ['edgarSubmission', 'issuerList'])
-# if issuer_list:
-# tables.append(Table(_flatten_dict(issuer_list), 'primary_issuer_d', accession))
-
-# offering_data = safe_get(data, ['edgarSubmission', 'offeringData'])
-# if offering_data:
-# tables.append(Table(_flatten_dict(offering_data), 'offering_data_d', accession))
-
-# related_persons_list = safe_get(data, ['edgarSubmission', 'relatedPersonsList'])
-# if related_persons_list:
-# tables.append(Table(_flatten_dict(related_persons_list), 'related_persons_list_d', accession))
+def process_d(data, accession):
+    tables = []
+    groups = [('contactData', 'contact_data_d'),
+              ('notificationAddressList', 'notification_address_list_d'),
+              ('primaryIssuer', 'primary_issuer_d'),
+              ('issuerList', 'issuer_list_d'),
+              ('relatedPersonsList', 'related_persons_list_d'),
+              ('offeringData', 'offering_data_d'),
+              ]
+    for group,table_type in groups:
+        if group == 'relatedPersonList':
+            group_data = data['edgarSubmission'].pop('relatedPersonInfo', None)
+            data['edgarSubmission'].pop(group, None)
+        elif group == 'issuerList':
+            group_data = data['edgarSubmission'].pop('issuerList', None)
+        else:
+            group_data = data['edgarSubmission'].pop(group, None)
+
+        if group_data:
+            # Special handling ONLY for relatedPersonsList
+            if group in ['relatedPersonsList', 'issuerList','offeringData']:
+                # Use the new flatten_dict_to_rows ONLY for this key
+                flattened_rows = flatten_dict_to_rows(group_data)
+                if flattened_rows:
+                    tables.append(Table(flattened_rows, table_type, accession))
+            else:
+                # Everything else remains EXACTLY the same
+                tables.append(Table(_flatten_dict(group_data), table_type, accession))
+
+
+
+    metadata_table = Table(_flatten_dict(data['edgarSubmission']), 'metadata_d', accession)
+    tables.append(metadata_table)
 
-# return tables
+    return tables
 
 # def process_nmfp(data, accession):
 # tables = []
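For Form D, each top-level group under `edgarSubmission` is popped into its own table: `issuerList`, `relatedPersonsList`, and `offeringData` go through the row-wise `flatten_dict_to_rows`, the other groups go through `_flatten_dict`, and whatever remains becomes the `metadata_d` table. A sketch with a pared-down, invented payload (the real EDGAR structure is richer):

```python
from datamule.document.processing import process_d  # module path per this diff

data = {
    "edgarSubmission": {
        "schemaVersion": "X0708",
        "primaryIssuer": {"entityName": "Example Fund LP", "cik": "0000000000"},
        "offeringData": {"industryGroup": {"industryGroupType": "Pooled Investment Fund"}},
    }
}

tables = process_d(data, accession="0000000000-24-000001")
print([t.type for t in tables])   # ['primary_issuer_d', 'offering_data_d', 'metadata_d']
```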
@@ -583,13 +661,39 @@ def process_reg_a(data, accession):
 
 # return tables
 
-# def process_ex102_abs(data, accession):
-# tables = []
-# asset_data = safe_get(data, ['assetData'])
-# if asset_data:
-# tables.append(Table(_flatten_dict(asset_data), 'abs', accession))
-# raise NotImplementedError("Need to implement the rest of the ABS processing")
-# return tables
+def process_ex102_abs(data, accession):
+    tables = []
+    data = safe_get(data, ['assetData', 'assets'])
+
+    # Create assets list: all items without their 'property' field
+    assets = [{k: v for k, v in item.items() if k != 'property'} for item in data]
+
+    # Create properties list in a more vectorized way
+    properties = []
+
+    # Handle dictionary properties
+    properties.extend([
+        item['property'] | {'assetNumber': item['assetNumber']}
+        for item in data
+        if 'property' in item and isinstance(item['property'], dict)
+    ])
+
+    # Handle list properties - flatten in one operation
+    properties.extend([
+        prop | {'assetNumber': item['assetNumber']}
+        for item in data
+        if 'property' in item and isinstance(item['property'], list)
+        for prop in item['property']
+        if isinstance(prop, dict)
+    ])
+
+    if assets:
+        tables.append(Table(_flatten_dict(assets), 'assets_ex102_absee', accession))
+
+    if properties:
+        tables.append(Table(_flatten_dict(properties), 'properties_ex102_absee', accession))
+
+    return tables
 
 # def process_ma(data, accession):
 # tables = []
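`process_ex102_abs` splits each ABS-EE asset record in two: the asset's own fields (minus `property`) and one row per property, each tagged with the parent `assetNumber` so the two tables can be joined back together. A toy illustration with invented field values (the `|` dict merge requires Python 3.9+):

```python
from datamule.document.processing import process_ex102_abs  # module path per this diff

data = {
    "assetData": {
        "assets": [
            {
                "assetNumber": "1",
                "originalLoanAmount": "250000",
                "property": [                       # may be a single dict or a list of dicts
                    {"propertyState": "CA", "mostRecentValuationAmount": "400000"},
                    {"propertyState": "NV", "mostRecentValuationAmount": "150000"},
                ],
            }
        ]
    }
}

tables = process_ex102_abs(data, accession="0000000000-24-000002")
# assets rows:     [{'assetNumber': '1', 'originalLoanAmount': '250000'}]
# properties rows: [{'propertyState': 'CA', 'mostRecentValuationAmount': '400000', 'assetNumber': '1'},
#                   {'propertyState': 'NV', 'mostRecentValuationAmount': '150000', 'assetNumber': '1'}]
```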
@@ -601,4 +705,28 @@ def process_reg_a(data, accession):
 # raise NotImplementedError("Need to implement the rest of the MA processing")
 
 # def process_ncen(data, accession):
-# raise NotImplementedError("Need to implement the N-CEN processing")
+# raise NotImplementedError("Need to implement the N-CEN processing")
+
+# WIP
+# Note: going to pause this for now, as I don't have a great way of putting this in a csv.
+def process_submission_metadata(data,accession):
+    tables = []
+    document_data = safe_get(data, ['documents'])
+    if document_data:
+        tables.append(Table(_flatten_dict(document_data), 'document_submission_metadata', accession))
+
+    reporting_owner_data = safe_get(data,['reporting-owner'])
+    if reporting_owner_data:
+        tables.append(Table(_flatten_dict(reporting_owner_data), 'reporting_owner_submission_metadata', accession))
+
+    issuer_data = safe_get(data,['issuer'])
+    if issuer_data:
+        tables.append(Table(_flatten_dict(issuer_data), 'issuer_submission_metadata', accession))
+
+    # # construct metadata
+    # accession-number date-of-filing-date-change, depositor-cik effectiveness-date
+
+    # # other tables
+    # depositor, securitizer
+
+    return tables
datamule/document/table.py CHANGED
@@ -18,7 +18,12 @@ from .mappings.thirteenfhr import *
 from .mappings.twentyfivense import *
 from .mappings.twentyfourf2nt import *
 from .mappings.information_table import *
+from .mappings.submission_metadata import *
+from .mappings.ex102_abs import *
+from .mappings.d import *
 
+from pathlib import Path
+import csv
 # need to check if mappings correctly create new columns
 class Table():
     def __init__(self, data, type,accession):
@@ -27,11 +32,18 @@ class Table():
         self.type = type
         self.data = data
         self.accession = accession
-        self.columns = self.determine_columns()
+        self.columns = self.determine_columns_complete()
+
+    def determine_columns_complete(self):
+        if not self.data:
+            return []
+        return list(set().union(*(row.keys() for row in self.data)))
+
 
     def determine_columns(self):
         if len(self.data) == 0:
             return []
+
         return self.data[0].keys()
 
     def add_column(self,column_name,value):
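`determine_columns_complete` takes the union of keys across all rows, so rows with uneven shapes (a common output of `flatten_dict_to_rows`) no longer lose columns that happen to be missing from the first row, which is all the old `determine_columns` looked at. The expression it uses, in isolation:

```python
# Standalone illustration of the union-of-keys logic in determine_columns_complete.
rows = [
    {"entityName": "Example Corp", "cik": "0000000001"},
    {"entityName": "Example LLC", "stateOfIncorporation": "DE"},  # extra key, no 'cik'
]

columns = list(set().union(*(row.keys() for row in rows)))
print(sorted(columns))  # ['cik', 'entityName', 'stateOfIncorporation']
```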
@@ -190,6 +202,17 @@ class Table():
         elif self.type == 'signature_schedule_13':
             mapping_dict = signature_schedule_13_dict
 
+        # D
+        elif self.type == 'issuer_list_d':
+            mapping_dict = issuer_list_d_dict
+        elif self.type == 'metadata_d':
+            mapping_dict = metadata_d_dict
+        elif self.type == 'offering_data_d':
+            mapping_dict = offering_data_d_dict
+        elif self.type == 'primary_issuer_d':
+            mapping_dict = primary_issuer_d_dict
+        elif self.type == 'related_persons_list_d':
+            mapping_dict = related_persons_d_dict
         # SDR
         elif self.type == 'sdr':
             mapping_dict = sdr_dict
@@ -227,6 +250,15 @@ class Table():
             mapping_dict = item_9_24f2nt_dict
         elif self.type == 'signature_info_schedule_a':
             mapping_dict = signature_24f2nt_dict
+        # ABS
+        elif self.type == 'assets_ex102_absee':
+            mapping_dict = assets_dict_ex102_abs
+        elif self.type =='properties_ex102_absee':
+            mapping_dict = properties_dict_ex102_abs
+        # submission metadata
+        elif self.type == 'document_submission_metadata':
+            mapping_dict = document_submission_metadata_dict
+
 
         else:
             mapping_dict = {}
@@ -245,9 +277,6 @@ class Table():
             for old_key, new_key in mapping_dict.items():
                 if old_key in row:
                     ordered_row[new_key] = row.pop(old_key)
-                else:
-                    # if the old key is not present, set the new key to None
-                    ordered_row[new_key] = None
 
             # Then add any remaining keys that weren't in the mapping
             for key, value in row.items():
@@ -257,4 +286,30 @@ class Table():
             row.clear()
             row.update(ordered_row)
 
-        self.determine_columns()
+        # Update the columns after mapping
+        columns = set(self.columns)
+        # remove the old columns that are now in the mapping
+        columns.difference_update(mapping_dict.keys())
+        # add the new columns from the mapping
+        columns.update(mapping_dict.values())
+        # add the accession column to the columns
+        columns.add('accession')
+
+        self.columns = list(columns)
+
+    def write_csv(self, output_file):
+        output_file = Path(output_file)
+        fieldnames = self.columns
+
+        # Check if the file already exists
+        if output_file.exists():
+            # Append to existing file without writing header
+            with open(output_file, 'a', newline='') as csvfile:
+                writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
+                writer.writerows(self.data)
+        else:
+            # Create new file with header
+            with open(output_file, 'w', newline='') as csvfile:
+                writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
+                writer.writeheader()
+                writer.writerows(self.data)
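`write_csv` writes a header only when the file does not exist yet and appends on subsequent calls, so one CSV can accumulate rows across many tables of the same type. Because `csv.DictWriter` fills missing keys with empty cells but raises `ValueError` for row keys not listed in `fieldnames`, the column recomputation above matters. A hedged usage sketch with invented data:

```python
from datamule.document.table import Table  # module path per this diff

rows = [{"entityName": "Example Corp", "cik": "0000000001"}]   # illustrative rows
table = Table(rows, "metadata_d", "0000000000-24-000001")
table.write_csv("metadata_d.csv")   # creates the file and writes the header
table.write_csv("metadata_d.csv")   # appends the rows again, no second header
```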
datamule/helper.py CHANGED
@@ -79,7 +79,16 @@ def _process_cik_and_metadata_filters(cik=None, ticker=None, **kwargs):
 
     # Convert ticker to CIK if provided
     if ticker is not None:
-        cik = get_cik_from_dataset('listed_filer_metadata', 'ticker', ticker)
+        if isinstance(ticker, str):
+            ticker = [ticker]
+
+        ciks_from_ticker = []
+        for t in ticker:
+            ciks = get_cik_from_dataset('listed_filer_metadata', 'ticker', t)
+            if ciks:
+                ciks_from_ticker.extend(ciks)
+
+        cik = ciks
 
     # Normalize CIK format
     if cik is not None:
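Ticker filtering now accepts a single ticker string or a list of tickers, each resolved against the listed-filer metadata dataset (note that the accumulated `ciks_from_ticker` list is built, but the final assignment uses `ciks`, the result of the last lookup). A hedged usage sketch; the tickers are illustrative and the lookup assumes the metadata dataset is available:

```python
from datamule.helper import _process_cik_and_metadata_filters  # module path per this diff

cik_filter = _process_cik_and_metadata_filters(ticker=["AAPL", "MSFT"])  # was limited to one ticker before
print(cik_filter)   # normalized CIK(s) resolved from the ticker lookup
```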
datamule/index.py CHANGED
@@ -1,16 +1,16 @@
-from pathlib import Path
+
 from .sec.submissions.textsearch import query
-from .helper import _process_cik_and_metadata_filters, load_package_dataset
+from .helper import _process_cik_and_metadata_filters
+from pathlib import Path
 
 class Index:
-    def __init__(self, path=None):
-        self.path = Path(path) if path else None
+    def __init__(self):
+        pass
 
     def search_submissions(
         self,
         text_query,
-        start_date=None,
-        end_date=None,
+        filing_date=None,
         submission_type=None,
         cik=None,
         ticker=None,
@@ -47,16 +47,14 @@ class Index:
         # Execute the search query
         results = query(
             f'{text_query}',
-            filing_date=(start_date, end_date),
+            filing_date=filing_date,
             requests_per_second=requests_per_second,
             quiet=quiet,
             submission_type=submission_type,
             **kwargs
         )
 
-        # Save results to path if specified
-        if self.path:
-            self._save_results(results, text_query)
+
 
         return results
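`Index` no longer takes a path or saves results itself, and `search_submissions` forwards a single `filing_date` argument to the full-text search instead of assembling it from `start_date`/`end_date`. A hedged usage sketch; the `(start, end)` tuple mirrors what the old code constructed, and the query string is illustrative:

```python
from datamule.index import Index  # module path per this diff

index = Index()                                    # no path argument in 1.2.9
results = index.search_submissions(
    "cybersecurity incident",                      # illustrative full-text query
    filing_date=("2024-01-01", "2024-03-31"),      # assumption: same (start, end) tuple the old code built
    submission_type="8-K",
)
```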
 
datamule/portfolio.py CHANGED
@@ -9,22 +9,28 @@ import os
 from .helper import _process_cik_and_metadata_filters
 from .seclibrary.downloader import download as seclibrary_download
 from .sec.xbrl.filter_xbrl import filter_xbrl
-from .sec.submissions.monitor import monitor
-from .sec.xbrl.xbrlmonitor import XBRLMonitor
+from .sec.submissions.monitor import Monitor
+#from .sec.xbrl.xbrlmonitor import XBRLMonitor
 
 
 class Portfolio:
     def __init__(self, path):
         self.path = Path(path)
+        self.api_key = None
         self.submissions = []
         self.submissions_loaded = False
         self.MAX_WORKERS = os.cpu_count() - 1
+
+        self.monitor = Monitor()
 
         if self.path.exists():
             self._load_submissions()
             self.submissions_loaded = True
         else:
             self.path.mkdir(parents=True, exist_ok=True)
+
+    def set_api_key(self, api_key):
+        self.api_key = api_key
 
     def _load_submissions(self):
         folders = [f for f in self.path.iterdir() if f.is_dir()]
@@ -132,6 +138,7 @@
         seclibrary_download(
             output_dir=self.path,
             cik=cik,
+            api_key=self.api_key,
             submission_type=submission_type,
             filing_date=filing_date,
             accession_numbers=self.accession_numbers if hasattr(self, 'accession_numbers') else None,
@@ -149,20 +156,18 @@
         )
 
         self.submissions_loaded = False
-    def monitor_submissions(self,data_callback=None, poll_callback=None, submission_type=None, cik=None,
-                            polling_interval=200, requests_per_second=5, quiet=False, start_date=None, ticker=None, **kwargs):
+    def monitor_submissions(self, data_callback=None, interval_callback=None,
+                            polling_interval=1000, quiet=True, start_date=None,
+                            validation_interval=600000):
 
-        cik = _process_cik_and_metadata_filters(cik, ticker, **kwargs)
 
-        monitor(
+        self.monitor.monitor_submissions(
             data_callback=data_callback,
-            poll_callback=poll_callback,
-            cik=cik,
-            submission_type=submission_type,
+            interval_callback=interval_callback,
             polling_interval=polling_interval,
-            requests_per_second=requests_per_second,
             quiet=quiet,
-            start_date=start_date
+            start_date=start_date,
+            validation_interval=validation_interval
         )
 
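Monitoring now delegates to a `Monitor` instance created in `__init__`; the cik/ticker/submission-type filters and `requests_per_second` are gone from this wrapper, and a `validation_interval` is forwarded instead. Together with `set_api_key`, a hedged usage sketch (the callback payload and the interval units are not shown in this diff, so treat them as assumptions):

```python
from datamule import Portfolio  # assumed top-level export

portfolio = Portfolio("filings")
portfolio.set_api_key("YOUR_API_KEY")          # stored on the Portfolio and passed to seclibrary_download

def on_new_submissions(batch):                 # hypothetical callback; payload shape not shown here
    print(batch)

portfolio.monitor_submissions(
    data_callback=on_new_submissions,
    polling_interval=1000,                     # defaults from the new signature (likely milliseconds)
    validation_interval=600000,
    quiet=True,
)
```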
 
@@ -179,8 +184,4 @@
             document_types = [document_types]
 
         for submission in self.submissions:
-            yield from submission.document_type(document_types)
-
-    def keep(self,document_type):
-        for submission in self.__iter__():
-            submission.keep(document_type)
+            yield from submission.document_type(document_types)