epstein-files 1.0.14__py3-none-any.whl → 1.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
epstein_files/__init__.py CHANGED
@@ -17,11 +17,11 @@ from epstein_files.epstein_files import EpsteinFiles, document_cls
  from epstein_files.documents.document import INFO_PADDING, Document
  from epstein_files.documents.email import Email
  from epstein_files.util.constant.output_files import ALL_EMAILS_PATH, TEXT_MSGS_HTML_PATH, make_clean
- from epstein_files.util.env import args, specified_names
+ from epstein_files.util.env import args
  from epstein_files.util.file_helper import coerce_file_path, extract_file_id
  from epstein_files.util.logging import logger
- from epstein_files.util.output import (print_emails, print_json_files, print_json_stats,
- write_json_metadata, write_urls)
+ from epstein_files.util.output import (print_emails_section, print_json_files, print_json_stats,
+ print_other_files_section, print_text_messages_section, write_json_metadata, write_urls)
  from epstein_files.util.rich import build_highlighter, console, print_header, print_panel, write_html
  from epstein_files.util.timer import Timer
  from epstein_files.util.word_count import write_word_counts_html
@@ -49,12 +49,12 @@ def generate_html() -> None:
  exit()

  if args.output_texts:
- epstein_files.print_text_messages_section()
+ print_text_messages_section(epstein_files)
  timer.print_at_checkpoint(f'Printed {len(epstein_files.imessage_logs)} text message logs')

  if args.output_emails:
- emails_printed = print_emails(epstein_files)
- timer.print_at_checkpoint(f"Printed {emails_printed:,} emails")
+ emails_that_were_printed = print_emails_section(epstein_files)
+ timer.print_at_checkpoint(f"Printed {len(emails_that_were_printed):,} emails")

  if args.output_other:
  if args.uninteresting:
@@ -62,7 +62,7 @@ def generate_html() -> None:
  else:
  files = [f for f in epstein_files.other_files if args.all_other_files or f.is_interesting()]

- epstein_files.print_other_files_section(files)
+ print_other_files_section(files, epstein_files)
  timer.print_at_checkpoint(f"Printed {len(files)} other files (skipped {len(epstein_files.other_files) - len(files)})")

  # Save output
@@ -86,7 +86,7 @@ def epstein_search():

  for search_term in args.positional_args:
  temp_highlighter = build_highlighter(search_term)
- search_results = epstein_files.docs_matching(search_term, specified_names)
+ search_results = epstein_files.docs_matching(search_term, args.names)
  console.line(2)
  print_panel(f"Found {len(search_results)} documents matching '{search_term}'", padding=(0, 0, 0, 3))

@@ -160,8 +160,8 @@ class Document:
  def file_size(self) -> int:
  return file_size(self.file_path)

- def file_size_str(self) -> str:
- return file_size_str(self.file_path)
+ def file_size_str(self, decimal_places: int | None = None) -> str:
+ return file_size_str(self.file_path, decimal_places)

  def info(self) -> list[Text]:
  """0 to 2 sentences containing the info_txt() as well as any configured description."""
@@ -171,14 +171,14 @@ class Document:
  ])

  def info_txt(self) -> Text | None:
- """Secondary info about this file (recipients, level of certainty, etc). Overload in subclasses."""
+ """Secondary info about this file (description recipients, etc). Overload in subclasses."""
  return None

  def is_duplicate(self) -> bool:
  return bool(self.config and self.config.duplicate_of_id)

  def is_local_extract_file(self) -> bool:
- """True if file created by extracting text from a court doc (identifiable from filename e.g. HOUSE_OVERSIGHT_012345_1.txt)."""
+ """True if extracted from other file (identifiable from filename e.g. HOUSE_OVERSIGHT_012345_1.txt)."""
  return is_local_extract_file(self.filename)

  def length(self) -> int:
@@ -234,6 +234,7 @@ class Document:
  return text

  def sort_key(self) -> tuple[datetime, str, int]:
+ """Sort by timestamp, file_id, then whether or not it's a duplicate file."""
  if self.is_duplicate():
  sort_id = self.config.duplicate_of_id
  dupe_idx = 1
@@ -253,7 +254,7 @@ class Document:
  txt.append(' (', style=SYMBOL_STYLE)
  txt.append(f"{timestamp_str}", style=TIMESTAMP_DIM).append(')', style=SYMBOL_STYLE)

- txt.append(' [').append(key_value_txt('size', Text(self.file_size_str(), style='aquamarine1')))
+ txt.append(' [').append(key_value_txt('size', Text(self.file_size_str(0), style='aquamarine1')))
  txt.append(", ").append(key_value_txt('lines', self.num_lines()))

  if self.config and self.config.duplicate_of_id:
@@ -271,6 +272,7 @@ class Document:
  return Panel(Group(*sentences), border_style=self._class_style(), expand=False)

  def top_lines(self, n: int = 10) -> str:
+ """First n lines."""
  return '\n'.join(self.lines[0:n])[:MAX_TOP_LINES_LEN]

  def warn(self, msg: str) -> None:
@@ -17,7 +17,7 @@ from epstein_files.documents.document import CLOSE_PROPERTIES_CHAR, INFO_INDENT
  from epstein_files.documents.emails.email_header import (BAD_EMAILER_REGEX, EMAIL_SIMPLE_HEADER_REGEX,
  EMAIL_SIMPLE_HEADER_LINE_BREAK_REGEX, FIELD_NAMES, TIME_REGEX, EmailHeader)
  from epstein_files.util.constant.names import *
- from epstein_files.util.constant.strings import REDACTED, URL_SIGNIFIERS
+ from epstein_files.util.constant.strings import REDACTED
  from epstein_files.util.constants import *
  from epstein_files.util.data import (TIMEZONE_INFO, collapse_newlines, escape_single_quotes, extract_last_name,
  flatten, remove_timezone, uniquify)
@@ -41,6 +41,7 @@ LOCAL_EXTRACT_REGEX = re.compile(r"_\d$")

  SUPPRESS_LOGS_FOR_AUTHORS = ['Undisclosed recipients:', 'undisclosed-recipients:', 'Multiple Senders Multiple Senders']
  REWRITTEN_HEADER_MSG = "(janky OCR header fields were prettified, check source if something seems off)"
+ URL_SIGNIFIERS = ['gclid', 'htm', 'ref=', 'utm']
  APPEARS_IN = 'Appears in'
  MAX_CHARS_TO_PRINT = 4000
  MAX_NUM_HEADER_LINES = 14
@@ -134,6 +135,7 @@ JUNK_EMAILERS = [
  ]

  MAILING_LISTS = [
+ CAROLYN_RANGEL,
  INTELLIGENCE_SQUARED,
  'middle.east.update@hotmail.com',
  JP_MORGAN_USGIO,
@@ -341,6 +343,9 @@ class Email(Communication):
  else:
  for recipient in self.header.recipients():
  self.recipients.extend(self._emailer_names(recipient))
+
+ if self.author in MAILING_LISTS and (len(self.recipients) == 0 or self.recipients == [self.author]):
+ self.recipients = [JEFFREY_EPSTEIN] # Assume mailing list emails are to Epstein
  except Exception as e:
  console.print_exception()
  console.line(2)
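The last hunk above adds a recipient fallback: when the sender is a known mailing list and the parsed recipients are empty (or list only the sender itself), the email is assumed to be addressed to Jeffrey Epstein. A minimal standalone sketch of that rule, with a trimmed-down MAILING_LISTS for illustration:

    # Standalone illustration of the new fallback; not the package's actual class code.
    MAILING_LISTS = ['middle.east.update@hotmail.com']   # subset of the real list above
    JEFFREY_EPSTEIN = 'Jeffrey Epstein'

    def fallback_recipients(author: str, recipients: list[str]) -> list[str]:
        # Mailing-list blasts with no recipients (or only themselves) are assumed to go to Epstein.
        if author in MAILING_LISTS and (len(recipients) == 0 or recipients == [author]):
            return [JEFFREY_EPSTEIN]
        return recipients

    assert fallback_recipients('middle.east.update@hotmail.com', []) == ['Jeffrey Epstein']
    assert fallback_recipients('someone@example.com', []) == []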
@@ -589,7 +594,7 @@ class Email(Communication):
  self._merge_lines(3) # Merge 4th and 5th rows
  elif self.file_id in '026609 029402 032405 022695'.split():
  self._merge_lines(4) # Merge 5th and 6th rows
- elif self.file_id in ['019407', '031980', '030384', '033144', '030999', '033575', '029835', '030381', '033357']:
+ elif self.file_id in ['019407', '031980', '030384', '033144', '030999', '033575', '029835', '030381', '033357', '026924']:
  self._merge_lines(2, 4)
  elif self.file_id in ['029154', '029163']:
  self._merge_lines(2, 5)
@@ -692,7 +697,7 @@ class Email(Communication):
  self.config.description = extracted_description

  self.config.is_interesting = self.config.is_interesting or extracted_from_doc_cfg.is_interesting
- self.warn(f"Constructed synthetic config: {self.config}")
+ self.log(f"Constructed synthetic config: {self.config}")

  def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
  logger.debug(f"Printing '{self.filename}'...")
@@ -75,7 +75,7 @@ class TextMessage:
  return msg_txt

  def __rich__(self) -> Text:
+ timestamp_txt = Text(f"[{self.timestamp_str}]", style=TIMESTAMP_DIM).append(' ')
  author_style = get_style_for_name(self.author_str if self.author_str.startswith('+') else self.author)
  author_txt = Text(self.author_str, style=author_style)
- timestamp_txt = Text(f"[{self.timestamp_str}]", style=TIMESTAMP_DIM).append(' ')
  return Text('').append(timestamp_txt).append(author_txt).append(': ', style='dim').append(self._message())
@@ -12,7 +12,7 @@ from epstein_files.documents.communication import Communication
  from epstein_files.documents.imessage.text_message import TextMessage
  from epstein_files.util.constant.names import JEFFREY_EPSTEIN, UNKNOWN
  from epstein_files.util.constant.strings import AUTHOR, TIMESTAMP_STYLE
- from epstein_files.util.data import days_between, days_between_str, iso_timestamp, listify, sort_dict
+ from epstein_files.util.data import days_between, days_between_str, iso_timestamp, sort_dict
  from epstein_files.util.doc_cfg import Metadata, TextCfg
  from epstein_files.util.highlighted_group import get_style_for_name
  from epstein_files.util.logging import logger
@@ -121,23 +121,22 @@ class MessengerLog(Communication):
  return sender_counts

  @classmethod
- def logs_for(cls, author: str | None | list[str | None], logs: list['MessengerLog']) -> list['MessengerLog']:
- authors = listify(author)
- return logs if JEFFREY_EPSTEIN in authors else [log for log in logs if log.author in authors]
-
- @classmethod
- def summary_table(cls, imessage_logs: list['MessengerLog']) -> Table:
+ def summary_table(cls, log_files: list['MessengerLog']) -> Table:
  """Build a table summarizing the text messages in 'imessage_logs'."""
- counts_table = build_table("Text Message Counts By Author")
- counts_table.add_column(AUTHOR.title(), justify='left', style="steel_blue bold", width=30)
+ author_counts = cls.count_authors(log_files)
+ msg_count = sum([len(log.messages) for log in log_files])
+
+ footer = f"Deanonymized {msg_count - author_counts[None]:,} of {msg_count:,} text messages in"
+ counts_table = build_table("Text Message Counts By Author", caption=f"{footer} {len(log_files)} files")
+ counts_table.add_column(AUTHOR.title(), justify='left', width=30)
  counts_table.add_column('Files', justify='right', style='white')
  counts_table.add_column("Msgs", justify='right')
  counts_table.add_column('First Sent At', justify='center', highlight=True, width=21)
  counts_table.add_column('Last Sent At', justify='center', style=LAST_TIMESTAMP_STYLE, width=21)
  counts_table.add_column('Days', justify='right', style='dim')

- for name, count in sort_dict(cls.count_authors(imessage_logs)):
- logs = cls.logs_for(name, imessage_logs)
+ for name, count in sort_dict(author_counts):
+ logs = log_files if name == JEFFREY_EPSTEIN else [log for log in log_files if log.author == name]
  first_at = logs[0].first_message_at(name)
  last_at = logs[-1].first_message_at(name)
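As a quick sanity check of the new caption arithmetic (all numbers hypothetical): with 3 log files totalling 1,000 messages, 200 of which have no attributed author, the caption would render as follows.

    # Hypothetical values, not taken from the actual corpus:
    msg_count = 1_000
    unattributed = 200                                   # i.e. author_counts[None]
    footer = f"Deanonymized {msg_count - unattributed:,} of {msg_count:,} text messages in"
    print(f"{footer} 3 files")                           # Deanonymized 800 of 1,000 text messages in 3 files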

@@ -17,14 +17,15 @@ from rich.text import Text
  from epstein_files.documents.document import CLOSE_PROPERTIES_CHAR, WHITESPACE_REGEX, Document
  from epstein_files.util.constant.strings import *
  from epstein_files.util.constants import *
- from epstein_files.util.doc_cfg import FINANCIAL_REPORTS_AUTHORS, DocCfg, Metadata
+ from epstein_files.util.doc_cfg import DocCfg, Metadata
  from epstein_files.util.data import days_between, escape_single_quotes, remove_timezone, sort_dict, uniquify
  from epstein_files.util.file_helper import FILENAME_LENGTH, file_size_to_str
  from epstein_files.util.env import args
  from epstein_files.util.highlighted_group import styled_category
- from epstein_files.util.rich import QUESTION_MARK_TXT, add_cols_to_table, build_table, highlighter
+ from epstein_files.util.rich import QUESTION_MARK_TXT, build_table, highlighter
  from epstein_files.util.logging import logger

+ FIRST_FEW_LINES = 'First Few Lines'
  MAX_DAYS_SPANNED_TO_BE_VALID = 10
  MAX_EXTRACTED_TIMESTAMPS = 100
  MIN_TIMESTAMP = datetime(2000, 1, 1)
@@ -208,6 +209,36 @@ class OtherFile(Document):
  if num_days_spanned > MAX_DAYS_SPANNED_TO_BE_VALID and VAST_HOUSE not in self.text:
  self.log_top_lines(15, msg=timestamps_log_msg, level=logging.DEBUG)

+ @staticmethod
+ def count_by_category_table(files: Sequence['OtherFile']) -> Table:
+ counts = defaultdict(int)
+ category_bytes = defaultdict(int)
+
+ for file in files:
+ if file.category() is None:
+ logger.warning(f"file {file.file_id} has no category")
+
+ counts[file.category()] += 1
+ category_bytes[file.category()] += file.file_size()
+
+ table = build_table('Other Files Summary', ['Category', 'Count', 'Has Author', 'No Author', 'Size'])
+ table.columns[0].min_width = 14
+ table.columns[-1].style = 'dim'
+
+ for (category, count) in sort_dict(counts):
+ category_files = [f for f in files if f.category() == category]
+ known_author_count = Document.known_author_count(category_files)
+
+ table.add_row(
+ styled_category(category or UNKNOWN),
+ str(count),
+ str(known_author_count),
+ str(count - known_author_count),
+ file_size_to_str(category_bytes[category]),
+ )
+
+ return table
+
  @staticmethod
  def files_preview_table(files: Sequence['OtherFile']) -> Table:
  """Build a table of OtherFile documents."""
@@ -240,33 +271,3 @@ class OtherFile(Document):
  )

  return table
-
- @staticmethod
- def count_by_category_table(files: Sequence['OtherFile']) -> Table:
- counts = defaultdict(int)
- category_bytes = defaultdict(int)
-
- for file in files:
- if file.category() is None:
- logger.warning(f"file {file.file_id} has no category")
-
- counts[file.category()] += 1
- category_bytes[file.category()] += file.file_size()
-
- table = build_table('Other Files Summary', ['Category', 'Count', 'Has Author', 'No Author', 'Size'])
- table.columns[0].min_width = 14
- table.columns[-1].style = 'dim'
-
- for (category, count) in sort_dict(counts):
- category_files = [f for f in files if f.category() == category]
- known_author_count = Document.known_author_count(category_files)
-
- table.add_row(
- styled_category(category or UNKNOWN),
- str(count),
- str(known_author_count),
- str(count - known_author_count),
- file_size_to_str(category_bytes[category]),
- )
-
- return table
@@ -25,11 +25,11 @@ from epstein_files.util.constant.urls import (EPSTEIN_MEDIA, EPSTEIN_WEB, JMAIL,
  from epstein_files.util.constants import *
  from epstein_files.util.data import days_between, dict_sets_to_lists, json_safe, listify, sort_dict
  from epstein_files.util.doc_cfg import EmailCfg, Metadata
- from epstein_files.util.env import DOCS_DIR, args, logger, specified_names
+ from epstein_files.util.env import DOCS_DIR, args, logger
  from epstein_files.util.file_helper import file_size_str
  from epstein_files.util.highlighted_group import HIGHLIGHTED_NAMES, HighlightedNames, get_info_for_name, get_style_for_name
  from epstein_files.util.rich import (DEFAULT_NAME_STYLE, LAST_TIMESTAMP_STYLE, NA_TXT, add_cols_to_table,
- print_all_files_page_link, build_table, console, highlighter, link_text_obj, link_markup, print_author_header, print_centered,
+ print_other_page_link, build_table, console, highlighter, link_text_obj, link_markup, print_author_header, print_centered,
  print_panel, print_section_header, vertically_pad)
  from epstein_files.util.search_result import SearchResult
  from epstein_files.util.timer import Timer
@@ -201,9 +201,6 @@ class EpsteinFiles:

  return docs

- def imessage_logs_for(self, author: str | None | list[str | None]) -> Sequence[MessengerLog]:
- return MessengerLog.logs_for(author, self.imessage_logs)
-
  def json_metadata(self) -> str:
  """Create a JSON string containing metadata for all the files."""
  metadata = {
@@ -290,44 +287,6 @@ class EpsteinFiles:
  console.print(_build_signature_table(self.email_authors_to_device_signatures, (AUTHOR, DEVICE_SIGNATURE)))
  console.print(_build_signature_table(self.email_device_signatures_to_authors, (DEVICE_SIGNATURE, AUTHOR), ', '))

- def print_other_files_section(self, files: list[OtherFile]) -> None:
- """Returns the OtherFile objects that were interesting enough to print."""
- category_table = OtherFile.count_by_category_table(files)
- other_files_preview_table = OtherFile.files_preview_table(files)
- header_pfx = '' if args.all_other_files else 'Selected '
- print_section_header(f"{FIRST_FEW_LINES} of {len(files)} {header_pfx}Files That Are Neither Emails Nor Text Messages")
-
- if args.all_other_files:
- console.line(1)
- else:
- print_all_files_page_link(self)
- console.line(2)
-
- for table in [category_table, other_files_preview_table]:
- table.title = f"{header_pfx}{table.title}"
-
- print_centered(category_table)
- console.line(2)
- console.print(other_files_preview_table)
-
- def print_text_messages_section(self) -> None:
- """Print summary table and stats for text messages."""
- print_section_header('All of His Text Messages')
- print_centered("(conversations are sorted chronologically based on timestamp of first message)\n", style='gray30')
- authors: list[str | None] = specified_names if specified_names else [JEFFREY_EPSTEIN]
- log_files = self.imessage_logs_for(authors)
-
- for log_file in log_files:
- console.print(Padding(log_file))
- console.line(2)
-
- print_centered(MessengerLog.summary_table(self.imessage_logs))
- text_summary_msg = f"\nDeanonymized {Document.known_author_count(self.imessage_logs)} of "
- text_summary_msg += f"{len(self.imessage_logs)} {TEXT_MESSAGE} logs found in {len(self.all_files):,} files."
- console.print(text_summary_msg)
- imessage_msg_count = sum([len(log.messages) for log in self.imessage_logs])
- console.print(f"Found {imessage_msg_count} text messages in {len(self.imessage_logs)} iMessage log files.")
-
  def table_of_emailers(self) -> Table:
  attributed_emails = [e for e in self.non_duplicate_emails() if e.author]
  footer = f"Identified authors of {len(attributed_emails):,} out of {len(self.non_duplicate_emails()):,} emails."
@@ -38,6 +38,7 @@ BRAD_EDWARDS = 'Brad Edwards'
  BRAD_KARP = 'Brad Karp'
  BRAD_WECHSLER = 'Brad Wechsler'
  BORIS_NIKOLIC = 'Boris Nikolic'
+ CAROLYN_RANGEL = 'Carolyn Rangel'
  CECILE_DE_JONGH = 'Cecile de Jongh'
  CECILIA_STEEN = 'Cecilia Steen'
  CELINA_DUBIN = 'Celina Dubin'
@@ -187,9 +188,11 @@ VIRGINIA_GIUFFRE = 'Virginia Giuffre'

  # Organizations
  BOFA = 'BofA'
+ BOFA_MERRILL = f'{BOFA} / Merrill Lynch'
  CNN = 'CNN'
  DEUTSCHE_BANK = 'Deutsche Bank'
  ELECTRON_CAPITAL_PARTNERS = 'Electron Capital Partners'
+ EPSTEIN_FOUNDATION = 'Jeffrey Epstein VI Foundation'
  GOLDMAN_SACHS = 'Goldman Sachs'
  GOLDMAN_INVESTMENT_MGMT = f'{GOLDMAN_SACHS} Investment Management Division'
  HARVARD = 'Harvard'
@@ -57,12 +57,10 @@ TIMESTAMP_DIM = f"turquoise4 dim"
  AUTHOR = 'author'
  DEFAULT = 'default'
  EVERYONE = 'everyone'
- FIRST_FEW_LINES = 'First Few Lines'
  HOUSE_OVERSIGHT_PREFIX = 'HOUSE_OVERSIGHT_'
  JSON = 'json'
  NA = 'n/a'
  REDACTED = '<REDACTED>'
- URL_SIGNIFIERS = ['gclid', 'htm', 'ref=', 'utm']
  QUESTION_MARKS = '(???)'

  # Regexes
@@ -6,6 +6,7 @@ from inflection import parameterize
  from rich.text import Text

  from epstein_files.util.constant.output_files import *
+ from epstein_files.util.env import args
  from epstein_files.util.file_helper import coerce_file_stem

  # Style stuff
@@ -49,7 +50,7 @@ DOC_LINK_BASE_URLS: dict[ExternalSite, str] = {
  }


- epsteinify_api_url = lambda file_id: f"{EPSTEINIFY_URL}/api/documents/HOUSE_OVERSIGHT_{file_id}"
+ epsteinify_api_url = lambda file_stem: f"{EPSTEINIFY_URL}/api/documents/{file_stem}"
  epsteinify_doc_link_markup = lambda filename_or_id, style = TEXT_LINK: external_doc_link_markup(EPSTEINIFY, filename_or_id, style)
  epsteinify_doc_link_txt = lambda filename_or_id, style = TEXT_LINK: Text.from_markup(external_doc_link_markup(filename_or_id, style))
  epsteinify_doc_url = lambda file_stem: build_doc_url(DOC_LINK_BASE_URLS[EPSTEINIFY], file_stem)
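The API-URL helper above now expects the full file stem instead of a bare numeric id and no longer prepends HOUSE_OVERSIGHT_ itself. A hedged before/after sketch using a placeholder base URL (the real EPSTEINIFY_URL is defined elsewhere in the package):

    EPSTEINIFY_URL = 'https://epsteinify.example'         # placeholder for illustration only
    epsteinify_api_url = lambda file_stem: f"{EPSTEINIFY_URL}/api/documents/{file_stem}"

    # 1.0.14: epsteinify_api_url('012345')                 -> .../api/documents/HOUSE_OVERSIGHT_012345
    # 1.0.16: epsteinify_api_url('HOUSE_OVERSIGHT_012345') -> .../api/documents/HOUSE_OVERSIGHT_012345
    print(epsteinify_api_url('HOUSE_OVERSIGHT_012345'))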
@@ -66,8 +67,6 @@ epstein_web_search_url = lambda s: f"{EPSTEIN_WEB_URL}/?ewmfileq={urllib.parse.q

  rollcall_doc_url = lambda file_stem: build_doc_url(DOC_LINK_BASE_URLS[ROLLCALL], file_stem, 'title')

- search_archive_url = lambda txt: f"{COURIER_NEWSROOM_ARCHIVE_URL}&q={urllib.parse.quote(txt)}&p=1"
- search_coffeezilla_url = lambda txt: f"{COFFEEZILLA_ARCHIVE_URL}&q={urllib.parse.quote(txt)}&p=1"
  search_jmail_url = lambda txt: f"{JMAIL_URL}/search?q={urllib.parse.quote(txt)}"
  search_twitter_url = lambda txt: f"https://x.com/search?q={urllib.parse.quote(txt)}&src=typed_query&f=live"

@@ -103,8 +102,12 @@ def link_text_obj(url: str, link_text: str | None = None, style: str = ARCHIVE_L
  return Text.from_markup(link_markup(url, link_text, style))


- def search_coffeezilla_link(text: str, link_txt: str, style: str = ARCHIVE_LINK_COLOR) -> Text:
- return link_text_obj(search_coffeezilla_url(text), link_txt or text, style)
+ def other_site_type() -> SiteType:
+ return TEXT_MESSAGE if args.all_emails else EMAIL
+
+
+ def other_site_url() -> str:
+ return SITE_URLS[other_site_type()]


  CRYPTADAMUS_TWITTER = link_markup('https://x.com/cryptadamist', '@cryptadamist')
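A minimal, self-contained sketch of the two new helpers in the hunk above; SiteType, SITE_URLS, EMAIL, TEXT_MESSAGE and args come from the package, so placeholder stand-ins are used here:

    # Placeholder stand-ins; the package's real constants and args object differ.
    EMAIL, TEXT_MESSAGE = 'email', 'text_message'
    SITE_URLS = {EMAIL: 'https://example.org/emails.html', TEXT_MESSAGE: 'https://example.org/texts.html'}

    class _Args:
        all_emails = False                                # stand-in for epstein_files.util.env.args

    args = _Args()

    def other_site_type() -> str:
        # Mirrors the new helper: the "other" page is the text-message page when building the all-emails page.
        return TEXT_MESSAGE if args.all_emails else EMAIL

    def other_site_url() -> str:
        return SITE_URLS[other_site_type()]

    print(other_site_url())                               # https://example.org/emails.html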