lattifai 0.4.6__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. lattifai/__init__.py +42 -27
  2. lattifai/alignment/__init__.py +6 -0
  3. lattifai/alignment/lattice1_aligner.py +119 -0
  4. lattifai/{workers/lattice1_alpha.py → alignment/lattice1_worker.py} +33 -132
  5. lattifai/{tokenizer → alignment}/phonemizer.py +1 -1
  6. lattifai/alignment/segmenter.py +166 -0
  7. lattifai/{tokenizer → alignment}/tokenizer.py +186 -112
  8. lattifai/audio2.py +211 -0
  9. lattifai/caption/__init__.py +20 -0
  10. lattifai/caption/caption.py +1275 -0
  11. lattifai/{io → caption}/supervision.py +1 -0
  12. lattifai/{io → caption}/text_parser.py +53 -10
  13. lattifai/cli/__init__.py +17 -0
  14. lattifai/cli/alignment.py +153 -0
  15. lattifai/cli/caption.py +204 -0
  16. lattifai/cli/server.py +19 -0
  17. lattifai/cli/transcribe.py +197 -0
  18. lattifai/cli/youtube.py +128 -0
  19. lattifai/client.py +455 -246
  20. lattifai/config/__init__.py +20 -0
  21. lattifai/config/alignment.py +73 -0
  22. lattifai/config/caption.py +178 -0
  23. lattifai/config/client.py +46 -0
  24. lattifai/config/diarization.py +67 -0
  25. lattifai/config/media.py +335 -0
  26. lattifai/config/transcription.py +84 -0
  27. lattifai/diarization/__init__.py +5 -0
  28. lattifai/diarization/lattifai.py +89 -0
  29. lattifai/errors.py +41 -34
  30. lattifai/logging.py +116 -0
  31. lattifai/mixin.py +552 -0
  32. lattifai/server/app.py +420 -0
  33. lattifai/transcription/__init__.py +76 -0
  34. lattifai/transcription/base.py +108 -0
  35. lattifai/transcription/gemini.py +219 -0
  36. lattifai/transcription/lattifai.py +103 -0
  37. lattifai/types.py +30 -0
  38. lattifai/utils.py +3 -31
  39. lattifai/workflow/__init__.py +22 -0
  40. lattifai/workflow/agents.py +6 -0
  41. lattifai/{workflows → workflow}/file_manager.py +81 -57
  42. lattifai/workflow/youtube.py +564 -0
  43. lattifai-1.0.0.dist-info/METADATA +736 -0
  44. lattifai-1.0.0.dist-info/RECORD +52 -0
  45. {lattifai-0.4.6.dist-info → lattifai-1.0.0.dist-info}/WHEEL +1 -1
  46. lattifai-1.0.0.dist-info/entry_points.txt +13 -0
  47. lattifai/base_client.py +0 -126
  48. lattifai/bin/__init__.py +0 -3
  49. lattifai/bin/agent.py +0 -324
  50. lattifai/bin/align.py +0 -295
  51. lattifai/bin/cli_base.py +0 -25
  52. lattifai/bin/subtitle.py +0 -210
  53. lattifai/io/__init__.py +0 -43
  54. lattifai/io/reader.py +0 -86
  55. lattifai/io/utils.py +0 -15
  56. lattifai/io/writer.py +0 -102
  57. lattifai/tokenizer/__init__.py +0 -3
  58. lattifai/workers/__init__.py +0 -3
  59. lattifai/workflows/__init__.py +0 -34
  60. lattifai/workflows/agents.py +0 -12
  61. lattifai/workflows/gemini.py +0 -167
  62. lattifai/workflows/prompts/README.md +0 -22
  63. lattifai/workflows/prompts/gemini/README.md +0 -24
  64. lattifai/workflows/prompts/gemini/transcription_gem.txt +0 -81
  65. lattifai/workflows/youtube.py +0 -931
  66. lattifai-0.4.6.dist-info/METADATA +0 -806
  67. lattifai-0.4.6.dist-info/RECORD +0 -39
  68. lattifai-0.4.6.dist-info/entry_points.txt +0 -3
  69. /lattifai/{io → caption}/gemini_reader.py +0 -0
  70. /lattifai/{io → caption}/gemini_writer.py +0 -0
  71. /lattifai/{workflows → transcription}/prompts/__init__.py +0 -0
  72. /lattifai/{workflows → workflow}/base.py +0 -0
  73. {lattifai-0.4.6.dist-info → lattifai-1.0.0.dist-info}/licenses/LICENSE +0 -0
  74. {lattifai-0.4.6.dist-info → lattifai-1.0.0.dist-info}/top_level.txt +0 -0
lattifai/{io → caption}/supervision.py
@@ -24,6 +24,7 @@ class Supervision(SupervisionSegment):
     """
 
     text: Optional[str] = None
+    speaker: Optional[str] = None
     id: str = ""
     recording_id: str = ""
     start: Seconds = 0.0
lattifai/{io → caption}/text_parser.py
@@ -2,6 +2,10 @@ import logging
 import re
 from typing import Optional, Tuple
 
+# Timestamp pattern: [start-end] text
+# Example: [1.23-4.56] Hello world
+TIMESTAMP_PATTERN = re.compile(r"^\[([\d.]+)-([\d.]+)\]\s*(.*)$")
+
 # Speaker marker formats commonly found in subtitles
 SPEAKER_PATTERN = re.compile(r"((?:>>|>>|>|>).*?[::])\s*(.*)")
 
@@ -16,8 +20,19 @@ SPEAKER_LATTIFAI = re.compile(r"(^\[SPEAKER_.*?\][::])\s*(.*)")
 SPEAKER_PATTERN2 = re.compile(r"^([A-Z]{1,15}(?:\s+[A-Z]{1,15})?[::])\s*(.*)$")
 
 
-def normalize_html_text(text: str) -> str:
-    """Normalize HTML text by decoding entities and stripping whitespace."""
+def normalize_text(text: str) -> str:
+    """Normalize caption text by:
+    - Decoding common HTML entities
+    - Removing HTML tags (e.g., <i>, <font>, <b>, <br>)
+    - Collapsing multiple whitespace into a single space
+    - Converting curly apostrophes to straight ones in common contractions
+    """
+    if not text:
+        return ""
+
+    # # Remove HTML tags first (replace with space to avoid concatenation)
+    # text = re.sub(r"<[^>]+>", " ", text)
+
     html_entities = {
         "&amp;": "&",
         "&lt;": "<",
@@ -26,20 +41,18 @@ def normalize_html_text(text: str) -> str:
         "&#39;": "'",
         "&nbsp;": " ",
         "\\N": " ",
-        "…": " ",
+        "…": " ",  # replace ellipsis with space to avoid merging words
     }
     for entity, char in html_entities.items():
         text = text.replace(entity, char)
 
-    text = re.sub(r"\s+", " ", text)  # Replace multiple spaces with a single space
-
     # Convert curly apostrophes to straight apostrophes for common English contractions
-    # Handles: 't 's 'll 're 've 'd 'm
-    # For example, convert "don't" to "don't"
     text = re.sub(r"([a-zA-Z])’([tsdm]|ll|re|ve)\b", r"\1'\2", text, flags=re.IGNORECASE)
-    # For example, convert "5’s" to "5's"
     text = re.sub(r"([0-9])’([s])\b", r"\1'\2", text, flags=re.IGNORECASE)
 
+    # Collapse whitespace (after replacements)
+    text = re.sub(r"\s+", " ", text)
+
     return text.strip()
 
 
@@ -70,6 +83,36 @@ def parse_speaker_text(line) -> Tuple[Optional[str], str]:
     return None, line
 
 
+def parse_timestamp_text(line: str) -> Tuple[Optional[float], Optional[float], str]:
+    """
+    Parse a line of text to extract timestamp and content.
+
+    Format: [start-end] text
+    Example: [1.23-4.56] Hello world
+
+    Args:
+        line: Input line to parse
+
+    Returns:
+        Tuple of (start_time, end_time, text)
+        - start_time: Start timestamp in seconds, or None if not found
+        - end_time: End timestamp in seconds, or None if not found
+        - text: The text content after the timestamp
+    """
+    match = TIMESTAMP_PATTERN.match(line)
+    if match:
+        try:
+            start = float(match.group(1))
+            end = float(match.group(2))
+            text = match.group(3).strip()
+            return start, end, text
+        except ValueError:
+            # If conversion fails, treat as plain text
+            return None, None, line
+
+    return None, None, line
+
+
 if __name__ == "__main__":
     pattern = re.compile(r">>\s*(.*?)\s*[::]\s*(.*)")
     pattern = re.compile(r"(>>.*?[::])\s*(.*)")
@@ -85,8 +128,8 @@ if __name__ == "__main__":
         match = pattern.match(text)
         if match:
             print(f"Input: '{text}'")
-            print(f" Key: '{match.group(1)}'")
-            print(f" Value: '{match.group(2)}'")
+            print(f"Speaker: '{match.group(1)}'")
+            print(f"Content: '{match.group(2)}'")
             print("-------------")
 
     # pattern2
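
The parsing helpers added to text_parser.py above can be exercised directly. A minimal sketch, assuming the module is importable as lattifai.caption.text_parser after the rename; the expected outputs follow from the regexes shown in the diff:

    from lattifai.caption.text_parser import normalize_text, parse_timestamp_text

    # "[1.23-4.56] Hello world" -> (1.23, 4.56, "Hello world")
    print(parse_timestamp_text("[1.23-4.56] Hello world"))

    # Lines without a leading [start-end] block come back unchanged with None timestamps
    print(parse_timestamp_text("Hello world"))  # -> (None, None, "Hello world")

    # normalize_text decodes entities, straightens curly apostrophes, and collapses whitespace
    print(normalize_text("Tom &amp; Jerry   don’t\\N stop"))  # -> "Tom & Jerry don't stop"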
lattifai/cli/__init__.py ADDED
@@ -0,0 +1,17 @@
+"""CLI module for LattifAI with nemo_run entry points."""
+
+import nemo_run as run  # noqa: F401
+
+# Import and re-export entrypoints at package level so NeMo Run can find them
+from lattifai.cli.alignment import align
+from lattifai.cli.caption import convert
+from lattifai.cli.transcribe import transcribe, transcribe_align
+from lattifai.cli.youtube import youtube
+
+__all__ = [
+    "align",
+    "convert",
+    "transcribe",
+    "transcribe_align",
+    "youtube",
+]
lattifai/cli/alignment.py ADDED
@@ -0,0 +1,153 @@
+"""Alignment CLI entry point with nemo_run."""
+
+from typing import Optional
+
+import nemo_run as run
+from lhotse.utils import Pathlike
+from typing_extensions import Annotated
+
+from lattifai.client import LattifAI
+from lattifai.config import (
+    AlignmentConfig,
+    CaptionConfig,
+    ClientConfig,
+    DiarizationConfig,
+    MediaConfig,
+    TranscriptionConfig,
+)
+
+__all__ = ["align"]
+
+
+@run.cli.entrypoint(name="align", namespace="alignment")
+def align(
+    input_media: Optional[str] = None,
+    input_caption: Optional[str] = None,
+    output_caption: Optional[str] = None,
+    media: Annotated[Optional[MediaConfig], run.Config[MediaConfig]] = None,
+    caption: Annotated[Optional[CaptionConfig], run.Config[CaptionConfig]] = None,
+    client: Annotated[Optional[ClientConfig], run.Config[ClientConfig]] = None,
+    alignment: Annotated[Optional[AlignmentConfig], run.Config[AlignmentConfig]] = None,
+    transcription: Annotated[Optional[TranscriptionConfig], run.Config[TranscriptionConfig]] = None,
+    diarization: Annotated[Optional[DiarizationConfig], run.Config[DiarizationConfig]] = None,
+):
+    """
+    Align audio/video with caption file.
+
+    This command performs forced alignment between audio/video media and caption text,
+    generating accurate timestamps for each caption segment and optionally word-level
+    timestamps. The alignment engine uses advanced speech recognition models to ensure
+    precise synchronization between audio and text.
+
+    Shortcut: invoking ``lai-align`` is equivalent to running ``lai alignment align``.
+
+    Args:
+        media: Media configuration for audio/video input and output handling.
+            Fields: input_path, media_format, sample_rate, channels, output_dir,
+            output_path, output_format, prefer_audio, default_audio_format,
+            default_video_format, force_overwrite
+        client: API client configuration.
+            Fields: api_key, timeout, max_retries, default_headers
+        alignment: Alignment configuration (model selection and inference settings).
+            Fields: model_name, device, batch_size
+        caption: Caption I/O configuration (file reading/writing and formatting).
+            Fields: input_format, input_path, output_format, output_path,
+            normalize_text, split_sentence, word_level,
+            include_speaker_in_text, encoding
+
+    Examples:
+        # Basic usage with positional arguments
+        lai alignment align audio.wav caption.srt output.srt
+
+        # Mixing positional and keyword arguments
+        lai alignment align audio.mp4 caption.srt output.json \\
+            alignment.device=cuda \\
+            caption.word_level=true
+
+        # Smart sentence splitting with custom output format
+        lai alignment align audio.wav caption.srt output.vtt \\
+            caption.split_sentence=true
+
+        # Using keyword arguments (traditional syntax)
+        lai alignment align \\
+            input_media=audio.wav \\
+            input_caption=caption.srt \\
+            output_caption=output.srt
+
+        # Full configuration with nested config objects
+        lai alignment align audio.wav caption.srt aligned.json \\
+            media.output_dir=/tmp/output \\
+            caption.split_sentence=true \\
+            caption.word_level=true \\
+            caption.normalize_text=true \\
+            alignment.device=mps \\
+            alignment.model_name=Lattifai/Lattice-1-Alpha
+    """
+    media_config = media or MediaConfig()
+
+    # Validate that input_media and media_config.input_path are not both provided
+    if input_media and media_config.input_path:
+        raise ValueError(
+            "Cannot specify both positional input_media and media.input_path. "
+            "Use either positional argument or config, not both."
+        )
+
+    # Assign input_media to media_config.input_path if provided
+    if input_media:
+        media_config.set_input_path(input_media)
+
+    if not media_config.input_path:
+        raise ValueError("Input media path must be specified via positional argument input_media= or media.input_path=")
+
+    caption_config = caption or CaptionConfig()
+
+    # Validate that output_caption_path and caption_config.output_path are not both provided
+    if output_caption and caption_config.output_path:
+        raise ValueError(
+            "Cannot specify both positional output_caption and caption.output_path. "
+            "Use either positional argument or config, not both."
+        )
+
+    # Assign paths to caption_config if provided
+    if input_caption:
+        caption_config.set_input_path(input_caption)
+
+    if output_caption:
+        caption_config.set_output_path(output_caption)
+
+    client = LattifAI(
+        client_config=client,
+        alignment_config=alignment,
+        caption_config=caption_config,
+        transcription_config=transcription,
+        diarization_config=diarization,
+    )
+
+    is_url = media_config.input_path.startswith(("http://", "https://"))
+    if is_url:
+        # Call the client's youtube method
+        return client.youtube(
+            url=media_config.input_path,
+            output_dir=media_config.output_dir,
+            output_caption_path=caption_config.output_path,
+            media_format=media_config.normalize_format() if media_config.output_format else None,
+            force_overwrite=media_config.force_overwrite,
+            split_sentence=caption_config.split_sentence,
+            channel_selector=media_config.channel_selector,
+        )
+
+    return client.alignment(
+        input_media=media_config.input_path,
+        input_caption=caption_config.input_path,
+        output_caption_path=caption_config.output_path,
+        split_sentence=caption_config.split_sentence,
+        channel_selector=media_config.channel_selector,
+    )
+
+
+def main():
+    run.cli.main(align)
+
+
+if __name__ == "__main__":
+    main()
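
For reference, the same alignment flow can be driven from Python without the nemo_run wrapper. A rough sketch using only names that appear in the diff above; constructor defaults and config keyword arguments are assumptions:

    from lattifai.client import LattifAI
    from lattifai.config import AlignmentConfig, CaptionConfig

    # Roughly what `lai alignment align audio.wav caption.srt output.srt` ends up calling
    client = LattifAI(
        alignment_config=AlignmentConfig(),  # e.g. model_name/device, per the docstring fields
        caption_config=CaptionConfig(),
    )
    client.alignment(
        input_media="audio.wav",
        input_caption="caption.srt",
        output_caption_path="output.srt",
        split_sentence=True,
    )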
lattifai/cli/caption.py ADDED
@@ -0,0 +1,204 @@
+"""Caption CLI entry point with nemo_run."""
+
+from typing import Optional
+
+import nemo_run as run
+from lhotse.utils import Pathlike
+from typing_extensions import Annotated
+
+from lattifai.config import CaptionConfig
+
+
+@run.cli.entrypoint(name="convert", namespace="caption")
+def convert(
+    input_path: Pathlike,
+    output_path: Pathlike,
+    include_speaker_in_text: bool = True,
+    normalize_text: bool = False,
+):
+    """
+    Convert caption file to another format.
+
+    This command reads a caption file from one format and writes it to another format,
+    preserving all timing information, text content, and speaker labels (if present).
+    Supports common caption formats including SRT, VTT, JSON, and Praat TextGrid.
+
+    Shortcut: invoking ``laisub-convert`` is equivalent to running ``lai caption convert``.
+
+    Args:
+        input_path: Path to input caption file (supports SRT, VTT, JSON, TextGrid formats)
+        output_path: Path to output caption file (format determined by file extension)
+        include_speaker_in_text: Preserve speaker labels in caption text content.
+        normalize_text: Whether to normalize caption text during conversion.
+            This applies text cleaning such as removing HTML tags, decoding entities,
+            collapsing whitespace, and standardizing punctuation.
+
+    Examples:
+        # Basic format conversion (positional arguments)
+        lai caption convert input.srt output.vtt
+
+        # Convert with text normalization
+        lai caption convert input.srt output.json normalize_text=true
+
+        # Mixing positional and keyword arguments
+        lai caption convert input.srt output.vtt \\
+            include_speaker_in_text=false \\
+            normalize_text=true
+
+        # Using keyword arguments (traditional syntax)
+        lai caption convert \\
+            input_path=input.srt \\
+            output_path=output.TextGrid
+    """
+    from lattifai.caption import Caption
+
+    caption = Caption.read(input_path, normalize_text=normalize_text)
+    caption.write(output_path, include_speaker_in_text=include_speaker_in_text)
+
+    print(f"✅ Converted {input_path} -> {output_path}")
+    return output_path
+
+
+@run.cli.entrypoint(name="normalize", namespace="caption")
+def normalize(
+    input_path: Pathlike,
+    output_path: Pathlike,
+    caption: Annotated[Optional[CaptionConfig], run.Config[CaptionConfig]] = None,
+):
+    """
+    Normalize caption text by cleaning HTML entities and whitespace.
+
+    This command reads a caption file and normalizes all text content by applying
+    the following transformations:
+    - Decode common HTML entities (&amp;, &lt;, &gt;, &quot;, &#39;, &nbsp;)
+    - Remove HTML tags (e.g., <i>, <font>, <b>, <br>)
+    - Collapse multiple whitespace characters into single spaces
+    - Convert curly apostrophes to straight ones in contractions
+    - Strip leading and trailing whitespace from each segment
+
+    Shortcut: invoking ``laisub-normalize`` is equivalent to running ``lai caption normalize``.
+
+    Args:
+        input_path: Path to input caption file to normalize
+        output_path: Path to output caption file (defaults to overwriting input file)
+        caption: Caption configuration for text normalization.
+            Fields: input_format, output_format, normalize_text (automatically enabled),
+            encoding
+
+    Examples:
+        # Normalize and save to new file (positional arguments)
+        lai caption normalize input.srt output.srt
+
+        # Normalize with format conversion
+        lai caption normalize input.vtt output.srt
+
+        # Normalize with custom caption config
+        lai caption normalize input.srt output.srt \\
+            caption.encoding=utf-8
+
+        # Using keyword arguments (traditional syntax)
+        lai caption normalize \\
+            input_path=input.srt \\
+            output_path=output.srt
+    """
+    from pathlib import Path
+
+    from lattifai.caption import Caption
+
+    input_path = Path(input_path).expanduser()
+    output_path = Path(output_path).expanduser()
+
+    caption_obj = Caption.read(input_path, normalize_text=True)
+    caption_obj.write(output_path, include_speaker_in_text=True)
+
+    if output_path == input_path:
+        print(f"✅ Normalized {input_path} (in-place)")
+    else:
+        print(f"✅ Normalized {input_path} -> {output_path}")
+
+    return output_path
+
+
+@run.cli.entrypoint(name="shift", namespace="caption")
+def shift(
+    input_path: Pathlike,
+    output_path: Pathlike,
+    seconds: float,
+    caption: Annotated[Optional[CaptionConfig], run.Config[CaptionConfig]] = None,
+):
+    """
+    Shift caption timestamps by a specified number of seconds.
+
+    This command reads a caption file and adjusts all timestamps by adding or
+    subtracting a specified offset. Use positive values to delay captions and
+    negative values to make them appear earlier.
+
+    Shortcut: invoking ``laisub-shift`` is equivalent to running ``lai caption shift``.
+
+    Args:
+        input_path: Path to input caption file
+        output_path: Path to output caption file (can be same as input for in-place modification)
+        seconds: Number of seconds to shift timestamps. Positive values delay captions,
+            negative values advance them earlier.
+        caption: Caption configuration for reading/writing.
+            Fields: input_format, output_format, encoding
+
+    Examples:
+        # Delay captions by 2 seconds (positional arguments)
+        lai caption shift input.srt output.srt 2.0
+
+        # Make captions appear 1.5 seconds earlier
+        lai caption shift input.srt output.srt -1.5
+
+        # Shift and convert format
+        lai caption shift input.vtt output.srt seconds=0.5
+
+        # Using keyword arguments (traditional syntax)
+        lai caption shift \\
+            input_path=input.srt \\
+            output_path=output.srt \\
+            seconds=3.0
+    """
+    from pathlib import Path
+
+    from lattifai.caption import Caption
+
+    input_path = Path(input_path).expanduser()
+    output_path = Path(output_path).expanduser()
+
+    # Read captions
+    caption_obj = Caption.read(input_path)
+
+    # Shift timestamps
+    shifted_caption = caption_obj.shift_time(seconds)
+
+    # Write shifted captions
+    shifted_caption.write(output_path, include_speaker_in_text=True)
+
+    if seconds >= 0:
+        direction = f"delayed by {seconds}s"
+    else:
+        direction = f"advanced by {abs(seconds)}s"
+
+    if output_path == input_path:
+        print(f"✅ Shifted timestamps {direction} in {input_path} (in-place)")
+    else:
+        print(f"✅ Shifted timestamps {direction}: {input_path} -> {output_path}")
+
+    return output_path
+
+
+def main_convert():
+    run.cli.main(convert)
+
+
+def main_normalize():
+    run.cli.main(normalize)
+
+
+def main_shift():
+    run.cli.main(shift)
+
+
+if __name__ == "__main__":
+    main_convert()
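
The Caption API that these entrypoints wrap can also be scripted directly. A small sketch built from the calls that appear above (Caption.read, shift_time, write):

    from lattifai.caption import Caption

    # Read with normalization, delay every cue by 2 seconds, and write back out as VTT
    caption = Caption.read("input.srt", normalize_text=True)
    shifted = caption.shift_time(2.0)
    shifted.write("output.vtt", include_speaker_in_text=True)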
lattifai/cli/server.py ADDED
@@ -0,0 +1,19 @@
+import os
+
+import colorful
+import uvicorn
+
+
+def main():
+    """Launch the LattifAI Web Interface."""
+    print(colorful.bold_green("🚀 Launching LattifAI Web Interface..."))
+    print(colorful.cyan("See http://localhost:8001"))
+
+    # Ensure the directory contains the app
+    # We might need to adjust python path or just rely on installed package
+
+    uvicorn.run("lattifai.server.app:app", host="0.0.0.0", port=8001, reload=True, log_level="info")
+
+
+if __name__ == "__main__":
+    main()
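
The same ASGI app can be served without this helper, for example for a non-reloading local run; a sketch assuming lattifai.server.app:app resolves once the package is installed:

    import uvicorn

    # Same application path as in main(), bound to localhost only and without auto-reload
    uvicorn.run("lattifai.server.app:app", host="127.0.0.1", port=8001, reload=False, log_level="info")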