sirchmunk-0.0.0-py3-none-any.whl → sirchmunk-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. sirchmunk/__init__.py +8 -0
  2. sirchmunk/base.py +17 -0
  3. sirchmunk/insight/__init__.py +4 -0
  4. sirchmunk/insight/text_insights.py +292 -0
  5. sirchmunk/learnings/__init__.py +1 -0
  6. sirchmunk/learnings/evidence_processor.py +525 -0
  7. sirchmunk/learnings/knowledge_base.py +232 -0
  8. sirchmunk/llm/__init__.py +2 -0
  9. sirchmunk/llm/openai_chat.py +247 -0
  10. sirchmunk/llm/prompts.py +216 -0
  11. sirchmunk/retrieve/__init__.py +1 -0
  12. sirchmunk/retrieve/base.py +25 -0
  13. sirchmunk/retrieve/text_retriever.py +1026 -0
  14. sirchmunk/scan/__init__.py +1 -0
  15. sirchmunk/scan/base.py +18 -0
  16. sirchmunk/scan/file_scanner.py +373 -0
  17. sirchmunk/scan/web_scanner.py +18 -0
  18. sirchmunk/scheduler/__init__.py +0 -0
  19. sirchmunk/schema/__init__.py +2 -0
  20. sirchmunk/schema/cognition.py +106 -0
  21. sirchmunk/schema/context.py +25 -0
  22. sirchmunk/schema/knowledge.py +318 -0
  23. sirchmunk/schema/metadata.py +658 -0
  24. sirchmunk/schema/request.py +221 -0
  25. sirchmunk/schema/response.py +20 -0
  26. sirchmunk/schema/snapshot.py +346 -0
  27. sirchmunk/search.py +475 -0
  28. sirchmunk/storage/__init__.py +7 -0
  29. sirchmunk/storage/duckdb.py +676 -0
  30. sirchmunk/storage/knowledge_manager.py +720 -0
  31. sirchmunk/utils/__init__.py +15 -0
  32. sirchmunk/utils/constants.py +15 -0
  33. sirchmunk/utils/deps.py +23 -0
  34. sirchmunk/utils/file_utils.py +70 -0
  35. sirchmunk/utils/install_rga.py +124 -0
  36. sirchmunk/utils/log_utils.py +360 -0
  37. sirchmunk/utils/tokenizer_util.py +55 -0
  38. sirchmunk/utils/utils.py +108 -0
  39. sirchmunk/version.py +1 -1
  40. sirchmunk-0.0.1.dist-info/METADATA +416 -0
  41. sirchmunk-0.0.1.dist-info/RECORD +45 -0
  42. {sirchmunk-0.0.0.dist-info → sirchmunk-0.0.1.dist-info}/WHEEL +1 -1
  43. sirchmunk-0.0.0.dist-info/METADATA +0 -26
  44. sirchmunk-0.0.0.dist-info/RECORD +0 -8
  45. {sirchmunk-0.0.0.dist-info → sirchmunk-0.0.1.dist-info}/entry_points.txt +0 -0
  46. {sirchmunk-0.0.0.dist-info → sirchmunk-0.0.1.dist-info}/licenses/LICENSE +0 -0
  47. {sirchmunk-0.0.0.dist-info → sirchmunk-0.0.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1 @@
+ # Copyright (c) ModelScope Contributors. All rights reserved.
sirchmunk/scan/base.py ADDED
@@ -0,0 +1,18 @@
+ # Copyright (c) ModelScope Contributors. All rights reserved.
+ from abc import ABC, abstractmethod
+ from typing import Any
+
+
+ class BaseScanner(ABC):
+     """Abstract base class for scanners."""
+
+     def __init__(self, *args, **kwargs): ...
+
+     @abstractmethod
+     def scan(self, *args, **kwargs) -> Any:
+         """Perform a scan operation.
+
+         Returns:
+             Any: The result of the scan operation.
+         """
+         raise NotImplementedError("Subclasses must implement this method.")
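For orientation, here is a minimal sketch of a concrete scanner built on the BaseScanner contract above; the NoteScanner class and its root argument are hypothetical and only illustrate that subclasses implement scan() and may return any result.

# Hypothetical illustration; not part of the package.
from pathlib import Path
from typing import Any

from sirchmunk.scan.base import BaseScanner


class NoteScanner(BaseScanner):
    """Toy scanner that lists .txt files under a root directory."""

    def __init__(self, root: str):
        super().__init__()
        self.root = Path(root)

    def scan(self, *args, **kwargs) -> Any:
        # Collect plain-text files; a real scanner would build richer metadata.
        return [p for p in self.root.rglob("*.txt") if p.is_file()]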
sirchmunk/scan/file_scanner.py ADDED
@@ -0,0 +1,373 @@
+ # Copyright (c) ModelScope Contributors. All rights reserved.
+ import json
+ import os
+ import random
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from datetime import datetime
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Set, Union
+
+ from loguru import logger
+ from tqdm.auto import tqdm
+
+ from sirchmunk.llm.openai_chat import OpenAIChat
+ from sirchmunk.scan.base import BaseScanner
+ from sirchmunk.schema.metadata import FileInfo, build_file_schema
+ from sirchmunk.utils.file_utils import StorageStructure
+
+ METADATA_NAME = ".metadata"
+
+
+ class FileScanner(BaseScanner):
+     """High-performance file metadata scanner with incremental batch processing.
+
+     Scans files under specified corpus paths, generates metadata using schema builders,
+     skips unchanged files, and persists results incrementally in shuffled batches.
+     Utilizes thread pooling for concurrency and efficient change detection.
+
+     Attributes:
+         corpus_paths (List[Path]): Paths to scan (directories or individual files).
+         work_path (Path): Base directory for operations (defaults to current working directory).
+         metadata_path (Path): Directory to store metadata JSON files (under work_path).
+         max_workers (int): Maximum thread pool workers for concurrent scanning.
+         batch_size (int): Number of files processed before saving metadata (default: 1000).
+         _base_metadata_cache (Dict[str, FileInfo]): Cache of previously scanned file metadata,
+             keyed by cache key and used for change detection (path and last-modified time).
+     """
+
+     def __init__(
+         self,
+         corpus_path: Union[str, Path, List[str], List[Path]],
+         llm: Optional[OpenAIChat] = None,
+         work_path: Union[str, Path, None] = None,
+         max_workers: Optional[int] = None,
+         batch_size: int = 1000,
+         verbose: bool = False,
+     ):
+         """Initialize the file scanner.
+
+         Args:
+             corpus_path: Single or multiple paths to scan (files or directories).
+             work_path: Base directory for metadata storage. Defaults to current directory.
+             max_workers: Maximum threads for concurrent scanning. Defaults to
+                 min(32, CPU_COUNT * 2) if unset.
+             batch_size: Number of files to process before saving metadata. Defaults to 1000.
+         """
+         # Normalize corpus paths to a list of Path objects
+         if isinstance(corpus_path, (str, Path)):
+             corpus_path = [corpus_path]
+         self.corpus_paths: List[Path] = [Path(p).resolve() for p in corpus_path]
+
+         # Set work and metadata paths
+         self.work_path: Path = (
+             Path.cwd() if work_path is None else Path(work_path).resolve()
+         )
+         self.metadata_path: Path = (
+             self.work_path / StorageStructure.CACHE_DIR / StorageStructure.METADATA_DIR
+         )
+
+         # Configure thread pool size
+         cpu_count = os.cpu_count() or 1
+         self.max_workers: int = max_workers or min(32, cpu_count * 2)
+         self.batch_size: int = batch_size
+
+         # Cache for existing file stats to detect changes
+         self._base_metadata_cache: Dict[str, Any] = {}
+         self._base_metadata_cache_paths: Set[str] = (
+             set()
+         )  # `/path/to/xxx.ext@ISOtimestamp`
+
+         # Ensure metadata directory exists
+         self.metadata_path.mkdir(parents=True, exist_ok=True)
+
+         self.verbose = verbose
+
+         self.llm = llm
+
+         super().__init__()
+
+     def scan(
+         self,
+         max_workers: Optional[int] = None,
+         batch_size: Optional[int] = None,
+         shuffle: bool = True,
+         tqdm_desc: str = "Scanning files",
+     ) -> List[Any]:
+         """Scan files and generate metadata in shuffled batches with incremental saving and progress tracking.
+
+         Skips files unchanged since last scan (based on abs path and modification time).
+         Uses thread pooling for concurrent processing within batches. Saves metadata
+         after each batch completes to ensure progress persistence.
+
+         Args:
+             max_workers: Override default thread count for this scan.
+             batch_size: Override default batch size. Must be >= 1.
+             shuffle: Whether to shuffle files before batching (improves I/O distribution).
+             tqdm_desc: Description prefix for the progress bar.
+             llm: The OpenAI api format client.
+
+         Returns:
+             List of metadata objects (FileInfo or subclasses) for all scanned files.
+         """
+         effective_batch_size = batch_size or self.batch_size
+         if effective_batch_size < 1:
+             raise ValueError("Batch size must be at least 1")
+
+         # Load existing metadata stats for change detection
+         self._load_base_metadata_cache()
+
+         # Collect all file paths to consider (skip directories and symlinks)
+         all_files: List[Path] = []
+         for path in self.corpus_paths:
+             if path.is_dir():
+                 all_files.extend(
+                     p for p in path.rglob("*") if p.is_file() and not p.is_symlink()
+                 )
+             elif path.is_file():
+                 all_files.append(path.resolve())
+
+         # Quick filter
+         files_to_scan = [f for f in all_files if not self._should_exclude(f)]
+
+         total_files = len(files_to_scan)
+         total_skipped = len(all_files) - total_files
+
+         # Shuffle files to distribute I/O load evenly across batches
+         if shuffle:
+             random.shuffle(files_to_scan)
+
+         logger.info(
+             f"Scanning {total_files} of {len(all_files)} files "
+             f"(skipped {total_skipped} unchanged) in "
+             f"{(total_files // effective_batch_size) + (1 if total_files % effective_batch_size else 0)} batches"
+         )
+
+         if total_files == 0:
+             logger.info("No new or modified files to scan.")
+             return []
+
+         # Prepare batches
+         batches = [
+             files_to_scan[i : i + effective_batch_size]
+             for i in range(0, len(files_to_scan), effective_batch_size)
+         ]
+         total_batches = len(batches)
+
+         # Initialize progress bar
+         pbar = tqdm(
+             total=total_files,
+             desc=tqdm_desc,
+             unit="file",
+             dynamic_ncols=True,
+             bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} files [{elapsed}<{remaining}, {rate_fmt}{postfix}]",
+         )
+
+         total_results = []
+         total_success = 0
+         total_failed = 0
+
+         try:
+             for batch_idx, batch in enumerate(batches, 1):
+                 batch_size_actual = len(batch)
+                 # Update progress bar description for current batch
+                 pbar.set_postfix_str(
+                     f"Batch {batch_idx}/{total_batches}, "
+                     f"Success: {total_success}, Failed: {total_failed}"
+                 )
+
+                 batch_results = self._process_batch(batch, max_workers, self.llm)
+                 success_in_batch = len(batch_results)
+                 failed_in_batch = batch_size_actual - success_in_batch
+
+                 # Increment counters
+                 total_success += success_in_batch
+                 total_failed += failed_in_batch
+
+                 # Save and aggregate
+                 self.save(batch_results)
+                 total_results.extend(batch_results)
+
+                 # Update progress bar by actual processed count (not just batch size)
+                 pbar.update(batch_size_actual)
+
+                 # Update postfix with fresh stats
+                 success_rate = (
+                     (total_success / (total_success + total_failed)) * 100
+                     if (total_success + total_failed) > 0
+                     else 0.0
+                 )
+                 pbar.set_postfix_str(
+                     f"B{batch_idx}/{total_batches}, "
+                     f"✓{total_success} ✗{total_failed} "
+                     f"({success_rate:.1f}% ok)"
+                 )
+
+             pbar.set_postfix_str(f"✓{total_success} ✗{total_failed} (done)")
+         finally:
+             pbar.close()
+
+         logger.info(
+             f"Scan completed: {total_success} succeeded, {total_failed} failed, "
+             f"{total_skipped} skipped (unchanged)."
+         )
+         return total_results
+
+     def save(self, metadata_list: List[FileInfo]) -> None:
+         """Persist metadata objects incrementally to disk.
+
+         Saves each metadata object as a separate JSON file in metadata_path.
+         Filenames are SHA-256 hashes of absolute file paths to avoid collisions.
+         Only new/updated files are written; existing unchanged files are untouched.
+
+         Args:
+             metadata_list: List of metadata objects (FileInfo or subclasses) to save.
+         """
+         for metadata in metadata_list:
+             try:
+                 # Convert metadata object to JSON-serializable dictionary
+                 data = self._serialize_metadata(metadata)
+                 if not metadata.cache_key:
+                     continue
+                 output_file = self.metadata_path / f"{metadata.cache_key}.json"
+
+                 # Write JSON file
+                 with open(output_file, "w", encoding="utf-8") as f:
+                     json.dump(data, f, indent=2, ensure_ascii=False)
+                 if self.verbose:
+                     logger.debug(
+                         f"Saved metadata for {metadata.file_or_url} to {output_file}"
+                     )
+             except Exception as e:
+                 logger.error(f"Failed to save metadata for {metadata.file_or_url}: {e}")
+
+     def load(self) -> Dict[Path, Any]:
+         """Load all existing metadata from disk into a path-indexed dictionary.
+
+         Reads all JSON files in metadata_path, deserializes them, and maps by file path.
+         Intended for external use (e.g., reporting); not used internally for scanning.
+
+         Returns:
+             Dictionary mapping file paths to their metadata objects.
+         """
+         metadata_map = {}
+         for meta_file in self.metadata_path.glob("*.json"):
+             try:
+                 with open(meta_file, "r", encoding="utf-8") as f:
+                     data = json.load(f)
+                 # Deserialize JSON to metadata object (basic reconstruction)
+                 path = Path(data["path"])
+                 metadata_map[path] = data
+             except Exception as e:
+                 logger.warning(f"Error loading metadata file {meta_file}: {e}")
+         return metadata_map
+
+     def _load_base_metadata_cache(self) -> None:
+         """Populate internal caches from metadata files already on disk.
+
+         Enables efficient change detection during scanning without rebuilding metadata.
+         Cache format: {cache_key: FileInfo}, plus a set of `path@mtime` keys.
+         """
+         self._base_metadata_cache.clear()
+         self._base_metadata_cache_paths.clear()
+         for meta_file in self.metadata_path.glob("*.json"):
+             try:
+                 with open(meta_file, "r", encoding="utf-8") as f:
+                     file_info: Dict[str, Any] = json.load(f)
+
+                 file_info: FileInfo = FileInfo.from_dict(file_info)
+                 self._base_metadata_cache[file_info.cache_key] = file_info
+                 self._base_metadata_cache_paths.add(
+                     FileInfo.get_path_mtime(
+                         file_info.file_or_url, file_info.last_modified
+                     )
+                 )
+             except (KeyError, ValueError, TypeError, OSError) as e:
+                 if self.verbose:
+                     logger.warning(f"Invalid metadata in {meta_file}: {e}")
+
+     def _process_batch(
+         self,
+         file_batch: List[Path],
+         max_workers: Optional[int] = None,
+         llm: Optional[OpenAIChat] = None,
+     ) -> List[Any]:
+         """Process a batch of files concurrently and return successful metadata objects.
+
+         Args:
+             file_batch: List of file paths to process in this batch.
+             max_workers: Override default thread count for this batch.
+
+         Returns:
+             List of metadata objects (FileInfo or subclasses) for successfully processed files.
+         """
+         results = []
+         workers = max_workers or self.max_workers
+         with ThreadPoolExecutor(max_workers=workers) as executor:
+             futures = {
+                 executor.submit(self._process_file, file_path, llm): file_path
+                 for file_path in file_batch
+             }
+             for future in as_completed(futures):
+                 file_path = futures[future]
+                 try:
+                     metadata = future.result(timeout=60)  # 60-second timeout per file
+                     if metadata is not None:
+                         results.append(metadata)
+                 except Exception as e:
+                     logger.error(f"Failed to process {file_path} in batch: {e}")
+         return results
+
+     def _process_file(
+         self,
+         file_path: Path,
+         llm: Optional[OpenAIChat] = None,
+     ) -> Optional[Any]:
+         """Process a single file to generate its metadata schema.
+
+         Wraps build_file_schema with error handling and logging.
+
+         Args:
+             file_path: Path to the file to process.
+
+         Returns:
+             Metadata object (FileInfo or subclass) if successful; None on failure.
+         """
+         try:
+             if self.verbose:
+                 logger.debug(f"Processing file: {file_path}")
+             return build_file_schema(
+                 path=file_path,
+                 llm=llm,
+             )
+         except Exception as e:
+             logger.error(f"Schema build failed for {file_path}: {e}")
+             return None
+
+     def _serialize_metadata(self, metadata: FileInfo) -> Dict[str, Any]:
+         """Convert a metadata object to a JSON-serializable dictionary.
+
+         Handles Path and datetime conversions, and recursively processes dataclasses.
+
+         Args:
+             metadata: Metadata object (instance of FileInfo or subclass).
+
+         Returns:
+             Dictionary ready for JSON serialization.
+         """
+         return metadata.to_dict()
+
+     def _should_exclude(self, f: Path) -> bool:
+         """
+         Quick check if the file should be excluded from scanning based on existing cache.
+         key: `/path/to/xxx.ext@ISOtimestamp`
+
+         Args:
+             f: Path object of the file to check.
+
+         Returns:
+             bool: True if the file should be excluded (unchanged), False otherwise.
+         """
+         stat = f.stat()
+         mtime: datetime = datetime.fromtimestamp(stat.st_mtime)
+
+         return FileInfo.get_path_mtime(f, mtime) in self._base_metadata_cache_paths
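As a rough usage sketch of the new FileScanner: the directory names below are placeholders, and passing llm=None to skip LLM-assisted schema building is an assumption based on the default argument, not documented behavior.

# Illustrative only; paths are placeholders.
from sirchmunk.scan.file_scanner import FileScanner

scanner = FileScanner(
    corpus_path=["./docs"],    # files or directories to index
    llm=None,                  # no LLM client: plain metadata extraction
    work_path="./workspace",   # metadata JSON is cached under the work path
    batch_size=500,
    verbose=True,
)

# Unchanged files (same path and mtime) are skipped; metadata is saved after each batch.
metadata = scanner.scan(shuffle=True)

# Reload everything persisted so far, keyed by file path.
cached = scanner.load()
print(f"{len(metadata)} files scanned, {len(cached)} metadata records on disk")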
sirchmunk/scan/web_scanner.py ADDED
@@ -0,0 +1,18 @@
+ # Copyright (c) ModelScope Contributors. All rights reserved.
+ from typing import List, Union
+
+ from sirchmunk.scan.base import BaseScanner
+
+
+ class WebScanner(BaseScanner):
+     """Scanner for web-based resources."""
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def scan(self, url_or_path: Union[str, List[str]]):
+         """
+         Scan a web resource given its URL.
+         TODO: Implement actual web scanning logic.
+         """
+         print(f"Scanning web resource at: {url_or_path}")
File without changes
@@ -0,0 +1,2 @@
+ # Copyright (c) ModelScope Contributors. All rights reserved.
+ # Learnings as a Living Knowledge Forest
sirchmunk/schema/cognition.py ADDED
@@ -0,0 +1,106 @@
+ # Copyright (c) ModelScope Contributors. All rights reserved.
+ from dataclasses import dataclass
+ from enum import Enum
+ from typing import Any, Dict, Optional
+
+ """
+ Cognition Layer: Cognitive Graph based on Knowledge Clusters, enabling rich semantic relationships, cognitive navigation, and self-evolution.
+ """
+
+
+ class RichEdgeType(Enum):
+     """
+     Types of semantic edges connecting knowledge clusters in the Cognitive Graph; edges are directed unless noted otherwise.
+     """
+
+     PATHWAY = (
+         "pathway"  # Directed edge suggesting a procedural route to another cluster
+     )
+     BARRIER = "barrier"  # Conditional edge representing a constraint or risk
+     ANALOGY = "analogy"  # Undirected edge indicating a conceptual similarity
+     SHORTCUT = "shortcut"  # Directed edge representing an expedited path under certain conditions
+     RESOLUTION = "resolution"  # Directed edge indicating a solution to a conflict or contradiction
+
+
+ @dataclass
+ class RichSemanticEdge:
+     """
+     A rich, executable semantic relationship from current cluster to another.
+
+     Mirrors the edge structure used in Cognition Layer, enabling direct promotion.
+
+     Usage:
+         semantic_edges: Dict[str, List[RichSemanticEdge]]
+
+     Examples:
+         {
+             "pathway": [
+                 RichSemanticEdge(
+                     target_cluster_id="C1002",  # Quantization
+                     edge_type="pathway",
+                     score=0.95,
+                     meta={
+                         "steps": ["apply_4bit_quantization", "calibrate_activations"],
+                         "required_context": {"task": "finetune"}
+                     }
+                 ),
+                 RichSemanticEdge(
+                     target_cluster_id="C1008",  # Low-Rank Update
+                     edge_type="pathway",
+                     score=0.9,
+                     meta={
+                         "steps": ["decompose_delta_w", "train_A_and_B"],
+                         "required_context": {}
+                     }
+                 )
+             ],
+             "barrier": [
+                 RichSemanticEdge(
+                     target_cluster_id="C1005",  # Token Pruning
+                     edge_type="barrier",
+                     score=0.8,
+                     meta={
+                         "condition": "quant_bits < 4",
+                         "severity": "high",
+                         "description": "Ultra-low-bit quantization amplifies pruning noise"
+                     }
+                 )
+             ],
+             "analogy": [
+                 RichSemanticEdge(
+                     target_cluster_id="C2010",  # Sparse RL Policy Update
+                     edge_type="analogy",
+                     score=0.85,
+                     meta={
+                         "source_role": "low_rank_delta_in_lm",
+                         "target_role": "sparse_delta_in_policy",
+                         "mapping_rules": [
+                             "both_constrain_update_space",
+                             "freeze_backbone_parameters",
+                             "small_trainable_adapter"
+                         ]
+                     }
+                 )
+             ],
+             "shortcut": [
+                 RichSemanticEdge(
+                     target_cluster_id="C1006",  # Mobile Deployment
+                     edge_type="shortcut",
+                     score=0.9,
+                     meta={
+                         "trigger_pattern": r".*mobile.*qlora.*",
+                         "bypass_steps": 2,
+                         "source": "user_query_log"
+                     }
+                 )
+             ]
+         }
+     """
+
+     target_cluster_id: str  # ID of the destination cluster (e.g., "C1002")
+     edge_type: RichEdgeType  # Type of the edge (see RichEdgeType class)
+     meta: Dict[str, Any]  # Edge-specific payload (see examples in the class docstring)
+     created_at: Optional[str] = None  # ISO 8601 timestamp; optional for immutability
+     score: Optional[float] = (
+         None  # Confidence or relevance score, normalized [0.0, 1.0] TODO: can be learned dynamically
+     )
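A small instantiation sketch for RichSemanticEdge: the docstring examples pass edge_type as a plain string, while the field is typed RichEdgeType, so the enum member is used here; the cluster ID, timestamp, and meta payload are placeholders taken from those examples.

# Illustrative only; IDs and meta values are placeholders.
from sirchmunk.schema.cognition import RichEdgeType, RichSemanticEdge

edge = RichSemanticEdge(
    target_cluster_id="C1002",
    edge_type=RichEdgeType.PATHWAY,
    meta={
        "steps": ["apply_4bit_quantization", "calibrate_activations"],
        "required_context": {"task": "finetune"},
    },
    created_at="2025-01-01T00:00:00Z",
    score=0.95,
)
print(edge.edge_type.value, edge.score)  # pathway 0.95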
sirchmunk/schema/context.py ADDED
@@ -0,0 +1,25 @@
+ # Copyright (c) ModelScope Contributors. All rights reserved.
+ from dataclasses import dataclass
+ from typing import List
+
+ from .knowledge import KnowledgeCluster
+ from .request import Request
+ from .response import Response
+
+
+ @dataclass
+ class Context:
+     """
+     Represents the context of an agentic search operation.
+     """
+
+     # The request made by the user
+     request: Request
+
+     # The response generated by the agentic search operation
+     response: Response = None
+
+     # A list of knowledge clusters related to the search operation
+     related_knowledge: List[KnowledgeCluster] = None
+
+     def __post_init__(self): ...