attune-ai 2.1.5-py3-none-any.whl → 2.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. attune/cli/__init__.py +3 -59
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +7 -15
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +9 -3
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/dashboard/app.py +3 -1
  20. attune/dashboard/simple_server.py +3 -1
  21. attune/dashboard/standalone_server.py +7 -3
  22. attune/mcp/server.py +54 -102
  23. attune/memory/long_term.py +0 -2
  24. attune/memory/short_term/__init__.py +84 -0
  25. attune/memory/short_term/base.py +467 -0
  26. attune/memory/short_term/batch.py +219 -0
  27. attune/memory/short_term/caching.py +227 -0
  28. attune/memory/short_term/conflicts.py +265 -0
  29. attune/memory/short_term/cross_session.py +122 -0
  30. attune/memory/short_term/facade.py +655 -0
  31. attune/memory/short_term/pagination.py +215 -0
  32. attune/memory/short_term/patterns.py +271 -0
  33. attune/memory/short_term/pubsub.py +286 -0
  34. attune/memory/short_term/queues.py +244 -0
  35. attune/memory/short_term/security.py +300 -0
  36. attune/memory/short_term/sessions.py +250 -0
  37. attune/memory/short_term/streams.py +249 -0
  38. attune/memory/short_term/timelines.py +234 -0
  39. attune/memory/short_term/transactions.py +186 -0
  40. attune/memory/short_term/working.py +252 -0
  41. attune/meta_workflows/cli_commands/__init__.py +3 -0
  42. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  43. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  44. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  45. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  48. attune/models/adaptive_routing.py +4 -8
  49. attune/models/auth_cli.py +3 -9
  50. attune/models/auth_strategy.py +2 -4
  51. attune/models/telemetry/analytics.py +0 -2
  52. attune/models/telemetry/backend.py +0 -3
  53. attune/models/telemetry/storage.py +0 -2
  54. attune/orchestration/_strategies/__init__.py +156 -0
  55. attune/orchestration/_strategies/base.py +231 -0
  56. attune/orchestration/_strategies/conditional_strategies.py +373 -0
  57. attune/orchestration/_strategies/conditions.py +369 -0
  58. attune/orchestration/_strategies/core_strategies.py +491 -0
  59. attune/orchestration/_strategies/data_classes.py +64 -0
  60. attune/orchestration/_strategies/nesting.py +233 -0
  61. attune/orchestration/execution_strategies.py +58 -1567
  62. attune/orchestration/meta_orchestrator.py +1 -3
  63. attune/project_index/scanner.py +1 -3
  64. attune/project_index/scanner_parallel.py +7 -5
  65. attune/socratic_router.py +1 -3
  66. attune/telemetry/agent_coordination.py +9 -3
  67. attune/telemetry/agent_tracking.py +16 -3
  68. attune/telemetry/approval_gates.py +22 -5
  69. attune/telemetry/cli.py +1 -3
  70. attune/telemetry/commands/dashboard_commands.py +24 -8
  71. attune/telemetry/event_streaming.py +8 -2
  72. attune/telemetry/feedback_loop.py +10 -2
  73. attune/tools.py +1 -0
  74. attune/workflow_commands.py +1 -3
  75. attune/workflows/__init__.py +53 -10
  76. attune/workflows/autonomous_test_gen.py +158 -102
  77. attune/workflows/base.py +48 -672
  78. attune/workflows/batch_processing.py +1 -3
  79. attune/workflows/compat.py +156 -0
  80. attune/workflows/cost_mixin.py +141 -0
  81. attune/workflows/data_classes.py +92 -0
  82. attune/workflows/document_gen/workflow.py +11 -14
  83. attune/workflows/history.py +62 -37
  84. attune/workflows/llm_base.py +1 -3
  85. attune/workflows/migration.py +422 -0
  86. attune/workflows/output.py +2 -7
  87. attune/workflows/parsing_mixin.py +427 -0
  88. attune/workflows/perf_audit.py +3 -1
  89. attune/workflows/progress.py +9 -11
  90. attune/workflows/release_prep.py +5 -1
  91. attune/workflows/routing.py +0 -2
  92. attune/workflows/secure_release.py +2 -1
  93. attune/workflows/security_audit.py +19 -14
  94. attune/workflows/security_audit_phase3.py +28 -22
  95. attune/workflows/seo_optimization.py +27 -27
  96. attune/workflows/test_gen/test_templates.py +1 -4
  97. attune/workflows/test_gen/workflow.py +0 -2
  98. attune/workflows/test_gen_behavioral.py +6 -19
  99. attune/workflows/test_gen_parallel.py +6 -4
  100. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
  101. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/RECORD +116 -91
  102. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
  103. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  104. attune_llm/agent_factory/__init__.py +6 -6
  105. attune_llm/commands/__init__.py +10 -10
  106. attune_llm/commands/models.py +3 -3
  107. attune_llm/config/__init__.py +8 -8
  108. attune_llm/learning/__init__.py +3 -3
  109. attune_llm/learning/extractor.py +5 -3
  110. attune_llm/learning/storage.py +5 -3
  111. attune_llm/security/__init__.py +17 -17
  112. attune_llm/utils/tokens.py +3 -1
  113. attune/cli_legacy.py +0 -3978
  114. attune/memory/short_term.py +0 -2192
  115. attune/workflows/manage_docs.py +0 -87
  116. attune/workflows/test5.py +0 -125
  117. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
  118. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
  119. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  120. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
attune/memory/short_term/pagination.py
@@ -0,0 +1,215 @@
+ """SCAN-based pagination for large key sets.
+
+ This module provides cursor-based pagination using Redis SCAN:
+ - Paginated pattern listing
+ - Generic key scanning with filters
+
+ Benefits:
+ - Memory-efficient for large datasets
+ - Non-blocking (unlike KEYS command)
+ - Cursor-based for consistent iteration
+
+ Classes:
+     Pagination: SCAN-based pagination operations
+
+ Example:
+     >>> from attune.memory.short_term.pagination import Pagination
+     >>> from attune.memory.types import AgentCredentials, AccessTier
+     >>> pagination = Pagination(base_ops)
+     >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+     >>> result = pagination.list_staged_patterns_paginated(creds, "0", 10)
+     >>> for pattern in result.items:
+     ...     print(pattern.name)
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ import json
+ import time
+ from datetime import datetime
+ from typing import TYPE_CHECKING
+
+ import structlog
+
+ from attune.memory.types import (
+     AgentCredentials,
+     PaginatedResult,
+     StagedPattern,
+ )
+
+ if TYPE_CHECKING:
+     from attune.memory.short_term.base import BaseOperations
+
+ logger = structlog.get_logger(__name__)
+
+
+ class Pagination:
+     """SCAN-based pagination operations.
+
+     Provides memory-efficient pagination using Redis SCAN command
+     instead of the blocking KEYS command. Suitable for large datasets.
+
+     The class is designed to be composed with BaseOperations
+     for dependency injection.
+
+     Attributes:
+         PREFIX_STAGED: Key prefix for staged patterns namespace
+
+     Example:
+         >>> pagination = Pagination(base_ops)
+         >>> result = pagination.list_staged_patterns_paginated(creds, "0", 10)
+         >>> while result.has_more:
+         ...     for pattern in result.items:
+         ...         process(pattern)
+         ...     result = pagination.list_staged_patterns_paginated(creds, result.cursor, 10)
+     """
+
+     PREFIX_STAGED = "empathy:staged:"
+
+     def __init__(self, base: BaseOperations) -> None:
+         """Initialize pagination operations.
+
+         Args:
+             base: BaseOperations instance for storage access
+         """
+         self._base = base
+
+     def list_staged_patterns_paginated(
+         self,
+         credentials: AgentCredentials,
+         cursor: str = "0",
+         count: int = 100,
+     ) -> PaginatedResult:
+         """List staged patterns with pagination using SCAN.
+
+         More efficient than list_staged_patterns() for large datasets.
+
+         Args:
+             credentials: Agent credentials
+             cursor: Pagination cursor (start with "0")
+             count: Maximum items per page
+
+         Returns:
+             PaginatedResult with items, cursor, and has_more flag
+
+         Example:
+             >>> result = pagination.list_staged_patterns_paginated(creds, "0", 10)
+             >>> for pattern in result.items:
+             ...     print(pattern.name)
+             >>> if result.has_more:
+             ...     next_result = pagination.list_staged_patterns_paginated(
+             ...         creds, result.cursor, 10
+             ...     )
+         """
+         start_time = time.perf_counter()
+         pattern = f"{self.PREFIX_STAGED}*"
+
+         # Handle mock storage mode
+         if self._base.use_mock:
+             import fnmatch
+
+             all_keys = [
+                 k for k in self._base._mock_storage.keys() if fnmatch.fnmatch(k, pattern)
+             ]
+             start_idx = int(cursor)
+             end_idx = start_idx + count
+             page_keys = all_keys[start_idx:end_idx]
+
+             patterns = []
+             for key in page_keys:
+                 raw_value, expires = self._base._mock_storage[key]
+                 if expires is None or datetime.now().timestamp() < expires:
+                     patterns.append(StagedPattern.from_dict(json.loads(str(raw_value))))
+
+             new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
+             has_more = end_idx < len(all_keys)
+
+             latency_ms = (time.perf_counter() - start_time) * 1000
+             self._base._metrics.record_operation("list_paginated", latency_ms)
+
+             return PaginatedResult(
+                 items=patterns,
+                 cursor=new_cursor,
+                 has_more=has_more,
+                 total_scanned=len(page_keys),
+             )
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return PaginatedResult(items=[], cursor="0", has_more=False)
+
+         # Use SCAN for efficient iteration
+         new_cursor, keys = self._base._client.scan(
+             cursor=int(cursor), match=pattern, count=count
+         )
+
+         patterns = []
+         for key in keys:
+             raw = self._base._client.get(key)
+             if raw:
+                 patterns.append(StagedPattern.from_dict(json.loads(raw)))
+
+         has_more = new_cursor != 0
+
+         latency_ms = (time.perf_counter() - start_time) * 1000
+         self._base._metrics.record_operation("list_paginated", latency_ms)
+
+         return PaginatedResult(
+             items=patterns,
+             cursor=str(new_cursor),
+             has_more=has_more,
+             total_scanned=len(keys),
+         )
+
+     def scan_keys(
+         self,
+         pattern: str,
+         cursor: str = "0",
+         count: int = 100,
+     ) -> PaginatedResult:
+         """Scan keys matching a pattern with pagination.
+
+         Generic key scanning that can be used for any key namespace.
+
+         Args:
+             pattern: Key pattern (e.g., "empathy:working:*")
+             cursor: Pagination cursor
+             count: Items per page
+
+         Returns:
+             PaginatedResult with key strings
+
+         Example:
+             >>> result = pagination.scan_keys("empathy:session:*", "0", 50)
+             >>> for key in result.items:
+             ...     print(key)
+         """
+         # Handle mock storage mode
+         if self._base.use_mock:
+             import fnmatch
+
+             all_keys = [
+                 k for k in self._base._mock_storage.keys() if fnmatch.fnmatch(k, pattern)
+             ]
+             start_idx = int(cursor)
+             end_idx = start_idx + count
+             page_keys = all_keys[start_idx:end_idx]
+             new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
+             has_more = end_idx < len(all_keys)
+             return PaginatedResult(items=page_keys, cursor=new_cursor, has_more=has_more)
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return PaginatedResult(items=[], cursor="0", has_more=False)
+
+         new_cursor, keys = self._base._client.scan(
+             cursor=int(cursor), match=pattern, count=count
+         )
+         return PaginatedResult(
+             items=[str(k) for k in keys],
+             cursor=str(new_cursor),
+             has_more=new_cursor != 0,
+         )
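
For readers new to this API, the typical consumption pattern is a drain-the-cursor loop. The sketch below is illustrative only and is not part of the diff; it assumes an already-constructed BaseOperations instance (called base_ops here) and the AccessTier.CONTRIBUTOR tier named in the docstrings above.

from attune.memory.short_term.pagination import Pagination
from attune.memory.types import AccessTier, AgentCredentials

pagination = Pagination(base_ops)  # base_ops: assumed pre-configured BaseOperations
creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)

cursor = "0"
while True:
    # Each call returns one page of StagedPattern items plus the cursor for the next SCAN step.
    result = pagination.list_staged_patterns_paginated(creds, cursor, count=50)
    for staged in result.items:
        print(staged.name)
    if not result.has_more:
        break
    cursor = result.cursor
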
attune/memory/short_term/patterns.py
@@ -0,0 +1,271 @@
+ """Pattern staging workflow - stage, validate, promote/reject.
+
+ This module provides the pattern staging lifecycle:
+ - Stage: Store patterns for validation (CONTRIBUTOR+)
+ - Get/List: Retrieve staged patterns (any tier)
+ - Promote: Move pattern to active library (VALIDATOR+)
+ - Reject: Remove pattern from staging (VALIDATOR+)
+
+ Key Prefix: PREFIX_STAGED = "empathy:staged:"
+
+ Classes:
+     PatternStaging: Pattern staging lifecycle operations
+
+ Example:
+     >>> from attune.memory.short_term.patterns import PatternStaging
+     >>> from attune.memory.types import AgentCredentials, AccessTier, StagedPattern
+     >>> staging = PatternStaging(base_ops)
+     >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+     >>> pattern = StagedPattern(pattern_id="p1", name="Test", ...)
+     >>> staging.stage_pattern(pattern, creds)
+     >>> staged = staging.list_staged_patterns(creds)
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ import json
+ from typing import TYPE_CHECKING
+
+ import structlog
+
+ from attune.memory.types import (
+     AgentCredentials,
+     StagedPattern,
+     TTLStrategy,
+ )
+
+ if TYPE_CHECKING:
+     from attune.memory.short_term.base import BaseOperations
+
+ logger = structlog.get_logger(__name__)
+
+
+ class PatternStaging:
+     """Pattern staging lifecycle operations.
+
+     Implements the pattern validation workflow per EMPATHY_PHILOSOPHY.md:
+     - Patterns must be staged before being promoted to active library
+     - CONTRIBUTOR tier can stage patterns
+     - VALIDATOR tier can promote or reject patterns
+
+     The class is designed to be composed with BaseOperations
+     for dependency injection.
+
+     Attributes:
+         PREFIX_STAGED: Key prefix for staged patterns namespace
+
+     Example:
+         >>> staging = PatternStaging(base_ops)
+         >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+         >>> staging.stage_pattern(pattern, creds)
+         True
+         >>> staging.list_staged_patterns(creds)
+         [StagedPattern(...)]
+     """
+
+     PREFIX_STAGED = "empathy:staged:"
+
+     def __init__(self, base: BaseOperations) -> None:
+         """Initialize pattern staging operations.
+
+         Args:
+             base: BaseOperations instance for storage access
+         """
+         self._base = base
+
+     def stage_pattern(
+         self,
+         pattern: StagedPattern,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Stage a pattern for validation.
+
+         Per EMPATHY_PHILOSOPHY.md: Patterns must be staged before
+         being promoted to the active library.
+
+         Args:
+             pattern: Pattern to stage
+             credentials: Must be CONTRIBUTOR or higher
+
+         Returns:
+             True if staged successfully
+
+         Raises:
+             TypeError: If pattern is not StagedPattern
+             PermissionError: If credentials lack staging access
+
+         Example:
+             >>> pattern = StagedPattern(pattern_id="p1", name="Test", ...)
+             >>> staging.stage_pattern(pattern, creds)
+             True
+         """
+         # Pattern 5: Type validation
+         if not isinstance(pattern, StagedPattern):
+             raise TypeError(f"pattern must be StagedPattern, got {type(pattern).__name__}")
+
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot stage patterns. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         key = f"{self.PREFIX_STAGED}{pattern.pattern_id}"
+         return self._base._set(
+             key,
+             json.dumps(pattern.to_dict()),
+             TTLStrategy.STAGED_PATTERNS.value,
+         )
+
+     def get_staged_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Retrieve a staged pattern.
+
+         Args:
+             pattern_id: Pattern ID
+             credentials: Any tier can read
+
+         Returns:
+             StagedPattern or None if not found
+
+         Raises:
+             ValueError: If pattern_id is empty
+
+         Example:
+             >>> pattern = staging.get_staged_pattern("p1", creds)
+             >>> if pattern:
+             ...     print(f"Found: {pattern.name}")
+         """
+         # Pattern 1: String ID validation
+         if not pattern_id or not pattern_id.strip():
+             raise ValueError(f"pattern_id cannot be empty. Got: {pattern_id!r}")
+
+         key = f"{self.PREFIX_STAGED}{pattern_id}"
+         raw = self._base._get(key)
+
+         if raw is None:
+             return None
+
+         return StagedPattern.from_dict(json.loads(raw))
+
+     def list_staged_patterns(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[StagedPattern]:
+         """List all staged patterns awaiting validation.
+
+         Args:
+             credentials: Any tier can read
+
+         Returns:
+             List of staged patterns
+
+         Example:
+             >>> patterns = staging.list_staged_patterns(creds)
+             >>> for p in patterns:
+             ...     print(f"{p.pattern_id}: {p.name}")
+         """
+         pattern = f"{self.PREFIX_STAGED}*"
+         keys = self._base._keys(pattern)
+         patterns = []
+
+         for key in keys:
+             raw = self._base._get(key)
+             if raw:
+                 patterns.append(StagedPattern.from_dict(json.loads(raw)))
+
+         return patterns
+
+     def promote_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Promote staged pattern (remove from staging for library add).
+
+         Args:
+             pattern_id: Pattern to promote
+             credentials: Must be VALIDATOR or higher
+
+         Returns:
+             The promoted pattern (for adding to PatternLibrary)
+
+         Raises:
+             PermissionError: If credentials lack validation access
+
+         Example:
+             >>> pattern = staging.promote_pattern("p1", validator_creds)
+             >>> if pattern:
+             ...     pattern_library.add(pattern)
+         """
+         if not credentials.can_validate():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot promote patterns. "
+                 "Requires VALIDATOR tier or higher.",
+             )
+
+         pattern = self.get_staged_pattern(pattern_id, credentials)
+         if pattern:
+             key = f"{self.PREFIX_STAGED}{pattern_id}"
+             self._base._delete(key)
+         return pattern
+
+     def reject_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+         reason: str = "",
+     ) -> bool:
+         """Reject a staged pattern.
+
+         Args:
+             pattern_id: Pattern to reject
+             credentials: Must be VALIDATOR or higher
+             reason: Rejection reason (for audit)
+
+         Returns:
+             True if rejected
+
+         Raises:
+             PermissionError: If credentials lack validation access
+
+         Example:
+             >>> staging.reject_pattern("p1", validator_creds, "Not applicable")
+             True
+         """
+         if not credentials.can_validate():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot reject patterns. "
+                 "Requires VALIDATOR tier or higher.",
+             )
+
+         key = f"{self.PREFIX_STAGED}{pattern_id}"
+         deleted = self._base._delete(key)
+
+         if deleted and reason:
+             logger.info(
+                 "pattern_rejected",
+                 pattern_id=pattern_id,
+                 agent_id=credentials.agent_id,
+                 reason=reason,
+             )
+
+         return deleted
+
+     def count_staged(self) -> int:
+         """Count the number of staged patterns.
+
+         Returns:
+             Number of staged patterns
+
+         Example:
+             >>> count = staging.count_staged()
+             >>> print(f"{count} patterns awaiting validation")
+         """
+         pattern = f"{self.PREFIX_STAGED}*"
+         return len(self._base._keys(pattern))
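
Taken together, the staging API reads as stage → list/get → promote or reject. A minimal end-to-end sketch, again illustrative rather than part of the diff, assuming a pre-configured BaseOperations instance (base_ops), that AccessTier exposes the CONTRIBUTOR and VALIDATOR tiers named in the docstrings, and with StagedPattern constructor fields beyond pattern_id and name elided:

from attune.memory.short_term.patterns import PatternStaging
from attune.memory.types import AccessTier, AgentCredentials, StagedPattern

staging = PatternStaging(base_ops)  # base_ops: assumed pre-configured BaseOperations
contributor = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
validator = AgentCredentials("agent_2", AccessTier.VALIDATOR)  # tier name taken from the docstrings; exact enum member assumed

# CONTRIBUTOR stages a candidate; the TTL comes from TTLStrategy.STAGED_PATTERNS.
candidate = StagedPattern(pattern_id="p1", name="Test")  # remaining fields elided
staging.stage_pattern(candidate, contributor)

# Any tier can inspect what is awaiting validation.
for p in staging.list_staged_patterns(contributor):
    print(p.pattern_id, p.name)

# VALIDATOR promotes the pattern (returned so it can be added to the active library) ...
promoted = staging.promote_pattern("p1", validator)

# ... or, alternatively, rejects it with an audit reason:
# staging.reject_pattern("p1", validator, reason="Not applicable")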