lucidicai 2.0.2__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- lucidicai/__init__.py +367 -899
- lucidicai/api/__init__.py +1 -0
- lucidicai/api/client.py +218 -0
- lucidicai/api/resources/__init__.py +1 -0
- lucidicai/api/resources/dataset.py +192 -0
- lucidicai/api/resources/event.py +88 -0
- lucidicai/api/resources/session.py +126 -0
- lucidicai/core/__init__.py +1 -0
- lucidicai/core/config.py +223 -0
- lucidicai/core/errors.py +60 -0
- lucidicai/core/types.py +35 -0
- lucidicai/sdk/__init__.py +1 -0
- lucidicai/sdk/context.py +231 -0
- lucidicai/sdk/decorators.py +187 -0
- lucidicai/sdk/error_boundary.py +299 -0
- lucidicai/sdk/event.py +126 -0
- lucidicai/sdk/event_builder.py +304 -0
- lucidicai/sdk/features/__init__.py +1 -0
- lucidicai/sdk/features/dataset.py +605 -0
- lucidicai/sdk/features/feature_flag.py +383 -0
- lucidicai/sdk/init.py +361 -0
- lucidicai/sdk/shutdown_manager.py +302 -0
- lucidicai/telemetry/context_bridge.py +82 -0
- lucidicai/telemetry/context_capture_processor.py +25 -9
- lucidicai/telemetry/litellm_bridge.py +20 -24
- lucidicai/telemetry/lucidic_exporter.py +99 -60
- lucidicai/telemetry/openai_patch.py +295 -0
- lucidicai/telemetry/openai_uninstrument.py +87 -0
- lucidicai/telemetry/telemetry_init.py +16 -1
- lucidicai/telemetry/utils/model_pricing.py +278 -0
- lucidicai/utils/__init__.py +1 -0
- lucidicai/utils/images.py +337 -0
- lucidicai/utils/logger.py +168 -0
- lucidicai/utils/queue.py +393 -0
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/METADATA +1 -1
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/RECORD +38 -9
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/WHEEL +0 -0
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/top_level.txt +0 -0
lucidicai/telemetry/openai_patch.py
@@ -0,0 +1,295 @@
+"""OpenAI responses.parse instrumentation patch.
+
+This module provides instrumentation for OpenAI's responses.parse API
+which is not covered by the standard opentelemetry-instrumentation-openai package.
+"""
+import functools
+import logging
+import time
+from typing import Any, Callable, Optional
+
+from opentelemetry import trace
+from opentelemetry.trace import Status, StatusCode, SpanKind
+
+from ..sdk.context import current_session_id, current_parent_event_id
+from ..utils.logger import debug, verbose, warning
+
+logger = logging.getLogger("Lucidic")
+
+
+class OpenAIResponsesPatcher:
+    """Patches OpenAI client to instrument responses.parse method."""
+
+    def __init__(self, tracer_provider=None):
+        """Initialize the patcher.
+
+        Args:
+            tracer_provider: OpenTelemetry TracerProvider to use
+        """
+        self._tracer_provider = tracer_provider or trace.get_tracer_provider()
+        self._tracer = self._tracer_provider.get_tracer(__name__)
+        self._is_patched = False
+        self._original_parse = None
+        self._client_refs = []  # Keep track of patched clients for cleanup
+
+    def patch(self):
+        """Apply the patch to OpenAI client initialization."""
+        if self._is_patched:
+            debug("[OpenAI Patch] responses.parse already patched")
+            return
+
+        try:
+            import openai
+            from openai import OpenAI
+
+            # Store the original __init__
+            original_init = OpenAI.__init__
+
+            @functools.wraps(original_init)
+            def patched_init(client_self, *args, **kwargs):
+                # Call original initialization
+                original_init(client_self, *args, **kwargs)
+
+                # Patch the responses.parse method on this specific instance
+                if hasattr(client_self, 'resources') and hasattr(client_self.resources, 'responses'):
+                    responses = client_self.resources.responses
+                    if hasattr(responses, 'parse'):
+                        # Store original and apply wrapper
+                        original_parse = responses.parse
+                        responses.parse = self._create_parse_wrapper(original_parse)
+
+                        # Track this client for cleanup
+                        self._client_refs.append((responses, original_parse))
+
+                        verbose("[OpenAI Patch] Patched responses.parse on client instance")
+
+                # Also patch the direct access if available
+                if hasattr(client_self, 'responses') and hasattr(client_self.responses, 'parse'):
+                    original_parse = client_self.responses.parse
+                    client_self.responses.parse = self._create_parse_wrapper(original_parse)
+                    self._client_refs.append((client_self.responses, original_parse))
+                    verbose("[OpenAI Patch] Patched client.responses.parse")
+
+            # Replace the __init__ method
+            OpenAI.__init__ = patched_init
+            self._original_init = original_init
+            self._is_patched = True
+
+            logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses.parse")
+
+        except ImportError:
+            logger.warning("[OpenAI Patch] OpenAI library not installed, skipping patch")
+        except Exception as e:
+            logger.error(f"[OpenAI Patch] Failed to patch responses.parse: {e}")
+
+    def _create_parse_wrapper(self, original_method: Callable) -> Callable:
+        """Create a wrapper for the responses.parse method.
+
+        Args:
+            original_method: The original parse method to wrap
+
+        Returns:
+            Wrapped method with instrumentation
+        """
+        @functools.wraps(original_method)
+        def wrapper(**kwargs):
+            # Create span for tracing
+            with self._tracer.start_as_current_span(
+                "openai.responses.parse",
+                kind=SpanKind.CLIENT
+            ) as span:
+                start_time = time.time()
+
+                try:
+                    # Extract request parameters
+                    model = kwargs.get('model', 'unknown')
+                    temperature = kwargs.get('temperature', 1.0)
+                    input_param = kwargs.get('input', [])
+                    text_format = kwargs.get('text_format')
+                    instructions = kwargs.get('instructions')
+
+                    # Convert input to messages format if needed
+                    if isinstance(input_param, str):
+                        messages = [{"role": "user", "content": input_param}]
+                    elif isinstance(input_param, list):
+                        messages = input_param
+                    else:
+                        messages = []
+
+                    # Set span attributes
+                    span.set_attribute("gen_ai.system", "openai")
+                    span.set_attribute("gen_ai.request.model", model)
+                    span.set_attribute("gen_ai.request.temperature", temperature)
+                    span.set_attribute("gen_ai.operation.name", "responses.parse")
+
+                    # Add a unique marker for our instrumentation
+                    span.set_attribute("lucidic.instrumented", "responses.parse")
+                    span.set_attribute("lucidic.patch.version", "1.0")
+
+                    if text_format and hasattr(text_format, '__name__'):
+                        span.set_attribute("gen_ai.request.response_format", text_format.__name__)
+
+                    if instructions:
+                        span.set_attribute("gen_ai.request.instructions", str(instructions))
+
+                    # Always set message attributes for proper event creation
+                    for i, msg in enumerate(messages):  # Include all messages
+                        if isinstance(msg, dict):
+                            role = msg.get('role', 'user')
+                            content = msg.get('content', '')
+                            span.set_attribute(f"gen_ai.prompt.{i}.role", role)
+                            # Always include full content - EventQueue handles large messages
+                            span.set_attribute(f"gen_ai.prompt.{i}.content", str(content))
+
+                    # Call the original method
+                    result = original_method(**kwargs)
+
+                    # Process the response and set attributes on span
+                    self._set_response_attributes(span, result, model, messages, start_time, text_format)
+
+                    span.set_status(Status(StatusCode.OK))
+                    return result
+
+                except Exception as e:
+                    # Record error in span
+                    span.set_status(Status(StatusCode.ERROR, str(e)))
+                    span.record_exception(e)
+
+                    # The exporter will handle creating error events from the span
+                    raise
+
+        return wrapper
+
+    def _set_response_attributes(self, span, result, model: str, messages: list, start_time: float, text_format):
+        """Set response attributes on the span for the exporter to use.
+
+        Args:
+            span: OpenTelemetry span
+            result: Response from OpenAI
+            model: Model name
+            messages: Input messages
+            start_time: Request start time
+            text_format: Response format (Pydantic model)
+        """
+        duration = time.time() - start_time
+
+        # Extract output
+        output_text = None
+
+        # Handle structured output response
+        if hasattr(result, 'output_parsed'):
+            output_text = str(result.output_parsed)
+
+        # Always set completion attributes so the exporter can extract them
+        span.set_attribute("gen_ai.completion.0.role", "assistant")
+        span.set_attribute("gen_ai.completion.0.content", output_text)
+
+        # Handle usage data
+        if hasattr(result, 'usage'):
+            usage = result.usage
+
+            # Debug logging
+            debug(f"[OpenAI Patch] Usage object type: {type(usage)}")
+            debug(f"[OpenAI Patch] Usage attributes: {[attr for attr in dir(usage) if not attr.startswith('_')]}")
+
+            # Extract tokens with proper handling
+            prompt_tokens = None
+            completion_tokens = None
+            total_tokens = None
+
+            # Try different ways to access token data
+            if hasattr(usage, 'prompt_tokens'):
+                prompt_tokens = usage.prompt_tokens
+            elif hasattr(usage, 'input_tokens'):
+                prompt_tokens = usage.input_tokens
+
+            if hasattr(usage, 'completion_tokens'):
+                completion_tokens = usage.completion_tokens
+            elif hasattr(usage, 'output_tokens'):
+                completion_tokens = usage.output_tokens
+
+            if hasattr(usage, 'total_tokens'):
+                total_tokens = usage.total_tokens
+            elif prompt_tokens is not None and completion_tokens is not None:
+                total_tokens = prompt_tokens + completion_tokens
+
+            debug(f"[OpenAI Patch] Extracted tokens - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}")
+
+            # Set usage attributes on span
+            if prompt_tokens is not None:
+                span.set_attribute("gen_ai.usage.prompt_tokens", prompt_tokens)
+            if completion_tokens is not None:
+                span.set_attribute("gen_ai.usage.completion_tokens", completion_tokens)
+            if total_tokens is not None:
+                span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
+
+        # Set additional metadata for the exporter
+        if text_format and hasattr(text_format, '__name__'):
+            span.set_attribute("lucidic.response_format", text_format.__name__)
+
+        # Set duration as attribute
+        span.set_attribute("lucidic.duration_seconds", duration)
+
+
+    def _should_capture_content(self) -> bool:
+        """Check if message content should be captured.
+
+        Returns:
+            True if content capture is enabled
+        """
+
+        return True  # always capture content for now
+
+        import os
+        # check OTEL standard env var
+        otel_capture = os.getenv('OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT', 'false')
+        # check Lucidic-specific env var
+        lucidic_capture = os.getenv('LUCIDIC_CAPTURE_CONTENT', 'false')
+
+        return otel_capture.lower() == 'true' or lucidic_capture.lower() == 'true'
+
+    def unpatch(self):
+        """Remove the patch and restore original behavior."""
+        if not self._is_patched:
+            return
+
+        try:
+            # restore original __init__ if we have it
+            if hasattr(self, '_original_init'):
+                import openai
+                from openai import OpenAI
+                OpenAI.__init__ = self._original_init
+
+            # restore original parse methods on tracked clients
+            for responses_obj, original_parse in self._client_refs:
+                try:
+                    responses_obj.parse = original_parse
+                except:
+                    pass  # Client might have been garbage collected
+
+            self._client_refs.clear()
+            self._is_patched = False
+
+            logger.info("[OpenAI Patch] Successfully removed responses.parse patch")
+
+        except Exception as e:
+            logger.error(f"[OpenAI Patch] Failed to unpatch: {e}")
+
+
+# Global singleton instance
+_patcher_instance: Optional[OpenAIResponsesPatcher] = None
+
+
+def get_responses_patcher(tracer_provider=None) -> OpenAIResponsesPatcher:
+    """Get or create the global patcher instance.
+
+    Args:
+        tracer_provider: OpenTelemetry TracerProvider
+
+    Returns:
+        The singleton patcher instance
+    """
+    global _patcher_instance
+    if _patcher_instance is None:
+        _patcher_instance = OpenAIResponsesPatcher(tracer_provider)
+    return _patcher_instance
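The net effect: once patch() replaces OpenAI.__init__, every client constructed afterwards gets a wrapped responses.parse that records gen_ai.* span attributes around each call. A minimal usage sketch follows; the model name and the Answer schema are illustrative placeholders, not part of the package, and in normal use patch() is invoked for you during telemetry setup (see the telemetry_init.py hunk below):

    from pydantic import BaseModel
    from openai import OpenAI
    from lucidicai.telemetry.openai_patch import get_responses_patcher

    class Answer(BaseModel):
        capital: str

    # Apply the patch before constructing clients; only instances created
    # after OpenAI.__init__ is replaced get the instrumented parse.
    patcher = get_responses_patcher()
    patcher.patch()

    client = OpenAI()  # patched_init wraps this instance's responses.parse
    resp = client.responses.parse(
        model="gpt-4o-mini",                      # illustrative model name
        input="What is the capital of France?",
        text_format=Answer,
    )
    print(resp.output_parsed)                     # parsed Answer instance

    patcher.unpatch()  # restore OpenAI.__init__ and tracked parse methods

Note that the wrapper is declared as def wrapper(**kwargs), so a patched parse accepts keyword arguments only; a positional call would raise TypeError.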
lucidicai/telemetry/openai_uninstrument.py
@@ -0,0 +1,87 @@
+"""Utility to uninstrument specific OpenAI methods to prevent duplicates.
+
+This module helps prevent the standard OpenTelemetry instrumentation
+from creating duplicate spans for methods we're handling ourselves.
+"""
+import logging
+
+logger = logging.getLogger("Lucidic")
+
+
+def uninstrument_responses(openai_module):
+    """Remove any incorrect instrumentation from responses module.
+
+    The standard OpenTelemetry instrumentation might try to instrument
+    responses.create (which doesn't exist) or other responses methods.
+    This function removes any such instrumentation.
+
+    Args:
+        openai_module: The OpenAI module
+    """
+    try:
+        # Check if responses module exists
+        if not hasattr(openai_module, 'resources'):
+            return
+
+        resources = openai_module.resources
+        if not hasattr(resources, 'responses'):
+            return
+
+        responses = resources.responses
+
+        # Check for incorrectly wrapped methods
+        methods_to_check = ['create', 'parse']
+
+        for method_name in methods_to_check:
+            if hasattr(responses, method_name):
+                method = getattr(responses, method_name)
+
+                # Check if it's wrapped (wrapped methods usually have __wrapped__ attribute)
+                if hasattr(method, '__wrapped__'):
+                    # Restore original
+                    original = method.__wrapped__
+                    setattr(responses, method_name, original)
+                    logger.debug(f"[OpenAI Uninstrument] Removed wrapper from responses.{method_name}")
+
+                # Also check for _original_* attributes (another wrapping pattern)
+                original_attr = f'_original_{method_name}'
+                if hasattr(responses, original_attr):
+                    original = getattr(responses, original_attr)
+                    setattr(responses, method_name, original)
+                    delattr(responses, original_attr)
+                    logger.debug(f"[OpenAI Uninstrument] Restored original responses.{method_name}")
+
+        # Also check the Responses class itself
+        if hasattr(responses, 'Responses'):
+            Responses = responses.Responses
+            for method_name in methods_to_check:
+                if hasattr(Responses, method_name):
+                    method = getattr(Responses, method_name)
+                    if hasattr(method, '__wrapped__'):
+                        original = method.__wrapped__
+                        setattr(Responses, method_name, original)
+                        logger.debug(f"[OpenAI Uninstrument] Removed wrapper from Responses.{method_name}")
+
+    except Exception as e:
+        logger.debug(f"[OpenAI Uninstrument] Error while checking responses instrumentation: {e}")
+
+
+def clean_openai_instrumentation():
+    """Clean up any problematic OpenAI instrumentation.
+
+    This should be called after standard instrumentation but before our patches.
+    """
+    try:
+        import openai
+        uninstrument_responses(openai)
+
+        # Also check if client instances need cleaning
+        if hasattr(openai, 'OpenAI'):
+            # The OpenAI class might have wrapped __init__ that creates bad instrumentation
+            # We don't want to break it, just ensure responses aren't double-instrumented
+            pass
+
+    except ImportError:
+        pass  # OpenAI not installed
+    except Exception as e:
+        logger.debug(f"[OpenAI Uninstrument] Error during cleanup: {e}")
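The detection above leans on a stdlib convention: wrappers built with functools.wraps expose the original callable as __wrapped__. A small self-contained sketch of the mechanism this module relies on (generic Python, not lucidicai API):

    import functools

    def instrument(fn):
        @functools.wraps(fn)  # copies metadata and sets wrapper.__wrapped__ = fn
        def wrapper(*args, **kwargs):
            # a real instrumentor would start a span here
            return fn(*args, **kwargs)
        return wrapper

    def parse():
        return "real result"

    patched = instrument(parse)
    assert patched.__wrapped__ is parse  # the marker uninstrument_responses checks for

    # Restoring is just reassigning the original, as the module does via setattr():
    restored = getattr(patched, '__wrapped__', patched)
    assert restored() == "real result"

Wrappers applied without functools.wraps may instead stash the original under _original_<name>, which is why the module checks both conventions.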
lucidicai/telemetry/telemetry_init.py
@@ -55,7 +55,22 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
             inst.instrument(tracer_provider=tracer_provider, enrich_token_usage=True)
             _global_instrumentors["openai"] = inst
             new_instrumentors["openai"] = inst
-
+
+            # Clean up any problematic instrumentation from standard library
+            from .openai_uninstrument import clean_openai_instrumentation
+            clean_openai_instrumentation()
+
+            # Add patch for responses.parse (not covered by standard instrumentation)
+            import os
+            if os.getenv('LUCIDIC_DISABLE_RESPONSES_PATCH', 'false').lower() != 'true':
+                from .openai_patch import get_responses_patcher
+                patcher = get_responses_patcher(tracer_provider)
+                patcher.patch()
+                _global_instrumentors["openai_responses_patch"] = patcher
+            else:
+                logger.info("[Telemetry] Skipping responses.parse patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
+
+            logger.info("[Telemetry] Instrumented OpenAI (including responses.parse)")
         except Exception as e:
             logger.error(f"Failed to instrument OpenAI: {e}")
 
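The patch is opt-out: instrument_providers reads LUCIDIC_DISABLE_RESPONSES_PATCH once, at instrumentation time, so the variable must be set before telemetry is initialized. A minimal sketch, assuming it runs before whatever entry point triggers instrument_providers:

    import os

    # Must be set before the SDK initializes telemetry; any value other
    # than "true" (case-insensitive) leaves the patch enabled.
    os.environ["LUCIDIC_DISABLE_RESPONSES_PATCH"] = "true"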