ai-lib-python 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_lib_python/__init__.py +43 -0
- ai_lib_python/batch/__init__.py +15 -0
- ai_lib_python/batch/collector.py +244 -0
- ai_lib_python/batch/executor.py +224 -0
- ai_lib_python/cache/__init__.py +26 -0
- ai_lib_python/cache/backends.py +380 -0
- ai_lib_python/cache/key.py +237 -0
- ai_lib_python/cache/manager.py +332 -0
- ai_lib_python/client/__init__.py +37 -0
- ai_lib_python/client/builder.py +528 -0
- ai_lib_python/client/cancel.py +368 -0
- ai_lib_python/client/core.py +433 -0
- ai_lib_python/client/response.py +134 -0
- ai_lib_python/embeddings/__init__.py +36 -0
- ai_lib_python/embeddings/client.py +339 -0
- ai_lib_python/embeddings/types.py +234 -0
- ai_lib_python/embeddings/vectors.py +246 -0
- ai_lib_python/errors/__init__.py +41 -0
- ai_lib_python/errors/base.py +316 -0
- ai_lib_python/errors/classification.py +210 -0
- ai_lib_python/guardrails/__init__.py +35 -0
- ai_lib_python/guardrails/base.py +336 -0
- ai_lib_python/guardrails/filters.py +583 -0
- ai_lib_python/guardrails/validators.py +475 -0
- ai_lib_python/pipeline/__init__.py +55 -0
- ai_lib_python/pipeline/accumulate.py +248 -0
- ai_lib_python/pipeline/base.py +240 -0
- ai_lib_python/pipeline/decode.py +281 -0
- ai_lib_python/pipeline/event_map.py +506 -0
- ai_lib_python/pipeline/fan_out.py +284 -0
- ai_lib_python/pipeline/select.py +297 -0
- ai_lib_python/plugins/__init__.py +32 -0
- ai_lib_python/plugins/base.py +294 -0
- ai_lib_python/plugins/hooks.py +296 -0
- ai_lib_python/plugins/middleware.py +285 -0
- ai_lib_python/plugins/registry.py +294 -0
- ai_lib_python/protocol/__init__.py +71 -0
- ai_lib_python/protocol/loader.py +317 -0
- ai_lib_python/protocol/manifest.py +385 -0
- ai_lib_python/protocol/validator.py +460 -0
- ai_lib_python/py.typed +1 -0
- ai_lib_python/resilience/__init__.py +102 -0
- ai_lib_python/resilience/backpressure.py +225 -0
- ai_lib_python/resilience/circuit_breaker.py +318 -0
- ai_lib_python/resilience/executor.py +343 -0
- ai_lib_python/resilience/fallback.py +341 -0
- ai_lib_python/resilience/preflight.py +413 -0
- ai_lib_python/resilience/rate_limiter.py +291 -0
- ai_lib_python/resilience/retry.py +299 -0
- ai_lib_python/resilience/signals.py +283 -0
- ai_lib_python/routing/__init__.py +118 -0
- ai_lib_python/routing/manager.py +593 -0
- ai_lib_python/routing/strategy.py +345 -0
- ai_lib_python/routing/types.py +397 -0
- ai_lib_python/structured/__init__.py +33 -0
- ai_lib_python/structured/json_mode.py +281 -0
- ai_lib_python/structured/schema.py +316 -0
- ai_lib_python/structured/validator.py +334 -0
- ai_lib_python/telemetry/__init__.py +127 -0
- ai_lib_python/telemetry/exporters/__init__.py +9 -0
- ai_lib_python/telemetry/exporters/prometheus.py +111 -0
- ai_lib_python/telemetry/feedback.py +446 -0
- ai_lib_python/telemetry/health.py +409 -0
- ai_lib_python/telemetry/logger.py +389 -0
- ai_lib_python/telemetry/metrics.py +496 -0
- ai_lib_python/telemetry/tracer.py +473 -0
- ai_lib_python/tokens/__init__.py +25 -0
- ai_lib_python/tokens/counter.py +282 -0
- ai_lib_python/tokens/estimator.py +286 -0
- ai_lib_python/transport/__init__.py +34 -0
- ai_lib_python/transport/auth.py +141 -0
- ai_lib_python/transport/http.py +364 -0
- ai_lib_python/transport/pool.py +425 -0
- ai_lib_python/types/__init__.py +41 -0
- ai_lib_python/types/events.py +343 -0
- ai_lib_python/types/message.py +332 -0
- ai_lib_python/types/tool.py +191 -0
- ai_lib_python/utils/__init__.py +21 -0
- ai_lib_python/utils/tool_call_assembler.py +317 -0
- ai_lib_python-0.5.0.dist-info/METADATA +837 -0
- ai_lib_python-0.5.0.dist-info/RECORD +84 -0
- ai_lib_python-0.5.0.dist-info/WHEEL +4 -0
- ai_lib_python-0.5.0.dist-info/licenses/LICENSE-APACHE +201 -0
- ai_lib_python-0.5.0.dist-info/licenses/LICENSE-MIT +21 -0
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Telemetry module for ai-lib-python.
|
|
3
|
+
|
|
4
|
+
Provides structured logging, metrics collection, distributed tracing,
|
|
5
|
+
health monitoring, and user feedback collection.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from ai_lib_python.telemetry.feedback import (
|
|
9
|
+
ChoiceSelectionFeedback,
|
|
10
|
+
CompositeFeedbackSink,
|
|
11
|
+
ConsoleFeedbackSink,
|
|
12
|
+
CorrectionFeedback,
|
|
13
|
+
FeedbackEvent,
|
|
14
|
+
FeedbackSink,
|
|
15
|
+
FeedbackType,
|
|
16
|
+
InMemoryFeedbackSink,
|
|
17
|
+
NoopFeedbackSink,
|
|
18
|
+
RatingFeedback,
|
|
19
|
+
RegenerateFeedback,
|
|
20
|
+
StopFeedback,
|
|
21
|
+
TextFeedback,
|
|
22
|
+
ThumbsFeedback,
|
|
23
|
+
get_feedback_sink,
|
|
24
|
+
report_feedback,
|
|
25
|
+
set_feedback_sink,
|
|
26
|
+
)
|
|
27
|
+
from ai_lib_python.telemetry.health import (
|
|
28
|
+
AggregatedHealth,
|
|
29
|
+
HealthChecker,
|
|
30
|
+
HealthCheckResult,
|
|
31
|
+
HealthStatus,
|
|
32
|
+
ProviderHealthTracker,
|
|
33
|
+
get_health_checker,
|
|
34
|
+
get_health_tracker,
|
|
35
|
+
)
|
|
36
|
+
from ai_lib_python.telemetry.logger import (
|
|
37
|
+
AiLibLogger,
|
|
38
|
+
LogContext,
|
|
39
|
+
LogLevel,
|
|
40
|
+
SensitiveDataMasker,
|
|
41
|
+
clear_log_context,
|
|
42
|
+
get_log_context,
|
|
43
|
+
get_logger,
|
|
44
|
+
set_log_context,
|
|
45
|
+
)
|
|
46
|
+
from ai_lib_python.telemetry.metrics import (
|
|
47
|
+
HistogramBuckets,
|
|
48
|
+
MetricLabels,
|
|
49
|
+
MetricsCollector,
|
|
50
|
+
MetricSnapshot,
|
|
51
|
+
get_metrics_collector,
|
|
52
|
+
set_metrics_collector,
|
|
53
|
+
)
|
|
54
|
+
from ai_lib_python.telemetry.tracer import (
|
|
55
|
+
ConsoleExporter,
|
|
56
|
+
InMemoryExporter,
|
|
57
|
+
Span,
|
|
58
|
+
SpanContext,
|
|
59
|
+
SpanExporter,
|
|
60
|
+
SpanKind,
|
|
61
|
+
SpanStatus,
|
|
62
|
+
Tracer,
|
|
63
|
+
get_current_span,
|
|
64
|
+
get_current_span_id,
|
|
65
|
+
get_current_trace_id,
|
|
66
|
+
get_tracer,
|
|
67
|
+
set_tracer,
|
|
68
|
+
)
|
|
69
|
+
|
|
70
|
+
# Public re-exports of the telemetry package, listed in (near-)alphabetical
# order rather than grouped by submodule — see the import statements above
# for which submodule (feedback, health, logger, metrics, tracer) each name
# comes from.
__all__ = [
    "AggregatedHealth",
    "AiLibLogger",
    "ChoiceSelectionFeedback",
    "CompositeFeedbackSink",
    "ConsoleFeedbackSink",
    "ConsoleExporter",
    "CorrectionFeedback",
    "FeedbackEvent",
    "FeedbackSink",
    "FeedbackType",
    "HealthCheckResult",
    "HealthChecker",
    "HealthStatus",
    "HistogramBuckets",
    "InMemoryExporter",
    "InMemoryFeedbackSink",
    "LogContext",
    "LogLevel",
    "MetricLabels",
    "MetricSnapshot",
    "MetricsCollector",
    "NoopFeedbackSink",
    "ProviderHealthTracker",
    "RatingFeedback",
    "RegenerateFeedback",
    "SensitiveDataMasker",
    "Span",
    "SpanContext",
    "SpanExporter",
    "SpanKind",
    "SpanStatus",
    "StopFeedback",
    "TextFeedback",
    "ThumbsFeedback",
    "Tracer",
    "clear_log_context",
    "get_current_span",
    "get_current_span_id",
    "get_current_trace_id",
    "get_feedback_sink",
    "get_health_checker",
    "get_health_tracker",
    "get_log_context",
    "get_logger",
    "get_metrics_collector",
    "get_tracer",
    "report_feedback",
    "set_feedback_sink",
    "set_log_context",
    "set_metrics_collector",
    "set_tracer",
]
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Prometheus metrics exporter.
|
|
3
|
+
|
|
4
|
+
Provides HTTP endpoint for Prometheus scraping.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from ai_lib_python.telemetry.metrics import MetricsCollector, get_metrics_collector
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class PrometheusExporter:
    """Prometheus metrics exporter.

    Provides metrics in the Prometheus text exposition format, optionally
    serving an HTTP endpoint for scraping.

    Example:
        >>> exporter = PrometheusExporter(port=9090)
        >>> await exporter.start()
        >>> # Metrics available at http://localhost:9090/metrics
        >>> await exporter.stop()
    """

    def __init__(
        self,
        collector: MetricsCollector | None = None,
        port: int = 9090,
        host: str = "0.0.0.0",
        path: str = "/metrics",
    ) -> None:
        """Initialize exporter.

        Args:
            collector: Metrics collector (uses global if None)
            port: HTTP server port
            host: HTTP server host
            path: Metrics endpoint path
        """
        self._collector = collector or get_metrics_collector()
        self._port = port
        self._host = host
        self._path = path
        # Holds the aiohttp AppRunner while serving; None otherwise.
        self._server: Any = None
        self._running = False

    def get_metrics(self) -> str:
        """Get metrics in Prometheus exposition format.

        Returns:
            Prometheus-formatted metrics string
        """
        return self._collector.to_prometheus()

    async def start(self) -> None:
        """Start the HTTP server for Prometheus scraping.

        Idempotent: does nothing if already running. If aiohttp is not
        installed, no HTTP server is started; a RuntimeWarning is emitted
        and metrics remain available via get_metrics().
        """
        if self._running:
            return

        try:
            # aiohttp is optional; only needed for the HTTP endpoint.
            from aiohttp import web
        except ImportError:
            # FIX: previously this silently flagged the exporter as "running"
            # while serving nothing. Keep the permissive behavior for backward
            # compatibility, but make the degradation visible to operators.
            import warnings

            warnings.warn(
                "aiohttp is not installed; PrometheusExporter will not serve "
                "an HTTP endpoint. Install aiohttp or pull metrics via "
                "get_metrics().",
                RuntimeWarning,
                stacklevel=2,
            )
            self._running = True
            return

        app = web.Application()
        app.router.add_get(self._path, self._handle_metrics)

        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, self._host, self._port)
        await site.start()

        self._server = runner
        self._running = True

    async def stop(self) -> None:
        """Stop the HTTP server (no-op if not running)."""
        if not self._running:
            return

        if self._server:
            await self._server.cleanup()
            self._server = None

        self._running = False

    async def _handle_metrics(self, request: Any) -> Any:  # noqa: ARG002
        """Serve the current metrics snapshot as a Prometheus text response."""
        from aiohttp import web

        # FIX: aiohttp rejects a content_type that embeds parameters —
        # content_type="text/plain; charset=utf-8" raises ValueError.
        # The media type and charset must be passed separately.
        return web.Response(
            text=self.get_metrics(),
            content_type="text/plain",
            charset="utf-8",
        )

    @property
    def is_running(self) -> bool:
        """Whether start() has been called without a subsequent stop()."""
        return self._running

    @property
    def endpoint(self) -> str:
        """Get the metrics endpoint URL."""
        return f"http://{self._host}:{self._port}{self._path}"
|
|
@@ -0,0 +1,446 @@
|
|
|
1
|
+
"""
|
|
2
|
+
User feedback collection system.
|
|
3
|
+
|
|
4
|
+
Provides typed feedback events and sinks for collecting user feedback,
|
|
5
|
+
particularly useful for multi-candidate selection and RLHF.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations

import time
from abc import ABC, abstractmethod
from collections import deque
from dataclasses import dataclass, field
from enum import Enum
from typing import Any
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class FeedbackType(str, Enum):
    """Types of feedback events.

    Values are stable string identifiers; each feedback dataclass uses the
    matching member's ``.value`` as the ``"type"`` field of its ``to_dict()``
    payload.
    """

    CHOICE_SELECTION = "choice_selection"  # user picked one of several candidates
    RATING = "rating"  # numeric rating (e.g. 1-5 stars)
    THUMBS = "thumbs"  # thumbs up / thumbs down
    TEXT = "text"  # free-form text feedback
    CORRECTION = "correction"  # user edited/corrected the response
    REGENERATE = "regenerate"  # user requested a regeneration
    STOP = "stop"  # user stopped generation mid-stream
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class ChoiceSelectionFeedback:
    """Feedback recorded when a user picks one response out of several candidates.

    Useful for preference learning and A/B testing.

    Attributes:
        request_id: Request identifier (client_request_id)
        chosen_index: Index of the chosen candidate (0-based)
        rejected_indices: Indices of rejected candidates
        latency_to_select_ms: Time from render to selection
        ui_context: Optional UI context (component name, experiment ID)
        candidate_hashes: Content hashes to link choice without storing text
        timestamp: Unix timestamp at which the feedback was created
    """

    request_id: str
    chosen_index: int
    rejected_indices: list[int] | None = None
    latency_to_select_ms: float | None = None
    ui_context: dict[str, Any] | None = None
    candidate_hashes: list[str] | None = None
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        payload: dict[str, Any] = {"type": FeedbackType.CHOICE_SELECTION.value}
        payload.update(
            request_id=self.request_id,
            chosen_index=self.chosen_index,
            rejected_indices=self.rejected_indices,
            latency_to_select_ms=self.latency_to_select_ms,
            ui_context=self.ui_context,
            candidate_hashes=self.candidate_hashes,
            timestamp=self.timestamp,
        )
        return payload
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
@dataclass
class RatingFeedback:
    """Numeric rating feedback (e.g. 1-5 stars).

    Attributes:
        request_id: Request identifier
        rating: Rating value (typically 1-5)
        max_rating: Maximum possible rating
        category: Optional rating category
        comment: Optional text comment
        timestamp: Unix timestamp at which the feedback was created
    """

    request_id: str
    rating: int
    max_rating: int = 5
    category: str | None = None
    comment: str | None = None
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        payload: dict[str, Any] = {"type": FeedbackType.RATING.value}
        payload.update(
            request_id=self.request_id,
            rating=self.rating,
            max_rating=self.max_rating,
            category=self.category,
            comment=self.comment,
            timestamp=self.timestamp,
        )
        return payload
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
@dataclass
class ThumbsFeedback:
    """Binary thumbs up/down feedback.

    Attributes:
        request_id: Request identifier
        is_positive: True for thumbs up, False for thumbs down
        reason: Optional reason for the feedback
        timestamp: Unix timestamp at which the feedback was created
    """

    request_id: str
    is_positive: bool
    reason: str | None = None
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        payload: dict[str, Any] = {"type": FeedbackType.THUMBS.value}
        payload.update(
            request_id=self.request_id,
            is_positive=self.is_positive,
            reason=self.reason,
            timestamp=self.timestamp,
        )
        return payload
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
@dataclass
class TextFeedback:
    """Free-form text feedback.

    Attributes:
        request_id: Request identifier
        text: Feedback text
        category: Optional category
        timestamp: Unix timestamp at which the feedback was created
    """

    request_id: str
    text: str
    category: str | None = None
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        payload: dict[str, Any] = {"type": FeedbackType.TEXT.value}
        payload.update(
            request_id=self.request_id,
            text=self.text,
            category=self.category,
            timestamp=self.timestamp,
        )
        return payload
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
@dataclass
class CorrectionFeedback:
    """Correction/edit feedback.

    Captures when users edit or correct the AI response. Only content
    hashes are stored, not the text itself.

    Attributes:
        request_id: Request identifier
        original_hash: Hash of original content
        corrected_hash: Hash of corrected content
        edit_distance: Optional edit distance metric
        correction_type: Type of correction (grammar, factual, style, etc.)
        timestamp: Unix timestamp at which the feedback was created
    """

    request_id: str
    original_hash: str
    corrected_hash: str
    edit_distance: int | None = None
    correction_type: str | None = None
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        payload: dict[str, Any] = {"type": FeedbackType.CORRECTION.value}
        payload.update(
            request_id=self.request_id,
            original_hash=self.original_hash,
            corrected_hash=self.corrected_hash,
            edit_distance=self.edit_distance,
            correction_type=self.correction_type,
            timestamp=self.timestamp,
        )
        return payload
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
@dataclass
class RegenerateFeedback:
    """Regeneration feedback.

    Captures when users request regeneration of a response.

    Attributes:
        request_id: Request identifier
        regeneration_count: Number of regenerations so far
        reason: Optional reason for regeneration
        timestamp: Unix timestamp at which the feedback was created
    """

    request_id: str
    regeneration_count: int = 1
    reason: str | None = None
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        payload: dict[str, Any] = {"type": FeedbackType.REGENERATE.value}
        payload.update(
            request_id=self.request_id,
            regeneration_count=self.regeneration_count,
            reason=self.reason,
            timestamp=self.timestamp,
        )
        return payload
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
@dataclass
class StopFeedback:
    """Stop-generation feedback.

    Captures when users stop generation mid-stream.

    Attributes:
        request_id: Request identifier
        tokens_generated: Tokens generated before the stop
        reason: Optional reason for stopping
        timestamp: Unix timestamp at which the feedback was created
    """

    request_id: str
    tokens_generated: int | None = None
    reason: str | None = None
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        payload: dict[str, Any] = {"type": FeedbackType.STOP.value}
        payload.update(
            request_id=self.request_id,
            tokens_generated=self.tokens_generated,
            reason=self.reason,
            timestamp=self.timestamp,
        )
        return payload
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
# Union of all concrete feedback event types accepted by FeedbackSink.report().
# Every member exposes `request_id`, `timestamp`, and `to_dict()`.
FeedbackEvent = (
    ChoiceSelectionFeedback
    | RatingFeedback
    | ThumbsFeedback
    | TextFeedback
    | CorrectionFeedback
    | RegenerateFeedback
    | StopFeedback
)
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
class FeedbackSink(ABC):
    """Abstract destination for feedback events.

    A sink decides where and how feedback is stored or forwarded.
    Applications implement this interface to plug in their own backend.
    """

    @abstractmethod
    async def report(self, event: FeedbackEvent) -> None:
        """Report a single feedback event.

        Args:
            event: Feedback event to report
        """
        raise NotImplementedError

    async def report_batch(self, events: list[FeedbackEvent]) -> None:
        """Report several feedback events.

        The default implementation forwards each event to :meth:`report`
        one at a time; subclasses may override with a batch-optimized
        version.

        Args:
            events: List of feedback events
        """
        for item in events:
            await self.report(item)

    async def close(self) -> None:
        """Release any resources held by the sink (no-op by default)."""
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
class NoopFeedbackSink(FeedbackSink):
    """Sink that silently drops every feedback event.

    Used as the default when no feedback collection has been configured.
    """

    async def report(self, event: FeedbackEvent) -> None:
        """Discard the event without doing anything."""
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
class InMemoryFeedbackSink(FeedbackSink):
    """In-memory feedback sink for testing and development.

    Retains up to ``max_events`` of the most recent feedback events;
    once full, the oldest events are evicted first.
    """

    def __init__(self, max_events: int = 10000) -> None:
        """Initialize sink.

        Args:
            max_events: Maximum events to retain (oldest dropped first)
        """
        # PERF: deque(maxlen=...) evicts the oldest entry in O(1) on each
        # overflowing append, instead of re-slicing the whole list
        # (`self._events[-max_events:]`) on every append past the cap.
        self._events: deque[FeedbackEvent] = deque(maxlen=max_events)
        self._max_events = max_events

    async def report(self, event: FeedbackEvent) -> None:
        """Store feedback event in memory, evicting the oldest when full."""
        self._events.append(event)

    def get_events(self) -> list[FeedbackEvent]:
        """Get a copy of all stored events, oldest first."""
        return list(self._events)

    def get_events_by_request(self, request_id: str) -> list[FeedbackEvent]:
        """Get events for a specific request."""
        return [e for e in self._events if e.request_id == request_id]

    def get_events_by_type(self, feedback_type: FeedbackType) -> list[FeedbackEvent]:
        """Get events of a specific type."""
        type_map = {
            FeedbackType.CHOICE_SELECTION: ChoiceSelectionFeedback,
            FeedbackType.RATING: RatingFeedback,
            FeedbackType.THUMBS: ThumbsFeedback,
            FeedbackType.TEXT: TextFeedback,
            FeedbackType.CORRECTION: CorrectionFeedback,
            FeedbackType.REGENERATE: RegenerateFeedback,
            FeedbackType.STOP: StopFeedback,
        }
        target_class = type_map.get(feedback_type)
        if target_class:
            return [e for e in self._events if isinstance(e, target_class)]
        return []

    def clear(self) -> None:
        """Clear all stored events."""
        self._events.clear()

    def __len__(self) -> int:
        """Get number of stored events."""
        return len(self._events)
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
class ConsoleFeedbackSink(FeedbackSink):
    """Debugging sink that writes each feedback event to stdout."""

    def __init__(self, prefix: str = "[Feedback]") -> None:
        """Initialize sink.

        Args:
            prefix: Prefix for log messages
        """
        self._prefix = prefix

    async def report(self, event: FeedbackEvent) -> None:
        """Print the serialized feedback event to stdout."""
        line = f"{self._prefix} {event.to_dict()}"
        print(line)
|
|
368
|
+
|
|
369
|
+
|
|
370
|
+
class CompositeFeedbackSink(FeedbackSink):
    """Sink that fans each event out to a list of child sinks.

    Useful for sending feedback to multiple destinations at once.
    """

    def __init__(self, sinks: list[FeedbackSink] | None = None) -> None:
        """Initialize composite sink.

        Args:
            sinks: List of sinks to report to
        """
        self._sinks: list[FeedbackSink] = [] if not sinks else list(sinks)

    def add_sink(self, sink: FeedbackSink) -> CompositeFeedbackSink:
        """Add a child sink.

        Args:
            sink: Sink to add

        Returns:
            Self for chaining
        """
        self._sinks.append(sink)
        return self

    async def report(self, event: FeedbackEvent) -> None:
        """Report the event to every child sink."""
        for child in self._sinks:
            try:
                await child.report(event)
            except Exception:
                # Best-effort fan-out: one failing sink must not block the rest.
                pass

    async def close(self) -> None:
        """Close every child sink, ignoring individual failures."""
        for child in self._sinks:
            try:
                await child.close()
            except Exception:
                pass
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
# Process-wide default sink; lazily initialized to NoopFeedbackSink by
# get_feedback_sink() and replaceable via set_feedback_sink().
_global_sink: FeedbackSink | None = None
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
def get_feedback_sink() -> FeedbackSink:
    """Get the global feedback sink.

    Lazily installs a NoopFeedbackSink the first time it is called with
    no sink configured.

    Returns:
        Global FeedbackSink instance (NoopFeedbackSink if not set)
    """
    global _global_sink
    sink = _global_sink
    if sink is None:
        sink = NoopFeedbackSink()
        _global_sink = sink
    return sink
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
def set_feedback_sink(sink: FeedbackSink) -> None:
    """Install *sink* as the process-wide feedback sink.

    Args:
        sink: FeedbackSink instance
    """
    global _global_sink
    _global_sink = sink
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
async def report_feedback(event: FeedbackEvent) -> None:
    """Report feedback through the globally configured sink.

    Args:
        event: Feedback event to report
    """
    await get_feedback_sink().report(event)
|