fluxloop 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of fluxloop might be problematic. Click here for more details.

fluxloop/__init__.py ADDED
@@ -0,0 +1,58 @@
1
+ """
2
+ FluxLoop SDK - Agent instrumentation and tracing library.
3
+ """
4
+
5
+ from .context import FluxLoopContext, get_current_context, instrument
6
+ from .decorators import agent, prompt, tool
7
+ from .schemas import (
8
+ ExperimentConfig,
9
+ PersonaConfig,
10
+ RunnerConfig,
11
+ VariationStrategy,
12
+ Trace,
13
+ Observation,
14
+ ObservationType,
15
+ ObservationLevel,
16
+ Score,
17
+ ScoreDataType,
18
+ TraceStatus,
19
+ )
20
+ from .client import FluxLoopClient
21
+ from .config import configure, get_config, reset_config
22
+ from .recording import disable_recording, enable_recording, record_call_args, set_recording_options
23
+
24
+ __version__ = "0.1.0"
25
+
26
+ __all__ = [
27
+ # Decorators
28
+ "agent",
29
+ "prompt",
30
+ "tool",
31
+ # Context
32
+ "instrument",
33
+ "get_current_context",
34
+ "FluxLoopContext",
35
+ # Client
36
+ "FluxLoopClient",
37
+ # Config
38
+ "configure",
39
+ "get_config",
40
+ "reset_config",
41
+ "enable_recording",
42
+ "disable_recording",
43
+ "set_recording_options",
44
+ "record_call_args",
45
+ # Schemas - configs
46
+ "ExperimentConfig",
47
+ "PersonaConfig",
48
+ "RunnerConfig",
49
+ "VariationStrategy",
50
+ # Schemas - tracing
51
+ "Trace",
52
+ "Observation",
53
+ "ObservationType",
54
+ "ObservationLevel",
55
+ "Score",
56
+ "ScoreDataType",
57
+ "TraceStatus",
58
+ ]
fluxloop/buffer.py ADDED
@@ -0,0 +1,186 @@
1
+ """
2
+ Event buffering and batch sending logic.
3
+ """
4
+
5
+ import atexit
6
+ import json
7
+ import threading
8
+ import time
9
+ from collections import deque
10
+ from datetime import datetime
11
+ from typing import Any, Deque, Dict, List, Optional, Tuple
12
+ from uuid import UUID
13
+
14
+ from .config import get_config
15
+ from .models import ObservationData, TraceData
16
+ from .storage import OfflineStore
17
+
18
+
19
class EventBuffer:
    """
    Singleton buffer for collecting and batching events.

    Traces and observations are held in bounded queues and delivered to the
    collector and/or the offline store, either when the configured batch size
    is reached or periodically from a background daemon thread.
    """

    _instance: Optional["EventBuffer"] = None
    _lock = threading.Lock()

    def __init__(self):
        """Initialize the buffer (use EventBuffer.get_instance() instead)."""
        if EventBuffer._instance is not None:
            raise RuntimeError("Use EventBuffer.get_instance() instead")

        self.config = get_config()
        # Bounded deques: once max_queue_size is reached, the oldest entries
        # are silently dropped rather than blocking the instrumented code.
        self.traces: Deque[TraceData] = deque(maxlen=self.config.max_queue_size)
        self.observations: Deque[Tuple[UUID, ObservationData]] = deque(
            maxlen=self.config.max_queue_size
        )

        # Guards both queues and last_flush.
        self.send_lock = threading.Lock()
        self.last_flush = time.time()

        # Create the offline store BEFORE starting the flush thread or
        # registering atexit: both may call _send_batch, which uses it.
        self.offline_store = OfflineStore()

        # Background thread for periodic flushing.
        self.stop_event = threading.Event()
        self.flush_thread = threading.Thread(target=self._flush_periodically, daemon=True)
        self.flush_thread.start()

        # Flush whatever is left when the interpreter exits.
        atexit.register(self.shutdown)

    @classmethod
    def get_instance(cls) -> "EventBuffer":
        """Get or create the singleton instance (double-checked locking)."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = cls()
        return cls._instance

    def add_trace(self, trace: TraceData) -> None:
        """
        Add a trace to the buffer.

        Args:
            trace: Trace data to add
        """
        if not self.config.enabled:
            return

        with self.send_lock:
            self.traces.append(trace)

    def add_observation(self, trace_id: UUID, observation: ObservationData) -> None:
        """
        Add an observation to the buffer.

        Args:
            trace_id: ID of the parent trace
            observation: Observation data to add
        """
        if not self.config.enabled:
            return

        with self.send_lock:
            self.observations.append((trace_id, observation))

    def flush_if_needed(self) -> None:
        """Flush the buffer if the configured batch size has been reached."""
        with self.send_lock:
            should_flush = (
                len(self.traces) + len(self.observations) >= self.config.batch_size
            )

        # flush() re-acquires send_lock, so it must run outside the lock.
        if should_flush:
            self.flush()

    def flush(self) -> None:
        """Send all buffered events to the collector / offline store."""
        if not self.config.enabled:
            return

        traces_to_send: List[TraceData] = []
        observations_to_send: List[Tuple[UUID, ObservationData]] = []

        # Drain the queues under the lock, but send outside it so slow
        # network I/O never blocks producers.
        with self.send_lock:
            while self.traces:
                traces_to_send.append(self.traces.popleft())

            while self.observations:
                observations_to_send.append(self.observations.popleft())

            self.last_flush = time.time()

        if traces_to_send or observations_to_send:
            self._send_batch(traces_to_send, observations_to_send)

    def _send_batch(
        self,
        traces: List[TraceData],
        observations: List[Tuple[UUID, ObservationData]]
    ) -> None:
        """
        Send a batch of events to the collector.

        When the collector is disabled, or any item fails to send, the batch
        is persisted to the offline store instead.

        Args:
            traces: List of traces to send
            observations: List of (trace_id, observation) tuples
        """
        send_errors = False

        if self.config.use_collector:
            # Import here to avoid circular dependency.
            from .client import FluxLoopClient

            client = FluxLoopClient()

            for trace in traces:
                try:
                    client.send_trace(trace)
                except Exception as e:
                    send_errors = True
                    if self.config.debug:
                        print(f"Failed to send trace {trace.id}: {e}")

            for trace_id, observation in observations:
                try:
                    client.send_observation(trace_id, observation)
                except Exception as e:
                    send_errors = True
                    if self.config.debug:
                        print(f"Failed to send observation {observation.id}: {e}")

        if send_errors or not self.config.use_collector:
            self.offline_store.record_traces(traces)
            self.offline_store.record_observations(observations)

    def _flush_periodically(self) -> None:
        """Background thread loop: flush on an interval until shutdown."""
        # Event.wait() instead of time.sleep() so shutdown() interrupts the
        # pause immediately; wait() returns True once stop_event is set.
        while not self.stop_event.wait(self.config.flush_interval):
            with self.send_lock:
                time_since_flush = time.time() - self.last_flush
                has_data = bool(self.traces or self.observations)

            if has_data and time_since_flush >= self.config.flush_interval:
                self.flush()

    def shutdown(self) -> None:
        """Shutdown the buffer and flush remaining events."""
        # Wake the background thread's wait() immediately.
        self.stop_event.set()

        # Final flush of anything still queued.
        self.flush()

        # Wait briefly for the thread; it is a daemon, so a timeout here
        # cannot hang interpreter exit.
        if self.flush_thread.is_alive():
            self.flush_thread.join(timeout=2.0)
fluxloop/client.py ADDED
@@ -0,0 +1,175 @@
1
+ """
2
+ HTTP client for sending data to the collector.
3
+ """
4
+
5
+ import json
6
+ from typing import Any, Dict, Optional
7
+ from uuid import UUID
8
+
9
+ import httpx
10
+
11
+ from .config import get_config
12
+ from .models import ObservationData, TraceData
13
+
14
+
15
class FluxLoopClient:
    """
    HTTP client for communicating with the FluxLoop collector.
    """

    def __init__(self, collector_url: Optional[str] = None, api_key: Optional[str] = None):
        """
        Initialize the client.

        Args:
            collector_url: Override collector URL
            api_key: Override API key
        """
        cfg = get_config()
        self.config = cfg
        self.collector_url = collector_url or cfg.collector_url
        self.api_key = api_key or cfg.api_key

        # Only build a real HTTP client when the collector is in use.
        self._client: Optional[httpx.Client] = None
        if cfg.use_collector:
            self._client = httpx.Client(
                base_url=self.collector_url,
                timeout=cfg.timeout,
                headers=self._get_headers(),
            )

    def _get_headers(self) -> Dict[str, str]:
        """Build the common request headers (auth + service metadata)."""
        headers = {
            "Content-Type": "application/json",
            "User-Agent": "fluxloop-sdk/0.1.0",
        }

        if self.api_key:
            headers["Authorization"] = f"Bearer {self.api_key}"
        if self.config.service_name:
            headers["X-Service-Name"] = self.config.service_name
        if self.config.environment:
            headers["X-Environment"] = self.config.environment

        return headers

    def _post(self, path: str, payload: Dict[str, Any], label: str) -> Dict[str, Any]:
        """POST payload to the collector; log (when debug) and re-raise on HTTP errors."""
        try:
            response = self._client.post(path, json=payload)
            response.raise_for_status()
            return response.json()
        except httpx.HTTPError as e:
            if self.config.debug:
                print(f"Error sending {label}: {e}")
            raise

    def send_trace(self, trace: TraceData) -> Dict[str, Any]:
        """
        Send a trace to the collector.

        Args:
            trace: Trace data to send

        Returns:
            Response from the collector

        Raises:
            httpx.HTTPError: If the request fails
        """
        if not self.config.enabled:
            return {"status": "disabled"}

        payload = self._serialize_trace(trace)

        if not self._client:
            return {"status": "collector_disabled"}

        return self._post("/api/traces", payload, "trace")

    def send_observation(
        self, trace_id: UUID, observation: ObservationData
    ) -> Dict[str, Any]:
        """
        Send an observation to the collector.

        Args:
            trace_id: ID of the parent trace
            observation: Observation data to send

        Returns:
            Response from the collector

        Raises:
            httpx.HTTPError: If the request fails
        """
        if not self.config.enabled:
            return {"status": "disabled"}

        payload = self._serialize_observation(observation)
        payload["trace_id"] = str(trace_id)

        if not self._client:
            return {"status": "collector_disabled"}

        return self._post(f"/api/traces/{trace_id}/observations", payload, "observation")

    @staticmethod
    def _stringify_times(data: Dict[str, Any]) -> None:
        """Convert start/end datetimes to ISO-8601 strings, in place."""
        if "start_time" in data:
            data["start_time"] = data["start_time"].isoformat()
        if "end_time" in data and data["end_time"]:
            data["end_time"] = data["end_time"].isoformat()

    def _serialize_trace(self, trace: TraceData) -> Dict[str, Any]:
        """Serialize trace for JSON transmission (UUIDs and datetimes to strings)."""
        data = trace.model_dump(exclude_none=True)

        for key in ("id", "session_id"):
            if key in data:
                data[key] = str(data[key])

        self._stringify_times(data)
        return data

    def _serialize_observation(self, observation: ObservationData) -> Dict[str, Any]:
        """Serialize observation for JSON transmission (UUIDs and datetimes to strings)."""
        data = observation.model_dump(exclude_none=True)

        for key in ("id", "parent_observation_id", "trace_id"):
            if key in data:
                data[key] = str(data[key])

        self._stringify_times(data)
        return data

    def close(self) -> None:
        """Close the underlying HTTP client, if one was created."""
        if self._client is not None:
            self._client.close()

    def __enter__(self):
        """Context manager entry: returns self."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit: closes the client."""
        self.close()
fluxloop/config.py ADDED
@@ -0,0 +1,191 @@
1
+ """
2
+ SDK Configuration management.
3
+ """
4
+
5
+ import os
6
+ from datetime import datetime
7
+ from pathlib import Path
8
+ from typing import Optional
9
+ from urllib.parse import urlparse
10
+
11
+ from dotenv import load_dotenv
12
+ from pydantic import BaseModel, Field, field_validator
13
+
14
+ from .recording import disable_recording, enable_recording
15
+
16
+
17
+ def _resolve_recording_path(path: Optional[str]) -> Path:
18
+ """Resolve the recording file path, creating parent directories."""
19
+
20
+ if path:
21
+ resolved = Path(path).expanduser().resolve()
22
+ else:
23
+ resolved = Path(
24
+ f"/tmp/fluxloop_args_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jsonl"
25
+ ).resolve()
26
+
27
+ resolved.parent.mkdir(parents=True, exist_ok=True)
28
+ return resolved
29
+
30
+
31
def _apply_recording_config(config: "SDKConfig") -> None:
    """Enable or disable argument recording based on configuration."""

    if not config.record_args:
        disable_recording()
        if config.debug:
            print("🎥 Argument recording disabled")
        return

    target = _resolve_recording_path(config.recording_file)
    enable_recording(str(target))
    # Write the resolved path back so callers can see where recording goes.
    config.recording_file = str(target)
    if config.debug:
        print(f"🎥 Argument recording enabled → {target}")
44
+
45
# Load environment variables
# Import-time side effect: values from a .env file are loaded into
# os.environ before SDKConfig's default factories read them below.
load_dotenv()
47
+
48
+
49
class SDKConfig(BaseModel):
    """SDK configuration settings.

    Every field defaults from a FLUXLOOP_* environment variable, read at
    instantiation time via ``default_factory`` (so load_dotenv() must have
    run before the first SDKConfig() is created).
    """

    # Collector settings
    # Base URL of the collector service (FLUXLOOP_COLLECTOR_URL).
    collector_url: Optional[str] = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_COLLECTOR_URL", "http://localhost:8000")
    )
    # Bearer token sent in the Authorization header (FLUXLOOP_API_KEY).
    api_key: Optional[str] = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_API_KEY")
    )

    # Behavior settings
    # Master switch: when False the SDK buffers/sends nothing (FLUXLOOP_ENABLED).
    enabled: bool = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_ENABLED", "true").lower() == "true"
    )
    # Verbose diagnostics printed to stdout (FLUXLOOP_DEBUG).
    debug: bool = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_DEBUG", "false").lower() == "true"
    )
    # When False, events bypass HTTP and go to the offline store only
    # (FLUXLOOP_USE_COLLECTOR).
    use_collector: bool = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_USE_COLLECTOR", "true").lower() == "true"
    )
    # Persist events locally on send failure or collector-off (FLUXLOOP_OFFLINE_ENABLED).
    offline_store_enabled: bool = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_OFFLINE_ENABLED", "true").lower() == "true"
    )
    # Directory for offline artifacts (FLUXLOOP_OFFLINE_DIR).
    offline_store_dir: str = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_OFFLINE_DIR", "./fluxloop_artifacts")
    )

    # Argument recording (disabled by default)
    # Record call arguments to a JSONL file (FLUXLOOP_RECORD_ARGS).
    record_args: bool = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_RECORD_ARGS", "false").lower() == "true"
    )
    # Target file for recorded arguments; auto-generated when unset
    # (FLUXLOOP_RECORDING_FILE).
    recording_file: Optional[str] = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_RECORDING_FILE")
    )

    # Performance settings
    # Events per flush batch, 1..100 (FLUXLOOP_BATCH_SIZE).
    batch_size: int = Field(
        default_factory=lambda: int(os.getenv("FLUXLOOP_BATCH_SIZE", "10"))
    )
    # Seconds between periodic background flushes (FLUXLOOP_FLUSH_INTERVAL).
    flush_interval: float = Field(
        default_factory=lambda: float(os.getenv("FLUXLOOP_FLUSH_INTERVAL", "5.0"))
    )
    # Max buffered events before oldest are dropped (FLUXLOOP_MAX_QUEUE_SIZE).
    max_queue_size: int = Field(
        default_factory=lambda: int(os.getenv("FLUXLOOP_MAX_QUEUE_SIZE", "1000"))
    )
    # HTTP request timeout in seconds (FLUXLOOP_TIMEOUT).
    timeout: float = Field(
        default_factory=lambda: float(os.getenv("FLUXLOOP_TIMEOUT", "10.0"))
    )

    # Sampling
    # Fraction of traces to keep, 0.0..1.0 (FLUXLOOP_SAMPLE_RATE).
    sample_rate: float = Field(
        default_factory=lambda: float(os.getenv("FLUXLOOP_SAMPLE_RATE", "1.0"))
    )

    # Metadata
    # Logical service name sent as X-Service-Name (FLUXLOOP_SERVICE_NAME).
    service_name: Optional[str] = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_SERVICE_NAME")
    )
    # Deployment environment sent as X-Environment (FLUXLOOP_ENVIRONMENT).
    environment: Optional[str] = Field(
        default_factory=lambda: os.getenv("FLUXLOOP_ENVIRONMENT", "development")
    )

    @field_validator("collector_url")
    def validate_collector_url(cls, v):
        """Ensure collector URL is valid (scheme + host); trailing slash is stripped."""
        if v is None:
            return None
        try:
            result = urlparse(v)
            if not all([result.scheme, result.netloc]):
                raise ValueError("Invalid URL format")
        except Exception as e:
            raise ValueError(f"Invalid collector URL: {e}")
        return v.rstrip("/")  # Remove trailing slash

    @field_validator("sample_rate")
    def validate_sample_rate(cls, v):
        """Ensure sample rate is between 0 and 1."""
        if not 0 <= v <= 1:
            raise ValueError("sample_rate must be between 0 and 1")
        return v

    @field_validator("batch_size")
    def validate_batch_size(cls, v):
        """Ensure batch size is reasonable (1..100)."""
        if v < 1:
            raise ValueError("batch_size must be at least 1")
        if v > 100:
            raise ValueError("batch_size must not exceed 100")
        return v
140
+
141
+
142
# Global configuration instance
# Created at import time; default factories read FLUXLOOP_* environment
# variables (populated by load_dotenv() earlier in this module).
_config = SDKConfig()
# Import-time side effect: turn argument recording on/off per _config.
_apply_recording_config(_config)
145
+
146
+
147
def configure(**kwargs) -> SDKConfig:
    """
    Configure the SDK.

    Validation is performed on a merged copy before the global configuration
    is replaced, so a failed call leaves the previous configuration intact
    (the original implementation mutated the global first and could leave it
    partially updated on error).

    Args:
        **kwargs: Configuration parameters to override

    Returns:
        Updated configuration

    Raises:
        ValueError: If an unknown configuration parameter is supplied.

    Example:
        >>> import fluxloop
        >>> fluxloop.configure(
        ...     collector_url="https://api.fluxloop.dev",
        ...     api_key="your-api-key"
        ... )
    """
    global _config

    # Reject unknown keys up front, before anything is touched.
    for key in kwargs:
        if not hasattr(_config, key):
            raise ValueError(f"Unknown configuration parameter: {key}")

    # Build and validate the merged configuration; only on success does it
    # replace the global instance.
    merged = _config.model_dump()
    merged.update(kwargs)
    new_config = SDKConfig(**merged)

    _config = new_config
    _apply_recording_config(_config)

    return _config
179
+
180
+
181
def get_config() -> SDKConfig:
    """Get current SDK configuration.

    Returns the module-level singleton; use configure() to change it.
    """
    return _config
184
+
185
+
186
def reset_config() -> SDKConfig:
    """Reset configuration to defaults (re-reading environment variables)."""
    global _config
    fresh = SDKConfig()
    _config = fresh
    # Keep the recording subsystem in sync with the fresh settings.
    _apply_recording_config(fresh)
    return fresh