fluxloop 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fluxloop might be problematic.

@@ -0,0 +1,76 @@
+ Metadata-Version: 2.4
+ Name: fluxloop
+ Version: 0.1.0
+ Summary: FluxLoop SDK for agent instrumentation and tracing
+ Author-email: FluxLoop Team <team@fluxloop.dev>
+ License: Apache-2.0
+ Project-URL: Homepage, https://github.com/fluxloop/fluxloop
+ Project-URL: Documentation, https://docs.fluxloop.dev
+ Project-URL: Repository, https://github.com/fluxloop/fluxloop
+ Project-URL: Issues, https://github.com/fluxloop/fluxloop/issues
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: pydantic>=2.0
+ Requires-Dist: httpx>=0.24.0
+ Requires-Dist: python-dotenv>=1.0.0
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.0; extra == "dev"
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
+ Requires-Dist: mypy>=1.0; extra == "dev"
+ Requires-Dist: black>=23.0; extra == "dev"
+ Provides-Extra: langchain
+ Requires-Dist: langchain>=0.1.0; extra == "langchain"
+ Provides-Extra: langgraph
+ Requires-Dist: langgraph>=0.0.20; extra == "langgraph"
+
+ # FluxLoop SDK
+
+ FluxLoop SDK for agent instrumentation and tracing.
+
+ ## Installation
+
+ ```bash
+ pip install fluxloop
+ ```
+
+ ## Quick Start
+
+ ```python
+ from fluxloop import trace, FluxLoopClient
+
+ # Initialize the client
+ client = FluxLoopClient()
+
+ # Use the trace decorator
+ @trace()
+ def my_agent_function(prompt: str):
+     result = ...  # Your agent logic here
+     return result
+ ```
+
+ ## Features
+
+ - 🔍 **Automatic Tracing**: Instrument your agent code with simple decorators
+ - 📊 **Rich Context**: Capture inputs, outputs, and metadata
+ - 🔄 **Async Support**: Works with both sync and async functions
+ - 🎯 **Framework Integration**: Built-in support for LangChain and LangGraph
+
+ ## Documentation
+
+ For detailed documentation, visit [https://docs.fluxloop.dev](https://docs.fluxloop.dev)
+
+ ## License
+
+ Apache License 2.0 - see LICENSE file for details
+
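
The metadata above declares three optional extras (`dev`, `langchain`, `langgraph`). Assuming standard pip extras syntax, installing them would look like this:

```bash
# Core SDK only
pip install fluxloop

# With the LangChain / LangGraph integrations declared via Provides-Extra
pip install "fluxloop[langchain,langgraph]"

# Development tooling (pytest, ruff, mypy, black)
pip install "fluxloop[dev]"
```
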
@@ -0,0 +1,40 @@
+ # FluxLoop SDK
+
+ FluxLoop SDK for agent instrumentation and tracing.
+
+ ## Installation
+
+ ```bash
+ pip install fluxloop
+ ```
+
+ ## Quick Start
+
+ ```python
+ from fluxloop import trace, FluxLoopClient
+
+ # Initialize the client
+ client = FluxLoopClient()
+
+ # Use the trace decorator
+ @trace()
+ def my_agent_function(prompt: str):
+     result = ...  # Your agent logic here
+     return result
+ ```
+
+ ## Features
+
+ - 🔍 **Automatic Tracing**: Instrument your agent code with simple decorators
+ - 📊 **Rich Context**: Capture inputs, outputs, and metadata
+ - 🔄 **Async Support**: Works with both sync and async functions
+ - 🎯 **Framework Integration**: Built-in support for LangChain and LangGraph
+
+ ## Documentation
+
+ For detailed documentation, visit [https://docs.fluxloop.dev](https://docs.fluxloop.dev)
+
+ ## License
+
+ Apache License 2.0 - see LICENSE file for details
+
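
The feature list advertises async support for the same decorator-based API. A minimal sketch of what that might look like, assuming `trace()` wraps coroutines the same way it wraps plain functions; note that the package `__init__` further down exports `agent`, `prompt`, and `tool` decorators rather than `trace`, so the exact import may differ:

```python
import asyncio

from fluxloop import trace, FluxLoopClient  # import path as used in the Quick Start above

client = FluxLoopClient()

@trace()
async def my_async_agent(prompt: str) -> str:
    # Placeholder agent logic; replace with a real model or tool call
    await asyncio.sleep(0.1)
    return f"handled: {prompt}"

# Run the traced coroutine
print(asyncio.run(my_async_agent("hello")))
```
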
@@ -0,0 +1,58 @@
+ """
+ FluxLoop SDK - Agent instrumentation and tracing library.
+ """
+
+ from .context import FluxLoopContext, get_current_context, instrument
+ from .decorators import agent, prompt, tool
+ from .schemas import (
+     ExperimentConfig,
+     PersonaConfig,
+     RunnerConfig,
+     VariationStrategy,
+     Trace,
+     Observation,
+     ObservationType,
+     ObservationLevel,
+     Score,
+     ScoreDataType,
+     TraceStatus,
+ )
+ from .client import FluxLoopClient
+ from .config import configure, get_config, reset_config
+ from .recording import disable_recording, enable_recording, record_call_args, set_recording_options
+
+ __version__ = "0.1.0"
+
+ __all__ = [
+     # Decorators
+     "agent",
+     "prompt",
+     "tool",
+     # Context
+     "instrument",
+     "get_current_context",
+     "FluxLoopContext",
+     # Client
+     "FluxLoopClient",
+     # Config
+     "configure",
+     "get_config",
+     "reset_config",
+     "enable_recording",
+     "disable_recording",
+     "set_recording_options",
+     "record_call_args",
+     # Schemas - configs
+     "ExperimentConfig",
+     "PersonaConfig",
+     "RunnerConfig",
+     "VariationStrategy",
+     # Schemas - tracing
+     "Trace",
+     "Observation",
+     "ObservationType",
+     "ObservationLevel",
+     "Score",
+     "ScoreDataType",
+     "TraceStatus",
+ ]
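
Taken together, these exports suggest a top-level workflow roughly like the sketch below. It is assumption-heavy: `configure()`'s keyword arguments and the `agent()` decorator's signature are not defined anywhere in this diff, and the keywords shown are guessed from the config fields referenced in the buffering and HTTP client modules later on.

```python
import fluxloop
from fluxloop import agent, configure

# Hypothetical keywords; the real configure() signature is not shown in this diff
configure(collector_url="http://localhost:8000", enabled=True, debug=True)

@agent()  # assumed to take no required arguments, mirroring trace() in the README
def answer(question: str) -> str:
    # Placeholder agent logic
    return f"echo: {question}"

print(answer("ping"))
print(fluxloop.__version__)  # "0.1.0"
```
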
@@ -0,0 +1,186 @@
+ """
+ Event buffering and batch sending logic.
+ """
+
+ import atexit
+ import json
+ import threading
+ import time
+ from collections import deque
+ from datetime import datetime
+ from typing import Any, Deque, Dict, List, Optional, Tuple
+ from uuid import UUID
+
+ from .config import get_config
+ from .models import ObservationData, TraceData
+ from .storage import OfflineStore
+
+
+ class EventBuffer:
+     """
+     Singleton buffer for collecting and batching events.
+     """
+
+     _instance: Optional["EventBuffer"] = None
+     _lock = threading.Lock()
+
+     def __init__(self):
+         """Initialize the buffer."""
+         if EventBuffer._instance is not None:
+             raise RuntimeError("Use EventBuffer.get_instance() instead")
+
+         self.config = get_config()
+         self.traces: Deque[TraceData] = deque(maxlen=self.config.max_queue_size)
+         self.observations: Deque[Tuple[UUID, ObservationData]] = deque(
+             maxlen=self.config.max_queue_size
+         )
+
+         # Threading
+         self.send_lock = threading.Lock()
+         self.last_flush = time.time()
+
+         # Background thread for periodic flushing
+         self.stop_event = threading.Event()
+         self.flush_thread = threading.Thread(target=self._flush_periodically, daemon=True)
+         self.flush_thread.start()
+
+         # Register cleanup on exit
+         atexit.register(self.shutdown)
+
+         # Offline store
+         self.offline_store = OfflineStore()
+
+     @classmethod
+     def get_instance(cls) -> "EventBuffer":
+         """Get or create the singleton instance."""
+         if cls._instance is None:
+             with cls._lock:
+                 if cls._instance is None:
+                     cls._instance = cls()
+         return cls._instance
+
+     def add_trace(self, trace: TraceData) -> None:
+         """
+         Add a trace to the buffer.
+
+         Args:
+             trace: Trace data to add
+         """
+         if not self.config.enabled:
+             return
+
+         with self.send_lock:
+             self.traces.append(trace)
+
+     def add_observation(self, trace_id: UUID, observation: ObservationData) -> None:
+         """
+         Add an observation to the buffer.
+
+         Args:
+             trace_id: ID of the parent trace
+             observation: Observation data to add
+         """
+         if not self.config.enabled:
+             return
+
+         with self.send_lock:
+             self.observations.append((trace_id, observation))
+
+     def flush_if_needed(self) -> None:
+         """Flush the buffer if batch size is reached."""
+         should_flush = False
+
+         with self.send_lock:
+             total_items = len(self.traces) + len(self.observations)
+             should_flush = total_items >= self.config.batch_size
+
+         if should_flush:
+             self.flush()
+
+     def flush(self) -> None:
+         """Send all buffered events to the collector."""
+         if not self.config.enabled:
+             return
+
+         # Collect items to send
+         traces_to_send: List[TraceData] = []
+         observations_to_send: List[Tuple[UUID, ObservationData]] = []
+
+         with self.send_lock:
+             # Move items from buffer
+             while self.traces:
+                 traces_to_send.append(self.traces.popleft())
+
+             while self.observations:
+                 observations_to_send.append(self.observations.popleft())
+
+             self.last_flush = time.time()
+
+         # Send if there's data
+         if traces_to_send or observations_to_send:
+             self._send_batch(traces_to_send, observations_to_send)
+
+     def _send_batch(
+         self,
+         traces: List[TraceData],
+         observations: List[Tuple[UUID, ObservationData]]
+     ) -> None:
+         """
+         Send a batch of events to the collector.
+
+         Args:
+             traces: List of traces to send
+             observations: List of (trace_id, observation) tuples
+         """
+         # Import here to avoid circular dependency
+         send_errors = False
+
+         if self.config.use_collector:
+             from .client import FluxLoopClient
+
+             client = FluxLoopClient()
+
+             for trace in traces:
+                 try:
+                     client.send_trace(trace)
+                 except Exception as e:
+                     send_errors = True
+                     if self.config.debug:
+                         print(f"Failed to send trace {trace.id}: {e}")
+
+             for trace_id, observation in observations:
+                 try:
+                     client.send_observation(trace_id, observation)
+                 except Exception as e:
+                     send_errors = True
+                     if self.config.debug:
+                         print(f"Failed to send observation {observation.id}: {e}")
+
+         if send_errors or not self.config.use_collector:
+             self.offline_store.record_traces(traces)
+             self.offline_store.record_observations(observations)
+
+     def _flush_periodically(self) -> None:
+         """Background thread to flush periodically."""
+         while not self.stop_event.is_set():
+             time.sleep(self.config.flush_interval)
+
+             # Check if enough time has passed since last flush
+             with self.send_lock:
+                 time_since_flush = time.time() - self.last_flush
+                 has_data = bool(self.traces or self.observations)
+
+             if has_data and time_since_flush >= self.config.flush_interval:
+                 self.flush()
+
+     def shutdown(self) -> None:
+         """Shutdown the buffer and flush remaining events."""
+         # Stop the background thread
+         self.stop_event.set()
+
+         # Final flush
+         self.flush()
+
+         # Wait for thread to stop (with timeout)
+         if self.flush_thread.is_alive():
+             self.flush_thread.join(timeout=2.0)
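
For reference, this is how the buffer above is meant to be driven by the rest of the SDK: a process-wide singleton that batches events and falls back to the `OfflineStore` when the collector is unreachable. The sketch below uses only methods defined on `EventBuffer`; the module path and the `TraceData` constructor arguments are assumptions, since the diff names neither the file nor the model fields.

```python
from fluxloop.buffer import EventBuffer   # module path assumed; the diff does not name the file
from fluxloop.models import TraceData     # constructor fields below are hypothetical

buffer = EventBuffer.get_instance()       # double-checked-locking singleton

trace = TraceData(name="checkout-agent")  # hypothetical fields
buffer.add_trace(trace)

# Flush immediately once batch_size items have accumulated...
buffer.flush_if_needed()

# ...otherwise the daemon thread flushes every flush_interval seconds,
# and atexit triggers shutdown() for a final flush on interpreter exit.
buffer.shutdown()
```
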
@@ -0,0 +1,175 @@
+ """
+ HTTP client for sending data to the collector.
+ """
+
+ import json
+ from typing import Any, Dict, Optional
+ from uuid import UUID
+
+ import httpx
+
+ from .config import get_config
+ from .models import ObservationData, TraceData
+
+
+ class FluxLoopClient:
+     """
+     HTTP client for communicating with the FluxLoop collector.
+     """
+
+     def __init__(self, collector_url: Optional[str] = None, api_key: Optional[str] = None):
+         """
+         Initialize the client.
+
+         Args:
+             collector_url: Override collector URL
+             api_key: Override API key
+         """
+         self.config = get_config()
+         self.collector_url = collector_url or self.config.collector_url
+         self.api_key = api_key or self.config.api_key
+         self._client: Optional[httpx.Client] = None
+         if self.config.use_collector:
+             self._client = httpx.Client(
+                 base_url=self.collector_url,
+                 timeout=self.config.timeout,
+                 headers=self._get_headers(),
+             )
+
+     def _get_headers(self) -> Dict[str, str]:
+         """Get common headers for requests."""
+         headers = {
+             "Content-Type": "application/json",
+             "User-Agent": "fluxloop-sdk/0.1.0",
+         }
+
+         if self.api_key:
+             headers["Authorization"] = f"Bearer {self.api_key}"
+
+         if self.config.service_name:
+             headers["X-Service-Name"] = self.config.service_name
+
+         if self.config.environment:
+             headers["X-Environment"] = self.config.environment
+
+         return headers
+
+     def send_trace(self, trace: TraceData) -> Dict[str, Any]:
+         """
+         Send a trace to the collector.
+
+         Args:
+             trace: Trace data to send
+
+         Returns:
+             Response from the collector
+
+         Raises:
+             httpx.HTTPError: If the request fails
+         """
+         if not self.config.enabled:
+             return {"status": "disabled"}
+
+         # Convert to JSON-serializable format
+         payload = self._serialize_trace(trace)
+
+         if not self._client:
+             return {"status": "collector_disabled"}
+
+         try:
+             response = self._client.post("/api/traces", json=payload)
+             response.raise_for_status()
+             return response.json()
+         except httpx.HTTPError as e:
+             if self.config.debug:
+                 print(f"Error sending trace: {e}")
+             raise
+
+     def send_observation(
+         self, trace_id: UUID, observation: ObservationData
+     ) -> Dict[str, Any]:
+         """
+         Send an observation to the collector.
+
+         Args:
+             trace_id: ID of the parent trace
+             observation: Observation data to send
+
+         Returns:
+             Response from the collector
+
+         Raises:
+             httpx.HTTPError: If the request fails
+         """
+         if not self.config.enabled:
+             return {"status": "disabled"}
+
+         # Convert to JSON-serializable format
+         payload = self._serialize_observation(observation)
+         payload["trace_id"] = str(trace_id)
+
+         if not self._client:
+             return {"status": "collector_disabled"}
+
+         try:
+             response = self._client.post(
+                 f"/api/traces/{trace_id}/observations",
+                 json=payload
+             )
+             response.raise_for_status()
+             return response.json()
+         except httpx.HTTPError as e:
+             if self.config.debug:
+                 print(f"Error sending observation: {e}")
+             raise
+
+     def _serialize_trace(self, trace: TraceData) -> Dict[str, Any]:
+         """Serialize trace for JSON transmission."""
+         data = trace.model_dump(exclude_none=True)
+
+         # Convert UUIDs to strings
+         if "id" in data:
+             data["id"] = str(data["id"])
+         if "session_id" in data:
+             data["session_id"] = str(data["session_id"])
+
+         # Convert datetime to ISO format
+         if "start_time" in data:
+             data["start_time"] = data["start_time"].isoformat()
+         if "end_time" in data and data["end_time"]:
+             data["end_time"] = data["end_time"].isoformat()
+
+         return data
+
+     def _serialize_observation(self, observation: ObservationData) -> Dict[str, Any]:
+         """Serialize observation for JSON transmission."""
+         data = observation.model_dump(exclude_none=True)
+
+         # Convert UUIDs to strings
+         if "id" in data:
+             data["id"] = str(data["id"])
+         if "parent_observation_id" in data:
+             data["parent_observation_id"] = str(data["parent_observation_id"])
+         if "trace_id" in data:
+             data["trace_id"] = str(data["trace_id"])
+
+         # Convert datetime to ISO format
+         if "start_time" in data:
+             data["start_time"] = data["start_time"].isoformat()
+         if "end_time" in data and data["end_time"]:
+             data["end_time"] = data["end_time"].isoformat()
+
+         return data
+
+     def close(self) -> None:
+         """Close the HTTP client."""
+         if self._client:
+             self._client.close()
+
+     def __enter__(self):
+         """Context manager entry."""
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Context manager exit."""
+         self.close()
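
Because `FluxLoopClient` implements `__enter__`/`__exit__`, it can be used as a context manager so the underlying `httpx.Client` is always closed. A usage sketch, again treating the `TraceData`/`ObservationData` constructor fields as placeholders since `models.py` is not part of this diff:

```python
from uuid import uuid4

from fluxloop import FluxLoopClient
from fluxloop.models import TraceData, ObservationData  # module path assumed; fields hypothetical

trace = TraceData(id=uuid4(), name="demo")               # hypothetical constructor
observation = ObservationData(id=uuid4(), name="llm-call")

# Explicit overrides; both fall back to the global config when omitted
with FluxLoopClient(collector_url="http://localhost:8000", api_key="sk-test") as client:
    client.send_trace(trace)                        # POST /api/traces
    client.send_observation(trace.id, observation)  # POST /api/traces/{trace_id}/observations
```
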