quantumflow-sdk 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,577 @@
1
+ """
2
+ Temporal Memory for LSTM-like State Storage.
3
+
4
+ Provides:
5
+ - Sequential state storage with temporal ordering
6
+ - Cosine similarity-based pattern matching
7
+ - Cross-run historical queries
8
+ - Optional quantum compression
9
+
10
+ Example:
11
+ memory = TemporalMemoryStore()
12
+
13
+ # Store states
14
+ memory.store(pipeline_id, run_id, seq=0, state_vector=[0.1, 0.2, 0.3])
15
+ memory.store(pipeline_id, run_id, seq=1, state_vector=[0.2, 0.3, 0.4])
16
+
17
+ # Find similar patterns
18
+ similar = memory.find_similar(query_vector=[0.15, 0.25, 0.35], top_k=5)
19
+
20
+ # Use LSTM-like memory
21
+ lstm = LSTMMemory(hidden_size=64)
22
+ lstm.update(state_vector)
23
+ context = lstm.get_context()
24
+ """
25
+
26
+ import math
27
+ import logging
28
+ from dataclasses import dataclass, field
29
+ from typing import Any, Dict, List, Optional, Tuple
30
+ import uuid
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+
35
@dataclass
class MemoryEntry:
    """One temporally-ordered state snapshot held in memory.

    All attributes are supplied by the caller; ``norm`` is derived from
    ``state_vector`` in ``__post_init__`` when it is not provided.
    """

    id: str
    pipeline_id: str
    run_id: str
    sequence_number: int
    state_vector: List[float]
    dimension: int
    is_compressed: bool = False
    compressed_vector: Optional[List[float]] = None
    compression_n_qubits: Optional[int] = None
    extra_data: Dict[str, Any] = field(default_factory=dict)
    norm: Optional[float] = None
    cluster_id: Optional[int] = None

    def __post_init__(self):
        """Derive the Euclidean norm of the state vector when absent."""
        if self.norm is not None:
            return
        squared_components = (component * component for component in self.state_vector)
        self.norm = math.sqrt(sum(squared_components))
56
+
57
+
58
class TemporalMemoryStore:
    """
    Sequential state storage for pattern matching.

    Supports:
    - Store/retrieve state vectors with temporal ordering
    - Cosine similarity search
    - Cross-run queries
    - Optional quantum compression
    """

    def __init__(
        self,
        use_database: bool = True,
        enable_compression: bool = False,
        backend: str = "simulator",
    ):
        """
        Initialize temporal memory store.

        Args:
            use_database: Whether to persist to database
            enable_compression: Enable quantum compression
            backend: Quantum backend for compression
        """
        self.use_database = use_database
        self.enable_compression = enable_compression
        self.backend = backend

        # In-memory storage: pipeline_id -> run_id -> entries.
        # Each per-run list is kept sorted by sequence_number.
        self._entries: Dict[str, Dict[str, List[MemoryEntry]]] = {}

        # Quantum compressor (lazy loaded on first use)
        self._compressor = None

    def _get_compressor(self):
        """Get or create the quantum compressor.

        Returns None when compression is disabled or the optional
        compressor dependency is not importable.
        """
        if self._compressor is None and self.enable_compression:
            try:
                from quantumflow.core.quantum_compressor import QuantumCompressor
                self._compressor = QuantumCompressor(backend=self.backend)
            except ImportError:
                logger.warning("QuantumCompressor not available")
        return self._compressor

    def store(
        self,
        pipeline_id: str,
        run_id: str,
        sequence_number: int,
        state_vector: List[float],
        metadata: Optional[Dict[str, Any]] = None,
        compress: bool = False,
    ) -> str:
        """
        Store a state vector in temporal memory.

        Args:
            pipeline_id: Pipeline identifier
            run_id: Run identifier
            sequence_number: Sequence position
            state_vector: State vector to store
            metadata: Optional metadata
            compress: Whether to apply quantum compression

        Returns:
            Entry ID
        """
        entry_id = str(uuid.uuid4())

        # Precompute the norm so similarity search can reuse it.
        # (0.0 — a float — for empty vectors, matching MemoryEntry.norm's type.)
        norm = math.sqrt(sum(x * x for x in state_vector)) if state_vector else 0.0

        # Optional quantum compression (best-effort: failures are logged, not raised)
        compressed_vector = None
        compression_n_qubits = None

        if compress and self.enable_compression:
            compressor = self._get_compressor()
            if compressor:
                try:
                    compressed = compressor.compress(state_vector)
                    compressed_vector = (
                        compressed.amplitudes.tolist()
                        if hasattr(compressed.amplitudes, "tolist")
                        else list(compressed.amplitudes)
                    )
                    compression_n_qubits = compressed.n_qubits
                except Exception as e:
                    logger.warning(f"Compression failed: {e}")

        entry = MemoryEntry(
            id=entry_id,
            pipeline_id=pipeline_id,
            run_id=run_id,
            sequence_number=sequence_number,
            state_vector=state_vector,
            dimension=len(state_vector),
            is_compressed=compressed_vector is not None,
            compressed_vector=compressed_vector,
            compression_n_qubits=compression_n_qubits,
            extra_data=metadata or {},
            norm=norm,
        )

        # Store in memory, keeping the per-run list ordered by sequence number.
        run_entries = self._entries.setdefault(pipeline_id, {}).setdefault(run_id, [])
        run_entries.append(entry)

        # Only re-sort when the append actually broke the ordering; in-order
        # appends (the common case) stay O(1) instead of O(n log n) per store.
        # Equal sequence numbers keep insertion order, matching a stable sort.
        if len(run_entries) > 1 and run_entries[-2].sequence_number > sequence_number:
            run_entries.sort(key=lambda e: e.sequence_number)

        # Database persistence (best-effort)
        if self.use_database:
            self._save_to_database(entry)

        return entry_id

    def _save_to_database(self, entry: MemoryEntry):
        """Persist an entry to the database (best-effort; failures are logged)."""
        try:
            from db.database import get_session
            from db.models import TemporalMemoryState

            with get_session() as session:
                db_entry = TemporalMemoryState(
                    id=uuid.UUID(entry.id),
                    pipeline_id=uuid.UUID(entry.pipeline_id),
                    run_id=uuid.UUID(entry.run_id),
                    sequence_number=entry.sequence_number,
                    state_vector=entry.state_vector,
                    state_dimension=entry.dimension,
                    is_compressed=entry.is_compressed,
                    compressed_vector=entry.compressed_vector,
                    compression_n_qubits=entry.compression_n_qubits,
                    extra_data=entry.extra_data,
                    state_norm=entry.norm,
                    cluster_id=entry.cluster_id,
                )
                session.add(db_entry)
                session.commit()

        except Exception as e:
            logger.warning(f"Database save failed: {e}")

    def get_sequence(
        self,
        pipeline_id: str,
        run_id: str,
        start: int = 0,
        end: Optional[int] = None,
    ) -> List[MemoryEntry]:
        """
        Get a sequence of states from a run.

        Args:
            pipeline_id: Pipeline identifier
            run_id: Run identifier
            start: Start sequence number
            end: End sequence number (exclusive); None means unbounded

        Returns:
            List of memory entries in sequence order (empty if unknown run)
        """
        entries = self._entries.get(pipeline_id, {}).get(run_id, [])

        return [
            e
            for e in entries
            if e.sequence_number >= start and (end is None or e.sequence_number < end)
        ]

    def get_latest(self, pipeline_id: str, run_id: str, n: int = 1) -> List[MemoryEntry]:
        """
        Get the N most recent entries from a run.

        Args:
            pipeline_id: Pipeline identifier
            run_id: Run identifier
            n: Number of entries; values <= 0 yield an empty list

        Returns:
            List of most recent entries
        """
        # Guard n <= 0 explicitly: entries[-0:] would return the WHOLE list.
        if n <= 0:
            return []
        entries = self.get_sequence(pipeline_id, run_id)
        return entries[-n:]

    def find_similar(
        self,
        query_vector: List[float],
        pipeline_id: Optional[str] = None,
        top_k: int = 5,
        threshold: float = 0.0,
    ) -> List[Tuple[MemoryEntry, float]]:
        """
        Find similar states using cosine similarity.

        Args:
            query_vector: Query state vector
            pipeline_id: Optional filter by pipeline
            top_k: Number of results to return
            threshold: Minimum similarity threshold

        Returns:
            List of (entry, similarity) tuples, best match first
        """
        query_norm = math.sqrt(sum(x * x for x in query_vector))
        if query_norm == 0:
            return []

        query_dim = len(query_vector)
        results: List[Tuple[MemoryEntry, float]] = []

        # Scan all matching pipelines (a single one when filtered).
        pipelines = [pipeline_id] if pipeline_id else list(self._entries.keys())

        for pid in pipelines:
            runs = self._entries.get(pid)
            if not runs:
                continue

            for entries in runs.values():
                for entry in entries:
                    # Cosine similarity is undefined across dimensions; skip mismatches.
                    if entry.dimension != query_dim:
                        continue

                    # Reuse precomputed norms to avoid re-deriving them per entry.
                    similarity = self._cosine_similarity(
                        query_vector, entry.state_vector, query_norm, entry.norm
                    )

                    if similarity >= threshold:
                        results.append((entry, similarity))

        # Sort by similarity descending and keep the best top_k.
        results.sort(key=lambda x: x[1], reverse=True)

        return results[:top_k]

    def _cosine_similarity(
        self,
        vec1: List[float],
        vec2: List[float],
        norm1: Optional[float] = None,
        norm2: Optional[float] = None,
    ) -> float:
        """Compute cosine similarity between two vectors.

        Returns 0.0 on dimension mismatch or when either vector is zero.
        Norms may be passed in to skip recomputation.
        """
        if len(vec1) != len(vec2):
            return 0.0

        dot_product = sum(a * b for a, b in zip(vec1, vec2))

        if norm1 is None:
            norm1 = math.sqrt(sum(x * x for x in vec1))
        if norm2 is None:
            norm2 = math.sqrt(sum(x * x for x in vec2))

        if norm1 == 0 or norm2 == 0:
            return 0.0

        return dot_product / (norm1 * norm2)

    def find_patterns(
        self,
        pattern: List[List[float]],
        pipeline_id: Optional[str] = None,
        similarity_threshold: float = 0.8,
    ) -> List[Tuple[str, str, int, float]]:
        """
        Find sequences matching a pattern.

        Args:
            pattern: List of state vectors forming the pattern
            pipeline_id: Optional filter by pipeline
            similarity_threshold: Minimum average similarity

        Returns:
            List of (pipeline_id, run_id, start_seq, avg_similarity),
            best match first
        """
        pattern_length = len(pattern)
        if pattern_length == 0:
            return []

        results: List[Tuple[str, str, int, float]] = []

        pipelines = [pipeline_id] if pipeline_id else list(self._entries.keys())

        for pid in pipelines:
            runs = self._entries.get(pid)
            if not runs:
                continue

            for run_id, entries in runs.items():
                if len(entries) < pattern_length:
                    continue

                # Slide a pattern-length window through the run's sequence.
                for start in range(len(entries) - pattern_length + 1):
                    window = entries[start : start + pattern_length]

                    # All window/pattern pairs must be dimension-compatible.
                    if any(
                        e.dimension != len(p)
                        for e, p in zip(window, pattern)
                    ):
                        continue

                    # Average pairwise similarity over the window, reusing
                    # each entry's cached norm.
                    similarities = [
                        self._cosine_similarity(e.state_vector, p, e.norm)
                        for e, p in zip(window, pattern)
                    ]
                    avg_similarity = sum(similarities) / len(similarities)

                    if avg_similarity >= similarity_threshold:
                        results.append((pid, run_id, window[0].sequence_number, avg_similarity))

        # Best-matching windows first.
        results.sort(key=lambda x: x[3], reverse=True)

        return results

    def get_run_ids(self, pipeline_id: str) -> List[str]:
        """Get all run IDs for a pipeline (empty list if unknown)."""
        if pipeline_id not in self._entries:
            return []
        return list(self._entries[pipeline_id].keys())

    def clear_run(self, pipeline_id: str, run_id: str):
        """Clear all in-memory entries for a run (no-op if unknown)."""
        if pipeline_id in self._entries and run_id in self._entries[pipeline_id]:
            del self._entries[pipeline_id][run_id]

    def clear_pipeline(self, pipeline_id: str):
        """Clear all in-memory entries for a pipeline (no-op if unknown)."""
        if pipeline_id in self._entries:
            del self._entries[pipeline_id]
405
+
406
+
407
class LSTMMemory:
    """
    LSTM-like memory for sequential state processing.

    Maintains:
    - Hidden state (short-term memory)
    - Cell state (long-term memory)
    - Attention over recent states

    A simplified LSTM with no trainable parameters: state updates use
    fixed, weight-free transformations.
    """

    def __init__(
        self,
        hidden_size: int = 64,
        memory_length: int = 20,
        forget_rate: float = 0.1,
    ):
        """
        Initialize LSTM memory.

        Args:
            hidden_size: Size of hidden/cell states
            memory_length: Number of recent states to attend over
            forget_rate: Rate of forgetting (0-1)
        """
        self.hidden_size = hidden_size
        self.memory_length = memory_length
        self.forget_rate = forget_rate

        # Short- and long-term state, zero-initialized.
        self.hidden_state: List[float] = [0.0] * hidden_size
        self.cell_state: List[float] = [0.0] * hidden_size

        # Sliding window of projected inputs used for attention.
        self.recent_states: List[List[float]] = []

    def _sigmoid(self, value: float) -> float:
        """Sigmoid activation with input clamped to avoid math.exp overflow."""
        clamped = min(500, max(-500, value))
        return 1.0 / (1.0 + math.exp(-clamped))

    def _tanh(self, value: float) -> float:
        """Tanh activation with input clamped for consistency with _sigmoid."""
        return math.tanh(min(500, max(-500, value)))

    def _project(self, input_vec: List[float], target_size: int) -> List[float]:
        """Resize a vector to target_size by average pooling or zero padding."""
        source_size = len(input_vec)
        if source_size == target_size:
            return input_vec

        if source_size < target_size:
            # Too short: extend with zeros.
            return input_vec + [0.0] * (target_size - source_size)

        # Too long: average-pool contiguous slices down to target_size.
        ratio = source_size / target_size
        pooled = []
        for idx in range(target_size):
            lo = int(idx * ratio)
            hi = int((idx + 1) * ratio)
            pooled.append(sum(input_vec[lo:hi]) / (hi - lo))
        return pooled

    def update(self, input_state: List[float]) -> List[float]:
        """
        Update memory with a new input state.

        Simplified LSTM step (no learned weights):
        - forget_gate = sigmoid(input + hidden - forget_rate)
        - input_gate = sigmoid(input + hidden)
        - cell_candidate = tanh(input + hidden)
        - cell_state = forget_gate * cell_state + input_gate * cell_candidate
        - hidden_state = tanh(cell_state)

        Args:
            input_state: Input state vector (any length; projected to hidden_size)

        Returns:
            Copy of the updated hidden state
        """
        projected = self._project(input_state, self.hidden_size)

        # Per-element pre-activation: projected input plus current hidden value.
        combined = [p + h for p, h in zip(projected, self.hidden_state)]

        forget_gate = [self._sigmoid(c - self.forget_rate) for c in combined]
        input_gate = [self._sigmoid(c) for c in combined]
        cell_candidate = [self._tanh(c) for c in combined]

        # Blend the old cell state with the new candidate, gated element-wise.
        self.cell_state = [
            keep * old + write * cand
            for keep, old, write, cand in zip(
                forget_gate, self.cell_state, input_gate, cell_candidate
            )
        ]

        # Hidden state is the squashed cell state.
        self.hidden_state = [self._tanh(c) for c in self.cell_state]

        # Track the projected input in the bounded attention window.
        self.recent_states.append(projected)
        if len(self.recent_states) > self.memory_length:
            del self.recent_states[0]

        return list(self.hidden_state)

    def get_context(self) -> List[float]:
        """
        Get a context vector blending the hidden state with an
        attention-weighted average of recent states.

        Returns:
            Context vector (the hidden state alone when no history exists)
        """
        if not self.recent_states:
            return list(self.hidden_state)

        # Attention score per stored state: dot product with the hidden state.
        scores = [
            sum(h * s for h, s in zip(self.hidden_state, state))
            for state in self.recent_states
        ]

        # Softmax with max-subtraction for numerical stability.
        peak = max(scores) if scores else 0
        exp_scores = [math.exp(s - peak) for s in scores]
        denom = sum(exp_scores) or 1
        weights = [e / denom for e in exp_scores]

        # Attention-weighted sum of the recent states, element by element.
        attended = [
            sum(w * state[i] for w, state in zip(weights, self.recent_states))
            for i in range(self.hidden_size)
        ]

        # Equal-weight blend of the hidden state and the attended context.
        return [0.5 * h + 0.5 * a for h, a in zip(self.hidden_state, attended)]

    def reset(self):
        """Zero out both memory states and drop the attention window."""
        self.hidden_state = [0.0] * self.hidden_size
        self.cell_state = [0.0] * self.hidden_size
        self.recent_states.clear()

    def get_hidden_state(self) -> List[float]:
        """Return a copy of the current hidden state."""
        return list(self.hidden_state)

    def get_cell_state(self) -> List[float]:
        """Return a copy of the current cell state."""
        return list(self.cell_state)
@@ -1,14 +1,14 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: quantumflow-sdk
3
- Version: 0.3.0
4
- Summary: Quantum-optimized AI agent workflow platform with 53% token compression
3
+ Version: 0.4.0
4
+ Summary: Quantum-optimized AI agent workflow platform with 53% token compression, Gray-code measurement, and SES VQE
5
5
  Author-email: BlockQuantAI <hello@blockquant.ai>
6
6
  License-Expression: MIT
7
7
  Project-URL: Homepage, https://qflowai.dev
8
8
  Project-URL: Documentation, https://qflowai.dev/docs
9
9
  Project-URL: Repository, https://github.com/blockquantai/quantumflow
10
10
  Project-URL: Issues, https://github.com/blockquantai/quantumflow/issues
11
- Keywords: quantum,quantum-computing,ai,machine-learning,token-compression,qkd,teleportation,langchain,crewai
11
+ Keywords: quantum,quantum-computing,ai,machine-learning,token-compression,qkd,teleportation,langchain,crewai,vqe,gray-code,ses-ansatz,tight-binding
12
12
  Classifier: Development Status :: 4 - Beta
13
13
  Classifier: Intended Audience :: Developers
14
14
  Classifier: Intended Audience :: Science/Research
@@ -1,15 +1,17 @@
1
1
  api/__init__.py,sha256=Oc_JXlNXQ8vZqr7A32xlhaW-FLUk2DfSVqjDoh4cv4Y,31
2
2
  api/auth.py,sha256=8gxgMSwsfsUCK0mM-ZTWM_ZsvTDvyOfVdO0NYLCHg1I,5942
3
- api/main.py,sha256=WQgi8h7FAjkwPlSiwGQhnviHZiAn7VZ79g_qeRUg-aQ,12673
4
- api/models.py,sha256=i0bwfN697wSxnIchHo13aOWkzZQE90WSRFUArJS0a2M,3262
3
+ api/main.py,sha256=J6aBtKpBl3hj2jTxRmFrPZmeqJ03j09W0O_PnjswY18,13920
4
+ api/models.py,sha256=GcG0XZ8x7ES73bqs2RsAUnvVYkEm7C-0uzdqMcjYOB8,4692
5
5
  api/routes/__init__.py,sha256=NsndLhwM3PzBnPYSUiKAu0pJKf2JnVkt3vFbaFkA6PA,26
6
- api/routes/algorithm_routes.py,sha256=jPwogmqbRiW3YHZCY_XuUZEVSI08zTHpBOz2zKac0iA,30139
6
+ api/routes/algorithm_routes.py,sha256=9oZr0u2aqV0uRYun52thF1ZAfLZJ_uesQ7nNnKP0Lo8,34901
7
7
  api/routes/auth_routes.py,sha256=f80G08AfUh2XCUdziJbncNH8HT9GlOq3f9BpX9p_ans,5966
8
+ api/routes/chat_routes.py,sha256=ITg1PZRnY7e85pIMrQ1C5SedYCLydPAXyPP0UGExIxA,18023
9
+ api/routes/pipeline_routes.py,sha256=rg-nv9R2RNEAan1XKMoE5A5eAVv_lPB-Hd5xKeAbkLs,19442
8
10
  api/routes/teleport_routes.py,sha256=8G8-lAuB1H2qSom7Im7s1NiiHRy0HBPzlnJ-QAer3DI,12149
9
11
  db/__init__.py,sha256=CLyI_3_AP7wQATuxLV2yHPvYDNikJYmH5BMQ3Oez1xw,280
10
12
  db/crud.py,sha256=q_HpP0RzsI7Lz1EMhEWzaMxdxzGAdVTBD2LALqbGLGQ,8175
11
13
  db/database.py,sha256=Md_e3z1t6NzKCCVdS4gRo0Pf6LNZvRClIX4QU8F8InI,2078
12
- db/models.py,sha256=W-NLqT6kGtXTN3r4DqVPSPglt-_iFDBlqB6e3nzflrg,5975
14
+ db/models.py,sha256=FBwNQ6kd95dxpjsd3-IofwFsL-bsjsJKjNtME1LvkGY,18927
13
15
  quantumflow/__init__.py,sha256=lUdk3uElyzBJe_VRjAjbehIULFDcbmvFePieaayWIwk,1731
14
16
  quantumflow/algorithms/__init__.py,sha256=waXASb2jnbAcjV-xS1wx-bGmPJ5lCj111dJ14eB8KCo,916
15
17
  quantumflow/algorithms/compression/__init__.py,sha256=rejDCdZJEy1tamQdDaZodGbo8wA1rszlXEtwvYgZO7A,361
@@ -19,10 +21,10 @@ quantumflow/algorithms/compression/token_compression.py,sha256=qwjV-fWV7fy9PvEQS
19
21
  quantumflow/algorithms/cryptography/__init__.py,sha256=uzJMUgBJIH16gP_RaZhg9Ue66uw8mwqUYMllfhOU9Bw,173
20
22
  quantumflow/algorithms/cryptography/qkd.py,sha256=mFRlypQXi4AhFxSy0FBpuAapJh5La1rx-siL1cJoqTc,6023
21
23
  quantumflow/algorithms/cryptography/qrng.py,sha256=aVyFZZZYsNw-LKZghIF505BQDzX9uc2kIrcoK92OXU8,5875
22
- quantumflow/algorithms/machine_learning/__init__.py,sha256=P2jSE7ibOaA9uzUw2Mu8p2zc-D5t9nwEsnSBmgQSJ5Y,252
24
+ quantumflow/algorithms/machine_learning/__init__.py,sha256=OcDRTJIPUs86zxfnXhRMEQCFpAaibzoLrf1kBlr_h5o,408
23
25
  quantumflow/algorithms/machine_learning/qnn.py,sha256=1aPNNTu0I2c_C6maPKWDMHW2p-Torz6nhGrIeD8gCs0,7972
24
26
  quantumflow/algorithms/machine_learning/qsvm.py,sha256=_JAsrMI7nthOdmN8VETCXwpjCOaO1q-bQJQR-1eMPj8,7839
25
- quantumflow/algorithms/machine_learning/vqe.py,sha256=Vy5wxr2KAKBK_qphMn9seKckoVrqNbqKgsZr66SKNpU,6958
27
+ quantumflow/algorithms/machine_learning/vqe.py,sha256=SP_LHFSnKM3f71KuXvqPy4QEdwoTZ8PNQDWHG3p2tDU,19067
26
28
  quantumflow/algorithms/optimization/__init__.py,sha256=gQ_EgS4je9HBwk563bY86kuTfqWwPMnFGV-HcxuLsr0,297
27
29
  quantumflow/algorithms/optimization/grover.py,sha256=onn6rgIZFIw9CAPQQDhnc_1Y8PkQJYF8-GNLFUZPg8Y,6250
28
30
  quantumflow/algorithms/optimization/qaoa.py,sha256=7VsD456_P4JrzsUJ9nJ2DEKOEoGLHdHUetqOfIwedFw,7254
@@ -42,23 +44,33 @@ quantumflow/backends/simulator_backend.py,sha256=tMbDcGwOWYbhTA8gNZheCfk1inAwbsH
42
44
  quantumflow/billing/__init__.py,sha256=4b9JwTXIT7LsehvkX42bsvMZ6RvcP3om5MuW9i9ulXo,484
43
45
  quantumflow/billing/models.py,sha256=NeLbWX_31T52gKTL_toYcSwjUjzk3Te9jq0QFPaMTbY,3344
44
46
  quantumflow/billing/stripe_service.py,sha256=Fy5vUSUQt2r3kKAQhb2KzxMAklOoB-bOnjZE0fAKbYg,20525
45
- quantumflow/core/__init__.py,sha256=fZFBkVu1wsLf7qSY1s3OMi6oBIwy65K_Cwk-jrd_SSo,359
47
+ quantumflow/core/__init__.py,sha256=-oDYnG4iq8251BSueR3aFOK1xjNES8ZR-UIJ7IAFDHs,538
46
48
  quantumflow/core/entanglement.py,sha256=0H9XKU7D5OkUBgMEiNKsvfjAhsiD2TX3BmCYzrVyd5U,4663
47
49
  quantumflow/core/memory.py,sha256=0rB2Dks3XM1NEiL-v4G0P6jeL2BvcdJNljezRrVccdI,4494
48
50
  quantumflow/core/quantum_backprop.py,sha256=XozlPtwinegI5NosxlW4TrpP5Zh58_yI8WnxznwBHlU,12370
49
- quantumflow/core/quantum_compressor.py,sha256=okzfF4-1uL22FcK9Xhng1DLzmIbz3ADN-R1xzFTv-tI,10094
51
+ quantumflow/core/quantum_compressor.py,sha256=iwEON8FocQk6CcMtLdPEJbKq8J70PpZ-7GyMpbFMSDQ,21973
50
52
  quantumflow/core/teleportation.py,sha256=_T5rRItjFramqWdqBNLwu_fY7Yr9pmdUda4L7STcwcA,12420
51
53
  quantumflow/core/workflow.py,sha256=HCazmlPns68TvIJytvcVoy0LSHb9sO-pUcDN5wPR2Zw,10116
52
54
  quantumflow/integrations/__init__.py,sha256=ksQr0HHYMwTBY0_MgyT5MUBX9fdRn_QErDzHs8rQ7F8,2979
53
55
  quantumflow/integrations/autogen_tools.py,sha256=sTTX7mGFjUKbmVatlcY9N9F2NeR6vkIj-VYduE0SNus,14672
54
56
  quantumflow/integrations/crewai_agents.py,sha256=t62hukL0xg2FV54yczTAakNYQA-AOZ9AWWgzDnH0LGM,12604
55
57
  quantumflow/integrations/crewai_tools.py,sha256=bY5uJyKmCegt6Kb9hvErhvaKcCDlk2_injx50-krN7E,13744
58
+ quantumflow/integrations/domain_agents.py,sha256=-51HX4VPX9gfCw3egzduydpIEyMgdSXbRH1yZABAtQk,20545
56
59
  quantumflow/integrations/langchain_memory.py,sha256=wgYTdovncZNWpFwcNZjhNUqNRi661ys9GXaHYmbXP-Q,12608
57
60
  quantumflow/integrations/langchain_tools.py,sha256=bDrKZDYSRQJJGSNc9iay1Q4NoIR8CHmtZLcybS5ub_w,12401
58
61
  quantumflow/integrations/mcp_server.py,sha256=KJTAxJOyCVl7-whTD1iss9VZmyi0K1f4gNJCH8Cvl_0,21117
59
62
  quantumflow/integrations/openai_functions.py,sha256=8jQH4XkBxK9AbwC47BEYVIrbRAEWGdsMyw0xbZrGNB4,18412
60
- quantumflow_sdk-0.3.0.dist-info/METADATA,sha256=d-Zb7TDbvirOoWHjSPap2p2V6tHYZPhe0Zu5k7fIbGY,5461
61
- quantumflow_sdk-0.3.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
62
- quantumflow_sdk-0.3.0.dist-info/entry_points.txt,sha256=ebX2acoOLgym42XZEqym3OfKCYiPz-mFuPSSGsHFz4c,53
63
- quantumflow_sdk-0.3.0.dist-info/top_level.txt,sha256=hEr_GRvoZ3-83naVIhNuJvoAND1aCvhBag_ynxQguIo,19
64
- quantumflow_sdk-0.3.0.dist-info/RECORD,,
63
+ quantumflow/pipeline/__init__.py,sha256=OktP_tFPAoCREuAd7BmJbcSiGn7cmnsnqfbogsBrPEA,905
64
+ quantumflow/pipeline/anomaly_detector.py,sha256=VR46CMdSeoTnZu7YsNIvB4Q2tAIMSP1_cb3nxYBRVfo,16576
65
+ quantumflow/pipeline/base_pipeline.py,sha256=z5EhaUru-3UTBsBUcG08d6Oc3G48m5zxLnvDCU6Ji4I,18883
66
+ quantumflow/pipeline/checkpoint_manager.py,sha256=XadGFl6a5KL4SbD76TG7xIheECTUgrMyaxrNH-3nLRA,20105
67
+ quantumflow/pipeline/temporal_memory.py,sha256=SToIJQYec_5trS9jDd5j75_9SS-GKfgUWBjiw9rWj7Y,18391
68
+ quantumflow/pipeline/finance/__init__.py,sha256=Y3CWyWCRVfhBKy40UX0T40Obez3k5xq3ZHDqy7tYbZU,172
69
+ quantumflow/pipeline/finance/portfolio_optimization.py,sha256=D_Jt0Fu4BPQkhVxY_UF1nNZobTq-P19HGa7Y7g11JN4,20301
70
+ quantumflow/pipeline/healthcare/__init__.py,sha256=cxhCICqhp3LUVOxfMNnvKmDUDiPLUG1Da7FdoID_xko,157
71
+ quantumflow/pipeline/healthcare/protein_folding.py,sha256=nL2kEP55jaTs-jPuKSnyTFQAX4BlA4tjslr3PH0ogZI,34299
72
+ quantumflow_sdk-0.4.0.dist-info/METADATA,sha256=a-tFVJXcJMSqm_Av_2mQhCwfPbvcBtk8YgbXdKFWvbc,5536
73
+ quantumflow_sdk-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
74
+ quantumflow_sdk-0.4.0.dist-info/entry_points.txt,sha256=ebX2acoOLgym42XZEqym3OfKCYiPz-mFuPSSGsHFz4c,53
75
+ quantumflow_sdk-0.4.0.dist-info/top_level.txt,sha256=hEr_GRvoZ3-83naVIhNuJvoAND1aCvhBag_ynxQguIo,19
76
+ quantumflow_sdk-0.4.0.dist-info/RECORD,,