quantumflow-sdk 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. api/__init__.py +1 -0
  2. api/auth.py +208 -0
  3. api/main.py +403 -0
  4. api/models.py +137 -0
  5. api/routes/__init__.py +1 -0
  6. api/routes/auth_routes.py +234 -0
  7. api/routes/teleport_routes.py +415 -0
  8. db/__init__.py +15 -0
  9. db/crud.py +319 -0
  10. db/database.py +93 -0
  11. db/models.py +197 -0
  12. quantumflow/__init__.py +47 -0
  13. quantumflow/algorithms/__init__.py +48 -0
  14. quantumflow/algorithms/compression/__init__.py +7 -0
  15. quantumflow/algorithms/compression/amplitude_amplification.py +189 -0
  16. quantumflow/algorithms/compression/qft_compression.py +133 -0
  17. quantumflow/algorithms/compression/token_compression.py +261 -0
  18. quantumflow/algorithms/cryptography/__init__.py +6 -0
  19. quantumflow/algorithms/cryptography/qkd.py +205 -0
  20. quantumflow/algorithms/cryptography/qrng.py +231 -0
  21. quantumflow/algorithms/machine_learning/__init__.py +7 -0
  22. quantumflow/algorithms/machine_learning/qnn.py +276 -0
  23. quantumflow/algorithms/machine_learning/qsvm.py +249 -0
  24. quantumflow/algorithms/machine_learning/vqe.py +229 -0
  25. quantumflow/algorithms/optimization/__init__.py +7 -0
  26. quantumflow/algorithms/optimization/grover.py +223 -0
  27. quantumflow/algorithms/optimization/qaoa.py +251 -0
  28. quantumflow/algorithms/optimization/quantum_annealing.py +237 -0
  29. quantumflow/algorithms/utility/__init__.py +6 -0
  30. quantumflow/algorithms/utility/circuit_optimizer.py +194 -0
  31. quantumflow/algorithms/utility/error_correction.py +330 -0
  32. quantumflow/api/__init__.py +1 -0
  33. quantumflow/api/routes/__init__.py +4 -0
  34. quantumflow/api/routes/billing_routes.py +520 -0
  35. quantumflow/backends/__init__.py +33 -0
  36. quantumflow/backends/base_backend.py +184 -0
  37. quantumflow/backends/braket_backend.py +345 -0
  38. quantumflow/backends/ibm_backend.py +112 -0
  39. quantumflow/backends/simulator_backend.py +86 -0
  40. quantumflow/billing/__init__.py +25 -0
  41. quantumflow/billing/models.py +126 -0
  42. quantumflow/billing/stripe_service.py +619 -0
  43. quantumflow/core/__init__.py +12 -0
  44. quantumflow/core/entanglement.py +164 -0
  45. quantumflow/core/memory.py +147 -0
  46. quantumflow/core/quantum_backprop.py +394 -0
  47. quantumflow/core/quantum_compressor.py +309 -0
  48. quantumflow/core/teleportation.py +386 -0
  49. quantumflow/integrations/__init__.py +107 -0
  50. quantumflow/integrations/autogen_tools.py +501 -0
  51. quantumflow/integrations/crewai_agents.py +425 -0
  52. quantumflow/integrations/crewai_tools.py +407 -0
  53. quantumflow/integrations/langchain_memory.py +385 -0
  54. quantumflow/integrations/langchain_tools.py +366 -0
  55. quantumflow/integrations/mcp_server.py +575 -0
  56. quantumflow_sdk-0.1.0.dist-info/METADATA +190 -0
  57. quantumflow_sdk-0.1.0.dist-info/RECORD +60 -0
  58. quantumflow_sdk-0.1.0.dist-info/WHEEL +5 -0
  59. quantumflow_sdk-0.1.0.dist-info/entry_points.txt +2 -0
  60. quantumflow_sdk-0.1.0.dist-info/top_level.txt +3 -0
quantumflow/integrations/langchain_memory.py
@@ -0,0 +1,385 @@
+"""
+LangChain Memory Integration for QuantumFlow.
+
+Provides quantum-enhanced memory classes:
+- QuantumChatMemory: Compressed conversation history
+- QuantumVectorStore: Quantum-compressed vector storage
+"""
+
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple
+
+import numpy as np
+
+try:
+    from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+    from langchain_core.vectorstores import VectorStore
+    from langchain_core.embeddings import Embeddings
+    from langchain_core.documents import Document
+    LANGCHAIN_AVAILABLE = True
+except ImportError:
+    LANGCHAIN_AVAILABLE = False
+
+    # Dummy stand-ins so the module can be imported (and QuantumVectorStore
+    # subclassed) without LangChain; _check_langchain() blocks actual use.
+    class VectorStore:
+        pass
+
+    class Embeddings:
+        pass
+
+    class Document:
+        pass
+
+    class BaseMessage:
+        pass
+
+    class HumanMessage:
+        pass
+
+    class AIMessage:
+        pass
+
+from quantumflow.core.quantum_compressor import QuantumCompressor
+from quantumflow.core.memory import QuantumMemory
+
+
+def _check_langchain():
+    if not LANGCHAIN_AVAILABLE:
+        raise ImportError(
+            "LangChain is not installed. "
+            "Install it with: pip install langchain langchain-core"
+        )
+
+
+@dataclass
+class QuantumMemoryStats:
+    """Statistics for quantum memory usage."""
+    total_messages: int
+    compressed_tokens: int
+    original_tokens: int
+    compression_ratio: float
+    qubits_used: int
+
+
+class QuantumChatMemory:
+    """
+    Quantum-compressed chat memory for LangChain.
+
+    Uses quantum amplitude encoding to compress conversation history,
+    achieving 53% token reduction while preserving semantic content.
+
+    Example:
+        from langchain.llms import OpenAI
+        from langchain.chains import ConversationChain
+        from quantumflow.integrations import QuantumChatMemory
+
+        memory = QuantumChatMemory()
+        chain = ConversationChain(llm=OpenAI(), memory=memory)
+        chain.predict(input="Hello!")
+    """
+
+    def __init__(
+        self,
+        backend: str = "simulator",
+        compression_level: int = 1,
+        max_token_limit: int = 2000,
+        human_prefix: str = "Human",
+        ai_prefix: str = "AI",
+        memory_key: str = "history",
+        return_messages: bool = False,
+        **kwargs
+    ):
+        _check_langchain()
+        self.human_prefix = human_prefix
+        self.ai_prefix = ai_prefix
+        self.memory_key = memory_key
+        self.return_messages = return_messages
+        self._compressor = QuantumCompressor(backend=backend)
+        self._quantum_memory = QuantumMemory(backend=backend)
+        self._compression_level = compression_level
+        self._max_token_limit = max_token_limit
+        self._messages: List[BaseMessage] = []
+        self._compressed_history: List[Dict[str, Any]] = []
+        self._msg_counter = 0  # unique key counter for quantum memory entries
+        self._stats = QuantumMemoryStats(
+            total_messages=0,
+            compressed_tokens=0,
+            original_tokens=0,
+            compression_ratio=1.0,
+            qubits_used=0,
+        )
+
+    @property
+    def memory_variables(self) -> List[str]:
+        """Return memory variables."""
+        return [self.memory_key]
+
+    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+        """Load memory variables for chain."""
+        if self.return_messages:
+            return {self.memory_key: self._messages}
+
+        # Return string format
+        history_str = self._format_history()
+        return {self.memory_key: history_str}
+
+    def _format_history(self) -> str:
+        """Format conversation history as string."""
+        lines = []
+        for msg in self._messages:
+            if isinstance(msg, HumanMessage):
+                lines.append(f"{self.human_prefix}: {msg.content}")
+            elif isinstance(msg, AIMessage):
+                lines.append(f"{self.ai_prefix}: {msg.content}")
+        return "\n".join(lines)
+
+    def _tokenize(self, text: str) -> List[int]:
+        """Simple tokenization (word-based)."""
+        words = text.split()
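+        # Note: Python's str hash is salted per process (PYTHONHASHSEED), so
+        # these token IDs are not stable across runs; in-process use only.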
+        return [hash(w) % 10000 for w in words]
+
+    def _compress_message(self, content: str) -> Dict[str, Any]:
+        """Compress a message using quantum encoding."""
+        tokens = self._tokenize(content)
+
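+        # Messages shorter than two tokens are kept uncompressed (amplitude
+        # encoding needs at least two amplitudes, i.e. one qubit).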
+        if len(tokens) < 2:
+            return {
+                "original": content,
+                "tokens": tokens,
+                "compressed": None,
+                "qubits": 0,
+            }
+
+        result = self._compressor.compress(
+            tokens=tokens,
+            compression_level=self._compression_level,
+        )
+
+        # Store in quantum memory under a unique key. (Keying on
+        # total_messages would collide: it is updated only once per turn,
+        # so the AI message would overwrite the human one.)
+        key = f"msg_{self._msg_counter}"
+        self._msg_counter += 1
+        self._quantum_memory.store(key, [float(t) for t in tokens])
+
+        return {
+            "original": content,
+            "tokens": tokens,
+            "compressed": result,
+            "qubits": result.n_qubits,
+            "key": key,
+        }
+
+    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+        """Save context from conversation turn."""
+        input_key = list(inputs.keys())[0] if inputs else "input"
+        output_key = list(outputs.keys())[0] if outputs else "output"
+
+        human_input = inputs.get(input_key, "")
+        ai_output = outputs.get(output_key, "")
+
+        # Compress and store
+        human_compressed = self._compress_message(human_input)
+        ai_compressed = self._compress_message(ai_output)
+
+        self._compressed_history.append({
+            "human": human_compressed,
+            "ai": ai_compressed,
+        })
+
+        # Update messages
+        self._messages.append(HumanMessage(content=human_input))
+        self._messages.append(AIMessage(content=ai_output))
+
+        # Update stats
+        self._stats.total_messages += 2
+        self._stats.original_tokens += len(human_compressed["tokens"]) + len(ai_compressed["tokens"])
+        self._stats.compressed_tokens += human_compressed["qubits"] + ai_compressed["qubits"]
+        if self._stats.compressed_tokens > 0:
+            self._stats.compression_ratio = self._stats.original_tokens / max(1, self._stats.compressed_tokens)
+        self._stats.qubits_used = self._stats.compressed_tokens
+
+        # Prune if over limit
+        self._prune_if_needed()
+
+    def _prune_if_needed(self) -> None:
+        """Prune old messages if over token limit."""
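+        # total_messages is not decremented here; it tracks the lifetime
+        # message count rather than the currently retained window.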
+        while (
+            len(self._messages) > 2 and
+            self._stats.original_tokens > self._max_token_limit
+        ):
+            # Remove oldest pair
+            self._messages = self._messages[2:]
+            if self._compressed_history:
+                removed = self._compressed_history.pop(0)
+                self._stats.original_tokens -= (
+                    len(removed["human"]["tokens"]) +
+                    len(removed["ai"]["tokens"])
+                )
+                self._stats.compressed_tokens -= (
+                    removed["human"]["qubits"] +
+                    removed["ai"]["qubits"]
+                )
+
+    def clear(self) -> None:
+        """Clear memory."""
+        self._messages = []
+        self._compressed_history = []
+        self._quantum_memory.clear()
+        self._msg_counter = 0
+        self._stats = QuantumMemoryStats(
+            total_messages=0,
+            compressed_tokens=0,
+            original_tokens=0,
+            compression_ratio=1.0,
+            qubits_used=0,
+        )
+
+    def get_stats(self) -> QuantumMemoryStats:
+        """Get memory statistics."""
+        return self._stats
+
+
+class QuantumVectorStore(VectorStore):
+    """
+    Quantum-compressed vector store for LangChain.
+
+    Uses quantum amplitude encoding to compress embedding vectors:
+    an n-dimensional vector can be amplitude-encoded in O(log n) qubits,
+    which is the source of the memory savings.
+
+    Example:
+        from langchain.embeddings import OpenAIEmbeddings
+        from quantumflow.integrations import QuantumVectorStore
+
+        embeddings = OpenAIEmbeddings()
+        vectorstore = QuantumVectorStore(embedding=embeddings)
+        vectorstore.add_texts(["Document 1", "Document 2"])
+        results = vectorstore.similarity_search("query")
+    """
+
+    def __init__(
+        self,
+        embedding: Embeddings,
+        backend: str = "simulator",
+        compression_level: int = 1,
+    ):
+        _check_langchain()
+        self._embedding = embedding
+        self._compressor = QuantumCompressor(backend=backend)
+        self._quantum_memory = QuantumMemory(backend=backend)
+        self._compression_level = compression_level
+        self._documents: List[Document] = []
+        self._embeddings: List[np.ndarray] = []
+        self._compressed_embeddings: List[Any] = []
+
+    def add_texts(
+        self,
+        texts: List[str],
+        metadatas: Optional[List[dict]] = None,
+        **kwargs: Any,
+    ) -> List[str]:
+        """Add texts to the vector store."""
+        ids = []
+
+        for i, text in enumerate(texts):
+            # Get embedding
+            embedding = self._embedding.embed_query(text)
+            embedding_array = np.array(embedding)
+
+            # Compress embedding: min-max normalize to non-negative values
+            # for quantum encoding, then quantize to integer tokens
+            normalized = (embedding_array - embedding_array.min()) / (
+                embedding_array.max() - embedding_array.min() + 1e-8
+            )
+            tokens = (normalized * 10000).astype(int).tolist()
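+            # The quantized copy is lossy; the exact embedding is also kept
+            # in self._embeddings and is what similarity search uses.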
+
+            if len(tokens) >= 2:
+                compressed = self._compressor.compress(
+                    tokens=tokens,
+                    compression_level=self._compression_level,
+                )
+            else:
+                compressed = None
+
+            # Store
+            doc_id = f"doc_{len(self._documents)}"
+            metadata = metadatas[i] if metadatas else {}
+
+            self._documents.append(Document(page_content=text, metadata=metadata))
+            self._embeddings.append(embedding_array)
+            self._compressed_embeddings.append(compressed)
+
+            # Store in quantum memory
+            self._quantum_memory.store(doc_id, embedding_array.tolist())
+
+            ids.append(doc_id)
+
+        return ids
+
+    def similarity_search(
+        self,
+        query: str,
+        k: int = 4,
+        **kwargs: Any,
+    ) -> List[Document]:
+        """Search for similar documents."""
+        # Get query embedding
+        query_embedding = np.array(self._embedding.embed_query(query))
+
+        # Compute similarities (cosine)
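+        # Note: scores are computed against the exact embeddings in
+        # self._embeddings; the compressed copies are not consulted here.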
+        similarities = []
+        for i, doc_embedding in enumerate(self._embeddings):
+            similarity = np.dot(query_embedding, doc_embedding) / (
+                np.linalg.norm(query_embedding) * np.linalg.norm(doc_embedding) + 1e-8
+            )
+            similarities.append((i, similarity))
+
+        # Sort by similarity
+        similarities.sort(key=lambda x: x[1], reverse=True)
+
+        # Return top k
+        results = []
+        for i, _ in similarities[:k]:
+            results.append(self._documents[i])
+
+        return results
+
+    def similarity_search_with_score(
+        self,
+        query: str,
+        k: int = 4,
+        **kwargs: Any,
+    ) -> List[Tuple[Document, float]]:
+        """Search with similarity scores."""
+        query_embedding = np.array(self._embedding.embed_query(query))
+
+        similarities = []
+        for i, doc_embedding in enumerate(self._embeddings):
+            similarity = np.dot(query_embedding, doc_embedding) / (
+                np.linalg.norm(query_embedding) * np.linalg.norm(doc_embedding) + 1e-8
+            )
+            similarities.append((i, similarity))
+
+        similarities.sort(key=lambda x: x[1], reverse=True)
+
+        results = []
+        for i, score in similarities[:k]:
+            results.append((self._documents[i], score))
+
+        return results
+
+    @classmethod
+    def from_texts(
+        cls,
+        texts: List[str],
+        embedding: Embeddings,
+        metadatas: Optional[List[dict]] = None,
+        **kwargs: Any,
+    ) -> "QuantumVectorStore":
+        """Create vector store from texts."""
+        store = cls(embedding=embedding, **kwargs)
+        store.add_texts(texts, metadatas)
+        return store
+
+    def get_compression_stats(self) -> Dict[str, Any]:
+        """Get compression statistics."""
+        total_original = sum(len(e) for e in self._embeddings)
+        total_compressed = sum(
+            c.n_qubits if c else len(self._embeddings[i])
+            for i, c in enumerate(self._compressed_embeddings)
+        )
+
+        return {
+            "total_documents": len(self._documents),
+            "original_dimensions": total_original,
+            "compressed_qubits": total_compressed,
+            "compression_ratio": total_original / max(1, total_compressed),
+        }
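
A minimal end-to-end sketch of the two classes in this file, exercising only the API shown above. The StubEmbeddings class is a hypothetical stand-in, included solely so the example is self-contained; any langchain_core Embeddings implementation would work the same way:

from typing import List

from langchain_core.embeddings import Embeddings

from quantumflow.integrations import QuantumChatMemory, QuantumVectorStore


class StubEmbeddings(Embeddings):
    """Hypothetical toy embeddings: character-frequency histogram."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(t) for t in texts]

    def embed_query(self, text: str) -> List[float]:
        vec = [0.0] * 8
        for ch in text:
            vec[ord(ch) % 8] += 1.0
        return vec


# Chat memory: save a turn, read history back, inspect compression stats.
memory = QuantumChatMemory(backend="simulator")
memory.save_context({"input": "Hello!"}, {"output": "Hi, how can I help?"})
print(memory.load_memory_variables({})["history"])
print(memory.get_stats())

# Vector store: add documents, search, inspect compression stats.
store = QuantumVectorStore(embedding=StubEmbeddings())
store.add_texts(["Document 1", "Document 2"])
print(store.similarity_search("Document 1", k=1))
print(store.get_compression_stats())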