text4q-cortex 0.1.0a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. text4q_cortex-0.1.0a0/LICENSE +96 -0
  2. text4q_cortex-0.1.0a0/PKG-INFO +131 -0
  3. text4q_cortex-0.1.0a0/README.md +97 -0
  4. text4q_cortex-0.1.0a0/cortex/__init__.py +16 -0
  5. text4q_cortex-0.1.0a0/cortex/cli.py +486 -0
  6. text4q_cortex-0.1.0a0/cortex/cloud/__init__.py +0 -0
  7. text4q_cortex-0.1.0a0/cortex/cloud/models.py +122 -0
  8. text4q_cortex-0.1.0a0/cortex/cloud/queue.py +202 -0
  9. text4q_cortex-0.1.0a0/cortex/cloud/server.py +254 -0
  10. text4q_cortex-0.1.0a0/cortex/connectors/__init__.py +0 -0
  11. text4q_cortex-0.1.0a0/cortex/connectors/ibm.py +147 -0
  12. text4q_cortex-0.1.0a0/cortex/core.py +119 -0
  13. text4q_cortex-0.1.0a0/cortex/models.py +58 -0
  14. text4q_cortex-0.1.0a0/cortex/nlp/__init__.py +0 -0
  15. text4q_cortex-0.1.0a0/cortex/nlp/engine.py +250 -0
  16. text4q_cortex-0.1.0a0/cortex/nlp/llm_engine.py +339 -0
  17. text4q_cortex-0.1.0a0/cortex/scheduler/__init__.py +0 -0
  18. text4q_cortex-0.1.0a0/cortex/scheduler/integration.py +183 -0
  19. text4q_cortex-0.1.0a0/cortex/scheduler/optimizer.py +311 -0
  20. text4q_cortex-0.1.0a0/cortex/scheduler/problem.py +236 -0
  21. text4q_cortex-0.1.0a0/cortex/scheduler/qaoa.py +207 -0
  22. text4q_cortex-0.1.0a0/pyproject.toml +61 -0
  23. text4q_cortex-0.1.0a0/setup.cfg +4 -0
  24. text4q_cortex-0.1.0a0/text4q_cortex.egg-info/PKG-INFO +131 -0
  25. text4q_cortex-0.1.0a0/text4q_cortex.egg-info/SOURCES.txt +27 -0
  26. text4q_cortex-0.1.0a0/text4q_cortex.egg-info/dependency_links.txt +1 -0
  27. text4q_cortex-0.1.0a0/text4q_cortex.egg-info/entry_points.txt +2 -0
  28. text4q_cortex-0.1.0a0/text4q_cortex.egg-info/requires.txt +19 -0
  29. text4q_cortex-0.1.0a0/text4q_cortex.egg-info/top_level.txt +1 -0
@@ -0,0 +1,96 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity.
18
+
19
+ "You" (or "Your") shall mean an individual or Legal Entity
20
+ exercising permissions granted by this License.
21
+
22
+ "Source" form shall mean the preferred form for making modifications,
23
+ including but not limited to software source code, documentation
24
+ source, and configuration files.
25
+
26
+ "Object" form shall mean any form resulting from mechanical
27
+ transformation or translation of a Source form.
28
+
29
+ "Work" shall mean the work of authorship made available under
30
+ the License, as indicated by a copyright notice that is included in
31
+ or attached to the work.
32
+
33
+ "Derivative Works" shall mean any work that is based on the Work.
34
+
35
+ "Contribution" shall mean any work of authorship submitted to the
36
+ Licensor for inclusion in the Work by the copyright owner or by
37
+ an individual or Legal Entity authorized to submit on behalf of
38
+ the copyright owner.
39
+
40
+ "Contributor" shall mean Licensor and any Legal Entity on behalf of
41
+ whom a Contribution has been received by the Licensor.
42
+
43
+ 2. Grant of Copyright License. Subject to the terms and conditions of
44
+ this License, each Contributor hereby grants to You a perpetual,
45
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
46
+ copyright license to reproduce, prepare Derivative Works of,
47
+ publicly display, publicly perform, sublicense, and distribute the
48
+ Work and such Derivative Works in Source or Object form.
49
+
50
+ 3. Grant of Patent License. Subject to the terms and conditions of
51
+ this License, each Contributor hereby grants to You a perpetual,
52
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
53
+ patent license to make, use, sell, offer for sale, import, and
54
+ otherwise transfer the Work.
55
+
56
+ 4. Redistribution. You may reproduce and distribute copies of the
57
+ Work or Derivative Works thereof in any medium, with or without
58
+ modifications, and in Source or Object form, provided that You
59
+ meet the following conditions:
60
+
61
+ (a) You must give any other recipients of the Work or Derivative
62
+ Works a copy of this License; and
63
+
64
+ (b) You must cause any modified files to carry prominent notices
65
+ stating that You changed the files; and
66
+
67
+ (c) You must retain, in all copies or Derivative Works of the Work,
68
+ all copyright, patent, trademark, and attribution notices; and
69
+
70
+ (d) If the Work includes a "NOTICE" text file, include a readable
71
+ copy of the attribution notices contained within such NOTICE file.
72
+
73
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
74
+ any Contribution intentionally submitted for inclusion in the Work
75
+ shall be under the terms and conditions of this License.
76
+
77
+ 6. Trademarks. This License does not grant permission to use the trade
78
+ names, trademarks, service marks, or product names of the Licensor.
79
+
80
+ 7. Disclaimer of Warranty. The Work is provided on an "AS IS" BASIS,
81
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND.
82
+
83
+ 8. Limitation of Liability. In no event shall any Contributor be
84
+ liable for any damages arising from use of this Work.
85
+
86
+ 9. Accepting Warranty or Additional Liability. While redistributing
87
+ the Work, You may offer additional warranty or liability obligations
88
+ consistent with this License.
89
+
90
+ Copyright 2024 text4q Cortex Contributors
91
+
92
+ Licensed under the Apache License, Version 2.0 (the "License");
93
+ you may not use this file except in compliance with the License.
94
+ You may obtain a copy of the License at
95
+
96
+ http://www.apache.org/licenses/LICENSE-2.0
@@ -0,0 +1,131 @@
1
+ Metadata-Version: 2.4
2
+ Name: text4q-cortex
3
+ Version: 0.1.0a0
4
+ Summary: Natural language interface for quantum computing infrastructure
5
+ Author: text4q Cortex Contributors
6
+ License: Apache-2.0
7
+ Keywords: quantum computing,nlp,qiskit,openqasm,orchestration
8
+ Classifier: Development Status :: 2 - Pre-Alpha
9
+ Classifier: Intended Audience :: Science/Research
10
+ Classifier: License :: OSI Approved :: Apache Software License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Topic :: Scientific/Engineering :: Physics
13
+ Requires-Python: >=3.10
14
+ Description-Content-Type: text/markdown
15
+ License-File: LICENSE
16
+ Requires-Dist: qiskit>=1.0
17
+ Requires-Dist: qiskit-ibm-runtime>=0.20
18
+ Requires-Dist: pennylane>=0.35
19
+ Requires-Dist: openai>=1.0
20
+ Requires-Dist: pydantic>=2.0
21
+ Requires-Dist: typer>=0.9
22
+ Requires-Dist: rich>=13.0
23
+ Requires-Dist: httpx>=0.25
24
+ Requires-Dist: python-dotenv>=1.0
25
+ Provides-Extra: dev
26
+ Requires-Dist: pytest>=7.0; extra == "dev"
27
+ Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
28
+ Requires-Dist: ruff>=0.1; extra == "dev"
29
+ Requires-Dist: mypy>=1.0; extra == "dev"
30
+ Requires-Dist: pre-commit>=3.0; extra == "dev"
31
+ Provides-Extra: aer
32
+ Requires-Dist: qiskit-aer>=0.13; extra == "aer"
33
+ Dynamic: license-file
34
+
35
+ # text4q Cortex
36
+
37
+ > Natural language interface for quantum computing infrastructure.
38
+ > Write quantum intent in plain language — Cortex compiles, schedules, and executes it on real QPUs.
39
+
40
+ ```python
41
+ from cortex import Cortex
42
+
43
+ cx = Cortex(backend="ibm_quantum")
44
+ result = cx.run("Simulate a Bell state with 2 qubits and measure 1024 times")
45
+ print(result.counts)
46
+ # {'00': 512, '11': 512}
47
+ ```
48
+
49
+ ## What is text4q Cortex?
50
+
51
+ Cortex is an open-source quantum orchestration platform built on three pillars:
52
+
53
+ - **NLP → Circuit**: Translate natural language descriptions into OpenQASM 3.0 circuits
54
+ - **Orchestration**: Manage QPU resources via QRMI, integrating quantum and classical HPC
55
+ - **Execution**: Schedule and run jobs across IBM Quantum, Google, or custom lab QPUs
56
+
57
+ ## Architecture
58
+
59
+ ```
60
+ User (natural language)
61
+
62
+ Cortex NLP Engine ← text4q core: language → OpenQASM
63
+
64
+ OQTOPUS Job Queue ← cloud layer: scheduling + auth
65
+
66
+ QAOA Scheduler ← quantum-native optimization (roadmap)
67
+
68
+ QRMI Resource Manager ← QPU as HPC node
69
+
70
+ QPU / Simulator ← IBM Quantum, Google, Qiskit Aer
71
+ ```
72
+
73
+ ## Installation
74
+
75
+ ```bash
76
+ pip install text4q-cortex
77
+ ```
78
+
79
+ Or from source:
80
+
81
+ ```bash
82
+ git clone https://github.com/your-org/text4q-cortex
83
+ cd text4q-cortex
84
+ pip install -e ".[dev]"
85
+ ```
86
+
87
+ ## Quick Start
88
+
89
+ ```python
90
+ from cortex import Cortex
91
+ from cortex.connectors import IBMQuantumConnector
92
+
93
+ # Connect to IBM Quantum
94
+ connector = IBMQuantumConnector(token="YOUR_IBM_TOKEN")
95
+ cx = Cortex(connector=connector)
96
+
97
+ # Run from natural language
98
+ result = cx.run(
99
+ "Create a GHZ state with 3 qubits, apply noise model T1=50us, measure 2048 shots"
100
+ )
101
+
102
+ print(result.circuit) # the generated OpenQASM circuit
103
+ print(result.counts) # measurement results
104
+ print(result.metadata) # backend, shots, execution time
105
+ ```
106
+
107
+ ## Modules
108
+
109
+ | Module | Description | Status |
110
+ |--------|-------------|--------|
111
+ | `cortex.nlp` | NLP → OpenQASM translation engine | 🚧 v0.1 |
112
+ | `cortex.connectors` | IBM Quantum, Aer, Google backends | 🚧 v0.1 |
113
+ | `cortex.scheduler` | Job queue and QPU resource management | 📋 planned |
114
+ | `cortex.cloud` | Multi-user cloud layer (OQTOPUS-based) | 📋 planned |
115
+
116
+ ## Roadmap
117
+
118
+ - [x] Project structure and architecture
119
+ - [ ] v0.1 — NLP engine (pattern-based) + IBM Quantum connector
120
+ - [ ] v0.2 — LLM-powered circuit generation + multi-backend
121
+ - [ ] v0.3 — OQTOPUS job queue integration
122
+ - [ ] v0.4 — QAOA Scheduler (quantum-native scheduling)
123
+ - [ ] v1.0 — text4q Cortex Cloud (SaaS)
124
+
125
+ ## Contributing
126
+
127
+ Contributions welcome. Please read [CONTRIBUTING.md](docs/CONTRIBUTING.md) first.
128
+
129
+ ## License
130
+
131
+ Apache 2.0 — see [LICENSE](LICENSE).
@@ -0,0 +1,97 @@
1
+ # text4q Cortex
2
+
3
+ > Natural language interface for quantum computing infrastructure.
4
+ > Write quantum intent in plain language — Cortex compiles, schedules, and executes it on real QPUs.
5
+
6
+ ```python
7
+ from cortex import Cortex
8
+
9
+ cx = Cortex(backend="ibm_quantum")
10
+ result = cx.run("Simulate a Bell state with 2 qubits and measure 1024 times")
11
+ print(result.counts)
12
+ # {'00': 512, '11': 512}
13
+ ```
14
+
15
+ ## What is text4q Cortex?
16
+
17
+ Cortex is an open-source quantum orchestration platform built on three pillars:
18
+
19
+ - **NLP → Circuit**: Translate natural language descriptions into OpenQASM 3.0 circuits
20
+ - **Orchestration**: Manage QPU resources via QRMI, integrating quantum and classical HPC
21
+ - **Execution**: Schedule and run jobs across IBM Quantum, Google, or custom lab QPUs
22
+
23
+ ## Architecture
24
+
25
+ ```
26
+ User (natural language)
27
+
28
+ Cortex NLP Engine ← text4q core: language → OpenQASM
29
+
30
+ OQTOPUS Job Queue ← cloud layer: scheduling + auth
31
+
32
+ QAOA Scheduler ← quantum-native optimization (roadmap)
33
+
34
+ QRMI Resource Manager ← QPU as HPC node
35
+
36
+ QPU / Simulator ← IBM Quantum, Google, Qiskit Aer
37
+ ```
38
+
39
+ ## Installation
40
+
41
+ ```bash
42
+ pip install text4q-cortex
43
+ ```
44
+
45
+ Or from source:
46
+
47
+ ```bash
48
+ git clone https://github.com/your-org/text4q-cortex
49
+ cd text4q-cortex
50
+ pip install -e ".[dev]"
51
+ ```
52
+
53
+ ## Quick Start
54
+
55
+ ```python
56
+ from cortex import Cortex
57
+ from cortex.connectors import IBMQuantumConnector
58
+
59
+ # Connect to IBM Quantum
60
+ connector = IBMQuantumConnector(token="YOUR_IBM_TOKEN")
61
+ cx = Cortex(connector=connector)
62
+
63
+ # Run from natural language
64
+ result = cx.run(
65
+ "Create a GHZ state with 3 qubits, apply noise model T1=50us, measure 2048 shots"
66
+ )
67
+
68
+ print(result.circuit) # the generated OpenQASM circuit
69
+ print(result.counts) # measurement results
70
+ print(result.metadata) # backend, shots, execution time
71
+ ```
72
+
73
+ ## Modules
74
+
75
+ | Module | Description | Status |
76
+ |--------|-------------|--------|
77
+ | `cortex.nlp` | NLP → OpenQASM translation engine | 🚧 v0.1 |
78
+ | `cortex.connectors` | IBM Quantum, Aer, Google backends | 🚧 v0.1 |
79
+ | `cortex.scheduler` | Job queue and QPU resource management | 📋 planned |
80
+ | `cortex.cloud` | Multi-user cloud layer (OQTOPUS-based) | 📋 planned |
81
+
82
+ ## Roadmap
83
+
84
+ - [x] Project structure and architecture
85
+ - [ ] v0.1 — NLP engine (pattern-based) + IBM Quantum connector
86
+ - [ ] v0.2 — LLM-powered circuit generation + multi-backend
87
+ - [ ] v0.3 — OQTOPUS job queue integration
88
+ - [ ] v0.4 — QAOA Scheduler (quantum-native scheduling)
89
+ - [ ] v1.0 — text4q Cortex Cloud (SaaS)
90
+
91
+ ## Contributing
92
+
93
+ Contributions welcome. Please read [CONTRIBUTING.md](docs/CONTRIBUTING.md) first.
94
+
95
+ ## License
96
+
97
+ Apache 2.0 — see [LICENSE](LICENSE).
@@ -0,0 +1,16 @@
1
+ """
2
+ text4q Cortex
3
+ =============
4
+ Natural language interface for quantum computing infrastructure.
5
+
6
+ Quick start:
7
+ from cortex import Cortex
8
+ cx = Cortex(backend="ibm_quantum")
9
+ result = cx.run("Bell state with 2 qubits, 1024 shots")
10
+ """
11
+
12
+ from cortex.core import Cortex
13
+ from cortex.models import CortexResult, CircuitIntent
14
+
15
+ __version__ = "0.1.0-alpha"
16
+ __all__ = ["Cortex", "CortexResult", "CircuitIntent"]