luna-quantum 1.0.8rc2__cp314-cp314-win_amd64.whl → 1.0.8rc4__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of luna-quantum has been flagged as potentially problematic.
Files changed (31)
  1. luna_quantum/__init__.py +19 -1
  2. luna_quantum/__init__.pyi +14 -1
  3. luna_quantum/_core.cp314-win_amd64.pyd +0 -0
  4. luna_quantum/_core.pyi +185 -84
  5. luna_quantum/_utility.py +148 -0
  6. luna_quantum/_utility.pyi +20 -0
  7. luna_quantum/exceptions/luna_quantum_call_type_error.py +9 -0
  8. luna_quantum/factories/usecase_factory.py +32 -0
  9. luna_quantum/solve/domain/solve_job.py +42 -8
  10. luna_quantum/solve/interfaces/usecases/__init__.py +4 -0
  11. luna_quantum/solve/interfaces/usecases/solve_job_get_by_id_usecase_i.py +27 -0
  12. luna_quantum/solve/parameters/algorithms/quantum_gate/__init__.py +1 -1
  13. luna_quantum/solve/parameters/algorithms/quantum_gate/flex_qaoa/__init__.py +9 -25
  14. luna_quantum/solve/parameters/algorithms/quantum_gate/flexqaoa/__init__.py +29 -0
  15. luna_quantum/solve/parameters/algorithms/quantum_gate/flexqaoa/config.py +58 -0
  16. luna_quantum/solve/parameters/algorithms/quantum_gate/{flex_qaoa/flex_qaoa.py → flexqaoa/flexqaoa.py} +48 -86
  17. luna_quantum/solve/parameters/algorithms/quantum_gate/flexqaoa/optimizers.py +53 -0
  18. luna_quantum/solve/parameters/algorithms/quantum_gate/flexqaoa/pipeline.py +164 -0
  19. luna_quantum/solve/parameters/backends/__init__.py +2 -0
  20. luna_quantum/solve/parameters/backends/aqarios_gpu.py +17 -0
  21. luna_quantum/solve/parameters/errors.py +30 -0
  22. luna_quantum/solve/usecases/solve_job_get_by_id_usecase.py +44 -0
  23. luna_quantum/solve/usecases/solve_job_get_result_usecase.py +21 -11
  24. {luna_quantum-1.0.8rc2.dist-info → luna_quantum-1.0.8rc4.dist-info}/METADATA +1 -1
  25. {luna_quantum-1.0.8rc2.dist-info → luna_quantum-1.0.8rc4.dist-info}/RECORD +28 -20
  26. luna_quantum/solve/parameters/algorithms/quantum_gate/flex_qaoa/config.py +0 -80
  27. luna_quantum/solve/parameters/algorithms/quantum_gate/flex_qaoa/optimizers.py +0 -99
  28. luna_quantum/solve/parameters/algorithms/quantum_gate/flex_qaoa/pipeline.py +0 -87
  29. {luna_quantum-1.0.8rc2.dist-info → luna_quantum-1.0.8rc4.dist-info}/WHEEL +0 -0
  30. {luna_quantum-1.0.8rc2.dist-info → luna_quantum-1.0.8rc4.dist-info}/licenses/LICENSE +0 -0
  31. {luna_quantum-1.0.8rc2.dist-info → luna_quantum-1.0.8rc4.dist-info}/licenses/NOTICE +0 -0

luna_quantum/solve/parameters/algorithms/quantum_gate/{flex_qaoa/flex_qaoa.py → flexqaoa/flexqaoa.py}

@@ -1,9 +1,10 @@
 from __future__ import annotations
 
+from typing import Literal
+
 from pydantic import BaseModel, Field, model_validator
 
 from luna_quantum.solve.domain.abstract.luna_algorithm import LunaAlgorithm
-from luna_quantum.solve.errors.solve_base_error import SolveBaseError
 from luna_quantum.solve.parameters.algorithms.base_params.qaoa_circuit_params import (
     BasicQAOAParams,
     LinearQAOAParams,
@@ -13,51 +14,20 @@ from luna_quantum.solve.parameters.algorithms.base_params.scipy_optimizer import
     ScipyOptimizerParams,
 )
 from luna_quantum.solve.parameters.backends.aqarios import Aqarios
-
-from .config import AdvancedConfig
-from .optimizers import (
-    CombinedOptimizerParams,
-    InterpolateOptimizerParams,
-    LinearOptimizerParams,
+from luna_quantum.solve.parameters.backends.aqarios_gpu import AqariosGpu
+from luna_quantum.solve.parameters.errors import (
+    InterpolateOptimizerError,
+    QAOAParameterOptimizerError,
+    QAOAParameterRepsMismatchError,
 )
-from .pipeline import PipelineParams
-
-
-class QAOAParameterOptimizerError(SolveBaseError):
-    """QAOA cirucit parameters mismatch with optimizer exception."""
-
-    def __init__(
-        self,
-        optimizer: ScipyOptimizerParams
-        | LinearOptimizerParams
-        | CombinedOptimizerParams
-        | InterpolateOptimizerParams
-        | None,
-        params: BasicQAOAParams | LinearQAOAParams | RandomQAOAParams,
-        extra: str = "",
-    ) -> None:
-        super().__init__(
-            f"Parameter Mismatch of {optimizer.__class__} and {params.__class__}"
-            + ((". " + extra) if extra else "")
-        )
-
-
-class InterpolateOptimizerError(SolveBaseError):
-    """Interpolate optimizer error when final number of reps is too small."""
-
-    def __init__(self, reps_end: int, reps_start: int) -> None:
-        super().__init__(f"{reps_end=} needs to be larger than {reps_start=}.")
-
-
-class QAOAParameterDepthMismatchError(SolveBaseError):
-    """QAOA circuit params mismatch the specified reps."""
 
-    def __init__(self, params_reps: int, reps: int) -> None:
-        super().__init__(f"{params_reps=} needs to match {reps=}.")
+from .config import CustomConfig
+from .optimizers import CombinedOptimizerParams, InterpolateOptimizerParams
+from .pipeline import PipelineParams
 
 
-class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
-    """The FlexQAOA Algorithm for constrained quantum optimization.
+class FlexQAOA(LunaAlgorithm[Aqarios | AqariosGpu], BaseModel):
+    """The FlexQAOA algorithm for constrained quantum optimization.
 
     The FlexQAOA is an extension to the default QAOA with the capabilities to encode
     inequality constriants with indicator functions as well as one-hot constraints
@@ -73,18 +43,11 @@ class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
     Central to this is the pipeline parameter which allows for different configurations.
 
     For instance, if one likes to explore ordinary QUBO simulation with all constraints
-    represented as quadratic penalties, the `one_hot` and `indicator_function` options
+    represented as quadratic penalties, the `xy_mixers` and `indicator_function` options
     need to be manually disabled
     ```
-    pipeline = {"one_hot": None, "indicator_function": None}
-    ```
-
-    If no indicator function is employed, but the input problem contains inequality
-    constraints, slack variables are added to the optimization problem. FlexQAOA allows
-    for a configuration that discards slack variables as their assignment is not
-    necessarily of interest. This option can be enbled by setting
-    ```
-    qaoa_config = {"discard_slack": True}
+    pipeline.xy_mixer.enable = False
+    pipeline.indicator_function.enable = False
     ```
 
     Following the standard protocol for QAOA, a classical optimizer is required that
@@ -100,24 +63,35 @@ class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
         Number of sampled shots.
     reps: int
         Number of QAOA layer repetitions
-    pipeline: PipelineParams | Dict
+    pipeline: PipelineParams
         The pipeline defines the selected features for QAOA circuit generation. By
         default, all supported features are enabled (one-hot constraints, inequality
         constraints and quadratic penalties).
-    optimizer: ScipyOptimizerParams | LinearOptimizerParams | CombinedOptimizerParams |\
-        InterpolateOptimizerParams | None | Dict
-        The classical optimizer for parameter tuning. Default: ScipyOptimizer. Setting
+    optimizer: ScipyOptimizerParams | CombinedOptimizerParams |\
+        InterpolateOptimizerParams | None
+        The classical optimizer for parameter tuning. Setting
        to `None` disables the optimization, leading to an evaluation of the initial
        parameters.
-    qaoa_config: AdvancedConfig | Dict
-        Additional options for the QAOA circuit and evalutation
     initial_params: LinearQAOAParams | BasicQAOAParams | RandomQAOAParams | Dict
        Custom QAOA variational circuit parameters. By default linear
        increasing/decreasing parameters for the selected `reps` are generated.
+    param_conversion: None | Literal["basic"] = "basic"
+        Parameter conversion after initialization. This option set to `None` means the
+        parameters, as specified are used. This parameter set to `"basic"` means the
+        parameters are converted to basic parameters before optimization. This is useful
+        if one only wants to optimize the linear schedule of parameters: Then the option
+        `None` needs to be selected alongside LinearQAOAParams. This
+        option is ignored when CombinedOptimizer is also selected.
+    custom_config: CustomConfig
+        Additional options for the FlexQAOA circuit.
     """
 
-    shots: int = Field(default=1024, ge=1, description="Number of sampled shots.")
-    reps: int = Field(default=1, ge=1, description="Number of QAOA layer repetitions")
+    shots: int = Field(
+        default=1024, ge=1, lt=1 << 16, description="Number of sampled shots."
+    )
+    reps: int = Field(
+        default=1, ge=1, lt=1000, description="Number of QAOA layer repetitions"
+    )
     pipeline: PipelineParams = Field(
         default_factory=lambda: PipelineParams(),
         description="The pipeline defines the selected features for QAOA circuit "
@@ -126,7 +100,6 @@ class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
     )
     optimizer: (
         ScipyOptimizerParams
-        | LinearOptimizerParams
         | CombinedOptimizerParams
         | InterpolateOptimizerParams
         | None
@@ -134,36 +107,25 @@ class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
         default_factory=lambda: ScipyOptimizerParams(),
         description="The classical optimizer. Default: ScipyOptimizer",
     )
-    qaoa_config: AdvancedConfig = Field(
-        default_factory=lambda: AdvancedConfig(),
-        description="Additional options for the QAOA circuit and evalutation",
-    )
     initial_params: LinearQAOAParams | BasicQAOAParams | RandomQAOAParams = Field(
         default_factory=lambda: LinearQAOAParams(delta_beta=0.5, delta_gamma=0.5),
         description="Custom QAOA circuit parameters. By default linear "
        "increasing/decreasing parameters for the selected `reps` are generated.",
     )
+    param_conversion: None | Literal["basic"] = "basic"
+    custom_config: CustomConfig = Field(
+        default_factory=lambda: CustomConfig(),
+        description="Additional configuration options for the FlexQAOA circuit.",
+    )
 
     @model_validator(mode="after")
     def _check_param_type(self) -> FlexQAOA:
-        if isinstance(self.optimizer, LinearOptimizerParams) and isinstance(
-            self.initial_params, BasicQAOAParams
-        ):
-            raise QAOAParameterOptimizerError(self.optimizer, self.initial_params)
-        if isinstance(self.optimizer, CombinedOptimizerParams) and isinstance(
-            self.initial_params, BasicQAOAParams
-        ):
-            raise QAOAParameterOptimizerError(self.optimizer, self.initial_params)
-        if (
-            isinstance(self.optimizer, InterpolateOptimizerParams)
-            and isinstance(self.optimizer.optimizer, LinearOptimizerParams)
-            and isinstance(self.initial_params, BasicQAOAParams)
-        ):
-            raise QAOAParameterOptimizerError(
-                self.optimizer,
-                self.initial_params,
-                extra="LinearOptimizer used in InterpolateOptimizer.",
-            )
+        if isinstance(self.optimizer, CombinedOptimizerParams):
+            if isinstance(self.initial_params, (BasicQAOAParams, RandomQAOAParams)):
+                optim = self.optimizer.__class__.__name__
+                params = self.initial_params.__class__.__name__
+                raise QAOAParameterOptimizerError(optim, params)
+            self.param_conversion = None
         if (
             isinstance(self.optimizer, InterpolateOptimizerParams)
             and self.optimizer.reps_end < self.reps
@@ -177,7 +139,7 @@ class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
             isinstance(self.initial_params, BasicQAOAParams)
             and self.initial_params.reps != self.reps
         ):
-            raise QAOAParameterDepthMismatchError(self.initial_params.reps, self.reps)
+            raise QAOAParameterRepsMismatchError(self.initial_params.reps, self.reps)
         return self
 
     @property
@@ -213,7 +175,7 @@ class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
         return Aqarios()
 
     @classmethod
-    def get_compatible_backends(cls) -> tuple[type[Aqarios]]:
+    def get_compatible_backends(cls) -> tuple[type[Aqarios], type[AqariosGpu]]:
        """
        Check at runtime if the used backend is compatible with the solver.
 
@@ -223,4 +185,4 @@ class FlexQAOA(LunaAlgorithm[Aqarios], BaseModel):
            True if the backend is compatible with the solver, False otherwise.
 
        """
-        return (Aqarios,)
+        return (Aqarios, AqariosGpu)
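
For orientation, here is a minimal configuration sketch against the rc4 surface shown above. It only uses fields that appear in this diff; the import paths are inferred from the new file layout (the package likely also re-exports these names via the new flexqaoa `__init__.py`), so treat them as assumptions.

```python
# Sketch only: import paths are inferred from the file layout in this diff.
from luna_quantum.solve.parameters.algorithms.quantum_gate.flexqaoa.flexqaoa import (
    FlexQAOA,
)
from luna_quantum.solve.parameters.algorithms.quantum_gate.flexqaoa.pipeline import (
    PipelineParams,
)

# Plain-QUBO style run: disable XY-mixers and indicator functions so that all
# constraints end up as quadratic penalties, as the updated docstring describes.
pipeline = PipelineParams()
pipeline.xy_mixer.enable = False
pipeline.indicator_function.enable = False

algorithm = FlexQAOA(
    shots=2048,  # must stay below 1 << 16 per the new Field constraint
    reps=3,      # must stay below 1000 per the new Field constraint
    pipeline=pipeline,
    param_conversion="basic",  # new in rc4; None keeps the parameters as given
)
```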

luna_quantum/solve/parameters/algorithms/quantum_gate/flexqaoa/optimizers.py

@@ -0,0 +1,53 @@
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from luna_quantum.solve.parameters.algorithms.base_params.scipy_optimizer import (
+    ScipyOptimizerParams,
+)
+
+
+class CombinedOptimizerParams(BaseModel):
+    """Combination of LinearOptimizer and ScipyOptimizer.
+
+    Optimizer that first performs an optimization of the linear schedule and then
+    fine tunes individual parameters. Only works in conjunction with `LinearQAOAParams`.
+
+
+    Attributes
+    ----------
+    linear: ScipyOptimizerParams
+        Parameters of the linear optimizer.
+    fine_tune: ScipyOptimizerParams | None
+        Parameters of the fine tuning optimizer. If `None`, the same optimizer is used.
+        Default: `None`.
+    """
+
+    optimizer_type: Literal["combined"] = "combined"
+    linear: ScipyOptimizerParams = Field(default_factory=lambda: ScipyOptimizerParams())
+    fine_tune: ScipyOptimizerParams | None = None
+
+
+class InterpolateOptimizerParams(BaseModel):
+    """Optimizer with sequentially increasing number of QAOA layers.
+
+    Optimizer that starts with `reps` iteration and interpolates sequentially in
+    `reps_step` steps to `reps_end`. In between it performs a full optimization routine
+    tunes individual parameters.
+
+    Attributes
+    ----------
+    optimizer: ScipyOptimizerParams
+        Parameters of the optimizer.
+    reps_step: int
+        Number of QAOA layers added for one interpolation.
+    reps_end: int
+        Final number of QAOA layers to be reached.
+    """
+
+    optimizer_type: Literal["interpolate"] = "interpolate"
+    optimizer: ScipyOptimizerParams = Field(
+        default_factory=lambda: ScipyOptimizerParams()
+    )
+    reps_step: int = Field(default=1, ge=1)
+    reps_end: int = Field(default=10, ge=1, lt=1000)
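
A short construction sketch for the two new optimizer parameter models, using only the fields defined above; the import path is inferred from the new file location and is an assumption.

```python
# Sketch, assuming the module path matches the new file location shown above.
from luna_quantum.solve.parameters.algorithms.quantum_gate.flexqaoa.optimizers import (
    CombinedOptimizerParams,
    InterpolateOptimizerParams,
)
from luna_quantum.solve.parameters.algorithms.base_params.scipy_optimizer import (
    ScipyOptimizerParams,
)

# Linear-schedule optimization followed by per-parameter fine tuning.
combined = CombinedOptimizerParams(
    linear=ScipyOptimizerParams(),
    fine_tune=None,  # None reuses the same optimizer settings for fine tuning
)

# Layer-interpolation schedule: grows the circuit from `reps` up to `reps_end`
# in `reps_step` increments, re-optimizing after each growth step.
interpolate = InterpolateOptimizerParams(reps_step=2, reps_end=8)
```

Note the interplay with the FlexQAOA validator above: `CombinedOptimizerParams` rejects `BasicQAOAParams`/`RandomQAOAParams` initial parameters, and `reps_end` must not be smaller than the algorithm's `reps`, otherwise `InterpolateOptimizerError` is raised.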

luna_quantum/solve/parameters/algorithms/quantum_gate/flexqaoa/pipeline.py

@@ -0,0 +1,164 @@
+from typing import Literal
+
+from pydantic import BaseModel, Field, PositiveFloat
+
+
+class _EnableMixin:
+    enable: bool = True
+
+
+class PenaltySetting(BaseModel):
+    """Penalty factor settings.
+
+    Attributes
+    ----------
+    override: PositiveFloat | None
+        Overrides the automatically evaluated penalty factor.
+    scaling: PositiveFloat
+        Scales the automatically evaluated penalty factor.
+    """
+
+    override: PositiveFloat | None = None
+    scaling: PositiveFloat = 1.0
+
+
+class IndicatorFunctionConfig(BaseModel, _EnableMixin):
+    """Configuration for indicator functions to implement inequality constraints.
+
+    Attributes
+    ----------
+    penalty: PenaltySetting
+        Custom penalty setting for indicator functions.
+    method: Literal["const", "str"]
+        Indicator function implementation method. Default: `"const"`
+        Two options are available:
+
+        - `"const"`: Applies a constant penalty for every constraint violation.
+        - `"if"`: Applies the objective function only if all constraints are satisfied.
+          Automatically ensures objective to be negative.
+
+    enable : bool
+        Toggle to enable or disable this method. Default: True.
+    """
+
+    penalty: PenaltySetting = Field(
+        default_factory=lambda: PenaltySetting(scaling=1),
+        description="Penalty setting for indicator functions.",
+    )
+    method: Literal["if", "const"] = Field(
+        default="const",
+        description="Method of indicator function implementation. Constant Penalty "
+        "(const) or conditional application of cost function (if).",
+    )
+
+
+class XYMixerConfig(BaseModel, _EnableMixin):
+    """Configuration for XY-mixers to implement one-hot constraints.
+
+    Attributes
+    ----------
+    trotter : int
+        Number of trotter steps for XY-mixer implementation. Default: 1.
+    types: list[Literal["even", "odd", "last"]]
+        Mixer types in XY-ring-mixer. Default: `["even", "odd", "last"]`.
+    enable : bool
+        Toggle to enable or disable this method. Default: True.
+    """
+
+    trotter: int = Field(
+        default=1,
+        lt=1000,
+        ge=1,
+        description="Number of trotter steps for XY-mixer implementation.",
+    )
+    types: list[Literal["even", "odd", "last"]] = Field(
+        default=["even", "odd", "last"],
+        description='Mixer types in XY-ring-mixer. Default: `["even", "odd", "last"]`',
+    )
+
+
+class QuadraticPenaltyConfig(BaseModel, _EnableMixin):
+    """Configuration for quadratic penalties.
+
+    Adds penalty terms to the objective. Adds slack variables for inequality constraints
+    if neccessaray.
+
+    Attributes
+    ----------
+    penalty : PenaltySetting
+        Custom penalty setting for quadratic penalty terms.
+    enable : bool
+        Toggle to enable or disable this method. Default: True.
+    """
+
+    penalty: PenaltySetting = Field(
+        default_factory=lambda: PenaltySetting(scaling=2.0),
+        description="Penalty setting for quadratic penalties.",
+    )
+
+
+class SetpackingAsOnehotConfig(BaseModel, _EnableMixin):
+    """Configuration for set-packing to one-hot constraint transformation.
+
+    Attributes
+    ----------
+    enable : bool
+        Toggle to enable or disable this method. Default: True.
+    """
+
+
+class InequalityToEqualityConfig(BaseModel, _EnableMixin):
+    """Configuration for inequality to equality constraint transformation.
+
+    Attributes
+    ----------
+    max_slack : int
+        Maximum number of slack bits to add for each constraint. Default: 10.
+    enable : bool
+        Toggle to enable or disable this method. Default: True.
+    """
+
+    max_slack: int = Field(
+        default=10,
+        description="Maximum number of slack bits to add for each constraint.",
+    )
+
+
+class PipelineParams(BaseModel):
+    """Define the modular FlexQAOA Pipeline.
+
+    Attributes
+    ----------
+    penalty : PenaltySetting
+        General penalty factor settings.
+    inequality_to_equality : InequalityToEqualityConfig
+        Configuration of the "inequality to equality" transformation.
+    setpacking_as_onehot : SetpackingAsOnehotConfig
+        Configuration of the "setpacking to onehot" transformation.
+    xy_mixer : XYMixerConfig
+        Configuration of the XY-mixers.
+    indicator_function : IndicatorFunctionConfig
+        Configuration of the indicator functions.
+    sp_quadratic_penalty : QuadraticPenaltyConfig
+        Configuration of the setpacking quadratic penalty function.
+    quadratic_penalty : QuadraticPenaltyConfig
+        Configuration of the general quadratic penalty function.
+    """
+
+    penalty: PenaltySetting = Field(default_factory=lambda: PenaltySetting(scaling=2.0))
+    inequality_to_equality: InequalityToEqualityConfig = Field(
+        default_factory=InequalityToEqualityConfig
+    )
+    setpacking_as_onehot: SetpackingAsOnehotConfig = Field(
+        default_factory=SetpackingAsOnehotConfig
+    )
+    xy_mixer: XYMixerConfig = Field(default_factory=XYMixerConfig)
+    indicator_function: IndicatorFunctionConfig = Field(
+        default_factory=IndicatorFunctionConfig
+    )
+    sp_quadratic_penalty: QuadraticPenaltyConfig = Field(
+        default_factory=QuadraticPenaltyConfig
+    )
+    quadratic_penalty: QuadraticPenaltyConfig = Field(
+        default_factory=QuadraticPenaltyConfig
+    )
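
A sketch of how the new pipeline model can be customized, using only fields defined in this file; the import path follows the new file location and is an assumption.

```python
# Sketch of the new pipeline model introduced in rc4.
from luna_quantum.solve.parameters.algorithms.quantum_gate.flexqaoa.pipeline import (
    IndicatorFunctionConfig,
    PenaltySetting,
    PipelineParams,
    XYMixerConfig,
)

pipeline = PipelineParams(
    # Fix the global penalty factor instead of letting it be derived automatically.
    penalty=PenaltySetting(override=10.0),
    # Two Trotter steps for the XY-ring-mixer that encodes one-hot constraints.
    xy_mixer=XYMixerConfig(trotter=2),
    # Use the conditional ("if") indicator method instead of the constant penalty.
    indicator_function=IndicatorFunctionConfig(method="if"),
)
```

Each sub-config also carries an `enable` toggle (via the `_EnableMixin` shown above), which is what replaces the rc2-style `{"one_hot": None, ...}` dictionary configuration.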

luna_quantum/solve/parameters/backends/__init__.py

@@ -1,4 +1,5 @@
 from .aqarios import Aqarios
+from .aqarios_gpu import AqariosGpu
 from .aws import AWS, IQM, IonQ, Rigetti
 from .dwave import DWave
 from .dwave_qpu import DWaveQpu
@@ -13,6 +14,7 @@ __all__: list[str] = [
     "IQM",
     "ZIB",
     "Aqarios",
+    "AqariosGpu",
     "DWave",
     "DWaveQpu",
     "Fujitsu",

luna_quantum/solve/parameters/backends/aqarios_gpu.py

@@ -0,0 +1,17 @@
+from luna_quantum.solve.interfaces.backend_i import IBackend
+
+
+class AqariosGpu(IBackend):
+    """Configuration class for the Aqarios GPU backend."""
+
+    @property
+    def provider(self) -> str:
+        """
+        Retrieve the name of the provider.
+
+        Returns
+        -------
+        str
+            The name of the provider.
+        """
+        return "aqarios-gpu"

luna_quantum/solve/parameters/errors.py

@@ -0,0 +1,30 @@
+from luna_quantum.solve.errors.solve_base_error import SolveBaseError
+
+
+class QAOAParameterOptimizerError(SolveBaseError):
+    """QAOA cirucit parameters mismatch with optimizer exception."""
+
+    def __init__(
+        self,
+        optimizer: str,
+        params: str,
+        extra: str = "",
+    ) -> None:
+        super().__init__(
+            f"Parameter Mismatch of '{optimizer}' and '{params}'"
+            + ((": " + extra) if extra else ".")
+        )
+
+
+class InterpolateOptimizerError(SolveBaseError):
+    """Interpolate optimizer error when final number of reps is too small."""
+
+    def __init__(self, reps_end: int, reps_start: int) -> None:
+        super().__init__(f"{reps_end=} needs to be larger than {reps_start=}.")
+
+
+class QAOAParameterRepsMismatchError(SolveBaseError):
+    """QAOA circuit params mismatch the specified reps."""
+
+    def __init__(self, params_reps: int, reps: int) -> None:
+        super().__init__(f"{params_reps=} needs to match {reps=}.")

luna_quantum/solve/usecases/solve_job_get_by_id_usecase.py

@@ -0,0 +1,44 @@
+from typing import TYPE_CHECKING
+
+from luna_quantum.client.interfaces.services.luna_solve_i import ILunaSolve
+from luna_quantum.solve.domain.solve_job import SolveJob
+from luna_quantum.solve.interfaces.usecases.solve_job_get_by_id_usecase_i import (
+    ISolveJobGetByIdUseCase,
+)
+from luna_quantum.util.log_utils import Logging, progress
+
+if TYPE_CHECKING:
+    from luna_quantum.client.schemas.solve_job import SolveJobSchema
+
+
+class SolveJobGetByIdUseCase(ISolveJobGetByIdUseCase):
+    """
+    Represent an abstract base to retrieve a solve-job by its id.
+
+    This class interacts with a backend client to retrieve a solve job by its id.
+
+    Attributes
+    ----------
+    client : ILunaSolve
+        Client used to retrieve the solve job.
+    """
+
+    client: ILunaSolve
+    logger = Logging.get_logger(__name__)
+
+    def __init__(self, client: ILunaSolve) -> None:
+        self.client = client
+
+    @progress(total=None, desc="Retrieving solve job by id...")
+    def __call__(self, solve_job_id: str) -> SolveJob:
+        """
+        Retive a solve-job by its id.
+
+        Parameters
+        ----------
+        solve_job_id : str
+            The id of the solve-job to retrieve.
+        """
+        solve_job: SolveJobSchema = self.client.solve_job.get(solve_job_id=solve_job_id)
+
+        return SolveJob.model_validate(solve_job.model_dump())
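
A minimal usage sketch of the new use case. Here `client` stands in for an already-configured `ILunaSolve` implementation and the job id is a hypothetical placeholder; how the client is obtained is outside this diff.

```python
# Sketch: retrieve a SolveJob by id via the new use case added in rc4.
from luna_quantum.solve.usecases.solve_job_get_by_id_usecase import (
    SolveJobGetByIdUseCase,
)

get_by_id = SolveJobGetByIdUseCase(client)          # client: ILunaSolve (assumed given)
solve_job = get_by_id("example-solve-job-id")       # returns a validated SolveJob
```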

luna_quantum/solve/usecases/solve_job_get_result_usecase.py

@@ -2,6 +2,9 @@ from luna_quantum._core import Solution
 from luna_quantum.client.interfaces.services.luna_solve_i import ILunaSolve
 from luna_quantum.client.schemas.enums.call_style import CallStyle
 from luna_quantum.client.schemas.enums.status import StatusEnum
+from luna_quantum.exceptions.luna_quantum_call_type_error import (
+    LunaQuantumCallStyleError,
+)
 from luna_quantum.solve.domain.solve_job import SolveJob
 from luna_quantum.solve.interfaces.usecases.solve_job_get_result_usecase_i import (
     ISolveJobGetResultUseCase,
@@ -61,19 +64,26 @@ class SolveJobGetResultUseCase(ISolveJobGetResultUseCase):
            The solution for the given solve job if successfully processed, otherwise
            None.
        """
-        if call_style is CallStyle.ACTIVE_WAITING:
-            final_states = StatusEnum.CANCELED, StatusEnum.DONE, StatusEnum.FAILED
+        match call_style:
+            case CallStyle.ACTIVE_WAITING:
+                final_states = StatusEnum.CANCELED, StatusEnum.DONE, StatusEnum.FAILED
 
-            ActiveWaiting.run(
-                loop_check=lambda: solve_job.get_status(
-                    client=self.client, status_source="remote"
+                ActiveWaiting.run(
+                    loop_check=lambda: solve_job.get_status(
+                        client=self.client, status_source="remote"
+                    )
+                    not in final_states,
+                    loop_call=None,
+                    sleep_time_max=sleep_time_max,
+                    sleep_time_increment=sleep_time_increment,
+                    sleep_time_initial=sleep_time_initial,
                 )
-                not in final_states,
-                loop_call=None,
-                sleep_time_max=sleep_time_max,
-                sleep_time_increment=sleep_time_increment,
-                sleep_time_initial=sleep_time_initial,
-            )
+            case CallStyle.SINGLE_FETCH:
+                solve_job.get_status(client=self.client, status_source="remote")
+
+            case _:
+                raise LunaQuantumCallStyleError(call_style)
+
         try:
             if solve_job.status == StatusEnum.CANCELED:
                 self.logger.warning(
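
A sketch of the two call styles distinguished by the new `match` statement. The constructor and full `__call__` signature of `SolveJobGetResultUseCase` are not shown in this hunk, so the keyword usage below is an assumption based on the names that do appear (`call_style`, `sleep_time_*`).

```python
# Sketch: behavior per CallStyle in rc4. Any other value now raises
# LunaQuantumCallStyleError instead of silently skipping the status update.
from luna_quantum.client.schemas.enums.call_style import CallStyle
from luna_quantum.solve.usecases.solve_job_get_result_usecase import (
    SolveJobGetResultUseCase,
)

get_result = SolveJobGetResultUseCase(client)  # client: ILunaSolve (assumed given)

# Polls the remote status until it reaches CANCELED, DONE or FAILED:
solution = get_result(solve_job, call_style=CallStyle.ACTIVE_WAITING)

# Fetches the remote status exactly once and returns whatever is available:
solution = get_result(solve_job, call_style=CallStyle.SINGLE_FETCH)
```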

{luna_quantum-1.0.8rc2.dist-info → luna_quantum-1.0.8rc4.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: luna-quantum
-Version: 1.0.8rc2
+Version: 1.0.8rc4
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent