luna-quantum 1.0.4rc3__cp313-cp313-win_amd64.whl → 1.0.5__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of luna-quantum might be problematic.

Files changed (29)
  1. luna_quantum/_core.cp313-win_amd64.pyd +0 -0
  2. luna_quantum/_core.pyi +56 -2
  3. luna_quantum/client/controllers/luna_platform_client.py +122 -21
  4. luna_quantum/client/controllers/luna_q.py +6 -1
  5. luna_quantum/client/controllers/luna_solve.py +5 -1
  6. luna_quantum/client/interfaces/services/service_i.py +20 -0
  7. luna_quantum/client/rest_client/info_rest_client.py +1 -3
  8. luna_quantum/client/schemas/solve_job.py +3 -0
  9. luna_quantum/decorators.py +8 -7
  10. luna_quantum/factories/luna_solve_client_factory.py +28 -3
  11. luna_quantum/solve/domain/abstract/luna_algorithm.py +5 -3
  12. luna_quantum/solve/domain/model_metadata.py +3 -1
  13. luna_quantum/solve/domain/solve_job.py +14 -5
  14. luna_quantum/solve/interfaces/algorithm_i.py +3 -1
  15. luna_quantum/solve/parameters/algorithms/__init__.py +4 -0
  16. luna_quantum/solve/parameters/algorithms/lq_fda/__init__.py +9 -0
  17. luna_quantum/solve/parameters/algorithms/lq_fda/fujits_da_base.py +85 -0
  18. luna_quantum/solve/parameters/algorithms/lq_fda/fujitsu_da_cpu.py +125 -0
  19. luna_quantum/solve/parameters/algorithms/lq_fda/fujitsu_da_v3c.py +155 -0
  20. luna_quantum/solve/parameters/algorithms/lq_fda/fujitsu_da_v4.py +155 -0
  21. luna_quantum/solve/parameters/backends/__init__.py +2 -0
  22. luna_quantum/solve/parameters/backends/fda.py +17 -0
  23. luna_quantum/transformations.pyi +5 -1
  24. luna_quantum/util/pretty_base.py +1 -1
  25. {luna_quantum-1.0.4rc3.dist-info → luna_quantum-1.0.5.dist-info}/METADATA +1 -1
  26. {luna_quantum-1.0.4rc3.dist-info → luna_quantum-1.0.5.dist-info}/RECORD +29 -23
  27. {luna_quantum-1.0.4rc3.dist-info → luna_quantum-1.0.5.dist-info}/WHEEL +1 -1
  28. {luna_quantum-1.0.4rc3.dist-info → luna_quantum-1.0.5.dist-info}/licenses/LICENSE +0 -0
  29. {luna_quantum-1.0.4rc3.dist-info → luna_quantum-1.0.5.dist-info}/licenses/NOTICE +0 -0

luna_quantum/solve/parameters/algorithms/lq_fda/fujits_da_base.py
@@ -0,0 +1,85 @@
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from pydantic import Field
+
+ from luna_quantum.solve.domain.abstract import LunaAlgorithm
+ from luna_quantum.solve.parameters.backends import Fujitsu
+
+
+ class FujitsuDABase(LunaAlgorithm[Fujitsu]):
+     """Fujitsu Digital Annealer base parameters.
+
+     Parameters
+     ----------
+     scaling_action: Literal["NOTHING", "SCALING", "AUTO_SCALING"]
+         Method for scaling ``qubo`` and determining temperatures:
+         - "NOTHING": No action (use parameters exactly as specified)
+         - "SCALING": ``scaling_factor`` is multiplied to ``qubo``,
+           ``temperature_start``, ``temperature_end`` and ``offset_increase_rate``.
+         - "AUTO_SCALING": A maximum scaling factor w.r.t. ``scaling_bit_precision``
+           is multiplied to ``qubo``, ``temperature_start``, ``temperature_end`` and
+           ``offset_increase_rate``.
+     scaling_factor: int | float
+         Multiplicative factor applied to model coefficients, temperatures, and
+         other parameters: the ``scaling_factor`` for ``qubo``, ``temperature_start``,
+         ``temperature_end`` and ``offset_increase_rate``.
+         Higher values can improve numerical precision but may lead to overflow.
+         Default is 1.0 (no scaling).
+     scaling_bit_precision: int
+         Maximum bit precision to use when scaling. Determines the maximum allowable
+         coefficient magnitude. Default is 64, using full double precision.
+     random_seed: int | None
+         Seed for random number generation to ensure reproducible results.
+         Must be between 0 and 9_999. Default is None (random seed).
+     penalty_factor: float
+         Penalty factor used to scale the equality constraint penalty function,
+         default 1.0.
+     inequality_factor: int
+         Penalty factor used to scale the inequality constraints, default 1.
+     remove_ohg_from_penalty: bool
+         If True, equality constraints identified as one-hot constraints are only
+         handled within their one-hot groups, i.e., identified one-hot constraints
+         are not added to the penalty function. Default True.
+     """
+
+     scaling_action: Literal["NOTHING", "SCALING", "AUTO_SCALING"] = "NOTHING"
+     scaling_factor: int | float = 1.0
+     scaling_bit_precision: int = 64
+     random_seed: int | None = Field(default=None, ge=0, le=9_999)
+
+     penalty_factor: float = 1.0
+     inequality_factor: int = 1
+     remove_ohg_from_penalty: bool = True
+
+     @classmethod
+     def get_default_backend(cls) -> Fujitsu:
+         """
+         Return the default backend implementation.
+
+         This method must be implemented by subclasses to provide
+         the default backend instance to use when no specific backend
+         is specified.
+
+         Returns
+         -------
+         IBackend
+             An instance of a class implementing the IBackend interface that serves
+             as the default backend.
+         """
+         return Fujitsu()
+
+     @classmethod
+     def get_compatible_backends(cls) -> tuple[type[Fujitsu], ...]:
+         """
+         Return the backend types that are compatible with this algorithm.
+
+         Returns
+         -------
+         tuple[type[IBackend], ...]
+             The backend classes that can be used with this algorithm.
+         """
+         return (Fujitsu,)
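
The base class above centralizes the scaling and penalty parameters shared by all Fujitsu DA variants and pins the new Fujitsu backend as both the default and the only compatible backend. A minimal sketch of how those two hooks could be exercised, assuming the module is importable under the path shown in the file list (the contents of lq_fda/__init__.py are not part of this hunk):

from luna_quantum.solve.parameters.algorithms.lq_fda.fujits_da_base import FujitsuDABase
from luna_quantum.solve.parameters.backends import Fujitsu

# get_compatible_backends() lists the backend classes this algorithm family accepts.
compatible = FujitsuDABase.get_compatible_backends()
assert Fujitsu in compatible

# get_default_backend() returns a ready-to-use Fujitsu() instance.
backend = FujitsuDABase.get_default_backend()
assert isinstance(backend, compatible)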

luna_quantum/solve/parameters/algorithms/lq_fda/fujitsu_da_cpu.py
@@ -0,0 +1,125 @@
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from pydantic import Field
+
+ from .fujits_da_base import FujitsuDABase
+
+
+ class FujitsuDACpu(FujitsuDABase):
+     r"""
+     Parameters for the Fujitsu Digital Annealer (CPU).
+
+     Attributes
+     ----------
+     optimization_method: Literal["annealing", "parallel_tempering"]
+         Algorithm to use for optimization:
+         - "annealing": Standard simulated annealing with gradual cooling
+         - "parallel_tempering": Simultaneous runs at different temperatures with
+           periodic state exchanges, effective for complex energy landscapes
+         Default is "annealing".
+     number_runs: int
+         Number of stochastically independent runs. Default: 2, Min: 1, Max: 128
+     number_replicas: int
+         Number of replicas in parallel tempering. Default: 5, Min: 5, Max: 128
+     number_iterations: int
+         Total number of iterations per run. Default: 1_000, Min: 1, Max: 100_000_000
+     temperature_sampling: bool
+         Temperatures. Default: True
+     temperature_start: float
+         Initial temperature for the annealing process. Higher values enable more
+         exploration initially. Default is 1000.0. Range: [0.0, 1e20].
+     temperature_end: float
+         Final temperature for the annealing process. Lower values enforce more
+         exploitation in final phases. Default is 1.0. Range: [0.0, 1e20].
+     temperature_mode: int
+         Cooling curve mode for temperature decay:
+         - 0: Exponential cooling - Reduce temperature by factor
+           :math:`(1-temperature\\_decay)` every ``temperature_interval`` steps
+         - 1: Inverse cooling - Reduce temperature by factor
+           :math:`(1-temperature\\_decay*temperature)` every ``temperature_interval``
+           steps
+         - 2: Inverse root cooling - Reduce temperature by factor
+           :math:`(1-temperature\\_decay*temperature^2)` every
+           ``temperature_interval`` steps.
+         Default is 0 (exponential).
+     temperature_interval: int
+         Number of iterations between temperature adjustments. Larger values
+         allow more exploration at each temperature. Default is 1. Range: [1, 1e20].
+     offset_increase_rate: float
+         Rate at which the dynamic offset increases when no bit is selected.
+         Set to 0.0 to switch off the dynamic energy feature.
+         Helps escape plateaus in the energy landscape. Default is 5.0.
+         Range: [0.0, 1e20].
+     pt_temperature_model: Literal['Linear', 'Exponential', 'Hukushima']
+         Temperature model for the furnace temperature distribution of the parallel
+         tempering process. Default: 'Exponential'
+     pt_replica_exchange_model: Literal['Neighbours', 'Far jump']
+         Replica exchange model for the parallel tempering process.
+         Default: "Neighbours"
+     solution_mode: Literal["QUICK", "COMPLETE"]
+         Determines solution reporting strategy:
+         - "QUICK": Return only the overall best solution (faster)
+         - "COMPLETE": Return best solutions from all runs (more diverse)
+         Default is "COMPLETE", providing more solution options.
+     scaling_action: Literal["NOTHING", "SCALING", "AUTO_SCALING"]
+         Method for scaling ``qubo`` and determining temperatures:
+         - "NOTHING": No action (use parameters exactly as specified)
+         - "SCALING": ``scaling_factor`` is multiplied to ``qubo``,
+           ``temperature_start``, ``temperature_end`` and ``offset_increase_rate``.
+         - "AUTO_SCALING": A maximum scaling factor w.r.t. ``scaling_bit_precision``
+           is multiplied to ``qubo``, ``temperature_start``, ``temperature_end`` and
+           ``offset_increase_rate``.
+     scaling_factor: int | float
+         Multiplicative factor applied to model coefficients, temperatures, and
+         other parameters: the ``scaling_factor`` for ``qubo``, ``temperature_start``,
+         ``temperature_end`` and ``offset_increase_rate``.
+         Higher values can improve numerical precision but may lead to overflow.
+         Default is 1.0 (no scaling).
+     scaling_bit_precision: int
+         Maximum bit precision to use when scaling. Determines the maximum allowable
+         coefficient magnitude. Default is 64, using full double precision.
+     random_seed: int | None
+         Seed for random number generation to ensure reproducible results.
+         Must be between 0 and 9_999. Default is None (random seed).
+     penalty_factor: float
+         Penalty factor used to scale the equality constraint penalty function,
+         default 1.0.
+     inequality_factor: int
+         Penalty factor used to scale the inequality constraints, default 1.
+     remove_ohg_from_penalty: bool
+         If True, equality constraints identified as one-hot constraints are only
+         handled within their one-hot groups, i.e., identified one-hot constraints
+         are not added to the penalty function. Default True.
+     """
+
+     optimization_method: Literal["annealing", "parallel_tempering"] = "annealing"
+     number_runs: int = Field(default=2, ge=1, le=128)
+     number_replicas: int = Field(default=5, ge=5, le=128)
+     number_iterations: int = Field(default=1_000, ge=1, le=100_000_000)
+     temperature_sampling: bool = True
+     temperature_start: float = Field(default=1_000.0, ge=0.0, le=1e20)
+     temperature_end: float = Field(default=1.0, ge=0.0, le=1e20)
+     temperature_mode: int = 0
+     temperature_interval: int = Field(default=1, ge=1, le=int(1e20))
+     offset_increase_rate: float = Field(default=5.0, ge=0.0, le=1e20)
+     pt_temperature_model: Literal["Linear", "Exponential", "Hukushima"] = "Exponential"
+     pt_replica_exchange_model: Literal["Neighbours", "Far jump"] = "Neighbours"
+     solution_mode: Literal["QUICK", "COMPLETE"] = "COMPLETE"
+
+     @property
+     def algorithm_name(self) -> str:
+         """
+         Returns the name of the algorithm.
+
+         This abstract property method is intended to be overridden by subclasses.
+         It should provide the name of the algorithm being implemented.
+
+         Returns
+         -------
+         str
+             The name of the algorithm.
+         """
+         return "FDACPU"

luna_quantum/solve/parameters/algorithms/lq_fda/fujitsu_da_v3c.py
@@ -0,0 +1,155 @@
+ from pydantic import Field
+
+ from .fujits_da_base import FujitsuDABase
+
+
+ class FujitsuDAv3c(FujitsuDABase):
+     """
+     Parameters for the Fujitsu Digital Annealer (v3c).
+
+     Attributes
+     ----------
+     time_limit_sec: int | None
+         Maximum running time of the DA in seconds. ``time_limit_sec`` should be
+         selected according to problem hardness and size (number of bits).
+         Min: 1, Max: 3600
+     target_energy: int | None
+         Threshold energy for fast exit. This may not work correctly if the specified
+         value is larger than its max value or lower than its min value.
+         Min: -99_999_999_999, Max: 99_999_999_999
+     num_group: int
+         Number of independent optimization processes. Increasing the number of
+         independent optimization processes leads to better coverage of the search
+         space. Note: increasing this number also requires increasing
+         ``time_limit_sec`` so that the search time for each process is sufficient.
+         Default: 1, Min: 1, Max: 16
+     num_solution: int
+         Number of solutions maintained and updated by each optimization process.
+         Default: 16, Min: 1, Max: 1024
+     num_output_solution: int
+         Maximal number of the best solutions returned by each optimization.
+         Total number of results is ``num_solution`` * ``num_group``.
+         Default: 5, Min: 1, Max: 1024
+     gs_num_iteration_factor: int
+         The maximal number of iterations in one epoch of the global search in each
+         optimization is ``gs_num_iteration_factor`` * *number of bits*.
+         Default: 5, Min: 0, Max: 100
+     gs_num_iteration_cl: int
+         Maximal number of iterations without improvement in one epoch of the global
+         search in each optimization before terminating and continuing with the next
+         epoch. For problems with very deep local minima a very low value is helpful.
+         Default: 800, Min: 0, Max: 1_000_000
+     gs_ohs_xw1h_num_iteration_factor: int
+         The maximal number of iterations in one epoch of the global search in each
+         optimization is ``gs_ohs_xw1h_num_iteration_factor`` * *number of bits*.
+         Only used when a 1Hot search is defined. Default: 3, Min: 0, Max: 100
+     gs_ohs_xw1h_num_iteration_cl: int
+         Maximal number of iterations without improvement in one epoch of the global
+         search in each optimization before terminating and continuing with the next
+         epoch. For problems with very deep local minima a very low value is helpful.
+         Only used when a 1Hot search is defined.
+         Default: 100, Min: 0, Max: 1_000_000
+     ohs_xw1h_internal_penalty: int
+         Mode of 1hot penalty constraint generation.
+         - 0: internal penalty generation off; the 1hot constraint must be part of
+           the penalty polynomial
+         - 1: internal penalty generation on; the 1hot constraint must not be part
+           of the penalty polynomial
+         If a 1way 1hot constraint or a 2way 1hot constraint is specified,
+         ``ohs_xw1h_internal_penalty`` = 1 is recommended.
+         Default: 0, Min: 0, Max: 1
+     gs_penalty_auto_mode: int
+         Parameter to choose whether to automatically and incrementally adapt
+         ``gs_penalty_coef`` to the optimal value.
+         - 0: Use ``gs_penalty_coef`` as the fixed factor to weight the penalty
+           polynomial during optimization.
+         - 1: Start with ``gs_penalty_coef`` as weight factor for the penalty
+           polynomial and automatically and incrementally increase this factor
+           during optimization by multiplying ``gs_penalty_inc_rate`` / 100
+           repeatedly until ``gs_max_penalty_coef`` is reached or the penalty
+           energy is zero.
+         Default: 1, Min: 0, Max: 1
+     gs_penalty_coef: int
+         Factor to weight the penalty polynomial. If ``gs_penalty_auto_mode`` is 0,
+         this value does not change. If ``gs_penalty_auto_mode`` is 1, this initial
+         weight factor is repeatedly increased by ``gs_penalty_inc_rate`` until
+         ``gs_max_penalty_coef`` is reached or the penalty energy is zero.
+         Default: 1, Min: 1, Max: 9_223_372_036_854_775_807
+     gs_penalty_inc_rate: int
+         Only used if ``gs_penalty_auto_mode`` is 1. In this case, the initial weight
+         factor ``gs_penalty_coef`` for the penalty polynomial is repeatedly
+         increased by multiplying ``gs_penalty_inc_rate`` / 100 until
+         ``gs_max_penalty_coef`` is reached or the penalty energy is zero.
+         Default: 150, Min: 100, Max: 200
+     gs_max_penalty_coef: int
+         Maximal value for the penalty coefficient. If ``gs_penalty_auto_mode`` is 0,
+         this is the maximal value for ``gs_penalty_coef``.
+         If ``gs_penalty_auto_mode`` is 1, this is the maximal value to which
+         ``gs_penalty_coef`` can be increased during the automatic adjustment.
+         If ``gs_max_penalty_coef`` is set to 0, then the maximal penalty coefficient
+         is 2^63 - 1.
+         Default: 0, Min: 0, Max: 9_223_372_036_854_775_807
+
+     scaling_action: Literal["NOTHING", "SCALING", "AUTO_SCALING"]
+         Method for scaling ``qubo`` and determining temperatures:
+         - "NOTHING": No action (use parameters exactly as specified)
+         - "SCALING": ``scaling_factor`` is multiplied to ``qubo``,
+           ``temperature_start``, ``temperature_end`` and ``offset_increase_rate``.
+         - "AUTO_SCALING": A maximum scaling factor w.r.t. ``scaling_bit_precision``
+           is multiplied to ``qubo``, ``temperature_start``, ``temperature_end`` and
+           ``offset_increase_rate``.
+     scaling_factor: int | float
+         Multiplicative factor applied to model coefficients, temperatures, and
+         other parameters: the ``scaling_factor`` for ``qubo``, ``temperature_start``,
+         ``temperature_end`` and ``offset_increase_rate``.
+         Higher values can improve numerical precision but may lead to overflow.
+         Default is 1.0 (no scaling).
+     scaling_bit_precision: int
+         Maximum bit precision to use when scaling. Determines the maximum allowable
+         coefficient magnitude. Default is 64, using full double precision.
+     random_seed: int | None
+         Seed for random number generation to ensure reproducible results.
+         Must be between 0 and 9_999. Default is None (random seed).
+     penalty_factor: float
+         Penalty factor used to scale the equality constraint penalty function,
+         default 1.0.
+     inequality_factor: int
+         Penalty factor used to scale the inequality constraints, default 1.
+     remove_ohg_from_penalty: bool
+         If True, equality constraints identified as one-hot constraints are only
+         handled within their one-hot groups, i.e., identified one-hot constraints
+         are not added to the penalty function. Default True.
+     """
+
+     time_limit_sec: int | None = Field(default=None, ge=1, le=3600)
+     target_energy: int | None = Field(
+         default=None, ge=-99_999_999_999, le=99_999_999_999
+     )
+     num_group: int = Field(default=1, ge=1, le=16)
+     num_solution: int = Field(default=16, ge=1, le=1024)
+     num_output_solution: int = Field(default=5, ge=1, le=1024)
+     gs_num_iteration_factor: int = Field(default=5, ge=0, le=100)
+     gs_num_iteration_cl: int = Field(default=800, ge=0, le=1_000_000)
+     gs_ohs_xw1h_num_iteration_factor: int = Field(default=3, ge=0, le=100)
+     gs_ohs_xw1h_num_iteration_cl: int = Field(default=100, ge=0, le=1_000_000)
+     ohs_xw1h_internal_penalty: int = Field(default=0, ge=0, le=1)
+     gs_penalty_auto_mode: int = Field(default=1, ge=0, le=1)
+     gs_penalty_coef: int = Field(default=1, ge=1, le=2**63 - 1)
+     gs_penalty_inc_rate: int = Field(default=150, ge=100, le=200)
+     gs_max_penalty_coef: int = Field(default=0, ge=0, le=2**63 - 1)
+
+     @property
+     def algorithm_name(self) -> str:
+         """
+         Returns the name of the algorithm.
+
+         This abstract property method is intended to be overridden by subclasses.
+         It should provide the name of the algorithm being implemented.
+
+         Returns
+         -------
+         str
+             The name of the algorithm.
+         """
+         return "FDAV3C"

luna_quantum/solve/parameters/algorithms/lq_fda/fujitsu_da_v4.py
@@ -0,0 +1,155 @@
+ from pydantic import Field
+
+ from .fujits_da_base import FujitsuDABase
+
+
+ class FujitsuDAv4(FujitsuDABase):
+     """
+     Parameters for the Fujitsu Digital Annealer (v4).
+
+     Attributes
+     ----------
+     time_limit_sec: int | None
+         Maximum running time of the DA in seconds. ``time_limit_sec`` should be
+         selected according to problem hardness and size (number of bits).
+         Min: 1, Max: 3600
+     target_energy: int | None
+         Threshold energy for fast exit. This may not work correctly if the specified
+         value is larger than its max value or lower than its min value.
+         Min: -99_999_999_999, Max: 99_999_999_999
+     num_group: int
+         Number of independent optimization processes. Increasing the number of
+         independent optimization processes leads to better coverage of the search
+         space. Note: increasing this number also requires increasing
+         ``time_limit_sec`` so that the search time for each process is sufficient.
+         Default: 1, Min: 1, Max: 16
+     num_solution: int
+         Number of solutions maintained and updated by each optimization process.
+         Default: 16, Min: 1, Max: 1024
+     num_output_solution: int
+         Maximal number of the best solutions returned by each optimization.
+         Total number of results is ``num_solution`` * ``num_group``.
+         Default: 5, Min: 1, Max: 1024
+     gs_num_iteration_factor: int
+         The maximal number of iterations in one epoch of the global search in each
+         optimization is ``gs_num_iteration_factor`` * *number of bits*.
+         Default: 5, Min: 0, Max: 100
+     gs_num_iteration_cl: int
+         Maximal number of iterations without improvement in one epoch of the global
+         search in each optimization before terminating and continuing with the next
+         epoch. For problems with very deep local minima a very low value is helpful.
+         Default: 800, Min: 0, Max: 1_000_000
+     gs_ohs_xw1h_num_iteration_factor: int
+         The maximal number of iterations in one epoch of the global search in each
+         optimization is ``gs_ohs_xw1h_num_iteration_factor`` * *number of bits*.
+         Only used when a 1Hot search is defined. Default: 3, Min: 0, Max: 100
+     gs_ohs_xw1h_num_iteration_cl: int
+         Maximal number of iterations without improvement in one epoch of the global
+         search in each optimization before terminating and continuing with the next
+         epoch. For problems with very deep local minima a very low value is helpful.
+         Only used when a 1Hot search is defined.
+         Default: 100, Min: 0, Max: 1_000_000
+     ohs_xw1h_internal_penalty: int
+         Mode of 1hot penalty constraint generation.
+         - 0: internal penalty generation off; the 1hot constraint must be part of
+           the penalty polynomial
+         - 1: internal penalty generation on; the 1hot constraint must not be part
+           of the penalty polynomial
+         If a 1way 1hot constraint or a 2way 1hot constraint is specified,
+         ``ohs_xw1h_internal_penalty`` = 1 is recommended.
+         Default: 0, Min: 0, Max: 1
+     gs_penalty_auto_mode: int
+         Parameter to choose whether to automatically and incrementally adapt
+         ``gs_penalty_coef`` to the optimal value.
+         - 0: Use ``gs_penalty_coef`` as the fixed factor to weight the penalty
+           polynomial during optimization.
+         - 1: Start with ``gs_penalty_coef`` as weight factor for the penalty
+           polynomial and automatically and incrementally increase this factor
+           during optimization by multiplying ``gs_penalty_inc_rate`` / 100
+           repeatedly until ``gs_max_penalty_coef`` is reached or the penalty
+           energy is zero.
+         Default: 1, Min: 0, Max: 1
+     gs_penalty_coef: int
+         Factor to weight the penalty polynomial. If ``gs_penalty_auto_mode`` is 0,
+         this value does not change. If ``gs_penalty_auto_mode`` is 1, this initial
+         weight factor is repeatedly increased by ``gs_penalty_inc_rate`` until
+         ``gs_max_penalty_coef`` is reached or the penalty energy is zero.
+         Default: 1, Min: 1, Max: 9_223_372_036_854_775_807
+     gs_penalty_inc_rate: int
+         Only used if ``gs_penalty_auto_mode`` is 1. In this case, the initial weight
+         factor ``gs_penalty_coef`` for the penalty polynomial is repeatedly
+         increased by multiplying ``gs_penalty_inc_rate`` / 100 until
+         ``gs_max_penalty_coef`` is reached or the penalty energy is zero.
+         Default: 150, Min: 100, Max: 200
+     gs_max_penalty_coef: int
+         Maximal value for the penalty coefficient. If ``gs_penalty_auto_mode`` is 0,
+         this is the maximal value for ``gs_penalty_coef``.
+         If ``gs_penalty_auto_mode`` is 1, this is the maximal value to which
+         ``gs_penalty_coef`` can be increased during the automatic adjustment.
+         If ``gs_max_penalty_coef`` is set to 0, then the maximal penalty coefficient
+         is 2^63 - 1.
+         Default: 0, Min: 0, Max: 9_223_372_036_854_775_807
+
+     scaling_action: Literal["NOTHING", "SCALING", "AUTO_SCALING"]
+         Method for scaling ``qubo`` and determining temperatures:
+         - "NOTHING": No action (use parameters exactly as specified)
+         - "SCALING": ``scaling_factor`` is multiplied to ``qubo``,
+           ``temperature_start``, ``temperature_end`` and ``offset_increase_rate``.
+         - "AUTO_SCALING": A maximum scaling factor w.r.t. ``scaling_bit_precision``
+           is multiplied to ``qubo``, ``temperature_start``, ``temperature_end`` and
+           ``offset_increase_rate``.
+     scaling_factor: int | float
+         Multiplicative factor applied to model coefficients, temperatures, and
+         other parameters: the ``scaling_factor`` for ``qubo``, ``temperature_start``,
+         ``temperature_end`` and ``offset_increase_rate``.
+         Higher values can improve numerical precision but may lead to overflow.
+         Default is 1.0 (no scaling).
+     scaling_bit_precision: int
+         Maximum bit precision to use when scaling. Determines the maximum allowable
+         coefficient magnitude. Default is 64, using full double precision.
+     random_seed: int | None
+         Seed for random number generation to ensure reproducible results.
+         Must be between 0 and 9_999. Default is None (random seed).
+     penalty_factor: float
+         Penalty factor used to scale the equality constraint penalty function,
+         default 1.0.
+     inequality_factor: int
+         Penalty factor used to scale the inequality constraints, default 1.
+     remove_ohg_from_penalty: bool
+         If True, equality constraints identified as one-hot constraints are only
+         handled within their one-hot groups, i.e., identified one-hot constraints
+         are not added to the penalty function. Default True.
+     """
+
+     time_limit_sec: int | None = Field(default=None, ge=1, le=3600)
+     target_energy: int | None = Field(
+         default=None, ge=-99_999_999_999, le=99_999_999_999
+     )
+     num_group: int = Field(default=1, ge=1, le=16)
+     num_solution: int = Field(default=16, ge=1, le=1024)
+     num_output_solution: int = Field(default=5, ge=1, le=1024)
+     gs_num_iteration_factor: int = Field(default=5, ge=0, le=100)
+     gs_num_iteration_cl: int = Field(default=800, ge=0, le=1_000_000)
+     gs_ohs_xw1h_num_iteration_factor: int = Field(default=3, ge=0, le=100)
+     gs_ohs_xw1h_num_iteration_cl: int = Field(default=100, ge=0, le=1_000_000)
+     ohs_xw1h_internal_penalty: int = Field(default=0, ge=0, le=1)
+     gs_penalty_auto_mode: int = Field(default=1, ge=0, le=1)
+     gs_penalty_coef: int = Field(default=1, ge=1, le=2**63 - 1)
+     gs_penalty_inc_rate: int = Field(default=150, ge=100, le=200)
+     gs_max_penalty_coef: int = Field(default=0, ge=0, le=2**63 - 1)
+
+     @property
+     def algorithm_name(self) -> str:
+         """
+         Returns the name of the algorithm.
+
+         This abstract property method is intended to be overridden by subclasses.
+         It should provide the name of the algorithm being implemented.
+
+         Returns
+         -------
+         str
+             The name of the algorithm.
+         """
+         return "FDAV4"

luna_quantum/solve/parameters/backends/__init__.py
@@ -2,6 +2,7 @@ from .aqarios import Aqarios
  from .aws import AWS, IQM, IonQ, Rigetti
  from .dwave import DWave
  from .dwave_qpu import DWaveQpu
+ from .fda import Fujitsu
  from .ibm import IBM
  from .qctrl import Qctrl
  from .zib import ZIB
@@ -14,6 +15,7 @@ __all__: list[str] = [
      "Aqarios",
      "DWave",
      "DWaveQpu",
+     "Fujitsu",
      "IonQ",
      "Qctrl",
      "Rigetti",

luna_quantum/solve/parameters/backends/fda.py
@@ -0,0 +1,17 @@
+ from luna_quantum.solve.interfaces.backend_i import IBackend
+
+
+ class Fujitsu(IBackend):
+     """Configuration class for the Fujitsu backend."""
+
+     @property
+     def provider(self) -> str:
+         """
+         Retrieve the name of the provider.
+
+         Returns
+         -------
+         str
+             The name of the provider.
+         """
+         return "fda"

luna_quantum/transformations.pyi
@@ -123,8 +123,12 @@ class TransformationOutcome:
      action: ActionType
      analysis: ...

+     @overload
+     def __init__(self, model: Model, action: ActionType) -> None: ...
+     @overload
+     def __init__(self, model: Model, action: ActionType, analysis: object) -> None: ...
      def __init__(
-         self, model: Model, action: ActionType, analysis: ... = None
+         self, model: Model, action: ActionType, analysis: object | None = ...
      ) -> None: ...
      @staticmethod
      def nothing(model: Model) -> TransformationOutcome:

luna_quantum/util/pretty_base.py
@@ -62,6 +62,6 @@ class PrettyBase(BaseModel):

          data = self.model_dump()

-         data_truncated, is_truncated = truncate(data, limit)
+         data_truncated, _ = truncate(data, limit)

          return self._pretty_print(data_truncated)

{luna_quantum-1.0.4rc3.dist-info → luna_quantum-1.0.5.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: luna-quantum
- Version: 1.0.4rc3
+ Version: 1.0.5
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent