pycombinatorial 2.1.4__tar.gz → 2.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/PKG-INFO +4 -3
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/README.md +3 -2
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/__init__.py +1 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/aco.py +31 -20
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/alns.py +20 -7
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_4.py +1 -1
- pycombinatorial-2.1.8/pyCombinatorial/algorithm/rss.py +246 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ssi.py +1 -1
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/PKG-INFO +4 -3
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/SOURCES.txt +1 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/setup.py +1 -1
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/LICENSE +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/__init__.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bb.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bf.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bhk.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/brkga.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bt.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/christofides.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/conc_hull.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/conv_hull.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/cw.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/eln.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/eo.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/frnn.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ga.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/gksp.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/grasp.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/hpn.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_c.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_f.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_n.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_r.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ksp.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/lns.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/mf.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/nn.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2_5.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2_5s.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2s.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_3.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_3s.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_4s.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_5.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_5s.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_or.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rl_double_ql.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rl_ql.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rl_sarsa.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rr.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rt.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_gui.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_itr.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_sct.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_shc.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_tabu.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_vns.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/sa.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/som.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/spfc_h.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/spfc_m.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/spfc_s.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/swp.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/tat.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/tbb.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/zs.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/utils/__init__.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/utils/graphs.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pyCombinatorial/utils/util.py +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/dependency_links.txt +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/requires.txt +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/top_level.txt +0 -0
- {pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/setup.cfg +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: pycombinatorial
|
|
3
|
-
Version: 2.1.
|
|
3
|
+
Version: 2.1.8
|
|
4
4
|
Summary: A library to solve TSP (Travelling Salesman Problem) using Exact Algorithms, Heuristics, Metaheuristics and Reinforcement Learning
|
|
5
5
|
Home-page: https://github.com/Valdecy/pyCombinatorial
|
|
6
6
|
Author: Valdecy Pereira
|
|
@@ -15,7 +15,7 @@ License-File: LICENSE
|
|
|
15
15
|
|
|
16
16
|
**pyCombinatorial** is a Python-based library designed to tackle the classic Travelling Salesman Problem (TSP) through a diverse set of **Exact Algorithms**, **Heuristics**, **Metaheuristics** and **Reinforcement Learning**. It brings together both well-established and cutting-edge methodologies, offering end-users a flexible toolkit to generate high-quality solutions for TSP instances of various sizes and complexities.
|
|
17
17
|
|
|
18
|
-
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
18
|
+
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **Randomized Spectral Seriation**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
19
19
|
|
|
20
20
|
## Usage
|
|
21
21
|
|
|
@@ -111,6 +111,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
111
111
|
- Nearest Neighbour ([ Colab Demo ](https://colab.research.google.com/drive/1aL1kYXgSjUJYPfYSMy_0SWq4hJ3nrueJ?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/S0166-218X(01)00195-0))
|
|
112
112
|
- Random Insertion ([ Colab Demo ](https://colab.research.google.com/drive/1RP_grqrTXyDkHOLB_L1H8TkvxdLli5hG?usp=sharing)) ( [ Paper ](https://disco.ethz.ch/courses/fs16/podc/readingAssignment/1.pdf))
|
|
113
113
|
- Random Tour ([ Colab Demo ](https://colab.research.google.com/drive/1DPXMJXInkGKTyVFDAQ2bKXjglhy3DaCS?usp=sharing)) ( [ Paper ](https://doi.org/10.1023/A:1011263204536))
|
|
114
|
+
- Randomized Spectral Seriation ([ Colab Demo ](https://colab.research.google.com/drive/1PTtO6HJfftsFZEScCYZvzpBJsgMyjBCY?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
114
115
|
- RL Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1dnZhLAzQdz9kzxKrVcwMECWbyEKkZ7St?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
115
116
|
- RL Double Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1VTv8A6Ac-LvBxsereFyGRfkiLRbJI547?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
116
117
|
- RL S.A.R.S.A ([ Colab Demo ](https://colab.research.google.com/drive/1q9hon3jFf8xVCw4idxhu7goLREKbQ6N3?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
@@ -121,7 +122,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
121
122
|
- Space Filling Curve (Hilbert) ([ Colab Demo ](https://colab.research.google.com/drive/1FXzWrUBjdbJBngRFHv66CZw5pFN3yOs8?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/0960-0779(95)80046-J))
|
|
122
123
|
- Space Filling Curve (Morton) ([ Colab Demo ](https://colab.research.google.com/drive/1Z13kXyi7eaNQbBUmhvwuQjY4VaUfGVbs?usp=sharing)) ( [ Paper ](https://dominoweb.draco.res.ibm.com/reports/Morton1966.pdf))
|
|
123
124
|
- Space Filling Curve (Sierpinski) ([ Colab Demo ](https://colab.research.google.com/drive/1w-Zptd5kOryCwvQ0qSNBNhPXC61c8QXF?usp=sharing)) ( [ Paper ](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.67.9061&rep=rep1&type=pdf))
|
|
124
|
-
- Spectral Seriation Initializer ([ Colab Demo ](https://colab.research.google.com/drive/1lG0pYxASU75qh0jK-A_eMCoPpCWv0I4V?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/
|
|
125
|
+
- Spectral Seriation Initializer ([ Colab Demo ](https://colab.research.google.com/drive/1lG0pYxASU75qh0jK-A_eMCoPpCWv0I4V?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
125
126
|
- Stochastic Hill Climbing ([ Colab Demo ](https://colab.research.google.com/drive/1_wP6vg4JoRHGItGxEtXcf9Y9OuuoDlDl?usp=sharing)) ( [ Paper ](http://aima.cs.berkeley.edu/))
|
|
126
127
|
- Sweep ([ Colab Demo ](https://colab.research.google.com/drive/1AkAn4yeomAp6POBslk3Asd6OrxfBrHT7?usp=sharing)) ( [ Paper ](http://dx.doi.org/10.1287/opre.22.2.340))
|
|
127
128
|
- Tabu Search ([ Colab Demo ](https://colab.research.google.com/drive/1SRwQrBaxkKk18SDvQPy--0yNRWdl6Y1G?usp=sharing)) ( [ Paper ](https://doi.org/10.1287/ijoc.1.3.190))
|
|
@@ -4,7 +4,7 @@
|
|
|
4
4
|
|
|
5
5
|
**pyCombinatorial** is a Python-based library designed to tackle the classic Travelling Salesman Problem (TSP) through a diverse set of **Exact Algorithms**, **Heuristics**, **Metaheuristics** and **Reinforcement Learning**. It brings together both well-established and cutting-edge methodologies, offering end-users a flexible toolkit to generate high-quality solutions for TSP instances of various sizes and complexities.
|
|
6
6
|
|
|
7
|
-
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
7
|
+
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **Randomized Spectral Seriation**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
8
8
|
|
|
9
9
|
## Usage
|
|
10
10
|
|
|
@@ -100,6 +100,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
100
100
|
- Nearest Neighbour ([ Colab Demo ](https://colab.research.google.com/drive/1aL1kYXgSjUJYPfYSMy_0SWq4hJ3nrueJ?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/S0166-218X(01)00195-0))
|
|
101
101
|
- Random Insertion ([ Colab Demo ](https://colab.research.google.com/drive/1RP_grqrTXyDkHOLB_L1H8TkvxdLli5hG?usp=sharing)) ( [ Paper ](https://disco.ethz.ch/courses/fs16/podc/readingAssignment/1.pdf))
|
|
102
102
|
- Random Tour ([ Colab Demo ](https://colab.research.google.com/drive/1DPXMJXInkGKTyVFDAQ2bKXjglhy3DaCS?usp=sharing)) ( [ Paper ](https://doi.org/10.1023/A:1011263204536))
|
|
103
|
+
- Randomized Spectral Seriation ([ Colab Demo ](https://colab.research.google.com/drive/1PTtO6HJfftsFZEScCYZvzpBJsgMyjBCY?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
103
104
|
- RL Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1dnZhLAzQdz9kzxKrVcwMECWbyEKkZ7St?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
104
105
|
- RL Double Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1VTv8A6Ac-LvBxsereFyGRfkiLRbJI547?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
105
106
|
- RL S.A.R.S.A ([ Colab Demo ](https://colab.research.google.com/drive/1q9hon3jFf8xVCw4idxhu7goLREKbQ6N3?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
@@ -110,7 +111,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
110
111
|
- Space Filling Curve (Hilbert) ([ Colab Demo ](https://colab.research.google.com/drive/1FXzWrUBjdbJBngRFHv66CZw5pFN3yOs8?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/0960-0779(95)80046-J))
|
|
111
112
|
- Space Filling Curve (Morton) ([ Colab Demo ](https://colab.research.google.com/drive/1Z13kXyi7eaNQbBUmhvwuQjY4VaUfGVbs?usp=sharing)) ( [ Paper ](https://dominoweb.draco.res.ibm.com/reports/Morton1966.pdf))
|
|
112
113
|
- Space Filling Curve (Sierpinski) ([ Colab Demo ](https://colab.research.google.com/drive/1w-Zptd5kOryCwvQ0qSNBNhPXC61c8QXF?usp=sharing)) ( [ Paper ](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.67.9061&rep=rep1&type=pdf))
|
|
113
|
-
- Spectral Seriation Initializer ([ Colab Demo ](https://colab.research.google.com/drive/1lG0pYxASU75qh0jK-A_eMCoPpCWv0I4V?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/
|
|
114
|
+
- Spectral Seriation Initializer ([ Colab Demo ](https://colab.research.google.com/drive/1lG0pYxASU75qh0jK-A_eMCoPpCWv0I4V?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
114
115
|
- Stochastic Hill Climbing ([ Colab Demo ](https://colab.research.google.com/drive/1_wP6vg4JoRHGItGxEtXcf9Y9OuuoDlDl?usp=sharing)) ( [ Paper ](http://aima.cs.berkeley.edu/))
|
|
115
116
|
- Sweep ([ Colab Demo ](https://colab.research.google.com/drive/1AkAn4yeomAp6POBslk3Asd6OrxfBrHT7?usp=sharing)) ( [ Paper ](http://dx.doi.org/10.1287/opre.22.2.340))
|
|
116
117
|
- Tabu Search ([ Colab Demo ](https://colab.research.google.com/drive/1SRwQrBaxkKk18SDvQPy--0yNRWdl6Y1G?usp=sharing)) ( [ Paper ](https://doi.org/10.1287/ijoc.1.3.190))
|
|
@@ -39,6 +39,7 @@ from .rl_double_ql import double_q_learning
|
|
|
39
39
|
from .rl_ql import q_learning
|
|
40
40
|
from .rl_sarsa import sarsa
|
|
41
41
|
from .rr import ruin_and_recreate
|
|
42
|
+
from .rss import randomized_spectral_seriation
|
|
42
43
|
from .rt import random_tour
|
|
43
44
|
from .s_gui import guided_search
|
|
44
45
|
from .s_itr import iterated_search
|
|
@@ -62,41 +62,52 @@ def update_thau(distance_matrix, thau, city_list):
|
|
|
62
62
|
return thau
|
|
63
63
|
|
|
64
64
|
# Function: Generate Ant Paths
|
|
65
|
-
def ants_path(distance_matrix, h, thau, alpha, beta, full_list, ants, local_search):
|
|
66
|
-
best_path_distance = float(
|
|
65
|
+
def ants_path(distance_matrix, h, thau, alpha, beta, full_list, ants, local_search, deposit_mode = 'all'):
|
|
66
|
+
best_path_distance = float("inf")
|
|
67
67
|
best_city_list = None
|
|
68
|
+
all_tours = []
|
|
68
69
|
for _ in range(0, ants):
|
|
69
70
|
city_list = [np.random.choice(full_list)]
|
|
70
|
-
while
|
|
71
|
-
current_city
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
71
|
+
while len(city_list) < len(full_list):
|
|
72
|
+
current_city = city_list[-1]
|
|
73
|
+
candidates = [c for c in full_list if c not in city_list]
|
|
74
|
+
weights = []
|
|
75
|
+
for nxt in candidates:
|
|
76
|
+
tau = thau[current_city - 1, nxt - 1]
|
|
77
|
+
eta = h[current_city - 1, nxt - 1]
|
|
78
|
+
w = (tau ** alpha) * (eta ** beta)
|
|
79
|
+
weights.append(w)
|
|
80
|
+
weights = np.array(weights, dtype = float)
|
|
81
|
+
s = weights.sum()
|
|
82
|
+
if s <= 0 or not np.isfinite(s):
|
|
83
|
+
next_city = np.random.choice(candidates)
|
|
84
|
+
else:
|
|
85
|
+
probs = weights / s
|
|
86
|
+
next_city = np.random.choice(candidates, p = probs)
|
|
81
87
|
city_list.append(next_city)
|
|
82
88
|
path_distance = calculate_distance(distance_matrix, city_list)
|
|
83
|
-
if
|
|
84
|
-
best_city_list = copy.deepcopy(city_list)
|
|
89
|
+
if path_distance < best_path_distance:
|
|
85
90
|
best_path_distance = path_distance
|
|
86
|
-
|
|
87
|
-
|
|
91
|
+
best_city_list = copy.deepcopy(city_list)
|
|
92
|
+
if deposit_mode == "all":
|
|
93
|
+
all_tours.append((city_list, path_distance))
|
|
94
|
+
if local_search:
|
|
88
95
|
best_city_list, best_path_distance = local_search_2_opt(distance_matrix, city_tour = [best_city_list, best_path_distance])
|
|
89
|
-
|
|
96
|
+
if deposit_mode == "all":
|
|
97
|
+
for tour, dist in all_tours:
|
|
98
|
+
thau = update_thau(distance_matrix, thau, city_list = tour)
|
|
99
|
+
else:
|
|
100
|
+
thau = update_thau(distance_matrix, thau, city_list = best_city_list)
|
|
90
101
|
return best_city_list, best_path_distance, thau
|
|
91
102
|
|
|
92
103
|
############################################################################
|
|
93
104
|
|
|
94
105
|
# ACO Function
|
|
95
|
-
def ant_colony_optimization(distance_matrix, ants =
|
|
106
|
+
def ant_colony_optimization(distance_matrix, ants = 15, iterations = 100, alpha = 1, beta = 2, decay = 0.05, local_search = True, verbose = True):
|
|
96
107
|
count = 0
|
|
97
108
|
best_route = []
|
|
98
109
|
full_list = list(range(1, distance_matrix.shape[0] + 1))
|
|
99
|
-
distance =
|
|
110
|
+
distance = float('inf')
|
|
100
111
|
h = attractiveness(distance_matrix)
|
|
101
112
|
thau = np.ones((distance_matrix.shape[0], distance_matrix.shape[0]))
|
|
102
113
|
while (count <= iterations):
|
|
@@ -80,10 +80,12 @@ def local_search_2_opt(distance_matrix, city_tour, recursive_seeding = -1, verbo
|
|
|
80
80
|
# Function: Removal
|
|
81
81
|
def removal_operators():
|
|
82
82
|
def random_removal(city_tour, num_removals):
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
83
|
+
available = city_tour[1:]
|
|
84
|
+
if (len(available) == 0):
|
|
85
|
+
return []
|
|
86
|
+
k = int(num_removals)
|
|
87
|
+
k = max(1, min(k, len(available)))
|
|
88
|
+
return random.sample(available, k)
|
|
87
89
|
return [random_removal]
|
|
88
90
|
|
|
89
91
|
# Function: Insertion
|
|
@@ -104,7 +106,7 @@ def insertion_operators():
|
|
|
104
106
|
############################################################################
|
|
105
107
|
|
|
106
108
|
# Function: Adaptive Large Neighborhood Search
|
|
107
|
-
def adaptive_large_neighborhood_search(distance_matrix, iterations = 100, removal_fraction = 0.2, rho = 0.1, local_search = True, verbose = True):
|
|
109
|
+
def adaptive_large_neighborhood_search(distance_matrix, iterations = 100, removal_fraction = 0.2, rho = 0.1, local_search = True, verbose = True, sa = True, t0 = None, cooling = 0.995):
|
|
108
110
|
initial_tour = list(range(0, distance_matrix.shape[0]))
|
|
109
111
|
random.shuffle(initial_tour)
|
|
110
112
|
route = initial_tour.copy()
|
|
@@ -113,20 +115,29 @@ def adaptive_large_neighborhood_search(distance_matrix, iterations = 100, remova
|
|
|
113
115
|
insertion_ops = insertion_operators()
|
|
114
116
|
weights_removal = [1.0] * len(removal_ops)
|
|
115
117
|
weights_insertion = [1.0] * len(insertion_ops)
|
|
118
|
+
rho = max(0.0, min(float(rho), 0.99))
|
|
119
|
+
temp = float(distance) if (t0 is None) else float(t0)
|
|
120
|
+
temp = max(temp, 1e-12)
|
|
116
121
|
count = 0
|
|
117
|
-
while (count
|
|
122
|
+
while (count < iterations):
|
|
118
123
|
if (verbose == True and count > 0):
|
|
119
124
|
print('Iteration = ', count, 'Distance = ', round(distance, 2))
|
|
120
125
|
city_tour = route.copy()
|
|
121
126
|
removal_op = random.choices(removal_ops, weights = weights_removal)[0]
|
|
122
127
|
insertion_op = random.choices(insertion_ops, weights = weights_insertion)[0]
|
|
123
128
|
num_removals = int(removal_fraction * distance_matrix.shape[0])
|
|
129
|
+
num_removals = max(1, num_removals)
|
|
130
|
+
num_removals = min(num_removals, max(1, len(city_tour) - 1))
|
|
124
131
|
removed_nodes = removal_op(city_tour, num_removals)
|
|
125
132
|
for node in removed_nodes:
|
|
126
133
|
city_tour.remove(node)
|
|
127
134
|
new_tour = insertion_op(removed_nodes, city_tour, distance_matrix)
|
|
128
135
|
new_tour_distance = distance_point(distance_matrix, new_tour)
|
|
129
|
-
|
|
136
|
+
delta = new_tour_distance - distance
|
|
137
|
+
accept = (delta < 0)
|
|
138
|
+
if (sa == True and accept == False):
|
|
139
|
+
accept = (random.random() < np.exp(-delta / temp))
|
|
140
|
+
if (accept == True):
|
|
130
141
|
route = new_tour
|
|
131
142
|
distance = new_tour_distance
|
|
132
143
|
weights_removal[removal_ops.index(removal_op)] = weights_removal[removal_ops.index(removal_op)] * (1 + rho)
|
|
@@ -138,6 +149,8 @@ def adaptive_large_neighborhood_search(distance_matrix, iterations = 100, remova
|
|
|
138
149
|
total_weight_insertion = sum(weights_insertion)
|
|
139
150
|
weights_removal = [w / total_weight_removal for w in weights_removal]
|
|
140
151
|
weights_insertion = [w / total_weight_insertion for w in weights_insertion]
|
|
152
|
+
if (sa == True):
|
|
153
|
+
temp = max(temp * cooling, 1e-12)
|
|
141
154
|
count = count + 1
|
|
142
155
|
route = route + [route[0]]
|
|
143
156
|
route = [item + 1 for item in route]
|
|
@@ -145,7 +145,7 @@ def local_search_4_opt(distance_matrix, city_tour, recursive_seeding = -1, verbo
|
|
|
145
145
|
[a + c + b + d],
|
|
146
146
|
[a + c + d + b],
|
|
147
147
|
|
|
148
|
-
]
|
|
148
|
+
]
|
|
149
149
|
for item in trial:
|
|
150
150
|
best_route_1[0] = item[0]
|
|
151
151
|
best_route_1[1] = distance_calc(distance_matrix, [best_route_1[0] + [best_route_1[0][0]], 1])
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
############################################################################
|
|
2
|
+
|
|
3
|
+
# Created by: Prof. Valdecy Pereira, D.Sc.
|
|
4
|
+
# UFF - Universidade Federal Fluminense (Brazil)
|
|
5
|
+
# email: valdecy.pereira@gmail.com
|
|
6
|
+
# Lesson: pyCombinatorial - RSS (Randomized Spectral Seriation)
|
|
7
|
+
|
|
8
|
+
# GitHub Repository: <https://github.com/Valdecy>
|
|
9
|
+
|
|
10
|
+
############################################################################
|
|
11
|
+
|
|
12
|
+
# Required Libraries
|
|
13
|
+
import numpy as np
|
|
14
|
+
|
|
15
|
+
from numba import njit
|
|
16
|
+
from scipy.sparse import coo_matrix, diags
|
|
17
|
+
from scipy.sparse.linalg import eigsh, ArpackNoConvergence
|
|
18
|
+
|
|
19
|
+
############################################################################
|
|
20
|
+
|
|
21
|
+
# Numba Functions
|
|
22
|
+
@njit(fastmath = True, cache = True)
|
|
23
|
+
def tour_length(tour, D):
|
|
24
|
+
n = len(tour)
|
|
25
|
+
L = 0.0
|
|
26
|
+
for i in range(n - 1):
|
|
27
|
+
L = L + D[tour[i], tour[i + 1]]
|
|
28
|
+
L = L + D[tour[n - 1], tour[0]]
|
|
29
|
+
return L
|
|
30
|
+
|
|
31
|
+
@njit(fastmath = True, cache = True)
|
|
32
|
+
def _reverse_segment_inplace(tour, pos, p1, p2, n):
|
|
33
|
+
if p1 <= p2:
|
|
34
|
+
while p1 < p2:
|
|
35
|
+
tmp = tour[p1]
|
|
36
|
+
tour[p1] = tour[p2]
|
|
37
|
+
tour[p2] = tmp
|
|
38
|
+
pos[tour[p1]] = p1
|
|
39
|
+
pos[tour[p2]] = p2
|
|
40
|
+
p1 = p1 + 1
|
|
41
|
+
p2 = p2 - 1
|
|
42
|
+
else:
|
|
43
|
+
seg_len = (p2 - p1 + n) % n + 1
|
|
44
|
+
seg = np.empty(seg_len, dtype=np.int32)
|
|
45
|
+
for s in range(seg_len):
|
|
46
|
+
seg[s] = tour[(p1 + s) % n]
|
|
47
|
+
for s in range(seg_len):
|
|
48
|
+
ni = (p1 + s) % n
|
|
49
|
+
tour[ni] = seg[seg_len - 1 - s]
|
|
50
|
+
pos[tour[ni]] = ni
|
|
51
|
+
|
|
52
|
+
@njit(fastmath = True, cache = True)
|
|
53
|
+
def two_opt_candidates_restart(tour, D, candidates, max_passes):
|
|
54
|
+
n = len(tour)
|
|
55
|
+
pos = np.empty(n, dtype = np.int32)
|
|
56
|
+
for i in range(n):
|
|
57
|
+
pos[tour[i]] = i
|
|
58
|
+
passes = 0
|
|
59
|
+
improved_any = True
|
|
60
|
+
while improved_any and passes < max_passes:
|
|
61
|
+
passes = passes + 1
|
|
62
|
+
improved_any = False
|
|
63
|
+
for i in range(0, n):
|
|
64
|
+
a = tour[i]
|
|
65
|
+
b = tour[(i + 1) % n]
|
|
66
|
+
dab = D[a, b]
|
|
67
|
+
improved_here = False
|
|
68
|
+
for which_end in range(2):
|
|
69
|
+
x = a if which_end == 0 else b
|
|
70
|
+
for ci in range(candidates.shape[1]):
|
|
71
|
+
c = candidates[x, ci]
|
|
72
|
+
if D[x, c] >= dab:
|
|
73
|
+
break
|
|
74
|
+
j = pos[c]
|
|
75
|
+
d = tour[(j + 1) % n]
|
|
76
|
+
if d == a or d == b:
|
|
77
|
+
continue
|
|
78
|
+
gain = dab + D[c, d] - D[a, c] - D[b, d]
|
|
79
|
+
if gain > 1e-12:
|
|
80
|
+
p1 = (i + 1) % n
|
|
81
|
+
p2 = j
|
|
82
|
+
_reverse_segment_inplace(tour, pos, p1, p2, n)
|
|
83
|
+
improved_any = True
|
|
84
|
+
improved_here = True
|
|
85
|
+
break
|
|
86
|
+
if improved_here:
|
|
87
|
+
break
|
|
88
|
+
return tour
|
|
89
|
+
|
|
90
|
+
############################################################################
|
|
91
|
+
|
|
92
|
+
# Function: KNN
|
|
93
|
+
def _first_k_excluding(sorted_ids, i, k):
    # Collect up to k indices from `sorted_ids` (already sorted by distance),
    # skipping the query point i itself.
    out = []
    for v in sorted_ids:
        v = int(v)
        if v != i:
            out.append(v)
            if len(out) == k:
                break
    return out

def knn_indices_fast(D, k):
    """
    Return, for each row i of the distance matrix D, the indices of its k
    nearest neighbours (self excluded), sorted by increasing distance.

    D : (n, n) array of pairwise distances.
    k : requested neighbour count; clamped to n - 1.

    Returns an (n, k) int32 array.
    """
    n    = D.shape[0]
    k    = int(min(k, n - 1))
    # Partition a pool slightly larger than k so the self index can be dropped
    # without falling short, then fully sort only that small pool.
    pool = min(n, k + max(16, k // 2))
    part = np.argpartition(D, kth = pool - 1, axis = 1)[:, :pool]
    rows = np.arange(n)[:, None]
    dvals       = D[rows, part]
    order       = np.argsort(dvals, axis = 1)
    part_sorted = part[rows, order]
    nbrs        = np.empty((n, k), dtype = np.int32)
    for i in range(0, n):
        out = _first_k_excluding(part_sorted[i], i, k)
        if len(out) < k:
            # Rare fallback (e.g. heavy ties pushed true neighbours out of the
            # pool): grow the pool until k neighbours are found or the whole
            # row has been considered.
            pool2 = pool
            while len(out) < k and pool2 < n:
                pool2 = min(n, max(pool2 + 32, int(pool2 * 1.5), k + 16))
                part2 = np.argpartition(D[i], kth = pool2 - 1)[:pool2]
                part2 = part2[np.argsort(D[i, part2])]
                out   = _first_k_excluding(part2, i, k)
        nbrs[i, :] = np.array(out[:k], dtype = np.int32)
    return nbrs
|
|
128
|
+
|
|
129
|
+
# Function: Affinity
|
|
130
|
+
def build_affinity_sparse(D, k, sigma_mode, sigma_fixed):
    """
    Build a sparse symmetric affinity matrix W from the distance matrix D
    using a Gaussian kernel over each point's k nearest neighbours.

    D           : (n, n) pairwise distance matrix.
    k           : neighbours per point.
    sigma_mode  : 'adaptive' for a self-tuning local bandwidth (distance to
                  the k-th neighbour), anything else for a fixed bandwidth.
    sigma_fixed : bandwidth used when sigma_mode is not 'adaptive'.

    Returns a CSR matrix with zero diagonal; mutual kNN edges are summed by
    the symmetrization step.
    """
    n    = D.shape[0]
    nbrs = knn_indices_fast(D.astype(float), k)
    if sigma_mode == 'adaptive':
        # Local scale = distance to the k-th nearest neighbour.
        sig = D[np.arange(n), nbrs[:, -1]].astype(float) + 1e-12
    else:
        sig = np.full(n, sigma_fixed, dtype = float)
    # Vectorized kernel evaluation over all (i, neighbour) pairs; same
    # elementwise values as the original per-edge Python loop, without the
    # O(n*k) interpreter overhead.
    rows = np.repeat(np.arange(n), nbrs.shape[1])
    cols = nbrs.reshape(-1)
    dij  = D[rows, cols].astype(float)
    if sigma_mode == 'adaptive':
        denom = sig[rows] * sig[cols] + 1e-12
    else:
        denom = 2.0 * sigma_fixed * sigma_fixed + 1e-12
    vals = np.exp(-(dij * dij) / denom)
    W = coo_matrix((vals, (rows, cols)), shape = (n, n)).tocsr()
    W = (W + W.T).tocsr()
    W = W.tolil()
    W.setdiag(0.0)
    W = W.tocsr()
    W.eliminate_zeros()
    return W
|
|
155
|
+
|
|
156
|
+
# Function: W
|
|
157
|
+
# Function: W
def laplacian_from_W(W):
    # Unnormalized graph Laplacian: L = diag(degree) - W.
    degree = np.asarray(W.sum(axis = 1)).reshape(-1)
    return (diags(degree) - W).tocsr()
|
|
160
|
+
|
|
161
|
+
# Function: SE
|
|
162
|
+
# Function: SE
def spectral_embedding(L, num_vecs = 3, tol = 1e-6, maxiter = 5000):
    # Spectral embedding from the graph Laplacian L: returns an (n, m) array
    # whose columns are eigenvectors for the smallest *non-trivial*
    # eigenvalues (m <= num_vecs, at least one column).
    n = L.shape[0]
    if n <= 2:
        # Too small for a meaningful spectrum; return a zero embedding.
        return np.zeros((n, 1), dtype = float)
    num_vecs = int(max(1, min(num_vecs, n - 1)))
    # Request a few extra eigenpairs as ARPACK convergence headroom.
    k_req = min(num_vecs + 3, n - 1)
    k_req = max(k_req, 2)
    try:
        # NOTE(review): which='SM' can converge slowly on large Laplacians;
        # shift-invert (sigma=0) is the usual faster alternative — confirm
        # before changing, as it alters ARPACK's failure modes.
        vals, vecs = eigsh(L, k = k_req, which = 'SM', tol = tol, maxiter = maxiter)
    except ArpackNoConvergence as e:
        if e.eigenvectors is not None and e.eigenvalues is not None:
            # Use whatever partially converged pairs ARPACK produced.
            vals, vecs = e.eigenvalues, e.eigenvectors
        else:
            # Retry once with one fewer requested eigenpair.
            k_req2 = max(2, min(k_req - 1, n - 1))
            vals, vecs = eigsh(L, k = k_req2, which = 'SM', tol = tol, maxiter = maxiter)
    # Sort eigenpairs by ascending eigenvalue.
    idx  = np.argsort(vals)
    vals = vals[idx]
    vecs = vecs[:, idx]
    # Skip (near-)zero eigenvalues: their eigenvectors are constant on the
    # graph's connected components and carry no ordering information.
    eps = 1e-10
    j   = 0
    while j < len(vals) and vals[j] < eps:
        j = j + 1
    if j >= len(vals):
        # Everything looked trivial; fall back to skipping only the first.
        j = 1
    end = min(j + num_vecs, vecs.shape[1])
    if end <= j:
        # Guarantee at least one returned column.
        end = min(j + 1, vecs.shape[1])
    return vecs[:, j:end].copy()
|
|
190
|
+
|
|
191
|
+
############################################################################
|
|
192
|
+
|
|
193
|
+
# RSS
|
|
194
|
+
# RSS
def randomized_spectral_seriation(D, k = 12, iterations = 800, sigma_noise = 0.006, sigma_mode = 'adaptive', sigma_fixed = 250.0, two_opt_passes = 30, num_vecs = 3, cand_k = 35, scale_noise_by_std = True, noise_cap_frac = 0.10, rnd = 7, verbose = True):
    """
    Randomized Spectral Seriation heuristic for the TSP.

    Builds a kNN Gaussian affinity graph over D, embeds the cities with the
    Laplacian's low eigenvectors, and repeatedly sorts the cities along a
    random noisy 1-D projection of that embedding; each candidate ordering is
    refined by candidate-list 2-opt and the best tour found is returned.

    D          : (n, n) distance matrix.
    k          : affinity-graph neighbours; cand_k : 2-opt candidate list size.
    iterations : number of random projections tried (must be >= 1).
    rnd        : seed for the random generator (reproducible runs).

    Returns (tour, distance) where tour is a 1-based closed city list
    (first city repeated at the end).
    """
    D   = np.asarray(D, dtype = np.float64)
    n   = D.shape[0]
    rng = np.random.default_rng(rnd)
    if n < 3:
        # Degenerate instances: trivial closed tour. Report its true length
        # (the previous code returned 0.0 even for two cities).
        tour = list(range(1, n + 1))
        dist = 0.0
        if n > 0:
            tour.append(tour[0])
        if n == 2:
            dist = float(D[0, 1] + D[1, 0])
        return tour, dist
    k      = int(max(1, min(k, n - 1)))
    cand_k = int(max(1, min(cand_k, n - 1)))
    W          = build_affinity_sparse(D, k, sigma_mode, sigma_fixed)
    L          = laplacian_from_W(W)
    embedding  = spectral_embedding(L, num_vecs = num_vecs, tol = 1e-6, maxiter = 5000)
    m          = embedding.shape[1]
    if m <= 0:
        embedding = np.zeros((n, 1), dtype = float)
        m         = 1
    candidates = knn_indices_fast(D, cand_k)
    best_L     = float("inf")
    best_tour  = None
    for it in range(0, iterations):
        # Random direction in embedding space; the first (Fiedler-like)
        # coordinate is weighted 3x so it dominates the ordering. (m >= 1 is
        # guaranteed by the guard above.)
        coeffs    = rng.normal(0.0, 1.0, size = m)
        coeffs[0] = 3.0 * coeffs[0]
        coeffs    = coeffs / (np.linalg.norm(coeffs) + 1e-12)
        x_proj    = embedding @ coeffs
        if scale_noise_by_std:
            # Noise proportional to the projection's spread, capped at a
            # fraction of its range, so perturbations track the data scale.
            s     = float(np.std(x_proj) + 1e-12)
            eff   = sigma_noise * s
            cap   = noise_cap_frac * float((np.max(x_proj) - np.min(x_proj)) + 1e-12)
            eff   = min(eff, cap)
            noise = rng.normal(0.0, eff, size = n)
        else:
            noise = rng.normal(0.0, sigma_noise, size = n)
        x_noisy   = x_proj + noise
        # Sorting along the perturbed projection yields a candidate tour;
        # random reversal and rotation diversify the 2-opt starting points.
        candidate = np.argsort(x_noisy).astype(np.int32)
        if rng.random() < 0.5:
            candidate = candidate[::-1].copy()
        shift     = int(rng.integers(0, n))
        candidate = np.roll(candidate, shift).astype(np.int32)
        candidate = two_opt_candidates_restart(candidate, D, candidates, max(two_opt_passes, 2))
        Lc        = tour_length(candidate, D)
        if Lc < best_L:
            best_L    = Lc
            best_tour = candidate.copy()
            if verbose:
                print("Iteration =", it, "; Distance =", best_L)
    if best_tour is None:
        # Previously this fell through to a confusing TypeError on None + 1.
        raise ValueError("iterations must be at least 1")
    best_out = (best_tour + 1).tolist()
    best_out.append(best_out[0])
    return best_out, float(best_L)
|
|
245
|
+
|
|
246
|
+
############################################################################
|
|
@@ -61,7 +61,7 @@ def two_opt(tour, D, max_passes):
|
|
|
61
61
|
|
|
62
62
|
# Function: KNN
|
|
63
63
|
def knn_indices(D, k):
    # k nearest neighbours of each row of D, dropping sort column 0 —
    # presumably the point itself (assumes a zero diagonal so each point
    # sorts first in its own row; TODO confirm against the caller).
    return np.argsort(D, axis = 1)[:, 1 : k + 1]
|
|
65
65
|
|
|
66
66
|
# Function: Affinity
|
|
67
67
|
def build_affinity_sparse(D, k, sigma_mode, sigma_fixed):
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: pycombinatorial
|
|
3
|
-
Version: 2.1.
|
|
3
|
+
Version: 2.1.8
|
|
4
4
|
Summary: A library to solve TSP (Travelling Salesman Problem) using Exact Algorithms, Heuristics, Metaheuristics and Reinforcement Learning
|
|
5
5
|
Home-page: https://github.com/Valdecy/pyCombinatorial
|
|
6
6
|
Author: Valdecy Pereira
|
|
@@ -15,7 +15,7 @@ License-File: LICENSE
|
|
|
15
15
|
|
|
16
16
|
**pyCombinatorial** is a Python-based library designed to tackle the classic Travelling Salesman Problem (TSP) through a diverse set of **Exact Algorithms**, **Heuristics**, **Metaheuristics** and **Reinforcement Learning**. It brings together both well-established and cutting-edge methodologies, offering end-users a flexible toolkit to generate high-quality solutions for TSP instances of various sizes and complexities.
|
|
17
17
|
|
|
18
|
-
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
18
|
+
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **Randomized Spectral Seriation**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
19
19
|
|
|
20
20
|
## Usage
|
|
21
21
|
|
|
@@ -111,6 +111,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
111
111
|
- Nearest Neighbour ([ Colab Demo ](https://colab.research.google.com/drive/1aL1kYXgSjUJYPfYSMy_0SWq4hJ3nrueJ?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/S0166-218X(01)00195-0))
|
|
112
112
|
- Random Insertion ([ Colab Demo ](https://colab.research.google.com/drive/1RP_grqrTXyDkHOLB_L1H8TkvxdLli5hG?usp=sharing)) ( [ Paper ](https://disco.ethz.ch/courses/fs16/podc/readingAssignment/1.pdf))
|
|
113
113
|
- Random Tour ([ Colab Demo ](https://colab.research.google.com/drive/1DPXMJXInkGKTyVFDAQ2bKXjglhy3DaCS?usp=sharing)) ( [ Paper ](https://doi.org/10.1023/A:1011263204536))
|
|
114
|
+
- Randomized Spectral Seriation ([ Colab Demo ](https://colab.research.google.com/drive/1PTtO6HJfftsFZEScCYZvzpBJsgMyjBCY?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
114
115
|
- RL Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1dnZhLAzQdz9kzxKrVcwMECWbyEKkZ7St?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
115
116
|
- RL Double Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1VTv8A6Ac-LvBxsereFyGRfkiLRbJI547?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
116
117
|
- RL S.A.R.S.A ([ Colab Demo ](https://colab.research.google.com/drive/1q9hon3jFf8xVCw4idxhu7goLREKbQ6N3?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
@@ -121,7 +122,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
121
122
|
- Space Filling Curve (Hilbert) ([ Colab Demo ](https://colab.research.google.com/drive/1FXzWrUBjdbJBngRFHv66CZw5pFN3yOs8?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/0960-0779(95)80046-J))
|
|
122
123
|
- Space Filling Curve (Morton) ([ Colab Demo ](https://colab.research.google.com/drive/1Z13kXyi7eaNQbBUmhvwuQjY4VaUfGVbs?usp=sharing)) ( [ Paper ](https://dominoweb.draco.res.ibm.com/reports/Morton1966.pdf))
|
|
123
124
|
- Space Filling Curve (Sierpinski) ([ Colab Demo ](https://colab.research.google.com/drive/1w-Zptd5kOryCwvQ0qSNBNhPXC61c8QXF?usp=sharing)) ( [ Paper ](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.67.9061&rep=rep1&type=pdf))
|
|
124
|
-
- Spectral Seriation Initializer ([ Colab Demo ](https://colab.research.google.com/drive/1lG0pYxASU75qh0jK-A_eMCoPpCWv0I4V?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/
|
|
125
|
+
- Spectral Seriation Initializer ([ Colab Demo ](https://colab.research.google.com/drive/1lG0pYxASU75qh0jK-A_eMCoPpCWv0I4V?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
125
126
|
- Stochastic Hill Climbing ([ Colab Demo ](https://colab.research.google.com/drive/1_wP6vg4JoRHGItGxEtXcf9Y9OuuoDlDl?usp=sharing)) ( [ Paper ](http://aima.cs.berkeley.edu/))
|
|
126
127
|
- Sweep ([ Colab Demo ](https://colab.research.google.com/drive/1AkAn4yeomAp6POBslk3Asd6OrxfBrHT7?usp=sharing)) ( [ Paper ](http://dx.doi.org/10.1287/opre.22.2.340))
|
|
127
128
|
- Tabu Search ([ Colab Demo ](https://colab.research.google.com/drive/1SRwQrBaxkKk18SDvQPy--0yNRWdl6Y1G?usp=sharing)) ( [ Paper ](https://doi.org/10.1287/ijoc.1.3.190))
|
|
@@ -44,6 +44,7 @@ pyCombinatorial/algorithm/rl_double_ql.py
|
|
|
44
44
|
pyCombinatorial/algorithm/rl_ql.py
|
|
45
45
|
pyCombinatorial/algorithm/rl_sarsa.py
|
|
46
46
|
pyCombinatorial/algorithm/rr.py
|
|
47
|
+
pyCombinatorial/algorithm/rss.py
|
|
47
48
|
pyCombinatorial/algorithm/rt.py
|
|
48
49
|
pyCombinatorial/algorithm/s_gui.py
|
|
49
50
|
pyCombinatorial/algorithm/s_itr.py
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{pycombinatorial-2.1.4 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/dependency_links.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|