pycombinatorial 2.1.7__tar.gz → 2.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/PKG-INFO +3 -2
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/README.md +2 -1
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/__init__.py +1 -0
- pycombinatorial-2.1.8/pyCombinatorial/algorithm/rss.py +246 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ssi.py +1 -1
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/PKG-INFO +3 -2
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/SOURCES.txt +1 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/setup.py +1 -1
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/LICENSE +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/__init__.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/aco.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/alns.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bb.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bf.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bhk.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/brkga.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/bt.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/christofides.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/conc_hull.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/conv_hull.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/cw.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/eln.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/eo.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/frnn.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ga.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/gksp.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/grasp.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/hpn.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_c.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_f.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_n.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ins_r.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/ksp.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/lns.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/mf.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/nn.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2_5.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2_5s.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_2s.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_3.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_3s.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_4.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_4s.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_5.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_5s.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/opt_or.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rl_double_ql.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rl_ql.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rl_sarsa.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rr.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/rt.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_gui.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_itr.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_sct.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_shc.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_tabu.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/s_vns.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/sa.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/som.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/spfc_h.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/spfc_m.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/spfc_s.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/swp.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/tat.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/tbb.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/algorithm/zs.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/utils/__init__.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/utils/graphs.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pyCombinatorial/utils/util.py +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/dependency_links.txt +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/requires.txt +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/top_level.txt +0 -0
- {pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/setup.cfg +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: pycombinatorial
|
|
3
|
-
Version: 2.1.7
|
|
3
|
+
Version: 2.1.8
|
|
4
4
|
Summary: A library to solve TSP (Travelling Salesman Problem) using Exact Algorithms, Heuristics, Metaheuristics and Reinforcement Learning
|
|
5
5
|
Home-page: https://github.com/Valdecy/pyCombinatorial
|
|
6
6
|
Author: Valdecy Pereira
|
|
@@ -15,7 +15,7 @@ License-File: LICENSE
|
|
|
15
15
|
|
|
16
16
|
**pyCombinatorial** is a Python-based library designed to tackle the classic Travelling Salesman Problem (TSP) through a diverse set of **Exact Algorithms**, **Heuristics**, **Metaheuristics** and **Reinforcement Learning**. It brings together both well-established and cutting-edge methodologies, offering end-users a flexible toolkit to generate high-quality solutions for TSP instances of various sizes and complexities.
|
|
17
17
|
|
|
18
|
-
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
18
|
+
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **Randomized Spectral Seriation**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
19
19
|
|
|
20
20
|
## Usage
|
|
21
21
|
|
|
@@ -111,6 +111,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
111
111
|
- Nearest Neighbour ([ Colab Demo ](https://colab.research.google.com/drive/1aL1kYXgSjUJYPfYSMy_0SWq4hJ3nrueJ?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/S0166-218X(01)00195-0))
|
|
112
112
|
- Random Insertion ([ Colab Demo ](https://colab.research.google.com/drive/1RP_grqrTXyDkHOLB_L1H8TkvxdLli5hG?usp=sharing)) ( [ Paper ](https://disco.ethz.ch/courses/fs16/podc/readingAssignment/1.pdf))
|
|
113
113
|
- Random Tour ([ Colab Demo ](https://colab.research.google.com/drive/1DPXMJXInkGKTyVFDAQ2bKXjglhy3DaCS?usp=sharing)) ( [ Paper ](https://doi.org/10.1023/A:1011263204536))
|
|
114
|
+
- Randomized Spectral Seriation ([ Colab Demo ](https://colab.research.google.com/drive/1PTtO6HJfftsFZEScCYZvzpBJsgMyjBCY?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
114
115
|
- RL Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1dnZhLAzQdz9kzxKrVcwMECWbyEKkZ7St?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
115
116
|
- RL Double Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1VTv8A6Ac-LvBxsereFyGRfkiLRbJI547?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
116
117
|
- RL S.A.R.S.A ([ Colab Demo ](https://colab.research.google.com/drive/1q9hon3jFf8xVCw4idxhu7goLREKbQ6N3?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
@@ -4,7 +4,7 @@
|
|
|
4
4
|
|
|
5
5
|
**pyCombinatorial** is a Python-based library designed to tackle the classic Travelling Salesman Problem (TSP) through a diverse set of **Exact Algorithms**, **Heuristics**, **Metaheuristics** and **Reinforcement Learning**. It brings together both well-established and cutting-edge methodologies, offering end-users a flexible toolkit to generate high-quality solutions for TSP instances of various sizes and complexities.
|
|
6
6
|
|
|
7
|
-
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
7
|
+
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **Randomized Spectral Seriation**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
8
8
|
|
|
9
9
|
## Usage
|
|
10
10
|
|
|
@@ -100,6 +100,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
100
100
|
- Nearest Neighbour ([ Colab Demo ](https://colab.research.google.com/drive/1aL1kYXgSjUJYPfYSMy_0SWq4hJ3nrueJ?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/S0166-218X(01)00195-0))
|
|
101
101
|
- Random Insertion ([ Colab Demo ](https://colab.research.google.com/drive/1RP_grqrTXyDkHOLB_L1H8TkvxdLli5hG?usp=sharing)) ( [ Paper ](https://disco.ethz.ch/courses/fs16/podc/readingAssignment/1.pdf))
|
|
102
102
|
- Random Tour ([ Colab Demo ](https://colab.research.google.com/drive/1DPXMJXInkGKTyVFDAQ2bKXjglhy3DaCS?usp=sharing)) ( [ Paper ](https://doi.org/10.1023/A:1011263204536))
|
|
103
|
+
- Randomized Spectral Seriation ([ Colab Demo ](https://colab.research.google.com/drive/1PTtO6HJfftsFZEScCYZvzpBJsgMyjBCY?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
103
104
|
- RL Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1dnZhLAzQdz9kzxKrVcwMECWbyEKkZ7St?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
104
105
|
- RL Double Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1VTv8A6Ac-LvBxsereFyGRfkiLRbJI547?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
105
106
|
- RL S.A.R.S.A ([ Colab Demo ](https://colab.research.google.com/drive/1q9hon3jFf8xVCw4idxhu7goLREKbQ6N3?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
@@ -39,6 +39,7 @@ from .rl_double_ql import double_q_learning
|
|
|
39
39
|
from .rl_ql import q_learning
|
|
40
40
|
from .rl_sarsa import sarsa
|
|
41
41
|
from .rr import ruin_and_recreate
|
|
42
|
+
from .rss import randomized_spectral_seriation
|
|
42
43
|
from .rt import random_tour
|
|
43
44
|
from .s_gui import guided_search
|
|
44
45
|
from .s_itr import iterated_search
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
############################################################################
|
|
2
|
+
|
|
3
|
+
# Created by: Prof. Valdecy Pereira, D.Sc.
|
|
4
|
+
# UFF - Universidade Federal Fluminense (Brazil)
|
|
5
|
+
# email: valdecy.pereira@gmail.com
|
|
6
|
+
# Lesson: pyCombinatorial - RSS (Randomized Spectral Seriation)
|
|
7
|
+
|
|
8
|
+
# GitHub Repository: <https://github.com/Valdecy>
|
|
9
|
+
|
|
10
|
+
############################################################################
|
|
11
|
+
|
|
12
|
+
# Required Libraries
|
|
13
|
+
import numpy as np
|
|
14
|
+
|
|
15
|
+
from numba import njit
|
|
16
|
+
from scipy.sparse import coo_matrix, diags
|
|
17
|
+
from scipy.sparse.linalg import eigsh, ArpackNoConvergence
|
|
18
|
+
|
|
19
|
+
############################################################################
|
|
20
|
+
|
|
21
|
+
# Numba Functions
|
|
22
|
+
@njit(fastmath = True, cache = True)
def tour_length(tour, D):
    """Length of the closed tour encoded by `tour` over distance matrix `D`.

    Sums the distance of every consecutive edge and then adds the edge
    that returns from the last city to the first.
    """
    cities = len(tour)
    total  = 0.0
    for idx in range(cities - 1):
        total = total + D[tour[idx], tour[idx + 1]]
    return total + D[tour[cities - 1], tour[0]]
|
|
30
|
+
|
|
31
|
+
@njit(fastmath = True, cache = True)
def _reverse_segment_inplace(tour, pos, p1, p2, n):
    """Reverse the tour segment from index p1 to p2 (inclusive) in place,
    keeping the city->index lookup `pos` consistent.

    When p1 > p2 the segment wraps around the end of the array, so the
    wrapped positions are buffered first and written back reversed.
    """
    if p1 <= p2:
        # Contiguous segment: classic two-pointer swap.
        lo = p1
        hi = p2
        while lo < hi:
            tour[lo], tour[hi] = tour[hi], tour[lo]
            pos[tour[lo]] = lo
            pos[tour[hi]] = hi
            lo = lo + 1
            hi = hi - 1
    else:
        # Wrap-around segment: copy it out, then write it back reversed.
        length = (p2 - p1 + n) % n + 1
        buffer = np.empty(length, dtype=np.int32)
        for s in range(length):
            buffer[s] = tour[(p1 + s) % n]
        for s in range(length):
            slot = (p1 + s) % n
            tour[slot] = buffer[length - 1 - s]
            pos[tour[slot]] = slot
|
|
51
|
+
|
|
52
|
+
@njit(fastmath = True, cache = True)
def two_opt_candidates_restart(tour, D, candidates, max_passes):
    """Candidate-list 2-opt local search over `tour`, in place.

    For each edge (a, b) only exchanges toward the precomputed nearest
    neighbours of a or b are examined (first-improvement).  Because the
    candidate lists are distance-sorted, the scan of a list stops as soon
    as a candidate is farther than the current edge.  Passes repeat while
    any improvement was found, up to `max_passes`.
    """
    n   = len(tour)
    pos = np.empty(n, dtype = np.int32)
    for idx in range(n):
        pos[tour[idx]] = idx
    sweep    = 0
    improved = True
    while improved and sweep < max_passes:
        sweep    = sweep + 1
        improved = False
        for i in range(n):
            a     = tour[i]
            b     = tour[(i + 1) % n]
            dab   = D[a, b]
            found = False
            for side in range(2):
                x = a if side == 0 else b
                for ci in range(candidates.shape[1]):
                    c = candidates[x, ci]
                    if D[x, c] >= dab:
                        # Candidates are sorted by distance: no closer
                        # neighbour remains, so stop scanning this list.
                        break
                    j = pos[c]
                    d = tour[(j + 1) % n]
                    if d == a or d == b:
                        continue
                    gain = dab + D[c, d] - D[a, c] - D[b, d]
                    if gain > 1e-12:
                        _reverse_segment_inplace(tour, pos, (i + 1) % n, j, n)
                        improved = True
                        found    = True
                        break
                if found:
                    break
    return tour
|
|
89
|
+
|
|
90
|
+
############################################################################
|
|
91
|
+
|
|
92
|
+
# Function: KNN
|
|
93
|
+
def _first_k_neighbours(sorted_ids, self_idx, limit):
    # Walk a distance-sorted candidate list, skip the point itself, and
    # stop as soon as `limit` neighbours have been collected.
    kept = []
    for v in sorted_ids:
        v = int(v)
        if v != self_idx:
            kept.append(v)
        if len(kept) == limit:
            break
    return kept

def knn_indices_fast(D, k):
    """Return, for each row of distance matrix D, the indices of its k
    nearest neighbours (self excluded), sorted by increasing distance.

    An over-sized candidate pool is selected with argpartition for speed;
    rows where the pool yielded fewer than k usable neighbours fall back
    to progressively larger pools until k are found or the pool covers
    the whole row.
    """
    n = D.shape[0]
    k = int(min(k, n - 1))
    pool = min(n, k + max(16, k // 2))
    part = np.argpartition(D, kth = pool - 1, axis = 1)[:, :pool]
    rows = np.arange(n)[:, None]
    part_sorted = part[rows, np.argsort(D[rows, part], axis = 1)]
    nbrs = np.empty((n, k), dtype = np.int32)
    for i in range(n):
        picked = _first_k_neighbours(part_sorted[i], i, k)
        if len(picked) < k:
            pool2 = pool
            while len(picked) < k and pool2 < n:
                pool2 = min(n, max(pool2 + 32, int(pool2 * 1.5), k + 16))
                cand = np.argpartition(D[i], kth = pool2 - 1)[:pool2]
                cand = cand[np.argsort(D[i, cand])]
                picked = _first_k_neighbours(cand, i, k)
        nbrs[i, :] = np.array(picked[:k], dtype = np.int32)
    return nbrs
|
|
128
|
+
|
|
129
|
+
# Function: Affinity
|
|
130
|
+
def build_affinity_sparse(D, k, sigma_mode, sigma_fixed):
    """Build a symmetric sparse k-NN Gaussian affinity matrix from D.

    With sigma_mode == 'adaptive' each pair (i, j) is scaled by the
    product of the two points' k-th neighbour distances (local
    bandwidth); any other mode uses the fixed bandwidth `sigma_fixed`
    for every pair.  The diagonal (self-affinity) is zeroed.
    """
    n        = D.shape[0]
    nbrs     = knn_indices_fast(D.astype(float), k)
    adaptive = (sigma_mode == 'adaptive')
    if adaptive:
        sig = D[np.arange(n), nbrs[:, -1]].astype(float) + 1e-12
    else:
        sig = np.full(n, sigma_fixed, dtype = float)
    rows = []
    cols = []
    vals = []
    # Denominator is loop-invariant in fixed mode; hoist it.
    fixed_denom = 2.0 * sigma_fixed * sigma_fixed + 1e-12
    for i in range(n):
        for j in nbrs[i]:
            dij = float(D[i, j])
            if adaptive:
                denom = sig[i] * sig[j] + 1e-12
            else:
                denom = fixed_denom
            rows.append(i)
            cols.append(j)
            vals.append(np.exp(-(dij * dij) / denom))
    W = coo_matrix((vals, (rows, cols)), shape = (n, n)).tocsr()
    W = (W + W.T).tolil()
    W.setdiag(0.0)
    W = W.tocsr()
    W.eliminate_zeros()
    return W
|
|
155
|
+
|
|
156
|
+
# Function: W
|
|
157
|
+
def laplacian_from_W(W):
    """Unnormalized graph Laplacian L = diag(degree) - W, in CSR form."""
    degree = np.asarray(W.sum(axis = 1)).reshape(-1)
    return (diags(degree) - W).tocsr()
|
|
160
|
+
|
|
161
|
+
# Function: SE
|
|
162
|
+
def spectral_embedding(L, num_vecs = 3, tol = 1e-6, maxiter = 5000):
    """Return up to `num_vecs` non-trivial Laplacian eigenvectors (n x m).

    Eigenvectors with a (numerically) zero eigenvalue span the graph's
    connected components and carry no ordering information, so they are
    skipped.  ARPACK non-convergence is tolerated: partial results are
    used when available, otherwise one retry with a smaller subspace is
    attempted.  Graphs with n <= 2 get a zero embedding.
    """
    n = L.shape[0]
    if n <= 2:
        return np.zeros((n, 1), dtype = float)
    num_vecs = int(max(1, min(num_vecs, n - 1)))
    # Request a few extra vectors so trivial (zero-eigenvalue) ones can
    # be discarded while still returning num_vecs useful columns.
    k_req = max(min(num_vecs + 3, n - 1), 2)
    try:
        vals, vecs = eigsh(L, k = k_req, which = 'SM', tol = tol, maxiter = maxiter)
    except ArpackNoConvergence as err:
        if err.eigenvectors is not None and err.eigenvalues is not None:
            vals, vecs = err.eigenvalues, err.eigenvectors
        else:
            retry_k = max(2, min(k_req - 1, n - 1))
            vals, vecs = eigsh(L, k = retry_k, which = 'SM', tol = tol, maxiter = maxiter)
    order = np.argsort(vals)
    vals  = vals[order]
    vecs  = vecs[:, order]
    # Skip leading eigenvectors whose eigenvalue is numerically zero.
    first = 0
    while first < len(vals) and vals[first] < 1e-10:
        first = first + 1
    if first >= len(vals):
        first = 1
    stop = min(first + num_vecs, vecs.shape[1])
    if stop <= first:
        stop = min(first + 1, vecs.shape[1])
    return vecs[:, first:stop].copy()
|
|
190
|
+
|
|
191
|
+
############################################################################
|
|
192
|
+
|
|
193
|
+
# RSS
|
|
194
|
+
def randomized_spectral_seriation(D, k = 12, iterations = 800, sigma_noise = 0.006, sigma_mode = 'adaptive', sigma_fixed = 250.0, two_opt_passes = 30, num_vecs = 3, cand_k = 35, scale_noise_by_std = True, noise_cap_frac = 0.10, rnd = 7, verbose = True):
    """RSS (Randomized Spectral Seriation) heuristic for the TSP.

    Embeds the cities via the Laplacian of a sparse k-NN affinity graph,
    then repeatedly projects them onto a random combination of the
    eigenvectors, perturbs the projection with Gaussian noise, sorts the
    perturbed coordinates into a tour, and polishes each tour with
    candidate-list 2-opt.  The best tour over all iterations wins.

    Returns (tour, distance) where `tour` is a 1-based closed city list
    (first city repeated at the end).
    """
    D   = np.asarray(D, dtype = np.float64)
    n   = D.shape[0]
    rng = np.random.default_rng(rnd)
    # Trivial instances: with fewer than 3 cities every order is optimal.
    if n < 3:
        route = list(range(1, n + 1))
        if n > 0:
            route.append(route[0])
        return route, 0.0
    k      = int(max(1, min(k, n - 1)))
    cand_k = int(max(1, min(cand_k, n - 1)))
    graph     = build_affinity_sparse(D, k, sigma_mode, sigma_fixed)
    lap       = laplacian_from_W(graph)
    embedding = spectral_embedding(lap, num_vecs = num_vecs, tol = 1e-6, maxiter = 5000)
    m = embedding.shape[1]
    if m <= 0:
        embedding = np.zeros((n, 1), dtype = float)
        m = 1
    candidates = knn_indices_fast(D, cand_k)
    best_L    = float("inf")
    best_tour = None
    for it in range(iterations):
        # Random unit direction in eigenvector space, biased toward the
        # first (Fiedler-like) coordinate via a 3x amplification.
        coeffs = rng.normal(0.0, 1.0, size = m)
        if m > 0:
            coeffs[0] = 3.0 * coeffs[0]
        coeffs = coeffs / (np.linalg.norm(coeffs) + 1e-12)
        x_proj = embedding @ coeffs
        if scale_noise_by_std:
            # Noise magnitude follows the projection's spread, capped at a
            # fraction of its total range.
            spread = float(np.std(x_proj) + 1e-12)
            eff = min(sigma_noise * spread,
                      noise_cap_frac * float((np.max(x_proj) - np.min(x_proj)) + 1e-12))
            noise = rng.normal(0.0, eff, size = n)
        else:
            noise = rng.normal(0.0, sigma_noise, size = n)
        candidate = np.argsort(x_proj + noise).astype(np.int32)
        # Random reversal and rotation diversify the 2-opt starting point.
        if rng.random() < 0.5:
            candidate = candidate[::-1].copy()
        shift = int(rng.integers(0, n))
        candidate = np.roll(candidate, shift).astype(np.int32)
        candidate = two_opt_candidates_restart(candidate, D, candidates, max(two_opt_passes, 2))
        Lc = tour_length(candidate, D)
        if Lc < best_L:
            best_L    = Lc
            best_tour = candidate.copy()
            if verbose:
                print("Iteration =", it, "; Distance =", best_L)
    best_out = (best_tour + 1).tolist()
    best_out.append(best_out[0])
    return best_out, float(best_L)
|
|
245
|
+
|
|
246
|
+
############################################################################
|
|
@@ -61,7 +61,7 @@ def two_opt(tour, D, max_passes):
|
|
|
61
61
|
|
|
62
62
|
# Function: KNN
|
|
63
63
|
def knn_indices(D, k):
    # Rank each row's distances in ascending order and keep columns
    # 1..k, dropping column 0 (every point's zero distance to itself).
    ranked = np.argsort(D, axis = 1)
    return ranked[:, 1 : k + 1]
|
|
65
65
|
|
|
66
66
|
# Function: Affinity
|
|
67
67
|
def build_affinity_sparse(D, k, sigma_mode, sigma_fixed):
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: pycombinatorial
|
|
3
|
-
Version: 2.1.7
|
|
3
|
+
Version: 2.1.8
|
|
4
4
|
Summary: A library to solve TSP (Travelling Salesman Problem) using Exact Algorithms, Heuristics, Metaheuristics and Reinforcement Learning
|
|
5
5
|
Home-page: https://github.com/Valdecy/pyCombinatorial
|
|
6
6
|
Author: Valdecy Pereira
|
|
@@ -15,7 +15,7 @@ License-File: LICENSE
|
|
|
15
15
|
|
|
16
16
|
**pyCombinatorial** is a Python-based library designed to tackle the classic Travelling Salesman Problem (TSP) through a diverse set of **Exact Algorithms**, **Heuristics**, **Metaheuristics** and **Reinforcement Learning**. It brings together both well-established and cutting-edge methodologies, offering end-users a flexible toolkit to generate high-quality solutions for TSP instances of various sizes and complexities.
|
|
17
17
|
|
|
18
|
-
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
18
|
+
Techniques: **2-opt**; **2.5-opt**; **3-opt**; **4-opt**; **5-opt**; **Or-opt**; **2-opt Stochastic**; **2.5-opt Stochastic**; **3-opt Stochastic**; **4-opt Stochastic**; **5-opt Stochastic**; **Ant Colony Optimization**; **Adaptive Large Neighborhood Search**; **Bellman-Held-Karp Exact Algorithm**; **Bitonic Tour**; **Branch & Bound**; **BRKGA** (Biased Random Key Genetic Algorithm); **Brute Force**; **Cheapest Insertion**; **Christofides Algorithm**; **Clarke & Wright** (Savings Heuristic); **Concave Hull Algorithm**; **Convex Hull Algorithm**; **Elastic Net**; **Extremal Optimization**; **Farthest Insertion**; **FRNN** (Fixed Radius Near Neighbor); **Genetic Algorithm**; **GRASP** (Greedy Randomized Adaptive Search Procedure); **Greedy Karp-Steele Patching**; **Guided Search**; **Hopfield Network**; **Iterated Search**; **Karp-Steele Patching**; **Large Neighborhood Search**; **Multifragment Heuristic**; **Nearest Insertion**; **Nearest Neighbour**; **Random Insertion**; **Random Tour**; **Randomized Spectral Seriation**; **RL Q-Learning**; **RL Double Q-Learning**; **RL S.A.R.S.A** (State Action Reward State Action); **Ruin & Recreate**; **Scatter Search**; **Simulated Annealing**; **SOM** (Self Organizing Maps); **Space Filling Curve** (Hilbert); **Space Filling Curve** (Morton); **Space Filling Curve** (Sierpinski); **Spectral Seriation Initializer**; **Stochastic Hill Climbing**; **Sweep**; **Tabu Search**; **Truncated Branch & Bound**; **Twice-Around the Tree Algorithm** (Double Tree Algorithm); **Variable Neighborhood Search**; **Zero Suffix Method**.
|
|
19
19
|
|
|
20
20
|
## Usage
|
|
21
21
|
|
|
@@ -111,6 +111,7 @@ print('Total Distance: ', round(distance, 2))
|
|
|
111
111
|
- Nearest Neighbour ([ Colab Demo ](https://colab.research.google.com/drive/1aL1kYXgSjUJYPfYSMy_0SWq4hJ3nrueJ?usp=sharing)) ( [ Paper ](https://doi.org/10.1016/S0166-218X(01)00195-0))
|
|
112
112
|
- Random Insertion ([ Colab Demo ](https://colab.research.google.com/drive/1RP_grqrTXyDkHOLB_L1H8TkvxdLli5hG?usp=sharing)) ( [ Paper ](https://disco.ethz.ch/courses/fs16/podc/readingAssignment/1.pdf))
|
|
113
113
|
- Random Tour ([ Colab Demo ](https://colab.research.google.com/drive/1DPXMJXInkGKTyVFDAQ2bKXjglhy3DaCS?usp=sharing)) ( [ Paper ](https://doi.org/10.1023/A:1011263204536))
|
|
114
|
+
- Randomized Spectral Seriation ([ Colab Demo ](https://colab.research.google.com/drive/1PTtO6HJfftsFZEScCYZvzpBJsgMyjBCY?usp=sharing)) ( [ Paper ](https://doi.org/10.1137/S0097539795285771))
|
|
114
115
|
- RL Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1dnZhLAzQdz9kzxKrVcwMECWbyEKkZ7St?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
115
116
|
- RL Double Q-Learning ([ Colab Demo ](https://colab.research.google.com/drive/1VTv8A6Ac-LvBxsereFyGRfkiLRbJI547?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
116
117
|
- RL S.A.R.S.A ([ Colab Demo ](https://colab.research.google.com/drive/1q9hon3jFf8xVCw4idxhu7goLREKbQ6N3?usp=sharing)) ( [ Paper ](https://doi.org/10.1049/tje2.12303))
|
|
@@ -44,6 +44,7 @@ pyCombinatorial/algorithm/rl_double_ql.py
|
|
|
44
44
|
pyCombinatorial/algorithm/rl_ql.py
|
|
45
45
|
pyCombinatorial/algorithm/rl_sarsa.py
|
|
46
46
|
pyCombinatorial/algorithm/rr.py
|
|
47
|
+
pyCombinatorial/algorithm/rss.py
|
|
47
48
|
pyCombinatorial/algorithm/rt.py
|
|
48
49
|
pyCombinatorial/algorithm/s_gui.py
|
|
49
50
|
pyCombinatorial/algorithm/s_itr.py
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{pycombinatorial-2.1.7 → pycombinatorial-2.1.8}/pycombinatorial.egg-info/dependency_links.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|