pref_voting 1.16.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pref_voting/__init__.py +1 -0
- pref_voting/analysis.py +496 -0
- pref_voting/axiom.py +38 -0
- pref_voting/axiom_helpers.py +129 -0
- pref_voting/axioms.py +10 -0
- pref_voting/c1_methods.py +963 -0
- pref_voting/combined_methods.py +514 -0
- pref_voting/create_methods.py +128 -0
- pref_voting/data/examples/condorcet_winner/minimal_Anti-Plurality.soc +16 -0
- pref_voting/data/examples/condorcet_winner/minimal_Borda.soc +17 -0
- pref_voting/data/examples/condorcet_winner/minimal_Bracket_Voting.soc +20 -0
- pref_voting/data/examples/condorcet_winner/minimal_Bucklin.soc +19 -0
- pref_voting/data/examples/condorcet_winner/minimal_Coombs.soc +20 -0
- pref_voting/data/examples/condorcet_winner/minimal_Coombs_PUT.soc +20 -0
- pref_voting/data/examples/condorcet_winner/minimal_Coombs_TB.soc +20 -0
- pref_voting/data/examples/condorcet_winner/minimal_Dowdall.soc +19 -0
- pref_voting/data/examples/condorcet_winner/minimal_Instant_Runoff.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_Instant_Runoff_PUT.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_Instant_Runoff_TB.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_Iterated_Removal_Condorcet_Loser.soc +17 -0
- pref_voting/data/examples/condorcet_winner/minimal_Pareto.soc +17 -0
- pref_voting/data/examples/condorcet_winner/minimal_Plurality.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_PluralityWRunoff_PUT.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_Positive-Negative_Voting.soc +17 -0
- pref_voting/data/examples/condorcet_winner/minimal_Simplified_Bucklin.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_Superior_Voting.soc +19 -0
- pref_voting/data/examples/condorcet_winner/minimal_Weighted_Bucklin.soc +19 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Anti-Plurality.soc +17 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Borda.soc +17 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Bracket_Voting.soc +20 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Bucklin.soc +19 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Coombs.soc +21 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Coombs_PUT.soc +21 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Coombs_TB.soc +20 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Dowdall.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Instant_Runoff.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Instant_Runoff_PUT.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Instant_Runoff_TB.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Plurality.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_PluralityWRunoff_PUT.soc +18 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Positive-Negative_Voting.soc +17 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Simplified_Bucklin.soc +20 -0
- pref_voting/data/examples/condorcet_winner/minimal_resolute_Weighted_Bucklin.soc +19 -0
- pref_voting/data/voting_methods_properties.json +414 -0
- pref_voting/data/voting_methods_properties.json.lock +0 -0
- pref_voting/dominance_axioms.py +387 -0
- pref_voting/generate_profiles.py +801 -0
- pref_voting/generate_spatial_profiles.py +198 -0
- pref_voting/generate_utility_profiles.py +160 -0
- pref_voting/generate_weighted_majority_graphs.py +506 -0
- pref_voting/grade_methods.py +184 -0
- pref_voting/grade_profiles.py +357 -0
- pref_voting/helper.py +370 -0
- pref_voting/invariance_axioms.py +671 -0
- pref_voting/io/__init__.py +0 -0
- pref_voting/io/readers.py +432 -0
- pref_voting/io/writers.py +256 -0
- pref_voting/iterative_methods.py +2425 -0
- pref_voting/maj_graph_ex1.png +0 -0
- pref_voting/mappings.py +577 -0
- pref_voting/margin_based_methods.py +2345 -0
- pref_voting/monotonicity_axioms.py +872 -0
- pref_voting/num_evaluation_method.py +77 -0
- pref_voting/other_axioms.py +161 -0
- pref_voting/other_methods.py +939 -0
- pref_voting/pairwise_profiles.py +547 -0
- pref_voting/prob_voting_method.py +105 -0
- pref_voting/probabilistic_methods.py +287 -0
- pref_voting/profiles.py +856 -0
- pref_voting/profiles_with_ties.py +1069 -0
- pref_voting/rankings.py +466 -0
- pref_voting/scoring_methods.py +481 -0
- pref_voting/social_welfare_function.py +59 -0
- pref_voting/social_welfare_functions.py +7 -0
- pref_voting/spatial_profiles.py +448 -0
- pref_voting/stochastic_methods.py +99 -0
- pref_voting/strategic_axioms.py +1394 -0
- pref_voting/swf_axioms.py +173 -0
- pref_voting/utility_functions.py +102 -0
- pref_voting/utility_methods.py +178 -0
- pref_voting/utility_profiles.py +333 -0
- pref_voting/variable_candidate_axioms.py +640 -0
- pref_voting/variable_voter_axioms.py +3747 -0
- pref_voting/voting_method.py +355 -0
- pref_voting/voting_method_properties.py +92 -0
- pref_voting/voting_methods.py +8 -0
- pref_voting/voting_methods_registry.py +136 -0
- pref_voting/weighted_majority_graphs.py +1539 -0
- pref_voting-1.16.31.dist-info/METADATA +208 -0
- pref_voting-1.16.31.dist-info/RECORD +92 -0
- pref_voting-1.16.31.dist-info/WHEEL +4 -0
- pref_voting-1.16.31.dist-info/licenses/LICENSE.txt +21 -0
pref_voting/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
# Distribution version string; matches the released wheel (pref_voting 1.16.31).
__version__ = '1.16.31'
|
pref_voting/analysis.py
ADDED
@@ -0,0 +1,496 @@
|
|
1
|
+
"""
|
2
|
+
File: analysis.py
|
3
|
+
Author: Wes Holliday (wesholliday@berkeley.edu) and Eric Pacuit (epacuit@umd.edu)
|
4
|
+
Date: August 9, 2022
|
5
|
+
Updated: May 9, 2023
|
6
|
+
|
7
|
+
Functions to analyze voting methods
|
8
|
+
"""
|
9
|
+
|
10
|
+
from pref_voting.generate_profiles import generate_profile
|
11
|
+
from functools import partial
|
12
|
+
from pathos.multiprocessing import ProcessingPool as Pool
|
13
|
+
from scipy.stats import binomtest
|
14
|
+
|
15
|
+
import pandas as pd
|
16
|
+
import numpy as np
|
17
|
+
|
18
|
+
def find_profiles_with_different_winners(
    vms,
    numbers_of_candidates=[3, 4, 5],
    numbers_of_voters=[5, 25, 50, 100],
    all_unique_winners=False,
    show_profiles=True,
    show_margin_graphs=True,
    show_winning_sets=True,
    show_rankings_counts=False,
    return_multiple_profiles=True,
    probmod="IC",
    num_trials=10000,
):
    """
    Given a list of voting methods, search for profiles with different winning sets.

    Args:
        vms (list(functions)): A list of voting methods,
        numbers_of_candidates (list(int), default = [3, 4, 5]): The numbers of candidates to check.
        numbers_of_voters (list(int), default = [5, 25, 50, 100]): The numbers of voters to check.
        all_unique_winners (bool, default = False): If True, only return profiles in which each voting method has a unique winner.
        show_profiles (bool, default=True): If True, show profiles with different winning sets for the voting methods when discovered.
        show_margin_graphs (bool, default=True): If True, show margin graphs of the profiles with different winning sets for the voting methods when discovered.
        show_winning_sets (bool, default=True): If True, show the different winning sets for the voting methods when discovered.
        show_rankings_counts (bool, default=False): If True, show the rankings and counts of the profiles with different winning sets for the voting methods.
        return_multiple_profiles (bool, default=True): If True, return all profiles that are found.
        probmod (str, default="IC"): The probability model to be passed to the ``generate_profile`` method
        num_trials (int, default=10000): The number of profiles to check for different winning sets.

    Returns:
        A single profile (if ``return_multiple_profiles`` is False) or a list of
        all profiles found.
    """
    profiles = list()
    for num_cands in numbers_of_candidates:
        for num_voters in numbers_of_voters:
            print(f"{num_cands} candidates, {num_voters} voters")
            for t in range(num_trials):
                prof = generate_profile(num_cands, num_voters, probmod=probmod)
                winning_sets = {vm.name: vm(prof) for vm in vms}
                # Winning sets as hashable tuples so they can be placed in a set.
                wss = [tuple(ws) for ws in winning_sets.values()]
                # Bug fix: the pairwise-distinctness test used to be
                # ``list(set(wss)) == list(wss)``, but a set does not preserve
                # insertion order, so that comparison could be False even when
                # all winning sets were distinct.  Comparing lengths is
                # order-independent and correct.
                all_distinct = len(set(wss)) == len(wss)
                if (not all_unique_winners or all(len(ws) == 1 for ws in wss)) and all_distinct:
                    if show_profiles:
                        prof.display()
                    if show_margin_graphs:
                        prof.display_margin_graph()
                    if show_winning_sets:
                        for vm in vms:
                            vm.display(prof)
                    if show_rankings_counts:
                        print(prof.rankings_counts)
                    if not return_multiple_profiles:
                        return prof
                    else:
                        profiles.append(prof)

    print(f"Found {len(profiles)} profiles with different winning sets")
    return profiles
|
75
|
+
|
76
|
+
|
77
|
+
|
78
|
+
def record_condorcet_efficiency_data(vms, num_cands, num_voters, pm, t):
    """Sample one profile from ``pm`` and report Condorcet-winner election data.

    Args:
        vms: list of voting methods (callables with a ``.name`` attribute).
        num_cands: number of candidates for the generated profile.
        num_voters: number of voters for the generated profile.
        pm: probability model; a function mapping (num_cands, num_voters) to a profile.
        t: trial index (unused; present so the function can be mapped over a range).

    Returns:
        dict with "has_cw" (whether the profile has a Condorcet winner) and
        "cw_winner" (voting-method name -> True iff the method uniquely elects
        the Condorcet winner).
    """
    profile = pm(num_cands, num_voters)
    condorcet_winner = profile.condorcet_winner()
    has_cw = condorcet_winner is not None

    return {
        "has_cw": has_cw,
        "cw_winner": {
            vm.name: has_cw and vm(profile) == [condorcet_winner] for vm in vms
        },
    }
|
87
|
+
|
88
|
+
def condorcet_efficiency_data(
    vms,
    numbers_of_candidates=[3, 4, 5],
    numbers_of_voters=[4, 10, 20, 50, 100, 500, 1000],
    prob_models = {"IC": lambda nc, nv: generate_profile(nc, nv)},
    min_num_samples=1000,
    max_num_samples=100_000,
    max_error=0.01,
    use_parallel=True,
    num_cpus=12,
):
    """
    Returns a Pandas DataFrame with the Condorcet efficiency of a list of voting methods.

    Args:
        vms (list(functions)): A list of voting methods,
        numbers_of_candidates (list(int), default = [3, 4, 5]): The numbers of candidates to check.
        numbers_of_voters (list(int), default = [4, 10, 20, 50, 100, 500, 1000]): The numbers of voters to check.
        prob_models (dict, default={"IC": ...}): A dictionary with keys as the names of the probability models and values as functions that generate profiles. Each function should accept a number of candidates and a number of voters.
        min_num_samples (int, default=1000): The minimum number of profiles to check; also the batch size for each round of sampling.
        max_num_samples (int, default=100_000): The maximum number of profiles to check.
        max_error (float, default=0.01): The maximum width to allow for the 95% confidence interval of each efficiency estimate.
        use_parallel (bool, default=True): If True, then use parallel processing.
        num_cpus (int, default=12): The number of (virtual) cpus to use if using parallel processing.

    """

    if use_parallel:
        # NOTE(review): the pool is never closed/joined; presumably cleanup is
        # left to garbage collection — confirm against pathos' documentation.
        pool = Pool(num_cpus)

    # Column-oriented accumulator; one row per (prob model, num_cands,
    # num_voters, voting method) combination.
    data_for_df = {
        "num_candidates": [],
        "num_voters": [],
        "prob_model": [],
        "voting_method": [],
        "condorcet_efficiency": [],
        "error": [],
        "num_samples": [],
        "percent_condorcet_winner": [],
        "percent_condorcet_winner_error": [],
        "min_num_samples": list(),
        "max_num_samples": list(),
        "max_error": list(),
    }
    for pm_name, pm in prob_models.items():
        for num_cands in numbers_of_candidates:
            for num_voters in numbers_of_voters:

                print(f"{pm_name}: {num_cands} candidates, {num_voters} voters")

                # Freeze the per-configuration arguments; the remaining
                # positional argument is the trial index consumed by map.
                get_data = partial(
                    record_condorcet_efficiency_data,
                    vms,
                    num_cands,
                    num_voters,
                    pm
                )

                num_samples = 0
                error_ranges = []
                # Per-method record of whether the Condorcet winner was elected,
                # over the profiles that had a Condorcet winner.
                elect_condorcet_winner = {
                    vm.name: [] for vm in vms
                }
                has_condorcet_winner = []
                # Adaptive sampling: draw batches of min_num_samples profiles
                # until every confidence interval is narrower than max_error
                # (or the max_num_samples cap is hit).
                while num_samples < min_num_samples or (any([(err[1] - err[0]) > max_error for err in error_ranges]) and num_samples < max_num_samples):

                    if use_parallel:
                        data = pool.map(get_data, range(min_num_samples))
                    else:
                        data = list(map(get_data, range(min_num_samples)))

                    for d in data:
                        has_condorcet_winner.append(d["has_cw"])
                        if d["has_cw"]:
                            for vm in vms:
                                elect_condorcet_winner[vm.name].append(d["cw_winner"][vm.name])

                    # (0, np.inf) forces another batch for any method with no
                    # observations yet (no sampled profile had a Condorcet winner).
                    error_ranges = [binomial_confidence_interval(elect_condorcet_winner[vm.name]) if len(elect_condorcet_winner[vm.name]) > 0 else (0, np.inf) for vm in vms]

                    num_samples += min_num_samples

                for vm in vms:
                    data_for_df["num_candidates"].append(num_cands)
                    data_for_df["num_voters"].append(num_voters)
                    data_for_df["prob_model"].append(pm_name)
                    data_for_df["voting_method"].append(vm.name)
                    # Proportion of profiles (with a Condorcet winner) in which
                    # vm uniquely elected the Condorcet winner.
                    data_for_df["condorcet_efficiency"].append(np.mean(elect_condorcet_winner[vm.name]))
                    err_interval = binomial_confidence_interval(elect_condorcet_winner[vm.name])
                    data_for_df["error"].append(err_interval[1] - err_interval[0])
                    data_for_df["num_samples"].append(num_samples)
                    data_for_df["percent_condorcet_winner"].append(np.mean(has_condorcet_winner))
                    err_interval = binomial_confidence_interval(has_condorcet_winner)
                    data_for_df["percent_condorcet_winner_error"].append(err_interval[1] - err_interval[0])
                    data_for_df["min_num_samples"].append(min_num_samples)
                    data_for_df["max_num_samples"].append(max_num_samples)
                    data_for_df["max_error"].append(max_error)

    return pd.DataFrame(data_for_df)
|
186
|
+
|
187
|
+
|
188
|
+
|
189
|
+
def record_num_winners_data(vms, num_cands, num_voters, probmod, probmod_param, t):
    """Sample one profile and record how many winners each voting method selects.

    The trailing argument ``t`` is the trial index; it is unused, but required
    so the function can be mapped over a range of trials.

    Returns:
        dict of the form {"num_winners": {vm name: number of winners}}.
    """
    profile = generate_profile(
        num_cands, num_voters, probmod=probmod, probmod_param=probmod_param
    )
    winner_counts = {vm.name: len(vm(profile)) for vm in vms}
    return {"num_winners": winner_counts}
|
196
|
+
|
197
|
+
def resoluteness_data(
    vms,
    numbers_of_candidates=[3, 4, 5],
    numbers_of_voters=[4, 10, 20, 50, 100, 500, 1000],
    probmods=["IC"],
    probmod_params=None,
    num_trials=10000,
    use_parallel=True,
    num_cpus=12,
):
    """
    Returns a Pandas DataFrame with resoluteness data for a list of voting methods.

    Args:
        vms (list(functions)): A list of voting methods,
        numbers_of_candidates (list(int), default = [3, 4, 5]): The numbers of candidates to check.
        numbers_of_voters (list(int), default = [4, 10, 20, 50, 100, 500, 1000]): The numbers of voters to check.
        probmods (list(str), default=["IC"]): The probability models to be passed to the ``generate_profile`` method
        probmod_params (list, default=None): Optional per-model parameters, one per entry of probmods.
        num_trials (int, default=10000): The number of profiles to check.
        use_parallel (bool, default=True): If True, then use parallel processing.
        num_cpus (int, default=12): The number of (virtual) cpus to use if using parallel processing.

    """

    # Align one (possibly None) parameter with each probability model.
    probmod_params_list = [None]*len(probmods) if probmod_params is None else probmod_params

    assert len(probmod_params_list) == len(probmods), "probmod_params must be a list of the same length as probmods"

    if use_parallel:
        # NOTE(review): the pool is never closed/joined; presumably cleanup is
        # left to garbage collection — confirm against pathos' documentation.
        pool = Pool(num_cpus)

    # Column-oriented accumulator; one row per (probmod, num_cands,
    # num_voters, voting method) combination.
    data_for_df = {
        "vm": list(),
        "num_cands": list(),
        "num_voters": list(),
        "probmod": list(),
        "probmod_param":list(),
        "num_trials": list(),
        "freq_multiple_winners": list(),
        "avg_num_winners": list(),
        "avg_percent_winners": list(),
    }
    for probmod,probmod_param in zip(probmods, probmod_params_list):
        for num_cands in numbers_of_candidates:
            for num_voters in numbers_of_voters:

                print(f"{num_cands} candidates, {num_voters} voters")
                # Freeze the per-configuration arguments; the remaining
                # positional argument is the trial index consumed by map.
                get_data = partial(
                    record_num_winners_data,
                    vms,
                    num_cands,
                    num_voters,
                    probmod,
                    probmod_param
                )

                if use_parallel:
                    data = pool.map(get_data, range(num_trials))
                else:
                    data = list(map(get_data, range(num_trials)))

                # Aggregate per method: total winners across trials and the
                # count of trials that produced more than one winner.
                num_winners = {vm.name: 0 for vm in vms}
                multiple_winners = {vm.name: 0 for vm in vms}

                for d in data:
                    for vm in vms:
                        num_winners[vm.name] += int(d["num_winners"][vm.name])
                        if d["num_winners"][vm.name] > 1:
                            multiple_winners[vm.name] += 1

                for vm in vms:
                    data_for_df["vm"].append(vm.name)
                    data_for_df["num_cands"].append(num_cands)
                    data_for_df["num_voters"].append(num_voters)
                    data_for_df["probmod"].append(probmod)
                    data_for_df["probmod_param"].append(probmod_param)
                    data_for_df["num_trials"].append(num_trials)
                    data_for_df["freq_multiple_winners"].append(multiple_winners[vm.name] / num_trials)
                    data_for_df["avg_num_winners"].append(
                        num_winners[vm.name] / num_trials
                    )
                    # Average winners expressed as a fraction of the candidates.
                    data_for_df["avg_percent_winners"].append(
                        (num_winners[vm.name] / (num_cands * num_trials))
                    )

    return pd.DataFrame(data_for_df)
|
283
|
+
|
284
|
+
# helper function for axiom_violations_data
def record_axiom_violation_data(
    axioms,
    vms,
    num_cands,
    num_voters,
    probmod,
    verbose,
    t
):
    """Sample one profile and check every axiom against every voting method.

    The trailing argument ``t`` is the trial index; it is unused, but required
    so the function can be mapped over a range of trials.

    Returns:
        nested dict: axiom name -> voting method name -> Boolean (True when
        the profile witnesses a violation of the axiom by the method).
    """
    profile = generate_profile(num_cands, num_voters, probmod=probmod)

    return {
        axiom.name: {
            vm.name: axiom.has_violation(profile, vm, verbose=verbose)
            for vm in vms
        }
        for axiom in axioms
    }
|
298
|
+
|
299
|
+
def axiom_violations_data(
    axioms,
    vms,
    numbers_of_candidates=[3, 4, 5],
    numbers_of_voters=[4, 5, 10, 11, 20, 21, 50, 51, 100, 101, 500, 501, 1000, 1001],
    probmods=["IC"],
    num_trials=10000,
    verbose=False,
    use_parallel=True,
    num_cpus=12,
):
    """
    Returns a Pandas DataFrame with axiom violation data for a list of voting methods.

    Args:
        axioms (list): A list of Axiom objects to check.
        vms (list(functions)): A list of voting methods,
        numbers_of_candidates (list(int), default = [3, 4, 5]): The numbers of candidates to check.
        numbers_of_voters (list(int), default = [4, 5, ..., 1000, 1001]): The numbers of voters to check.
        probmods (list(str), default=["IC"]): The probability models to be passed to the ``generate_profile`` method
        num_trials (int, default=10000): The number of profiles to check for axiom violations.
        verbose (bool, default=False): If True, display details of each violation (forced off when running in parallel).
        use_parallel (bool, default=True): If True, then use parallel processing.
        num_cpus (int, default=12): The number of (virtual) cpus to use if using parallel processing.

    """

    if use_parallel:
        # NOTE(review): the pool is never closed/joined; presumably cleanup is
        # left to garbage collection — confirm against pathos' documentation.
        pool = Pool(num_cpus)

    # Column-oriented accumulator; one row per (axiom, vm, num_cands,
    # num_voters, probmod) combination.
    data_for_df = {
        "axiom": list(),
        "vm": list(),
        "num_cands": list(),
        "num_voters": list(),
        "probmod": list(),
        "num_trials": list(),
        "num_violations": list(),
    }
    for probmod in probmods:
        print(f"{probmod} probability model")
        for num_cands in numbers_of_candidates:
            for num_voters in numbers_of_voters:
                #print(f"{num_cands} candidates, {num_voters} voters")
                # Verbose per-violation output would interleave unpredictably
                # across worker processes, so it is disabled when parallel.
                _verbose = verbose if not use_parallel else False
                # Freeze the per-configuration arguments; the remaining
                # positional argument is the trial index consumed by map.
                get_data = partial(
                    record_axiom_violation_data,
                    axioms,
                    vms,
                    num_cands,
                    num_voters,
                    probmod,
                    _verbose
                )

                if use_parallel:
                    data = pool.map(get_data, range(num_trials))
                else:
                    data = list(map(get_data, range(num_trials)))

                for ax in axioms:
                    for vm in vms:
                        data_for_df["axiom"].append(ax.name)
                        data_for_df["vm"].append(vm.name)
                        data_for_df["num_cands"].append(num_cands)
                        data_for_df["num_voters"].append(num_voters)
                        data_for_df["probmod"].append(probmod)
                        data_for_df["num_trials"].append(num_trials)
                        # Booleans sum as 0/1, so this counts the trials in
                        # which vm violated ax.
                        data_for_df["num_violations"].append(sum([d[ax.name][vm.name] for d in data]))
    print("Done.")
    return pd.DataFrame(data_for_df)
|
368
|
+
|
369
|
+
|
370
|
+
def estimated_variance_of_sampling_dist(
    values_for_each_experiment,
    mean_for_each_experiment=None):
    """Estimate, per experiment, the variance of the sampling distribution of the mean.

    ``values_for_each_experiment`` is a 2d numpy array with one experiment per
    row; NaN entries are treated as missing observations.  For each row the
    estimate is sum((x - mean)^2) / (n * (n - 1)), where n counts the non-NaN
    values; rows with n < 2 yield NaN.
    """
    if mean_for_each_experiment is None:
        mean_for_each_experiment = np.nanmean(values_for_each_experiment, axis=1)

    # Number of non-missing observations in each row.
    obs_counts = np.sum(~np.isnan(values_for_each_experiment), axis=1)

    deviations = values_for_each_experiment - mean_for_each_experiment[:, np.newaxis]
    sum_sq_deviations = np.nansum(deviations ** 2, axis=1)

    denominator = obs_counts * (obs_counts - 1)
    # np.where evaluates both branches; rows with denominator 0 end up NaN.
    return np.where(
        denominator != 0.0,
        (1 / denominator) * sum_sq_deviations,
        np.nan
    )
|
387
|
+
|
388
|
+
|
389
|
+
def binomial_confidence_interval(xs, confidence_level=0.95):
|
390
|
+
"""
|
391
|
+
Calculate the exact confidence interval for a binomial proportion.
|
392
|
+
|
393
|
+
This function computes the confidence interval for the true proportion of successes in a binary dataset using the exact binomial test. It is particularly useful for small sample sizes or when the normal approximation is not appropriate.
|
394
|
+
|
395
|
+
Parameters
|
396
|
+
----------
|
397
|
+
xs : array-like
|
398
|
+
A sequence of binary observations (0 for failure, 1 for success).
|
399
|
+
confidence_level : float, optional
|
400
|
+
The confidence level for the interval, between 0 and 1. Default is 0.95.
|
401
|
+
|
402
|
+
Returns
|
403
|
+
-------
|
404
|
+
tuple of float
|
405
|
+
A tuple containing the lower and upper bounds of the confidence interval.
|
406
|
+
|
407
|
+
Examples
|
408
|
+
--------
|
409
|
+
>>> xs = [1, 0, 1, 1, 0, 1, 0, 1, 1, 0]
|
410
|
+
>>> binomial_confidence_interval(xs)
|
411
|
+
(0.4662563841506048, 0.9337436158493953)
|
412
|
+
|
413
|
+
Notes
|
414
|
+
-----
|
415
|
+
- Uses the `binomtest` function from `scipy.stats` with the 'exact' method.
|
416
|
+
- Suitable for datasets where the normal approximation may not hold.
|
417
|
+
|
418
|
+
References
|
419
|
+
----------
|
420
|
+
.. [1] "Binomial Test", SciPy v1.7.1 Manual,
|
421
|
+
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binomtest.html
|
422
|
+
"""
|
423
|
+
binom_ci = binomtest(int(np.sum(xs)), len(xs)).proportion_ci(
|
424
|
+
confidence_level=confidence_level, method='exact'
|
425
|
+
)
|
426
|
+
return (binom_ci.low, binom_ci.high)
|
427
|
+
|
428
|
+
def estimated_std_error(values_for_each_experiment, mean_for_each_experiment=None):
    """Estimated standard error of the mean for each row of a 2d numpy array.

    Simply the square root of estimated_variance_of_sampling_dist; rows with
    fewer than two non-NaN observations yield NaN.
    """
    sampling_variance = estimated_variance_of_sampling_dist(
        values_for_each_experiment,
        mean_for_each_experiment=mean_for_each_experiment,
    )
    return np.sqrt(sampling_variance)
|
431
|
+
|
432
|
+
def means_with_estimated_standard_error(
    generate_samples,
    max_std_error,
    initial_trials=1000,
    step_trials=1000,
    min_num_trials=10_000,
    max_num_trials=None,
    verbose=False
):
    """
    For each list of numbers produced by generate_samples, returns the means, the estimated standard error (https://en.wikipedia.org/wiki/Standard_error) of the means, the variance of the samples, and the total number of trials.

    Uses the estimated_variance_of_sampling_dist (as described in https://berkeley-stat243.github.io/stat243-fall-2023/units/unit9-sim.html) and estimated_std_error functions.

    Args:
        generate_samples (function): A function that returns a 2d numpy array of samples. It should take two arguments: num_samples and step (only used if samples are drawn from a pre-computed source in order to ensure that we get new samples during the while loop below).
        max_std_error (float): The desired estimated standard error for the mean of each sample.
        initial_trials (int, default=1000): The number of samples to initially generate.
        step_trials (int, default=1000): The number of samples to generate in each step.
        min_num_trials (int, default=10000): The minimum number of trials to run.
        max_num_trials (int, default=None): If not None, then the maximum number of trials to run.
        verbose (bool, default=False): If True, then print progress information.

    Returns:
        A tuple (means, est_std_errors, variances, num_trials) where means is an array of the means of the samples, est_std_errors is an array of estimated standard errors of the samples, variances is an array of the variances of the samples, and num_trials is the total number of trials.

    """

    # samples is a 2d numpy array; one experiment per row, trials as columns.
    step = 0
    samples = generate_samples(num_samples = initial_trials, step = step)

    means = np.nanmean(samples, axis=1)
    variances = np.nanvar(samples, axis=1)
    est_std_errors = estimated_std_error(
        samples,
        mean_for_each_experiment=means)

    if verbose:
        print("Initial number of trials:", initial_trials)
        print(f"Remaining estimated standard errors greater than {max_std_error}:", np.sum(est_std_errors > max_std_error))
        print(f"Estimated standard errors that are still greater than {max_std_error}:\n",est_std_errors[est_std_errors > max_std_error])

    num_trials = initial_trials

    # Keep drawing batches until every estimated standard error is defined
    # (non-NaN) and at most max_std_error, and at least min_num_trials have
    # been run — unless max_num_trials (if set) would be exceeded.
    while (np.isnan(est_std_errors).any() or np.any(est_std_errors > max_std_error) or (num_trials < min_num_trials)) and (max_num_trials is None or num_trials < max_num_trials):
        if verbose:
            print("Number of trials:", num_trials)
            print(f"Remaining estimated standard errors greater than {max_std_error}:", np.sum(est_std_errors > max_std_error))
            print(f"Estimated standard errors that are still greater than {max_std_error}:\n",est_std_errors[est_std_errors > max_std_error])

        # Advance the step counter so a pre-computed source yields new samples.
        step += 1
        new_samples = generate_samples(num_samples=step_trials, step=step)

        # Append the new trials as extra columns.
        samples = np.concatenate((samples, new_samples), axis=1)

        num_trials += step_trials

        means = np.nanmean(samples, axis=1)
        variances = np.nanvar(samples, axis=1)
        est_std_errors = estimated_std_error(
            samples,
            mean_for_each_experiment=means)

    return means, est_std_errors, variances, num_trials
|
pref_voting/axiom.py
ADDED
@@ -0,0 +1,38 @@
|
|
1
|
+
"""
|
2
|
+
File: axiom.py
|
3
|
+
Author: Wes Holliday (wesholliday@berkeley.edu) and Eric Pacuit (epacuit@umd.edu)
|
4
|
+
Date: April 27, 2023
|
5
|
+
|
6
|
+
Define the Axiom class.
|
7
|
+
"""
|
8
|
+
|
9
|
+
class Axiom(object):
    """
    A class to represent voting method axioms.

    Args:
        name (string): The human-readable name of the axiom.
        has_violation (function): function that returns a Boolean which is True when there is a violation of the axiom.
        find_all_violations (function): function that returns all instances of violations of the axiom.

    Attributes:
        satisfying_vms (list): names of voting methods satisfying the axiom.
        violating_vms (list): names of voting methods violating the axiom.
    """
    def __init__(self, name, has_violation, find_all_violations):
        self.name = name
        self.has_violation = has_violation
        self.find_all_violations = find_all_violations
        # Registries of voting-method names known to satisfy/violate the axiom.
        self.satisfying_vms = list()
        self.violating_vms = list()

    # Bug fix: all four methods below were missing the ``self`` parameter, so
    # any call (e.g. ``ax.satisfies(vm)``) raised a NameError on ``self``.

    def satisfies(self, vm):
        """Return True if vm is registered as satisfying this axiom."""
        return vm.name in self.satisfying_vms

    def violates(self, vm):
        """Return True if vm is registered as violating this axiom."""
        return vm.name in self.violating_vms

    def add_satisfying_vms(self, vms):
        """Record additional voting methods (by name) known to satisfy the axiom."""
        self.satisfying_vms += vms

    def add_violating_vms(self, vms):
        """Record additional voting methods (by name) known to violate the axiom."""
        self.violating_vms += vms
|
@@ -0,0 +1,129 @@
|
|
1
|
+
|
2
|
+
from pref_voting.profiles import Profile
|
3
|
+
from pref_voting.profiles_with_ties import ProfileWithTies
|
4
|
+
from pref_voting.rankings import Ranking
|
5
|
+
from itertools import combinations, chain, permutations
|
6
|
+
import copy
|
7
|
+
|
8
|
+
def display_mg(edata):
    """Display the margin graph of the given election data.

    Profiles are rendered via their margin graph; any other election data is
    assumed to have a ``display`` method of its own.

    Idiom fix: use ``isinstance`` rather than ``type(...) ==`` so subclasses
    of Profile / ProfileWithTies are also rendered via their margin graph.
    """
    if isinstance(edata, (Profile, ProfileWithTies)):
        edata.display_margin_graph()
    else:
        edata.display()
|
13
|
+
|
14
|
+
def list_to_string(cands, cmap):
    """Render candidates as a set-style string, mapping each through cmap."""
    candidate_names = [cmap[c] for c in cands]
    return "{" + ', '.join(candidate_names) + "}"
|
16
|
+
|
17
|
+
|
18
|
+
def swap_candidates(ranking, c1, c2):
|
19
|
+
"""
|
20
|
+
Swap two candidates in a ranking.
|
21
|
+
:param ranking: either a tuple or a list of candidates or a Ranking object
|
22
|
+
:param c1: candidate 1
|
23
|
+
:param c2: candidate 2
|
24
|
+
:return: a new ranking (a tuple) with c1 and c2 swapped
|
25
|
+
"""
|
26
|
+
|
27
|
+
if isinstance(ranking, Ranking):
|
28
|
+
|
29
|
+
rmap = ranking.rmap
|
30
|
+
|
31
|
+
if c1 not in rmap or c2 not in rmap:
|
32
|
+
raise ValueError("One of the candidates is not in the ranking")
|
33
|
+
|
34
|
+
# swap the values associated with c1 and c2
|
35
|
+
new_rmap = rmap.copy()
|
36
|
+
new_rmap[c1], new_rmap[c2] = new_rmap[c2], new_rmap[c1]
|
37
|
+
new_ranking = Ranking(new_rmap)
|
38
|
+
|
39
|
+
elif isinstance(ranking, (list, tuple)):
|
40
|
+
new_ranking = []
|
41
|
+
for c in ranking:
|
42
|
+
if c == c1:
|
43
|
+
new_ranking.append(c2)
|
44
|
+
elif c == c2:
|
45
|
+
new_ranking.append(c1)
|
46
|
+
else:
|
47
|
+
new_ranking.append(c)
|
48
|
+
new_ranking = tuple(new_ranking)
|
49
|
+
return new_ranking
|
50
|
+
|
51
|
+
|
52
|
+
|
53
|
+
def equal_size_partitions_with_duplicates(lst):
|
54
|
+
"""
|
55
|
+
Generate all partitions of a list into two distinct subsets of equal size,
|
56
|
+
including cases where the input list contains duplicates and elements
|
57
|
+
that do not support ordering (<).
|
58
|
+
|
59
|
+
Parameters:
|
60
|
+
lst (list): The input list to partition. Must have an even number of elements.
|
61
|
+
|
62
|
+
Returns:
|
63
|
+
list of tuples: A list of tuples, where each tuple contains two lists of equal size.
|
64
|
+
"""
|
65
|
+
if len(lst) % 2 != 0:
|
66
|
+
raise ValueError("The input list must have an even number of elements.")
|
67
|
+
|
68
|
+
n = len(lst) // 2
|
69
|
+
partitions = []
|
70
|
+
|
71
|
+
seen = set()
|
72
|
+
|
73
|
+
for subset in combinations(lst, n):
|
74
|
+
complement = lst[:]
|
75
|
+
for item in subset:
|
76
|
+
complement.remove(item)
|
77
|
+
|
78
|
+
partition_key = frozenset([frozenset(subset), frozenset(complement)])
|
79
|
+
if partition_key not in seen:
|
80
|
+
seen.add(partition_key)
|
81
|
+
partitions.append((list(subset), complement))
|
82
|
+
|
83
|
+
return partitions
|
84
|
+
|
85
|
+
|
86
|
+
def get_rank(ranking, c):
    """
    Get the (normalized) rank of a candidate in a ranking.
    :param ranking: either a tuple or a list of candidates or a Ranking object
    :param c: candidate
    :return: the rank of c in the ranking
    """
    if isinstance(ranking, (list, tuple)):
        return ranking.index(c)
    elif isinstance(ranking, Ranking):
        # Normalize a deep copy so the caller's Ranking is left untouched.
        normalized = copy.deepcopy(ranking)
        normalized.normalize_ranks()
        return normalized.rmap[c]
    else:
        raise ValueError("Invalid input type")
|
101
|
+
|
102
|
+
|
103
|
+
# generate all subsets of a set, use combinations
def powerset(lst):
    """Return an iterator over every subset (as a tuple) of lst, smallest first."""
    items = list(lst)
    subsets_by_size = (combinations(items, size) for size in range(len(items) + 1))
    return chain.from_iterable(subsets_by_size)
|
107
|
+
|
108
|
+
def linear_orders_with_reverse(cands):
    """Return every linear order over cands paired with its reversal."""
    return [(order, order[::-1]) for order in permutations(cands)]
|
115
|
+
|
116
|
+
def remove_first_occurrences(rankings, r1, r2):
    """Return a copy of rankings with the first occurrence of r1 and the first
    occurrence of r2 removed (each removed at most once)."""
    kept = []
    r1_pending = True
    r2_pending = True

    for ranking in rankings:
        if r1_pending and ranking == r1:
            r1_pending = False  # drop the first match of r1
        elif r2_pending and ranking == r2:
            r2_pending = False  # drop the first match of r2
        else:
            kept.append(ranking)

    return kept
|
pref_voting/axioms.py
ADDED
@@ -0,0 +1,10 @@
|
|
1
|
+
from pref_voting.dominance_axioms import *
|
2
|
+
from pref_voting.monotonicity_axioms import *
|
3
|
+
from pref_voting.invariance_axioms import *
|
4
|
+
from pref_voting.strategic_axioms import *
|
5
|
+
from pref_voting.variable_voter_axioms import *
|
6
|
+
from pref_voting.variable_candidate_axioms import *
|
7
|
+
from pref_voting.axiom import Axiom
|
8
|
+
|
9
|
+
|
10
|
+
# Collect every Axiom instance brought into this module's namespace by the
# star-imports above, keyed by its variable name.
axioms_dict = {name: obj for name, obj in globals().items() if isinstance(obj, Axiom)}