openskill 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/README.md +384 -0
- data/lib/openskill/models/common.rb +62 -0
- data/lib/openskill/models/plackett_luce.rb +694 -0
- data/lib/openskill/statistics/normal.rb +34 -0
- data/lib/openskill/version.rb +5 -0
- data/lib/openskill.rb +10 -0
- metadata +118 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: a7fcde3a56dfbdd99df6a10cfdebf1fb95c0b2b0fe21513792a3e3103e0606f1
+  data.tar.gz: be466ac8799195ba2a0353ac2bc94be71857c6c27d824c37db59d1ab088c13ca
+SHA512:
+  metadata.gz: 9d3c96ae581d28403b828e90788582b06a6d77c7c6a418dfbe1b98abf0e164abdfcf4b1b28151a486a2ed7ab0ab902f7803fca4eb7861ca27c5550ab73c2e4a7
+  data.tar.gz: 867ce922496ccac80c0ea67195d2bf339afb253c7d52d73dbfd2c830848e9d762591b182e725f9e19262d06d209415b7b49921dffdb19b8fee92e8a12dca3903
data/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 OpenSkill Contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,384 @@
+# OpenSkill
+
+A Ruby implementation of the OpenSkill rating system for multiplayer games. OpenSkill is a Bayesian skill rating system that can handle teams of varying sizes, asymmetric matches, and complex game scenarios.
+
+[]()
+[]()
+[](LICENSE)
+
+## Features
+
+- 🎮 **Multiplayer Support**: Handle 2+ teams of any size
+- ⚖️ **Asymmetric Teams**: Teams don't need equal player counts
+- 🎯 **Multiple Ranking Methods**: Use ranks or scores
+- **Prediction Methods**: Predict win probabilities, draws, and final rankings
+- **Player Weights**: Account for partial participation or contribution
+- **Score Margins**: Factor in impressive wins
+- **Tie Handling**: Properly handle drawn matches
+- ⚡ **Fast**: Efficient Ruby implementation
+- 🧪 **Well Tested**: Comprehensive test suite matching reference implementation
+
+## Installation
+
+Add this line to your application's Gemfile:
+
+```ruby
+gem 'openskill'
+```
+
+And then execute:
+
+```bash
+bundle install
+```
+
+Or install it yourself as:
+
+```bash
+gem install openskill
+```
+
+## Quick Start
+
+```ruby
+require 'openskill'
+
+# Create a model (Plackett-Luce by default)
+model = OpenSkill::Models::PlackettLuce.new
+
+# Create player ratings
+alice = model.create_rating(name: "Alice")
+bob = model.create_rating(name: "Bob")
+charlie = model.create_rating(name: "Charlie")
+dave = model.create_rating(name: "Dave")
+
+# Simple 1v1 match (alice wins)
+team1 = [alice]
+team2 = [bob]
+new_ratings = model.calculate_ratings([team1, team2])
+alice, bob = new_ratings.flatten
+
+puts "Alice: #{alice.mu.round(2)} ± #{alice.sigma.round(2)}"
+puts "Bob: #{bob.mu.round(2)} ± #{bob.sigma.round(2)}"
+```
+
+## Usage
+
+### Creating Ratings
+
+```ruby
+model = OpenSkill::Models::PlackettLuce.new
+
+# Create with defaults (mu=25, sigma=8.333)
+player = model.create_rating
+
+# Create with custom values
+player = model.create_rating(mu: 30.0, sigma: 5.0, name: "Alice")
+
+# Load from database [mu, sigma]
+player = model.load_rating([28.5, 7.2], name: "Bob")
+```
+
+### Calculating New Ratings
+
+#### Simple Match (Team 1 wins)
+
+```ruby
+team1 = [alice, bob]
+team2 = [charlie, dave]
+
+updated_teams = model.calculate_ratings([team1, team2])
+```
+
+#### Match with Explicit Ranks
+
+Lower rank = better performance (0 is best)
+
+```ruby
+teams = [[alice], [bob], [charlie]]
+# Charlie wins, Bob second, Alice third
+updated = model.calculate_ratings(teams, ranks: [2, 1, 0])
+```
+
+#### Match with Scores
+
+Higher score = better performance
+
+```ruby
+teams = [[alice, bob], [charlie, dave]]
+# Team 2 wins 100-80
+updated = model.calculate_ratings(teams, scores: [80, 100])
+```
+
+#### Match with Ties
+
+```ruby
+teams = [[alice], [bob], [charlie]]
+# Alice and Charlie tie for first, Bob comes third
+updated = model.calculate_ratings(teams, ranks: [0, 2, 0])
+```
+
+#### Player Contribution Weights
+
+When players contribute different amounts:
+
+```ruby
+teams = [
+  [alice, bob],    # Alice contributed more
+  [charlie, dave]  # Dave carried the team
+]
+
+updated = model.calculate_ratings(
+  teams,
+  weights: [[2.0, 1.0], [1.0, 2.0]]
+)
+```
+
+#### Score Margins (Impressive Wins)
+
+Factor in score differences:
+
+```ruby
+model = OpenSkill::Models::PlackettLuce.new(margin: 5.0)
+
+# Large score difference means more rating change
+updated = model.calculate_ratings(
+  [[alice], [bob]],
+  scores: [100, 20] # Alice dominated
+)
+```
+
+### Predictions
+
+#### Win Probability
+
+```ruby
+teams = [[alice, bob], [charlie, dave], [eve]]
+probabilities = model.predict_win_probability(teams)
+# => [0.35, 0.45, 0.20] (sums to 1.0)
+```
+
+#### Draw Probability
+
+Higher values mean more evenly matched:
+
+```ruby
+probability = model.predict_draw_probability([[alice], [bob]])
+# => 0.25
+```
+
+#### Rank Prediction
+
+```ruby
+teams = [[alice], [bob], [charlie]]
+predictions = model.predict_rank_probability(teams)
+# => [[1, 0.504], [2, 0.333], [3, 0.163]]
+# Format: [predicted_rank, probability]
+```
+
+### Rating Display
+
+The `ordinal` method provides a conservative rating estimate:
+
+```ruby
+player = model.create_rating(mu: 30.0, sigma: 5.0)
+
+# 99.7% confidence (3 standard deviations)
+puts player.ordinal # => 15.0 (30 - 3*5)
+
+# 99% confidence
+puts player.ordinal(z: 2.576) # => 17.12
+
+# For leaderboards
+players.sort_by(&:ordinal).reverse
+```
+
+### Model Options
+
+```ruby
+model = OpenSkill::Models::PlackettLuce.new(
+  mu: 25.0,           # Initial mean skill
+  sigma: 25.0 / 3,    # Initial skill uncertainty
+  beta: 25.0 / 6,     # Performance variance
+  kappa: 0.0001,      # Minimum variance (regularization)
+  tau: 25.0 / 300,    # Skill decay per match
+  margin: 0.0,        # Score margin threshold
+  limit_sigma: false, # Prevent sigma from increasing
+  balance: false      # Emphasize rating outliers in teams
+)
+```
+
+### Advanced Features
+
+#### Prevent Rating Uncertainty from Growing
+
+```ruby
+# Useful for active players
+updated = model.calculate_ratings(teams, limit_sigma: true)
+```
+
+#### Balance Outliers in Teams
+
+```ruby
+model = OpenSkill::Models::PlackettLuce.new(balance: true)
+# Gives more weight to rating differences within teams
+```
+
+#### Custom Tau (Skill Decay)
+
+```ruby
+# Higher tau = more rating volatility
+updated = model.calculate_ratings(teams, tau: 1.0)
+```
+
+## How It Works
+
+OpenSkill uses a Bayesian approach to model player skill as a normal distribution:
+
+- **μ (mu)**: The mean skill level
+- **σ (sigma)**: The uncertainty about the skill level
+
+After each match:
+1. Compute team strengths from individual player ratings
+2. Calculate expected outcomes based on team strengths
+3. Update ratings based on actual vs expected performance
+4. Reduce uncertainty (sigma) as more matches are played
+
+The **ordinal** value (`μ - 3σ`) provides a conservative estimate where the true skill is 99.7% likely to be higher.
+
+## Why OpenSkill?
+
+### vs Elo
+- ✅ Handles multiplayer (3+ players/teams)
+- ✅ Works with team games
+- ✅ Accounts for rating uncertainty
+- ✅ Faster convergence to true skill
+
+### vs TrueSkill
+- ✅ Open source (MIT license)
+- ✅ Faster computation
+- ✅ Similar accuracy
+- ✅ More flexible (weights, margins, custom parameters)
+
+## API Design Philosophy
+
+This Ruby implementation uses idiomatic Ruby naming conventions:
+
+| Python API | Ruby API |
+|------------|----------|
+| `model.rating()` | `model.create_rating` |
+| `model.create_rating([25, 8.3])` | `model.load_rating([25, 8.3])` |
+| `model.rate(teams)` | `model.calculate_ratings(teams)` |
+| `model.predict_win(teams)` | `model.predict_win_probability(teams)` |
+| `model.predict_draw(teams)` | `model.predict_draw_probability(teams)` |
+| `model.predict_rank(teams)` | `model.predict_rank_probability(teams)` |
+
+## Examples
+
+### 2v2 Team Game
+
+```ruby
+model = OpenSkill::Models::PlackettLuce.new
+
+# Create players
+alice = model.create_rating(name: "Alice")
+bob = model.create_rating(name: "Bob")
+charlie = model.create_rating(name: "Charlie")
+dave = model.create_rating(name: "Dave")
+
+# Match: Alice + Bob vs Charlie + Dave (Team 1 wins)
+teams = [[alice, bob], [charlie, dave]]
+updated = model.calculate_ratings(teams)
+
+# Updated ratings
+updated[0].each { |p| puts "#{p.name}: #{p.ordinal.round(1)}" }
+updated[1].each { |p| puts "#{p.name}: #{p.ordinal.round(1)}" }
+```
+
+### Free-for-All (5 players)
+
+```ruby
+players = 5.times.map { model.create_rating }
+
+# Player 3 wins, 1 second, 4 third, 0 fourth, 2 fifth
+updated = model.calculate_ratings(
+  players.map { |p| [p] },
+  ranks: [3, 1, 4, 0, 2]
+)
+```
+
+### Tracking Player Progress
+
+```ruby
+class Player
+  attr_accessor :name, :mu, :sigma
+
+  def initialize(name, model)
+    @name = name
+    rating = model.create_rating
+    @mu = rating.mu
+    @sigma = rating.sigma
+  end
+
+  def to_rating(model)
+    model.load_rating([@mu, @sigma], name: @name)
+  end
+
+  def update_from_rating!(rating)
+    @mu = rating.mu
+    @sigma = rating.sigma
+  end
+
+  def ordinal(z: 3.0)
+    @mu - z * @sigma
+  end
+end
+
+# Usage
+model = OpenSkill::Models::PlackettLuce.new
+alice = Player.new("Alice", model)
+bob = Player.new("Bob", model)
+
+# Play match
+teams = [[alice.to_rating(model)], [bob.to_rating(model)]]
+updated = model.calculate_ratings(teams)
+
+# Update players
+alice.update_from_rating!(updated[0][0])
+bob.update_from_rating!(updated[1][0])
+```
+
+## Testing
+
+```bash
+bundle install
+bundle exec rake test
+```
+
+## Development
+
+This gem follows the [OpenSkill specification](https://openskill.me) and maintains compatibility with the Python reference implementation.
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## License
+
+MIT License. See [LICENSE](LICENSE) for details.
+
+## References
+
+- [OpenSkill Python Implementation](https://github.com/vivekjoshy/openskill.py)
+- [OpenSkill Documentation](https://openskill.me)
+- Original Paper: [A Bayesian Approximation Method for Online Ranking](https://jmlr.org/papers/v12/weng11a.html) by Ruby C. Weng and Chih-Jen Lin
+
+## Acknowledgments
+
+This Ruby implementation is based on the excellent [openskill.py](https://github.com/vivekjoshy/openskill.py) Python library by Vivek Joshy.
+
+The Plackett-Luce model implemented here is based on the work by Weng and Lin (2011), providing a faster and more accessible alternative to Microsoft's TrueSkill system.
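The ordinal described in the README's "How It Works" section is simply `mu - z * sigma` with `z` defaulting to 3.0. The following is a minimal sketch of that arithmetic using illustrative player names and values; only the formula itself comes from the package.

```ruby
require 'openskill'

model = OpenSkill::Models::PlackettLuce.new

# ordinal is mu - z * sigma, with z defaulting to 3.0
alice = model.create_rating(mu: 30.0, sigma: 5.0, name: "Alice")
alice.ordinal               # => 15.0 (30 - 3 * 5)
alice.mu - 3 * alice.sigma  # => 15.0, the same value computed by hand

# Conservative leaderboard: highest ordinal first
bob = model.create_rating(mu: 27.0, sigma: 2.0, name: "Bob")
[alice, bob].sort_by { |p| -p.ordinal }.map(&:name)  # => ["Bob", "Alice"] (21.0 vs 15.0)
```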
data/lib/openskill/models/common.rb
ADDED
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+module OpenSkill
+  module Models
+    # Common utility functions shared across models
+    module Common
+      # Normalize a vector to a target range
+      #
+      # @param vector [Array<Numeric>] the input vector
+      # @param target_min [Numeric] the target minimum value
+      # @param target_max [Numeric] the target maximum value
+      # @return [Array<Float>] the normalized vector
+      def self.normalize(vector, target_min, target_max)
+        return [] if vector.empty?
+
+        source_min = vector.min
+        source_max = vector.max
+        source_range = source_max - source_min
+
+        # If all values are the same, return target_min for all
+        return Array.new(vector.size, target_min.to_f) if source_range.zero?
+
+        target_range = target_max - target_min
+
+        vector.map do |value|
+          ((value - source_min) / source_range) * target_range + target_min
+        end
+      end
+
+      # Transpose a 2D matrix
+      #
+      # @param matrix [Array<Array>] the input matrix
+      # @return [Array<Array>] the transposed matrix
+      def self.matrix_transpose(matrix)
+        return [] if matrix.empty? || matrix[0].empty?
+
+        matrix[0].zip(*matrix[1..])
+      end
+
+      # Sort objects by tenet and return both sorted objects and indices to restore order
+      #
+      # @param tenet [Array<Numeric>] values to sort by
+      # @param objects [Array] objects to sort
+      # @return [Array<(Array, Array<Numeric>)>] sorted objects and restoration indices
+      def self.unwind(tenet, objects)
+        return [[], []] if objects.empty?
+
+        # Create array of [tenet_value, [object, original_index]]
+        indexed = tenet.each_with_index.map { |t, i| [t, [objects[i], i]] }
+
+        # Sort by tenet value
+        sorted = indexed.sort_by { |t, _| t }
+
+        # Extract sorted objects and their indices
+        sorted_objects = sorted.map { |_, (obj, _)| obj }
+        restoration_indices = sorted.map { |_, (_, idx)| idx }
+
+        [sorted_objects, restoration_indices]
+      end
+    end
+  end
+end
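The helpers above are easiest to read with concrete inputs. A short usage sketch with made-up values follows directly from the definitions: `normalize` rescales linearly into the target range, and `unwind` sorts objects by a key array while returning the indices that `calculate_ratings` later uses to restore the original team order.

```ruby
require 'openskill'

common = OpenSkill::Models::Common

# normalize: linear rescale into [target_min, target_max]
common.normalize([1.0, 2.0, 3.0], 1, 2)  # => [1.0, 1.5, 2.0]
common.normalize([5.0, 5.0], 1, 2)       # => [1.0, 1.0] (flat input collapses to target_min)

# unwind: sort objects by a key array, remembering how to restore the order
sorted, order = common.unwind([2, 0, 1], %w[a b c])
sorted                              # => ["b", "c", "a"]
order                               # => [1, 2, 0]
common.unwind(order, sorted).first  # => ["a", "b", "c"] (original order restored)
```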
data/lib/openskill/models/plackett_luce.rb
ADDED
@@ -0,0 +1,694 @@
+# frozen_string_literal: true
+
+require 'securerandom'
+require_relative '../statistics/normal'
+require_relative 'common'
+
+module OpenSkill
+  module Models
+    # Plackett-Luce rating model
+    #
+    # This is a Bayesian rating system for multiplayer games that can handle
+    # teams of varying sizes and asymmetric matches.
+    class PlackettLuce
+      attr_reader :mu, :sigma, :beta, :kappa, :tau, :margin, :limit_sigma, :balance, :gamma
+
+      # Default gamma function for PlackettLuce
+      DEFAULT_GAMMA = lambda do |_c, _k, _mu, sigma_squared, _team, _rank, _weights|
+        Math.sqrt(sigma_squared) / _c
+      end
+
+      # @param mu [Float] initial mean skill rating
+      # @param sigma [Float] initial standard deviation
+      # @param beta [Float] performance uncertainty
+      # @param kappa [Float] minimum variance (regularization)
+      # @param gamma [Proc] custom gamma function
+      # @param tau [Float] dynamics factor (skill decay)
+      # @param margin [Float] score margin for impressive wins
+      # @param limit_sigma [Boolean] prevent sigma from increasing
+      # @param balance [Boolean] emphasize rating outliers
+      def initialize(
+        mu: 25.0,
+        sigma: 25.0 / 3.0,
+        beta: 25.0 / 6.0,
+        kappa: 0.0001,
+        gamma: DEFAULT_GAMMA,
+        tau: 25.0 / 300.0,
+        margin: 0.0,
+        limit_sigma: false,
+        balance: false
+      )
+        @mu = mu.to_f
+        @sigma = sigma.to_f
+        @beta = beta.to_f
+        @kappa = kappa.to_f
+        @gamma = gamma
+        @tau = tau.to_f
+        @margin = margin.to_f
+        @limit_sigma = limit_sigma
+        @balance = balance
+      end
+
+      # Create a new rating with default or custom parameters
+      #
+      # @param mu [Float, nil] override default mu
+      # @param sigma [Float, nil] override default sigma
+      # @param name [String, nil] optional player name
+      # @return [Rating] a new rating object
+      def create_rating(mu: nil, sigma: nil, name: nil)
+        Rating.new(
+          mu: mu || @mu,
+          sigma: sigma || @sigma,
+          name: name
+        )
+      end
+
+      # Load a rating from an array [mu, sigma]
+      #
+      # @param rating_array [Array<Numeric>] [mu, sigma]
+      # @param name [String, nil] optional player name
+      # @return [Rating] a new rating object
+      # @raise [ArgumentError] if rating_array is invalid
+      def load_rating(rating_array, name: nil)
+        raise ArgumentError, "Rating must be an Array, got #{rating_array.class}" unless rating_array.is_a?(Array)
+        raise ArgumentError, 'Rating array must have exactly 2 elements' unless rating_array.size == 2
+        raise ArgumentError, 'Rating values must be numeric' unless rating_array.all? { |v| v.is_a?(Numeric) }
+
+        Rating.new(mu: rating_array[0], sigma: rating_array[1], name: name)
+      end
+
+      # Calculate new ratings after a match
+      #
+      # @param teams [Array<Array<Rating>>] list of teams
+      # @param ranks [Array<Numeric>, nil] team ranks (lower is better, 0-indexed)
+      # @param scores [Array<Numeric>, nil] team scores (higher is better)
+      # @param weights [Array<Array<Numeric>>, nil] player contribution weights
+      # @param tau [Float, nil] override tau for this match
+      # @param limit_sigma [Boolean, nil] override limit_sigma for this match
+      # @return [Array<Array<Rating>>] updated teams
+      def calculate_ratings(teams, ranks: nil, scores: nil, weights: nil, tau: nil, limit_sigma: nil)
+        validate_teams!(teams)
+        validate_ranks!(teams, ranks) if ranks
+        validate_scores!(teams, scores) if scores
+        validate_weights!(teams, weights) if weights
+
+        # Can't have both ranks and scores
+        raise ArgumentError, 'Cannot provide both ranks and scores' if ranks && scores
+
+        # Deep copy teams to avoid mutating input
+        original_teams = teams
+        teams = deep_copy_teams(teams)
+
+        # Apply tau (skill decay over time)
+        tau_value = tau || @tau
+        tau_squared = tau_value**2
+        teams.each do |team|
+          team.each do |player|
+            player.sigma = Math.sqrt(player.sigma**2 + tau_squared)
+          end
+        end
+
+        # Convert scores to ranks if provided
+        if !ranks && scores
+          ranks = scores.map { |s| -s }
+          ranks = calculate_rankings(teams, ranks)
+        end
+
+        # Normalize weights to [1, 2] range
+        weights = weights.map { |w| Common.normalize(w, 1, 2) } if weights
+
+        # Sort teams by rank and track original order
+        tenet = nil
+        if ranks
+          sorted_objects, restoration_indices = Common.unwind(ranks, teams)
+          teams = sorted_objects
+          tenet = restoration_indices
+
+          weights, = Common.unwind(ranks, weights) if weights
+
+          ranks = ranks.sort
+        end
+
+        # Compute new ratings
+        result = compute_ratings(teams, ranks: ranks, scores: scores, weights: weights)
+
+        # Restore original order
+        result, = Common.unwind(tenet, result) if ranks && tenet
+
+        # Apply sigma limiting if requested
+        limit_sigma_value = limit_sigma.nil? ? @limit_sigma : limit_sigma
+        if limit_sigma_value
+          result = result.each_with_index.map do |team, team_idx|
+            team.each_with_index.map do |player, player_idx|
+              player.sigma = [player.sigma, original_teams[team_idx][player_idx].sigma].min
+              player
+            end
+          end
+        end
+
+        result
+      end
+
+      # Predict win probability for each team
+      #
+      # @param teams [Array<Array<Rating>>] list of teams
+      # @return [Array<Float>] probability each team wins
+      def predict_win_probability(teams)
+        validate_teams!(teams)
+
+        n = teams.size
+
+        # Special case for 2 teams
+        if n == 2
+          team_ratings = calculate_team_ratings(teams)
+          a = team_ratings[0]
+          b = team_ratings[1]
+
+          result = phi_major(
+            (a.mu - b.mu) / Math.sqrt(2 * @beta**2 + a.sigma_squared + b.sigma_squared)
+          )
+          return [result, 1 - result]
+        end
+
+        # For n teams, compute pairwise probabilities
+        team_ratings = teams.map { |team| calculate_team_ratings([team])[0] }
+
+        win_probs = []
+        team_ratings.each_with_index do |team_i, i|
+          prob_sum = 0.0
+          team_ratings.each_with_index do |team_j, j|
+            next if i == j
+
+            prob_sum += phi_major(
+              (team_i.mu - team_j.mu) / Math.sqrt(2 * @beta**2 + team_i.sigma_squared + team_j.sigma_squared)
+            )
+          end
+          win_probs << prob_sum / (n - 1)
+        end
+
+        # Normalize to sum to 1
+        total = win_probs.sum
+        win_probs.map { |p| p / total }
+      end
+
+      # Predict draw probability
+      #
+      # @param teams [Array<Array<Rating>>] list of teams
+      # @return [Float] probability of a draw
+      def predict_draw_probability(teams)
+        validate_teams!(teams)
+
+        total_player_count = teams.sum(&:size)
+        draw_probability = 1.0 / total_player_count
+        draw_margin = Math.sqrt(total_player_count) * @beta * phi_major_inverse((1 + draw_probability) / 2)
+
+        pairwise_probs = []
+        teams.combination(2).each do |team_a, team_b|
+          team_a_ratings = calculate_team_ratings([team_a])
+          team_b_ratings = calculate_team_ratings([team_b])
+
+          mu_a = team_a_ratings[0].mu
+          sigma_a = team_a_ratings[0].sigma_squared
+          mu_b = team_b_ratings[0].mu
+          sigma_b = team_b_ratings[0].sigma_squared
+
+          denominator = Math.sqrt(2 * @beta**2 + sigma_a + sigma_b)
+
+          pairwise_probs << (
+            phi_major((draw_margin - mu_a + mu_b) / denominator) -
+            phi_major((mu_b - mu_a - draw_margin) / denominator)
+          )
+        end
+
+        pairwise_probs.sum / pairwise_probs.size
+      end
+
+      # Predict rank probability for each team
+      #
+      # @param teams [Array<Array<Rating>>] list of teams
+      # @return [Array<Array(Integer, Float)>] rank and probability for each team
+      def predict_rank_probability(teams)
+        validate_teams!(teams)
+
+        n = teams.size
+        team_ratings = calculate_team_ratings(teams)
+
+        # Calculate win probability for each team against all others
+        win_probs = team_ratings.map do |team_i|
+          prob = 0.0
+          team_ratings.each do |team_j|
+            next if team_i == team_j
+
+            prob += phi_major(
+              (team_i.mu - team_j.mu) /
+              Math.sqrt(2 * @beta**2 + team_i.sigma_squared + team_j.sigma_squared)
+            )
+          end
+          prob / (n - 1)
+        end
+
+        # Normalize probabilities
+        total = win_probs.sum
+        normalized_probs = win_probs.map { |p| p / total }
+
+        # Sort by probability (descending) and assign ranks
+        sorted_indices = normalized_probs.each_with_index.sort_by { |prob, _| -prob }
+        ranks = Array.new(n)
+
+        current_rank = 1
+        sorted_indices.each_with_index do |(prob, team_idx), i|
+          current_rank = i + 1 if i > 0 && prob < sorted_indices[i - 1][0]
+          ranks[team_idx] = current_rank
+        end
+
+        ranks.zip(normalized_probs)
+      end
+
+      private
+
+      # Helper for log(1 + x)
+      def log1p(value)
+        Math.log(1 + value)
+      end
+
+      # Rating class for individual players
+      class Rating
+        attr_accessor :mu, :sigma, :name
+        attr_reader :id
+
+        def initialize(mu:, sigma:, name: nil)
+          @id = SecureRandom.hex
+          @mu = mu.to_f
+          @sigma = sigma.to_f
+          @name = name
+        end
+
+        # Calculate display rating (conservative estimate)
+        #
+        # @param z [Float] number of standard deviations
+        # @param alpha [Float] scaling factor
+        # @param target [Float] target adjustment
+        # @return [Float] the ordinal rating
+        def ordinal(z: 3.0, alpha: 1.0, target: 0.0)
+          alpha * ((@mu - z * @sigma) + (target / alpha))
+        end
+
+        def <=>(other)
+          return nil unless other.is_a?(Rating)
+
+          ordinal <=> other.ordinal
+        end
+
+        def <(other)
+          raise ArgumentError, 'comparison with non-Rating' unless other.is_a?(Rating)
+
+          ordinal < other.ordinal
+        end
+
+        def >(other)
+          raise ArgumentError, 'comparison with non-Rating' unless other.is_a?(Rating)
+
+          ordinal > other.ordinal
+        end
+
+        def <=(other)
+          raise ArgumentError, 'comparison with non-Rating' unless other.is_a?(Rating)
+
+          ordinal <= other.ordinal
+        end
+
+        def >=(other)
+          raise ArgumentError, 'comparison with non-Rating' unless other.is_a?(Rating)
+
+          ordinal >= other.ordinal
+        end
+
+        def ==(other)
+          return false unless other.is_a?(Rating)
+
+          @mu == other.mu && @sigma == other.sigma
+        end
+
+        def hash
+          [@id, @mu, @sigma].hash
+        end
+
+        def eql?(other)
+          self == other
+        end
+
+        def to_s
+          "Rating(mu=#{@mu}, sigma=#{@sigma}#{", name=#{@name}" if @name})"
+        end
+
+        def inspect
+          to_s
+        end
+      end
+
+      # Internal class for team ratings
+      class TeamRating
+        attr_reader :mu, :sigma_squared, :team, :rank
+
+        def initialize(mu:, sigma_squared:, team:, rank:)
+          @mu = mu.to_f
+          @sigma_squared = sigma_squared.to_f
+          @team = team
+          @rank = rank.to_i
+        end
+
+        def ==(other)
+          return false unless other.is_a?(TeamRating)
+
+          @mu == other.mu &&
+            @sigma_squared == other.sigma_squared &&
+            @team == other.team &&
+            @rank == other.rank
+        end
+
+        def hash
+          [@mu, @sigma_squared, @team, @rank].hash
+        end
+
+        def to_s
+          "TeamRating(mu=#{@mu}, sigma_squared=#{@sigma_squared}, rank=#{@rank})"
+        end
+      end
+
+      # Validate teams structure
+      def validate_teams!(teams)
+        raise ArgumentError, 'Teams must be an Array' unless teams.is_a?(Array)
+        raise ArgumentError, 'Must have at least 2 teams' if teams.size < 2
+
+        teams.each_with_index do |team, idx|
+          raise ArgumentError, "Team #{idx} must be an Array" unless team.is_a?(Array)
+          raise ArgumentError, "Team #{idx} must have at least 1 player" if team.empty?
+
+          team.each do |player|
+            raise ArgumentError, "All players must be Rating objects, got #{player.class}" unless player.is_a?(Rating)
+          end
+        end
+      end
+
+      # Validate ranks
+      def validate_ranks!(teams, ranks)
+        raise ArgumentError, "Ranks must be an Array, got #{ranks.class}" unless ranks.is_a?(Array)
+        raise ArgumentError, 'Ranks must have same length as teams' if ranks.size != teams.size
+
+        ranks.each do |rank|
+          raise ArgumentError, "All ranks must be numeric, got #{rank.class}" unless rank.is_a?(Numeric)
+        end
+      end
+
+      # Validate scores
+      def validate_scores!(teams, scores)
+        raise ArgumentError, "Scores must be an Array, got #{scores.class}" unless scores.is_a?(Array)
+        raise ArgumentError, 'Scores must have same length as teams' if scores.size != teams.size
+
+        scores.each do |score|
+          raise ArgumentError, "All scores must be numeric, got #{score.class}" unless score.is_a?(Numeric)
+        end
+      end
+
+      # Validate weights
+      def validate_weights!(teams, weights)
+        raise ArgumentError, "Weights must be an Array, got #{weights.class}" unless weights.is_a?(Array)
+        raise ArgumentError, 'Weights must have same length as teams' if weights.size != teams.size
+
+        weights.each_with_index do |team_weights, idx|
+          raise ArgumentError, "Weights for team #{idx} must be an Array" unless team_weights.is_a?(Array)
+          unless team_weights.size == teams[idx].size
+            raise ArgumentError, "Weights for team #{idx} must match team size"
+          end
+
+          team_weights.each do |weight|
+            raise ArgumentError, "All weights must be numeric, got #{weight.class}" unless weight.is_a?(Numeric)
+          end
+        end
+      end
+
+      # Deep copy teams to avoid mutation
+      def deep_copy_teams(teams)
+        teams.map do |team|
+          team.map do |player|
+            Rating.new(mu: player.mu, sigma: player.sigma, name: player.name).tap do |new_player|
+              # Preserve the original player's ID
+              new_player.instance_variable_set(:@id, player.id)
+            end
+          end
+        end
+      end
+
+      # Calculate collective team sigma (c)
+      def collective_team_sigma(team_ratings)
+        beta_squared = @beta**2
+        sum = team_ratings.sum { |team| team.sigma_squared + beta_squared }
+        Math.sqrt(sum)
+      end
+
+      # Calculate sum_q values for PlackettLuce algorithm
+      def sum_q_values(team_ratings, c_value, scores: nil)
+        score_mapping = {}
+        if scores && scores.size == team_ratings.size
+          team_ratings.each_with_index do |_team, i|
+            score_mapping[i] = scores[i]
+          end
+        end
+
+        sum_q = {}
+        team_ratings.each_with_index do |team_i, i|
+          adjusted_mu = team_i.mu
+
+          # Apply margin factor if scores provided
+          if scores && score_mapping.key?(i)
+            margin_adjustment = 0.0
+            comparison_count = 0
+
+            team_ratings.each_with_index do |team_j, j|
+              next if i == j || !score_mapping.key?(j)
+
+              score_diff = (score_mapping[i] - score_mapping[j]).abs
+              next unless score_diff > 0
+
+              margin_factor = 1.0
+              margin_factor = log1p(score_diff / @margin) if score_diff > @margin && @margin > 0.0
+
+              if score_mapping[i] > score_mapping[j]
+                margin_adjustment += (team_i.mu - team_j.mu) * (margin_factor - 1.0)
+              else
+                margin_adjustment -= (team_j.mu - team_i.mu) * (margin_factor - 1.0)
+              end
+              comparison_count += 1
+            end
+
+            adjusted_mu += comparison_count > 0 ? (margin_adjustment / comparison_count) : margin_adjustment
+          end
+
+          summed = Math.exp(adjusted_mu / c_value)
+
+          team_ratings.each_with_index do |team_q, q|
+            next unless team_i.rank >= team_q.rank
+
+            sum_q[q] = (sum_q[q] || 0) + summed
+          end
+        end
+
+        sum_q.sort.map { |_, v| v }
+      end
+
+      # Count how many teams share each rank
+      def count_rank_occurrences(team_ratings)
+        team_ratings.map do |team_i|
+          team_ratings.count { |team_q| team_q.rank == team_i.rank }
+        end
+      end
+
+      # Calculate team ratings from individual player ratings
+      def calculate_team_ratings(game, ranks: nil, weights: nil)
+        ranks ||= calculate_rankings(game)
+
+        game.each_with_index.map do |team, idx|
+          sorted_team = team.sort_by { |p| -p.ordinal }
+          max_ordinal = sorted_team.first.ordinal
+
+          mu_sum = 0.0
+          sigma_squared_sum = 0.0
+
+          sorted_team.each do |player|
+            balance_weight = if @balance
+                               ordinal_diff = max_ordinal - player.ordinal
+                               1 + (ordinal_diff / (max_ordinal + @kappa))
+                             else
+                               1.0
+                             end
+
+            mu_sum += player.mu * balance_weight
+            sigma_squared_sum += (player.sigma * balance_weight)**2
+          end
+
+          TeamRating.new(
+            mu: mu_sum,
+            sigma_squared: sigma_squared_sum,
+            team: team,
+            rank: ranks[idx].to_i
+          )
+        end
+      end
+
+      # Calculate rankings from scores or indices
+      def calculate_rankings(game, ranks = nil)
+        return [] if game.empty?
+
+        team_scores = if ranks
+                        ranks.each_with_index.map { |rank, idx| rank || idx }
+                      else
+                        game.each_index.to_a
+                      end
+
+        sorted_scores = team_scores.sort
+        rank_map = {}
+        sorted_scores.each_with_index do |value, index|
+          rank_map[value] ||= index
+        end
+
+        team_scores.map { |score| rank_map[score].to_f }
+      end
+
+      # Core rating computation algorithm
+      def compute_ratings(teams, ranks: nil, scores: nil, weights: nil)
+        team_ratings = calculate_team_ratings(teams, ranks: ranks)
+        c_value = collective_team_sigma(team_ratings)
+        sum_q = sum_q_values(team_ratings, c_value, scores: scores)
+        a_values = count_rank_occurrences(team_ratings)
+
+        # Build score mapping for margin calculations
+        score_mapping = {}
+        if scores && scores.size == team_ratings.size
+          team_ratings.each_with_index do |_, i|
+            score_mapping[i] = scores[i]
+          end
+        end
+
+        # Group teams by rank for tie handling
+        rank_groups = {}
+        team_ratings.each_with_index do |team, i|
+          rank_groups[team.rank] ||= []
+          rank_groups[team.rank] << i
+        end
+
+        result = team_ratings.each_with_index.map do |team_i, i|
+          omega = 0.0
+          delta = 0.0
+
+          # Calculate adjusted mu with margin
+          adjusted_mu_i = team_i.mu
+          if scores && score_mapping.key?(i)
+            margin_adjustment = 0.0
+            comparison_count = 0
+
+            team_ratings.each_with_index do |team_j, j|
+              next if i == j || !score_mapping.key?(j)
+
+              score_diff = (score_mapping[i] - score_mapping[j]).abs
+              next unless score_diff > 0
+
+              margin_factor = 1.0
+              margin_factor = log1p(score_diff / @margin) if score_diff > @margin && @margin > 0.0
+
+              if score_mapping[i] > score_mapping[j]
+                margin_adjustment += (team_i.mu - team_j.mu) * (margin_factor - 1.0)
+              else
+                margin_adjustment -= (team_j.mu - team_i.mu) * (margin_factor - 1.0)
+              end
+              comparison_count += 1
+            end
+
+            adjusted_mu_i += comparison_count > 0 ? (margin_adjustment / comparison_count) : margin_adjustment
+          end
+
+          i_mu_over_c = Math.exp(adjusted_mu_i / c_value)
+
+          # Calculate omega and delta
+          team_ratings.each_with_index do |team_q, q|
+            next unless team_q.rank <= team_i.rank
+
+            i_mu_over_ce_over_sum_q = i_mu_over_c / sum_q[q]
+
+            delta += i_mu_over_ce_over_sum_q * (1 - i_mu_over_ce_over_sum_q) / a_values[q]
+
+            if q == i
+              omega += (1 - i_mu_over_ce_over_sum_q) / a_values[q]
+            else
+              omega -= i_mu_over_ce_over_sum_q / a_values[q]
+            end
+          end
+
+          omega *= team_i.sigma_squared / c_value
+          delta *= team_i.sigma_squared / (c_value**2)
+
+          # Apply gamma
+          team_weights = weights ? weights[i] : nil
+          gamma_value = @gamma.call(
+            c_value,
+            team_ratings.size,
+            team_i.mu,
+            team_i.sigma_squared,
+            team_i.team,
+            team_i.rank,
+            team_weights
+          )
+          delta *= gamma_value
+
+          # Update each player in the team
+          team_i.team.each_with_index.map do |player, j|
+            weight = weights ? weights[i][j] : 1.0
+
+            new_mu = player.mu
+            new_sigma = player.sigma
+
+            if omega >= 0
+              new_mu += (new_sigma**2 / team_i.sigma_squared) * omega * weight
+              new_sigma *= Math.sqrt(
+                [1 - (new_sigma**2 / team_i.sigma_squared) * delta * weight, @kappa].max
+              )
+            else
+              new_mu += (new_sigma**2 / team_i.sigma_squared) * omega / weight
+              new_sigma *= Math.sqrt(
+                [1 - (new_sigma**2 / team_i.sigma_squared) * delta / weight, @kappa].max
+              )
+            end
+
+            player.mu = new_mu
+            player.sigma = new_sigma
+            player
+          end
+        end
+
+        # Handle ties - average mu changes for tied teams
+        rank_groups.each do |_rank, indices|
+          next unless indices.size > 1
+
+          avg_mu_change = indices.sum { |i| result[i][0].mu - teams[i][0].mu } / indices.size.to_f
+
+          indices.each do |i|
+            result[i].each_with_index do |player, j|
+              player.mu = teams[i][j].mu + avg_mu_change
+            end
+          end
+        end
+
+        result
+      end
+
+      # Normal distribution CDF
+      def phi_major(value)
+        Statistics::Normal.cdf(value)
+      end
+
+      # Normal distribution inverse CDF
+      def phi_major_inverse(value)
+        Statistics::Normal.inv_cdf(value)
+      end
+    end
+  end
+end
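For two teams, `predict_win_probability` reduces to a single normal CDF of the rating gap over the combined uncertainty, `Phi((mu_a - mu_b) / sqrt(2 * beta**2 + sigma_a**2 + sigma_b**2))`. A minimal sketch with illustrative ratings; the exact figures depend on the model's default `beta`, so only the qualitative outcomes are shown:

```ruby
require 'openskill'

model = OpenSkill::Models::PlackettLuce.new

# Identical priors: the rating gap is zero, so Phi(0) gives even odds
even = model.predict_win_probability([[model.create_rating], [model.create_rating]])
even  # ~ [0.5, 0.5]

# A higher-mu player is favoured; the gap is scaled by 2*beta^2 plus both variances
strong = model.create_rating(mu: 30.0, sigma: 5.0)
weak   = model.create_rating(mu: 20.0, sigma: 5.0)
p_strong, p_weak = model.predict_win_probability([[strong], [weak]])
p_strong > p_weak  # => true, and the two probabilities still sum to 1.0
```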
data/lib/openskill/statistics/normal.rb
ADDED
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+require 'distribution'
+
+module OpenSkill
+  module Statistics
+    # Wrapper for normal distribution functions
+    class Normal
+      # Normal cumulative distribution function (CDF)
+      #
+      # @param x [Float] the value
+      # @return [Float] the cumulative probability
+      def self.cdf(x)
+        Distribution::Normal.cdf(x)
+      end
+
+      # Normal inverse cumulative distribution function (inverse CDF)
+      #
+      # @param x [Float] the probability (0 to 1)
+      # @return [Float] the value at that probability
+      def self.inv_cdf(x)
+        Distribution::Normal.p_value(x)
+      end
+
+      # Normal probability density function (PDF)
+      #
+      # @param x [Float] the value
+      # @return [Float] the probability density
+      def self.pdf(x)
+        Distribution::Normal.pdf(x)
+      end
+    end
+  end
+end
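The wrapper delegates straight to the `distribution` gem, so the familiar standard-normal values come back. A tiny sketch; the printed values are approximate:

```ruby
require 'openskill'

normal = OpenSkill::Statistics::Normal

normal.cdf(0.0)        # ~ 0.5     (half the mass lies below the mean)
normal.pdf(0.0)        # ~ 0.3989  (1 / sqrt(2 * pi))
normal.inv_cdf(0.975)  # ~ 1.96    (the familiar 97.5th-percentile z-score)
```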
data/lib/openskill.rb
ADDED
@@ -0,0 +1,10 @@
+# frozen_string_literal: true
+
+require_relative 'openskill/version'
+require_relative 'openskill/statistics/normal'
+require_relative 'openskill/models/common'
+require_relative 'openskill/models/plackett_luce'
+
+module OpenSkill
+  class Error < StandardError; end
+end
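Loading the entry point above pulls every component into one namespace; a quick sketch of what a single require makes available:

```ruby
require 'openskill'

OpenSkill::Models::PlackettLuce  # the rating model
OpenSkill::Models::Common        # shared vector/sorting helpers
OpenSkill::Statistics::Normal    # normal-distribution wrapper
OpenSkill::Error                 # base error class for the gem
```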
metadata
ADDED
@@ -0,0 +1,118 @@
+--- !ruby/object:Gem::Specification
+name: openskill
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- Tamas Erdos
+bindir: bin
+cert_chain: []
+date: 1980-01-02 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: bigdecimal
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.1'
+- !ruby/object:Gem::Dependency
+  name: distribution
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.8'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.8'
+- !ruby/object:Gem::Dependency
+  name: prime
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.1'
+- !ruby/object:Gem::Dependency
+  name: minitest
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '5.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '5.0'
+- !ruby/object:Gem::Dependency
+  name: rake
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+description: A Ruby implementation of the OpenSkill rating system, providing Bayesian
+  skill ratings for multiplayer games
+email:
+- tamas at tamaserdos.com
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- LICENSE
+- README.md
+- lib/openskill.rb
+- lib/openskill/models/common.rb
+- lib/openskill/models/plackett_luce.rb
+- lib/openskill/statistics/normal.rb
+- lib/openskill/version.rb
+homepage: https://github.com/erdostom/openskill-ruby
+licenses:
+- MIT
+metadata: {}
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: 3.1.0
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.6.9
+specification_version: 4
+summary: Multiplayer rating system for Ruby
+test_files: []