@shaxpir/duiduidui-models 1.9.22 → 1.9.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/models/SkillLevel.d.ts +73 -63
- package/dist/models/SkillLevel.js +53 -99
- package/dist/util/DifficultyRange.d.ts +60 -0
- package/dist/util/DifficultyRange.js +95 -0
- package/dist/util/index.d.ts +1 -0
- package/dist/util/index.js +1 -0
- package/package.json +1 -1
package/dist/models/SkillLevel.d.ts
CHANGED

@@ -15,31 +15,71 @@ export interface SkillLevel {
     sigma: number;
 }
 /**
- *
- *
- * @deprecated Use SkillLevel with mu/sigma instead
+ * Parameters for skill level updates.
+ * All parameters are optional and have sensible defaults.
  */
-export interface
-
-
-
+export interface SkillUpdateParams {
+    /**
+     * Coefficient controlling how quickly skill moves toward evidence.
+     *
+     * When informative evidence is received, skill moves this fraction
+     * of the distance from current mu toward the card's difficulty.
+     *
+     * - 0.5 means move halfway toward the card difficulty each time
+     * - Lower values = more conservative, requires more evidence
+     * - Higher values = more aggressive, trusts individual observations more
+     *
+     * With coefficient 0.5, approaching a difficulty level:
+     * - After 1 review: 50% of the way
+     * - After 2 reviews: 75% of the way
+     * - After 3 reviews: 87.5% of the way
+     * - After 5 reviews: 96.9% of the way
+     *
+     * @default 0.5
+     */
+    updateCoefficient?: number;
+    /**
+     * Coefficient for sigma updates when evidence is informative.
+     * Sigma is multiplied by this value (so 0.9 = 10% decay).
+     *
+     * @default 0.9
+     */
+    sigmaDecay?: number;
+    /**
+     * Minimum sigma value to prevent over-confidence.
+     * Even with many observations, we maintain some uncertainty.
+     *
+     * @default 5
+     */
+    minSigma?: number;
+    /**
+     * Maximum sigma value.
+     *
+     * @default 100
+     */
+    maxSigma?: number;
 }
 /**
- *
+ * Default parameters for skill updates.
+ */
+export declare const DEFAULT_SKILL_PARAMS: Required<SkillUpdateParams>;
+/**
+ * Model for updating and managing skill levels using Item Response Theory (IRT)
+ * with asymmetric evidence clamping.
  *
- * This model
- *
+ * This model is based on standard IRT psychometric principles but applies a
+ * clamping constraint that reflects the asymmetric nature of flashcard evidence:
  *
- *
- *
- * -
- *
- * -
+ * - Succeeding on a card of difficulty D provides evidence that skill >= D,
+ *   but a single success should NOT push skill estimate above D
+ * - Failing on a card of difficulty D provides evidence that skill <= D,
+ *   but a single failure should NOT push skill estimate below D
+ * - Succeeding on easy cards (difficulty < skill) is uninformative
+ * - Failing on hard cards (difficulty > skill) is uninformative
  *
- * The
- *
- *
- * - No full history is needed - just μ and σ are sufficient statistics
+ * The update rule moves skill toward the card difficulty by a fraction (coefficient),
+ * ensuring that skill can only asymptotically approach demonstrated competence
+ * through multiple consistent observations.
  *
  * Scale semantics:
  * - μ = 0: absolute beginner (knows ~0 characters)
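The convergence figures in the updateCoefficient docs (50%, 75%, 87.5%, 96.9%) follow from repeatedly closing a fixed fraction of the remaining gap: after n informative reviews, the estimate has covered 1 - (1 - coefficient)^n of the distance to the card's difficulty. A minimal sketch of that arithmetic (the helper below is illustrative only, not part of the package):

    // Fraction of the gap to a card's difficulty closed after n informative
    // reviews, when each review moves mu by "coefficient" of the remaining distance.
    function fractionCovered(coefficient: number, reviews: number): number {
        return 1 - Math.pow(1 - coefficient, reviews);
    }

    fractionCovered(0.5, 1); // 0.5     -> 50% of the way
    fractionCovered(0.5, 2); // 0.75    -> 75%
    fractionCovered(0.5, 3); // 0.875   -> 87.5%
    fractionCovered(0.5, 5); // 0.96875 -> ~96.9%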
@@ -48,48 +88,31 @@ export interface LegacySkillLevel {
  */
 export declare class SkillLevelModel {
     /**
-     *
-     *
-     *
-     * - k = 0.05 means the transition zone spans ~40 difficulty points
-     * - At difficulty = μ, P(success) = 50%
-     * - At difficulty = μ + 20, P(success) ≈ 27%
-     * - At difficulty = μ + 40, P(success) ≈ 12%
-     *
-     * Lower k = more gradual transition, higher k = sharper cutoff.
+     * Discrimination parameter for predictSuccess logistic function.
+     * Controls how sharply probability transitions around difficulty = mu.
+     * Higher values = steeper curve. 0.05 gives gradual transition over ~60 difficulty units.
      */
     private static readonly K;
     /**
-     *
-     * Even with many observations, we maintain some uncertainty.
-     */
-    private static readonly MIN_SIGMA;
-    /**
-     * Maximum sigma value for initial state.
-     */
-    private static readonly MAX_SIGMA;
-    /**
-     * Update skill level using IRT after a review.
+     * Update skill level using clamped IRT after a review.
      *
-     *
-     *
+     * Applies asymmetric evidence rules:
+     * - Success on harder card (difficulty > mu): informative, move mu up toward difficulty
+     * - Failure on easier card (difficulty < mu): informative, move mu down toward difficulty
+     * - Success on easier card: uninformative, no change
+     * - Failure on harder card: uninformative, no change
+     *
+     * When evidence is informative, sigma also decreases to reflect increased confidence.
+     * When evidence is uninformative, both mu and sigma remain unchanged.
      *
      * @param mu Current skill level estimate
     * @param sigma Current uncertainty (standard deviation)
     * @param cardDifficulty Difficulty of the card reviewed
     * @param outcome Review result (FAIL/HARD/GOOD/EASY)
+     * @param params Optional parameters to customize update behavior
     * @returns Updated skill level with new mu and sigma
      */
-    static update(mu: number, sigma: number, cardDifficulty: number, outcome: ReviewResult): SkillLevel;
-    /**
-     * Update skill level using IRT (Glicko-2 compatible signature).
-     *
-     * This method provides backwards compatibility with the old Glicko-2 API.
-     * The volatility and cardRatingDeviation parameters are ignored.
-     *
-     * @deprecated Use update() instead for cleaner IRT semantics
-     */
-    static updateWithGlicko2(rating: number, ratingDeviation: number, _volatility: number, cardDifficulty: number, _cardRatingDeviation: number, outcome: ReviewResult, _tau?: number): LegacySkillLevel;
+    static update(mu: number, sigma: number, cardDifficulty: number, outcome: ReviewResult, params?: SkillUpdateParams): SkillLevel;
     /**
      * Map a review outcome to a success score for IRT.
      *
@@ -122,19 +145,6 @@ export declare class SkillLevelModel {
      * @returns New SkillLevel object
      */
     static createDefault(initialSigma?: number): SkillLevel;
-    /**
-     * Create a default skill level in legacy format.
-     * @deprecated Use createDefault() instead
-     */
-    static createDefaultLegacy(initialRatingDeviation?: number, initialVolatility?: number): LegacySkillLevel;
-    /**
-     * Convert from legacy Glicko-2 format to IRT format.
-     */
-    static fromLegacy(legacy: LegacySkillLevel): SkillLevel;
-    /**
-     * Convert from IRT format to legacy Glicko-2 format.
-     */
-    static toLegacy(skill: SkillLevel): LegacySkillLevel;
     /**
      * Check if the skill level has high confidence (low uncertainty).
      *
package/dist/models/SkillLevel.js
CHANGED

@@ -1,22 +1,32 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.SkillLevelModel = void 0;
+exports.SkillLevelModel = exports.DEFAULT_SKILL_PARAMS = void 0;
 /**
- *
+ * Default parameters for skill updates.
+ */
+exports.DEFAULT_SKILL_PARAMS = {
+    updateCoefficient: 0.5,
+    sigmaDecay: 0.9,
+    minSigma: 5,
+    maxSigma: 100
+};
+/**
+ * Model for updating and managing skill levels using Item Response Theory (IRT)
+ * with asymmetric evidence clamping.
  *
- * This model
- *
+ * This model is based on standard IRT psychometric principles but applies a
+ * clamping constraint that reflects the asymmetric nature of flashcard evidence:
  *
- *
- *
- * -
- *
- * -
+ * - Succeeding on a card of difficulty D provides evidence that skill >= D,
+ *   but a single success should NOT push skill estimate above D
+ * - Failing on a card of difficulty D provides evidence that skill <= D,
+ *   but a single failure should NOT push skill estimate below D
+ * - Succeeding on easy cards (difficulty < skill) is uninformative
+ * - Failing on hard cards (difficulty > skill) is uninformative
  *
- * The
- *
- *
- * - No full history is needed - just μ and σ are sufficient statistics
+ * The update rule moves skill toward the card difficulty by a fraction (coefficient),
+ * ensuring that skill can only asymptotically approach demonstrated competence
+ * through multiple consistent observations.
  *
  * Scale semantics:
  * - μ = 0: absolute beginner (knows ~0 characters)
@@ -25,53 +35,42 @@ exports.SkillLevelModel = void 0;
  */
 class SkillLevelModel {
     /**
-     * Update skill level using IRT after a review.
+     * Update skill level using clamped IRT after a review.
      *
-     *
-     *
+     * Applies asymmetric evidence rules:
+     * - Success on harder card (difficulty > mu): informative, move mu up toward difficulty
+     * - Failure on easier card (difficulty < mu): informative, move mu down toward difficulty
+     * - Success on easier card: uninformative, no change
+     * - Failure on harder card: uninformative, no change
+     *
+     * When evidence is informative, sigma also decreases to reflect increased confidence.
+     * When evidence is uninformative, both mu and sigma remain unchanged.
      *
     * @param mu Current skill level estimate
     * @param sigma Current uncertainty (standard deviation)
     * @param cardDifficulty Difficulty of the card reviewed
     * @param outcome Review result (FAIL/HARD/GOOD/EASY)
+     * @param params Optional parameters to customize update behavior
     * @returns Updated skill level with new mu and sigma
      */
-    static update(mu, sigma, cardDifficulty, outcome) {
-        const
-
-        const
-        //
-        const
-
-
-
-
-
-
-        const
-
-
-        const newMu = mu + newSigmaSquared * k * error;
-        return {
-            mu: Math.max(0, newMu), // Skill can't go negative
-            sigma: Math.max(SkillLevelModel.MIN_SIGMA, Math.min(SkillLevelModel.MAX_SIGMA, newSigma))
-        };
-    }
-    /**
-     * Update skill level using IRT (Glicko-2 compatible signature).
-     *
-     * This method provides backwards compatibility with the old Glicko-2 API.
-     * The volatility and cardRatingDeviation parameters are ignored.
-     *
-     * @deprecated Use update() instead for cleaner IRT semantics
-     */
-    static updateWithGlicko2(rating, ratingDeviation, _volatility, cardDifficulty, _cardRatingDeviation, outcome, _tau = 0.5) {
-        const result = SkillLevelModel.update(rating, ratingDeviation, cardDifficulty, outcome);
-        // Return in legacy format
+    static update(mu, sigma, cardDifficulty, outcome, params = {}) {
+        const { updateCoefficient = exports.DEFAULT_SKILL_PARAMS.updateCoefficient, sigmaDecay = exports.DEFAULT_SKILL_PARAMS.sigmaDecay, minSigma = exports.DEFAULT_SKILL_PARAMS.minSigma, maxSigma = exports.DEFAULT_SKILL_PARAMS.maxSigma } = params;
+        const isSuccess = outcome === 'EASY' || outcome === 'GOOD';
+        const isFailure = outcome === 'FAIL' || outcome === 'HARD';
+        // Determine if this observation is informative
+        const isInformative = (isSuccess && cardDifficulty > mu) || // Success on harder card
+            (isFailure && cardDifficulty < mu); // Failure on easier card
+        if (!isInformative) {
+            // Uninformative evidence: no change to skill estimate
+            return { mu, sigma };
+        }
+        // Informative evidence: move mu toward card difficulty
+        const newMu = mu + updateCoefficient * (cardDifficulty - mu);
+        // Decrease sigma since we learned something
+        const newSigma = sigma * sigmaDecay;
         return {
-
-
-            volatility: 0.06 // Fixed value, not used in IRT
+            mu: Math.max(0, newMu),
+            sigma: Math.max(minSigma, Math.min(maxSigma, newSigma))
         };
     }
     /**
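A minimal usage sketch of the new update() signature. Assumptions: SkillLevelModel is re-exported from the package root, and ReviewResult accepts the string values compared in the compiled code ('FAIL'/'HARD'/'GOOD'/'EASY'); if ReviewResult is an enum, pass the corresponding members instead.

    import { SkillLevelModel } from '@shaxpir/duiduidui-models';

    let skill = SkillLevelModel.createDefault(); // wide-sigma starting state

    // Success on a harder card (difficulty > mu) is informative:
    // with the default coefficient of 0.5, mu moves halfway toward 100,
    // and sigma decays by 0.9 (clamped to [minSigma, maxSigma]).
    skill = SkillLevelModel.update(skill.mu, skill.sigma, 100, 'GOOD');

    // Success on an easier card (difficulty < mu) is uninformative: no change.
    skill = SkillLevelModel.update(skill.mu, skill.sigma, 10, 'EASY');

    // Per-call overrides merge with DEFAULT_SKILL_PARAMS.
    skill = SkillLevelModel.update(skill.mu, skill.sigma, 100, 'GOOD', {
        updateCoefficient: 0.25,
        sigmaDecay: 0.95,
    });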
@@ -127,36 +126,6 @@ class SkillLevelModel {
             sigma: initialSigma
         };
     }
-    /**
-     * Create a default skill level in legacy format.
-     * @deprecated Use createDefault() instead
-     */
-    static createDefaultLegacy(initialRatingDeviation = 50, initialVolatility = 0.06) {
-        return {
-            rating: 0,
-            rating_deviation: initialRatingDeviation,
-            volatility: initialVolatility
-        };
-    }
-    /**
-     * Convert from legacy Glicko-2 format to IRT format.
-     */
-    static fromLegacy(legacy) {
-        return {
-            mu: legacy.rating,
-            sigma: legacy.rating_deviation
-        };
-    }
-    /**
-     * Convert from IRT format to legacy Glicko-2 format.
-     */
-    static toLegacy(skill) {
-        return {
-            rating: skill.mu,
-            rating_deviation: skill.sigma,
-            volatility: 0.06
-        };
-    }
     /**
      * Check if the skill level has high confidence (low uncertainty).
      *
@@ -170,23 +139,8 @@ class SkillLevelModel {
 }
 exports.SkillLevelModel = SkillLevelModel;
 /**
- *
- *
- *
- * - k = 0.05 means the transition zone spans ~40 difficulty points
- * - At difficulty = μ, P(success) = 50%
- * - At difficulty = μ + 20, P(success) ≈ 27%
- * - At difficulty = μ + 40, P(success) ≈ 12%
- *
- * Lower k = more gradual transition, higher k = sharper cutoff.
+ * Discrimination parameter for predictSuccess logistic function.
+ * Controls how sharply probability transitions around difficulty = mu.
+ * Higher values = steeper curve. 0.05 gives gradual transition over ~60 difficulty units.
  */
 SkillLevelModel.K = 0.05;
-/**
- * Minimum sigma value to prevent over-confidence.
- * Even with many observations, we maintain some uncertainty.
- */
-SkillLevelModel.MIN_SIGMA = 5;
-/**
- * Maximum sigma value for initial state.
- */
-SkillLevelModel.MAX_SIGMA = 100;
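The probabilities quoted in the old comment (50% at difficulty = μ, ≈27% at μ + 20, ≈12% at μ + 40) match a one-parameter logistic curve with discrimination K = 0.05. The sketch below reproduces those numbers; it illustrates the curve shape implied by K and is not the package's predictSuccess implementation, which is not shown in this diff.

    // Logistic success curve implied by K = 0.05 (illustrative only):
    // P(success) = 1 / (1 + exp(K * (difficulty - mu)))
    function logisticSuccess(mu: number, difficulty: number, k = 0.05): number {
        return 1 / (1 + Math.exp(k * (difficulty - mu)));
    }

    logisticSuccess(100, 100); // 0.5    -> 50% at difficulty = mu
    logisticSuccess(100, 120); // ~0.269 -> ~27% at mu + 20
    logisticSuccess(100, 140); // ~0.119 -> ~12% at mu + 40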
package/dist/util/DifficultyRange.d.ts
ADDED

@@ -0,0 +1,60 @@
+import { Conditions } from '../models/Condition';
+/**
+ * Represents a difficulty range with guaranteed min (defaults to 0).
+ * Used for intersecting pool difficulty ranges with Collection constraints.
+ */
+export interface DifficultyRange {
+    min: number;
+    max: number | undefined;
+}
+/**
+ * Extracts difficulty constraints from Conditions.
+ *
+ * Checks the 'all' section for difficulty conditions since that's where
+ * Collection-level constraints are placed (they must all be satisfied).
+ *
+ * Exploits the invariant that difficulty and skill level are always >= 0,
+ * so min defaults to 0 when not specified.
+ *
+ * @param conditions - The conditions to search
+ * @returns DifficultyRange if a difficulty condition is found, null otherwise
+ */
+export declare function extractDifficultyConstraints(conditions?: Conditions): DifficultyRange | null;
+/**
+ * Result of intersecting difficulty ranges.
+ */
+export interface DifficultyRangeIntersection {
+    /** The effective minimum difficulty to use */
+    min: number;
+    /** The effective maximum difficulty to use */
+    max: number;
+    /** True if a valid intersection was found, false if fell back to Collection range */
+    usedIntersection: boolean;
+    /** True if Collection had a difficulty constraint */
+    hadCollectionConstraint: boolean;
+}
+/**
+ * Calculates the intersection of a pool's difficulty range with Collection constraints.
+ *
+ * If no intersection exists (ranges are disjoint), returns the Collection's range
+ * since the Collection constraints take precedence over skill-based calculations.
+ *
+ * @param poolMin - The pool's calculated minimum difficulty
+ * @param poolMax - The pool's calculated maximum difficulty
+ * @param collectionRange - The Collection's difficulty constraints (if any)
+ * @returns The effective range to use for queries, plus metadata
+ */
+export declare function intersectDifficultyRanges(poolMin: number, poolMax: number, collectionRange: DifficultyRange | null): DifficultyRangeIntersection;
+/**
+ * Checks if a difficulty value falls within a range.
+ *
+ * Useful for in-memory filtering in strategies that don't use database queries
+ * (ReinforcementPoolStrategy, StalePoolStrategy, DecompositionPoolStrategy).
+ *
+ * Uses half-open interval semantics: min is inclusive (>=), max is exclusive (<).
+ *
+ * @param difficulty - The difficulty value to check
+ * @param range - The range to check against (null means no constraint)
+ * @returns true if difficulty is within range or no range specified
+ */
+export declare function isWithinDifficultyRange(difficulty: number, range: DifficultyRange | null): boolean;
package/dist/util/DifficultyRange.js
ADDED

@@ -0,0 +1,95 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.extractDifficultyConstraints = extractDifficultyConstraints;
+exports.intersectDifficultyRanges = intersectDifficultyRanges;
+exports.isWithinDifficultyRange = isWithinDifficultyRange;
+/**
+ * Extracts difficulty constraints from Conditions.
+ *
+ * Checks the 'all' section for difficulty conditions since that's where
+ * Collection-level constraints are placed (they must all be satisfied).
+ *
+ * Exploits the invariant that difficulty and skill level are always >= 0,
+ * so min defaults to 0 when not specified.
+ *
+ * @param conditions - The conditions to search
+ * @returns DifficultyRange if a difficulty condition is found, null otherwise
+ */
+function extractDifficultyConstraints(conditions) {
+    if (!conditions?.all)
+        return null;
+    for (const condition of conditions.all) {
+        if (condition.type === 'difficulty') {
+            const diffCondition = condition;
+            return {
+                min: diffCondition.min ?? 0,
+                max: diffCondition.max
+            };
+        }
+    }
+    return null;
+}
+/**
+ * Calculates the intersection of a pool's difficulty range with Collection constraints.
+ *
+ * If no intersection exists (ranges are disjoint), returns the Collection's range
+ * since the Collection constraints take precedence over skill-based calculations.
+ *
+ * @param poolMin - The pool's calculated minimum difficulty
+ * @param poolMax - The pool's calculated maximum difficulty
+ * @param collectionRange - The Collection's difficulty constraints (if any)
+ * @returns The effective range to use for queries, plus metadata
+ */
+function intersectDifficultyRanges(poolMin, poolMax, collectionRange) {
+    // No Collection constraint - use pool's range as-is
+    if (!collectionRange) {
+        return {
+            min: poolMin,
+            max: poolMax,
+            usedIntersection: true,
+            hadCollectionConstraint: false
+        };
+    }
+    // Calculate intersection
+    const intersectMin = Math.max(poolMin, collectionRange.min);
+    const intersectMax = collectionRange.max !== undefined
+        ? Math.min(poolMax, collectionRange.max)
+        : poolMax; // No upper bound on Collection, use pool's max
+    // Check if intersection is valid (non-empty range)
+    if (intersectMin < intersectMax) {
+        return {
+            min: intersectMin,
+            max: intersectMax,
+            usedIntersection: true,
+            hadCollectionConstraint: true
+        };
+    }
+    // No valid intersection - fall back to Collection's range
+    return {
+        min: collectionRange.min,
+        max: collectionRange.max ?? poolMax, // Use pool max if Collection has no upper bound
+        usedIntersection: false,
+        hadCollectionConstraint: true
+    };
+}
+/**
+ * Checks if a difficulty value falls within a range.
+ *
+ * Useful for in-memory filtering in strategies that don't use database queries
+ * (ReinforcementPoolStrategy, StalePoolStrategy, DecompositionPoolStrategy).
+ *
+ * Uses half-open interval semantics: min is inclusive (>=), max is exclusive (<).
+ *
+ * @param difficulty - The difficulty value to check
+ * @param range - The range to check against (null means no constraint)
+ * @returns true if difficulty is within range or no range specified
+ */
+function isWithinDifficultyRange(difficulty, range) {
+    if (!range)
+        return true;
+    if (difficulty < range.min)
+        return false;
+    if (range.max !== undefined && difficulty >= range.max)
+        return false;
+    return true;
+}
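A minimal usage sketch of the new range helpers, assuming they surface from the package root via the util re-export added below in dist/util/index.js (values are illustrative):

    import {
        intersectDifficultyRanges,
        isWithinDifficultyRange,
    } from '@shaxpir/duiduidui-models';

    // Pool computed [40, 120) from skill; Collection constrains difficulty to [0, 100).
    intersectDifficultyRanges(40, 120, { min: 0, max: 100 });
    // -> { min: 40, max: 100, usedIntersection: true, hadCollectionConstraint: true }

    // Disjoint ranges fall back to the Collection's constraint.
    intersectDifficultyRanges(200, 300, { min: 0, max: 100 });
    // -> { min: 0, max: 100, usedIntersection: false, hadCollectionConstraint: true }

    // Half-open interval semantics: min inclusive, max exclusive.
    isWithinDifficultyRange(40, { min: 40, max: 100 });  // true
    isWithinDifficultyRange(100, { min: 40, max: 100 }); // false
    isWithinDifficultyRange(75, null);                   // true (no constraint)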
package/dist/util/index.d.ts
CHANGED
package/dist/util/index.js
CHANGED
@@ -18,6 +18,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 __exportStar(require("./AvatarUri"), exports);
 __exportStar(require("./ConditionMatcher"), exports);
 __exportStar(require("./Database"), exports);
+__exportStar(require("./DifficultyRange"), exports);
 __exportStar(require("./Encryption"), exports);
 __exportStar(require("./Logging"), exports);
 __exportStar(require("./SenseRankEncoder"), exports);