ruby-statistics 2.1.2 → 2.1.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: f02b2824672be63fdfac39ad23d7ef4c6c60c9832ac1cdeae665e5e8ccefb5f3
- data.tar.gz: 7dd359dc331405961c1b5739a70c19831c8c12908bd09152e8d30ef6f3a42c08
+ metadata.gz: 6612502f03d8077d0158d997a42dfbc4d1002f2ab01ce2b7bdb5fbd510187e3e
+ data.tar.gz: 14fb04073b5b788dfa9e93aa586daef050dd105c2d2f8bdd17db30ad1fbcf144
  SHA512:
- metadata.gz: 259fc76594f462aa63770351f8851cdd694d3a045e7d31d7a58a7a578c1fd6d805870b8bec8f2a7f1f2df4995537ab7eef2efb76542aa77e0371311e99e81680
- data.tar.gz: a06d8899bcfcbc05b8ad1da6c09695d0247c11756ea26087334b229c182f5c11831b3692ccad30c75ef3dc9fc8a2699269ff7aaba22224a2d03d1280363ff1a9
+ metadata.gz: '09590f836a59563819a1a847830e5dc2ee3554415cadc81c35b2a0f43ab1af87204f028659e8aa2f30a14b58c69c3e4f65db5e722d0a00ced5d92faa1e7dce82'
+ data.tar.gz: 2e66a26c23bf1f05cb9de40e992b302c4f0fef13aa70b4e509de479cb15b9700d4032f5d548aa45110f161ef9dac417f9b1872479a02dca0e729a051be2a4fc8
@@ -0,0 +1,15 @@
+ # To get started with Dependabot version updates, you'll need to specify which
+ # package ecosystems to update and where the package manifests are located.
+ # Please see the documentation for all configuration options:
+ # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+ version: 2
+ updates:
+   - package-ecosystem: "bundler" # See documentation for possible values
+     directory: "/" # Location of package manifests
+     schedule:
+       interval: "weekly"
+   - package-ecosystem: "github-actions" # See documentation for possible values
+     directory: "/" # Location of package manifests
+     schedule:
+       interval: "weekly"
@@ -8,9 +8,9 @@ jobs:
  runs-on: ubuntu-latest

  steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2.3.4
  - name: Set up Ruby 2.6
- uses: actions/setup-ruby@v1
+ uses: actions/setup-ruby@v1.1.2
  with:
  ruby-version: 2.6.x
  - name: Build and test with Rake
@@ -23,9 +23,9 @@ jobs:
  runs-on: ubuntu-latest

  steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2.3.4
  - name: Set up Ruby 2.7
- uses: actions/setup-ruby@v1
+ uses: actions/setup-ruby@v1.1.2
  with:
  ruby-version: 2.7.x
  - name: Build and test with Rake
data/lib/math.rb CHANGED
@@ -9,11 +9,11 @@ module Math
  end

  def self.combination(n, r)
- self.factorial(n)/(self.factorial(r) * self.factorial(n - r)).to_f # n!/(r! * [n - r]!)
+ self.factorial(n)/(self.factorial(r) * self.factorial(n - r)).to_r # n!/(r! * [n - r]!)
  end

  def self.permutation(n, k)
- self.factorial(n)/self.factorial(n - k).to_f
+ self.factorial(n)/self.factorial(n - k).to_r
  end

  # Function adapted from the python implementation that exists in https://en.wikipedia.org/wiki/Simpson%27s_rule#Sample_implementation
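Most of the changes in this release swap Float coercion (.to_f) for Rational coercion (.to_r), which keeps divisions exact once intermediate values such as factorials get large. A small plain-Ruby illustration of the difference (values chosen for the example, not taken from the gem's tests):

    # C(30, 15) = 155117520. A Float-coerced denominator rounds the huge
    # intermediate factorials to 53 bits of precision; a Rational denominator
    # keeps the division exact.
    factorial = ->(n) { (1..n).reduce(1, :*) }
    factorial.call(30) / (factorial.call(15) * factorial.call(15)).to_f # => Float, subject to rounding
    factorial.call(30) / (factorial.call(15) * factorial.call(15)).to_r # => (155117520/1), exact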
@@ -24,7 +24,8 @@ module Math
  return
  end

- h = (b - a)/n.to_f
+ h = (b - a)/n.to_r
+
  resA = yield(a)
  resB = yield(b)

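For context, simpson_rule follows the composite Simpson's rule referenced in the comment above, and with a Rational step h the quadrature nodes stay exact until the caller's block introduces Floats. A minimal standalone sketch of the same idea (illustrative, not the gem's implementation):

    # Composite Simpson's rule over [a, b] with n (even) slices.
    def simpson(a, b, n)
      h = (b - a) / n.to_r
      sum = yield(a) + yield(b)
      (1...n).each { |i| sum += (i.odd? ? 4 : 2) * yield(a + i * h) }
      sum * h / 3
    end

    simpson(0, 1, 1_000) { |t| t * t } # => (1/3), exact because Simpson's rule is exact up to cubics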
@@ -45,7 +46,7 @@ module Math

  def self.lower_incomplete_gamma_function(s, x)
  # The greater the iterations, the better. That's why we are iterating 10_000 * x times
- self.simpson_rule(0, x, (10_000 * x.round).round) do |t|
+ self.simpson_rule(0, x.to_r, (10_000 * x.round).round) do |t|
  (t ** (s - 1)) * Math.exp(-t)
  end
  end
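The integral approximated here is the lower incomplete gamma function γ(s, x), the integral of t^(s-1) * e^(-t) for t from 0 to x. With the gem loaded, a quick sanity check against the closed form γ(1, x) = 1 - e^(-x) looks like this (approximate, since the integral is evaluated numerically):

    Math.lower_incomplete_gamma_function(1, 2.0) # ≈ 1 - Math.exp(-2) ≈ 0.8647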
@@ -72,7 +73,7 @@ module Math
  # To avoid overflow problems, the implementation applies the logarithm properties
  # to calculate in a faster and safer way the values.
  lbet_ab = (Math.lgamma(alp)[0] + Math.lgamma(bet)[0] - Math.lgamma(alp + bet)[0]).freeze
- front = (Math.exp(Math.log(x) * alp + Math.log(1.0 - x) * bet - lbet_ab) / alp.to_f).freeze
+ front = (Math.exp(Math.log(x) * alp + Math.log(1.0 - x) * bet - lbet_ab) / alp.to_r).freeze

  # This is the non-log version of the left part of the formula (before the continuous fraction)
  # down_left = alp * self.beta_function(alp, bet)
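Written out, the logarithm trick in this hunk computes the front factor of the continued-fraction expansion as

    front = \exp\big(\alpha \ln x + \beta \ln(1 - x) - \ln B(\alpha, \beta)\big) / \alpha, \qquad \ln B(\alpha, \beta) = \ln\Gamma(\alpha) + \ln\Gamma(\beta) - \ln\Gamma(\alpha + \beta)

which equals x^α · (1 - x)^β / (α · B(α, β)) without ever materialising the large powers or gamma values directly.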
@@ -24,7 +24,7 @@ module Statistics
  end

  def self.skewness(p)
- (1.0 - 2.0*p).to_f / Math.sqrt(p * (1.0 - p))
+ (1.0 - 2.0*p).to_r / Math.sqrt(p * (1.0 - p))
  end

  def self.kurtosis(p)
@@ -4,8 +4,8 @@ module Statistics
  attr_accessor :alpha, :beta

  def initialize(alp, bet)
- self.alpha = alp.to_f
- self.beta = bet.to_f
+ self.alpha = alp.to_r
+ self.beta = bet.to_r
  end

  def cumulative_function(value)
@@ -19,7 +19,7 @@ module Statistics
  summation
  end

- cumulative_sum / samples.size.to_f
+ cumulative_sum / samples.size.to_r
  end
  end
  end
@@ -10,7 +10,7 @@ module Statistics

  # Formula extracted from http://www.itl.nist.gov/div898/handbook/eda/section3/eda3665.htm#CDF
  def cumulative_function(value)
- k = d2/(d2 + d1 * value.to_f)
+ k = d2/(d2 + d1 * value.to_r)

  1 - Math.incomplete_beta_function(k, d2/2.0, d1/2.0)
  end
@@ -18,28 +18,28 @@ module Statistics
  def density_function(value)
  return if d1 < 0 || d2 < 0 # F-pdf is well defined for the [0, +infinity) interval.

- val = value.to_f
+ val = value.to_r
  upper = ((d1 * val) ** d1) * (d2**d2)
  lower = (d1 * val + d2) ** (d1 + d2)
- up = Math.sqrt(upper/lower.to_f)
+ up = Math.sqrt(upper/lower.to_r)
  down = val * Math.beta_function(d1/2.0, d2/2.0)

- up/down.to_f
+ up/down.to_r
  end

  def mean
  return if d2 <= 2

- d2/(d2 - 2).to_f
+ d2/(d2 - 2).to_r
  end

  def mode
  return if d1 <= 2

- left = (d1 - 2)/d1.to_f
- right = d2/(d2 + 2).to_f
+ left = (d1 - 2)/d1.to_r
+ right = d2/(d2 + 2).to_r

- left * right
+ (left * right).to_f
  end
  end
  end
@@ -4,7 +4,7 @@ module Statistics
  attr_accessor :probability_of_success, :always_success_allowed

  def initialize(p, always_success: false)
- self.probability_of_success = p.to_f
+ self.probability_of_success = p.to_r
  self.always_success_allowed = always_success
  end

@@ -6,7 +6,7 @@ module Statistics
  k = k.to_i

  left = (-1.0 / Math.log(1.0 - p))
- right = (p ** k).to_f
+ right = (p ** k).to_r

  left * right / k
  end
@@ -44,7 +44,7 @@ module Statistics
  up = p + Math.log(1.0 - p)
  down = ((1.0 - p) ** 2) * (Math.log(1.0 - p) ** 2)

- (-1.0 * p) * (up / down.to_f)
+ (-1.0 * p) * (up / down.to_r)
  end
  end
  end
@@ -25,21 +25,21 @@ module Statistics
  end

  def mean
- (probability_per_trial * number_of_failures)/(1 - probability_per_trial).to_f
+ (probability_per_trial * number_of_failures)/(1 - probability_per_trial).to_r
  end

  def variance
- (probability_per_trial * number_of_failures)/((1 - probability_per_trial) ** 2).to_f
+ (probability_per_trial * number_of_failures)/((1 - probability_per_trial) ** 2).to_r
  end

  def skewness
- (1 + probability_per_trial).to_f / Math.sqrt(probability_per_trial * number_of_failures)
+ (1 + probability_per_trial).to_r / Math.sqrt(probability_per_trial * number_of_failures)
  end

  def mode
  if number_of_failures > 1
  up = probability_per_trial * (number_of_failures - 1)
- down = (1 - probability_per_trial).to_f
+ down = (1 - probability_per_trial).to_r

  (up/down).floor
  elsif number_of_failures <= 1
@@ -5,9 +5,9 @@ module Statistics
  alias_method :mode, :mean

  def initialize(avg, std)
- self.mean = avg.to_f
- self.standard_deviation = std.to_f
- self.variance = std.to_f**2
+ self.mean = avg.to_r
+ self.standard_deviation = std.to_r
+ self.variance = std.to_r**2
  end

  def cumulative_function(value)
@@ -18,7 +18,7 @@ module Statistics
  upper = (expected_number_of_occurrences ** k) * Math.exp(-expected_number_of_occurrences)
  lower = Math.factorial(k)

- upper/lower.to_f
+ upper/lower.to_r
  end

  def cumulative_function(k)
@@ -31,7 +31,7 @@ module Statistics

  # We need the right tail, i.e.: The upper incomplete gamma function. This can be
  # achieved by doing a substraction between 1 and the lower incomplete gamma function.
- 1 - (upper/lower.to_f)
+ 1 - (upper/lower.to_r)
  end
  end
  end
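The standard identity behind that comment, for the Poisson CDF, is

    P(X \le k) = Q(k + 1, \lambda) = 1 - \frac{\gamma(k + 1, \lambda)}{\Gamma(k + 1)} = 1 - \frac{\gamma(k + 1, \lambda)}{k!}

where γ is the lower incomplete gamma function approximated earlier in math.rb; this hunk only changes how the final ratio is coerced.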
@@ -29,7 +29,7 @@ module Statistics
  upper = Math.gamma((degrees_of_freedom + 1)/2.0)
  lower = Math.sqrt(degrees_of_freedom * Math::PI) * Math.gamma(degrees_of_freedom/2.0)
  left = upper/lower
- right = (1 + ((value ** 2)/degrees_of_freedom.to_f)) ** -((degrees_of_freedom + 1)/2.0)
+ right = (1 + ((value ** 2)/degrees_of_freedom.to_r)) ** -((degrees_of_freedom + 1)/2.0)

  left * right
  end
@@ -64,8 +64,8 @@ module Statistics
  results << Math.simpson_rule(threshold, y, 10_000) do |t|
  up = Math.gamma((v+1)/2.0)
  down = Math.sqrt(Math::PI * v) * Math.gamma(v/2.0)
- right = (1 + ((y ** 2)/v.to_f)) ** ((v+1)/2.0)
- left = up/down.to_f
+ right = (1 + ((y ** 2)/v.to_r)) ** ((v+1)/2.0)
+ left = up/down.to_r

  left * right
  end
@@ -4,8 +4,8 @@ module Statistics
  attr_accessor :left, :right

  def initialize(a, b)
- self.left = a.to_f
- self.right = b.to_f
+ self.left = a.to_r
+ self.right = b.to_r
  end

  def density_function(value)
@@ -4,8 +4,8 @@ module Statistics
  attr_accessor :shape, :scale # k and lambda

  def initialize(k, lamb)
- self.shape = k.to_f
- self.scale = lamb.to_f
+ self.shape = k.to_r
+ self.scale = lamb.to_r
  end

  def cumulative_function(random_value)
@@ -14,7 +14,7 @@ module Statistics
  if rankings.fetch(value, false)
  rankings[value][:rank] += (temporal_ranking + rankings[value][:counter])
  rankings[value][:counter] += 1
- rankings[value][:tie_rank] = rankings[value][:rank] / rankings[value][:counter].to_f
+ rankings[value][:tie_rank] = rankings[value][:rank] / rankings[value][:counter].to_r
  else
  rankings[value] = { counter: 1, rank: temporal_ranking, tie_rank: temporal_ranking }
  end
@@ -35,7 +35,7 @@ module Statistics
  return if set_one.size == 0 && set_two.size == 0

  set_one_mean, set_two_mean = set_one.mean, set_two.mean
- have_tie_ranks = (set_one + set_two).any? { |rank| rank.is_a?(Float) }
+ have_tie_ranks = (set_one + set_two).any? { |rank| rank.is_a?(Float) || rank.is_a?(Rational) }

  if have_tie_ranks
  numerator = 0
@@ -54,7 +54,7 @@ module Statistics

  denominator = Math.sqrt(squared_differences_set_one * squared_differences_set_two)

- numerator / denominator.to_f # This is rho or spearman's coefficient.
+ numerator / denominator.to_r # This is rho or spearman's coefficient.
  else
  sum_squared_differences = set_one.each_with_index.reduce(0) do |memo, (rank_one, index)|
  memo += ((rank_one - set_two[index]) ** 2)
@@ -64,7 +64,7 @@ module Statistics
  numerator = 6 * sum_squared_differences
  denominator = ((set_one.size ** 3) - set_one.size)

- 1.0 - (numerator / denominator.to_f) # This is rho or spearman's coefficient.
+ 1.0 - (numerator / denominator.to_r) # This is rho or spearman's coefficient.
  end
  end
  end
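The else branch is the tie-free Spearman formula rho = 1 - 6 * Σd² / (n³ - n). A tiny worked check with perfectly reversed ranks (illustrative values, computed in plain Ruby):

    set_one = [1, 2, 3]
    set_two = [3, 2, 1]
    d_squared = set_one.zip(set_two).sum { |a, b| (a - b) ** 2 } # => 8
    1.0 - (6 * d_squared) / ((set_one.size ** 3) - set_one.size).to_r # => -1.0, a perfect inverse correlation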
@@ -8,12 +8,12 @@ module Statistics
  statistic = if expected.is_a? Numeric
  observed.reduce(0) do |memo, observed_value|
  up = (observed_value - expected) ** 2
- memo += (up/expected.to_f)
+ memo += (up/expected.to_r)
  end
  else
  expected.each_with_index.reduce(0) do |memo, (expected_value, index)|
  up = (observed[index] - expected_value) ** 2
- memo += (up/expected_value.to_f)
+ memo += (up/expected_value.to_r)
  end
  end

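The statistic assembled here is the usual chi-squared sum, Σ (observed - expected)² / expected. A small worked instance mirroring the Numeric branch (illustrative data):

    observed = [10, 20, 30]
    expected = 20
    observed.reduce(0) { |memo, o| memo + ((o - expected) ** 2) / expected.to_r } # => (10/1)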
@@ -19,7 +19,7 @@ module Statistics
  if args.size == 2
  variances = [args[0].variance, args[1].variance]

- f_score = variances.max/variances.min.to_f
+ f_score = variances.max/variances.min.to_r
  df1 = 1 # k-1 (k = 2)
  df2 = args.flatten.size - 2 # N-k (k = 2)
  elsif args.size > 2
@@ -37,18 +37,18 @@ module Statistics
  variance_between_groups = iterator.reduce(0) do |summation, (size, index)|
  inner_calculation = size * ((sample_means[index] - overall_mean) ** 2)

- summation += (inner_calculation / (total_groups - 1).to_f)
+ summation += (inner_calculation / (total_groups - 1).to_r)
  end

  # Variance within groups
  variance_within_groups = (0...total_groups).reduce(0) do |outer_summation, group_index|
  outer_summation += args[group_index].reduce(0) do |inner_sumation, observation|
  inner_calculation = ((observation - sample_means[group_index]) ** 2)
- inner_sumation += (inner_calculation / (total_elements - total_groups).to_f)
+ inner_sumation += (inner_calculation / (total_elements - total_groups).to_r)
  end
  end

- f_score = variance_between_groups/variance_within_groups.to_f
+ f_score = variance_between_groups/variance_within_groups.to_r
  df1 = total_groups - 1
  df2 = total_elements - total_groups
  end
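Put together, the one-way ANOVA score built above is

    F = \frac{\sum_i n_i (\bar{x}_i - \bar{x})^2 / (k - 1)}{\sum_i \sum_j (x_{ij} - \bar{x}_i)^2 / (N - k)}

with k groups and N total observations, matching df1 = k - 1 and df2 = N - k in the code.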
@@ -17,7 +17,7 @@ module Statistics

  # TODO: Validate calculation of Common alpha.
  common_alpha = Math.sqrt((-0.5 * Math.log(alpha)))
- radicand = (group_one.size + group_two.size) / (group_one.size * group_two.size).to_f
+ radicand = (group_one.size + group_two.size) / (group_one.size * group_two.size).to_r

  critical_d = common_alpha * Math.sqrt(radicand)
  # critical_d = self.critical_d(alpha: alpha, n: samples.size)
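The critical value computed here follows the common two-sample Kolmogorov-Smirnov approximation

    D_{crit} = \sqrt{-\tfrac{1}{2} \ln \alpha} \cdot \sqrt{\frac{n_1 + n_2}{n_1 n_2}}

where n1 and n2 are the group sizes; since Math.sqrt returns a Float anyway, the .to_r coercion only changes how the radicand is represented before the square root.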
@@ -23,7 +23,7 @@ module Statistics
  comparison_mean = args[0]
  degrees_of_freedom = args[1].size - 1

- (data_mean - comparison_mean)/(data_std / Math.sqrt(args[1].size).to_f).to_f
+ (data_mean - comparison_mean)/(data_std / Math.sqrt(args[1].size).to_r).to_r
  else
  sample_left_mean = args[0].mean
  sample_left_variance = args[0].variance
@@ -31,12 +31,12 @@ module Statistics
  sample_right_mean = args[1].mean
  degrees_of_freedom = args.flatten.size - 2

- left_root = sample_left_variance/args[0].size.to_f
- right_root = sample_right_variance/args[1].size.to_f
+ left_root = sample_left_variance/args[0].size.to_r
+ right_root = sample_right_variance/args[1].size.to_r

  standard_error = Math.sqrt(left_root + right_root)

- (sample_left_mean - sample_right_mean).abs/standard_error.to_f
+ (sample_left_mean - sample_right_mean).abs/standard_error.to_r
  end

  t_distribution = Distribution::TStudent.new(degrees_of_freedom)
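For the two-sample branch this evaluates the t statistic with an unpooled standard error,

    t = \frac{|\bar{x}_1 - \bar{x}_2|}{\sqrt{s_1^2 / n_1 + s_2^2 / n_2}}

with degrees_of_freedom = n1 + n2 - 2, and the score is then evaluated against Distribution::TStudent.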
@@ -72,7 +72,7 @@ module Statistics

  down = difference_std/Math.sqrt(differences.size)

- t_score = (differences.mean - 0)/down.to_f
+ t_score = (differences.mean - 0)/down.to_r

  probability = Distribution::TStudent.new(degrees_of_freedom).cumulative_function(t_score)

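The paired branch computes the familiar

    t = \frac{\bar{d}}{s_d / \sqrt{n}}

where d-bar is the mean of the pairwise differences, s_d their standard deviation and n the number of pairs.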
@@ -73,7 +73,7 @@ module Statistics
  memo += ((t[:counter] ** 3) - t[:counter])/12.0
  end

- left = (total_group_one * total_group_two)/(n * (n - 1)).to_f
+ left = (total_group_one * total_group_two)/(n * (n - 1)).to_r
  right = (((n ** 3) - n)/12.0) - rank_sum

  Math.sqrt(left * right)
@@ -82,7 +82,7 @@ module Statistics
  private def ranked_sum_for(total, group)
  # sum rankings per group
  group.reduce(0) do |memo, element|
- rank_of_element = total[element][:rank] / total[element][:counter].to_f
+ rank_of_element = total[element][:rank] / total[element][:counter].to_r
  memo += rank_of_element
  end
  end
@@ -1,3 +1,3 @@
  module Statistics
- VERSION = "2.1.2"
+ VERSION = "2.1.3"
  end
@@ -27,8 +27,9 @@ Gem::Specification.new do |spec|
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

- spec.add_development_dependency "rake", '~> 12.0', '>= 12.0.0'
+ spec.add_development_dependency "rake", '>= 12.0.0', '~> 13.0'
  spec.add_development_dependency "rspec", '>= 3.6.0'
  spec.add_development_dependency "grb", '~> 0.4.1', '>= 0.4.1'
  spec.add_development_dependency 'byebug', '>= 9.1.0'
+ spec.add_development_dependency 'pry'
  end
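A note on the reordered rake pin: combining '>= 12.0.0' with '~> 13.0' resolves to rake 13.x (at least 13.0, below 14.0), since the pessimistic constraint subsumes the explicit floor; pry is added unconstrained, as a development-only dependency.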
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-statistics
  version: !ruby/object:Gem::Version
- version: 2.1.2
+ version: 2.1.3
  platform: ruby
  authors:
  - esteban zapata
- autorequire:
+ autorequire:
  bindir: exe
  cert_chain: []
- date: 2020-03-01 00:00:00.000000000 Z
+ date: 2021-02-04 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rake
@@ -19,7 +19,7 @@ dependencies:
  version: 12.0.0
  - - "~>"
  - !ruby/object:Gem::Version
- version: '12.0'
+ version: '13.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
@@ -29,7 +29,7 @@ dependencies:
  version: 12.0.0
  - - "~>"
  - !ruby/object:Gem::Version
- version: '12.0'
+ version: '13.0'
  - !ruby/object:Gem::Dependency
  name: rspec
  requirement: !ruby/object:Gem::Requirement
@@ -48,20 +48,20 @@ dependencies:
  name: grb
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - "~>"
  - !ruby/object:Gem::Version
  version: 0.4.1
- - - "~>"
+ - - ">="
  - !ruby/object:Gem::Version
  version: 0.4.1
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - "~>"
  - !ruby/object:Gem::Version
  version: 0.4.1
- - - "~>"
+ - - ">="
  - !ruby/object:Gem::Version
  version: 0.4.1
  - !ruby/object:Gem::Dependency
@@ -78,6 +78,20 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: 9.1.0
+ - !ruby/object:Gem::Dependency
+ name: pry
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
+ type: :development
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
  description: |-
  This gem is intended to accomplish the same purpose as jStat js library:
  to provide ruby with statistical capabilities without the need
@@ -90,6 +104,7 @@ executables: []
  extensions: []
  extra_rdoc_files: []
  files:
+ - ".github/dependabot.yml"
  - ".github/workflows/ruby.yml"
  - ".gitignore"
  - ".rspec"
@@ -134,7 +149,7 @@ homepage: https://github.com/estebanz01/ruby-statistics
  licenses:
  - MIT
  metadata: {}
- post_install_message:
+ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -149,8 +164,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.0.8
- signing_key:
+ rubygems_version: 3.1.4
+ signing_key:
  specification_version: 4
  summary: A ruby gem for som specific statistics. Inspired by the jStat js library.
  test_files: []