qesuite 1.0.65 → 1.0.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -503,7 +503,8 @@ declare const Beta: {
503
503
  * @param a The first shape parameter; x^(a-1).
504
504
  * @param b The second shape parameter; (1 - x)^(b-1).
505
505
  */
506
- inc(x: number, a: number, b: number): number;
506
+ inc(X: number, A: number, B: number): number;
507
+ incomplete(x: any, a: any, b: any): any;
507
508
  /**
508
509
  * Evaluates the regularized incomplete beta function of x for the beta function with specified shape parameters a and b.
509
510
  * @customfunction
@@ -512,6 +513,7 @@ declare const Beta: {
512
513
  * @param b The second shape parameter; (1 - x)^(b-1).
513
514
  */
514
515
  incr(x: number, a: number, b: number): number;
516
+ invInc(p: any, a: any, b: any): any;
515
517
  };
516
518
  declare const ERF: {
517
519
  /**
@@ -589,7 +591,7 @@ declare const Distributions: {
589
591
  * @param a The first shape parameter, x^(a-1).
590
592
  * @param b The second shape parameter; (1 - x)^(b-1).
591
593
  */
592
- cdf(x: number, a: number, b: number): number;
594
+ cdf(x: number, a: number, b: number): any;
593
595
  /**
594
596
  * Probability Density Function. Calculates the probability of random variable x occuring from the beta distribution with the specified shape parameters a and b.
595
597
  * @param x The value to evaluate.
@@ -662,7 +664,7 @@ declare const Distributions: {
662
664
  * @param df2 Denominator degrees of freedom
663
665
  * @returns the Right-Tailed P-Value of the F Distribution
664
666
  */
665
- cdf(x: number, df1: number, df2: number): number;
667
+ cdf(x: number, df1: number, df2: number): any;
666
668
  /**
667
669
  * Calculates the right-tailed p-value of the supplied F distribution.
668
670
  * @param f F value to evaluate
@@ -1140,6 +1142,9 @@ declare const ANOVA: {
1140
1142
  chart: HTMLCanvasElement;
1141
1143
  };
1142
1144
  };
1145
+ Balanced: {
1146
+ Table(responses: any, factors: any, interactions?: any): {};
1147
+ };
1143
1148
  GeneralLinearModel(responses: any, factors: any, covariates: any): void;
1144
1149
  OneWay: {
1145
1150
  Table(data: any): any;
package/dist/index.d.ts CHANGED
@@ -503,7 +503,8 @@ declare const Beta: {
503
503
  * @param a The first shape parameter; x^(a-1).
504
504
  * @param b The second shape parameter; (1 - x)^(b-1).
505
505
  */
506
- inc(x: number, a: number, b: number): number;
506
+ inc(X: number, A: number, B: number): number;
507
+ incomplete(x: any, a: any, b: any): any;
507
508
  /**
508
509
  * Evaluates the regularized incomplete beta function of x for the beta function with specified shape parameters a and b.
509
510
  * @customfunction
@@ -512,6 +513,7 @@ declare const Beta: {
512
513
  * @param b The second shape parameter; (1 - x)^(b-1).
513
514
  */
514
515
  incr(x: number, a: number, b: number): number;
516
+ invInc(p: any, a: any, b: any): any;
515
517
  };
516
518
  declare const ERF: {
517
519
  /**
@@ -589,7 +591,7 @@ declare const Distributions: {
589
591
  * @param a The first shape parameter, x^(a-1).
590
592
  * @param b The second shape parameter; (1 - x)^(b-1).
591
593
  */
592
- cdf(x: number, a: number, b: number): number;
594
+ cdf(x: number, a: number, b: number): any;
593
595
  /**
594
596
  * Probability Density Function. Calculates the probability of random variable x occuring from the beta distribution with the specified shape parameters a and b.
595
597
  * @param x The value to evaluate.
@@ -662,7 +664,7 @@ declare const Distributions: {
662
664
  * @param df2 Denominator degrees of freedom
663
665
  * @returns the Right-Tailed P-Value of the F Distribution
664
666
  */
665
- cdf(x: number, df1: number, df2: number): number;
667
+ cdf(x: number, df1: number, df2: number): any;
666
668
  /**
667
669
  * Calculates the right-tailed p-value of the supplied F distribution.
668
670
  * @param f F value to evaluate
@@ -1140,6 +1142,9 @@ declare const ANOVA: {
1140
1142
  chart: HTMLCanvasElement;
1141
1143
  };
1142
1144
  };
1145
+ Balanced: {
1146
+ Table(responses: any, factors: any, interactions?: any): {};
1147
+ };
1143
1148
  GeneralLinearModel(responses: any, factors: any, covariates: any): void;
1144
1149
  OneWay: {
1145
1150
  Table(data: any): any;
package/dist/index.js CHANGED
@@ -1078,7 +1078,6 @@ var Capability = {
1078
1078
  */
1079
1079
  Cp(data, spec, subgroupSize = 1, method, w, UnbiasingConstant2 = true) {
1080
1080
  let standarddeviation = StDev.W(data, subgroupSize, method, w, UnbiasingConstant2);
1081
- console.log({ StDevW: standarddeviation });
1082
1081
  if (!Number.isNaN(spec.USL) && !Number.isNaN(spec.LSL)) {
1083
1082
  return Number((spec.USL - spec.LSL) / (6 * standarddeviation));
1084
1083
  } else {
@@ -1317,6 +1316,7 @@ var Capability = {
1317
1316
  if (confidenceInterval == void 0) {
1318
1317
  sides = void 0;
1319
1318
  }
1319
+ console.log(confidenceInterval, sides);
1320
1320
  let results = Capability.Analysis(data, spec, target, subgroupSize, confidenceInterval, sides, method, w, UnbiasingConstant2);
1321
1321
  let mean = Mean(data);
1322
1322
  let std = StDev.S(data);
@@ -1802,10 +1802,34 @@ var Beta = {
1802
1802
  * @param a The first shape parameter; x^(a-1).
1803
1803
  * @param b The second shape parameter; (1 - x)^(b-1).
1804
1804
  */
1805
- inc(x, a, b) {
1805
+ inc(X, A, B) {
1806
+ var A0 = 0;
1807
+ var B0 = 1;
1808
+ var A1 = 1;
1809
+ var B1 = 1;
1810
+ var M9 = 0;
1811
+ var A2 = 0;
1812
+ var C9;
1813
+ while (Math.abs((A1 - A2) / A1) > 1e-5) {
1814
+ A2 = A1;
1815
+ C9 = -(A + M9) * (A + B + M9) * X / (A + 2 * M9) / (A + 2 * M9 + 1);
1816
+ A0 = A1 + C9 * A0;
1817
+ B0 = B1 + C9 * B0;
1818
+ M9 = M9 + 1;
1819
+ C9 = M9 * (B - M9) * X / (A + 2 * M9 - 1) / (A + 2 * M9);
1820
+ A1 = A0 + C9 * A1;
1821
+ B1 = B0 + C9 * B1;
1822
+ A0 = A0 / B1;
1823
+ B0 = B0 / B1;
1824
+ A1 = A1 / B1;
1825
+ B1 = 1;
1826
+ }
1827
+ return A1 / A;
1828
+ },
1829
+ incomplete(x, a, b) {
1806
1830
  var bt = x === 0 || x === 1 ? 0 : Math.exp(Gamma.ln(a + b) - Gamma.ln(a) - Gamma.ln(b) + a * Math.log(x) + b * Math.log(1 - x));
1807
1831
  if (x < 0 || x > 1)
1808
- return NaN;
1832
+ return false;
1809
1833
  if (x < (a + 1) / (a + b + 2))
1810
1834
  return bt * Beta.cf(x, a, b) / a;
1811
1835
  return 1 - bt * Beta.cf(1 - x, b, a) / b;
@@ -1819,6 +1843,54 @@ var Beta = {
1819
1843
  */
1820
1844
  incr(x, a, b) {
1821
1845
  return Beta.inc(x, a, b) / Beta.fn(a, b);
1846
+ },
1847
+ invInc(p, a, b) {
1848
+ var EPS = 1e-8;
1849
+ var a1 = a - 1;
1850
+ var b1 = b - 1;
1851
+ var j = 0;
1852
+ var lna, lnb, pp, t, u, err, x, al, h, w, afac;
1853
+ if (p <= 0)
1854
+ return 0;
1855
+ if (p >= 1)
1856
+ return 1;
1857
+ if (a >= 1 && b >= 1) {
1858
+ pp = p < 0.5 ? p : 1 - p;
1859
+ t = Math.sqrt(-2 * Math.log(pp));
1860
+ x = (2.30753 + t * 0.27061) / (1 + t * (0.99229 + t * 0.04481)) - t;
1861
+ if (p < 0.5)
1862
+ x = -x;
1863
+ al = (x * x - 3) / 6;
1864
+ h = 2 / (1 / (2 * a - 1) + 1 / (2 * b - 1));
1865
+ w = x * Math.sqrt(al + h) / h - (1 / (2 * b - 1) - 1 / (2 * a - 1)) * (al + 5 / 6 - 2 / (3 * h));
1866
+ x = a / (a + b * Math.exp(2 * w));
1867
+ } else {
1868
+ lna = Math.log(a / (a + b));
1869
+ lnb = Math.log(b / (a + b));
1870
+ t = Math.exp(a * lna) / a;
1871
+ u = Math.exp(b * lnb) / b;
1872
+ w = t + u;
1873
+ if (p < t / w)
1874
+ x = Math.pow(a * w * p, 1 / a);
1875
+ else
1876
+ x = 1 - Math.pow(b * w * (1 - p), 1 / b);
1877
+ }
1878
+ afac = -Gamma.ln(a) - Gamma.ln(b) + Gamma.ln(a + b);
1879
+ for (; j < 10; j++) {
1880
+ if (x === 0 || x === 1)
1881
+ return x;
1882
+ err = Beta.incomplete(x, a, b) - p;
1883
+ t = Math.exp(a1 * Math.log(x) + b1 * Math.log(1 - x) + afac);
1884
+ u = err / t;
1885
+ x -= t = u / (1 - 0.5 * Math.min(1, u * (a1 / x - b1 / (1 - x))));
1886
+ if (x <= 0)
1887
+ x = 0.5 * (x + t);
1888
+ if (x >= 1)
1889
+ x = 0.5 * (x + t + 1);
1890
+ if (Math.abs(t) < EPS * x && j > 0)
1891
+ break;
1892
+ }
1893
+ return x;
1822
1894
  }
1823
1895
  };
1824
1896
  var ERF = {
@@ -1983,7 +2055,17 @@ var Distributions = {
1983
2055
  * @param b The second shape parameter; (1 - x)^(b-1).
1984
2056
  */
1985
2057
  cdf(x, a, b) {
1986
- return Beta.incr(x, a, b);
2058
+ var S;
2059
+ var BT;
2060
+ var Bcdf;
2061
+ S = a + b;
2062
+ BT = Math.exp(Gamma.ln(S) - Gamma.ln(b) - Gamma.ln(a) + a * Math.log(x) + b * Math.log(1 - x));
2063
+ if (x < (a + 1) / (S + 2)) {
2064
+ Bcdf = BT * Beta.inc(x, a, b);
2065
+ } else {
2066
+ Bcdf = 1 - BT * Beta.inc(1 - x, b, a);
2067
+ }
2068
+ return Bcdf;
1987
2069
  },
1988
2070
  /**
1989
2071
  * Probability Density Function. Calculates the probability of random variable x occuring from the beta distribution with the specified shape parameters a and b.
@@ -3852,8 +3934,8 @@ var Distributions = {
3852
3934
  * @returns the Right-Tailed P-Value of the F Distribution
3853
3935
  */
3854
3936
  cdf(x, df1, df2) {
3855
- let subX = df1 * x / (df1 * x + df2);
3856
- let p = Beta.incr(subX, df1 / 2, df2 / 2);
3937
+ let z = x / (x + df2 / df1);
3938
+ let p = Distributions.Beta.cdf(z, df1 / 2, df2 / 2);
3857
3939
  if (p > 1) {
3858
3940
  p = 1;
3859
3941
  }
@@ -4771,7 +4853,7 @@ var Distributions = {
4771
4853
  if (p == 0.5) {
4772
4854
  return 0;
4773
4855
  }
4774
- let x = Distributions.Beta.inv(2 * Math.min(p, 1 - p), 0.5 * df, 0.5);
4856
+ var x = Beta.invInc(2 * Math.min(p, 1 - p), 0.5 * df, 0.5);
4775
4857
  x = Math.sqrt(df * (1 - x) / x);
4776
4858
  return p > 0.5 ? x : -x;
4777
4859
  }
@@ -5304,9 +5386,7 @@ var ANOVA = {
5304
5386
  let pooledSTD = StDev.Pooled(pooledData);
5305
5387
  let alphaADJ = (1 - Math.pow(1 - alpha, 1 / rCount)) / 2;
5306
5388
  let hAlpha = Math.abs(Distributions.T.inv(alphaADJ, grandArray.length - rCount));
5307
- console.log({ alphaADJ, ln: grandArray.length, rCount });
5308
5389
  let scaleDF = Math.sqrt((rCount - 1) / (rCount * tCount));
5309
- console.log({ grandMean, pooledSTD, hAlpha, scaleDF });
5310
5390
  let UDL = grandMean + pooledSTD * hAlpha * scaleDF;
5311
5391
  let LDL = grandMean - pooledSTD * hAlpha * scaleDF;
5312
5392
  let chartSettings = new ChartSettings();
@@ -5331,6 +5411,110 @@ var ANOVA = {
5331
5411
  };
5332
5412
  }
5333
5413
  },
5414
+ Balanced: {
5415
+ Table(responses, factors, interactions = []) {
5416
+ let grandMean = Mean(responses);
5417
+ let factorsWInteractions = {};
5418
+ Object.keys(factors).forEach((f) => {
5419
+ factorsWInteractions[f] = factors[f];
5420
+ });
5421
+ interactions.forEach((element) => {
5422
+ factorsWInteractions[element] = responses.map((r, i) => {
5423
+ return element.split("*").map((k) => {
5424
+ return factors[k][i];
5425
+ }).join("*");
5426
+ });
5427
+ });
5428
+ let factorInfo = {};
5429
+ let totalLevelCount = 1;
5430
+ Object.keys(factors).forEach((k, i) => {
5431
+ let levels = Unique(factors[k]);
5432
+ totalLevelCount *= levels.length;
5433
+ factorInfo[k] = {
5434
+ Factor: k,
5435
+ Levels: levels.length,
5436
+ Values: levels.join(", ")
5437
+ };
5438
+ });
5439
+ let SSFactors = 0;
5440
+ let DFFactors = 0;
5441
+ let anovaTable = {};
5442
+ Object.keys(factorsWInteractions).forEach((k, i) => {
5443
+ let df = Product(k.split("*").map((f) => {
5444
+ return factorInfo[f].Levels - 1;
5445
+ }));
5446
+ DFFactors += df;
5447
+ let ss = 0;
5448
+ Unique(factorsWInteractions[k]).forEach((group) => {
5449
+ let groupMean = Mean(responses.filter((r, ri) => {
5450
+ return factorsWInteractions[k][ri] == group;
5451
+ }));
5452
+ if (k.includes("*")) {
5453
+ let factorLevelMeans = k.split("*").map((f, fi) => {
5454
+ return responses.filter((r, ri) => {
5455
+ return factors[f][ri] == group.split("*")[fi];
5456
+ });
5457
+ });
5458
+ let factorCombos = [];
5459
+ k.split("*").forEach((f, fi) => {
5460
+ k.split("*").forEach((f2, fi2) => {
5461
+ if (fi2 > fi) {
5462
+ let responseValues = responses.filter((r, ri) => {
5463
+ return factors[k.split("*")[fi]][ri] == f && factors[k.split("*")[fi2]][ri] == f2;
5464
+ });
5465
+ factorCombos.push(responseValues);
5466
+ }
5467
+ });
5468
+ });
5469
+ ss += Math.pow(groupMean - Sum(factorLevelMeans.map((arr) => {
5470
+ return Mean(arr);
5471
+ })) + grandMean, 2);
5472
+ } else {
5473
+ ss += Math.pow(groupMean - grandMean, 2);
5474
+ }
5475
+ });
5476
+ let n = responses.length / totalLevelCount;
5477
+ ss *= n * Product(Object.keys(factorInfo).filter((f) => {
5478
+ return k.includes(f) == false;
5479
+ }).map((fk) => {
5480
+ return factorInfo[fk].Levels;
5481
+ }));
5482
+ SSFactors += ss;
5483
+ anovaTable[k] = {
5484
+ Source: k,
5485
+ DF: df,
5486
+ SS: ss,
5487
+ MS: ss / df,
5488
+ F: NaN,
5489
+ p: NaN
5490
+ };
5491
+ });
5492
+ let SST = SumSq(responses.map((r) => {
5493
+ return r - grandMean;
5494
+ }));
5495
+ let DFE = responses.length - DFFactors - 1;
5496
+ let SSE = SST - SSFactors;
5497
+ let MSE = SSE / DFE;
5498
+ Object.keys(anovaTable).forEach((key) => {
5499
+ let fValue = anovaTable[key].MS / MSE;
5500
+ let pValue = Distributions.F.RightTail(fValue, anovaTable[key].DF, DFE);
5501
+ anovaTable[key].F = fValue;
5502
+ anovaTable[key].p = pValue;
5503
+ });
5504
+ anovaTable["Error"] = {
5505
+ Source: "Error",
5506
+ DF: DFE,
5507
+ SS: SSE,
5508
+ MS: MSE
5509
+ };
5510
+ anovaTable["Total"] = {
5511
+ Source: "Total",
5512
+ DF: responses.length - 1,
5513
+ SS: SST
5514
+ };
5515
+ return anovaTable;
5516
+ }
5517
+ },
5334
5518
  GeneralLinearModel(responses, factors, covariates) {
5335
5519
  },
5336
5520
  OneWay: {
@@ -7572,7 +7756,7 @@ var EquivalenceTesting = {
7572
7756
  let mean = Mean(dta);
7573
7757
  let se = StandardError(dta);
7574
7758
  let LSL = multiply ? lowerSpecification * target : lowerSpecification;
7575
- let USL = multiply ? upperSpecification * target : lowerSpecification;
7759
+ let USL = multiply ? upperSpecification * target : upperSpecification;
7576
7760
  let diff = mean - target;
7577
7761
  let CI_Low = Math.min((LSL + USL) / 2, diff - Distributions.T.inv(1 - confidenceLevel, dta.length - 1) * se);
7578
7762
  let CI_High = Math.max((LSL + USL) / 2, diff + Distributions.T.inv(1 - confidenceLevel, dta.length - 1) * se);
@@ -7899,7 +8083,7 @@ var NonparametricTesting = {
7899
8083
  let avgOverallRank = Mean(combinedRanks);
7900
8084
  let N = combinedData.length;
7901
8085
  let zValues = avgRanks.map((Rj, j) => {
7902
- return (Rj - avgOverallRank) / Math.sqrt((N + 1) * (N / data[j].length - 1) / 12);
8086
+ return (Rj - avgOverallRank) / Math.sqrt((N + 1) * (N / extractedData[j].length - 1) / 12);
7903
8087
  });
7904
8088
  let tieSets = CompileTies(combinedData);
7905
8089
  let adjustH = tieSets.length > 0;
@@ -11356,11 +11540,14 @@ function AttirbuteAgreementConfidenceIntervals(object, confidenceLevel) {
11356
11540
  let v1_low = 2 * object.MatchedCount;
11357
11541
  let v2_low = 2 * (object.InspectedCount - object.MatchedCount + 1);
11358
11542
  let lowP = object.MatchedCount == object.InspectedCount ? alpha : alpha / 2;
11359
- let Fv1v2_a_2 = object.MatchedCount == 0 ? 0 : Distributions.F.inv(lowP, v1_low, v2_low);
11543
+ const F_inv = (p, df1, df2) => {
11544
+ return df2 / (df1 * (1 / Beta.invInc(p, df1 / 2, df2 / 2) - 1));
11545
+ };
11546
+ let Fv1v2_a_2 = object.MatchedCount == 0 ? 0 : F_inv(lowP, v1_low, v2_low);
11360
11547
  let v1_high = 2 * (object.MatchedCount + 1);
11361
11548
  let v2_high = 2 * (object.InspectedCount - object.MatchedCount);
11362
11549
  let highP = object.MatchedCount == 0 ? 1 - alpha : 1 - alpha / 2;
11363
- let Fv1v2_1_a_2 = object.MatchedCount == object.InspectedCount ? 1 : Distributions.F.inv(highP, v1_high, v2_high);
11550
+ let Fv1v2_1_a_2 = object.MatchedCount == object.InspectedCount ? 1 : F_inv(highP, v1_high, v2_high);
11364
11551
  object.Percent = object.MatchedCount / object.InspectedCount;
11365
11552
  object.CI_Low = v1_low * Fv1v2_a_2 / (v2_low + v1_low * Fv1v2_a_2);
11366
11553
  object.CI_High = v1_high * Fv1v2_1_a_2 / (v2_high + v1_high * Fv1v2_1_a_2);
@@ -11951,7 +12138,6 @@ function GageLinearityAndBias(parts, referenceValues, responses, processVariatio
11951
12138
  let \u03C3B2 = \u03C3R2 / Math.sqrt(m);
11952
12139
  let t2 = avgBias3 / \u03C3B2;
11953
12140
  let p2 = Distributions.T.cdf(-Math.abs(t2), df2) + (1 - Distributions.T.cdf(Math.abs(t2), df2));
11954
- console.log({ t: t2, df: df2, \u03C3B: \u03C3B2, \u03C3R: \u03C3R2, m, rBar: rBar2, d2: d22 });
11955
12141
  partBiasObjects.push({
11956
12142
  ReferenceValue: ref,
11957
12143
  Bias: avgBias3,
package/dist/index.mjs CHANGED
@@ -952,7 +952,6 @@ var Capability = {
952
952
  */
953
953
  Cp(data, spec, subgroupSize = 1, method, w, UnbiasingConstant2 = true) {
954
954
  let standarddeviation = StDev.W(data, subgroupSize, method, w, UnbiasingConstant2);
955
- console.log({ StDevW: standarddeviation });
956
955
  if (!Number.isNaN(spec.USL) && !Number.isNaN(spec.LSL)) {
957
956
  return Number((spec.USL - spec.LSL) / (6 * standarddeviation));
958
957
  } else {
@@ -1191,6 +1190,7 @@ var Capability = {
1191
1190
  if (confidenceInterval == void 0) {
1192
1191
  sides = void 0;
1193
1192
  }
1193
+ console.log(confidenceInterval, sides);
1194
1194
  let results = Capability.Analysis(data, spec, target, subgroupSize, confidenceInterval, sides, method, w, UnbiasingConstant2);
1195
1195
  let mean = Mean(data);
1196
1196
  let std = StDev.S(data);
@@ -1676,10 +1676,34 @@ var Beta = {
1676
1676
  * @param a The first shape parameter; x^(a-1).
1677
1677
  * @param b The second shape parameter; (1 - x)^(b-1).
1678
1678
  */
1679
- inc(x, a, b) {
1679
+ inc(X, A, B) {
1680
+ var A0 = 0;
1681
+ var B0 = 1;
1682
+ var A1 = 1;
1683
+ var B1 = 1;
1684
+ var M9 = 0;
1685
+ var A2 = 0;
1686
+ var C9;
1687
+ while (Math.abs((A1 - A2) / A1) > 1e-5) {
1688
+ A2 = A1;
1689
+ C9 = -(A + M9) * (A + B + M9) * X / (A + 2 * M9) / (A + 2 * M9 + 1);
1690
+ A0 = A1 + C9 * A0;
1691
+ B0 = B1 + C9 * B0;
1692
+ M9 = M9 + 1;
1693
+ C9 = M9 * (B - M9) * X / (A + 2 * M9 - 1) / (A + 2 * M9);
1694
+ A1 = A0 + C9 * A1;
1695
+ B1 = B0 + C9 * B1;
1696
+ A0 = A0 / B1;
1697
+ B0 = B0 / B1;
1698
+ A1 = A1 / B1;
1699
+ B1 = 1;
1700
+ }
1701
+ return A1 / A;
1702
+ },
1703
+ incomplete(x, a, b) {
1680
1704
  var bt = x === 0 || x === 1 ? 0 : Math.exp(Gamma.ln(a + b) - Gamma.ln(a) - Gamma.ln(b) + a * Math.log(x) + b * Math.log(1 - x));
1681
1705
  if (x < 0 || x > 1)
1682
- return NaN;
1706
+ return false;
1683
1707
  if (x < (a + 1) / (a + b + 2))
1684
1708
  return bt * Beta.cf(x, a, b) / a;
1685
1709
  return 1 - bt * Beta.cf(1 - x, b, a) / b;
@@ -1693,6 +1717,54 @@ var Beta = {
1693
1717
  */
1694
1718
  incr(x, a, b) {
1695
1719
  return Beta.inc(x, a, b) / Beta.fn(a, b);
1720
+ },
1721
+ invInc(p, a, b) {
1722
+ var EPS = 1e-8;
1723
+ var a1 = a - 1;
1724
+ var b1 = b - 1;
1725
+ var j = 0;
1726
+ var lna, lnb, pp, t, u, err, x, al, h, w, afac;
1727
+ if (p <= 0)
1728
+ return 0;
1729
+ if (p >= 1)
1730
+ return 1;
1731
+ if (a >= 1 && b >= 1) {
1732
+ pp = p < 0.5 ? p : 1 - p;
1733
+ t = Math.sqrt(-2 * Math.log(pp));
1734
+ x = (2.30753 + t * 0.27061) / (1 + t * (0.99229 + t * 0.04481)) - t;
1735
+ if (p < 0.5)
1736
+ x = -x;
1737
+ al = (x * x - 3) / 6;
1738
+ h = 2 / (1 / (2 * a - 1) + 1 / (2 * b - 1));
1739
+ w = x * Math.sqrt(al + h) / h - (1 / (2 * b - 1) - 1 / (2 * a - 1)) * (al + 5 / 6 - 2 / (3 * h));
1740
+ x = a / (a + b * Math.exp(2 * w));
1741
+ } else {
1742
+ lna = Math.log(a / (a + b));
1743
+ lnb = Math.log(b / (a + b));
1744
+ t = Math.exp(a * lna) / a;
1745
+ u = Math.exp(b * lnb) / b;
1746
+ w = t + u;
1747
+ if (p < t / w)
1748
+ x = Math.pow(a * w * p, 1 / a);
1749
+ else
1750
+ x = 1 - Math.pow(b * w * (1 - p), 1 / b);
1751
+ }
1752
+ afac = -Gamma.ln(a) - Gamma.ln(b) + Gamma.ln(a + b);
1753
+ for (; j < 10; j++) {
1754
+ if (x === 0 || x === 1)
1755
+ return x;
1756
+ err = Beta.incomplete(x, a, b) - p;
1757
+ t = Math.exp(a1 * Math.log(x) + b1 * Math.log(1 - x) + afac);
1758
+ u = err / t;
1759
+ x -= t = u / (1 - 0.5 * Math.min(1, u * (a1 / x - b1 / (1 - x))));
1760
+ if (x <= 0)
1761
+ x = 0.5 * (x + t);
1762
+ if (x >= 1)
1763
+ x = 0.5 * (x + t + 1);
1764
+ if (Math.abs(t) < EPS * x && j > 0)
1765
+ break;
1766
+ }
1767
+ return x;
1696
1768
  }
1697
1769
  };
1698
1770
  var ERF = {
@@ -1857,7 +1929,17 @@ var Distributions = {
1857
1929
  * @param b The second shape parameter; (1 - x)^(b-1).
1858
1930
  */
1859
1931
  cdf(x, a, b) {
1860
- return Beta.incr(x, a, b);
1932
+ var S;
1933
+ var BT;
1934
+ var Bcdf;
1935
+ S = a + b;
1936
+ BT = Math.exp(Gamma.ln(S) - Gamma.ln(b) - Gamma.ln(a) + a * Math.log(x) + b * Math.log(1 - x));
1937
+ if (x < (a + 1) / (S + 2)) {
1938
+ Bcdf = BT * Beta.inc(x, a, b);
1939
+ } else {
1940
+ Bcdf = 1 - BT * Beta.inc(1 - x, b, a);
1941
+ }
1942
+ return Bcdf;
1861
1943
  },
1862
1944
  /**
1863
1945
  * Probability Density Function. Calculates the probability of random variable x occuring from the beta distribution with the specified shape parameters a and b.
@@ -3726,8 +3808,8 @@ var Distributions = {
3726
3808
  * @returns the Right-Tailed P-Value of the F Distribution
3727
3809
  */
3728
3810
  cdf(x, df1, df2) {
3729
- let subX = df1 * x / (df1 * x + df2);
3730
- let p = Beta.incr(subX, df1 / 2, df2 / 2);
3811
+ let z = x / (x + df2 / df1);
3812
+ let p = Distributions.Beta.cdf(z, df1 / 2, df2 / 2);
3731
3813
  if (p > 1) {
3732
3814
  p = 1;
3733
3815
  }
@@ -4645,7 +4727,7 @@ var Distributions = {
4645
4727
  if (p == 0.5) {
4646
4728
  return 0;
4647
4729
  }
4648
- let x = Distributions.Beta.inv(2 * Math.min(p, 1 - p), 0.5 * df, 0.5);
4730
+ var x = Beta.invInc(2 * Math.min(p, 1 - p), 0.5 * df, 0.5);
4649
4731
  x = Math.sqrt(df * (1 - x) / x);
4650
4732
  return p > 0.5 ? x : -x;
4651
4733
  }
@@ -5178,9 +5260,7 @@ var ANOVA = {
5178
5260
  let pooledSTD = StDev.Pooled(pooledData);
5179
5261
  let alphaADJ = (1 - Math.pow(1 - alpha, 1 / rCount)) / 2;
5180
5262
  let hAlpha = Math.abs(Distributions.T.inv(alphaADJ, grandArray.length - rCount));
5181
- console.log({ alphaADJ, ln: grandArray.length, rCount });
5182
5263
  let scaleDF = Math.sqrt((rCount - 1) / (rCount * tCount));
5183
- console.log({ grandMean, pooledSTD, hAlpha, scaleDF });
5184
5264
  let UDL = grandMean + pooledSTD * hAlpha * scaleDF;
5185
5265
  let LDL = grandMean - pooledSTD * hAlpha * scaleDF;
5186
5266
  let chartSettings = new ChartSettings();
@@ -5205,6 +5285,110 @@ var ANOVA = {
5205
5285
  };
5206
5286
  }
5207
5287
  },
5288
+ Balanced: {
5289
+ Table(responses, factors, interactions = []) {
5290
+ let grandMean = Mean(responses);
5291
+ let factorsWInteractions = {};
5292
+ Object.keys(factors).forEach((f) => {
5293
+ factorsWInteractions[f] = factors[f];
5294
+ });
5295
+ interactions.forEach((element) => {
5296
+ factorsWInteractions[element] = responses.map((r, i) => {
5297
+ return element.split("*").map((k) => {
5298
+ return factors[k][i];
5299
+ }).join("*");
5300
+ });
5301
+ });
5302
+ let factorInfo = {};
5303
+ let totalLevelCount = 1;
5304
+ Object.keys(factors).forEach((k, i) => {
5305
+ let levels = Unique(factors[k]);
5306
+ totalLevelCount *= levels.length;
5307
+ factorInfo[k] = {
5308
+ Factor: k,
5309
+ Levels: levels.length,
5310
+ Values: levels.join(", ")
5311
+ };
5312
+ });
5313
+ let SSFactors = 0;
5314
+ let DFFactors = 0;
5315
+ let anovaTable = {};
5316
+ Object.keys(factorsWInteractions).forEach((k, i) => {
5317
+ let df = Product(k.split("*").map((f) => {
5318
+ return factorInfo[f].Levels - 1;
5319
+ }));
5320
+ DFFactors += df;
5321
+ let ss = 0;
5322
+ Unique(factorsWInteractions[k]).forEach((group) => {
5323
+ let groupMean = Mean(responses.filter((r, ri) => {
5324
+ return factorsWInteractions[k][ri] == group;
5325
+ }));
5326
+ if (k.includes("*")) {
5327
+ let factorLevelMeans = k.split("*").map((f, fi) => {
5328
+ return responses.filter((r, ri) => {
5329
+ return factors[f][ri] == group.split("*")[fi];
5330
+ });
5331
+ });
5332
+ let factorCombos = [];
5333
+ k.split("*").forEach((f, fi) => {
5334
+ k.split("*").forEach((f2, fi2) => {
5335
+ if (fi2 > fi) {
5336
+ let responseValues = responses.filter((r, ri) => {
5337
+ return factors[k.split("*")[fi]][ri] == f && factors[k.split("*")[fi2]][ri] == f2;
5338
+ });
5339
+ factorCombos.push(responseValues);
5340
+ }
5341
+ });
5342
+ });
5343
+ ss += Math.pow(groupMean - Sum(factorLevelMeans.map((arr) => {
5344
+ return Mean(arr);
5345
+ })) + grandMean, 2);
5346
+ } else {
5347
+ ss += Math.pow(groupMean - grandMean, 2);
5348
+ }
5349
+ });
5350
+ let n = responses.length / totalLevelCount;
5351
+ ss *= n * Product(Object.keys(factorInfo).filter((f) => {
5352
+ return k.includes(f) == false;
5353
+ }).map((fk) => {
5354
+ return factorInfo[fk].Levels;
5355
+ }));
5356
+ SSFactors += ss;
5357
+ anovaTable[k] = {
5358
+ Source: k,
5359
+ DF: df,
5360
+ SS: ss,
5361
+ MS: ss / df,
5362
+ F: NaN,
5363
+ p: NaN
5364
+ };
5365
+ });
5366
+ let SST = SumSq(responses.map((r) => {
5367
+ return r - grandMean;
5368
+ }));
5369
+ let DFE = responses.length - DFFactors - 1;
5370
+ let SSE = SST - SSFactors;
5371
+ let MSE = SSE / DFE;
5372
+ Object.keys(anovaTable).forEach((key) => {
5373
+ let fValue = anovaTable[key].MS / MSE;
5374
+ let pValue = Distributions.F.RightTail(fValue, anovaTable[key].DF, DFE);
5375
+ anovaTable[key].F = fValue;
5376
+ anovaTable[key].p = pValue;
5377
+ });
5378
+ anovaTable["Error"] = {
5379
+ Source: "Error",
5380
+ DF: DFE,
5381
+ SS: SSE,
5382
+ MS: MSE
5383
+ };
5384
+ anovaTable["Total"] = {
5385
+ Source: "Total",
5386
+ DF: responses.length - 1,
5387
+ SS: SST
5388
+ };
5389
+ return anovaTable;
5390
+ }
5391
+ },
5208
5392
  GeneralLinearModel(responses, factors, covariates) {
5209
5393
  },
5210
5394
  OneWay: {
@@ -7446,7 +7630,7 @@ var EquivalenceTesting = {
7446
7630
  let mean = Mean(dta);
7447
7631
  let se = StandardError(dta);
7448
7632
  let LSL = multiply ? lowerSpecification * target : lowerSpecification;
7449
- let USL = multiply ? upperSpecification * target : lowerSpecification;
7633
+ let USL = multiply ? upperSpecification * target : upperSpecification;
7450
7634
  let diff = mean - target;
7451
7635
  let CI_Low = Math.min((LSL + USL) / 2, diff - Distributions.T.inv(1 - confidenceLevel, dta.length - 1) * se);
7452
7636
  let CI_High = Math.max((LSL + USL) / 2, diff + Distributions.T.inv(1 - confidenceLevel, dta.length - 1) * se);
@@ -7773,7 +7957,7 @@ var NonparametricTesting = {
7773
7957
  let avgOverallRank = Mean(combinedRanks);
7774
7958
  let N = combinedData.length;
7775
7959
  let zValues = avgRanks.map((Rj, j) => {
7776
- return (Rj - avgOverallRank) / Math.sqrt((N + 1) * (N / data[j].length - 1) / 12);
7960
+ return (Rj - avgOverallRank) / Math.sqrt((N + 1) * (N / extractedData[j].length - 1) / 12);
7777
7961
  });
7778
7962
  let tieSets = CompileTies(combinedData);
7779
7963
  let adjustH = tieSets.length > 0;
@@ -11230,11 +11414,14 @@ function AttirbuteAgreementConfidenceIntervals(object, confidenceLevel) {
11230
11414
  let v1_low = 2 * object.MatchedCount;
11231
11415
  let v2_low = 2 * (object.InspectedCount - object.MatchedCount + 1);
11232
11416
  let lowP = object.MatchedCount == object.InspectedCount ? alpha : alpha / 2;
11233
- let Fv1v2_a_2 = object.MatchedCount == 0 ? 0 : Distributions.F.inv(lowP, v1_low, v2_low);
11417
+ const F_inv = (p, df1, df2) => {
11418
+ return df2 / (df1 * (1 / Beta.invInc(p, df1 / 2, df2 / 2) - 1));
11419
+ };
11420
+ let Fv1v2_a_2 = object.MatchedCount == 0 ? 0 : F_inv(lowP, v1_low, v2_low);
11234
11421
  let v1_high = 2 * (object.MatchedCount + 1);
11235
11422
  let v2_high = 2 * (object.InspectedCount - object.MatchedCount);
11236
11423
  let highP = object.MatchedCount == 0 ? 1 - alpha : 1 - alpha / 2;
11237
- let Fv1v2_1_a_2 = object.MatchedCount == object.InspectedCount ? 1 : Distributions.F.inv(highP, v1_high, v2_high);
11424
+ let Fv1v2_1_a_2 = object.MatchedCount == object.InspectedCount ? 1 : F_inv(highP, v1_high, v2_high);
11238
11425
  object.Percent = object.MatchedCount / object.InspectedCount;
11239
11426
  object.CI_Low = v1_low * Fv1v2_a_2 / (v2_low + v1_low * Fv1v2_a_2);
11240
11427
  object.CI_High = v1_high * Fv1v2_1_a_2 / (v2_high + v1_high * Fv1v2_1_a_2);
@@ -11825,7 +12012,6 @@ function GageLinearityAndBias(parts, referenceValues, responses, processVariatio
11825
12012
  let \u03C3B2 = \u03C3R2 / Math.sqrt(m);
11826
12013
  let t2 = avgBias3 / \u03C3B2;
11827
12014
  let p2 = Distributions.T.cdf(-Math.abs(t2), df2) + (1 - Distributions.T.cdf(Math.abs(t2), df2));
11828
- console.log({ t: t2, df: df2, \u03C3B: \u03C3B2, \u03C3R: \u03C3R2, m, rBar: rBar2, d2: d22 });
11829
12015
  partBiasObjects.push({
11830
12016
  ReferenceValue: ref,
11831
12017
  Bias: avgBias3,
package/index.ts CHANGED
@@ -1080,7 +1080,6 @@ CI:{
1080
1080
  */
1081
1081
  Cp(data: number[], spec: Specification, subgroupSize: number = 1, method?: string, w?: number, UnbiasingConstant: boolean = true){
1082
1082
  let standarddeviation = StDev.W(data, subgroupSize, method, w, UnbiasingConstant);
1083
- console.log({StDevW: standarddeviation})
1084
1083
  if(!Number.isNaN(spec.USL) && !Number.isNaN(spec.LSL)){
1085
1084
  return Number((spec.USL - spec.LSL) / (6 * standarddeviation));
1086
1085
  }else{
@@ -1333,6 +1332,7 @@ SixPack(data: number[], spec: Specification, title?: string){
1333
1332
  },
1334
1333
  Report(data: number[], spec: Specification, target?: number, subgroupSize?: number, confidenceInterval?: number, sides: any = -1, method?: string, w?: number, UnbiasingConstant?: boolean){
1335
1334
  if(confidenceInterval == undefined){sides = undefined}
1335
+ console.log(confidenceInterval, sides)
1336
1336
  let results: any = Capability.Analysis(data, spec, target, subgroupSize, confidenceInterval, sides, method, w, UnbiasingConstant);
1337
1337
  let mean = Mean(data);
1338
1338
  let std = StDev.S(data);
@@ -1899,28 +1899,39 @@ export const Beta = {
1899
1899
  * @param a The first shape parameter; x^(a-1).
1900
1900
  * @param b The second shape parameter; (1 - x)^(b-1).
1901
1901
  */
1902
- inc(x: number, a: number, b: number){
1903
- // let sum = 0;
1904
-
1905
- // for(let n = 0; n < 1000; n++){
1906
- // let ploc = PlochhammerSymbol(1-b, n);
1907
- // let den = Probability.Factorialize(n) * (a + n);
1908
- // let xn = Math.pow(x, n);
1909
- // let term_n = ploc / den * xn;
1910
- // if(Math.abs(term_n) < 1e-9 && n > 100){
1911
- // n = 1001
1912
- // }
1913
- // sum += term_n;
1914
- // }
1915
- // return Math.pow(x, a) * sum;
1916
-
1902
+ inc(X: number, A: number, B: number){
1903
+ var A0=0;
1904
+ var B0=1;
1905
+ var A1=1;
1906
+ var B1=1;
1907
+ var M9=0;
1908
+ var A2=0;
1909
+ var C9;
1910
+ while (Math.abs((A1-A2)/A1)>.00001) {
1911
+ A2=A1;
1912
+ C9=-(A+M9)*(A+B+M9)*X/(A+2*M9)/(A+2*M9+1);
1913
+ A0=A1+C9*A0;
1914
+ B0=B1+C9*B0;
1915
+ M9=M9+1;
1916
+ C9=M9*(B-M9)*X/(A+2*M9-1)/(A+2*M9);
1917
+ A1=A0+C9*A1;
1918
+ B1=B0+C9*B1;
1919
+ A0=A0/B1;
1920
+ B0=B0/B1;
1921
+ A1=A1/B1;
1922
+ B1=1;
1923
+ }
1924
+ return A1/A
1925
+
1926
+ },
1927
+ incomplete(x, a, b): any{
1917
1928
  // Factors in front of the continued fraction.
1918
1929
  var bt = (x === 0 || x === 1) ? 0 :
1919
1930
  Math.exp(Gamma.ln(a + b) - Gamma.ln(a) -
1920
- Gamma.ln(b) + a * Math.log(x) + b *
1931
+ Gamma.ln(b) + a * Math.log(x) + b *
1921
1932
  Math.log(1 - x));
1922
1933
  if (x < 0 || x > 1)
1923
- return NaN;
1934
+ return false;
1924
1935
  if (x < (a + 1) / (a + b + 2))
1925
1936
  // Use continued fraction directly.
1926
1937
  return bt * Beta.cf(x, a, b) / a;
@@ -1936,7 +1947,56 @@ export const Beta = {
1936
1947
  */
1937
1948
  incr(x: number, a: number, b: number){
1938
1949
  return Beta.inc(x, a, b)/Beta.fn(a, b)
1939
- }
1950
+ },
1951
  invInc(p, a, b){
    // Inverse of the regularized incomplete beta function: returns x in
    // [0, 1] such that Beta.incomplete(x, a, b) ≈ p (see the `err` line in
    // the refinement loop). Strategy: closed-form initial guess, then up to
    // 10 damped Newton steps.
    var EPS = 1e-8;
    var a1 = a - 1;
    var b1 = b - 1;
    var j = 0;
    var lna, lnb, pp, t, u, err, x, al, h, w, afac;
    // Degenerate tails of the distribution.
    if (p <= 0)
      return 0;
    if (p >= 1)
      return 1;
    if (a >= 1 && b >= 1) {
      // Both shapes >= 1: seed from a rational approximation to the normal
      // quantile, then map it onto the beta scale.
      pp = (p < 0.5) ? p : 1 - p;
      t = Math.sqrt(-2 * Math.log(pp));
      x = (2.30753 + t * 0.27061) / (1 + t* (0.99229 + t * 0.04481)) - t;
      if (p < 0.5)
        x = -x;
      al = (x * x - 3) / 6;
      h = 2 / (1 / (2 * a - 1) + 1 / (2 * b - 1));
      w = (x * Math.sqrt(al + h) / h) - (1 / (2 * b - 1) - 1 / (2 * a - 1)) *
        (al + 5 / 6 - 2 / (3 * h));
      x = a / (a + b * Math.exp(2 * w));
    } else {
      // At least one shape < 1: seed from a piecewise power-law fit to the
      // two tails.
      lna = Math.log(a / (a + b));
      lnb = Math.log(b / (a + b));
      t = Math.exp(a * lna) / a;
      u = Math.exp(b * lnb) / b;
      w = t + u;
      if (p < t / w)
        x = Math.pow(a * w * p, 1 / a);
      else
        x = 1 - Math.pow(b * w * (1 - p), 1 / b);
    }
    // log(1/B(a, b)); lets the beta pdf below be evaluated in log space.
    afac = -Gamma.ln(a) - Gamma.ln(b) + Gamma.ln(a + b);
    // Newton refinement with a clamped second-order correction; interval
    // halving keeps the iterate strictly inside (0, 1).
    for(; j < 10; j++) {
      if (x === 0 || x === 1)
        return x;
      err = Beta.incomplete(x, a, b) - p;
      // Beta pdf at x (the derivative of the CDF), via the log form above.
      t = Math.exp(a1 * Math.log(x) + b1 * Math.log(1 - x) + afac);
      u = err / t;
      // Damped step; Math.min(1, ...) caps the curvature correction so the
      // denominator cannot flip sign.
      x -= (t = u / (1 - 0.5 * Math.min(1, u * (a1 / x - b1 / (1 - x)))));
      if (x <= 0)
        x = 0.5 * (x + t);
      if (x >= 1)
        x = 0.5 * (x + t + 1);
      // Converged once the step is tiny relative to x (skip on first pass).
      if (Math.abs(t) < EPS * x && j > 0)
        break;
    }
    return x;
  }
1940
2000
  }
1941
2001
 
1942
2002
  export const ERF = {
@@ -2136,7 +2196,18 @@ export const Distributions = {
2136
2196
  * @param b The second shape parameter; (1 - x)^(b-1).
2137
2197
  */
2138
2198
  cdf(x: number, a: number, b: number){
    // Beta(a, b) cumulative distribution function, i.e. the regularized
    // incomplete beta I_x(a, b), using Beta.inc's continued-fraction kernel.
    var S;
    var BT;
    var Bcdf;

    S = a + b;
    // Prefactor x^a (1-x)^b · Γ(a+b)/(Γ(a)Γ(b)), computed in log space for
    // stability. At x = 0 or x = 1 the log term is -Infinity, BT underflows
    // to 0, and the branches below return exactly 0 or 1 respectively.
    BT = Math.exp(Gamma.ln(S) - Gamma.ln(b) - Gamma.ln(a) + a * Math.log(x) + b * Math.log(1 - x));
    if (x < (a + 1)/(S + 2)) {
      // Region where the continued fraction converges directly.
      Bcdf = BT * Beta.inc(x, a, b)
    } else {
      // Symmetry I_x(a, b) = 1 - I_{1-x}(b, a) for the complementary region.
      Bcdf = 1 - BT * Beta.inc(1 - x, b, a)
    }
    // NOTE(review): x outside [0, 1] produces NaN (log of a negative
    // number) — confirm callers validate the domain.
    return Bcdf
  },
2141
2212
  /**
2142
2213
  * Probability Density Function. Calculates the probability of random variable x occurring from the beta distribution with the specified shape parameters a and b.
@@ -4011,8 +4082,8 @@ export const Distributions = {
4011
4082
  * @returns the Right-Tailed P-Value of the F Distribution
4012
4083
  */
4013
4084
  cdf(x: number, df1: number, df2: number){
4014
- let subX = (df1 * x)/(df1 * x + df2);
4015
- let p = Beta.incr(subX, df1/2, df2/2);
4085
+ let z = x / (x + df2/df1)
4086
+ let p = Distributions.Beta.cdf(z, df1/2, df2/2)
4016
4087
  if(p > 1){p = 1}
4017
4088
  if(p < 0){p = 0}
4018
4089
  return p
@@ -4943,9 +5014,9 @@ export const Distributions = {
4943
5014
  // if(t){
4944
5015
  // return t
4945
5016
  // }
4946
- let x = Distributions.Beta.inv(2*Math.min(p, 1-p), 0.5*df, 0.5);
4947
- x = Math.sqrt(df*(1-x)/x);
4948
- return p > 0.5 ? x : -x;
5017
+ var x = Beta.invInc(2 * Math.min(p, 1 - p), 0.5 * df, 0.5);
5018
+ x = Math.sqrt(df * (1 - x) / x);
5019
+ return (p > 0.5) ? x : -x;
4949
5020
  },
4950
5021
  // invTable(p: number, df: number){
4951
5022
  // // let table = tTable
@@ -5583,10 +5654,9 @@ export const ANOVA = {
5583
5654
  let pooledSTD = StDev.Pooled(pooledData);
5584
5655
  let alphaADJ = (1 - Math.pow(1-alpha,1/rCount))/2;
5585
5656
  let hAlpha = Math.abs(Distributions.T.inv(alphaADJ, grandArray.length - rCount))
5586
- console.log({alphaADJ, ln: grandArray.length, rCount})
5657
+
5587
5658
  let scaleDF = Math.sqrt((rCount-1)/(rCount*tCount));
5588
5659
 
5589
- console.log({grandMean, pooledSTD, hAlpha, scaleDF})
5590
5660
  let UDL = grandMean + pooledSTD*hAlpha*scaleDF
5591
5661
  let LDL = grandMean - pooledSTD*hAlpha*scaleDF
5592
5662
 
@@ -5614,6 +5684,103 @@ export const ANOVA = {
5614
5684
  }
5615
5685
  }
5616
5686
  },
5687
  Balanced:{
    // Balanced multi-factor ANOVA table.
    // NOTE(review): assumes a balanced full-factorial design — every
    // combination of factor levels replicated the same number of times in
    // `responses` (see the `n = responses.length / totalLevelCount` line);
    // results are wrong for unbalanced data. TODO confirm callers enforce this.
    Table(responses, factors, interactions: any = []){
      // Grand mean of all observations.
      let grandMean = Mean(responses)

      // Source name -> per-observation level labels, covering main factors
      // plus any requested interaction terms (e.g. "A*B").
      let factorsWInteractions = {};

      Object.keys(factors).forEach(f => {
        factorsWInteractions[f] = factors[f]
      })

      interactions.forEach(element => {
        // An interaction's "level" for observation i is the joined tuple of
        // its component factors' levels at i, e.g. "low*high".
        factorsWInteractions[element] = responses.map((r,i) => {return element.split("*").map(k =>{return factors[k][i]}).join("*")})
      });

      // Per-factor level summary; totalLevelCount = number of cells in the
      // full factorial (product over main factors only).
      let factorInfo = {}
      let totalLevelCount = 1;
      Object.keys(factors).forEach((k,i) => {
        let levels = Unique(factors[k])
        totalLevelCount *= levels.length;
        factorInfo[k] = {
          Factor: k,
          Levels: levels.length,
          Values: levels.join(", ")
        }
      })

      // Accumulate SS and DF for each source (main effect or interaction).
      let SSFactors = 0;
      let DFFactors = 0;
      let anovaTable = {}
      Object.keys(factorsWInteractions).forEach((k,i) => {
        // DF = (levels - 1) for a main effect; product of (levels - 1) over
        // components for an interaction.
        let df = Product(k.split("*").map(f => {return factorInfo[f].Levels - 1}))
        DFFactors += df;

        let ss = 0;
        Unique(factorsWInteractions[k]).forEach(group => {
          // Mean of the observations at this level / cell.
          let groupMean = Mean(responses.filter((r,ri) => {return factorsWInteractions[k][ri] == group}))
          if(k.includes("*")){
            // is an interaction
            // Marginal observation sets for each component factor's level.
            let factorLevelMeans = k.split("*").map((f,fi) => {return responses.filter((r,ri) => {return factors[f][ri] == group.split("*")[fi]})})

            // NOTE(review): factorCombos is computed but never used below —
            // dead code; its filter also compares level values against factor
            // NAMES (f, f2 are names from k.split("*")), so it looks wrong
            // regardless. Candidate for removal.
            let factorCombos: any = [];
            k.split("*").forEach((f,fi) => {
              k.split("*").forEach((f2, fi2) => {
                if(fi2 > fi){
                  let responseValues = responses.filter((r, ri) => {return factors[k.split("*")[fi]][ri] == f && factors[k.split("*")[fi2]][ri] == f2})
                  factorCombos.push(responseValues)
                }
              })
            })

            // Two-way interaction deviation: cell mean − sum of marginal
            // means + grand mean. NOTE(review): for 3+-way interactions this
            // omits the lower-order interaction corrections — verify before
            // passing higher-order terms.
            ss += Math.pow(groupMean - Sum(factorLevelMeans.map(arr => {return Mean(arr)})) + grandMean, 2)
          }else{
            // Main-effect deviation of the level mean from the grand mean.
            ss += Math.pow(groupMean - grandMean, 2)
          }
        })
        // Scale by replicates per cell and by the level counts of the factors
        // NOT in this source. NOTE(review): `k.includes(f)` is a substring
        // test — a factor whose name is contained in another's (e.g. "A" in
        // "AB") would be mis-excluded; confirm factor naming avoids this.
        let n = responses.length / totalLevelCount;
        ss *= n * Product(Object.keys(factorInfo).filter(f => {return k.includes(f) == false}).map(fk => {return factorInfo[fk].Levels}))
        SSFactors += ss;
        anovaTable[k] = {
          Source: k,
          DF: df,
          SS: ss,
          MS: ss/df,
          F: NaN, // filled in once MSE is known
          p: NaN
        }
      })
      // Total sum of squares about the grand mean.
      let SST = SumSq(responses.map(r => {return r - grandMean}))

      // Error (residual) terms by subtraction.
      let DFE = responses.length - DFFactors - 1;
      let SSE = SST - SSFactors;
      let MSE = SSE / DFE;

      // F statistic and right-tailed p-value for each source against MSE.
      Object.keys(anovaTable).forEach(key => {
        let fValue = anovaTable[key].MS / MSE
        let pValue = Distributions.F.RightTail(fValue, anovaTable[key].DF, DFE)
        anovaTable[key].F = fValue
        anovaTable[key].p = pValue
      })

      anovaTable["Error"] = {
        Source: "Error",
        DF: DFE,
        SS: SSE,
        MS: MSE
      }

      anovaTable["Total"] = {
        Source: "Total",
        DF: responses.length - 1,
        SS: SST
      }

      return anovaTable

    }
  },
5617
5784
  GeneralLinearModel(responses: any, factors: any, covariates: any){
5618
5785
 
5619
5786
  },
@@ -6829,10 +6996,10 @@ export const ANOVA = {
6829
6996
  refMeanIndex += fx_i * prod
6830
6997
  })
6831
6998
  let refMean = refMeans[refMeanIndex]
6832
-
6999
+
6833
7000
  SumSq += Math.pow(levelMean - refMeans[refMeanIndex], 2);
6834
7001
  })
6835
-
7002
+
6836
7003
  ANOVA[fi].SS = abc * SumSq;
6837
7004
  ANOVA[fi].MS = ANOVA[fi].SS/ANOVA[fi].DF
6838
7005
  })
@@ -6841,6 +7008,7 @@ export const ANOVA = {
6841
7008
  factors.forEach((factor, factor_i) => {
6842
7009
  if(factor_i != factors.length - 1){
6843
7010
  ANOVA[factor_i].F = ANOVA[factor_i].MS / ANOVA[factor_i + 1].MS
7011
+
6844
7012
  ANOVA[factor_i].p = Distributions.F.RightTail(ANOVA[factor_i].F, ANOVA[factor_i].DF, ANOVA[factor_i + 1].DF)
6845
7013
  }
6846
7014
  })
@@ -7944,11 +8112,11 @@ export const EquivalenceTesting = {
7944
8112
  let mean = Mean(dta)
7945
8113
  let se = StandardError(dta)
7946
8114
  let LSL = multiply ? lowerSpecification * target : lowerSpecification
7947
- let USL = multiply ? upperSpecification * target : lowerSpecification
8115
+ let USL = multiply ? upperSpecification * target : upperSpecification
7948
8116
  let diff = mean - target;
7949
8117
 
7950
8118
  let CI_Low = Math.min((LSL + USL)/2, diff - Distributions.T.inv(1 - confidenceLevel, dta.length - 1)*se);
7951
- let CI_High = Math.max((LSL + USL)/2, diff + Distributions.T.inv(1 - confidenceLevel, dta.length - 1)*se)
8119
+ let CI_High = Math.max((LSL + USL)/2, diff + Distributions.T.inv(1 - confidenceLevel, dta.length - 1)*se);
7952
8120
 
7953
8121
  let equivChart = EquivalenceCharts.Equivalence({name: `Mean(${name ?? "Sample"})`, value: mean}, {name: "Target", value: target}, LSL, USL, CI_Low, CI_High, confidenceLevel);
7954
8122
  let histograms = EquivalenceCharts.Histogram([data])
@@ -8321,12 +8489,13 @@ export const NonparametricTesting = {
8321
8489
  let avgOverallRank = Mean(combinedRanks);
8322
8490
 
8323
8491
  let N = combinedData.length;
8324
- let zValues = avgRanks.map((Rj, j) => {return (Rj - avgOverallRank)/Math.sqrt(((N + 1)*(N/data[j].length - 1))/12)})
8325
-
8492
+ let zValues = avgRanks.map((Rj, j) => {return (Rj - avgOverallRank)/Math.sqrt(((N + 1)*(N/extractedData[j].length - 1))/12)})
8493
+
8326
8494
  let tieSets = CompileTies(combinedData)
8327
8495
  let adjustH = tieSets.length > 0
8328
8496
  let H = 12 * Sum(avgRanks.map((Rj, j) => {return data[j].length * Math.pow(Rj - avgOverallRank, 2)}))/(N *(N + 1));
8329
8497
  let H_not = 0 + H;
8498
+
8330
8499
  if(adjustH){
8331
8500
  // H = H/(1 - Sum(tieSets.map((t: any) => {return Math.pow(t.length, 3) - t.length}))/(Math.pow(N,3) - N));
8332
8501
  H = (12 / (N * (N + 1))) * (Sum(ranks.map(r => {return Math.pow(Sum(r),2)/r.length}))) - 3 *(N + 1);
@@ -12181,11 +12350,14 @@ function AttirbuteAgreementConfidenceIntervals(object: any, confidenceLevel?: nu
12181
12350
  let v1_low = 2 * object.MatchedCount
12182
12351
  let v2_low = 2 * (object.InspectedCount - object.MatchedCount + 1)
12183
12352
  let lowP = object.MatchedCount == object.InspectedCount ? alpha : alpha / 2;
12184
- let Fv1v2_a_2 = object.MatchedCount == 0 ? 0 : Distributions.F.inv(lowP, v1_low, v2_low);
12353
+ const F_inv = (p, df1, df2) => {
12354
+ return df2 / (df1 * (1 / Beta.invInc(p, df1 / 2, df2 / 2) - 1))
12355
+ }
12356
+ let Fv1v2_a_2 = object.MatchedCount == 0 ? 0 : F_inv(lowP, v1_low, v2_low);
12185
12357
  let v1_high = 2 * (object.MatchedCount + 1)
12186
12358
  let v2_high = 2 * (object.InspectedCount - object.MatchedCount)
12187
12359
  let highP = object.MatchedCount == 0 ? 1 - alpha : 1-alpha/2
12188
- let Fv1v2_1_a_2 = object.MatchedCount == object.InspectedCount ? 1 : Distributions.F.inv(highP, v1_high, v2_high);
12360
+ let Fv1v2_1_a_2 = object.MatchedCount == object.InspectedCount ? 1 : F_inv(highP, v1_high, v2_high);
12189
12361
  object.Percent = object.MatchedCount / object.InspectedCount;
12190
12362
  object.CI_Low = (v1_low * Fv1v2_a_2)/(v2_low + (v1_low * Fv1v2_a_2));
12191
12363
  object.CI_High = (v1_high * Fv1v2_1_a_2)/(v2_high + (v1_high * Fv1v2_1_a_2));
@@ -12840,7 +13012,7 @@ export function GageLinearityAndBias(parts: any[], referenceValues: number[], re
12840
13012
  let σB = σR / Math.sqrt(m);
12841
13013
  let t = avgBias / σB
12842
13014
  let p = Distributions.T.cdf(-Math.abs(t), df) + (1 - Distributions.T.cdf(Math.abs(t), df))
12843
- console.log({t, df, σB, σR, m, rBar, d2});
13015
+
12844
13016
  partBiasObjects.push({
12845
13017
  ReferenceValue: ref,
12846
13018
  Bias: avgBias,
@@ -13889,5 +14061,4 @@ const Matrix = {
13889
14061
  return newMatrix
13890
14062
  },
13891
14063
  }
13892
- // End Matrix Functions
13893
- /** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */// Validation
14064
+ // End Matrix Functions
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "qesuite",
3
- "version": "1.0.65",
3
+ "version": "1.0.68",
4
4
  "description": "Performs advanced statistical analysis of data. Specifically designed for engineering statistical analysis",
5
5
  "main": "dist/index.js",
6
6
  "module": "dist/index.mjs",
@@ -0,0 +1 @@
1
+ - Updated T.Inv()
@@ -0,0 +1 @@
1
+ Updated Confidence Interval Calculation in Attribute Agreement Analysis