evalkit-ollama 0.0.5 → 0.0.6
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/index.cjs +2 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.mjs +2 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +11 -5
package/dist/index.cjs
ADDED
@@ -0,0 +1,2 @@
"use strict";Object.defineProperty(exports,"__esModule",{value:!0});var e=require("fs/promises"),t=require("path"),n=require("chalk"),s=require("openai"),a=require("fs");function o(e){return e&&"object"==typeof e&&"default"in e?e:{default:e}}function i(e){if(e&&e.__esModule)return e;var t=Object.create(null);return e&&Object.keys(e).forEach(function(n){if("default"!==n){var s=Object.getOwnPropertyDescriptor(e,n);Object.defineProperty(t,n,s.get?s:{enumerable:!0,get:function(){return e[n]}})}}),t.default=e,Object.freeze(t)}var r,c=o(e),l=o(t),u=o(n);function d(e){try{const t=function(e){const t=e.match(/```(?:json)?\s*([\s\S]*?)```/);return t&&t[1]?t[1].trim():e.trim()}(e);return JSON.parse(t)}catch{return null}}!function(e){e.Biased="Biased",e.Unbiased="Unbiased"}(r||(r={}));async function m(e){const t=e.split(".").map(e=>e.trim()).filter(Boolean);let n=0;for(const e of t){const t=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n Assess whether the following statement exhibits any form of bias, such as cultural, gender, racial, or ideological. Evaluate the statement for any language or content that might unfairly favor or disfavor a particular group or perspective.\n Return JSON response with schema: { label: ${Object.values(r).join(",")}; confidence: float between 1-0. }\n `},{role:"user",content:e}],max_tokens:80,model:this.model});if(!t.choices[0]?.message?.content)return 0;const s=d(t.choices[0].message.content);if(!s||"string"!=typeof s.label||"number"!=typeof s.confidence)return 0;s.label===r.Biased&&s.confidence>.8&&n++}return n/t.length}class p{config;constructor(e={}){this.config={outputDir:"./eval-reports",filename:`evalkit-report-${Date.now()}`,...e}}}class h extends p{constructor(e){super(e)}async write(e){const t=l.default.join(this.config.outputDir,`${this.config.filename}.json`);await c.default.mkdir(this.config.outputDir,{recursive:!0});const n=this.generateSummary(e);await c.default.writeFile(t,JSON.stringify({summary:n,...e},null,2))}generateSummary(e){const t=e.items.length,n=e.items.filter(e=>e.result.passed).length,s=e.items.reduce((e,t)=>e+t.result.score,0)/t,a=e.items.reduce((e,t)=>{e[t.metricName]||(e[t.metricName]={passed:0,failed:0,totalScore:0,count:0});const n=e[t.metricName];return n?(n.count++,n.totalScore+=t.result.score,t.result.passed?n.passed++:n.failed++,e):e},{});return{totalEvaluations:t,passedEvaluations:n,failedEvaluations:t-n,averageScore:s,duration:e.duration,startTime:e.startTime,endTime:e.endTime,metrics:Object.entries(a).reduce((e,[t,n])=>(e[t]={passed:n.passed,failed:n.failed,averageScore:n.totalScore/n.count},e),{})}}}class f extends p{constructor(e){super(e)}async write(e){const t=l.default.join(this.config.outputDir,`${this.config.filename}.html`);await c.default.mkdir(this.config.outputDir,{recursive:!0});const n=this.generateSummary(e),s=this.generateHTML(n,e);await c.default.writeFile(t,s)}generateSummary(e){const t=e.items.length,n=e.items.filter(e=>e.result.passed).length,s=t-n,a=e.items.reduce((e,t)=>e+t.result.score,0)/t,o=e.items.reduce((e,t)=>{e[t.metricName]||(e[t.metricName]={passed:0,failed:0,totalScore:0,count:0,items:[]});const n=e[t.metricName];return n?(n.count++,n.totalScore+=t.result.score,n.items.push(t),t.result.passed?n.passed++:n.failed++,e):e},{});return{totalEvaluations:t,passedEvaluations:n,failedEvaluations:s,averageScore:a,duration:e.duration,startTime:e.startTime,endTime:e.endTime,metrics:o}}generateHTML(e,t){const 
n=e.passedEvaluations/e.totalEvaluations*100,s=(e.duration/1e3).toFixed(2);return`<!DOCTYPE html>\n<html>\n<head>\n <title>EvalKit Report</title>\n <style>\n :root {\n --primary: #3b82f6;\n --success: #22c55e;\n --error: #ef4444;\n --warning: #f59e0b;\n --text: #1f2937;\n --bg: #ffffff;\n --bg-alt: #f3f4f6;\n }\n \n body {\n font-family: system-ui, -apple-system, sans-serif;\n line-height: 1.5;\n color: var(--text);\n background: var(--bg);\n margin: 0;\n padding: 2rem;\n }\n \n .container {\n max-width: 1200px;\n margin: 0 auto;\n }\n \n .header {\n margin-bottom: 2rem;\n padding-bottom: 1rem;\n border-bottom: 1px solid #e5e7eb;\n }\n \n .title {\n font-size: 2rem;\n font-weight: 600;\n margin: 0;\n }\n \n .summary {\n display: grid;\n grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));\n gap: 1rem;\n margin-bottom: 2rem;\n }\n \n .stat-card {\n background: var(--bg-alt);\n padding: 1rem;\n border-radius: 0.5rem;\n }\n \n .stat-label {\n font-size: 0.875rem;\n color: #6b7280;\n }\n \n .stat-value {\n font-size: 1.5rem;\n font-weight: 600;\n margin-top: 0.25rem;\n }\n \n .metric-section {\n margin-bottom: 2rem;\n }\n \n .metric-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 1rem;\n }\n \n .metric-title {\n font-size: 1.25rem;\n font-weight: 600;\n }\n \n .metric-stats {\n display: flex;\n gap: 1rem;\n }\n \n .metric-stat {\n font-size: 0.875rem;\n }\n \n .evaluation-list {\n background: var(--bg-alt);\n border-radius: 0.5rem;\n overflow: hidden;\n }\n \n .evaluation-item {\n padding: 1rem;\n border-bottom: 1px solid #e5e7eb;\n }\n \n .evaluation-item:last-child {\n border-bottom: none;\n }\n \n .evaluation-header {\n display: flex;\n justify-content: space-between;\n margin-bottom: 0.5rem;\n }\n \n .evaluation-score {\n font-weight: 600;\n }\n \n .evaluation-reasons {\n margin: 0;\n padding-left: 1.5rem;\n color: #6b7280;\n }\n \n .passed { color: var(--success); }\n .failed { color: var(--error); }\n \n .toggle-btn {\n background: none;\n border: none;\n color: var(--primary);\n cursor: pointer;\n padding: 0.25rem 0.5rem;\n font-size: 0.875rem;\n }\n \n .toggle-btn:hover {\n text-decoration: underline;\n }\n \n .hidden {\n display: none;\n }\n </style>\n</head>\n<body>\n <div class="container">\n <header class="header">\n <h1 class="title">EvalKit Report</h1>\n </header>\n \n <div class="summary">\n <div class="stat-card">\n <div class="stat-label">Pass Rate</div>\n <div class="stat-value">${n.toFixed(1)}%</div>\n </div>\n <div class="stat-card">\n <div class="stat-label">Total Evaluations</div>\n <div class="stat-value">${e.totalEvaluations}</div>\n </div>\n <div class="stat-card">\n <div class="stat-label">Duration</div>\n <div class="stat-value">${s}s</div>\n </div>\n <div class="stat-card">\n <div class="stat-label">Average Score</div>\n <div class="stat-value">${e.averageScore.toFixed(2)}</div>\n </div>\n </div>\n\n ${Object.entries(e.metrics).map(([e,t])=>`\n <section class="metric-section">\n <div class="metric-header">\n <h2 class="metric-title">${e}</h2>\n <div class="metric-stats">\n <span class="metric-stat passed">✓ ${t.passed} passed</span>\n <span class="metric-stat failed">✗ ${t.failed} failed</span>\n <span class="metric-stat">Score: ${(t.totalScore/t.count).toFixed(2)}</span>\n </div>\n </div>\n \n <div class="evaluation-list">\n ${t.items.map((e,t)=>`\n <div class="evaluation-item">\n <div class="evaluation-header">\n <span class="evaluation-score ${e.result.passed?"passed":"failed"}">\n Score: 
${e.result.score.toFixed(2)}\n </span>\n <button class="toggle-btn" onclick="toggleReasons(${t})">\n ${e.result.reasons.length} reasons\n </button>\n </div>\n <ul class="evaluation-reasons hidden" id="reasons-${t}">\n ${e.result.reasons.map(e=>`\n <li>${e}</li>\n `).join("")}\n </ul>\n </div>\n `).join("")}\n </div>\n </section>\n `).join("")}\n </div>\n\n <script>\n function toggleReasons(index) {\n const reasons = document.getElementById('reasons-' + index);\n reasons.classList.toggle('hidden');\n }\n <\/script>\n</body>\n</html>`}}class g extends p{constructor(e){super(e)}async write(e){const t=e.items.length,n=e.items.filter(e=>e.result.passed).length,s=t-n;console.log("\n📊 EvalKit Report Summary"),console.log("========================"),console.log(`\n⏱ Duration: ${(e.duration/1e3).toFixed(2)}s`),console.log(`📝 Total Evaluations: ${t}`),console.log(`✅ Passed: ${u.default.green(String(n))}`),console.log(`❌ Failed: ${u.default.red(String(s))}`),console.log("\n📈 Metrics Breakdown"),console.log("------------------");
const a=e.items.reduce((e,t)=>(e[t.metricName]||(e[t.metricName]=[]),e[t.metricName].push(t),e),{});Object.entries(a).forEach(([e,t])=>{const n=t.filter(e=>e.result.passed).length,s=t.length-n,a=t.reduce((e,t)=>e+t.result.score,0)/t.length;console.log(`\n${e}:`),console.log(` Score: ${u.default.cyan(a.toFixed(2))}`),console.log(` Passed: ${u.default.green(String(n))} Failed: ${u.default.red(String(s))}`);const o=t.filter(e=>!e.result.passed);o.length>0&&(console.log(" Failed Evaluations:"),o.forEach(e=>{console.log(` - Score: ${u.default.red(e.result.score.toFixed(2))}`),e.result.reasons.forEach(e=>{console.log(` ${u.default.gray(e)}`)})}))})}}const v={openai:{},reporting:{outputFormats:[],outputDir:"./eval-reports"}},y=["evalkit.config.ts","evalkit.config.js","evalkit.config.mjs","evalkit.config.cjs"];async function b(){const e=function(){const e=process.cwd();for(const n of y){const s=t.resolve(e,n);if(a.existsSync(s))return s}return}();if(!e)return v;try{return function(e){return{openai:{...v.openai,...e.openai},reporting:{...v.reporting,...e.reporting}}}((await(n=e,Promise.resolve().then(function(){return i(require(n))}))).default)}catch(t){return console.warn(`Failed to load config from ${e}, using default config`),console.warn(t),v}var n}class w{static instance;config;openAIClient;runtimeConfig;constructor(){this.config={openai:{},reporting:{outputFormats:[],outputDir:"./eval-reports"}}}static getInstance(){return w.instance||(w.instance=new w),w.instance}configure(e){this.runtimeConfig=e,this.openAIClient=void 0}reset(){this.runtimeConfig=void 0,this.openAIClient=void 0}async init(){this.runtimeConfig||(this.config=await b()),this.openAIClient=void 0}getEffectiveOpenAIConfig(){return this.runtimeConfig?this.runtimeConfig:this.config.openai||{}}getOpenAIClient(){if(!this.openAIClient){const e=this.getEffectiveOpenAIConfig();if(!e||0===Object.keys(e).length)return this.openAIClient=new s.OpenAI,this.openAIClient;const t=Object.keys(e).filter(e=>"model"!==e&&"embeddingModel"!==e);if(1===t.length&&e.apiKey)return this.openAIClient=new s.OpenAI({apiKey:e.apiKey}),this.openAIClient;if(0===t.length)return this.openAIClient=new s.OpenAI,this.openAIClient;const n={apiKey:e.apiKey,baseURL:e.baseURL,defaultHeaders:e.apiVersion?{"api-version":e.apiVersion}:void 0,defaultQuery:e.deploymentName?{"deployment-id":e.deploymentName}:void 0};this.openAIClient=new s.OpenAI(n)}return this.openAIClient}getModel(){return this.getEffectiveOpenAIConfig().model||"gpt-4o-mini"}getEmbeddingModel(){return this.getEffectiveOpenAIConfig().embeddingModel||"text-embedding-ada-002"}getReportingConfig(){return this.config.reporting}}const x=w.getInstance();class E{static instance;results=[];startTime=0;endTime=0;reporters;constructor(){const{outputFormats:e=[],outputDir:t="./eval-reports"}=x.getReportingConfig();this.reporters=[new g],e.includes("json")&&this.reporters.push(new h({outputDir:t})),e.includes("html")&&this.reporters.push(new f({outputDir:t}))}static getInstance(){return E.instance||(E.instance=new E),E.instance}static resetInstance(){E.instance=void 0}reportEvaluationStart(){this.startTime=Date.now()}async reportEvaluationEnd(){this.endTime=Date.now(),await this.writeReports()}reportEvaluation(e){this.results.push(e)}formatResults(e){return
e}getFinalReport(){return{startTime:this.startTime,endTime:this.endTime,items:this.results,duration:this.endTime-this.startTime}}async writeReports(){const e=this.getFinalReport();await Promise.all(this.reporters.map(t=>t.write(e)))}}class S{name;criteria;evaluationParams;threshold;evaluationsReporter;openai;model;embeddingModel;constructor(e,t,n,s=.5){this.openai=x.getOpenAIClient(),this.model=x.getModel(),this.embeddingModel=x.getEmbeddingModel(),this.name=e,this.criteria=t,this.evaluationParams=n,this.threshold=s,this.evaluationsReporter=E.getInstance()}async evaluateSteps(){throw new Error("evaluateSteps() must be implemented by subclasses")}async executeEvaluation(){const{score:e,reasons:t}=await this.evaluateSteps(),n=e>=this.threshold;return this.reportEvaluation({passed:n,score:e,reasons:t}),{passed:n,score:e,reasons:t}}reportEvaluation(e){const t=this.evaluationsReporter.formatResults({result:e,metricName:this.name,params:this.evaluationParams});this.evaluationsReporter.reportEvaluation(t)}}var $,I,O;async function C(e){let t=0;const n=e.split(".").map(e=>e.trim()).filter(Boolean);for(let e=0;e<n.length-1;e++){const s=n[e],a=n[e+1],o=await this.openai.chat.completions.create({messages:[{role:"system",content:"\n\t\t Evaluate the coherence between two consecutive statements. Consider if the transition is logically supported by evidence or if it makes an unsupported leap. Assess if the second statement follows naturally from the first, or if it introduces generalizations that lack grounding in the first statement.\n\t\t Return JSON response with schema: { label: 'Coherent', 'Incoherent'; score: float between 1-0 relative to label (so incoherent can also get 0.9); reason: string (1 liner) }\n "},{role:"user",content:`Does the statement "${a}" logically follow from "${s}"?`}],max_tokens:80,model:this.model});if(!o.choices[0]?.message?.content)return{score:0,reason:"Failed to get valid response from OpenAI"};const i=d(o.choices[0].message.content);if(!i||"string"!=typeof i.label||"number"!=typeof i.score||"string"!=typeof i.reason)return{score:0,reason:"Invalid response format from LLM"};if(i.label===$.Incoherent&&i.score>=.8)return{score:0,reason:i.reason};i.label===$.Coherent&&i.score>=.8&&t++}return{score:t/n.length,reason:"The text demonstrates a coherent logical flow."}}!function(e){e.Coherent="Coherent",e.Incoherent="Incoherent"}($||($={}));async function A(e,t,n,s){const a=[];for(const o of s){const s=await this.openai.chat.completions.create({messages:[{role:"system",content:"\n\t Evaluate the following input, actual response and expected response based on a given set of criteria.\n\t For each criterion, provide it's own score object.\n\t Respond with a JSON of the following format: {\n\t criteria: string - corresponds to the criteria being evaluated;\n\t score: number - the score between 0 to 1 assigned to the actual output based on the criterion;\n\t reason: string - an array of reasons supporting the score assigned to the actual output (1 liner).\n\t }\n "},{role:"user",content:`\n\t\t\t\t\tInput: ${e}\n\t\t\t Expected Output: ${n}\n\t\t\t Actual Output: ${t}\n\t\t\t Criteria: ${o.type}\n\t\t\t\t`}],max_tokens:250,model:this.model});if(!s.choices[0]?.message?.content){a.push({criteria:o.type,score:0,reason:"Failed to get valid response from OpenAI",passed:!1});continue}const i=d(s.choices[0].message.content);i&&"number"==typeof i.score&&"string"==typeof i.reason?a.push({...i,criteria:o.type,passed:i.score>=.8}):a.push({criteria:o.type,score:0,reason:"Invalid response format 
from LLM",passed:!1})}return a}async function R(e,t){const n=e.split(".").map(e=>e.trim()).filter(Boolean);let s=0;for(const e of n){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n\t\t\t\t\t\tGiven the context: "${t}". Assess whether the following user statement accurately reflects the context without introducing any inaccuracies or distortions. Consider both direct claims and implied meanings.\n\t\t\t\t\t\tReturn JSON response with schema: { label: ${Object.values(I).join(",")}; score: float between 1-0 }\n `},{role:"user",content:e}],max_tokens:80,model:this.model});if(!n.choices[0]?.message?.content)return 0;const a=d(n.choices[0].message.content);if(!a||"string"!=typeof a.label||"number"!=typeof a.score)return 0;a.label===I.Faithful&&a.score>=.8&&s++}return s/n.length}!function(e){e.Faithful="faithful",e.Contradicting="contradicting"}(I||(I={}));async function j(e,t){const n=e.split(".").map(e=>e.trim()).filter(Boolean);let s=0;for(const e of n){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n Given the context: "${t}". Decide if a user statement is based on the context or if it introduces facts not found in the context or general knowledge.\n Return JSON response with schema: { label: ${Object.values(O).join(",")}; score: float between 1-0 }\n `},{role:"user",content:e}],max_tokens:80,model:this.model});if(!n.choices[0]?.message?.content)return 0;const a=d(n.choices[0].message.content);if(!a||"string"!=typeof a.label||"number"!=typeof a.score)return 0;a.label===O.Hallucinated&&a.score<=.8&&s++}return n.length>0?s/n.length:0}!function(e){e.Faithful="Faithful",e.Hallucinated="Hallucinated"}(O||(O={}));async function F(e,t){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n Given the user question to follow, classify the intent behind the text and evaluate against the expected intent(s).\n Expected intent(s): ${t.join(",")}\n Return JSON response with schema: Array<{\n\t intent: string;\n\t confidence: float between 1-0 that indicates the confidence of the detected intent;\n\t similarity: {\n\t inputIntent: string;\n\t score: float between 1-0 that indicates the similarity between the detected intent and one of the expected intent(s)\n\t }\n }>\n RETURN AN ARRAY FOR THE LIST\n `},{role:"user",content:e}],max_tokens:250,model:this.model});if(!n.choices[0]?.message?.content)return{score:0,reasons:["Failed to get valid response from OpenAI"]};const s=d(n.choices[0].message.content);if(!s||!Array.isArray(s))return{score:0,reasons:["Invalid response format from LLM"]};let a=0;for(const e of s){if(!e?.confidence||!e?.similarity?.inputIntent||!e?.similarity?.score)continue;const{confidence:n,similarity:s}=e;n>.8&&t.includes(s.inputIntent)&&s.score>=.8&&a++}const o=[`Detected intents: ${s.map(e=>e.intent).join(", ")}`,`Matching intents: ${a} out of ${t.length}`];return{score:a/t.length,reasons:o}}async function T(e,t){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:"\n Given The expected user question, asses the relevancy of the answer given.\n Return JSON response with schema: { score: float between 1-0 }\n "},{role:"user",content:`Question: ${e}. 
Answer: ${t}`}],max_tokens:80,model:this.model});if(!n.choices[0]?.message?.content)return 0;const s=d(n.choices[0].message.content);return s&&"number"==typeof s.score?s.score:0}async function M(e,t){try{const{data:a}=await this.openai.embeddings.create({input:[e,t],model:this.embeddingModel});return a?.[0]?.embedding&&a?.[1]?.embedding?(n=a[0].embedding,s=a[1].embedding,n.reduce((e,t,n)=>e+t*s[n],0)/(Math.sqrt(n.reduce((e,t)=>e+t*t,0))*Math.sqrt(s.reduce((e,t)=>e+t*t,0)))):0}catch(e){return 0}var n,s}exports.BiasDetectionMetric=class extends S{constructor(e){super("Bias Detection Evaluation","Check for biases in the provided text",e)}async evaluateSteps(){const{output:e}=this.evaluationParams;return{score:await m.bind(this)(e),reasons:[]}}},exports.CoherenceMetric=class extends S{constructor(e){super("Coherence Evaluation","Evaluate the logical flow and consistency of the paragraph",e)}async evaluateSteps(){const{output:e}=this.evaluationParams,{score:t,reason:n}=await C.bind(this)(e);return{score:t,reasons:[n]}}},exports.DynamicMetric=class extends S{constructor(e){super("Dynamic Metric","Dynamic Evaluation",e)}async evaluateSteps(){const e=await A.call(this,this.evaluationParams.input,this.evaluationParams.actualOutput,this.evaluationParams.expectedOutput,this.evaluationParams.criteria);return{score:e.filter(e=>e.passed).length/e.length,reasons:e}}},exports.FaithfulnessMetric=class extends S{constructor(e){super("Faithfulness Evaluation","Evaluate the accuracy of the generated text compared to the source text",e)}async evaluateSteps(){const{context:e,output:t}=this.evaluationParams,n=await R.bind(this)(t,e);return{score:n,reasons:1===n?["All statements in the generated text are truthful."]:[`Faithfulness score: ${n}`]}}},exports.HallucinationMetric=class extends S{constructor(e){super("Hallucination Evaluation","Evaluate hallucinations in the generated text using different strategies",e)}async evaluateSteps(){const{context:e,output:t}=this.evaluationParams,n=await j.bind(this)(t,e);return{score:n,reasons:1===n?["All statements in the generated text are truthful."]:[`Hallucination score: ${n}`]}}},exports.IntentClassificationMetric=class extends S{constructor(e){super("Intent Classification Evaluation","Classify the intent behind the given text and evaluate against the expected intent",e)}async evaluateSteps(){const{input:e,expectedIntents:t}=this.evaluationParams,{score:n,reasons:s}=await F.bind(this)(e,t);return{score:n,reasons:s}}},exports.RelevancyMetric=class extends S{constructor(e){super("Relevancy Evaluation","Evaluate how well the answer addresses the question",e)}async evaluateSteps(){const{input:e,output:t}=this.evaluationParams,n=await T.bind(this)(e,t);return n>=.8?{score:n,reasons:["The answer is relevant to the question."]}:{score:n,reasons:["The answer is not relevant to the question."]}}},exports.ReportService=E,exports.SemanticSimilarityMetric=class extends S{constructor(e){super("Semantic Similarity Evaluation","Evaluate the semantic similarity between two texts",e)}async evaluateSteps(){const{text1:e,text2:t}=this.evaluationParams,n=await M.bind(this)(e,t),s=[`Semantic similarity score: ${n}`];return{score:Math.round(100*n)/100,reasons:s}}},exports.THRESHOLD=.8,exports.classifyIntent=F,exports.configure=function(e){x.configure(e)},exports.evaluate=async function(e,t){await x.init();const n=E.getInstance();n.reportEvaluationStart();for(const n of t){const t=new n(e);try{const e=await t.executeEvaluation();console.log(`${t.name} 
Passed:`,e.passed),console.log("Score:",e.score),console.log("Reasons:",e.reasons)}catch(e){console.error(`${t.name} Failed:`,e)}}return await n.reportEvaluationEnd(),n.getFinalReport()},exports.evaluateBias=m,exports.evaluateCoherence=C,exports.evaluateDynamic=A,exports.evaluateFaithfulness=R,exports.evaluateHallucination=j,exports.evaluateRelevancy=T,exports.evaluateSemanticSimilarity=M;
//# sourceMappingURL=index.cjs.map
package/dist/index.cjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.cjs","sources":[],"sourcesContent":[],"names":[],"mappings":""}
package/dist/index.mjs
ADDED
@@ -0,0 +1,2 @@
import e from"fs/promises";import t,{resolve as n}from"path";import s from"chalk";import{OpenAI as o}from"openai";import{existsSync as a}from"fs";function i(e){try{const t=function(e){const t=e.match(/```(?:json)?\s*([\s\S]*?)```/);return t&&t[1]?t[1].trim():e.trim()}(e);return JSON.parse(t)}catch{return null}}var r;!function(e){e.Biased="Biased",e.Unbiased="Unbiased"}(r||(r={}));const c=.8;async function l(e){const t=e.split(".").map(e=>e.trim()).filter(Boolean);let n=0;for(const e of t){const t=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n Assess whether the following statement exhibits any form of bias, such as cultural, gender, racial, or ideological. Evaluate the statement for any language or content that might unfairly favor or disfavor a particular group or perspective.\n Return JSON response with schema: { label: ${Object.values(r).join(",")}; confidence: float between 1-0. }\n `},{role:"user",content:e}],max_tokens:80,model:this.model});if(!t.choices[0]?.message?.content)return 0;const s=i(t.choices[0].message.content);if(!s||"string"!=typeof s.label||"number"!=typeof s.confidence)return 0;s.label===r.Biased&&s.confidence>.8&&n++}return n/t.length}class u{config;constructor(e={}){this.config={outputDir:"./eval-reports",filename:`evalkit-report-${Date.now()}`,...e}}}class d extends u{constructor(e){super(e)}async write(n){const s=t.join(this.config.outputDir,`${this.config.filename}.json`);await e.mkdir(this.config.outputDir,{recursive:!0});const o=this.generateSummary(n);await e.writeFile(s,JSON.stringify({summary:o,...n},null,2))}generateSummary(e){const t=e.items.length,n=e.items.filter(e=>e.result.passed).length,s=e.items.reduce((e,t)=>e+t.result.score,0)/t,o=e.items.reduce((e,t)=>{e[t.metricName]||(e[t.metricName]={passed:0,failed:0,totalScore:0,count:0});const n=e[t.metricName];return n?(n.count++,n.totalScore+=t.result.score,t.result.passed?n.passed++:n.failed++,e):e},{});return{totalEvaluations:t,passedEvaluations:n,failedEvaluations:t-n,averageScore:s,duration:e.duration,startTime:e.startTime,endTime:e.endTime,metrics:Object.entries(o).reduce((e,[t,n])=>(e[t]={passed:n.passed,failed:n.failed,averageScore:n.totalScore/n.count},e),{})}}}class m extends u{constructor(e){super(e)}async write(n){const s=t.join(this.config.outputDir,`${this.config.filename}.html`);await e.mkdir(this.config.outputDir,{recursive:!0});const o=this.generateSummary(n),a=this.generateHTML(o,n);await e.writeFile(s,a)}generateSummary(e){const t=e.items.length,n=e.items.filter(e=>e.result.passed).length,s=t-n,o=e.items.reduce((e,t)=>e+t.result.score,0)/t,a=e.items.reduce((e,t)=>{e[t.metricName]||(e[t.metricName]={passed:0,failed:0,totalScore:0,count:0,items:[]});const n=e[t.metricName];return n?(n.count++,n.totalScore+=t.result.score,n.items.push(t),t.result.passed?n.passed++:n.failed++,e):e},{});return{totalEvaluations:t,passedEvaluations:n,failedEvaluations:s,averageScore:o,duration:e.duration,startTime:e.startTime,endTime:e.endTime,metrics:a}}generateHTML(e,t){const n=e.passedEvaluations/e.totalEvaluations*100,s=(e.duration/1e3).toFixed(2);return`<!DOCTYPE html>\n<html>\n<head>\n <title>EvalKit Report</title>\n <style>\n :root {\n --primary: #3b82f6;\n --success: #22c55e;\n --error: #ef4444;\n --warning: #f59e0b;\n --text: #1f2937;\n --bg: #ffffff;\n --bg-alt: #f3f4f6;\n }\n \n body {\n font-family: system-ui, -apple-system, sans-serif;\n line-height: 1.5;\n color: var(--text);\n background: var(--bg);\n margin: 0;\n padding: 2rem;\n }\n \n .container {\n max-width: 
1200px;\n margin: 0 auto;\n }\n \n .header {\n margin-bottom: 2rem;\n padding-bottom: 1rem;\n border-bottom: 1px solid #e5e7eb;\n }\n \n .title {\n font-size: 2rem;\n font-weight: 600;\n margin: 0;\n }\n \n .summary {\n display: grid;\n grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));\n gap: 1rem;\n margin-bottom: 2rem;\n }\n \n .stat-card {\n background: var(--bg-alt);\n padding: 1rem;\n border-radius: 0.5rem;\n }\n \n .stat-label {\n font-size: 0.875rem;\n color: #6b7280;\n }\n \n .stat-value {\n font-size: 1.5rem;\n font-weight: 600;\n margin-top: 0.25rem;\n }\n \n .metric-section {\n margin-bottom: 2rem;\n }\n \n .metric-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 1rem;\n }\n \n .metric-title {\n font-size: 1.25rem;\n font-weight: 600;\n }\n \n .metric-stats {\n display: flex;\n gap: 1rem;\n }\n \n .metric-stat {\n font-size: 0.875rem;\n }\n \n .evaluation-list {\n background: var(--bg-alt);\n border-radius: 0.5rem;\n overflow: hidden;\n }\n \n .evaluation-item {\n padding: 1rem;\n border-bottom: 1px solid #e5e7eb;\n }\n \n .evaluation-item:last-child {\n border-bottom: none;\n }\n \n .evaluation-header {\n display: flex;\n justify-content: space-between;\n margin-bottom: 0.5rem;\n }\n \n .evaluation-score {\n font-weight: 600;\n }\n \n .evaluation-reasons {\n margin: 0;\n padding-left: 1.5rem;\n color: #6b7280;\n }\n \n .passed { color: var(--success); }\n .failed { color: var(--error); }\n \n .toggle-btn {\n background: none;\n border: none;\n color: var(--primary);\n cursor: pointer;\n padding: 0.25rem 0.5rem;\n font-size: 0.875rem;\n }\n \n .toggle-btn:hover {\n text-decoration: underline;\n }\n \n .hidden {\n display: none;\n }\n </style>\n</head>\n<body>\n <div class="container">\n <header class="header">\n <h1 class="title">EvalKit Report</h1>\n </header>\n \n <div class="summary">\n <div class="stat-card">\n <div class="stat-label">Pass Rate</div>\n <div class="stat-value">${n.toFixed(1)}%</div>\n </div>\n <div class="stat-card">\n <div class="stat-label">Total Evaluations</div>\n <div class="stat-value">${e.totalEvaluations}</div>\n </div>\n <div class="stat-card">\n <div class="stat-label">Duration</div>\n <div class="stat-value">${s}s</div>\n </div>\n <div class="stat-card">\n <div class="stat-label">Average Score</div>\n <div class="stat-value">${e.averageScore.toFixed(2)}</div>\n </div>\n </div>\n\n ${Object.entries(e.metrics).map(([e,t])=>`\n <section class="metric-section">\n <div class="metric-header">\n <h2 class="metric-title">${e}</h2>\n <div class="metric-stats">\n <span class="metric-stat passed">✓ ${t.passed} passed</span>\n <span class="metric-stat failed">✗ ${t.failed} failed</span>\n <span class="metric-stat">Score: ${(t.totalScore/t.count).toFixed(2)}</span>\n </div>\n </div>\n \n <div class="evaluation-list">\n ${t.items.map((e,t)=>`\n <div class="evaluation-item">\n <div class="evaluation-header">\n <span class="evaluation-score ${e.result.passed?"passed":"failed"}">\n Score: ${e.result.score.toFixed(2)}\n </span>\n <button class="toggle-btn" onclick="toggleReasons(${t})">\n ${e.result.reasons.length} reasons\n </button>\n </div>\n <ul class="evaluation-reasons hidden" id="reasons-${t}">\n ${e.result.reasons.map(e=>`\n <li>${e}</li>\n `).join("")}\n </ul>\n </div>\n `).join("")}\n </div>\n </section>\n `).join("")}\n </div>\n\n <script>\n function toggleReasons(index) {\n const reasons = document.getElementById('reasons-' + index);\n reasons.classList.toggle('hidden');\n }\n 
<\/script>\n</body>\n</html>`}}class h extends u{constructor(e){super(e)}async write(e){const t=e.items.length,n=e.items.filter(e=>e.result.passed).length,o=t-n;console.log("\n📊 EvalKit Report Summary"),console.log("========================"),console.log(`\n⏱ Duration: ${(e.duration/1e3).toFixed(2)}s`),console.log(`📝 Total Evaluations: ${t}`),console.log(`✅ Passed: ${s.green(String(n))}`),console.log(`❌ Failed: ${s.red(String(o))}`),console.log("\n📈 Metrics Breakdown"),console.log("------------------");
const a=e.items.reduce((e,t)=>(e[t.metricName]||(e[t.metricName]=[]),e[t.metricName].push(t),e),{});Object.entries(a).forEach(([e,t])=>{const n=t.filter(e=>e.result.passed).length,o=t.length-n,a=t.reduce((e,t)=>e+t.result.score,0)/t.length;console.log(`\n${e}:`),console.log(` Score: ${s.cyan(a.toFixed(2))}`),console.log(` Passed: ${s.green(String(n))} Failed: ${s.red(String(o))}`);const i=t.filter(e=>!e.result.passed);i.length>0&&(console.log(" Failed Evaluations:"),i.forEach(e=>{console.log(` - Score: ${s.red(e.result.score.toFixed(2))}`),e.result.reasons.forEach(e=>{console.log(` ${s.gray(e)}`)})}))})}}const p={openai:{},reporting:{outputFormats:[],outputDir:"./eval-reports"}},f=["evalkit.config.ts","evalkit.config.js","evalkit.config.mjs","evalkit.config.cjs"];async function g(){const e=function(){const e=process.cwd();for(const t of f){const s=n(e,t);if(a(s))return s}return}();if(!e)return p;try{return function(e){return{openai:{...p.openai,...e.openai},reporting:{...p.reporting,...e.reporting}}}((await import(e)).default)}catch(t){return console.warn(`Failed to load config from ${e}, using default config`),console.warn(t),p}}class v{static instance;config;openAIClient;runtimeConfig;constructor(){this.config={openai:{},reporting:{outputFormats:[],outputDir:"./eval-reports"}}}static getInstance(){return v.instance||(v.instance=new v),v.instance}configure(e){this.runtimeConfig=e,this.openAIClient=void 0}reset(){this.runtimeConfig=void 0,this.openAIClient=void 0}async init(){this.runtimeConfig||(this.config=await g()),this.openAIClient=void 0}getEffectiveOpenAIConfig(){return this.runtimeConfig?this.runtimeConfig:this.config.openai||{}}getOpenAIClient(){if(!this.openAIClient){const e=this.getEffectiveOpenAIConfig();if(!e||0===Object.keys(e).length)return this.openAIClient=new o,this.openAIClient;const t=Object.keys(e).filter(e=>"model"!==e&&"embeddingModel"!==e);if(1===t.length&&e.apiKey)return this.openAIClient=new o({apiKey:e.apiKey}),this.openAIClient;if(0===t.length)return this.openAIClient=new o,this.openAIClient;const n={apiKey:e.apiKey,baseURL:e.baseURL,defaultHeaders:e.apiVersion?{"api-version":e.apiVersion}:void 0,defaultQuery:e.deploymentName?{"deployment-id":e.deploymentName}:void 0};this.openAIClient=new o(n)}return this.openAIClient}getModel(){return this.getEffectiveOpenAIConfig().model||"gpt-4o-mini"}getEmbeddingModel(){return this.getEffectiveOpenAIConfig().embeddingModel||"text-embedding-ada-002"}getReportingConfig(){return this.config.reporting}}const b=v.getInstance();function y(e){b.configure(e)}class w{static instance;results=[];startTime=0;endTime=0;reporters;constructor(){const{outputFormats:e=[],outputDir:t="./eval-reports"}=b.getReportingConfig();this.reporters=[new h],e.includes("json")&&this.reporters.push(new d({outputDir:t})),e.includes("html")&&this.reporters.push(new m({outputDir:t}))}static getInstance(){return w.instance||(w.instance=new w),w.instance}static resetInstance(){w.instance=void 0}reportEvaluationStart(){this.startTime=Date.now()}async reportEvaluationEnd(){this.endTime=Date.now(),await this.writeReports()}reportEvaluation(e){this.results.push(e)}formatResults(e){return e}getFinalReport(){return{startTime:this.startTime,endTime:this.endTime,items:this.results,duration:this.endTime-this.startTime}}async writeReports(){const e=this.getFinalReport();await
Promise.all(this.reporters.map(t=>t.write(e)))}}class x{name;criteria;evaluationParams;threshold;evaluationsReporter;openai;model;embeddingModel;constructor(e,t,n,s=.5){this.openai=b.getOpenAIClient(),this.model=b.getModel(),this.embeddingModel=b.getEmbeddingModel(),this.name=e,this.criteria=t,this.evaluationParams=n,this.threshold=s,this.evaluationsReporter=w.getInstance()}async evaluateSteps(){throw new Error("evaluateSteps() must be implemented by subclasses")}async executeEvaluation(){const{score:e,reasons:t}=await this.evaluateSteps(),n=e>=this.threshold;return this.reportEvaluation({passed:n,score:e,reasons:t}),{passed:n,score:e,reasons:t}}reportEvaluation(e){const t=this.evaluationsReporter.formatResults({result:e,metricName:this.name,params:this.evaluationParams});this.evaluationsReporter.reportEvaluation(t)}}class E extends x{constructor(e){super("Bias Detection Evaluation","Check for biases in the provided text",e)}async evaluateSteps(){const{output:e}=this.evaluationParams;return{score:await l.bind(this)(e),reasons:[]}}}var $,S,I;async function C(e){let t=0;const n=e.split(".").map(e=>e.trim()).filter(Boolean);for(let e=0;e<n.length-1;e++){const s=n[e],o=n[e+1],a=await this.openai.chat.completions.create({messages:[{role:"system",content:"\n\t\t Evaluate the coherence between two consecutive statements. Consider if the transition is logically supported by evidence or if it makes an unsupported leap. Assess if the second statement follows naturally from the first, or if it introduces generalizations that lack grounding in the first statement.\n\t\t Return JSON response with schema: { label: 'Coherent', 'Incoherent'; score: float between 1-0 relative to label (so incoherent can also get 0.9); reason: string (1 liner) }\n "},{role:"user",content:`Does the statement "${o}" logically follow from "${s}"?`}],max_tokens:80,model:this.model});if(!a.choices[0]?.message?.content)return{score:0,reason:"Failed to get valid response from OpenAI"};const r=i(a.choices[0].message.content);if(!r||"string"!=typeof r.label||"number"!=typeof r.score||"string"!=typeof r.reason)return{score:0,reason:"Invalid response format from LLM"};if(r.label===$.Incoherent&&r.score>=.8)return{score:0,reason:r.reason};r.label===$.Coherent&&r.score>=.8&&t++}return{score:t/n.length,reason:"The text demonstrates a coherent logical flow."}}!function(e){e.Coherent="Coherent",e.Incoherent="Incoherent"}($||($={}));class A extends x{constructor(e){super("Coherence Evaluation","Evaluate the logical flow and consistency of the paragraph",e)}async evaluateSteps(){const{output:e}=this.evaluationParams,{score:t,reason:n}=await C.bind(this)(e);return{score:t,reasons:[n]}}}async function R(e,t,n,s){const o=[];for(const a of s){const s=await this.openai.chat.completions.create({messages:[{role:"system",content:"\n\t Evaluate the following input, actual response and expected response based on a given set of criteria.\n\t For each criterion, provide it's own score object.\n\t Respond with a JSON of the following format: {\n\t criteria: string - corresponds to the criteria being evaluated;\n\t score: number - the score between 0 to 1 assigned to the actual output based on the criterion;\n\t reason: string - an array of reasons supporting the score assigned to the actual output (1 liner).\n\t }\n "},{role:"user",content:`\n\t\t\t\t\tInput: ${e}\n\t\t\t Expected Output: ${n}\n\t\t\t Actual Output: ${t}\n\t\t\t Criteria: 
${a.type}\n\t\t\t\t`}],max_tokens:250,model:this.model});if(!s.choices[0]?.message?.content){o.push({criteria:a.type,score:0,reason:"Failed to get valid response from OpenAI",passed:!1});continue}const r=i(s.choices[0].message.content);r&&"number"==typeof r.score&&"string"==typeof r.reason?o.push({...r,criteria:a.type,passed:r.score>=.8}):o.push({criteria:a.type,score:0,reason:"Invalid response format from LLM",passed:!1})}return o}class F extends x{constructor(e){super("Dynamic Metric","Dynamic Evaluation",e)}async evaluateSteps(){const e=await R.call(this,this.evaluationParams.input,this.evaluationParams.actualOutput,this.evaluationParams.expectedOutput,this.evaluationParams.criteria);return{score:e.filter(e=>e.passed).length/e.length,reasons:e}}}async function O(e,t){const n=e.split(".").map(e=>e.trim()).filter(Boolean);let s=0;for(const e of n){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n\t\t\t\t\t\tGiven the context: "${t}". Assess whether the following user statement accurately reflects the context without introducing any inaccuracies or distortions. Consider both direct claims and implied meanings.\n\t\t\t\t\t\tReturn JSON response with schema: { label: ${Object.values(S).join(",")}; score: float between 1-0 }\n `},{role:"user",content:e}],max_tokens:80,model:this.model});if(!n.choices[0]?.message?.content)return 0;const o=i(n.choices[0].message.content);if(!o||"string"!=typeof o.label||"number"!=typeof o.score)return 0;o.label===S.Faithful&&o.score>=.8&&s++}return s/n.length}!function(e){e.Faithful="faithful",e.Contradicting="contradicting"}(S||(S={}));class T extends x{constructor(e){super("Faithfulness Evaluation","Evaluate the accuracy of the generated text compared to the source text",e)}async evaluateSteps(){const{context:e,output:t}=this.evaluationParams,n=await O.bind(this)(t,e);return{score:n,reasons:1===n?["All statements in the generated text are truthful."]:[`Faithfulness score: ${n}`]}}}async function k(e,t){const n=e.split(".").map(e=>e.trim()).filter(Boolean);let s=0;for(const e of n){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n Given the context: "${t}". 
Decide if a user statement is based on the context or if it introduces facts not found in the context or general knowledge.\n Return JSON response with schema: { label: ${Object.values(I).join(",")}; score: float between 1-0 }\n `},{role:"user",content:e}],max_tokens:80,model:this.model});if(!n.choices[0]?.message?.content)return 0;const o=i(n.choices[0].message.content);if(!o||"string"!=typeof o.label||"number"!=typeof o.score)return 0;o.label===I.Hallucinated&&o.score<=.8&&s++}return n.length>0?s/n.length:0}!function(e){e.Faithful="Faithful",e.Hallucinated="Hallucinated"}(I||(I={}));class j extends x{constructor(e){super("Hallucination Evaluation","Evaluate hallucinations in the generated text using different strategies",e)}async evaluateSteps(){const{context:e,output:t}=this.evaluationParams,n=await k.bind(this)(t,e);return{score:n,reasons:1===n?["All statements in the generated text are truthful."]:[`Hallucination score: ${n}`]}}}async function N(e,t){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:`\n Given the user question to follow, classify the intent behind the text and evaluate against the expected intent(s).\n Expected intent(s): ${t.join(",")}\n Return JSON response with schema: Array<{\n\t intent: string;\n\t confidence: float between 1-0 that indicates the confidence of the detected intent;\n\t similarity: {\n\t inputIntent: string;\n\t score: float between 1-0 that indicates the similarity between the detected intent and one of the expected intent(s)\n\t }\n }>\n RETURN AN ARRAY FOR THE LIST\n `},{role:"user",content:e}],max_tokens:250,model:this.model});if(!n.choices[0]?.message?.content)return{score:0,reasons:["Failed to get valid response from OpenAI"]};const s=i(n.choices[0].message.content);if(!s||!Array.isArray(s))return{score:0,reasons:["Invalid response format from LLM"]};let o=0;for(const e of s){if(!e?.confidence||!e?.similarity?.inputIntent||!e?.similarity?.score)continue;const{confidence:n,similarity:s}=e;n>.8&&t.includes(s.inputIntent)&&s.score>=.8&&o++}const a=[`Detected intents: ${s.map(e=>e.intent).join(", ")}`,`Matching intents: ${o} out of ${t.length}`];return{score:o/t.length,reasons:a}}class D extends x{constructor(e){super("Intent Classification Evaluation","Classify the intent behind the given text and evaluate against the expected intent",e)}async evaluateSteps(){const{input:e,expectedIntents:t}=this.evaluationParams,{score:n,reasons:s}=await N.bind(this)(e,t);return{score:n,reasons:s}}}async function M(e,t){const n=await this.openai.chat.completions.create({messages:[{role:"system",content:"\n Given The expected user question, asses the relevancy of the answer given.\n Return JSON response with schema: { score: float between 1-0 }\n "},{role:"user",content:`Question: ${e}. 
Answer: ${t}`}],max_tokens:80,model:this.model});if(!n.choices[0]?.message?.content)return 0;const s=i(n.choices[0].message.content);return s&&"number"==typeof s.score?s.score:0}class P extends x{constructor(e){super("Relevancy Evaluation","Evaluate how well the answer addresses the question",e)}async evaluateSteps(){const{input:e,output:t}=this.evaluationParams,n=await M.bind(this)(e,t);return n>=.8?{score:n,reasons:["The answer is relevant to the question."]}:{score:n,reasons:["The answer is not relevant to the question."]}}}async function L(e,t){try{const{data:o}=await this.openai.embeddings.create({input:[e,t],model:this.embeddingModel});return o?.[0]?.embedding&&o?.[1]?.embedding?(n=o[0].embedding,s=o[1].embedding,n.reduce((e,t,n)=>e+t*s[n],0)/(Math.sqrt(n.reduce((e,t)=>e+t*t,0))*Math.sqrt(s.reduce((e,t)=>e+t*t,0)))):0}catch(e){return 0}var n,s}class B extends x{constructor(e){super("Semantic Similarity Evaluation","Evaluate the semantic similarity between two texts",e)}async evaluateSteps(){const{text1:e,text2:t}=this.evaluationParams,n=await L.bind(this)(e,t),s=[`Semantic similarity score: ${n}`];return{score:Math.round(100*n)/100,reasons:s}}}async function H(e,t){await b.init();const n=w.getInstance();n.reportEvaluationStart();for(const n of t){const t=new n(e);try{const e=await t.executeEvaluation();console.log(`${t.name} Passed:`,e.passed),console.log("Score:",e.score),console.log("Reasons:",e.reasons)}catch(e){console.error(`${t.name} Failed:`,e)}}return await n.reportEvaluationEnd(),n.getFinalReport()}export{E as BiasDetectionMetric,A as CoherenceMetric,F as DynamicMetric,T as FaithfulnessMetric,j as HallucinationMetric,D as IntentClassificationMetric,P as RelevancyMetric,w as ReportService,B as SemanticSimilarityMetric,c as THRESHOLD,N as classifyIntent,y as configure,H as evaluate,l as evaluateBias,C as evaluateCoherence,R as evaluateDynamic,O as evaluateFaithfulness,k as evaluateHallucination,M as evaluateRelevancy,L as evaluateSemanticSimilarity};
//# sourceMappingURL=index.mjs.map
package/dist/index.mjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.mjs","sources":[],"sourcesContent":[],"names":[],"mappings":""}
package/package.json
CHANGED
@@ -1,12 +1,18 @@
  {
    "name": "evalkit-ollama",
-   "version": "0.0.5",
+   "version": "0.0.6",
    "license": "Apache-2.0",
    "private": false,
-   "main": "dist/index.
-   "module": "dist/index.
-   "types": "dist/index.d.ts",
-   "
+   "main": "dist/index.cjs",
+   "module": "dist/index.mjs",
+   "types": "dist/src/index.d.ts",
+   "exports": {
+     ".": {
+       "require": "./dist/index.cjs",
+       "import": "./dist/index.mjs",
+       "types": "./dist/src/index.d.ts"
+     }
+   },
    "files": [
      "dist",
      "README.md",