@aci-metrics/score 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,432 @@
+ /**
+  * OllamaProvider - Inference provider using the Ollama HTTP API
+  *
+  * This provider connects to a locally running Ollama server.
+  * It uses Ollama's format: "json" option for structured output,
+  * with manual validation against the schema.
+  *
+  * Benefits:
+  * - Easy model management (ollama pull/run)
+  * - Multiple models can be loaded simultaneously
+  * - Works with any Ollama-compatible model
+  *
+  * Requirements:
+  * - Ollama installed and running (https://ollama.ai)
+  * - Model pulled (e.g., ollama pull qwen2.5:1.5b)
+  *
+  * Note: Ollama's JSON mode doesn't enforce the schema strictly,
+  * so we validate the output manually after generation.
+  */
+
+ var BaseProvider = require('./base');
+
+ /**
+  * Provider implementation for the Ollama HTTP API
+  *
+  * @class OllamaProvider
+  * @extends BaseProvider
+  */
+ class OllamaProvider extends BaseProvider {
+   /**
+    * Create a new Ollama provider
+    *
+    * @param {Object} config - Runtime configuration
+    * @param {Object} modelConfig - Model configuration
+    * @param {string} basePath - Base path (not used for Ollama)
+    */
+   constructor(config, modelConfig, basePath) {
+     super(config, modelConfig);
+
+     this.providerName = 'ollama';
+     this.basePath = basePath;
+
+     // Get Ollama host from config or use the default
+     this.host = (config.ollama && config.ollama.host) || 'http://localhost:11434';
+
+     // Model name to use in Ollama
+     this.ollamaModel = modelConfig.ollamaModel || modelConfig.id;
+   }
+
+   /**
+    * Initialize the provider
+    *
+    * Verifies that Ollama is running and the model is available.
+    *
+    * @returns {Promise<void>}
+    * @throws {Error} If Ollama is not running or the model is not found
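+    *
+    * Note: the model check below relies on GET /api/tags returning a shape
+    * like { "models": [{ "name": "qwen2.5:1.5b" }] }, where names may carry
+    * a tag suffix such as ":latest".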
+    */
+   async initialize() {
+     this.log('Initializing...');
+     this.log('Host: ' + this.host);
+     this.log('Model: ' + this.ollamaModel);
+
+     // Check if Ollama is running by calling the API
+     this.log('Checking Ollama server...');
+     try {
+       var response = await this.httpGet(this.host + '/api/tags');
+
+       if (!response.ok) {
+         throw new Error('Ollama returned status ' + response.status);
+       }
+
+       var data = response.data;
+       this.log('Ollama server is running');
+
+       // Check if the model is available
+       var modelFound = false;
+       if (data.models && Array.isArray(data.models)) {
+         for (var i = 0; i < data.models.length; i++) {
+           var modelName = data.models[i].name;
+           // Ollama model names may include :latest or version tags
+           if (modelName === this.ollamaModel || modelName.startsWith(this.ollamaModel + ':')) {
+             modelFound = true;
+             break;
+           }
+         }
+       }
+
+       if (!modelFound) {
+         var errorMessage = 'Model not found in Ollama: ' + this.ollamaModel + '\n\n';
+         errorMessage += 'Pull the model with:\n';
+         errorMessage += ' ollama pull ' + this.ollamaModel + '\n\n';
+         errorMessage += 'Available models:\n';
+         if (data.models && data.models.length > 0) {
+           for (var j = 0; j < data.models.length; j++) {
+             errorMessage += ' - ' + data.models[j].name + '\n';
+           }
+         } else {
+           errorMessage += ' (none)\n';
+         }
+         throw new Error(errorMessage);
+       }
+
+       this.log('Model is available');
+       this.isInitialized = true;
+       this.log('Ready');
+
+     } catch (error) {
+       if (error.code === 'ECONNREFUSED') {
+         var connectionError = 'Cannot connect to Ollama at ' + this.host + '\n\n';
+         connectionError += 'Make sure Ollama is running:\n';
+         connectionError += ' 1. Install from https://ollama.ai\n';
+         connectionError += ' 2. Run: ollama serve\n';
+         throw new Error(connectionError);
+       }
+       throw error;
+     }
+   }
+
+   /**
+    * Generate a response with JSON output
+    *
+    * Uses Ollama's format: "json" option and validates against schema.
+    *
+    * @param {string} prompt - The prompt to send to the model
+    * @param {Object} [schema] - JSON schema for validation
+    * @returns {Promise<Object>} Parsed JSON response
+    * @throws {Error} If not initialized, generation fails, or validation fails
+    */
+   async generate(prompt, schema) {
+     // Check initialization
+     if (!this.isInitialized) {
+       throw new Error('Provider not initialized. Call initialize() first.');
+     }
+
+     // Get inference settings from config
+     var inference = this.config.inference || {};
+     var temperature = inference.temperature !== undefined ? inference.temperature : 0.1;
+     var maxTokens = inference.maxTokens !== undefined ? inference.maxTokens : 1024;
+
+     // Build the request body
+     // Enhance prompt with schema information if provided
+     var enhancedPrompt = prompt;
+     if (schema) {
+       enhancedPrompt = this.buildSchemaPrompt(prompt, schema);
+     }
+
+     var requestBody = {
+       model: this.ollamaModel,
+       prompt: enhancedPrompt,
+       stream: false,
+       format: 'json',
+       options: {
+         temperature: temperature,
+         num_predict: maxTokens
+       }
+     };
+
+     // Make the API call
+     this.log('Generating response...');
+     var response = await this.httpPost(this.host + '/api/generate', requestBody);
+
+     if (!response.ok) {
+       throw new Error('Ollama API error: ' + response.status);
+     }
+
+     var data = response.data;
+
+     if (!data.response) {
+       throw new Error('Ollama returned empty response');
+     }
+
+     // Parse the JSON response
+     this.log('Parsing response...');
+     var parsed;
+     try {
+       parsed = JSON.parse(data.response);
+     } catch (parseError) {
+       this.logError('Failed to parse JSON response', parseError);
+       this.log('Raw response: ' + data.response);
+       throw new Error('Model returned invalid JSON: ' + parseError.message);
+     }
+
+     // Validate against schema if provided
+     if (schema) {
+       var validation = this.validateResponse(parsed, schema);
+       if (!validation.isValid) {
+         this.logError('Response failed schema validation');
+         for (var i = 0; i < validation.errors.length; i++) {
+           this.log(' - ' + validation.errors[i]);
+         }
+
+         // Try to fix common issues and retry validation
+         var fixed = this.attemptFix(parsed, schema);
+         if (fixed) {
+           var revalidation = this.validateResponse(fixed, schema);
+           if (revalidation.isValid) {
+             this.log('Fixed validation issues');
+             return fixed;
+           }
+         }
+
+         throw new Error('Schema validation failed: ' + validation.errors.join('; '));
+       }
+     }
+
+     this.log('Generation complete');
+     return parsed;
+   }
+
+   /**
+    * Build a prompt that includes schema information
+    *
+    * This helps the model understand the expected output format
+    * since Ollama doesn't have native schema enforcement.
+    *
+    * @private
+    * @param {string} prompt - Original prompt
+    * @param {Object} schema - JSON schema
+    * @returns {string} Enhanced prompt
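+    *
+    * @example
+    * // For a hypothetical schema:
+    * //   { properties: { complexity: { type: 'integer', minimum: 1, maximum: 10 } },
+    * //     required: ['complexity'] }
+    * // the prompt is prefixed with a structure description containing:
+    * //   - "complexity" (integer) [required] - range: 1 to 10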
+    */
+   buildSchemaPrompt(prompt, schema) {
+     var schemaDescription = 'You must respond with a JSON object with the following structure:\n\n';
+
+     if (schema.properties) {
+       var propNames = Object.keys(schema.properties);
+       for (var i = 0; i < propNames.length; i++) {
+         var name = propNames[i];
+         var prop = schema.properties[name];
+         var required = schema.required && schema.required.indexOf(name) !== -1;
+
+         schemaDescription += '- "' + name + '" (' + prop.type + ')';
+         if (required) {
+           schemaDescription += ' [required]';
+         }
+         if (prop.enum) {
+           schemaDescription += ' - one of: ' + prop.enum.join(', ');
+         }
+         if (prop.minimum !== undefined || prop.maximum !== undefined) {
+           schemaDescription += ' - range: ' +
+             (prop.minimum !== undefined ? prop.minimum : 'any') +
+             ' to ' +
+             (prop.maximum !== undefined ? prop.maximum : 'any');
+         }
+         schemaDescription += '\n';
+       }
+     }
+
+     return schemaDescription + '\n' + prompt;
+   }
+
+   /**
+    * Attempt to fix common validation issues
+    *
+    * @private
+    * @param {Object} response - Parsed response
+    * @param {Object} schema - JSON schema
+    * @returns {Object|null} Fixed response or null
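+    *
+    * @example
+    * // Hypothetical input: { complexity: "12" } against an integer property
+    * // with maximum: 10 is first coerced to the number 12, then clamped to
+    * // 10 by the range check below.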
+    */
+   attemptFix(response, schema) {
+     var fixed = JSON.parse(JSON.stringify(response)); // Deep clone
+
+     if (!schema.properties) {
+       return null;
+     }
+
+     var propNames = Object.keys(schema.properties);
+     for (var i = 0; i < propNames.length; i++) {
+       var name = propNames[i];
+       var prop = schema.properties[name];
+
+       if (!(name in fixed)) {
+         continue;
+       }
+
+       // Try to coerce types
+       if (prop.type === 'number' || prop.type === 'integer') {
+         if (typeof fixed[name] === 'string') {
+           var num = Number(fixed[name]);
+           if (!isNaN(num)) {
+             fixed[name] = num;
+           }
+         }
+       }
+
+       // Clamp numbers to range
+       if (typeof fixed[name] === 'number') {
+         if (prop.minimum !== undefined && fixed[name] < prop.minimum) {
+           fixed[name] = prop.minimum;
+         }
+         if (prop.maximum !== undefined && fixed[name] > prop.maximum) {
+           fixed[name] = prop.maximum;
+         }
+       }
+     }
+
+     return fixed;
+   }
+
+   /**
+    * Clean up resources
+    *
+    * Nothing to clean up for an HTTP-based provider.
+    *
+    * @returns {Promise<void>}
+    */
+   async destroy() {
+     this.log('Cleaning up...');
+     this.isInitialized = false;
+     this.log('Cleanup complete');
+   }
+
+   /**
+    * Make an HTTP GET request
+    *
+    * Uses Node.js built-in http/https modules.
+    *
+    * @private
+    * @param {string} url - URL to fetch
+    * @returns {Promise<Object>} Response with ok, status, and data
+    */
+   async httpGet(url) {
+     return new Promise(function(resolve, reject) {
+       var parsedUrl = new URL(url);
+       var httpModule = parsedUrl.protocol === 'https:' ? require('https') : require('http');
+
+       var options = {
+         hostname: parsedUrl.hostname,
+         port: parsedUrl.port || (parsedUrl.protocol === 'https:' ? 443 : 80),
+         path: parsedUrl.pathname + parsedUrl.search,
+         method: 'GET',
+         headers: {
+           'Accept': 'application/json'
+         }
+       };
+
+       var req = httpModule.request(options, function(res) {
+         var chunks = [];
+
+         res.on('data', function(chunk) {
+           chunks.push(chunk);
+         });
+
+         res.on('end', function() {
+           var body = Buffer.concat(chunks).toString();
+           var data = null;
+
+           try {
+             data = JSON.parse(body);
+           } catch (e) {
+             data = body;
+           }
+
+           resolve({
+             ok: res.statusCode >= 200 && res.statusCode < 300,
+             status: res.statusCode,
+             data: data
+           });
+         });
+       });
+
+       req.on('error', function(err) {
+         reject(err);
+       });
+
+       req.end();
+     });
+   }
+
+   /**
+    * Make an HTTP POST request
+    *
+    * @private
+    * @param {string} url - URL to post to
+    * @param {Object} body - Request body (will be JSON stringified)
+    * @returns {Promise<Object>} Response with ok, status, and data
+    */
+   async httpPost(url, body) {
+     return new Promise(function(resolve, reject) {
+       var parsedUrl = new URL(url);
+       var httpModule = parsedUrl.protocol === 'https:' ? require('https') : require('http');
+       var bodyStr = JSON.stringify(body);
+
+       var options = {
+         hostname: parsedUrl.hostname,
+         port: parsedUrl.port || (parsedUrl.protocol === 'https:' ? 443 : 80),
+         path: parsedUrl.pathname + parsedUrl.search,
+         method: 'POST',
+         headers: {
+           'Content-Type': 'application/json',
+           'Content-Length': Buffer.byteLength(bodyStr),
+           'Accept': 'application/json'
+         }
+       };
+
+       var req = httpModule.request(options, function(res) {
+         var chunks = [];
+
+         res.on('data', function(chunk) {
+           chunks.push(chunk);
+         });
+
+         res.on('end', function() {
+           var responseBody = Buffer.concat(chunks).toString();
+           var data = null;
+
+           try {
+             data = JSON.parse(responseBody);
+           } catch (e) {
+             data = responseBody;
+           }
+
+           resolve({
+             ok: res.statusCode >= 200 && res.statusCode < 300,
+             status: res.statusCode,
+             data: data
+           });
+         });
+       });
+
+       req.on('error', function(err) {
+         reject(err);
+       });
+
+       req.write(bodyStr);
+       req.end();
+     });
+   }
+ }
+
+ // Export the class
+ module.exports = OllamaProvider;
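
For context, a minimal usage sketch of the provider above. The config and model-config shapes are inferred from the constructor and generate(), not documented by the package, and the require path is hypothetical:

    var OllamaProvider = require('./ollama'); // hypothetical path to this file

    async function main() {
      // Illustrative configs; only the fields this provider reads are shown.
      var config = {
        ollama: { host: 'http://localhost:11434' },
        inference: { temperature: 0.1, maxTokens: 1024 }
      };
      var modelConfig = { id: 'qwen2.5:1.5b' };

      var provider = new OllamaProvider(config, modelConfig, null);
      await provider.initialize();

      // Assumes BaseProvider's validateResponse handles these schema keywords.
      var schema = {
        type: 'object',
        required: ['complexity'],
        properties: { complexity: { type: 'integer', minimum: 1, maximum: 10 } }
      };

      var result = await provider.generate('Rate the complexity of this session.', schema);
      console.log(result);
      await provider.destroy();
    }

    main().catch(function(err) { console.error(err); process.exit(1); });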
@@ -0,0 +1,2 @@
+ # This file ensures the models directory exists in git
+ # Model files (*.gguf) are ignored - download separately
package/package.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "name": "@aci-metrics/score",
+   "version": "0.0.1",
+   "description": "The ACI scoring CLI is in beta. For an invite, contact us at acimetrics.com.",
+   "main": "aci-score.js",
+   "scripts": {
+     "score": "node aci-score.js",
+     "test": "node test-model.js",
+     "download-model": "echo 'Run: huggingface-cli download bartowski/Qwen2.5-1.5B-Instruct-GGUF Qwen2.5-1.5B-Instruct-Q4_K_M.gguf --local-dir ./models'"
+   },
+   "bin": {
+     "aci-score": "./aci-score.js"
+   },
+   "keywords": [
+     "aci",
+     "ai-collaboration",
+     "llm",
+     "scoring",
+     "aci-metrics"
+   ],
+   "author": "Original Engineering LLC",
+   "license": "UNLICENSED",
+   "private": false,
+   "engines": {
+     "node": ">=18.0.0"
+   },
+   "dependencies": {
+     "node-llama-cpp": "^3.3.0",
+     "simple-statistics": "^7.8.3"
+   }
+ }
@@ -0,0 +1,15 @@
+ <start_of_turn>user
+ You are an AI collaboration analyst. Analyze the following coding session metrics and respond with a JSON object only.
+
+ Session Metrics:
+ {{METRICS}}
+
+ Required JSON fields:
+ - task_type: One of "feature", "bugfix", "refactor", "docs", "test", "config", "other"
+ - complexity: Integer from 1 to 10
+ - collaboration_quality: Integer from 1 to 10
+ - archetype: One of "orchestrator", "refiner", "sprinter", "diver", "director", "partner"
+ - summary: Brief description (1-2 sentences)
+
+ Respond with valid JSON only, no other text.<end_of_turn>
+ <start_of_turn>model
@@ -0,0 +1,17 @@
+ <|begin_of_text|><|start_header_id|>system<|end_header_id|>
+
+ You are an AI collaboration analyst. Analyze coding session metrics and provide structured JSON assessments. Always respond with valid JSON only.<|eot_id|><|start_header_id|>user<|end_header_id|>
+
+ Analyze these session metrics:
+
+ {{METRICS}}
+
+ Provide a JSON object with these fields:
+ - task_type: One of (feature, bugfix, refactor, docs, test, config, other)
+ - complexity: Integer 1-10 based on scope and duration
+ - collaboration_quality: Integer 1-10 for human-AI collaboration effectiveness
+ - archetype: One of (orchestrator, refiner, sprinter, diver, director, partner)
+ - summary: Brief 1-2 sentence description
+
+ JSON response only:<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+
@@ -0,0 +1,16 @@
+ <|system|>
+ You are an AI collaboration analyst specializing in evaluating human-AI coding sessions. You analyze metrics and provide structured assessments in JSON format. Always respond with valid JSON only, no additional text.
+ <|end|>
+ <|user|>
+ Please analyze the following coding session metrics and provide your assessment:
+
+ {{METRICS}}
+
+ Your JSON response must include:
+ - task_type: The primary type of task (feature, bugfix, refactor, docs, test, config, or other)
+ - complexity: A score from 1 to 10 indicating task complexity
+ - collaboration_quality: A score from 1 to 10 rating the human-AI collaboration
+ - archetype: The collaboration style (orchestrator, refiner, sprinter, diver, director, or partner)
+ - summary: A brief 1-2 sentence summary of what was accomplished
+ <|end|>
+ <|assistant|>
@@ -0,0 +1,18 @@
+ You are an AI collaboration analyst. Your task is to analyze metrics from AI-assisted coding sessions and provide structured assessments.
+
+ Analyze the following session metrics and provide your assessment.
+
+ ## Session Metrics
+
+ {{METRICS}}
+
+ ## Instructions
+
+ Based on the metrics above, provide a JSON response with:
+ 1. task_type: Classify the primary task type (feature, bugfix, refactor, docs, test, config, other)
+ 2. complexity: Rate complexity from 1-10 based on scope, files touched, and session duration
+ 3. collaboration_quality: Rate the human-AI collaboration quality from 1-10
+ 4. archetype: Assign a primary archetype (orchestrator, refiner, sprinter, diver, director, partner)
+ 5. summary: Brief 1-2 sentence description of what was accomplished
+
+ Respond with valid JSON only. No additional text or explanation.
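
Each of these templates shares a single {{METRICS}} placeholder. A minimal substitution sketch, assuming the templates are stored as plain text files; the path and metrics object below are illustrative, not taken from the package:

    var fs = require('fs');

    // Hypothetical template path; the actual file layout is not shown in this diff.
    var template = fs.readFileSync('./prompts/llama3.txt', 'utf8');

    // Illustrative session metrics; the real metrics shape is produced by aci-score.js.
    var metrics = { files_changed: 3, duration_minutes: 42, turns: 18 };

    // String.replace with a string pattern swaps only the first occurrence,
    // which suffices because each template contains exactly one placeholder.
    var prompt = template.replace('{{METRICS}}', JSON.stringify(metrics, null, 2));

The resulting prompt can then be passed to OllamaProvider.generate() with a schema matching the required JSON fields listed above.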