@open-matrix/driver-ollama 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +20 -0
- package/dist/index.js +86 -0
- package/package.json +20 -0
package/dist/index.d.ts
ADDED
@@ -0,0 +1,20 @@
+import { type InferenceDriver, type DriverCapabilities, type InferRequest, type InferResponse, type HealthResult } from "@open-matrix/inference";
+export interface OllamaDriverConfig {
+    model?: string;
+    baseUrl?: string;
+    maxTokens?: number;
+    temperature?: number;
+}
+/** Check if an Ollama model supports native tool calling */
+export declare function modelSupportsTools(model: string): boolean;
+export declare class OllamaDriver implements InferenceDriver {
+    readonly id = "ollama";
+    readonly capabilities: DriverCapabilities;
+    private model;
+    private baseUrl;
+    private maxTokens;
+    private temperature;
+    constructor(config?: OllamaDriverConfig);
+    infer(request: InferRequest): Promise<InferResponse>;
+    health(): Promise<HealthResult>;
+}
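For orientation, a minimal usage sketch against the declarations above. It assumes the package and @open-matrix/inference resolve in the consuming project; the model name and config values are illustrative only.

    import { OllamaDriver, modelSupportsTools } from "@open-matrix/driver-ollama";

    // All fields are optional per OllamaDriverConfig; these values are examples.
    const driver = new OllamaDriver({
        model: "llama3.2",
        baseUrl: "http://localhost:11434",
        maxTokens: 2048,
        temperature: 0.2,
    });

    console.log(driver.id);                      // "ollama"
    console.log(modelSupportsTools("llama3.2")); // true (matches the driver's tool-capable model list)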
package/dist/index.js
ADDED
@@ -0,0 +1,86 @@
+// @omega/driver-ollama
+// InferenceDriver implementation for the Ollama local inference API.
+// Supports: local models, optional tool calling (model-dependent).
+// ZERO imports from OmegaLLM internals.
+import { InferenceError, } from "@open-matrix/inference";
+/** Check if an Ollama model supports native tool calling */
+export function modelSupportsTools(model) {
+    const toolModels = ["llama3.1", "llama3.2", "mistral", "mixtral", "qwen2.5", "command-r"];
+    return toolModels.some(m => model.toLowerCase().includes(m));
+}
+export class OllamaDriver {
+    id = "ollama";
+    capabilities;
+    model;
+    baseUrl;
+    maxTokens;
+    temperature;
+    constructor(config) {
+        this.model = config?.model ?? process.env.OLLAMA_MODEL ?? "llama3.2";
+        this.baseUrl = config?.baseUrl ?? process.env.OLLAMA_BASE_URL ?? "http://localhost:11434";
+        this.maxTokens = config?.maxTokens ?? 4096;
+        this.temperature = config?.temperature ?? 0.7;
+        const hasTools = modelSupportsTools(this.model);
+        this.capabilities = {
+            oracleProtocol: { reentrant: false, supportedOps: [] },
+            tooling: { native: hasTools, format: hasTools ? "ollama" : undefined },
+            session: { multiTurn: false, streaming: false, managedSessions: false, maxContext: 8192 },
+            io: { vision: false, audio: false, structuredOutput: false, fileUpload: false, artifacts: false },
+            transport: { kind: "local-process", latency: "seconds" },
+            mcp: { client: false, server: false },
+        };
+    }
+    async infer(request) {
+        const start = Date.now();
+        const model = request.model ?? this.model;
+        const body = {
+            model,
+            prompt: request.prompt,
+            system: request.systemPrompt,
+            options: {
+                num_predict: request.maxTokens ?? this.maxTokens,
+                temperature: request.temperature ?? this.temperature,
+            },
+            stream: false,
+        };
+        const resp = await fetch(`${this.baseUrl}/api/generate`, {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify(body),
+        });
+        if (!resp.ok) {
+            const text = await resp.text();
+            throw new InferenceError(`Ollama API ${resp.status}: ${text}`, this.id, "network");
+        }
+        const data = await resp.json();
+        const latencyMs = Date.now() - start;
+        return {
+            content: data.response ?? "",
+            latencyMs,
+            driverId: this.id,
+            usage: data.eval_count ? {
+                inputTokens: data.prompt_eval_count ?? 0,
+                outputTokens: data.eval_count ?? 0,
+            } : undefined,
+        };
+    }
+    async health() {
+        try {
+            const start = Date.now();
+            const resp = await fetch(`${this.baseUrl}/api/tags`);
+            if (!resp.ok)
+                return { ok: false, message: `HTTP ${resp.status}` };
+            const data = await resp.json();
+            const models = (data.models ?? []).map((m) => m.name);
+            const hasModel = models.some((n) => n.includes(this.model));
+            return {
+                ok: true,
+                latencyMs: Date.now() - start,
+                message: hasModel ? undefined : `Model "${this.model}" not found. Available: ${models.join(", ")}`,
+            };
+        }
+        catch (e) {
+            return { ok: false, message: `Ollama not reachable: ${e.message}` };
+        }
+    }
+}
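A sketch of how the two methods above might be exercised. It assumes a reachable Ollama instance with the configured model pulled; the prompt, error handling, and request shape (inferred from the fields the driver reads: prompt, systemPrompt, maxTokens, temperature, model) are illustrative, not a documented contract.

    import { OllamaDriver } from "@open-matrix/driver-ollama";

    const driver = new OllamaDriver();

    // health() probes GET {baseUrl}/api/tags and reports whether the configured model is available.
    const health = await driver.health();
    if (!health.ok) throw new Error(health.message);

    // infer() POSTs to {baseUrl}/api/generate with stream: false and returns the full completion.
    const result = await driver.infer({ prompt: "Say hello in one sentence." });
    console.log(result.content, result.latencyMs, result.usage);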
package/package.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "name": "@open-matrix/driver-ollama",
+  "version": "0.1.0",
+  "description": "Ollama local inference driver — raw fetch, zero SDK deps",
+  "type": "module",
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "files": ["dist"],
+  "scripts": {
+    "build": "tsc",
+    "clean": "rimraf dist"
+  },
+  "dependencies": {
+    "@open-matrix/inference": "^0.1.0"
+  },
+  "devDependencies": {
+    "@types/node": "^20.0.0",
+    "typescript": "^5.5.0"
+  }
+}