amalfa 1.0.28 → 1.0.29
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -0
- package/package.json +3 -1
- package/src/core/GraphEngine.ts +252 -0
- package/src/core/GraphGardener.ts +244 -0
- package/src/core/VectorEngine.ts +4 -1
- package/src/daemon/sonar-agent.ts +179 -859
- package/src/daemon/sonar-inference.ts +116 -0
- package/src/daemon/sonar-logic.ts +662 -0
- package/src/daemon/sonar-strategies.ts +187 -0
- package/src/daemon/sonar-types.ts +68 -0
- package/src/mcp/index.ts +20 -5
- package/src/pipeline/AmalfaIngestor.ts +2 -2
- package/src/resonance/db.ts +9 -2
- package/src/resonance/schema.ts +15 -2
- package/src/utils/TagInjector.ts +90 -0
- package/src/utils/sonar-client.ts +17 -0
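The new sonar-inference.ts module (diffed below) reads its provider settings from config.sonar. As a minimal sketch, the fields it appears to expect can be inferred from the property accesses in the diff; the actual types live in @src/config/defaults and are not part of this diff, so treat names and optionality as assumptions:

// Sketch only: field names inferred from the diff below; the real
// definitions in @src/config/defaults may differ.
interface SonarCloudConfig {
  enabled?: boolean;
  provider?: "ollama" | "openrouter";
  model?: string;
  apiKey?: string;
}

interface SonarConfig {
  host?: string;  // local Ollama host, e.g. "localhost:11434"
  model?: string; // fallback model name
  cloud?: SonarCloudConfig;
}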
package/src/daemon/sonar-inference.ts (new file)
@@ -0,0 +1,116 @@
+import { AMALFA_DIRS, loadConfig } from "@src/config/defaults";
+import { getLogger } from "@src/utils/Logger";
+import type { Message, RequestOptions } from "./sonar-types";
+
+const log = getLogger("SonarInference");
+
+/**
+ * Shared state for inference
+ */
+export const inferenceState = {
+  ollamaAvailable: false,
+  ollamaModel: "phi3:latest",
+};
+
+/**
+ * Call Ollama HTTP API for inference
+ * Supports both local Ollama and OpenRouter (cloud) providers.
+ */
+export async function callOllama(
+  messages: Message[],
+  options: RequestOptions = {},
+): Promise<{ message: Message }> {
+  const config = await loadConfig();
+  const hostArgs = config.sonar;
+
+  // Cloud toggle: dev-cloud/prod-local strategy
+  const cloudConfig = hostArgs.cloud;
+  const useCloud = cloudConfig?.enabled === true;
+  const provider = useCloud ? cloudConfig.provider || "ollama" : "ollama";
+
+  // Tiered model strategy: options.model > cloud.model > discovered > config > default
+  const { format, model: overrideModel, ...modelOptions } = options;
+  const model =
+    overrideModel ||
+    (useCloud ? cloudConfig.model : null) ||
+    inferenceState.ollamaModel ||
+    hostArgs.model ||
+    "qwen2.5:1.5b";
+
+  // Build headers
+  const headers: Record<string, string> = {
+    "Content-Type": "application/json",
+  };
+  // API key: prefer env var (OPENROUTER_API_KEY) over config
+  const apiKey = process.env.OPENROUTER_API_KEY || cloudConfig?.apiKey;
+  if (useCloud && apiKey) {
+    headers.Authorization = `Bearer ${apiKey}`;
+    log.info(
+      { provider, hasKey: !!apiKey, keyLength: apiKey?.length },
+      "Cloud request with API key",
+    );
+  } else if (useCloud) {
+    log.warn("Cloud enabled but no API key found in env or config!");
+  }
+
+  // OpenRouter requires site headers for tracking
+  if (provider === "openrouter") {
+    headers["HTTP-Referer"] = "https://github.com/pjsvis/amalfa";
+    headers["X-Title"] = "AMALFA Knowledge Graph";
+  }
+
+  // Determine endpoint and request format based on provider
+  let endpoint: string;
+  let body: string;
+
+  if (provider === "openrouter") {
+    endpoint = "https://openrouter.ai/api/v1/chat/completions";
+    body = JSON.stringify({
+      model,
+      messages,
+      stream: false,
+      ...modelOptions,
+    });
+  } else {
+    // Local Ollama
+    const host = hostArgs.host || "localhost:11434";
+    endpoint = `http://${host}/api/chat`;
+    body = JSON.stringify({
+      model,
+      messages,
+      stream: false,
+      format,
+      options: modelOptions,
+    });
+  }
+
+  try {
+    const response = await fetch(endpoint, {
+      method: "POST",
+      headers,
+      body,
+    });
+
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`Inference failed (${response.status}): ${error}`);
+    }
+
+    const result = await response.json();
+
+    if (provider === "openrouter") {
+      // OpenAI format
+      return {
+        message: (result as any).choices[0].message,
+      };
+    }
+    // Ollama format
+    return {
+      message: (result as any).message,
+    };
+  } catch (error) {
+    const errorMsg = error instanceof Error ? error.message : String(error);
+    log.error({ error: errorMsg, endpoint, model }, "Ollama inference failed");
+    throw error;
+  }
+}
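For orientation, a minimal usage sketch of the exported helper. It assumes Message follows the usual Ollama chat shape ({ role, content }), which the code suggests by returning result.message directly; sonar-types.ts is not shown in this hunk, so the exact shape is an assumption, and the summarize function below is purely illustrative.

import { callOllama } from "./sonar-inference";
import type { Message } from "./sonar-types";

// Hypothetical caller: ask the configured backend (local Ollama or
// OpenRouter, depending on config.sonar.cloud.enabled) for a summary.
async function summarize(text: string): Promise<string> {
  const messages: Message[] = [
    { role: "system", content: "Summarize the user's text in one sentence." },
    { role: "user", content: text },
  ];
  // Omitting options.model lets the tiered fallback pick one:
  // options.model > cloud.model > inferenceState.ollamaModel > config.sonar.model > "qwen2.5:1.5b"
  const { message } = await callOllama(messages);
  return message.content;
}

Note that the format option (Ollama's structured-output setting) is only forwarded on the local branch; the OpenRouter branch spreads the remaining options directly into the OpenAI-style request body.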