@matthesketh/utopia-ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,220 @@
1
+ "use strict";
2
+ var __create = Object.create;
3
+ var __defProp = Object.defineProperty;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
7
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
8
+ var __export = (target, all) => {
9
+ for (var name in all)
10
+ __defProp(target, name, { get: all[name], enumerable: true });
11
+ };
12
+ var __copyProps = (to, from, except, desc) => {
13
+ if (from && typeof from === "object" || typeof from === "function") {
14
+ for (let key of __getOwnPropNames(from))
15
+ if (!__hasOwnProp.call(to, key) && key !== except)
16
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
17
+ }
18
+ return to;
19
+ };
20
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
+ // If the importer is in node compatibility mode or this is not an ESM
22
+ // file that has been converted to a CommonJS file using a Babel-
23
+ // compatible transform (i.e. "__esModule" has not been set), then set
24
+ // "default" to the CommonJS "module.exports" for node compatibility.
25
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
+ mod
27
+ ));
28
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
+
30
+ // src/adapters/google.ts
31
+ var google_exports = {};
32
+ __export(google_exports, {
33
+ googleAdapter: () => googleAdapter
34
+ });
35
+ module.exports = __toCommonJS(google_exports);
36
/**
 * Create a Google Gemini adapter.
 *
 * Lazily imports the optional `@google/generative-ai` peer dependency the
 * first time a request is made, so merely loading this module never throws.
 *
 * @param config - Google configuration; `apiKey` is handed to the
 *   GoogleGenerativeAI client, `defaultModel` is the chat-model fallback.
 * @returns An adapter object exposing `chat`, `stream` and `embeddings`.
 */
function googleAdapter(config) {
  let genAI = null;
  // Resolve (and cache) the GoogleGenerativeAI client, importing the peer
  // dependency on first use and failing with an actionable message otherwise.
  async function getGenAI() {
    if (genAI) return genAI;
    let GoogleGenerativeAI;
    try {
      const mod = await import("@google/generative-ai");
      GoogleGenerativeAI = mod.GoogleGenerativeAI;
    } catch {
      throw new Error(
        '@matthesketh/utopia-ai: "@google/generative-ai" package is required for the Google adapter. Install it with: npm install @google/generative-ai'
      );
    }
    genAI = new GoogleGenerativeAI(config.apiKey);
    return genAI;
  }
  // Build a configured model instance for a chat/stream request.
  // Shared by chat() and stream(), which previously duplicated this verbatim.
  function getModel(ai, request) {
    const modelName = request.model ?? config.defaultModel ?? "gemini-2.0-flash";
    const modelConfig = {};
    if (request.temperature !== void 0) modelConfig.temperature = request.temperature;
    if (request.maxTokens !== void 0) modelConfig.maxOutputTokens = request.maxTokens;
    if (request.topP !== void 0) modelConfig.topP = request.topP;
    if (request.stop) modelConfig.stopSequences = request.stop;
    return ai.getGenerativeModel({
      model: modelName,
      generationConfig: modelConfig,
      ...request.tools?.length ? { tools: [{ functionDeclarations: request.tools.map(toGeminiTool) }] } : {},
      ...request.extra
    });
  }
  // Shared request payload for generateContent / generateContentStream.
  function toPayload(request) {
    const { system, contents } = toGeminiContents(request.messages);
    return {
      contents,
      ...system ? { systemInstruction: { parts: [{ text: system }] } } : {}
    };
  }
  return {
    async chat(request) {
      const ai = await getGenAI();
      const model = getModel(ai, request);
      const result = await model.generateContent(toPayload(request));
      const response = result.response;
      const candidate = response.candidates?.[0];
      const parts = candidate?.content?.parts ?? [];
      const textParts = parts.filter((p) => p.text).map((p) => p.text);
      const fnCalls = parts.filter((p) => p.functionCall);
      // Gemini does not supply tool-call ids, so synthesize unique ones.
      const toolCalls = fnCalls.map((p) => ({
        id: `call_${crypto.randomUUID()}`,
        name: p.functionCall.name,
        arguments: p.functionCall.args ?? {}
      }));
      return {
        content: textParts.join(""),
        toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
        finishReason: mapFinishReason(candidate?.finishReason),
        usage: response.usageMetadata ? {
          promptTokens: response.usageMetadata.promptTokenCount ?? 0,
          completionTokens: response.usageMetadata.candidatesTokenCount ?? 0,
          totalTokens: response.usageMetadata.totalTokenCount ?? 0
        } : void 0,
        raw: response
      };
    },
    async *stream(request) {
      const ai = await getGenAI();
      const model = getModel(ai, request);
      const result = await model.generateContentStream(toPayload(request));
      for await (const chunk of result.stream) {
        const parts = chunk.candidates?.[0]?.content?.parts ?? [];
        const text = parts.filter((p) => p.text).map((p) => p.text).join("");
        const finishReason = chunk.candidates?.[0]?.finishReason;
        yield {
          delta: text,
          finishReason: finishReason ? mapFinishReason(finishReason) : void 0,
          usage: chunk.usageMetadata ? {
            promptTokens: chunk.usageMetadata.promptTokenCount ?? 0,
            completionTokens: chunk.usageMetadata.candidatesTokenCount ?? 0,
            totalTokens: chunk.usageMetadata.totalTokenCount ?? 0
          } : void 0
        };
      }
    },
    async embeddings(request) {
      const ai = await getGenAI();
      // NOTE: deliberately not falling back to config.defaultModel here —
      // that default is a chat model, not an embedding model.
      const model = ai.getGenerativeModel({
        model: request.model ?? "text-embedding-004"
      });
      const inputs = Array.isArray(request.input) ? request.input : [request.input];
      const result = await model.batchEmbedContents({
        requests: inputs.map((text) => ({
          content: { parts: [{ text }] }
        }))
      });
      return {
        embeddings: result.embeddings.map((e) => e.values),
        raw: result
      };
    }
  };
}
/**
 * Convert adapter-level messages into Gemini `contents`, splitting out the
 * (last) system message as a separate instruction string.
 */
function toGeminiContents(messages) {
  let system;
  const contents = [];
  // Translate a single content piece into a Gemini part, or null to drop it.
  const convertPart = (piece) => {
    if (typeof piece === "string") return { text: piece };
    switch (piece.type) {
      case "text":
        return { text: piece.text };
      case "image":
        return { inlineData: { mimeType: piece.mediaType ?? "image/png", data: piece.source } };
      case "tool_call":
        return { functionCall: { name: piece.name, args: piece.arguments } };
      case "tool_result":
        // NOTE(review): uses the tool-call `id` as the function name — confirm
        // callers actually store the function name in `id`.
        return { functionResponse: { name: piece.id, response: { content: piece.content } } };
      default:
        return null;
    }
  };
  for (const msg of messages) {
    if (msg.role === "system") {
      // Non-string system content is not supported and collapses to "".
      system = typeof msg.content === "string" ? msg.content : "";
      continue;
    }
    const role = msg.role === "assistant" ? "model" : "user";
    if (typeof msg.content === "string") {
      contents.push({ role, parts: [{ text: msg.content }] });
    } else if (Array.isArray(msg.content)) {
      const parts = [];
      for (const piece of msg.content) {
        const converted = convertPart(piece);
        if (converted) parts.push(converted);
      }
      contents.push({ role, parts });
    } else if (msg.content.type === "text") {
      // Single non-array part object: only text is supported; others dropped.
      contents.push({ role, parts: [{ text: msg.content.text }] });
    }
  }
  return { system, contents };
}
/** Map an adapter tool definition onto a Gemini function declaration. */
function toGeminiTool(tool) {
  const { name, description, parameters } = tool;
  return { name, description, parameters };
}
/**
 * Normalize a Gemini finish reason to the adapter's vocabulary.
 * Only MAX_TOKENS maps to "length"; everything else (STOP, SAFETY,
 * RECITATION, OTHER, undefined) is reported as "stop".
 */
function mapFinishReason(reason) {
  return reason === "MAX_TOKENS" ? "length" : "stop";
}
// Annotate the CommonJS export names for ESM import in node:
// (dead `0 &&` expression — never executed at runtime; Node's cjs-module-lexer
// statically parses it to discover the named exports of this CJS file)
0 && (module.exports = {
  googleAdapter
});
@@ -0,0 +1,17 @@
1
import { G as GoogleConfig, A as AIAdapter } from '../types-FSnS43LM.cjs';

/**
 * Create a Google Gemini adapter.
 *
 * Requires `@google/generative-ai` as a peer dependency.
 *
 * ```ts
 * import { createAI } from '@matthesketh/utopia-ai';
 * import { googleAdapter } from '@matthesketh/utopia-ai/google';
 *
 * const ai = createAI(googleAdapter({ apiKey: process.env.GOOGLE_API_KEY }));
 * ```
 *
 * @param config - Google configuration (API key and optional default model).
 * @returns An adapter implementing the shared `AIAdapter` interface.
 */
declare function googleAdapter(config: GoogleConfig): AIAdapter;

export { googleAdapter };
@@ -0,0 +1,17 @@
1
import { G as GoogleConfig, A as AIAdapter } from '../types-FSnS43LM.js';

/**
 * Create a Google Gemini adapter.
 *
 * Requires `@google/generative-ai` as a peer dependency.
 *
 * ```ts
 * import { createAI } from '@matthesketh/utopia-ai';
 * import { googleAdapter } from '@matthesketh/utopia-ai/google';
 *
 * const ai = createAI(googleAdapter({ apiKey: process.env.GOOGLE_API_KEY }));
 * ```
 *
 * @param config - Google configuration (API key and optional default model).
 * @returns An adapter implementing the shared `AIAdapter` interface.
 */
declare function googleAdapter(config: GoogleConfig): AIAdapter;

export { googleAdapter };
@@ -0,0 +1,185 @@
1
// src/adapters/google.ts
/**
 * Create a Google Gemini adapter.
 *
 * Lazily imports the optional `@google/generative-ai` peer dependency the
 * first time a request is made, so merely loading this module never throws.
 *
 * @param config - Google configuration; `apiKey` is handed to the
 *   GoogleGenerativeAI client, `defaultModel` is the chat-model fallback.
 * @returns An adapter object exposing `chat`, `stream` and `embeddings`.
 */
function googleAdapter(config) {
  let genAI = null;
  // Resolve (and cache) the GoogleGenerativeAI client, importing the peer
  // dependency on first use and failing with an actionable message otherwise.
  async function getGenAI() {
    if (genAI) return genAI;
    let GoogleGenerativeAI;
    try {
      const mod = await import("@google/generative-ai");
      GoogleGenerativeAI = mod.GoogleGenerativeAI;
    } catch {
      throw new Error(
        '@matthesketh/utopia-ai: "@google/generative-ai" package is required for the Google adapter. Install it with: npm install @google/generative-ai'
      );
    }
    genAI = new GoogleGenerativeAI(config.apiKey);
    return genAI;
  }
  // Build a configured model instance for a chat/stream request.
  // Shared by chat() and stream(), which previously duplicated this verbatim.
  function getModel(ai, request) {
    const modelName = request.model ?? config.defaultModel ?? "gemini-2.0-flash";
    const modelConfig = {};
    if (request.temperature !== void 0) modelConfig.temperature = request.temperature;
    if (request.maxTokens !== void 0) modelConfig.maxOutputTokens = request.maxTokens;
    if (request.topP !== void 0) modelConfig.topP = request.topP;
    if (request.stop) modelConfig.stopSequences = request.stop;
    return ai.getGenerativeModel({
      model: modelName,
      generationConfig: modelConfig,
      ...request.tools?.length ? { tools: [{ functionDeclarations: request.tools.map(toGeminiTool) }] } : {},
      ...request.extra
    });
  }
  // Shared request payload for generateContent / generateContentStream.
  function toPayload(request) {
    const { system, contents } = toGeminiContents(request.messages);
    return {
      contents,
      ...system ? { systemInstruction: { parts: [{ text: system }] } } : {}
    };
  }
  return {
    async chat(request) {
      const ai = await getGenAI();
      const model = getModel(ai, request);
      const result = await model.generateContent(toPayload(request));
      const response = result.response;
      const candidate = response.candidates?.[0];
      const parts = candidate?.content?.parts ?? [];
      const textParts = parts.filter((p) => p.text).map((p) => p.text);
      const fnCalls = parts.filter((p) => p.functionCall);
      // Gemini does not supply tool-call ids, so synthesize unique ones.
      const toolCalls = fnCalls.map((p) => ({
        id: `call_${crypto.randomUUID()}`,
        name: p.functionCall.name,
        arguments: p.functionCall.args ?? {}
      }));
      return {
        content: textParts.join(""),
        toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
        finishReason: mapFinishReason(candidate?.finishReason),
        usage: response.usageMetadata ? {
          promptTokens: response.usageMetadata.promptTokenCount ?? 0,
          completionTokens: response.usageMetadata.candidatesTokenCount ?? 0,
          totalTokens: response.usageMetadata.totalTokenCount ?? 0
        } : void 0,
        raw: response
      };
    },
    async *stream(request) {
      const ai = await getGenAI();
      const model = getModel(ai, request);
      const result = await model.generateContentStream(toPayload(request));
      for await (const chunk of result.stream) {
        const parts = chunk.candidates?.[0]?.content?.parts ?? [];
        const text = parts.filter((p) => p.text).map((p) => p.text).join("");
        const finishReason = chunk.candidates?.[0]?.finishReason;
        yield {
          delta: text,
          finishReason: finishReason ? mapFinishReason(finishReason) : void 0,
          usage: chunk.usageMetadata ? {
            promptTokens: chunk.usageMetadata.promptTokenCount ?? 0,
            completionTokens: chunk.usageMetadata.candidatesTokenCount ?? 0,
            totalTokens: chunk.usageMetadata.totalTokenCount ?? 0
          } : void 0
        };
      }
    },
    async embeddings(request) {
      const ai = await getGenAI();
      // NOTE: deliberately not falling back to config.defaultModel here —
      // that default is a chat model, not an embedding model.
      const model = ai.getGenerativeModel({
        model: request.model ?? "text-embedding-004"
      });
      const inputs = Array.isArray(request.input) ? request.input : [request.input];
      const result = await model.batchEmbedContents({
        requests: inputs.map((text) => ({
          content: { parts: [{ text }] }
        }))
      });
      return {
        embeddings: result.embeddings.map((e) => e.values),
        raw: result
      };
    }
  };
}
/**
 * Convert adapter-level messages into Gemini `contents`, splitting out the
 * (last) system message as a separate instruction string.
 */
function toGeminiContents(messages) {
  let system;
  const contents = [];
  // Translate a single content piece into a Gemini part, or null to drop it.
  const convertPart = (piece) => {
    if (typeof piece === "string") return { text: piece };
    switch (piece.type) {
      case "text":
        return { text: piece.text };
      case "image":
        return { inlineData: { mimeType: piece.mediaType ?? "image/png", data: piece.source } };
      case "tool_call":
        return { functionCall: { name: piece.name, args: piece.arguments } };
      case "tool_result":
        // NOTE(review): uses the tool-call `id` as the function name — confirm
        // callers actually store the function name in `id`.
        return { functionResponse: { name: piece.id, response: { content: piece.content } } };
      default:
        return null;
    }
  };
  for (const msg of messages) {
    if (msg.role === "system") {
      // Non-string system content is not supported and collapses to "".
      system = typeof msg.content === "string" ? msg.content : "";
      continue;
    }
    const role = msg.role === "assistant" ? "model" : "user";
    if (typeof msg.content === "string") {
      contents.push({ role, parts: [{ text: msg.content }] });
    } else if (Array.isArray(msg.content)) {
      const parts = [];
      for (const piece of msg.content) {
        const converted = convertPart(piece);
        if (converted) parts.push(converted);
      }
      contents.push({ role, parts });
    } else if (msg.content.type === "text") {
      // Single non-array part object: only text is supported; others dropped.
      contents.push({ role, parts: [{ text: msg.content.text }] });
    }
  }
  return { system, contents };
}
/** Map an adapter tool definition onto a Gemini function declaration. */
function toGeminiTool(tool) {
  const { name, description, parameters } = tool;
  return { name, description, parameters };
}
/**
 * Normalize a Gemini finish reason to the adapter's vocabulary.
 * Only MAX_TOKENS maps to "length"; everything else (STOP, SAFETY,
 * RECITATION, OTHER, undefined) is reported as "stop".
 */
function mapFinishReason(reason) {
  return reason === "MAX_TOKENS" ? "length" : "stop";
}
+ export {
184
+ googleAdapter
185
+ };
@@ -0,0 +1,187 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/adapters/ollama.ts
21
+ var ollama_exports = {};
22
+ __export(ollama_exports, {
23
+ ollamaAdapter: () => ollamaAdapter
24
+ });
25
+ module.exports = __toCommonJS(ollama_exports);
26
/**
 * Create an Ollama adapter for local models.
 *
 * Talks to the Ollama HTTP API with native fetch — no dependencies.
 *
 * @param config - Optional `{ baseURL?, defaultModel? }`; baseURL defaults to
 *   http://localhost:11434 with any trailing slash stripped.
 * @returns An adapter object exposing `chat`, `stream` and `embeddings`.
 */
function ollamaAdapter(config = {}) {
  const baseURL = (config.baseURL ?? "http://localhost:11434").replace(/\/$/, "");
  // Build the /api/chat request body. Shared by chat() and stream(), which
  // previously duplicated this verbatim (differing only in `stream`).
  function buildChatBody(request, stream) {
    const model = request.model ?? config.defaultModel ?? "llama3.2";
    const body = {
      model,
      messages: toOllamaMessages(request.messages),
      stream,
      options: {}
    };
    if (request.temperature !== void 0) body.options.temperature = request.temperature;
    if (request.topP !== void 0) body.options.top_p = request.topP;
    if (request.maxTokens !== void 0) body.options.num_predict = request.maxTokens;
    if (request.stop) body.options.stop = request.stop;
    if (request.tools?.length) {
      body.tools = request.tools.map(toOllamaTool);
    }
    return body;
  }
  // POST JSON to an Ollama endpoint; throws a descriptive error on non-2xx.
  async function post(path, body) {
    const response = await fetch(`${baseURL}${path}`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body)
    });
    if (!response.ok) {
      const text = await response.text();
      throw new Error(`Ollama error ${response.status}: ${text}`);
    }
    return response;
  }
  // Map one parsed NDJSON chat-stream object to an adapter stream chunk.
  function toStreamChunk(data) {
    return {
      delta: data.message?.content ?? "",
      finishReason: data.done ? "stop" : void 0,
      usage: data.done ? {
        promptTokens: data.prompt_eval_count ?? 0,
        completionTokens: data.eval_count ?? 0,
        totalTokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0)
      } : void 0
    };
  }
  return {
    async chat(request) {
      const response = await post("/api/chat", buildChatBody(request, false));
      const data = await response.json();
      // Ollama tool calls carry no ids; synthesize positional ones.
      const toolCalls = (data.message?.tool_calls ?? []).map(
        (tc, i) => ({
          id: `call_${i}`,
          name: tc.function.name,
          arguments: tc.function.arguments ?? {}
        })
      );
      return {
        content: data.message?.content ?? "",
        toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
        finishReason: data.done_reason === "length" ? "length" : "stop",
        usage: {
          promptTokens: data.prompt_eval_count ?? 0,
          completionTokens: data.eval_count ?? 0,
          totalTokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0)
        },
        raw: data
      };
    },
    async *stream(request) {
      const response = await post("/api/chat", buildChatBody(request, true));
      if (!response.body) {
        throw new Error("Ollama error: streaming response has no body");
      }
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      // The stream is newline-delimited JSON: buffer partial lines across reads.
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? "";
        for (const line of lines) {
          if (!line.trim()) continue;
          yield toStreamChunk(JSON.parse(line));
        }
      }
      // Flush a final line that arrived without a trailing newline (the old
      // code silently dropped it).
      buffer += decoder.decode();
      if (buffer.trim()) {
        yield toStreamChunk(JSON.parse(buffer));
      }
    },
    async embeddings(request) {
      const model = request.model ?? "nomic-embed-text";
      const inputs = Array.isArray(request.input) ? request.input : [request.input];
      const results = [];
      // One request per input, sequentially. Newer servers return
      // `embeddings`; older ones return a single `embedding`.
      for (const input of inputs) {
        const response = await post("/api/embed", { model, input });
        const data = await response.json();
        results.push(...data.embeddings ?? [data.embedding]);
      }
      return { embeddings: results };
    }
  };
}
/**
 * Flatten adapter messages into Ollama chat messages.
 * Multi-part content is joined into one text string; image parts are
 * collected into the `images` array Ollama expects.
 */
function toOllamaMessages(messages) {
  // Collapse an array-of-parts message into a single Ollama message.
  const fromParts = (msg) => {
    const texts = [];
    const images = [];
    for (const piece of msg.content) {
      if (typeof piece === "string") {
        texts.push(piece);
      } else if (piece.type === "text") {
        texts.push(piece.text);
      } else if (piece.type === "image") {
        images.push(piece.source);
      } else if (piece.type === "tool_result") {
        texts.push(piece.content);
      }
    }
    // NOTE(review): the "tool" role is rewritten to "user" here but passed
    // through unchanged for plain-string content below — confirm intended.
    const role = msg.role === "tool" ? "user" : msg.role;
    return images.length > 0
      ? { role, content: texts.join("\n"), images }
      : { role, content: texts.join("\n") };
  };
  return messages.map((msg) => {
    if (typeof msg.content === "string") {
      return { role: msg.role, content: msg.content };
    }
    if (Array.isArray(msg.content)) {
      return fromParts(msg);
    }
    // Single part object: only text is supported; others become "".
    return msg.content.type === "text"
      ? { role: msg.role, content: msg.content.text }
      : { role: msg.role, content: "" };
  });
}
/** Wrap an adapter tool definition in Ollama's `function` tool envelope. */
function toOllamaTool(tool) {
  const { name, description, parameters } = tool;
  return {
    type: "function",
    function: { name, description, parameters }
  };
}
// Annotate the CommonJS export names for ESM import in node:
// (dead `0 &&` expression — never executed at runtime; Node's cjs-module-lexer
// statically parses it to discover the named exports of this CJS file)
0 && (module.exports = {
  ollamaAdapter
});
@@ -0,0 +1,17 @@
1
import { O as OllamaConfig, A as AIAdapter } from '../types-FSnS43LM.cjs';

/**
 * Create an Ollama adapter for local models.
 *
 * No external dependencies required — uses native fetch.
 *
 * ```ts
 * import { createAI } from '@matthesketh/utopia-ai';
 * import { ollamaAdapter } from '@matthesketh/utopia-ai/ollama';
 *
 * const ai = createAI(ollamaAdapter({ defaultModel: 'llama3.2' }));
 * ```
 *
 * @param config - Optional Ollama configuration (base URL, default model).
 * @returns An adapter implementing the shared `AIAdapter` interface.
 */
declare function ollamaAdapter(config?: OllamaConfig): AIAdapter;

export { ollamaAdapter };
@@ -0,0 +1,17 @@
1
import { O as OllamaConfig, A as AIAdapter } from '../types-FSnS43LM.js';

/**
 * Create an Ollama adapter for local models.
 *
 * No external dependencies required — uses native fetch.
 *
 * ```ts
 * import { createAI } from '@matthesketh/utopia-ai';
 * import { ollamaAdapter } from '@matthesketh/utopia-ai/ollama';
 *
 * const ai = createAI(ollamaAdapter({ defaultModel: 'llama3.2' }));
 * ```
 *
 * @param config - Optional Ollama configuration (base URL, default model).
 * @returns An adapter implementing the shared `AIAdapter` interface.
 */
declare function ollamaAdapter(config?: OllamaConfig): AIAdapter;

export { ollamaAdapter };