@yourgpt/llm-sdk 0.1.0 → 0.1.1

Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +145 -450
  11. package/dist/index.d.ts +145 -450
  12. package/dist/index.js +1837 -307
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1827 -305
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
package/dist/providers/ollama/index.mjs
@@ -0,0 +1,232 @@
+ import { generateMessageId } from '@yourgpt/copilot-sdk/core';
+
+ // src/adapters/ollama.ts
+
+ // src/adapters/base.ts
+ function formatMessages(messages, systemPrompt) {
+   const formatted = [];
+   if (systemPrompt) {
+     formatted.push({ role: "system", content: systemPrompt });
+   }
+   for (const msg of messages) {
+     formatted.push({
+       role: msg.role,
+       content: msg.content ?? ""
+     });
+   }
+   return formatted;
+ }
+
+ // src/adapters/ollama.ts
+ var OllamaAdapter = class {
+   constructor(config = {}) {
+     this.provider = "ollama";
+     this.config = config;
+     this.model = config.model || "llama3";
+     this.baseUrl = config.baseUrl || "http://localhost:11434";
+   }
+   async *stream(request) {
+     const messages = formatMessages(request.messages, request.systemPrompt);
+     const messageId = generateMessageId();
+     yield { type: "message:start", id: messageId };
+     try {
+       const response = await fetch(`${this.baseUrl}/api/chat`, {
+         method: "POST",
+         headers: {
+           "Content-Type": "application/json"
+         },
+         body: JSON.stringify({
+           model: request.config?.model || this.model,
+           messages,
+           stream: true,
+           options: {
+             temperature: request.config?.temperature ?? this.config.temperature,
+             num_predict: request.config?.maxTokens ?? this.config.maxTokens
+           }
+         }),
+         signal: request.signal
+       });
+       if (!response.ok) {
+         throw new Error(`Ollama API error: ${response.status}`);
+       }
+       if (!response.body) {
+         throw new Error("No response body");
+       }
+       const reader = response.body.getReader();
+       const decoder = new TextDecoder();
+       let buffer = "";
+       while (true) {
+         const { done, value } = await reader.read();
+         if (done) break;
+         buffer += decoder.decode(value, { stream: true });
+         const lines = buffer.split("\n");
+         buffer = lines.pop() || "";
+         for (const line of lines) {
+           if (!line.trim()) continue;
+           try {
+             const chunk = JSON.parse(line);
+             if (chunk.message?.content) {
+               yield { type: "message:delta", content: chunk.message.content };
+             }
+             if (chunk.done) {
+               break;
+             }
+           } catch {
+           }
+         }
+       }
+       yield { type: "message:end" };
+       yield { type: "done" };
+     } catch (error) {
+       if (error.name === "AbortError") {
+         yield { type: "done" };
+       } else {
+         yield {
+           type: "error",
+           message: error instanceof Error ? error.message : "Unknown error",
+           code: "OLLAMA_ERROR"
+         };
+       }
+     }
+   }
+ };
+ function createOllamaAdapter(config) {
+   return new OllamaAdapter(config);
+ }
+
+ // src/providers/ollama/index.ts
+ var OLLAMA_MODELS = {
+   // Llama series
+   llama3: {
+     vision: false,
+     tools: true,
+     maxTokens: 8192
+   },
+   "llama3:70b": {
+     vision: false,
+     tools: true,
+     maxTokens: 8192
+   },
+   "llama3.2": {
+     vision: false,
+     tools: true,
+     maxTokens: 8192
+   },
+   "llama3.2-vision": {
+     vision: true,
+     tools: true,
+     maxTokens: 8192
+   },
+   // Mistral series
+   mistral: {
+     vision: false,
+     tools: true,
+     maxTokens: 8192
+   },
+   "mistral-nemo": {
+     vision: false,
+     tools: true,
+     maxTokens: 128e3
+   },
+   mixtral: {
+     vision: false,
+     tools: true,
+     maxTokens: 32768
+   },
+   // CodeLlama
+   codellama: {
+     vision: false,
+     tools: false,
+     maxTokens: 16384
+   },
+   // Phi series
+   phi3: {
+     vision: false,
+     tools: true,
+     maxTokens: 4096
+   },
+   "phi3:medium": {
+     vision: false,
+     tools: true,
+     maxTokens: 4096
+   },
+   // Gemma series
+   gemma2: {
+     vision: false,
+     tools: false,
+     maxTokens: 8192
+   },
+   "gemma2:27b": {
+     vision: false,
+     tools: false,
+     maxTokens: 8192
+   },
+   // Qwen series
+   qwen2: {
+     vision: false,
+     tools: true,
+     maxTokens: 32768
+   },
+   "qwen2.5-coder": {
+     vision: false,
+     tools: true,
+     maxTokens: 32768
+   },
+   // LLaVA (vision)
+   llava: {
+     vision: true,
+     tools: false,
+     maxTokens: 4096
+   },
+   // DeepSeek
+   deepseek: {
+     vision: false,
+     tools: true,
+     maxTokens: 16384
+   },
+   "deepseek-coder": {
+     vision: false,
+     tools: false,
+     maxTokens: 16384
+   }
+ };
+ var DEFAULT_MODEL_CAPS = {
+   vision: false,
+   tools: false,
+   maxTokens: 4096
+ };
+ function createOllama(config = {}) {
+   const baseUrl = config.baseUrl ?? "http://localhost:11434";
+   return {
+     name: "ollama",
+     supportedModels: Object.keys(OLLAMA_MODELS),
+     languageModel(modelId) {
+       return createOllamaAdapter({
+         model: modelId,
+         baseUrl
+       });
+     },
+     getCapabilities(modelId) {
+       const baseModelName = modelId.split(":")[0];
+       const model = OLLAMA_MODELS[modelId] ?? OLLAMA_MODELS[baseModelName] ?? DEFAULT_MODEL_CAPS;
+       return {
+         supportsVision: model.vision,
+         supportsTools: model.tools,
+         supportsThinking: false,
+         supportsStreaming: true,
+         supportsPDF: false,
+         supportsAudio: false,
+         supportsVideo: false,
+         maxTokens: model.maxTokens,
+         supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif"] : [],
+         supportsJsonMode: false,
+         supportsSystemMessages: true
+       };
+     }
+   };
+ }
+ var createOllamaProvider = createOllama;
+
+ export { createOllama, createOllamaProvider };
+ //# sourceMappingURL=index.mjs.map
+ //# sourceMappingURL=index.mjs.map
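The compiled provider above gives the package a local-model entry point: createOllama() returns a provider whose languageModel() wraps the streaming OllamaAdapter, and whose getCapabilities() falls back to the base model name and then to DEFAULT_MODEL_CAPS. A minimal consumer sketch follows, based only on the code in this hunk; the '@yourgpt/llm-sdk/ollama' import path is an assumption by analogy with the '@yourgpt/llm-sdk/openai' subpath shown further down, and the message literal is simplified.

```ts
// Import path assumed from the package's provider subpath layout; not confirmed by this hunk.
import { createOllama } from "@yourgpt/llm-sdk/ollama";

// Provider pointed at a local Ollama server (the default baseUrl in the compiled code).
const ollama = createOllama({ baseUrl: "http://localhost:11434" });

// Capability lookup tries the exact model ID, then the base name before ":":
// "llama3.2-vision:11b" resolves to OLLAMA_MODELS["llama3.2-vision"] (vision: true, 8192 tokens).
const caps = ollama.getCapabilities("llama3.2-vision:11b");
console.log(caps.supportsVision, caps.maxTokens);

// languageModel() returns the OllamaAdapter; stream() is an async generator of
// message:start / message:delta / message:end / done (or error) events.
const adapter = ollama.languageModel("llama3");
for await (const event of adapter.stream({
  // Simplified message shape; the real Message type comes from @yourgpt/copilot-sdk/core.
  messages: [{ role: "user", content: "Say hello in one sentence." }],
  systemPrompt: "Be brief.",
})) {
  if (event.type === "message:delta") process.stdout.write(event.content);
}
```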
package/dist/providers/ollama/index.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../../src/adapters/base.ts","../../../src/adapters/ollama.ts","../../../src/providers/ollama/index.ts"],"names":[],"mappings":";;;;;AAyEO,SAAS,cAAA,CACd,UACA,YAAA,EAC0C;AAC1C,EAAA,MAAM,YAAsD,EAAC;AAG7D,EAAA,IAAI,YAAA,EAAc;AAChB,IAAA,SAAA,CAAU,KAAK,EAAE,IAAA,EAAM,QAAA,EAAU,OAAA,EAAS,cAAc,CAAA;AAAA,EAC1D;AAGA,EAAA,KAAA,MAAW,OAAO,QAAA,EAAU;AAC1B,IAAA,SAAA,CAAU,IAAA,CAAK;AAAA,MACb,MAAM,GAAA,CAAI,IAAA;AAAA,MACV,OAAA,EAAS,IAAI,OAAA,IAAW;AAAA,KACzB,CAAA;AAAA,EACH;AAEA,EAAA,OAAO,SAAA;AACT;;;AC3EO,IAAM,gBAAN,MAA0C;AAAA,EAO/C,WAAA,CAAY,MAAA,GAA8B,EAAC,EAAG;AAN9C,IAAA,IAAA,CAAS,QAAA,GAAW,QAAA;AAOlB,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,KAAA,GAAQ,OAAO,KAAA,IAAS,QAAA;AAC7B,IAAA,IAAA,CAAK,OAAA,GAAU,OAAO,OAAA,IAAW,wBAAA;AAAA,EACnC;AAAA,EAEA,OAAO,OAAO,OAAA,EAA6D;AACzE,IAAA,MAAM,QAAA,GAAW,cAAA,CAAe,OAAA,CAAQ,QAAA,EAAU,QAAQ,YAAY,CAAA;AAEtE,IAAA,MAAM,YAAY,iBAAA,EAAkB;AAGpC,IAAA,MAAM,EAAE,IAAA,EAAM,eAAA,EAAiB,EAAA,EAAI,SAAA,EAAU;AAE7C,IAAA,IAAI;AACF,MAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,OAAO,CAAA,SAAA,CAAA,EAAa;AAAA,QACvD,MAAA,EAAQ,MAAA;AAAA,QACR,OAAA,EAAS;AAAA,UACP,cAAA,EAAgB;AAAA,SAClB;AAAA,QACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,UACnB,KAAA,EAAO,OAAA,CAAQ,MAAA,EAAQ,KAAA,IAAS,IAAA,CAAK,KAAA;AAAA,UACrC,QAAA;AAAA,UACA,MAAA,EAAQ,IAAA;AAAA,UACR,OAAA,EAAS;AAAA,YACP,WAAA,EAAa,OAAA,CAAQ,MAAA,EAAQ,WAAA,IAAe,KAAK,MAAA,CAAO,WAAA;AAAA,YACxD,WAAA,EAAa,OAAA,CAAQ,MAAA,EAAQ,SAAA,IAAa,KAAK,MAAA,CAAO;AAAA;AACxD,SACD,CAAA;AAAA,QACD,QAAQ,OAAA,CAAQ;AAAA,OACjB,CAAA;AAED,MAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AAChB,QAAA,MAAM,IAAI,KAAA,CAAM,CAAA,kBAAA,EAAqB,QAAA,CAAS,MAAM,CAAA,CAAE,CAAA;AAAA,MACxD;AAEA,MAAA,IAAI,CAAC,SAAS,IAAA,EAAM;AAClB,QAAA,MAAM,IAAI,MAAM,kBAAkB,CAAA;AAAA,MACpC;AAEA,MAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,MAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,MAAA,IAAI,MAAA,GAAS,EAAA;AAEb,MAAA,OAAO,IAAA,EAAM;AACX,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAC/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACxB,UAAA,IAAI,CAAC,IAAA,CAAK,IAAA,EAAK,EAAG;AAElB,UAAA,IAAI;AACF,YAAA,MAAM,KAAA,GAAQ,IAAA,CAAK,KAAA,CAAM,IAAI,CAAA;AAE7B,YAAA,IAAI,KAAA,CAAM,SAAS,OAAA,EAAS;AAC1B,cAAA,MAAM,EAAE,IAAA,EAAM,eAAA,EAAiB,OAAA,EAAS,KAAA,CAAM,QAAQ,OAAA,EAAQ;AAAA,YAChE;AAGA,YAAA,IAAI,MAAM,IAAA,EAAM;AACd,cAAA;AAAA,YACF;AAAA,UACF,CAAA,CAAA,MAAQ;AAAA,UAER;AAAA,QACF;AAAA,MACF;AAEA,MAAA,MAAM,EAAE,MAAM,aAAA,EAAc;AAC5B,MAAA,MAAM,EAAE,MAAM,MAAA,EAAO;AAAA,IACvB,SAAS,KAAA,EAAO;AACd,MAAA,IAAK,KAAA,CAAgB,SAAS,YAAA,EAAc;AAC1C,QAAA,MAAM,EAAE,MAAM,MAAA,EAAO;AAAA,MACvB,CAAA,MAAO;AACL,QAAA,MAAM;AAAA,UACJ,IAAA,EAAM,OAAA;AAAA,UACN,OAAA,EAAS,KAAA,YAAiB,KAAA,GAAQ,KAAA,CAAM,OAAA,GAAU,eAAA;AAAA,UAClD,IAAA,EAAM;AAAA,SACR;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF,CAAA;AAKO,SAAS,oBACd,MAAA,EACe;AACf,EAAA,OAAO,IAAI,cAAc,MAAM,CAAA;AACjC;;;AC/FA,IAAM,aAAA,GAAmD;AAAA;AAAA,EAEvD,MAAA,EAAQ;AAAA,IACN,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,YAAA,EAAc;AAAA,IACZ,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,UAAA,EAAY;AAAA,IACV,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,iBAAA,EAAmB;AAAA,IACjB,MAAA,EAAQ,IAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA;AAAA,EAGA,OAAA,EAAS;AAAA,IACP,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,cAAA,EAAgB;AAAA,IACd,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAA
A;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,OAAA,EAAS;AAAA,IACP,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA;AAAA,EAGA,SAAA,EAAW;AAAA,IACT,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,KAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA;AAAA,EAGA,IAAA,EAAM;AAAA,IACJ,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,aAAA,EAAe;AAAA,IACb,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA;AAAA,EAGA,MAAA,EAAQ;AAAA,IACN,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,KAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,YAAA,EAAc;AAAA,IACZ,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,KAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA;AAAA,EAGA,KAAA,EAAO;AAAA,IACL,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,eAAA,EAAiB;AAAA,IACf,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA;AAAA,EAGA,KAAA,EAAO;AAAA,IACL,MAAA,EAAQ,IAAA;AAAA,IACR,KAAA,EAAO,KAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA;AAAA,EAGA,QAAA,EAAU;AAAA,IACR,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,IAAA;AAAA,IACP,SAAA,EAAW;AAAA,GACb;AAAA,EACA,gBAAA,EAAkB;AAAA,IAChB,MAAA,EAAQ,KAAA;AAAA,IACR,KAAA,EAAO,KAAA;AAAA,IACP,SAAA,EAAW;AAAA;AAEf,CAAA;AAGA,IAAM,kBAAA,GAAwC;AAAA,EAC5C,MAAA,EAAQ,KAAA;AAAA,EACR,KAAA,EAAO,KAAA;AAAA,EACP,SAAA,EAAW;AACb,CAAA;AAeO,SAAS,YAAA,CAAa,MAAA,GAA+B,EAAC,EAAe;AAC1E,EAAA,MAAM,OAAA,GAAU,OAAO,OAAA,IAAW,wBAAA;AAElC,EAAA,OAAO;AAAA,IACL,IAAA,EAAM,QAAA;AAAA,IACN,eAAA,EAAiB,MAAA,CAAO,IAAA,CAAK,aAAa,CAAA;AAAA,IAE1C,cAAc,OAAA,EAAiB;AAC7B,MAAA,OAAO,mBAAA,CAAoB;AAAA,QACzB,KAAA,EAAO,OAAA;AAAA,QACP;AAAA,OACD,CAAA;AAAA,IACH,CAAA;AAAA,IAEA,gBAAgB,OAAA,EAAuC;AAErD,MAAA,MAAM,aAAA,GAAgB,OAAA,CAAQ,KAAA,CAAM,GAAG,EAAE,CAAC,CAAA;AAC1C,MAAA,MAAM,QACJ,aAAA,CAAc,OAAO,CAAA,IACrB,aAAA,CAAc,aAAa,CAAA,IAC3B,kBAAA;AAEF,MAAA,OAAO;AAAA,QACL,gBAAgB,KAAA,CAAM,MAAA;AAAA,QACtB,eAAe,KAAA,CAAM,KAAA;AAAA,QACrB,gBAAA,EAAkB,KAAA;AAAA,QAClB,iBAAA,EAAmB,IAAA;AAAA,QACnB,WAAA,EAAa,KAAA;AAAA,QACb,aAAA,EAAe,KAAA;AAAA,QACf,aAAA,EAAe,KAAA;AAAA,QACf,WAAW,KAAA,CAAM,SAAA;AAAA,QACjB,mBAAA,EAAqB,MAAM,MAAA,GACvB,CAAC,aAAa,YAAA,EAAc,WAAW,IACvC,EAAC;AAAA,QACL,gBAAA,EAAkB,KAAA;AAAA,QAClB,sBAAA,EAAwB;AAAA,OAC1B;AAAA,IACF;AAAA,GACF;AACF;AAGO,IAAM,oBAAA,GAAuB","file":"index.mjs","sourcesContent":["import type {\n Message,\n MessageAttachment,\n ActionDefinition,\n StreamEvent,\n LLMConfig,\n} from \"@yourgpt/copilot-sdk/core\";\n\n/**\n * Chat completion request\n */\nexport interface ChatCompletionRequest {\n /** Conversation messages */\n messages: Message[];\n /**\n * Raw provider-formatted messages (for agent loop with tool calls)\n * When provided, these are used instead of converting from Message[]\n * This allows passing messages with tool_calls and tool role\n */\n rawMessages?: Array<Record<string, unknown>>;\n /** Available actions/tools */\n actions?: ActionDefinition[];\n /** System prompt */\n systemPrompt?: string;\n /** LLM configuration overrides */\n config?: Partial<LLMConfig>;\n /** Abort signal for cancellation */\n signal?: AbortSignal;\n}\n\n/**\n * Non-streaming completion result\n */\nexport interface CompletionResult {\n /** Text content */\n content: string;\n /** Tool calls */\n toolCalls: Array<{ id: string; name: string; args: Record<string, unknown> }>;\n /** Thinking content (if extended thinking enabled) */\n thinking?: string;\n /** Raw provider response for debugging */\n rawResponse: Record<string, unknown>;\n}\n\n/**\n * Base LLM adapter interface\n */\nexport interface LLMAdapter {\n /** Provider name */\n readonly provider: string;\n\n /** Model name */\n readonly model: 
string;\n\n /**\n * Stream a chat completion\n */\n stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;\n\n /**\n * Non-streaming chat completion (for debugging/comparison)\n */\n complete?(request: ChatCompletionRequest): Promise<CompletionResult>;\n}\n\n/**\n * Adapter factory function type\n */\nexport type AdapterFactory = (config: LLMConfig) => LLMAdapter;\n\n/**\n * Convert messages to provider format (simple text only)\n */\nexport function formatMessages(\n messages: Message[],\n systemPrompt?: string,\n): Array<{ role: string; content: string }> {\n const formatted: Array<{ role: string; content: string }> = [];\n\n // Add system prompt if provided\n if (systemPrompt) {\n formatted.push({ role: \"system\", content: systemPrompt });\n }\n\n // Add conversation messages\n for (const msg of messages) {\n formatted.push({\n role: msg.role,\n content: msg.content ?? \"\",\n });\n }\n\n return formatted;\n}\n\n/**\n * Convert ActionParameter to JSON Schema format recursively\n */\nfunction parameterToJsonSchema(param: {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n}): Record<string, unknown> {\n const schema: Record<string, unknown> = {\n type: param.type,\n };\n\n if (param.description) {\n schema.description = param.description;\n }\n\n if (param.enum) {\n schema.enum = param.enum;\n }\n\n // Handle array items\n if (param.type === \"array\" && param.items) {\n schema.items = parameterToJsonSchema(\n param.items as {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n },\n );\n }\n\n // Handle nested object properties\n if (param.type === \"object\" && param.properties) {\n schema.properties = Object.fromEntries(\n Object.entries(param.properties).map(([key, prop]) => [\n key,\n parameterToJsonSchema(\n prop as {\n type: string;\n description?: string;\n enum?: string[];\n items?: unknown;\n properties?: Record<string, unknown>;\n },\n ),\n ]),\n );\n }\n\n return schema;\n}\n\n/**\n * Convert actions to OpenAI tool format\n */\nexport function formatTools(actions: ActionDefinition[]): Array<{\n type: \"function\";\n function: {\n name: string;\n description: string;\n parameters: object;\n };\n}> {\n return actions.map((action) => ({\n type: \"function\" as const,\n function: {\n name: action.name,\n description: action.description,\n parameters: {\n type: \"object\",\n properties: action.parameters\n ? Object.fromEntries(\n Object.entries(action.parameters).map(([key, param]) => [\n key,\n parameterToJsonSchema(param),\n ]),\n )\n : {},\n required: action.parameters\n ? 
Object.entries(action.parameters)\n .filter(([, param]) => param.required)\n .map(([key]) => key)\n : [],\n },\n },\n }));\n}\n\n// ============================================\n// Vision/Multimodal Support\n// ============================================\n\n/**\n * Content block types for multimodal messages\n */\nexport type AnthropicContentBlock =\n | { type: \"text\"; text: string }\n | {\n type: \"image\";\n source:\n | {\n type: \"base64\";\n media_type: string;\n data: string;\n }\n | {\n type: \"url\";\n url: string;\n };\n }\n | {\n type: \"document\";\n source:\n | {\n type: \"base64\";\n media_type: string;\n data: string;\n }\n | {\n type: \"url\";\n url: string;\n };\n };\n\nexport type OpenAIContentBlock =\n | { type: \"text\"; text: string }\n | {\n type: \"image_url\";\n image_url: {\n url: string;\n detail?: \"low\" | \"high\" | \"auto\";\n };\n };\n\n/**\n * Check if a message has image attachments\n * Supports both new format (metadata.attachments) and legacy (attachments)\n */\nexport function hasImageAttachments(message: Message): boolean {\n const attachments = message.metadata?.attachments;\n return attachments?.some((a) => a.type === \"image\") ?? false;\n}\n\n/**\n * Check if a message has media attachments (images or PDFs)\n */\nexport function hasMediaAttachments(message: Message): boolean {\n const attachments = message.metadata?.attachments;\n return (\n attachments?.some(\n (a) =>\n a.type === \"image\" ||\n (a.type === \"file\" && a.mimeType === \"application/pdf\"),\n ) ?? false\n );\n}\n\n/**\n * Convert MessageAttachment to Anthropic image content block\n *\n * Anthropic format:\n * {\n * type: \"image\",\n * source: {\n * type: \"base64\",\n * media_type: \"image/png\",\n * data: \"base64data...\"\n * }\n * }\n */\nexport function attachmentToAnthropicImage(\n attachment: MessageAttachment,\n): AnthropicContentBlock | null {\n if (attachment.type !== \"image\") return null;\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n return {\n type: \"image\",\n source: {\n type: \"url\",\n url: attachment.url,\n },\n };\n }\n\n // Fall back to base64 data\n if (!attachment.data) return null;\n\n // Extract base64 data (remove data URI prefix if present)\n let base64Data = attachment.data;\n if (base64Data.startsWith(\"data:\")) {\n const commaIndex = base64Data.indexOf(\",\");\n if (commaIndex !== -1) {\n base64Data = base64Data.slice(commaIndex + 1);\n }\n }\n\n return {\n type: \"image\",\n source: {\n type: \"base64\",\n media_type: attachment.mimeType || \"image/png\",\n data: base64Data,\n },\n };\n}\n\n/**\n * Convert MessageAttachment to OpenAI image_url content block\n *\n * OpenAI format:\n * {\n * type: \"image_url\",\n * image_url: {\n * url: \"data:image/png;base64,...\"\n * }\n * }\n */\nexport function attachmentToOpenAIImage(\n attachment: MessageAttachment,\n): OpenAIContentBlock | null {\n if (attachment.type !== \"image\") return null;\n\n let imageUrl: string;\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n imageUrl = attachment.url;\n } else if (attachment.data) {\n // Build data URI if not already one\n imageUrl = attachment.data.startsWith(\"data:\")\n ? 
attachment.data\n : `data:${attachment.mimeType || \"image/png\"};base64,${attachment.data}`;\n } else {\n return null;\n }\n\n return {\n type: \"image_url\",\n image_url: {\n url: imageUrl,\n detail: \"auto\",\n },\n };\n}\n\n/**\n * Convert MessageAttachment (PDF) to Anthropic document content block\n *\n * Anthropic format:\n * {\n * type: \"document\",\n * source: {\n * type: \"base64\",\n * media_type: \"application/pdf\",\n * data: \"base64data...\"\n * }\n * }\n */\nexport function attachmentToAnthropicDocument(\n attachment: MessageAttachment,\n): AnthropicContentBlock | null {\n // Only handle PDF files\n if (attachment.type !== \"file\" || attachment.mimeType !== \"application/pdf\") {\n return null;\n }\n\n // Use URL if available (cloud storage)\n if (attachment.url) {\n return {\n type: \"document\",\n source: {\n type: \"url\",\n url: attachment.url,\n },\n };\n }\n\n // Fall back to base64 data\n if (!attachment.data) return null;\n\n // Extract base64 data (remove data URI prefix if present)\n let base64Data = attachment.data;\n if (base64Data.startsWith(\"data:\")) {\n const commaIndex = base64Data.indexOf(\",\");\n if (commaIndex !== -1) {\n base64Data = base64Data.slice(commaIndex + 1);\n }\n }\n\n return {\n type: \"document\",\n source: {\n type: \"base64\",\n media_type: \"application/pdf\",\n data: base64Data,\n },\n };\n}\n\n/**\n * Convert a Message to Anthropic multimodal content blocks\n */\nexport function messageToAnthropicContent(\n message: Message,\n): string | AnthropicContentBlock[] {\n const attachments = message.metadata?.attachments;\n const content = message.content ?? \"\";\n\n // If no media attachments (images or PDFs), return simple string\n if (!hasMediaAttachments(message)) {\n return content;\n }\n\n // Build content blocks array\n const blocks: AnthropicContentBlock[] = [];\n\n // Add media attachments first (Claude recommends media before text)\n if (attachments) {\n for (const attachment of attachments) {\n // Try image first\n const imageBlock = attachmentToAnthropicImage(attachment);\n if (imageBlock) {\n blocks.push(imageBlock);\n continue;\n }\n // Try document (PDF)\n const docBlock = attachmentToAnthropicDocument(attachment);\n if (docBlock) {\n blocks.push(docBlock);\n }\n }\n }\n\n // Add text content\n if (content) {\n blocks.push({ type: \"text\", text: content });\n }\n\n return blocks;\n}\n\n/**\n * Convert a Message to OpenAI multimodal content blocks\n */\nexport function messageToOpenAIContent(\n message: Message,\n): string | OpenAIContentBlock[] {\n const attachments = message.metadata?.attachments;\n const content = message.content ?? 
\"\";\n\n // If no image attachments, return simple string\n if (!hasImageAttachments(message)) {\n return content;\n }\n\n // Build content blocks array\n const blocks: OpenAIContentBlock[] = [];\n\n // Add text content first\n if (content) {\n blocks.push({ type: \"text\", text: content });\n }\n\n // Add image attachments\n if (attachments) {\n for (const attachment of attachments) {\n const imageBlock = attachmentToOpenAIImage(attachment);\n if (imageBlock) {\n blocks.push(imageBlock);\n }\n }\n }\n\n return blocks;\n}\n\n/**\n * Anthropic content block types (extended for tools)\n */\nexport type AnthropicToolUseBlock = {\n type: \"tool_use\";\n id: string;\n name: string;\n input: Record<string, unknown>;\n};\n\nexport type AnthropicToolResultBlock = {\n type: \"tool_result\";\n tool_use_id: string;\n content: string;\n};\n\nexport type AnthropicMessageContent =\n | string\n | Array<\n AnthropicContentBlock | AnthropicToolUseBlock | AnthropicToolResultBlock\n >;\n\n/**\n * Format messages for Anthropic with full tool support\n * Handles: text, images, tool_use, and tool_result\n *\n * Key differences from OpenAI:\n * - tool_calls become tool_use blocks in assistant content\n * - tool results become tool_result blocks in user content\n */\nexport function formatMessagesForAnthropic(\n messages: Message[],\n systemPrompt?: string,\n): {\n system: string;\n messages: Array<{\n role: \"user\" | \"assistant\";\n content: AnthropicMessageContent;\n }>;\n} {\n const formatted: Array<{\n role: \"user\" | \"assistant\";\n content: AnthropicMessageContent;\n }> = [];\n\n for (let i = 0; i < messages.length; i++) {\n const msg = messages[i];\n\n if (msg.role === \"system\") continue; // System handled separately\n\n if (msg.role === \"assistant\") {\n // Build content array for assistant\n const content: Array<AnthropicContentBlock | AnthropicToolUseBlock> = [];\n\n // Add text content if present\n if (msg.content) {\n content.push({ type: \"text\", text: msg.content });\n }\n\n // Convert tool_calls to tool_use blocks\n if (msg.tool_calls && msg.tool_calls.length > 0) {\n for (const tc of msg.tool_calls) {\n content.push({\n type: \"tool_use\",\n id: tc.id,\n name: tc.function.name,\n input: JSON.parse(tc.function.arguments),\n });\n }\n }\n\n formatted.push({\n role: \"assistant\",\n content:\n content.length === 1 && content[0].type === \"text\"\n ? (content[0] as { type: \"text\"; text: string }).text\n : content,\n });\n } else if (msg.role === \"tool\" && msg.tool_call_id) {\n // Tool results go in user message as tool_result blocks\n // Group consecutive tool messages together\n const toolResults: AnthropicToolResultBlock[] = [\n {\n type: \"tool_result\",\n tool_use_id: msg.tool_call_id,\n content: msg.content ?? \"\",\n },\n ];\n\n // Look ahead for more consecutive tool messages\n while (i + 1 < messages.length && messages[i + 1].role === \"tool\") {\n i++;\n const nextTool = messages[i];\n if (nextTool.tool_call_id) {\n toolResults.push({\n type: \"tool_result\",\n tool_use_id: nextTool.tool_call_id,\n content: nextTool.content ?? 
\"\",\n });\n }\n }\n\n formatted.push({\n role: \"user\",\n content: toolResults,\n });\n } else if (msg.role === \"user\") {\n formatted.push({\n role: \"user\",\n content: messageToAnthropicContent(msg),\n });\n }\n }\n\n return {\n system: systemPrompt || \"\",\n messages: formatted,\n };\n}\n\n/**\n * OpenAI message format with tool support\n */\nexport type OpenAIMessage =\n | { role: \"system\"; content: string }\n | { role: \"user\"; content: string | OpenAIContentBlock[] }\n | {\n role: \"assistant\";\n content: string | null;\n tool_calls?: Array<{\n id: string;\n type: \"function\";\n function: { name: string; arguments: string };\n }>;\n }\n | { role: \"tool\"; content: string; tool_call_id: string };\n\n/**\n * Format messages for OpenAI with full tool support\n * Handles: text, images, tool_calls, and tool results\n */\nexport function formatMessagesForOpenAI(\n messages: Message[],\n systemPrompt?: string,\n): OpenAIMessage[] {\n const formatted: OpenAIMessage[] = [];\n\n // Add system prompt if provided\n if (systemPrompt) {\n formatted.push({ role: \"system\", content: systemPrompt });\n }\n\n for (const msg of messages) {\n if (msg.role === \"system\") {\n formatted.push({ role: \"system\", content: msg.content ?? \"\" });\n } else if (msg.role === \"user\") {\n formatted.push({\n role: \"user\",\n content: messageToOpenAIContent(msg),\n });\n } else if (msg.role === \"assistant\") {\n const assistantMsg: OpenAIMessage = {\n role: \"assistant\",\n content: msg.content,\n };\n if (msg.tool_calls && msg.tool_calls.length > 0) {\n (assistantMsg as { tool_calls: typeof msg.tool_calls }).tool_calls =\n msg.tool_calls;\n }\n formatted.push(assistantMsg);\n } else if (msg.role === \"tool\" && msg.tool_call_id) {\n formatted.push({\n role: \"tool\",\n content: msg.content ?? \"\",\n tool_call_id: msg.tool_call_id,\n });\n }\n }\n\n return formatted;\n}\n","import type { LLMConfig, StreamEvent } from \"@yourgpt/copilot-sdk/core\";\nimport { generateMessageId } from \"@yourgpt/copilot-sdk/core\";\nimport type { LLMAdapter, ChatCompletionRequest } from \"./base\";\nimport { formatMessages } from \"./base\";\n\n/**\n * Ollama adapter configuration\n */\nexport interface OllamaAdapterConfig extends Partial<LLMConfig> {\n model?: string;\n baseUrl?: string;\n}\n\n/**\n * Ollama LLM Adapter (Local models)\n *\n * Supports: Llama 3, Mistral, Phi, Gemma, CodeLlama, etc.\n */\nexport class OllamaAdapter implements LLMAdapter {\n readonly provider = \"ollama\";\n readonly model: string;\n\n private baseUrl: string;\n private config: OllamaAdapterConfig;\n\n constructor(config: OllamaAdapterConfig = {}) {\n this.config = config;\n this.model = config.model || \"llama3\";\n this.baseUrl = config.baseUrl || \"http://localhost:11434\";\n }\n\n async *stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent> {\n const messages = formatMessages(request.messages, request.systemPrompt);\n\n const messageId = generateMessageId();\n\n // Emit message start\n yield { type: \"message:start\", id: messageId };\n\n try {\n const response = await fetch(`${this.baseUrl}/api/chat`, {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n },\n body: JSON.stringify({\n model: request.config?.model || this.model,\n messages,\n stream: true,\n options: {\n temperature: request.config?.temperature ?? this.config.temperature,\n num_predict: request.config?.maxTokens ?? 
this.config.maxTokens,\n },\n }),\n signal: request.signal,\n });\n\n if (!response.ok) {\n throw new Error(`Ollama API error: ${response.status}`);\n }\n\n if (!response.body) {\n throw new Error(\"No response body\");\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = \"\";\n\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split(\"\\n\");\n buffer = lines.pop() || \"\";\n\n for (const line of lines) {\n if (!line.trim()) continue;\n\n try {\n const chunk = JSON.parse(line);\n\n if (chunk.message?.content) {\n yield { type: \"message:delta\", content: chunk.message.content };\n }\n\n // Ollama indicates completion with done: true\n if (chunk.done) {\n break;\n }\n } catch {\n // Skip invalid JSON\n }\n }\n }\n\n yield { type: \"message:end\" };\n yield { type: \"done\" };\n } catch (error) {\n if ((error as Error).name === \"AbortError\") {\n yield { type: \"done\" };\n } else {\n yield {\n type: \"error\",\n message: error instanceof Error ? error.message : \"Unknown error\",\n code: \"OLLAMA_ERROR\",\n };\n }\n }\n }\n}\n\n/**\n * Create Ollama adapter\n */\nexport function createOllamaAdapter(\n config?: OllamaAdapterConfig,\n): OllamaAdapter {\n return new OllamaAdapter(config);\n}\n","/**\n * Ollama Provider\n *\n * Wraps the existing OllamaAdapter with provider interface.\n * Ollama runs models locally on your machine.\n */\n\nimport { createOllamaAdapter } from \"../../adapters/ollama\";\nimport type {\n AIProvider,\n ProviderCapabilities,\n OllamaProviderConfig,\n} from \"../types\";\n\n// ============================================\n// Model Definitions\n// ============================================\n\ninterface ModelCapabilities {\n vision: boolean;\n tools: boolean;\n maxTokens: number;\n}\n\n// Common Ollama models - users can run any model\nconst OLLAMA_MODELS: Record<string, ModelCapabilities> = {\n // Llama series\n llama3: {\n vision: false,\n tools: true,\n maxTokens: 8192,\n },\n \"llama3:70b\": {\n vision: false,\n tools: true,\n maxTokens: 8192,\n },\n \"llama3.2\": {\n vision: false,\n tools: true,\n maxTokens: 8192,\n },\n \"llama3.2-vision\": {\n vision: true,\n tools: true,\n maxTokens: 8192,\n },\n\n // Mistral series\n mistral: {\n vision: false,\n tools: true,\n maxTokens: 8192,\n },\n \"mistral-nemo\": {\n vision: false,\n tools: true,\n maxTokens: 128000,\n },\n mixtral: {\n vision: false,\n tools: true,\n maxTokens: 32768,\n },\n\n // CodeLlama\n codellama: {\n vision: false,\n tools: false,\n maxTokens: 16384,\n },\n\n // Phi series\n phi3: {\n vision: false,\n tools: true,\n maxTokens: 4096,\n },\n \"phi3:medium\": {\n vision: false,\n tools: true,\n maxTokens: 4096,\n },\n\n // Gemma series\n gemma2: {\n vision: false,\n tools: false,\n maxTokens: 8192,\n },\n \"gemma2:27b\": {\n vision: false,\n tools: false,\n maxTokens: 8192,\n },\n\n // Qwen series\n qwen2: {\n vision: false,\n tools: true,\n maxTokens: 32768,\n },\n \"qwen2.5-coder\": {\n vision: false,\n tools: true,\n maxTokens: 32768,\n },\n\n // LLaVA (vision)\n llava: {\n vision: true,\n tools: false,\n maxTokens: 4096,\n },\n\n // DeepSeek\n deepseek: {\n vision: false,\n tools: true,\n maxTokens: 16384,\n },\n \"deepseek-coder\": {\n vision: false,\n tools: false,\n maxTokens: 16384,\n },\n};\n\n// Default capabilities for unknown models\nconst DEFAULT_MODEL_CAPS: ModelCapabilities = {\n vision: false,\n tools: false,\n maxTokens: 
4096,\n};\n\n// ============================================\n// Provider Implementation\n// ============================================\n\n/**\n * Create an Ollama provider\n *\n * @example\n * ```typescript\n * const ollama = createOllama({ baseUrl: 'http://localhost:11434' });\n * const adapter = ollama.languageModel('llama3');\n * ```\n */\nexport function createOllama(config: OllamaProviderConfig = {}): AIProvider {\n const baseUrl = config.baseUrl ?? \"http://localhost:11434\";\n\n return {\n name: \"ollama\",\n supportedModels: Object.keys(OLLAMA_MODELS),\n\n languageModel(modelId: string) {\n return createOllamaAdapter({\n model: modelId,\n baseUrl,\n });\n },\n\n getCapabilities(modelId: string): ProviderCapabilities {\n // Try exact match first, then try base model name\n const baseModelName = modelId.split(\":\")[0];\n const model =\n OLLAMA_MODELS[modelId] ??\n OLLAMA_MODELS[baseModelName] ??\n DEFAULT_MODEL_CAPS;\n\n return {\n supportsVision: model.vision,\n supportsTools: model.tools,\n supportsThinking: false,\n supportsStreaming: true,\n supportsPDF: false,\n supportsAudio: false,\n supportsVideo: false,\n maxTokens: model.maxTokens,\n supportedImageTypes: model.vision\n ? [\"image/png\", \"image/jpeg\", \"image/gif\"]\n : [],\n supportsJsonMode: false,\n supportsSystemMessages: true,\n };\n },\n };\n}\n\n// Alias for consistency\nexport const createOllamaProvider = createOllama;\n"]}
package/dist/providers/openai/index.d.mts
@@ -0,0 +1,82 @@
+ import { L as LanguageModel } from '../../types-CdORv1Yu.mjs';
+ import { b as OpenAIProviderConfig, A as AIProvider } from '../../types-BBCZ3Fxy.mjs';
+ import 'zod';
+ import '@yourgpt/copilot-sdk/core';
+ import '../../base-D_FyHFKj.mjs';
+
+ /**
+  * OpenAI Provider - Modern Pattern
+  *
+  * Returns a LanguageModel instance that can be used directly with
+  * generateText() and streamText().
+  *
+  * @example
+  * ```ts
+  * import { openai } from '@yourgpt/llm-sdk/openai';
+  * import { generateText } from '@yourgpt/llm-sdk';
+  *
+  * const result = await generateText({
+  *   model: openai('gpt-4o'),
+  *   prompt: 'Hello!',
+  * });
+  * ```
+  */
+
+ interface OpenAIProviderOptions {
+   /** API key (defaults to OPENAI_API_KEY env var) */
+   apiKey?: string;
+   /** Base URL for API (defaults to https://api.openai.com/v1) */
+   baseURL?: string;
+   /** Organization ID */
+   organization?: string;
+   /** Default headers */
+   headers?: Record<string, string>;
+ }
+ /**
+  * Create an OpenAI language model
+  *
+  * @param modelId - Model ID (e.g., 'gpt-4o', 'gpt-4o-mini')
+  * @param options - Provider options
+  * @returns LanguageModel instance
+  *
+  * @example
+  * ```ts
+  * // Basic usage
+  * const model = openai('gpt-4o');
+  *
+  * // With custom options
+  * const model = openai('gpt-4o', {
+  *   apiKey: 'sk-...',
+  *   baseURL: 'https://custom-endpoint.com/v1',
+  * });
+  * ```
+  */
+ declare function openai(modelId: string, options?: OpenAIProviderOptions): LanguageModel;
+
+ /**
+  * OpenAI Provider
+  *
+  * Modern pattern: openai('gpt-4o') returns a LanguageModel
+  * Legacy pattern: createOpenAI({ apiKey }) returns an AIProvider
+  */
+
+ /**
+  * Create an OpenAI provider (Legacy API)
+  *
+  * @deprecated Use `import { openai } from '@yourgpt/llm-sdk/openai'` instead.
+  *
+  * @example
+  * ```typescript
+  * // OLD (deprecated):
+  * const provider = createOpenAI({ apiKey: '...' });
+  * const adapter = provider.languageModel('gpt-4o');
+  *
+  * // NEW (recommended):
+  * import { openai } from '@yourgpt/llm-sdk/openai';
+  * const model = openai('gpt-4o');
+  * ```
+  */
+ declare function createOpenAI(config?: OpenAIProviderConfig): AIProvider;
+ declare const createOpenAIProvider: typeof createOpenAI;
+
+ export { type OpenAIProviderOptions, createOpenAI, openai as createOpenAIModel, createOpenAIProvider, openai };
package/dist/providers/openai/index.d.ts
@@ -0,0 +1,82 @@
+ import { L as LanguageModel } from '../../types-CdORv1Yu.js';
+ import { b as OpenAIProviderConfig, A as AIProvider } from '../../types-DcoCaVVC.js';
+ import 'zod';
+ import '@yourgpt/copilot-sdk/core';
+ import '../../base-D_FyHFKj.js';
+
+ /**
+  * OpenAI Provider - Modern Pattern
+  *
+  * Returns a LanguageModel instance that can be used directly with
+  * generateText() and streamText().
+  *
+  * @example
+  * ```ts
+  * import { openai } from '@yourgpt/llm-sdk/openai';
+  * import { generateText } from '@yourgpt/llm-sdk';
+  *
+  * const result = await generateText({
+  *   model: openai('gpt-4o'),
+  *   prompt: 'Hello!',
+  * });
+  * ```
+  */
+
+ interface OpenAIProviderOptions {
+   /** API key (defaults to OPENAI_API_KEY env var) */
+   apiKey?: string;
+   /** Base URL for API (defaults to https://api.openai.com/v1) */
+   baseURL?: string;
+   /** Organization ID */
+   organization?: string;
+   /** Default headers */
+   headers?: Record<string, string>;
+ }
+ /**
+  * Create an OpenAI language model
+  *
+  * @param modelId - Model ID (e.g., 'gpt-4o', 'gpt-4o-mini')
+  * @param options - Provider options
+  * @returns LanguageModel instance
+  *
+  * @example
+  * ```ts
+  * // Basic usage
+  * const model = openai('gpt-4o');
+  *
+  * // With custom options
+  * const model = openai('gpt-4o', {
+  *   apiKey: 'sk-...',
+  *   baseURL: 'https://custom-endpoint.com/v1',
+  * });
+  * ```
+  */
+ declare function openai(modelId: string, options?: OpenAIProviderOptions): LanguageModel;
+
+ /**
+  * OpenAI Provider
+  *
+  * Modern pattern: openai('gpt-4o') returns a LanguageModel
+  * Legacy pattern: createOpenAI({ apiKey }) returns an AIProvider
+  */
+
+ /**
+  * Create an OpenAI provider (Legacy API)
+  *
+  * @deprecated Use `import { openai } from '@yourgpt/llm-sdk/openai'` instead.
+  *
+  * @example
+  * ```typescript
+  * // OLD (deprecated):
+  * const provider = createOpenAI({ apiKey: '...' });
+  * const adapter = provider.languageModel('gpt-4o');
+  *
+  * // NEW (recommended):
+  * import { openai } from '@yourgpt/llm-sdk/openai';
+  * const model = openai('gpt-4o');
+  * ```
+  */
+ declare function createOpenAI(config?: OpenAIProviderConfig): AIProvider;
+ declare const createOpenAIProvider: typeof createOpenAI;
+
+ export { type OpenAIProviderOptions, createOpenAI, openai as createOpenAIModel, createOpenAIProvider, openai };