@mastra/core 0.2.0-alpha.84 → 0.2.0-alpha.86

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/README.md +1 -1
  2. package/dist/action/index.d.ts +2 -2
  3. package/dist/agent/index.d.ts +6 -6
  4. package/dist/agent/index.js +1 -1
  5. package/dist/chunk-2ISN3AA7.js +392 -0
  6. package/dist/chunk-2J5OHBUG.js +24 -0
  7. package/dist/{chunk-Y7TKCKRI.js → chunk-5NQ3MEZM.js} +8 -8
  8. package/dist/{chunk-SAXFXAKK.js → chunk-73XDWPXJ.js} +41 -24
  9. package/dist/{chunk-3IV6WDJY.js → chunk-D66E7L7R.js} +1 -1
  10. package/dist/{chunk-3THCTISX.js → chunk-EVYBUFXB.js} +12 -8
  11. package/dist/{chunk-PRYZIZXD.js → chunk-I3MJB67Z.js} +8 -8
  12. package/dist/{chunk-42THOFKJ.js → chunk-RCS7AVH6.js} +1 -1
  13. package/dist/chunk-RLPH6TDJ.js +30 -0
  14. package/dist/embeddings/index.d.ts +2 -2
  15. package/dist/embeddings/index.js +1 -1
  16. package/dist/eval/index.d.ts +2 -2
  17. package/dist/filter/index.d.ts +6 -6
  18. package/dist/{index-62DyKJRU.d.ts → index-Duqv1Yom.d.ts} +340 -322
  19. package/dist/index.d.ts +6 -6
  20. package/dist/index.js +10 -10
  21. package/dist/integration/index.d.ts +7 -7
  22. package/dist/llm/index.d.ts +3 -3
  23. package/dist/llm/model/providers/anthropic-vertex.d.ts +31 -0
  24. package/dist/llm/model/providers/anthropic-vertex.js +23 -0
  25. package/dist/llm/model/providers/anthropic.d.ts +29 -0
  26. package/dist/llm/model/providers/anthropic.js +21 -0
  27. package/dist/llm/model/providers/azure.d.ts +48 -0
  28. package/dist/llm/model/providers/azure.js +50 -0
  29. package/dist/llm/model/providers/baseten.d.ts +33 -0
  30. package/dist/llm/model/providers/baseten.js +29 -0
  31. package/dist/llm/model/providers/bedrock.d.ts +32 -0
  32. package/dist/llm/model/providers/bedrock.js +24 -0
  33. package/dist/llm/model/providers/cerebras.d.ts +30 -0
  34. package/dist/llm/model/providers/cerebras.js +22 -0
  35. package/dist/llm/model/providers/cohere.d.ts +30 -0
  36. package/dist/llm/model/providers/cohere.js +22 -0
  37. package/dist/llm/model/providers/deepinfra.d.ts +30 -0
  38. package/dist/llm/model/providers/deepinfra.js +22 -0
  39. package/dist/llm/model/providers/deepseek.d.ts +30 -0
  40. package/dist/llm/model/providers/deepseek.js +22 -0
  41. package/dist/llm/model/providers/fireworks.d.ts +35 -0
  42. package/dist/llm/model/providers/fireworks.js +40 -0
  43. package/dist/llm/model/providers/google-vertex.d.ts +48 -0
  44. package/dist/llm/model/providers/google-vertex.js +22 -0
  45. package/dist/llm/model/providers/google.d.ts +54 -0
  46. package/dist/llm/model/providers/google.js +23 -0
  47. package/dist/llm/model/providers/grok.d.ts +32 -0
  48. package/dist/llm/model/providers/grok.js +22 -0
  49. package/dist/llm/model/providers/groq.d.ts +37 -0
  50. package/dist/llm/model/providers/groq.js +42 -0
  51. package/dist/llm/model/providers/lmstudio.d.ts +29 -0
  52. package/dist/llm/model/providers/lmstudio.js +22 -0
  53. package/dist/llm/model/providers/mistral.d.ts +30 -0
  54. package/dist/llm/model/providers/mistral.js +22 -0
  55. package/dist/llm/model/providers/mock.d.ts +30 -0
  56. package/dist/llm/model/providers/mock.js +83 -0
  57. package/dist/llm/model/providers/ollama.d.ts +31 -0
  58. package/dist/llm/model/providers/ollama.js +23 -0
  59. package/dist/llm/model/providers/openai-compat.d.ts +39 -0
  60. package/dist/llm/model/providers/openai-compat.js +6 -0
  61. package/dist/llm/model/providers/openai.d.ts +34 -0
  62. package/dist/llm/model/providers/openai.js +6 -0
  63. package/dist/llm/model/providers/openai.test.d.ts +2 -0
  64. package/dist/llm/model/providers/openai.test.js +220 -0
  65. package/dist/llm/model/providers/perplexity.d.ts +30 -0
  66. package/dist/llm/model/providers/perplexity.js +22 -0
  67. package/dist/llm/model/providers/portkey.d.ts +34 -0
  68. package/dist/llm/model/providers/portkey.js +22 -0
  69. package/dist/llm/model/providers/togetherai.d.ts +30 -0
  70. package/dist/llm/model/providers/togetherai.js +22 -0
  71. package/dist/mastra/index.d.ts +3 -3
  72. package/dist/memory/index.d.ts +1 -1
  73. package/dist/memory/index.js +1 -1
  74. package/dist/model-QGWIMOSx.d.ts +31 -0
  75. package/dist/relevance/index.d.ts +2 -2
  76. package/dist/relevance/index.js +2 -2
  77. package/dist/storage/index.d.ts +3 -3
  78. package/dist/storage/index.js +1 -1
  79. package/dist/tools/index.d.ts +3 -3
  80. package/dist/vector/index.js +2 -2
  81. package/dist/{workflow-DGktrYAL.d.ts → workflow-DQ8CtzzU.d.ts} +1 -1
  82. package/dist/workflows/index.d.ts +4 -4
  83. package/package.json +30 -14
package/dist/llm/model/providers/ollama.d.ts
@@ -0,0 +1,31 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type OllamaModel = string & {};
+ declare class OllamaAI extends MastraLLM {
+ constructor({ name, baseURL, headers, fetch, }: {
+ name: OllamaModel;
+ baseURL: string;
+ headers?: Record<string, string>;
+ fetch?: typeof globalThis.fetch;
+ });
+ }
+
+ export { OllamaAI, type OllamaModel };
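Taken together with the implementation below, the declaration shows OllamaAI is a thin wrapper over the ollama-ai-provider factory: the constructor forwards baseURL, headers, and fetch to createOllama and passes the chosen model tag through. A minimal usage sketch follows; the subpath import, the local server URL, and the llama3 tag are assumptions, not confirmed by this diff.

import { OllamaAI } from '@mastra/core/llm/model/providers/ollama'; // assumed export path

// Point the wrapper at a locally running Ollama server (assumed URL and model tag).
const llm = new OllamaAI({
  name: 'llama3',
  baseURL: 'http://localhost:11434/api',
});

// generate() is inherited from MastraLLM (declared in model-QGWIMOSx.d.ts further down).
const { text } = await llm.generate('Say hello in one sentence.');
console.log(text);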
package/dist/llm/model/providers/ollama.js
@@ -0,0 +1,23 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createOllama } from 'ollama-ai-provider';
+
+ var _OllamaAI = class _OllamaAI extends MastraLLM {
+ constructor({ name, baseURL, headers, fetch }) {
+ const ollama = createOllama({
+ baseURL,
+ fetch,
+ headers
+ });
+ super({
+ model: ollama(name)
+ });
+ }
+ };
+ __name(_OllamaAI, "OllamaAI");
+ var OllamaAI = _OllamaAI;
+
+ export { OllamaAI };
package/dist/llm/model/providers/openai-compat.d.ts
@@ -0,0 +1,39 @@
+ import * as ai from 'ai';
+ import { OpenAIChatSettings } from '@ai-sdk/openai/internal';
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ declare function openaiCompat({ baseURL, apiKey, modelName, fetch, settings, }: {
+ baseURL?: string;
+ apiKey?: string;
+ modelName: string;
+ fetch?: typeof globalThis.fetch;
+ settings?: OpenAIChatSettings;
+ }): ai.LanguageModelV1;
+ declare class OpenaiCompat extends MastraLLM {
+ constructor({ name, apiKey, baseURL, fetch, settings, }: {
+ name: string;
+ apiKey?: string;
+ baseURL?: string;
+ fetch?: typeof globalThis.fetch;
+ settings?: OpenAIChatSettings;
+ });
+ }
+
+ export { OpenaiCompat, openaiCompat };
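Per the declaration, openaiCompat returns a bare ai.LanguageModelV1 while OpenaiCompat wraps the same options in a MastraLLM subclass, so an OpenAI-compatible endpoint can be used either directly with the AI SDK or through Mastra. A rough sketch, assuming the subpath import and a hypothetical compatible server on localhost; the URL, model name, and env variable are placeholders.

import { OpenaiCompat } from '@mastra/core/llm/model/providers/openai-compat'; // assumed export path

// Any server that speaks the OpenAI chat API can be targeted (placeholder values).
const llm = new OpenaiCompat({
  name: 'my-local-model',
  baseURL: 'http://localhost:8000/v1',
  apiKey: process.env.LOCAL_LLM_API_KEY,
});

const { text } = await llm.generate('Summarize this diff in one sentence.');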
package/dist/llm/model/providers/openai-compat.js
@@ -0,0 +1,6 @@
+ export { OpenaiCompat, openaiCompat } from '../../../chunk-RLPH6TDJ.js';
+ import '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import '../../../chunk-AJJZUHB4.js';
package/dist/llm/model/providers/openai.d.ts
@@ -0,0 +1,34 @@
+ import { OpenAIChatSettings } from '@ai-sdk/openai/internal';
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type OpenAIModel = 'gpt-4' | 'gpt-4-turbo' | 'gpt-3.5-turbo' | 'gpt-4o' | 'gpt-4o-mini' | 'o1' | 'o1-mini' | 'o1-preview' | 'o1-preview-mini' | 'o3-mini' | (string & {});
+ declare class OpenAI extends MastraLLM {
+ constructor({ name, apiKey, headers, fetch, baseURL, settings, }: {
+ name?: OpenAIModel;
+ apiKey?: string;
+ baseURL?: string;
+ headers?: Record<string, string>;
+ fetch?: typeof globalThis.fetch;
+ settings?: OpenAIChatSettings;
+ });
+ }
+
+ export { OpenAI, type OpenAIModel };
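The OpenAI wrapper is exercised in the new openai.test.js further down: construct it with a model name (apiKey is optional and, as the missing-API-key test implies, falls back to the OPENAI_API_KEY environment variable), then call generate, optionally with a Zod schema as output to get a typed object back. A condensed sketch of that usage, assuming the subpath import.

import { z } from 'zod';
import { OpenAI } from '@mastra/core/llm/model/providers/openai'; // assumed export path

const llm = new OpenAI({ name: 'gpt-4o-mini' });

// Plain text completion.
const { text } = await llm.generate('What is 2+2?');

// Structured output: pass a Zod (or JSON) schema and read response.object.
const schema = z.object({ answer: z.number(), explanation: z.string() });
const response = await llm.generate('What is 2+2?', { output: schema });
console.log(text, response.object.answer);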
package/dist/llm/model/providers/openai.js
@@ -0,0 +1,6 @@
+ export { OpenAI } from '../../../chunk-2J5OHBUG.js';
+ import '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import '../../../chunk-AJJZUHB4.js';
package/dist/llm/model/providers/openai.test.d.ts
@@ -0,0 +1,2 @@
+
+ export { }
package/dist/llm/model/providers/openai.test.js
@@ -0,0 +1,220 @@
+ import { OpenAI } from '../../../chunk-2J5OHBUG.js';
+ import '../../../chunk-2ISN3AA7.js';
+ import { createTool } from '../../../chunk-VOUPGVRD.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { describe, it, expect } from 'vitest';
+ import { z } from 'zod';
+ import 'dotenv/config';
+
+ var calculatorTool = createTool({
+ id: "Calculator",
+ description: "A simple calculator tool",
+ inputSchema: z.object({
+ a: z.number(),
+ b: z.number()
+ }),
+ execute: /* @__PURE__ */ __name(async ({ context }) => {
+ return {
+ result: context.a + context.a
+ };
+ }, "execute")
+ });
+ describe("LLM Class Integration Tests", () => {
+ const llm = new OpenAI({
+ name: "gpt-4o-mini"
+ });
+ describe("OpenAI Integration", () => {
+ it("should generate text completion", async () => {
+ const response = await llm.generate("What is 2+2?");
+ expect(response.text).toBeDefined();
+ expect(typeof response.text).toBe("string");
+ }, 3e4);
+ it("should generate structured output", async () => {
+ const schema = z.object({
+ answer: z.number(),
+ explanation: z.string()
+ });
+ const response = await llm.generate("What is 2+2?", {
+ output: schema
+ });
+ expect(response.object).toBeDefined();
+ expect(response.object.answer).toBe(4);
+ expect(typeof response.object.explanation).toBe("string");
+ }, 3e4);
+ it("should generate structured output using json schema of type object", async () => {
+ const schema = {
+ type: "object",
+ properties: {
+ answer: {
+ type: "number"
+ },
+ explanation: {
+ type: "string"
+ }
+ },
+ additionalProperties: false,
+ required: [
+ "answer",
+ "explanation"
+ ]
+ };
+ const response = await llm.generate("What is 2+2?", {
+ output: schema
+ });
+ expect(response.object).toBeDefined();
+ expect(response.object.answer).toBe(4);
+ expect(typeof response.object.explanation).toBe("string");
+ }, 3e3);
+ it("should generate structured output using json schema of nested object", async () => {
+ const schema = {
+ type: "object",
+ properties: {
+ student: {
+ type: "object",
+ properties: {
+ profile: {
+ type: "object",
+ properties: {
+ id: {
+ type: "number"
+ },
+ name: {
+ type: "string"
+ },
+ questionAttempted: {
+ type: "boolean"
+ }
+ },
+ additionalProperties: false,
+ required: [
+ "id",
+ "name",
+ "questionAttempted"
+ ]
+ },
+ calculation: {
+ type: "array",
+ items: {
+ type: "number"
+ }
+ }
+ },
+ additionalProperties: false,
+ required: [
+ "profile",
+ "calculation"
+ ]
+ }
+ },
+ additionalProperties: false,
+ required: [
+ "student"
+ ]
+ };
+ const response = await llm.generate("Student Sarah Johnson (ID: 78901) is counting from 1 to 5. What are her numbers?", {
+ output: schema
+ });
+ expect(response.object).toBeDefined();
+ console.log("response.object", response.object);
+ expect(typeof response.object.student.profile.id).toBe("number");
+ expect(response.object.student.profile.id).toBe(78901);
+ expect(typeof response.object.student.profile.name).toBe("string");
+ expect(response.object.student.profile.name).toBe("Sarah Johnson");
+ expect(typeof response.object.student.profile.questionAttempted).toBe("boolean");
+ expect(response.object.student.profile.questionAttempted).toBe(true);
+ expect(response.object.student.calculation[0]).toBe(1);
+ expect(response.object.student.calculation[1]).toBe(2);
+ expect(response.object.student.calculation[2]).toBe(3);
+ expect(response.object.student.calculation[3]).toBe(4);
+ expect(response.object.student.calculation[4]).toBe(5);
+ }, 3e3);
+ it("should stream structured output using json schema of type object", async () => {
+ const chunks = [];
+ const schema = {
+ type: "object",
+ properties: {
+ answer: {
+ type: "number"
+ },
+ explanation: {
+ type: "string"
+ }
+ },
+ additionalProperties: false,
+ required: [
+ "answer",
+ "explanation"
+ ]
+ };
+ const response = await llm.stream("What is 2+2?", {
+ output: schema,
+ onFinish: /* @__PURE__ */ __name((text) => {
+ chunks.push(text);
+ return;
+ }, "onFinish")
+ });
+ for await (const chunk of response.textStream) {
+ expect(chunk).toBeDefined();
+ }
+ expect(chunks.length).toBeGreaterThan(0);
+ expect(response.object).toBeDefined();
+ const resObject = await response.object;
+ expect(resObject.answer).toBe(4);
+ expect(typeof resObject.explanation).toBe("string");
+ }, 3e3);
+ it("should stream text completion", async () => {
+ const chunks = [];
+ const response = await llm.stream("Count from 1 to 5.", {
+ onFinish: /* @__PURE__ */ __name((text) => {
+ chunks.push(text);
+ return;
+ }, "onFinish")
+ });
+ for await (const chunk of response.textStream) {
+ expect(chunk).toBeDefined();
+ }
+ expect(chunks.length).toBeGreaterThan(0);
+ }, 3e4);
+ });
+ describe("Tool Integration", () => {
+ const llm2 = new OpenAI({
+ name: "gpt-4"
+ });
+ it("should use tools in generation", async () => {
+ const response = await llm2.generate("What is 123 + 456? Use the calculator tool to find out.", {
+ tools: {
+ calculatorTool
+ }
+ });
+ expect(response.text).toBeDefined();
+ }, 3e4);
+ });
+ describe("Error Handling", () => {
+ const llm2 = new OpenAI({
+ name: "invalid-model"
+ });
+ it("should handle invalid model configurations", async () => {
+ await expect(llm2.generate("test")).rejects.toThrow();
+ });
+ it("should handle missing API keys", async () => {
+ const originalKey = process.env.OPENAI_API_KEY;
+ delete process.env.OPENAI_API_KEY;
+ await expect(llm2.generate("test")).rejects.toThrow();
+ process.env.OPENAI_API_KEY = originalKey;
+ });
+ });
+ describe("Rate Limiting", () => {
+ const rateLimitLLM = new OpenAI({
+ name: "gpt-3.5-turbo"
+ });
+ it("should handle rate limits gracefully", async () => {
+ const promises = Array(5).fill(null).map(() => rateLimitLLM.generate("What is 2+2?"));
+ const results = await Promise.allSettled(promises);
+ const successfulResults = results.filter((r) => r.status === "fulfilled");
+ expect(successfulResults.length).toBeGreaterThan(0);
+ }, 6e4);
+ });
+ });
package/dist/llm/model/providers/perplexity.d.ts
@@ -0,0 +1,30 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type PerplexityModel = 'sonar' | 'sonar-pro' | (string & {});
+ declare class Perplexity extends MastraLLM {
+ constructor({ name, apiKey, baseURL, }?: {
+ name?: PerplexityModel;
+ apiKey?: string;
+ baseURL?: string;
+ });
+ }
+
+ export { Perplexity, type PerplexityModel };
package/dist/llm/model/providers/perplexity.js
@@ -0,0 +1,22 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createPerplexity } from '@ai-sdk/perplexity';
+
+ var _Perplexity = class _Perplexity extends MastraLLM {
+ constructor({ name, apiKey, baseURL } = {}) {
+ const perplexityModel = createPerplexity({
+ baseURL,
+ apiKey
+ });
+ super({
+ model: perplexityModel(name || "sonar")
+ });
+ }
+ };
+ __name(_Perplexity, "Perplexity");
+ var Perplexity = _Perplexity;
+
+ export { Perplexity };
package/dist/llm/model/providers/portkey.d.ts
@@ -0,0 +1,34 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ declare class Portkey extends MastraLLM {
+ constructor({ portkeyApiKey, portkeyConfig, }: {
+ portkeyApiKey: string;
+ portkeyConfig: {
+ provider: string;
+ api_key: string;
+ override_params: {
+ model: string;
+ };
+ };
+ });
+ }
+
+ export { Portkey };
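Unlike the other providers, Portkey takes its model selection through the nested portkeyConfig object rather than a name parameter; the implementation below simply hands that object to createPortkey and asks for its chat model. A rough sketch of the expected shape follows; every value is a placeholder and the field semantics (which upstream provider Portkey routes to, and which model it overrides) are inferred from the field names, not stated in this diff.

import { Portkey } from '@mastra/core/llm/model/providers/portkey'; // assumed export path

const llm = new Portkey({
  portkeyApiKey: process.env.PORTKEY_API_KEY!,
  portkeyConfig: {
    provider: 'openai',                    // upstream provider (assumed meaning)
    api_key: process.env.OPENAI_API_KEY!,  // key for that upstream provider
    override_params: {
      model: 'gpt-4o-mini',                // model forwarded to the upstream provider
    },
  },
});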
package/dist/llm/model/providers/portkey.js
@@ -0,0 +1,22 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createPortkey } from '@portkey-ai/vercel-provider';
+
+ var _Portkey = class _Portkey extends MastraLLM {
+ constructor({ portkeyApiKey, portkeyConfig }) {
+ const portkey = createPortkey({
+ apiKey: portkeyApiKey,
+ config: portkeyConfig
+ });
+ super({
+ model: portkey.chatModel("")
+ });
+ }
+ };
+ __name(_Portkey, "Portkey");
+ var Portkey = _Portkey;
+
+ export { Portkey };
package/dist/llm/model/providers/togetherai.d.ts
@@ -0,0 +1,30 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type TogetherAiModel = 'codellama/CodeLlama-34b-Instruct-hf' | 'upstage/SOLAR-10.7B-Instruct-v1.0' | 'mistralai/Mixtral-8x7B-v0.1' | 'WhereIsAI/UAE-Large-V1' | 'black-forest-labs/FLUX.1-depth' | 'togethercomputer/m2-bert-80M-32k-retrieval' | 'black-forest-labs/FLUX.1-canny' | 'black-forest-labs/FLUX.1-dev' | 'black-forest-labs/FLUX.1-redux' | 'BAAI/bge-large-en-v1.5' | 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo' | 'togethercomputer/Llama-3-8b-chat-hf-int4' | 'stabilityai/stable-diffusion-xl-base-1.0' | 'Gryphe/MythoMax-L2-13b' | 'meta-llama/Meta-Llama-3-8B' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'deepseek-ai/deepseek-llm-67b-chat' | 'togethercomputer/m2-bert-80M-8k-retrieval' | 'llava-hf/llava-v1.6-mistral-7b-hf' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'databricks/dbrx-instruct' | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro' | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite' | 'scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct' | 'microsoft/WizardLM-2-8x22B' | 'BAAI/bge-base-en-v1.5' | 'togethercomputer/m2-bert-80M-2k-retrieval' | 'google/gemma-2b-it' | 'meta-llama/Llama-2-70b-hf' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'meta-llama/LlamaGuard-2-8b' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-v0.1' | 'black-forest-labs/FLUX.1-pro' | 'black-forest-labs/FLUX.1-schnell' | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'Meta-Llama/Llama-Guard-7b' | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite' | 'google/gemma-2-27b-it' | 'meta-llama/Llama-3-8b-chat-hf' | 'Qwen/Qwen2.5-72B-Instruct-Turbo' | 'Salesforce/Llama-Rank-V1' | 'meta-llama/Llama-Guard-3-11B-Vision-Turbo' | 'google/gemma-2-9b-it' | 'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo' | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO' | 'meta-llama/Llama-3-70b-chat-hf' | 'Qwen/Qwen2.5-7B-Instruct-Turbo' | 'scb10x/scb10x-llama3-typhoon-v1-5x-4f316' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'Gryphe/MythoMax-L2-13b-Lite' | 'black-forest-labs/FLUX.1-schnell-Free' | 'meta-llama/Llama-2-7b-chat-hf' | 'meta-llama/Meta-Llama-Guard-3-8B' | 'togethercomputer/Llama-3-8b-chat-hf-int8' | 'meta-llama/Llama-Vision-Free' | 'Qwen/Qwen2-72B-Instruct' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'black-forest-labs/FLUX.1.1-pro' | 'meta-llama/Llama-3.2-3B-Instruct-Turbo' | 'meta-llama/Llama-2-13b-chat-hf' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'Nexusflow/NexusRaven-V2-13B' | 'bert-base-uncased' | 'WizardLM/WizardLM-13B-V1.2' | 'google/gemma-7b' | 'togethercomputer/Koala-7B' | 'zero-one-ai/Yi-34B' | 'togethercomputer/StripedHyena-Hessian-7B' | 'teknium/OpenHermes-2-Mistral-7B' | 'Qwen/Qwen2-7B-Instruct' | 'togethercomputer/guanaco-65b' | 'togethercomputer/llama-2-7b' | 'hazyresearch/M2-BERT-2k-Retrieval-Encoder-V1' | 'huggyllama/llama-7b' | 'Undi95/ReMM-SLERP-L2-13B' | 'NousResearch/Nous-Capybara-7B-V1p9' | 'lmsys/vicuna-7b-v1.3' | 'Undi95/Toppy-M-7B' | 'Qwen/Qwen2-72B' | 'NousResearch/Nous-Hermes-Llama2-70b' | 'WizardLM/WizardLM-70B-V1.0' | 'huggyllama/llama-65b' | 'lmsys/vicuna-13b-v1.5-16k' | 'openchat/openchat-3.5-1210' | 'Qwen/Qwen1.5-0.5B' | 'Qwen/Qwen1.5-4B' | 'Qwen/Qwen1.5-7B' | 'snorkelai/Snorkel-Mistral-PairRM-DPO' | 'Qwen/Qwen1.5-14B-Chat' | 'Qwen/Qwen1.5-1.8B-Chat' | 'Snowflake/snowflake-arctic-instruct' | 'togethercomputer/llama-2-13b' | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT' | 'deepseek-ai/deepseek-coder-33b-instruct' | 'togethercomputer/CodeLlama-7b-Instruct' | 
'NousResearch/Nous-Hermes-Llama2-13b' | 'lmsys/vicuna-13b-v1.5' | 'togethercomputer/guanaco-13b' | 'togethercomputer/CodeLlama-34b-Instruct' | 'togethercomputer/llama-2-70b' | 'codellama/CodeLlama-13b-Instruct-hf' | 'Qwen/Qwen2-7B' | 'Qwen/Qwen2-1.5B' | 'togethercomputer/CodeLlama-13b-Instruct' | 'meta-llama/Llama-2-13b-hf' | 'togethercomputer/llama-2-13b-chat' | 'huggyllama/llama-30b' | 'NousResearch/Nous-Hermes-2-Mistral-7B-DPO' | 'togethercomputer/alpaca-7b' | 'google/gemma-7b-it' | 'allenai/OLMo-7B' | 'togethercomputer/guanaco-33b' | 'togethercomputer/llama-2-7b-chat' | 'togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4' | 'togethercomputer/guanaco-7b' | 'Open-Orca/Mistral-7B-OpenOrca' | 'Qwen/Qwen1.5-32B' | 'EleutherAI/llemma_7b' | 'NousResearch/Nous-Hermes-llama-2-7b' | 'Qwen/Qwen1.5-32B-Chat' | 'meta-llama/Meta-Llama-3-70B' | 'meta-llama/Llama-3-8b-hf' | 'sentence-transformers/msmarco-bert-base-dot-v5' | 'zero-one-ai/Yi-6B' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'teknium/OpenHermes-2p5-Mistral-7B' | 'Qwen/Qwen1.5-4B-Chat' | 'wavymulder/Analog-Diffusion' | 'runwayml/stable-diffusion-v1-5' | 'prompthero/openjourney' | 'meta-llama/Llama-2-7b-hf' | 'SG161222/Realistic_Vision_V3.0_VAE' | 'Qwen/Qwen1.5-0.5B-Chat' | 'codellama/CodeLlama-7b-Instruct-hf' | 'google/gemma-2b' | 'mistralai/Mixtral-8x22B' | 'meta-llama/Llama-2-70b-chat-hf' | 'zero-one-ai/Yi-34B-Chat' | 'google/gemma-2-9b' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'togethercomputer/LLaMA-2-7B-32K' | 'codellama/CodeLlama-70b-Instruct-hf' | 'NousResearch/Hermes-2-Theta-Llama-3-70B' | 'test/test11' | 'stabilityai/stable-diffusion-2-1' | 'microsoft/phi-2' | 'Qwen/Qwen1.5-7B-Chat' | 'cognitivecomputations/dolphin-2.5-mixtral-8x7b' | 'togethercomputer/evo-1-131k-base' | 'togethercomputer/evo-1-8k-base' | 'togethercomputer/llama-2-70b-chat' | 'Qwen/Qwen1.5-14B' | 'carson/ml318br' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Reference' | 'meta-llama/Meta-Llama-3.1-8B-Reference' | 'gradientai/Llama-3-70B-Instruct-Gradient-1048k' | 'meta-llama/Meta-Llama-3.1-70B-Reference' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Reference' | 'meta-llama/Llama-3-70b-hf' | 'Qwen/Qwen2-1.5B-Instruct' | 'NousResearch/Nous-Hermes-13b' | 'HuggingFaceH4/zephyr-7b-beta' | 'Austism/chronos-hermes-13b' | 'Qwen/Qwen1.5-1.8B' | 'Qwen/Qwen1.5-72B' | 'lmsys/vicuna-13b-v1.3' | 'huggyllama/llama-13b' | 'garage-bAInd/Platypus2-70B-instruct' | 'allenai/OLMo-7B-Instruct' | 'togethercomputer/Koala-13B' | 'lmsys/vicuna-7b-v1.5' | (string & {});
+ declare class TogetherAI extends MastraLLM {
+ constructor({ name, apiKey, baseURL, }?: {
+ name?: TogetherAiModel;
+ apiKey?: string;
+ baseURL?: string;
+ });
+ }
+
+ export { TogetherAI, type TogetherAiModel };
package/dist/llm/model/providers/togetherai.js
@@ -0,0 +1,22 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createTogetherAI } from '@ai-sdk/togetherai';
+
+ var _TogetherAI = class _TogetherAI extends MastraLLM {
+ constructor({ name = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", apiKey = process.env.TOGETHER_AI_API_KEY || "", baseURL = "https://api.together.xyz/v1" } = {}) {
+ const togetheraiModel = createTogetherAI({
+ baseURL,
+ apiKey
+ });
+ super({
+ model: togetheraiModel(name)
+ });
+ }
+ };
+ __name(_TogetherAI, "TogetherAI");
+ var TogetherAI = _TogetherAI;
+
+ export { TogetherAI };
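TogetherAI is the one provider in this diff whose constructor defaults every option: with no arguments it reads TOGETHER_AI_API_KEY from the environment, targets https://api.together.xyz/v1, and selects meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo. A zero-config sketch under those defaults (the subpath import is an assumption):

import { TogetherAI } from '@mastra/core/llm/model/providers/togetherai'; // assumed export path

// Relies entirely on the defaults shown above; only TOGETHER_AI_API_KEY needs to be set.
const llm = new TogetherAI();

const { text } = await llm.generate('Name three Llama model variants.');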
package/dist/mastra/index.d.ts
@@ -1,7 +1,7 @@
- import { A as Agent, M as MastraStorage, a as MastraMemory, b as ModelConfig, L as LLM } from '../index-62DyKJRU.js';
+ import { A as Agent, M as MastraStorage, a as MastraMemory, b as ModelConfig, L as LLM } from '../index-Duqv1Yom.js';
  import { M as MastraEngine } from '../engine-EwEG-4Iv.js';
  import { L as Logger, B as BaseLogMessage } from '../index-B0-NXUYv.js';
- import { W as Workflow } from '../workflow-DGktrYAL.js';
+ import { W as Workflow } from '../workflow-DQ8CtzzU.js';
  import { O as OtelConfig, T as Telemetry } from '../telemetry-oCUM52DG.js';
  import { MastraTTS } from '../tts/index.js';
  import { MastraVector } from '../vector/index.js';
@@ -12,8 +12,8 @@ import 'zod';
  import '../base.js';
  import '@opentelemetry/api';
  import '../metric-D2V4CR8D.js';
- import '../types-M16hSruO.js';
  import 'sift';
+ import '../types-M16hSruO.js';
  import 'pino';
  import 'stream';
  import 'xstate';
package/dist/memory/index.d.ts
@@ -1,7 +1,7 @@
  import 'ai';
  import '../base.js';
  import '../types-M16hSruO.js';
- export { an as AiMessageType, a as MastraMemory, ar as MemoryConfig, aq as MessageResponse, ao as MessageType, as as SharedMemoryConfig, ap as StorageThreadType } from '../index-62DyKJRU.js';
+ export { aq as AiMessageType, a as MastraMemory, au as MemoryConfig, at as MessageResponse, ar as MessageType, av as SharedMemoryConfig, as as StorageThreadType } from '../index-Duqv1Yom.js';
  import '../vector/index.js';
  import '@opentelemetry/api';
  import '../index-B0-NXUYv.js';
package/dist/memory/index.js
@@ -1,4 +1,4 @@
- export { MastraMemory } from '../chunk-PRYZIZXD.js';
+ export { MastraMemory } from '../chunk-I3MJB67Z.js';
  import '../chunk-JCRGAEY6.js';
  import '../chunk-TJK6TGSR.js';
  import '../chunk-AJJZUHB4.js';
package/dist/model-QGWIMOSx.d.ts
@@ -0,0 +1,31 @@
+ import * as ai from 'ai';
+ import { LanguageModel, Tool, CoreMessage } from 'ai';
+ import { JSONSchema7 } from 'json-schema';
+ import { ZodSchema } from 'zod';
+ import { c as MastraLLMBase, d as MastraPrimitives, T as ToolsInput, e as LLMTextOptions, C as CoreTool, f as LLMTextObjectOptions, g as LLMInnerStreamOptions, h as LLMStreamObjectOptions, i as LLMStreamOptions, G as GenerateReturn, S as StreamReturn } from './index-Duqv1Yom.js';
+ import './telemetry-oCUM52DG.js';
+
+ declare class MastraLLM extends MastraLLMBase {
+ #private;
+ constructor({ model, mastra }: {
+ model: LanguageModel;
+ mastra?: MastraPrimitives;
+ });
+ convertTools(tools?: ToolsInput): Record<string, Tool>;
+ __text({ runId, messages, maxSteps, tools, convertedTools, temperature, toolChoice, onStepFinish, }: LLMTextOptions): Promise<ai.GenerateTextResult<{
+ [x: string]: CoreTool;
+ } | {
+ [x: string]: Tool;
+ }, never>>;
+ __textObject<T>({ messages, onStepFinish, maxSteps, tools, convertedTools, structuredOutput, runId, temperature, }: LLMTextObjectOptions<T>): Promise<ai.GenerateObjectResult<T>>;
+ __stream({ messages, onStepFinish, onFinish, maxSteps, tools, convertedTools, runId, temperature, }: LLMInnerStreamOptions): Promise<ai.StreamTextResult<{
+ [x: string]: CoreTool;
+ } | {
+ [x: string]: Tool;
+ }, never>>;
+ __streamObject<T>({ messages, onStepFinish, onFinish, maxSteps, tools, convertedTools, structuredOutput, runId, temperature, }: LLMStreamObjectOptions<T>): Promise<ai.StreamObjectResult<ai.DeepPartial<T>, T, never>>;
+ generate<Z extends ZodSchema | JSONSchema7 | undefined = undefined>(messages: string | string[] | CoreMessage[], { maxSteps, onStepFinish, tools, convertedTools, runId, output, temperature, }?: LLMStreamOptions<Z>): Promise<GenerateReturn<Z>>;
+ stream<Z extends ZodSchema | JSONSchema7 | undefined = undefined>(messages: string | string[] | CoreMessage[], { maxSteps, onFinish, onStepFinish, tools, convertedTools, runId, output, temperature, }?: LLMStreamOptions<Z>): Promise<StreamReturn<Z>>;
+ }
+
+ export { MastraLLM as M };
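This shared MastraLLM base is what gives every provider above the same generate/stream surface: the Z type parameter ties the output option (a Zod schema or JSON Schema) to the return type. The streaming half mirrors what the integration tests exercise: stream() resolves to a result whose textStream is async-iterable, with onFinish invoked when the stream completes. A minimal sketch, reusing the OpenAI wrapper from earlier (import path assumed):

import { OpenAI } from '@mastra/core/llm/model/providers/openai'; // any MastraLLM subclass would do

const llm = new OpenAI({ name: 'gpt-4o-mini' });

const result = await llm.stream('Count from 1 to 5.', {
  onFinish: (text) => console.log('finished:', text),
});

// textStream yields text chunks as they arrive.
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}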
package/dist/relevance/index.d.ts
@@ -1,4 +1,4 @@
- import { $ as CustomModelConfig } from '../index-62DyKJRU.js';
+ import { a9 as CustomModelConfig } from '../index-Duqv1Yom.js';
  import 'ai';
  import 'json-schema';
  import 'zod';
@@ -11,8 +11,8 @@ import '../telemetry-oCUM52DG.js';
  import '@opentelemetry/sdk-node';
  import '@opentelemetry/sdk-trace-base';
  import '../metric-D2V4CR8D.js';
- import '../types-M16hSruO.js';
  import 'sift';
+ import '../types-M16hSruO.js';
  import '../vector/index.js';
  import '../engine-EwEG-4Iv.js';
  import '../tts/index.js';
package/dist/relevance/index.js
@@ -1,5 +1,5 @@
- export { CohereRelevanceScorer, MastraAgentRelevanceScorer, createSimilarityPrompt } from '../chunk-3IV6WDJY.js';
- import '../chunk-SAXFXAKK.js';
+ export { CohereRelevanceScorer, MastraAgentRelevanceScorer, createSimilarityPrompt } from '../chunk-D66E7L7R.js';
+ import '../chunk-73XDWPXJ.js';
  import '../chunk-HBTQNIAX.js';
  import '../chunk-SDKEPBBH.js';
  import '../chunk-6WJREZ5F.js';