@mastra/core 0.2.0-alpha.93 → 0.2.0-alpha.95

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. package/README.md +11 -8
  2. package/dist/action/index.d.ts +1 -3
  3. package/dist/agent/index.d.ts +2 -4
  4. package/dist/agent/index.js +3 -5
  5. package/dist/{chunk-SDKEPBBH.js → chunk-4ZUSEHLH.js} +109 -12
  6. package/dist/{chunk-F7ILHRX5.js → chunk-5DZIXJRV.js} +1 -1
  7. package/dist/chunk-AE3H2QEY.js +1 -0
  8. package/dist/{chunk-IE37CBXB.js → chunk-G4LP2IJU.js} +1 -3
  9. package/dist/{chunk-JWVCENG2.js → chunk-JY322VLW.js} +445 -32
  10. package/dist/{chunk-ECUVL2G5.js → chunk-K3N7KJHH.js} +1 -1
  11. package/dist/chunk-MBOUQZQT.js +16 -0
  12. package/dist/{chunk-GGYXCZUW.js → chunk-N44BCOWP.js} +4 -12
  13. package/dist/{chunk-WBPDZBUT.js → chunk-TYIBRZOY.js} +2 -17
  14. package/dist/eval/index.d.ts +1 -3
  15. package/dist/index-CBxGSZZE.d.ts +808 -0
  16. package/dist/index.d.ts +6 -12
  17. package/dist/index.js +12 -26
  18. package/dist/integration/index.d.ts +5 -7
  19. package/dist/llm/index.d.ts +4 -6
  20. package/dist/llm/index.js +1 -6
  21. package/dist/mastra/index.d.ts +2 -5
  22. package/dist/mastra/index.js +2 -6
  23. package/dist/memory/index.d.ts +1 -3
  24. package/dist/memory/index.js +1 -1
  25. package/dist/relevance/index.d.ts +2 -19
  26. package/dist/relevance/index.js +4 -6
  27. package/dist/storage/index.d.ts +2 -4
  28. package/dist/storage/index.js +2 -3
  29. package/dist/telemetry/index.js +1 -2
  30. package/dist/tools/index.d.ts +2 -4
  31. package/dist/tts/index.js +2 -3
  32. package/dist/vector/index.d.ts +0 -7
  33. package/dist/vector/index.js +1 -2
  34. package/dist/vector/libsql/index.d.ts +0 -1
  35. package/dist/vector/libsql/index.js +2 -3
  36. package/dist/{workflow-CSwxE-4q.d.ts → workflow-BA2Pnq90.d.ts} +1 -1
  37. package/dist/workflows/index.d.ts +3 -5
  38. package/package.json +20 -56
  39. package/dist/chunk-5DYJZVB7.js +0 -66
  40. package/dist/chunk-6ZVFVYLE.js +0 -101
  41. package/dist/chunk-AD6IIOCI.js +0 -11
  42. package/dist/chunk-BOS3IA23.js +0 -76
  43. package/dist/chunk-CQU72KBX.js +0 -54
  44. package/dist/chunk-HH5JIATB.js +0 -36
  45. package/dist/chunk-IEFQZ3IL.js +0 -78
  46. package/dist/chunk-IJ55HGH4.js +0 -24
  47. package/dist/chunk-NNNAWW2H.js +0 -66
  48. package/dist/chunk-NYBGZL6Z.js +0 -30
  49. package/dist/chunk-QX2CHXA7.js +0 -408
  50. package/dist/chunk-RAR7V3E3.js +0 -66
  51. package/dist/chunk-RI3ECMVF.js +0 -636
  52. package/dist/chunk-VPSYTVWP.js +0 -66
  53. package/dist/chunk-ZA7MIXNW.js +0 -66
  54. package/dist/embeddings/index.d.ts +0 -8
  55. package/dist/embeddings/index.js +0 -2
  56. package/dist/embeddings/model/providers/bedrock.d.ts +0 -42
  57. package/dist/embeddings/model/providers/bedrock.js +0 -3
  58. package/dist/embeddings/model/providers/bedrock.test.d.ts +0 -2
  59. package/dist/embeddings/model/providers/bedrock.test.js +0 -37
  60. package/dist/embeddings/model/providers/cohere.d.ts +0 -34
  61. package/dist/embeddings/model/providers/cohere.js +0 -3
  62. package/dist/embeddings/model/providers/cohere.test.d.ts +0 -2
  63. package/dist/embeddings/model/providers/cohere.test.js +0 -37
  64. package/dist/embeddings/model/providers/embedder.d.ts +0 -28
  65. package/dist/embeddings/model/providers/embedder.js +0 -2
  66. package/dist/embeddings/model/providers/google.d.ts +0 -34
  67. package/dist/embeddings/model/providers/google.js +0 -3
  68. package/dist/embeddings/model/providers/google.test.d.ts +0 -2
  69. package/dist/embeddings/model/providers/google.test.js +0 -37
  70. package/dist/embeddings/model/providers/mistral.d.ts +0 -34
  71. package/dist/embeddings/model/providers/mistral.js +0 -3
  72. package/dist/embeddings/model/providers/mistral.test.d.ts +0 -2
  73. package/dist/embeddings/model/providers/mistral.test.js +0 -37
  74. package/dist/embeddings/model/providers/mock.d.ts +0 -20
  75. package/dist/embeddings/model/providers/mock.js +0 -3
  76. package/dist/embeddings/model/providers/mock.test.d.ts +0 -2
  77. package/dist/embeddings/model/providers/mock.test.js +0 -29
  78. package/dist/embeddings/model/providers/openai.d.ts +0 -34
  79. package/dist/embeddings/model/providers/openai.js +0 -3
  80. package/dist/embeddings/model/providers/openai.test.d.ts +0 -2
  81. package/dist/embeddings/model/providers/openai.test.js +0 -37
  82. package/dist/embeddings/model/providers/voyage.d.ts +0 -34
  83. package/dist/embeddings/model/providers/voyage.js +0 -3
  84. package/dist/embeddings/model/providers/voyage.test.d.ts +0 -2
  85. package/dist/embeddings/model/providers/voyage.test.js +0 -37
  86. package/dist/index-B48181D5.d.ts +0 -1070
  87. package/dist/llm/model/index.d.ts +0 -45
  88. package/dist/llm/model/index.js +0 -5
  89. package/dist/llm/model/providers/anthropic-vertex.d.ts +0 -31
  90. package/dist/llm/model/providers/anthropic-vertex.js +0 -23
  91. package/dist/llm/model/providers/anthropic.d.ts +0 -29
  92. package/dist/llm/model/providers/anthropic.js +0 -21
  93. package/dist/llm/model/providers/azure.d.ts +0 -48
  94. package/dist/llm/model/providers/azure.js +0 -50
  95. package/dist/llm/model/providers/baseten.d.ts +0 -33
  96. package/dist/llm/model/providers/baseten.js +0 -29
  97. package/dist/llm/model/providers/bedrock.d.ts +0 -32
  98. package/dist/llm/model/providers/bedrock.js +0 -24
  99. package/dist/llm/model/providers/cerebras.d.ts +0 -30
  100. package/dist/llm/model/providers/cerebras.js +0 -22
  101. package/dist/llm/model/providers/cohere.d.ts +0 -30
  102. package/dist/llm/model/providers/cohere.js +0 -22
  103. package/dist/llm/model/providers/deepinfra.d.ts +0 -30
  104. package/dist/llm/model/providers/deepinfra.js +0 -22
  105. package/dist/llm/model/providers/deepseek.d.ts +0 -30
  106. package/dist/llm/model/providers/deepseek.js +0 -22
  107. package/dist/llm/model/providers/fireworks.d.ts +0 -35
  108. package/dist/llm/model/providers/fireworks.js +0 -40
  109. package/dist/llm/model/providers/google-vertex.d.ts +0 -48
  110. package/dist/llm/model/providers/google-vertex.js +0 -22
  111. package/dist/llm/model/providers/google.d.ts +0 -54
  112. package/dist/llm/model/providers/google.js +0 -23
  113. package/dist/llm/model/providers/grok.d.ts +0 -32
  114. package/dist/llm/model/providers/grok.js +0 -22
  115. package/dist/llm/model/providers/groq.d.ts +0 -37
  116. package/dist/llm/model/providers/groq.js +0 -42
  117. package/dist/llm/model/providers/lmstudio.d.ts +0 -29
  118. package/dist/llm/model/providers/lmstudio.js +0 -22
  119. package/dist/llm/model/providers/mistral.d.ts +0 -30
  120. package/dist/llm/model/providers/mistral.js +0 -22
  121. package/dist/llm/model/providers/mock.d.ts +0 -30
  122. package/dist/llm/model/providers/mock.js +0 -83
  123. package/dist/llm/model/providers/ollama.d.ts +0 -31
  124. package/dist/llm/model/providers/ollama.js +0 -23
  125. package/dist/llm/model/providers/openai-compat.d.ts +0 -39
  126. package/dist/llm/model/providers/openai-compat.js +0 -6
  127. package/dist/llm/model/providers/openai.d.ts +0 -34
  128. package/dist/llm/model/providers/openai.js +0 -6
  129. package/dist/llm/model/providers/openai.test.d.ts +0 -2
  130. package/dist/llm/model/providers/openai.test.js +0 -220
  131. package/dist/llm/model/providers/perplexity.d.ts +0 -30
  132. package/dist/llm/model/providers/perplexity.js +0 -22
  133. package/dist/llm/model/providers/portkey.d.ts +0 -34
  134. package/dist/llm/model/providers/portkey.js +0 -22
  135. package/dist/llm/model/providers/togetherai.d.ts +0 -30
  136. package/dist/llm/model/providers/togetherai.js +0 -22
  137. package/dist/types-M16hSruO.d.ts +0 -40
@@ -1,12 +1,427 @@
- import { LLM } from './chunk-RI3ECMVF.js';
- import { InstrumentClass } from './chunk-6ZVFVYLE.js';
+ import { InstrumentClass } from './chunk-4ZUSEHLH.js';
+ import { delay } from './chunk-KNPBNSJ7.js';
+ import { executeHook, AvailableHooks } from './chunk-HBTQNIAX.js';
  import { MastraBase } from './chunk-G4MCO7XF.js';
  import { RegisteredLogger, LogLevel } from './chunk-ICMEXHKD.js';
- import { executeHook, AvailableHooks } from './chunk-HBTQNIAX.js';
- import { __name, __publicField, __privateAdd, __privateSet, __privateGet } from './chunk-AJJZUHB4.js';
+ import { __name, __privateAdd, __privateSet, __privateGet, __publicField } from './chunk-AJJZUHB4.js';
  import { randomUUID } from 'crypto';
  import { z } from 'zod';
+ import { generateText, jsonSchema, generateObject, streamText, streamObject } from 'ai';

+ // src/llm/model/base.ts
+ var _mastra, _model;
+ var _MastraLLMBase = class _MastraLLMBase extends MastraBase {
+ constructor({ name, model }) {
+ super({
+ component: RegisteredLogger.LLM,
+ name
+ });
+ // @ts-ignore
+ __privateAdd(this, _mastra);
+ __privateAdd(this, _model);
+ __privateSet(this, _model, model);
+ }
+ getProvider() {
+ return __privateGet(this, _model).provider;
+ }
+ convertToMessages(messages) {
+ if (Array.isArray(messages)) {
+ return messages.map((m) => {
+ if (typeof m === "string") {
+ return {
+ role: "user",
+ content: m
+ };
+ }
+ return m;
+ });
+ }
+ return [
+ {
+ role: "user",
+ content: messages
+ }
+ ];
+ }
+ __registerPrimitives(p) {
+ if (p.telemetry) {
+ this.__setTelemetry(p.telemetry);
+ }
+ if (p.logger) {
+ this.__setLogger(p.logger);
+ }
+ __privateSet(this, _mastra, p);
+ }
+ async __text(input) {
+ this.logger.debug(`[LLMs:${this.name}] Generating text.`, {
+ input
+ });
+ throw new Error("Method not implemented.");
+ }
+ async __textObject(input) {
+ this.logger.debug(`[LLMs:${this.name}] Generating object.`, {
+ input
+ });
+ throw new Error("Method not implemented.");
+ }
+ async generate(messages, options = {}) {
+ this.logger.debug(`[LLMs:${this.name}] Generating text.`, {
+ messages,
+ options
+ });
+ throw new Error("Method not implemented.");
+ }
+ async __stream(input) {
+ this.logger.debug(`[LLMs:${this.name}] Streaming text.`, {
+ input
+ });
+ throw new Error("Method not implemented.");
+ }
+ async __streamObject(input) {
+ this.logger.debug(`[LLMs:${this.name}] Streaming object.`, {
+ input
+ });
+ throw new Error("Method not implemented.");
+ }
+ async stream(messages, options = {}) {
+ this.logger.debug(`[LLMs:${this.name}] Streaming text.`, {
+ messages,
+ options
+ });
+ throw new Error("Method not implemented.");
+ }
+ };
+ _mastra = new WeakMap();
+ _model = new WeakMap();
+ __name(_MastraLLMBase, "MastraLLMBase");
+ var MastraLLMBase = _MastraLLMBase;
+ var _model2, _mastra2;
+ var _MastraLLM = class _MastraLLM extends MastraLLMBase {
+ constructor({ model, mastra }) {
+ super({
+ name: "aisdk",
+ model
+ });
+ __privateAdd(this, _model2);
+ __privateAdd(this, _mastra2);
+ __privateSet(this, _model2, model);
+ if (mastra) {
+ __privateSet(this, _mastra2, mastra);
+ if (mastra.logger) {
+ this.__setLogger(mastra.logger);
+ }
+ }
+ }
+ __registerPrimitives(p) {
+ if (p.telemetry) {
+ this.__setTelemetry(p.telemetry);
+ }
+ if (p.logger) {
+ this.__setLogger(p.logger);
+ }
+ __privateSet(this, _mastra2, p);
+ }
+ getProvider() {
+ return __privateGet(this, _model2).provider;
+ }
+ convertTools(tools) {
+ this.logger.debug("Starting tool conversion for LLM");
+ const converted = Object.entries(tools || {}).reduce((memo, value) => {
+ const k = value[0];
+ const tool = value[1];
+ if (tool) {
+ memo[k] = {
+ description: tool.description,
+ parameters: tool.inputSchema,
+ execute: /* @__PURE__ */ __name(async (props) => {
+ try {
+ this.logger.debug("Executing tool", {
+ tool: k,
+ props
+ });
+ return tool.execute({
+ context: props,
+ mastra: __privateGet(this, _mastra2)
+ });
+ } catch (error) {
+ this.logger.error("Error executing tool", {
+ tool: k,
+ props,
+ error
+ });
+ throw error;
+ }
+ }, "execute")
+ };
+ }
+ return memo;
+ }, {});
+ this.logger.debug(`Converted tools for LLM`);
+ return converted;
+ }
+ async __text({ runId, messages, maxSteps, tools, convertedTools, temperature, toolChoice = "auto", onStepFinish }) {
+ const model = __privateGet(this, _model2);
+ this.logger.debug(`[LLM] - Generating text`, {
+ runId,
+ messages,
+ maxSteps,
+ tools: Object.keys(tools || convertedTools || {})
+ });
+ const finalTools = convertedTools || this.convertTools(tools);
+ const argsForExecute = {
+ model,
+ temperature,
+ tools: {
+ ...finalTools
+ },
+ toolChoice,
+ maxSteps,
+ onStepFinish: /* @__PURE__ */ __name(async (props) => {
+ onStepFinish?.(JSON.stringify(props, null, 2));
+ this.logger.debug("[LLM] - Step Change:", {
+ text: props?.text,
+ toolCalls: props?.toolCalls,
+ toolResults: props?.toolResults,
+ finishReason: props?.finishReason,
+ usage: props?.usage,
+ runId
+ });
+ if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+ this.logger.warn("Rate limit approaching, waiting 10 seconds");
+ await delay(10 * 1e3);
+ }
+ }, "onStepFinish")
+ };
+ return await generateText({
+ messages,
+ ...argsForExecute,
+ experimental_telemetry: this.experimental_telemetry
+ });
+ }
+ async __textObject({ messages, onStepFinish, maxSteps = 5, tools, convertedTools, structuredOutput, runId, temperature, toolChoice = "auto" }) {
+ const model = __privateGet(this, _model2);
+ this.logger.debug(`[LLM] - Generating a text object`, {
+ runId
+ });
+ const finalTools = convertedTools || this.convertTools(tools);
+ const argsForExecute = {
+ model,
+ temperature,
+ tools: {
+ ...finalTools
+ },
+ maxSteps,
+ toolChoice,
+ onStepFinish: /* @__PURE__ */ __name(async (props) => {
+ onStepFinish?.(JSON.stringify(props, null, 2));
+ this.logger.debug("[LLM] - Step Change:", {
+ text: props?.text,
+ toolCalls: props?.toolCalls,
+ toolResults: props?.toolResults,
+ finishReason: props?.finishReason,
+ usage: props?.usage,
+ runId
+ });
+ if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+ this.logger.warn("Rate limit approaching, waiting 10 seconds", {
+ runId
+ });
+ await delay(10 * 1e3);
+ }
+ }, "onStepFinish")
+ };
+ let schema;
+ let output = "object";
+ if (typeof structuredOutput.parse === "function") {
+ schema = structuredOutput;
+ if (schema instanceof z.ZodArray) {
+ output = "array";
+ schema = schema._def.type;
+ }
+ } else {
+ schema = jsonSchema(structuredOutput);
+ }
+ return await generateObject({
+ messages,
+ ...argsForExecute,
+ output,
+ schema,
+ experimental_telemetry: this.experimental_telemetry
+ });
+ }
+ async __stream({ messages, onStepFinish, onFinish, maxSteps = 5, tools, convertedTools, runId, temperature, toolChoice = "auto" }) {
+ const model = __privateGet(this, _model2);
+ this.logger.debug(`[LLM] - Streaming text`, {
+ runId,
+ messages,
+ maxSteps,
+ tools: Object.keys(tools || convertedTools || {})
+ });
+ const finalTools = convertedTools || this.convertTools(tools);
+ const argsForExecute = {
+ model,
+ temperature,
+ tools: {
+ ...finalTools
+ },
+ maxSteps,
+ toolChoice,
+ onStepFinish: /* @__PURE__ */ __name(async (props) => {
+ onStepFinish?.(JSON.stringify(props, null, 2));
+ this.logger.debug("[LLM] - Stream Step Change:", {
+ text: props?.text,
+ toolCalls: props?.toolCalls,
+ toolResults: props?.toolResults,
+ finishReason: props?.finishReason,
+ usage: props?.usage,
+ runId
+ });
+ if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+ this.logger.warn("Rate limit approaching, waiting 10 seconds", {
+ runId
+ });
+ await delay(10 * 1e3);
+ }
+ }, "onStepFinish"),
+ onFinish: /* @__PURE__ */ __name(async (props) => {
+ onFinish?.(JSON.stringify(props, null, 2));
+ this.logger.debug("[LLM] - Stream Finished:", {
+ text: props?.text,
+ toolCalls: props?.toolCalls,
+ toolResults: props?.toolResults,
+ finishReason: props?.finishReason,
+ usage: props?.usage,
+ runId
+ });
+ }, "onFinish")
+ };
+ return await streamText({
+ messages,
+ ...argsForExecute,
+ experimental_telemetry: this.experimental_telemetry
+ });
+ }
+ async __streamObject({ messages, onStepFinish, onFinish, maxSteps = 5, tools, convertedTools, structuredOutput, runId, temperature, toolChoice = "auto" }) {
+ const model = __privateGet(this, _model2);
+ this.logger.debug(`[LLM] - Streaming structured output`, {
+ runId,
+ messages,
+ maxSteps,
+ tools: Object.keys(tools || convertedTools || {})
+ });
+ const finalTools = convertedTools || this.convertTools(tools);
+ const argsForExecute = {
+ model,
+ temperature,
+ tools: {
+ ...finalTools
+ },
+ maxSteps,
+ toolChoice,
+ onStepFinish: /* @__PURE__ */ __name(async (props) => {
+ onStepFinish?.(JSON.stringify(props, null, 2));
+ this.logger.debug("[LLM] - Stream Step Change:", {
+ text: props?.text,
+ toolCalls: props?.toolCalls,
+ toolResults: props?.toolResults,
+ finishReason: props?.finishReason,
+ usage: props?.usage,
+ runId
+ });
+ if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+ this.logger.warn("Rate limit approaching, waiting 10 seconds", {
+ runId
+ });
+ await delay(10 * 1e3);
+ }
+ }, "onStepFinish"),
+ onFinish: /* @__PURE__ */ __name(async (props) => {
+ onFinish?.(JSON.stringify(props, null, 2));
+ this.logger.debug("[LLM] - Stream Finished:", {
+ text: props?.text,
+ toolCalls: props?.toolCalls,
+ toolResults: props?.toolResults,
+ finishReason: props?.finishReason,
+ usage: props?.usage,
+ runId
+ });
+ }, "onFinish")
+ };
+ let schema;
+ let output = "object";
+ if (typeof structuredOutput.parse === "function") {
+ schema = structuredOutput;
+ if (schema instanceof z.ZodArray) {
+ output = "array";
+ schema = schema._def.type;
+ }
+ } else {
+ schema = jsonSchema(structuredOutput);
+ }
+ return streamObject({
+ messages,
+ ...argsForExecute,
+ output,
+ schema,
+ experimental_telemetry: this.experimental_telemetry
+ });
+ }
+ async generate(messages, { maxSteps = 5, onStepFinish, tools, convertedTools, runId, output = "text", temperature } = {}) {
+ const msgs = this.convertToMessages(messages);
+ if (output === "text") {
+ return await this.__text({
+ messages: msgs,
+ onStepFinish,
+ maxSteps,
+ tools,
+ convertedTools,
+ runId,
+ temperature
+ });
+ }
+ return await this.__textObject({
+ messages: msgs,
+ structuredOutput: output,
+ onStepFinish,
+ maxSteps,
+ tools,
+ convertedTools,
+ runId
+ });
+ }
+ async stream(messages, { maxSteps = 5, onFinish, onStepFinish, tools, convertedTools, runId, output = "text", temperature } = {}) {
+ const msgs = this.convertToMessages(messages);
+ if (output === "text") {
+ return await this.__stream({
+ messages: msgs,
+ onStepFinish,
+ onFinish,
+ maxSteps,
+ tools,
+ convertedTools,
+ runId,
+ temperature
+ });
+ }
+ return await this.__streamObject({
+ messages: msgs,
+ structuredOutput: output,
+ onStepFinish,
+ onFinish,
+ maxSteps,
+ tools,
+ convertedTools,
+ runId,
+ temperature
+ });
+ }
+ };
+ _model2 = new WeakMap();
+ _mastra2 = new WeakMap();
+ __name(_MastraLLM, "MastraLLM");
+ var MastraLLM = _MastraLLM;
+
+ // src/agent/index.ts
  function _ts_decorate(decorators, target, key, desc) {
  var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
@@ -18,41 +433,35 @@ function _ts_metadata(k, v) {
  if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
  }
  __name(_ts_metadata, "_ts_metadata");
- var _mastra, _memory;
+ var _mastra3, _memory;
  var _Agent = class _Agent extends MastraBase {
  constructor(config) {
  super({
  component: RegisteredLogger.AGENT
  });
  __publicField(this, "name");
- // @ts-ignore
  __publicField(this, "llm");
  __publicField(this, "instructions");
  __publicField(this, "model");
- __privateAdd(this, _mastra);
+ __privateAdd(this, _mastra3);
  __privateAdd(this, _memory);
  __publicField(this, "tools");
  __publicField(this, "metrics");
  this.name = config.name;
  this.instructions = config.instructions;
- if (!config.model && !config.llm) {
- throw new Error("Either model or llm is required");
- }
- if (config.llm) {
- this.llm = config.llm;
- } else if (config.model) {
- this.model = config.model;
- this.llm = new LLM({
- model: config.model
- });
+ if (!config.model) {
+ throw new Error(`LanugageModel is required to create an Agent. Please provider the 'model'.`);
  }
+ this.llm = new MastraLLM({
+ model: config.model
+ });
  this.tools = {};
  this.metrics = {};
  if (config.tools) {
  this.tools = config.tools;
  }
  if (config.mastra) {
- __privateSet(this, _mastra, config.mastra);
+ __privateSet(this, _mastra3, config.mastra);
  }
  if (config.metrics) {
  this.metrics = config.metrics;
@@ -65,7 +474,7 @@ var _Agent = class _Agent extends MastraBase {
  return Boolean(__privateGet(this, _memory));
  }
  getMemory() {
- return __privateGet(this, _memory) ?? __privateGet(this, _mastra)?.memory;
+ return __privateGet(this, _memory) ?? __privateGet(this, _mastra3)?.memory;
  }
  __registerPrimitives(p) {
  if (p.telemetry) {
@@ -75,7 +484,7 @@ var _Agent = class _Agent extends MastraBase {
  this.__setLogger(p.logger);
  }
  this.llm.__registerPrimitives(p);
- __privateSet(this, _mastra, p);
+ __privateSet(this, _mastra3, p);
  this.logger.debug(`[Agents:${this.name}] initialized.`, {
  model: this.model,
  name: this.name
@@ -285,11 +694,11 @@ ${memorySystemMessage}` : ""}`
  const responseMessagesWithoutIncompleteToolCalls = this.sanitizeResponseMessages(ms);
  const memory = this.getMemory();
  if (memory) {
- this.logger.debug(`[Agent:${this.name}] - Memory persistence: store=${__privateGet(this, _mastra)?.memory?.constructor.name} threadId=${threadId}`, {
+ this.logger.debug(`[Agent:${this.name}] - Memory persistence: store=${__privateGet(this, _mastra3)?.memory?.constructor.name} threadId=${threadId}`, {
  runId,
  resourceId,
  threadId,
- memoryStore: __privateGet(this, _mastra)?.memory?.constructor.name
+ memoryStore: __privateGet(this, _mastra3)?.memory?.constructor.name
  });
  await memory.saveMessages({
  memoryConfig,
@@ -388,7 +797,7 @@ ${memorySystemMessage}` : ""}`
  });
  return tool.execute({
  context: args,
- mastra: __privateGet(this, _mastra)
+ mastra: __privateGet(this, _mastra3)
  });
  } catch (err) {
  this.logger.error(`[Agent:${this.name}] - Failed execution`, {
@@ -474,11 +883,11 @@ ${memorySystemMessage}` : ""}`
  let coreMessages = messages;
  let threadIdToUse = threadId;
  if (this.getMemory() && resourceId) {
- this.logger.debug(`[Agent:${this.name}] - Memory persistence enabled: store=${__privateGet(this, _mastra)?.memory?.constructor.name}, resourceId=${resourceId}`, {
+ this.logger.debug(`[Agent:${this.name}] - Memory persistence enabled: store=${__privateGet(this, _mastra3)?.memory?.constructor.name}, resourceId=${resourceId}`, {
  runId,
  resourceId,
  threadId: threadIdToUse,
- memoryStore: __privateGet(this, _mastra)?.memory?.constructor.name
+ memoryStore: __privateGet(this, _mastra3)?.memory?.constructor.name
  });
  const preExecuteResult = await this.preExecute({
  resourceId,
@@ -579,7 +988,7 @@ ${memorySystemMessage}` : ""}`
  }, "after")
  };
  }
- async generate(messages, { context, threadId: threadIdInFn, memoryOptions, resourceId, maxSteps = 5, onStepFinish, runId, toolsets, output = "text", temperature } = {}) {
+ async generate(messages, { context, threadId: threadIdInFn, memoryOptions, resourceId, maxSteps = 5, onStepFinish, runId, toolsets, output = "text", temperature, toolChoice = "auto" } = {}) {
  let messagesToUse = [];
  if (typeof messages === `string`) {
  messagesToUse = [
@@ -618,7 +1027,8 @@ ${memorySystemMessage}` : ""}`
  onStepFinish,
  maxSteps,
  runId: runIdToUse,
- temperature
+ temperature,
+ toolChoice
  });
  const outputText2 = result2.text;
  await after({
@@ -638,7 +1048,8 @@ ${memorySystemMessage}` : ""}`
  onStepFinish,
  maxSteps,
  runId: runIdToUse,
- temperature
+ temperature,
+ toolChoice
  });
  const outputText = JSON.stringify(result.object);
  await after({
@@ -650,7 +1061,7 @@ ${memorySystemMessage}` : ""}`
  });
  return result;
  }
- async stream(messages, { context, threadId: threadIdInFn, memoryOptions, resourceId, maxSteps = 5, onFinish, onStepFinish, runId, toolsets, output = "text", temperature } = {}) {
+ async stream(messages, { context, threadId: threadIdInFn, memoryOptions, resourceId, maxSteps = 5, onFinish, onStepFinish, runId, toolsets, output = "text", temperature, toolChoice = "auto" } = {}) {
  const runIdToUse = runId || randomUUID();
  let messagesToUse = [];
  if (typeof messages === `string`) {
@@ -711,7 +1122,8 @@ ${memorySystemMessage}` : ""}`
  onFinish?.(result);
  }, "onFinish"),
  maxSteps,
- runId
+ runId,
+ toolChoice
  });
  }
  this.logger.debug(`Starting agent ${this.name} llm streamObject call`, {
@@ -744,11 +1156,12 @@ ${memorySystemMessage}` : ""}`
  onFinish?.(result);
  }, "onFinish"),
  maxSteps,
- runId
+ runId,
+ toolChoice
  });
  }
  };
- _mastra = new WeakMap();
+ _mastra3 = new WeakMap();
  _memory = new WeakMap();
  __name(_Agent, "Agent");
  var Agent = _Agent;
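The hunks above remove the old LLM wrapper from Agent: config.llm and the provider-style model config are gone, the constructor now requires config.model, and that model is wrapped in the new AI SDK-backed MastraLLM shown earlier. The generate() and stream() options also gain a toolChoice parameter in this release. A minimal usage sketch of the new shape, assuming a Vercel AI SDK provider package such as @ai-sdk/openai supplies the language model (the provider import, model id, and import paths are illustrative assumptions, not taken from this diff):

// Sketch only: assumes @ai-sdk/openai is installed and the import paths below resolve.
import { openai } from '@ai-sdk/openai';
import { Agent } from '@mastra/core';

// `model` is now required and is expected to be an AI SDK LanguageModel instance;
// passing a pre-built `llm` or a { provider, name } model config is no longer supported.
const agent = new Agent({
  name: 'support-agent', // hypothetical agent for illustration
  instructions: 'Answer product questions in one short paragraph.',
  model: openai('gpt-4o-mini'), // hypothetical model id
});

// `toolChoice` is newly threaded through generate()/stream() in this version.
const result = await agent.generate('How do I reset my password?', { maxSteps: 5, toolChoice: 'auto' });
console.log(result.text);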
@@ -1,4 +1,4 @@
- import { InstrumentClass } from './chunk-6ZVFVYLE.js';
+ import { InstrumentClass } from './chunk-4ZUSEHLH.js';
  import { MastraBase } from './chunk-G4MCO7XF.js';
  import { __name, __publicField } from './chunk-AJJZUHB4.js';

@@ -0,0 +1,16 @@
+ import { MastraBase } from './chunk-G4MCO7XF.js';
+ import { __name } from './chunk-AJJZUHB4.js';
+
+ // src/vector/index.ts
+ var _MastraVector = class _MastraVector extends MastraBase {
+ constructor() {
+ super({
+ name: "MastraVector",
+ component: "VECTOR"
+ });
+ }
+ };
+ __name(_MastraVector, "MastraVector");
+ var MastraVector = _MastraVector;
+
+ export { MastraVector };
@@ -1,4 +1,4 @@
- import { Agent } from './chunk-JWVCENG2.js';
+ import { Agent } from './chunk-JY322VLW.js';
  import { __name, __publicField } from './chunk-AJJZUHB4.js';
  import { CohereClient } from 'cohere-ai';

@@ -40,18 +40,10 @@ __name(createSimilarityPrompt, "createSimilarityPrompt");

  // src/relevance/mastra-agent/index.ts
  var _MastraAgentRelevanceScorer = class _MastraAgentRelevanceScorer {
- constructor(provider, name, model) {
+ constructor(name, model) {
  __publicField(this, "agent");
- const modelConfig = {
- provider,
- ...model ? {
- model
- } : {
- name
- }
- };
  this.agent = new Agent({
- name: `Relevance Scorer ${provider} ${name}`,
+ name: `Relevance Scorer ${name}`,
  instructions: `You are a specialized agent for evaluating the relevance of text to queries.
  Your task is to rate how well a text passage answers a given query.
  Output only a number between 0 and 1, where:
@@ -62,7 +54,7 @@ Consider:
  - Completeness of information
  - Quality and specificity
  Always return just the number, no explanation.`,
- model: modelConfig
+ model
  });
  }
  async getRelevanceScore(query, text) {
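The relevance scorer follows the same model change: the provider argument and the intermediate modelConfig object are dropped, and the constructor now takes a display name plus the model instance, which is handed straight to the underlying Agent. A hedged sketch of the new call shape (the provider package, model id, and import path are assumptions, not part of this diff):

// Sketch only: import paths and model id are assumed for illustration.
import { openai } from '@ai-sdk/openai';
import { MastraAgentRelevanceScorer } from '@mastra/core/relevance';

// Previously: new MastraAgentRelevanceScorer(provider, name, model)
// Now the provider argument is gone and the AI SDK model is passed through directly.
const scorer = new MastraAgentRelevanceScorer('gpt-4o-mini', openai('gpt-4o-mini'));

const score = await scorer.getRelevanceScore(
  'What is the capital of France?',
  'Paris is the capital of France.',
);
console.log(score); // the scoring agent is instructed to return a number between 0 and 1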
@@ -1,6 +1,4 @@
- import { Telemetry } from './chunk-SDKEPBBH.js';
- import { LLM } from './chunk-RI3ECMVF.js';
- import { InstrumentClass } from './chunk-6ZVFVYLE.js';
+ import { InstrumentClass, Telemetry } from './chunk-4ZUSEHLH.js';
  import { LogLevel, createLogger, noopLogger } from './chunk-ICMEXHKD.js';
  import { __name, __publicField } from './chunk-AJJZUHB4.js';

@@ -147,8 +145,7 @@ var _Mastra = class _Mastra {
  memory: this.memory,
  agents: this.agents,
  tts: this.tts,
- vectors: this.vectors,
- llm: this.LLM
+ vectors: this.vectors
  });
  this.workflows[key] = workflow;
  });
@@ -157,18 +154,6 @@ var _Mastra = class _Mastra {
  logger
  });
  }
- LLM(modelConfig) {
- const llm = new LLM({
- model: modelConfig
- });
- if (this.telemetry) {
- llm.__setTelemetry(this.telemetry);
- }
- if (this.getLogger) {
- llm.__setLogger(this.getLogger());
- }
- return llm;
- }
  getAgent(name) {
  const agent = this.agents?.[name];
  if (!agent) {