@mastra/core 0.2.0-alpha.84 → 0.2.0-alpha.86

This diff compares the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (83)
  1. package/README.md +1 -1
  2. package/dist/action/index.d.ts +2 -2
  3. package/dist/agent/index.d.ts +6 -6
  4. package/dist/agent/index.js +1 -1
  5. package/dist/chunk-2ISN3AA7.js +392 -0
  6. package/dist/chunk-2J5OHBUG.js +24 -0
  7. package/dist/{chunk-Y7TKCKRI.js → chunk-5NQ3MEZM.js} +8 -8
  8. package/dist/{chunk-SAXFXAKK.js → chunk-73XDWPXJ.js} +41 -24
  9. package/dist/{chunk-3IV6WDJY.js → chunk-D66E7L7R.js} +1 -1
  10. package/dist/{chunk-3THCTISX.js → chunk-EVYBUFXB.js} +12 -8
  11. package/dist/{chunk-PRYZIZXD.js → chunk-I3MJB67Z.js} +8 -8
  12. package/dist/{chunk-42THOFKJ.js → chunk-RCS7AVH6.js} +1 -1
  13. package/dist/chunk-RLPH6TDJ.js +30 -0
  14. package/dist/embeddings/index.d.ts +2 -2
  15. package/dist/embeddings/index.js +1 -1
  16. package/dist/eval/index.d.ts +2 -2
  17. package/dist/filter/index.d.ts +6 -6
  18. package/dist/{index-62DyKJRU.d.ts → index-Duqv1Yom.d.ts} +340 -322
  19. package/dist/index.d.ts +6 -6
  20. package/dist/index.js +10 -10
  21. package/dist/integration/index.d.ts +7 -7
  22. package/dist/llm/index.d.ts +3 -3
  23. package/dist/llm/model/providers/anthropic-vertex.d.ts +31 -0
  24. package/dist/llm/model/providers/anthropic-vertex.js +23 -0
  25. package/dist/llm/model/providers/anthropic.d.ts +29 -0
  26. package/dist/llm/model/providers/anthropic.js +21 -0
  27. package/dist/llm/model/providers/azure.d.ts +48 -0
  28. package/dist/llm/model/providers/azure.js +50 -0
  29. package/dist/llm/model/providers/baseten.d.ts +33 -0
  30. package/dist/llm/model/providers/baseten.js +29 -0
  31. package/dist/llm/model/providers/bedrock.d.ts +32 -0
  32. package/dist/llm/model/providers/bedrock.js +24 -0
  33. package/dist/llm/model/providers/cerebras.d.ts +30 -0
  34. package/dist/llm/model/providers/cerebras.js +22 -0
  35. package/dist/llm/model/providers/cohere.d.ts +30 -0
  36. package/dist/llm/model/providers/cohere.js +22 -0
  37. package/dist/llm/model/providers/deepinfra.d.ts +30 -0
  38. package/dist/llm/model/providers/deepinfra.js +22 -0
  39. package/dist/llm/model/providers/deepseek.d.ts +30 -0
  40. package/dist/llm/model/providers/deepseek.js +22 -0
  41. package/dist/llm/model/providers/fireworks.d.ts +35 -0
  42. package/dist/llm/model/providers/fireworks.js +40 -0
  43. package/dist/llm/model/providers/google-vertex.d.ts +48 -0
  44. package/dist/llm/model/providers/google-vertex.js +22 -0
  45. package/dist/llm/model/providers/google.d.ts +54 -0
  46. package/dist/llm/model/providers/google.js +23 -0
  47. package/dist/llm/model/providers/grok.d.ts +32 -0
  48. package/dist/llm/model/providers/grok.js +22 -0
  49. package/dist/llm/model/providers/groq.d.ts +37 -0
  50. package/dist/llm/model/providers/groq.js +42 -0
  51. package/dist/llm/model/providers/lmstudio.d.ts +29 -0
  52. package/dist/llm/model/providers/lmstudio.js +22 -0
  53. package/dist/llm/model/providers/mistral.d.ts +30 -0
  54. package/dist/llm/model/providers/mistral.js +22 -0
  55. package/dist/llm/model/providers/mock.d.ts +30 -0
  56. package/dist/llm/model/providers/mock.js +83 -0
  57. package/dist/llm/model/providers/ollama.d.ts +31 -0
  58. package/dist/llm/model/providers/ollama.js +23 -0
  59. package/dist/llm/model/providers/openai-compat.d.ts +39 -0
  60. package/dist/llm/model/providers/openai-compat.js +6 -0
  61. package/dist/llm/model/providers/openai.d.ts +34 -0
  62. package/dist/llm/model/providers/openai.js +6 -0
  63. package/dist/llm/model/providers/openai.test.d.ts +2 -0
  64. package/dist/llm/model/providers/openai.test.js +220 -0
  65. package/dist/llm/model/providers/perplexity.d.ts +30 -0
  66. package/dist/llm/model/providers/perplexity.js +22 -0
  67. package/dist/llm/model/providers/portkey.d.ts +34 -0
  68. package/dist/llm/model/providers/portkey.js +22 -0
  69. package/dist/llm/model/providers/togetherai.d.ts +30 -0
  70. package/dist/llm/model/providers/togetherai.js +22 -0
  71. package/dist/mastra/index.d.ts +3 -3
  72. package/dist/memory/index.d.ts +1 -1
  73. package/dist/memory/index.js +1 -1
  74. package/dist/model-QGWIMOSx.d.ts +31 -0
  75. package/dist/relevance/index.d.ts +2 -2
  76. package/dist/relevance/index.js +2 -2
  77. package/dist/storage/index.d.ts +3 -3
  78. package/dist/storage/index.js +1 -1
  79. package/dist/tools/index.d.ts +3 -3
  80. package/dist/vector/index.js +2 -2
  81. package/dist/{workflow-DGktrYAL.d.ts → workflow-DQ8CtzzU.d.ts} +1 -1
  82. package/dist/workflows/index.d.ts +4 -4
  83. package/package.json +30 -14
package/dist/llm/model/providers/deepseek.d.ts
@@ -0,0 +1,30 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type DeepseekModel = 'deepseek-chat' | 'deepseek-reasoner' | (string & {});
+ declare class DeepSeek extends MastraLLM {
+     constructor({ name, apiKey, baseURL, }?: {
+         name?: string;
+         apiKey?: string;
+         baseURL?: string;
+     });
+ }
+
+ export { DeepSeek, type DeepseekModel };
package/dist/llm/model/providers/deepseek.js
@@ -0,0 +1,22 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createDeepSeek } from '@ai-sdk/deepseek';
+
+ var _DeepSeek = class _DeepSeek extends MastraLLM {
+   constructor({ name = "deepseek-chat", apiKey = process.env.DEEPSEEK_API_KEY || "", baseURL = "https://api.deepseek.com/v1" } = {}) {
+     const deepseekModel = createDeepSeek({
+       baseURL,
+       apiKey
+     });
+     super({
+       model: deepseekModel(name)
+     });
+   }
+ };
+ __name(_DeepSeek, "DeepSeek");
+ var DeepSeek = _DeepSeek;
+
+ export { DeepSeek };
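Read together, the declaration and compiled output show a fully optional constructor: name defaults to "deepseek-chat", apiKey falls back to the DEEPSEEK_API_KEY environment variable, and baseURL defaults to https://api.deepseek.com/v1. A minimal usage sketch follows; the deep import path is an assumption, since the real entry point depends on the export map in package.json (also changed in this release):

import { DeepSeek } from '@mastra/core/dist/llm/model/providers/deepseek.js'; // import path is an assumption

// Zero-config: picks up DEEPSEEK_API_KEY from the environment and uses "deepseek-chat".
const chat = new DeepSeek();

// Explicit config, e.g. the reasoning model against the default base URL.
const reasoner = new DeepSeek({ name: 'deepseek-reasoner' });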
package/dist/llm/model/providers/fireworks.d.ts
@@ -0,0 +1,35 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type FireworksModel = 'accounts/fireworks/models/deepseek-v3' | 'accounts/fireworks/models/llama-v3p1-405b-instruct' | 'accounts/fireworks/models/llama-v3p1-8b-instruct' | 'accounts/fireworks/models/llama-v3p2-3b-instruct' | 'accounts/fireworks/models/llama-v3p3-70b-instruct' | 'accounts/fireworks/models/mixtral-8x7b-instruct-hf' | 'accounts/fireworks/models/mixtral-8x22b-instruct' | 'accounts/fireworks/models/qwen2p5-coder-32b-instruct' | 'accounts/fireworks/models/llama-v3p2-11b-vision-instruct' | 'accounts/fireworks/models/yi-large' | (string & {});
+ declare class Fireworks extends MastraLLM {
+     constructor({ name, apiKey }: {
+         name: FireworksModel;
+         apiKey?: string;
+     });
+ }
+ declare class FireworksReasoning extends MastraLLM {
+     constructor({ name, apiKey, }: {
+         name: 'accounts/fireworks/models/deepseek-r1';
+         apiKey?: string;
+     });
+ }
+
+ export { Fireworks, type FireworksModel, FireworksReasoning };
package/dist/llm/model/providers/fireworks.js
@@ -0,0 +1,40 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createFireworks } from '@ai-sdk/fireworks';
+ import { wrapLanguageModel, extractReasoningMiddleware } from 'ai';
+
+ var _Fireworks = class _Fireworks extends MastraLLM {
+   constructor({ name, apiKey = process.env.FIREWORKS_API_KEY || "" }) {
+     const fireworksModel = createFireworks({
+       apiKey
+     });
+     super({
+       model: fireworksModel(name)
+     });
+   }
+ };
+ __name(_Fireworks, "Fireworks");
+ var Fireworks = _Fireworks;
+ var _FireworksReasoning = class _FireworksReasoning extends MastraLLM {
+   constructor({ name, apiKey = process.env.FIREWORKS_API_KEY || "" }) {
+     const fireworksModel = createFireworks({
+       apiKey
+     });
+     const enhancedModel = wrapLanguageModel({
+       model: fireworksModel(name),
+       middleware: extractReasoningMiddleware({
+         tagName: "think"
+       })
+     });
+     super({
+       model: enhancedModel
+     });
+   }
+ };
+ __name(_FireworksReasoning, "FireworksReasoning");
+ var FireworksReasoning = _FireworksReasoning;
+
+ export { Fireworks, FireworksReasoning };
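Unlike DeepSeek, Fireworks has no default model: name is required (typed as FireworksModel), while apiKey falls back to FIREWORKS_API_KEY. FireworksReasoning additionally wraps the model with extractReasoningMiddleware({ tagName: 'think' }) so the <think>...</think> spans emitted by deepseek-r1 are surfaced as reasoning rather than plain text. A hedged sketch (import path is an assumption):

import { Fireworks, FireworksReasoning } from '@mastra/core/dist/llm/model/providers/fireworks.js'; // path is an assumption

// A model name is mandatory here; the key falls back to FIREWORKS_API_KEY.
const llama = new Fireworks({ name: 'accounts/fireworks/models/llama-v3p3-70b-instruct' });

// The reasoning variant is pinned to deepseek-r1 and extracts its <think> traces.
const r1 = new FireworksReasoning({ name: 'accounts/fireworks/models/deepseek-r1' });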
package/dist/llm/model/providers/google-vertex.d.ts
@@ -0,0 +1,48 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ interface GoogleVertexSettings {
+     /**
+      Optional. The maximum number of tokens to consider when sampling.
+
+      Models use nucleus sampling or combined Top-k and nucleus sampling.
+      Top-k sampling considers the set of topK most probable tokens.
+      Models running with nucleus sampling don't allow topK setting.
+      */
+     topK?: number;
+     /**
+      Optional. A list of unique safety settings for blocking unsafe content.
+      */
+     safetySettings?: Array<{
+         category: 'HARM_CATEGORY_UNSPECIFIED' | 'HARM_CATEGORY_HATE_SPEECH' | 'HARM_CATEGORY_DANGEROUS_CONTENT' | 'HARM_CATEGORY_HARASSMENT' | 'HARM_CATEGORY_SEXUALLY_EXPLICIT';
+         threshold: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' | 'BLOCK_NONE';
+     }>;
+ }
+ type GoogleVertexModel = 'gemini-2.0-flash-exp' | 'gemini-1.5-flash' | 'gemini-1.5-pro';
+ declare class GoogleVertex extends MastraLLM {
+     constructor({ name, project, location, settings, }?: {
+         name?: GoogleVertexModel;
+         project?: string;
+         location?: string;
+         settings?: GoogleVertexSettings;
+     });
+ }
+
+ export { GoogleVertex, type GoogleVertexModel, type GoogleVertexSettings };
package/dist/llm/model/providers/google-vertex.js
@@ -0,0 +1,22 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createVertex } from '@ai-sdk/google-vertex';
+
+ var _GoogleVertex = class _GoogleVertex extends MastraLLM {
+   constructor({ name = "gemini-1.5-pro", project = process.env.GOOGLE_VERTEX_PROJECT || "", location = process.env.GOOGLE_VERTEX_LOCATION || "us-central1", settings } = {}) {
+     const vertexModel = createVertex({
+       project,
+       location
+     });
+     super({
+       model: vertexModel(name, settings)
+     });
+   }
+ };
+ __name(_GoogleVertex, "GoogleVertex");
+ var GoogleVertex = _GoogleVertex;
+
+ export { GoogleVertex };
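GoogleVertex authenticates through the Vertex project/location pair rather than an API key: project falls back to GOOGLE_VERTEX_PROJECT, location to GOOGLE_VERTEX_LOCATION or "us-central1", and the model defaults to "gemini-1.5-pro". A sketch (import path and project id are assumptions):

import { GoogleVertex } from '@mastra/core/dist/llm/model/providers/google-vertex.js'; // path is an assumption

const vertex = new GoogleVertex({
  name: 'gemini-1.5-flash',
  project: 'my-gcp-project', // hypothetical project id
  location: 'europe-west1',
  settings: {
    topK: 40,
    safetySettings: [
      { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_MEDIUM_AND_ABOVE' },
    ],
  },
});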
package/dist/llm/model/providers/google.d.ts
@@ -0,0 +1,54 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ interface GoogleGenerativeAISettings {
+     /**
+      Optional.
+      The name of the cached content used as context to serve the prediction.
+      Format: cachedContents/{cachedContent}
+      */
+     cachedContent?: string;
+     /**
+      * Optional. Enable structured output. Default is true.
+      *
+      * This is useful when the JSON Schema contains elements that are
+      * not supported by the OpenAPI schema version that
+      * Google Generative AI uses. You can use this to disable
+      * structured outputs if you need to.
+      */
+     structuredOutputs?: boolean;
+     /**
+      Optional. A list of unique safety settings for blocking unsafe content.
+      */
+     safetySettings?: Array<{
+         category: 'HARM_CATEGORY_HATE_SPEECH' | 'HARM_CATEGORY_DANGEROUS_CONTENT' | 'HARM_CATEGORY_HARASSMENT' | 'HARM_CATEGORY_SEXUALLY_EXPLICIT';
+         threshold: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' | 'BLOCK_NONE';
+     }>;
+ }
+ type GoogleModel = 'gemini-1.5-pro-latest' | 'gemini-1.5-pro' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash' | 'gemini-2.0-flash-exp-latest' | 'gemini-2.0-flash-thinking-exp-1219' | 'gemini-exp-1206' | (string & {});
+ declare class Gemini extends MastraLLM {
+     constructor({ name, apiKey, settings, }?: {
+         name?: GoogleModel;
+         apiKey?: string;
+         settings?: GoogleGenerativeAISettings;
+     });
+ }
+
+ export { Gemini, type GoogleGenerativeAISettings, type GoogleModel };
package/dist/llm/model/providers/google.js
@@ -0,0 +1,23 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createGoogleGenerativeAI } from '@ai-sdk/google';
+
+ var _Gemini = class _Gemini extends MastraLLM {
+   constructor({ name = "gemini-1.5-pro-latest", apiKey = process.env.GOOGLE_GENERATIVE_AI_API_KEY || "", settings } = {}) {
+     const google = createGoogleGenerativeAI({
+       baseURL: "https://generativelanguage.googleapis.com/v1beta",
+       apiKey
+     });
+     const gemini = google(name, settings);
+     super({
+       model: gemini
+     });
+   }
+ };
+ __name(_Gemini, "Gemini");
+ var Gemini = _Gemini;
+
+ export { Gemini };
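The Gemini class targets the Generative Language API directly (baseURL is pinned to https://generativelanguage.googleapis.com/v1beta), with apiKey falling back to GOOGLE_GENERATIVE_AI_API_KEY and the model defaulting to "gemini-1.5-pro-latest". A sketch (import path is an assumption):

import { Gemini } from '@mastra/core/dist/llm/model/providers/google.js'; // path is an assumption

const gemini = new Gemini({
  name: 'gemini-2.0-flash-thinking-exp-1219',
  settings: { structuredOutputs: false }, // disable when a JSON Schema isn't OpenAPI-compatible
});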
package/dist/llm/model/providers/grok.d.ts
@@ -0,0 +1,32 @@
+ import { OpenAIChatSettings } from '@ai-sdk/openai/internal';
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type XGrokModel = 'grok-beta' | 'grok-vision-beta' | 'grok-2-1212' | 'grok-2-vision-1212' | (string & {});
+ declare class Grok extends MastraLLM {
+     constructor({ name, apiKey, baseURL, settings, }: {
+         settings?: OpenAIChatSettings;
+         name?: string;
+         apiKey?: string;
+         baseURL?: string;
+     });
+ }
+
+ export { Grok, type XGrokModel };
package/dist/llm/model/providers/grok.js
@@ -0,0 +1,22 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createXai } from '@ai-sdk/xai';
+
+ var _Grok = class _Grok extends MastraLLM {
+   constructor({ name = "grok-beta", apiKey = process.env.XAI_API_KEY ?? "", baseURL = "https://api.x.ai/v1", settings }) {
+     const xAi = createXai({
+       baseURL,
+       apiKey
+     });
+     super({
+       model: xAi(name, settings)
+     });
+   }
+ };
+ __name(_Grok, "Grok");
+ var Grok = _Grok;
+
+ export { Grok };
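Grok wires the xAI endpoint (default baseURL https://api.x.ai/v1, apiKey from XAI_API_KEY, model "grok-beta") through @ai-sdk/xai. Two quirks are visible in the diff: the destructured options parameter has no = {} default, so an object (possibly empty) must be passed, and name is declared as string even though the XGrokModel union is exported. A sketch (import path is an assumption):

import { Grok } from '@mastra/core/dist/llm/model/providers/grok.js'; // path is an assumption

// The empty object is required; omitting it would destructure undefined.
const grok = new Grok({});
const vision = new Grok({ name: 'grok-2-vision-1212' });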
package/dist/llm/model/providers/groq.d.ts
@@ -0,0 +1,37 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type GroqModel = 'llama3-groq-70b-8192-tool-use-preview' | 'llama3-groq-8b-8192-tool-use-preview' | 'gemma2-9b-it' | 'gemma-7b-it' | (string & {});
+ declare class Groq extends MastraLLM {
+     constructor({ name, apiKey, baseURL, }?: {
+         name?: GroqModel;
+         apiKey?: string;
+         baseURL?: string;
+     });
+ }
+ declare class GroqReasoning extends MastraLLM {
+     constructor({ name, apiKey, baseURL, }?: {
+         name?: 'deepseek-r1-distill-llama-70b';
+         apiKey?: string;
+         baseURL?: string;
+     });
+ }
+
+ export { Groq, type GroqModel, GroqReasoning };
package/dist/llm/model/providers/groq.js
@@ -0,0 +1,42 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createGroq } from '@ai-sdk/groq';
+ import { wrapLanguageModel, extractReasoningMiddleware } from 'ai';
+
+ var _Groq = class _Groq extends MastraLLM {
+   constructor({ name = "gemma2-9b-it", apiKey = process.env.GROQ_API_KEY || "", baseURL = "https://api.groq.com/openai/v1" } = {}) {
+     const groqModel = createGroq({
+       baseURL,
+       apiKey
+     });
+     super({
+       model: groqModel(name)
+     });
+   }
+ };
+ __name(_Groq, "Groq");
+ var Groq = _Groq;
+ var _GroqReasoning = class _GroqReasoning extends MastraLLM {
+   constructor({ name = "deepseek-r1-distill-llama-70b", apiKey = process.env.GROQ_API_KEY || "", baseURL = "https://api.groq.com/openai/v1" } = {}) {
+     const groqModel = createGroq({
+       baseURL,
+       apiKey
+     });
+     const enhancedModel = wrapLanguageModel({
+       model: groqModel(name),
+       middleware: extractReasoningMiddleware({
+         tagName: "think"
+       })
+     });
+     super({
+       model: enhancedModel
+     });
+   }
+ };
+ __name(_GroqReasoning, "GroqReasoning");
+ var GroqReasoning = _GroqReasoning;
+
+ export { Groq, GroqReasoning };
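Groq mirrors the Fireworks pair: Groq defaults to "gemma2-9b-it" against https://api.groq.com/openai/v1 with GROQ_API_KEY, and GroqReasoning defaults to "deepseek-r1-distill-llama-70b" wrapped in the same extractReasoningMiddleware({ tagName: 'think' }). Since both constructors default their options object, zero-config construction works. A sketch (import path is an assumption):

import { Groq, GroqReasoning } from '@mastra/core/dist/llm/model/providers/groq.js'; // path is an assumption

const groq = new Groq();               // gemma2-9b-it by default
const reasoning = new GroqReasoning(); // deepseek-r1-distill-llama-70b with <think> extraction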
package/dist/llm/model/providers/lmstudio.d.ts
@@ -0,0 +1,29 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type LMStudioModel = 'qwen2-7b-instruct-4bit' | 'qwen2-math-1.5b' | 'qwen2-0.5b' | 'aya-23-8b' | 'mistral-7b-v0.3' | 'stablecode' | 'cohere-command-r-v01-4bit' | 'command-r' | 'starcoder2-7b' | 'deepseek-math-7b' | 'qwen2.5-coder-14b' | 'qwen2.5-coder-32b' | 'qwen2.5-coder-3b' | 'llama-3.2-3b-instruct-4bit' | 'llama-3.2-1b' | 'llama-3.2-3b' | 'qwen2.5-coder-7b' | 'qwen2.5-14b' | 'yi-coder-9b' | 'hermes-3-llama-3.1-8b' | 'internlm-2.5-20b' | 'llava-v1.5' | 'llama-3.1-8b-instruct-4bit' | 'meta-llama-3.1-8b' | 'mistral-nemo-2407' | 'mistral-nemo-instruct-2407-4bit' | 'gemma-2-2b' | 'mathstral-7b' | 'gemma-2-9b' | 'deepseek-coder-v2-lite-instruct-4bit' | 'smollm-360m-v0.2' | 'phi-3-mini-4k-instruct-4bit' | 'gemma-2-27b' | 'codestral-22b' | 'phi-3.1-mini-128k' | 'deepseek-coder-v2-lite' | (string & {});
+ declare class LMStudio extends MastraLLM {
+     constructor({ name, baseURL }: {
+         name: LMStudioModel;
+         baseURL: string;
+     });
+ }
+
+ export { LMStudio, type LMStudioModel };
package/dist/llm/model/providers/lmstudio.js
@@ -0,0 +1,22 @@
+ import { openaiCompat } from '../../../chunk-RLPH6TDJ.js';
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+
+ // src/llm/model/providers/lmstudio.ts
+ var _LMStudio = class _LMStudio extends MastraLLM {
+   constructor({ name, baseURL }) {
+     super({
+       model: openaiCompat({
+         modelName: name,
+         baseURL
+       })
+     });
+   }
+ };
+ __name(_LMStudio, "LMStudio");
+ var LMStudio = _LMStudio;
+
+ export { LMStudio };
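LMStudio is the one provider here with no cloud defaults: both name and baseURL are required, and the model is routed through the shared openaiCompat helper (chunk-RLPH6TDJ.js) rather than a vendor SDK. A sketch (the import path and local URL are assumptions; point baseURL at wherever the LM Studio server is listening):

import { LMStudio } from '@mastra/core/dist/llm/model/providers/lmstudio.js'; // path is an assumption

const local = new LMStudio({
  name: 'qwen2.5-coder-32b',
  baseURL: 'http://localhost:1234/v1', // example address for a local LM Studio server
});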
package/dist/llm/model/providers/mistral.d.ts
@@ -0,0 +1,30 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ type MistralModel = 'pixtral-large-latest' | 'mistral-large-latest' | 'mistral-small-latest' | 'ministral-3b-latest' | 'ministral-8b-latest' | 'pixtral-12b-2409' | (string & {});
+ declare class Mistral extends MastraLLM {
+     constructor({ name, apiKey, baseURL, }?: {
+         name?: MistralModel;
+         apiKey?: string;
+         baseURL?: string;
+     });
+ }
+
+ export { Mistral, type MistralModel };
package/dist/llm/model/providers/mistral.js
@@ -0,0 +1,22 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { createMistral } from '@ai-sdk/mistral';
+
+ var _Mistral = class _Mistral extends MastraLLM {
+   constructor({ name = "pixtral-large-latest", apiKey = process.env.MISTRAL_API_KEY || "", baseURL = "https://api.mistral.ai/v1" } = {}) {
+     const mistralModel = createMistral({
+       baseURL,
+       apiKey
+     });
+     super({
+       model: mistralModel(name)
+     });
+   }
+ };
+ __name(_Mistral, "Mistral");
+ var Mistral = _Mistral;
+
+ export { Mistral };
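Mistral follows the same optional-everything shape: name defaults to "pixtral-large-latest", apiKey to MISTRAL_API_KEY, and baseURL to https://api.mistral.ai/v1. A sketch (import path is an assumption):

import { Mistral } from '@mastra/core/dist/llm/model/providers/mistral.js'; // path is an assumption

const pixtral = new Mistral();
const small = new Mistral({ name: 'mistral-small-latest' });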
package/dist/llm/model/providers/mock.d.ts
@@ -0,0 +1,30 @@
+ import { M as MastraLLM } from '../../../model-QGWIMOSx.js';
+ import 'ai';
+ import 'json-schema';
+ import 'zod';
+ import '../../../index-Duqv1Yom.js';
+ import '../../../base.js';
+ import '@opentelemetry/api';
+ import '../../../index-B0-NXUYv.js';
+ import 'pino';
+ import 'stream';
+ import '../../../telemetry-oCUM52DG.js';
+ import '@opentelemetry/sdk-node';
+ import '@opentelemetry/sdk-trace-base';
+ import '../../../metric-D2V4CR8D.js';
+ import 'sift';
+ import '../../../types-M16hSruO.js';
+ import '../../../vector/index.js';
+ import '../../../engine-EwEG-4Iv.js';
+ import '../../../tts/index.js';
+
+ declare class MockProvider extends MastraLLM {
+     constructor({ spyGenerate, spyStream, objectGenerationMode, mockText, }: {
+         spyGenerate?: (props: any) => void;
+         spyStream?: (props: any) => void;
+         objectGenerationMode?: 'json';
+         mockText?: string | Record<string, any>;
+     });
+ }
+
+ export { MockProvider };
package/dist/llm/model/providers/mock.js
@@ -0,0 +1,83 @@
+ import { MastraLLM } from '../../../chunk-2ISN3AA7.js';
+ import '../../../chunk-LUULSM4U.js';
+ import '../../../chunk-JCRGAEY6.js';
+ import '../../../chunk-TJK6TGSR.js';
+ import { __name } from '../../../chunk-AJJZUHB4.js';
+ import { simulateReadableStream } from 'ai';
+ import { MockLanguageModelV1 } from 'ai/test';
+
+ var _MockProvider = class _MockProvider extends MastraLLM {
+   constructor({ spyGenerate, spyStream, objectGenerationMode, mockText = "Hello, world!" }) {
+     const mockModel = new MockLanguageModelV1({
+       defaultObjectGenerationMode: objectGenerationMode,
+       doGenerate: /* @__PURE__ */ __name(async (props) => {
+         if (spyGenerate) {
+           spyGenerate(props);
+         }
+         if (objectGenerationMode === "json") {
+           return {
+             rawCall: {
+               rawPrompt: null,
+               rawSettings: {}
+             },
+             finishReason: "stop",
+             usage: {
+               promptTokens: 10,
+               completionTokens: 20
+             },
+             text: JSON.stringify(mockText)
+           };
+         }
+         return {
+           rawCall: {
+             rawPrompt: null,
+             rawSettings: {}
+           },
+           finishReason: "stop",
+           usage: {
+             promptTokens: 10,
+             completionTokens: 20
+           },
+           text: typeof mockText === "string" ? mockText : JSON.stringify(mockText)
+         };
+       }, "doGenerate"),
+       doStream: /* @__PURE__ */ __name(async (props) => {
+         if (spyStream) {
+           spyStream(props);
+         }
+         const text = typeof mockText === "string" ? mockText : JSON.stringify(mockText);
+         const chunks = text.split(" ").map((word) => ({
+           type: "text-delta",
+           textDelta: word + " "
+         }));
+         return {
+           stream: simulateReadableStream({
+             chunks: [
+               ...chunks,
+               {
+                 type: "finish",
+                 finishReason: "stop",
+                 logprobs: undefined,
+                 usage: {
+                   completionTokens: 10,
+                   promptTokens: 3
+                 }
+               }
+             ]
+           }),
+           rawCall: {
+             rawPrompt: null,
+             rawSettings: {}
+           }
+         };
+       }, "doStream")
+     });
+     super({
+       model: mockModel
+     });
+   }
+ };
+ __name(_MockProvider, "MockProvider");
+ var MockProvider = _MockProvider;
+
+ export { MockProvider };
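MockProvider is a test double built on MockLanguageModelV1 from ai/test: doGenerate returns mockText (JSON-stringified when objectGenerationMode is 'json' or when an object is passed), and doStream splits the text on spaces into text-delta chunks via simulateReadableStream. The spy callbacks receive the raw call props, which is useful for asserting what was actually sent to the model. A sketch (import path is an assumption):

import { MockProvider } from '@mastra/core/dist/llm/model/providers/mock.js'; // path is an assumption

let lastProps: any;
const mock = new MockProvider({
  mockText: 'Hello, world!',
  spyGenerate: (props) => { lastProps = props; }, // capture the props passed to doGenerate
});

// JSON mode: mockText may be an object and comes back as stringified JSON.
const jsonMock = new MockProvider({ objectGenerationMode: 'json', mockText: { ok: true } });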