@ai-sdk/deepinfra 0.2.5 → 0.2.7
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +1 -1
- package/dist/index.d.ts +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,19 @@
 # @ai-sdk/deepinfra
 
+## 0.2.7
+
+### Patch Changes
+
+- 264b1e0: feat (providers/deepinfra): add llama 4 models
+
+## 0.2.6
+
+### Patch Changes
+
+- Updated dependencies [2c19b9a]
+  - @ai-sdk/provider-utils@2.2.4
+  - @ai-sdk/openai-compatible@0.2.6
+
 ## 0.2.5
 
 ### Patch Changes
package/dist/index.d.mts
CHANGED
@@ -3,7 +3,7 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
 import { OpenAICompatibleChatSettings, OpenAICompatibleEmbeddingSettings, OpenAICompatibleCompletionSettings } from '@ai-sdk/openai-compatible';
 export { OpenAICompatibleErrorData as DeepInfraErrorData } from '@ai-sdk/openai-compatible';
 
-type DeepInfraChatModelId = 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct' | 'meta-llama/Meta-Llama-3.1-8B-Instruct' | 'meta-llama/Meta-Llama-3.1-405B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct' | 'Qwen/Qwen2.5-72B-Instruct' | 'meta-llama/Llama-3.2-90B-Vision-Instruct' | 'meta-llama/Llama-3.2-11B-Vision-Instruct' | 'microsoft/WizardLM-2-8x22B' | '01-ai/Yi-34B-Chat' | 'Austism/chronos-hermes-13b-v2' | 'Gryphe/MythoMax-L2-13b' | 'Gryphe/MythoMax-L2-13b-turbo' | 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1' | 'KoboldAI/LLaMA2-13B-Tiefighter' | 'NousResearch/Hermes-3-Llama-3.1-405B' | 'Phind/Phind-CodeLlama-34B-v2' | 'Qwen/Qwen2-72B-Instruct' | 'Qwen/Qwen2-7B-Instruct' | 'Qwen/Qwen2.5-7B-Instruct' | 'Qwen/Qwen2.5-Coder-7B' | 'Sao10K/L3-70B-Euryale-v2.1' | 'Sao10K/L3-8B-Lunaris-v1' | 'Sao10K/L3.1-70B-Euryale-v2.2' | 'bigcode/starcoder2-15b' | 'bigcode/starcoder2-15b-instruct-v0.1' | 'codellama/CodeLlama-34b-Instruct-hf' | 'codellama/CodeLlama-70b-Instruct-hf' | 'cognitivecomputations/dolphin-2.6-mixtral-8x7b' | 'cognitivecomputations/dolphin-2.9.1-llama-3-70b' | 'databricks/dbrx-instruct' | 'deepinfra/airoboros-70b' | 'deepseek-ai/DeepSeek-V3' | 'google/codegemma-7b-it' | 'google/gemma-1.1-7b-it' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'lizpreciatior/lzlv_70b_fp16_hf' | 'mattshumer/Reflection-Llama-3.1-70B' | 'meta-llama/Llama-2-13b-chat-hf' | 'meta-llama/Llama-2-70b-chat-hf' | 'meta-llama/Llama-2-7b-chat-hf' | 'meta-llama/Llama-3.2-1B-Instruct' | 'meta-llama/Llama-3.2-3B-Instruct' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'microsoft/Phi-3-medium-4k-instruct' | 'microsoft/WizardLM-2-7B' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mistral-Nemo-Instruct-2407' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-v0.1' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'nvidia/Nemotron-4-340B-Instruct' | 'openbmb/MiniCPM-Llama3-V-2_5' | 'openchat/openchat-3.6-8b' | 'openchat/openchat_3.5' | (string & {});
+type DeepInfraChatModelId = 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8' | 'meta-llama/Llama-4-Scout-17B-16E-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct' | 'meta-llama/Meta-Llama-3.1-8B-Instruct' | 'meta-llama/Meta-Llama-3.1-405B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct' | 'Qwen/Qwen2.5-72B-Instruct' | 'meta-llama/Llama-3.2-90B-Vision-Instruct' | 'meta-llama/Llama-3.2-11B-Vision-Instruct' | 'microsoft/WizardLM-2-8x22B' | '01-ai/Yi-34B-Chat' | 'Austism/chronos-hermes-13b-v2' | 'Gryphe/MythoMax-L2-13b' | 'Gryphe/MythoMax-L2-13b-turbo' | 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1' | 'KoboldAI/LLaMA2-13B-Tiefighter' | 'NousResearch/Hermes-3-Llama-3.1-405B' | 'Phind/Phind-CodeLlama-34B-v2' | 'Qwen/Qwen2-72B-Instruct' | 'Qwen/Qwen2-7B-Instruct' | 'Qwen/Qwen2.5-7B-Instruct' | 'Qwen/Qwen2.5-Coder-7B' | 'Sao10K/L3-70B-Euryale-v2.1' | 'Sao10K/L3-8B-Lunaris-v1' | 'Sao10K/L3.1-70B-Euryale-v2.2' | 'bigcode/starcoder2-15b' | 'bigcode/starcoder2-15b-instruct-v0.1' | 'codellama/CodeLlama-34b-Instruct-hf' | 'codellama/CodeLlama-70b-Instruct-hf' | 'cognitivecomputations/dolphin-2.6-mixtral-8x7b' | 'cognitivecomputations/dolphin-2.9.1-llama-3-70b' | 'databricks/dbrx-instruct' | 'deepinfra/airoboros-70b' | 'deepseek-ai/DeepSeek-V3' | 'google/codegemma-7b-it' | 'google/gemma-1.1-7b-it' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'lizpreciatior/lzlv_70b_fp16_hf' | 'mattshumer/Reflection-Llama-3.1-70B' | 'meta-llama/Llama-2-13b-chat-hf' | 'meta-llama/Llama-2-70b-chat-hf' | 'meta-llama/Llama-2-7b-chat-hf' | 'meta-llama/Llama-3.2-1B-Instruct' | 'meta-llama/Llama-3.2-3B-Instruct' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'microsoft/Phi-3-medium-4k-instruct' | 'microsoft/WizardLM-2-7B' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mistral-Nemo-Instruct-2407' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-v0.1' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'nvidia/Nemotron-4-340B-Instruct' | 'openbmb/MiniCPM-Llama3-V-2_5' | 'openchat/openchat-3.6-8b' | 'openchat/openchat_3.5' | (string & {});
 interface DeepInfraChatSettings extends OpenAICompatibleChatSettings {
 }
 
package/dist/index.d.ts
CHANGED
@@ -3,7 +3,7 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
 import { OpenAICompatibleChatSettings, OpenAICompatibleEmbeddingSettings, OpenAICompatibleCompletionSettings } from '@ai-sdk/openai-compatible';
 export { OpenAICompatibleErrorData as DeepInfraErrorData } from '@ai-sdk/openai-compatible';
 
-type DeepInfraChatModelId = 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct' | 'meta-llama/Meta-Llama-3.1-8B-Instruct' | 'meta-llama/Meta-Llama-3.1-405B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct' | 'Qwen/Qwen2.5-72B-Instruct' | 'meta-llama/Llama-3.2-90B-Vision-Instruct' | 'meta-llama/Llama-3.2-11B-Vision-Instruct' | 'microsoft/WizardLM-2-8x22B' | '01-ai/Yi-34B-Chat' | 'Austism/chronos-hermes-13b-v2' | 'Gryphe/MythoMax-L2-13b' | 'Gryphe/MythoMax-L2-13b-turbo' | 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1' | 'KoboldAI/LLaMA2-13B-Tiefighter' | 'NousResearch/Hermes-3-Llama-3.1-405B' | 'Phind/Phind-CodeLlama-34B-v2' | 'Qwen/Qwen2-72B-Instruct' | 'Qwen/Qwen2-7B-Instruct' | 'Qwen/Qwen2.5-7B-Instruct' | 'Qwen/Qwen2.5-Coder-7B' | 'Sao10K/L3-70B-Euryale-v2.1' | 'Sao10K/L3-8B-Lunaris-v1' | 'Sao10K/L3.1-70B-Euryale-v2.2' | 'bigcode/starcoder2-15b' | 'bigcode/starcoder2-15b-instruct-v0.1' | 'codellama/CodeLlama-34b-Instruct-hf' | 'codellama/CodeLlama-70b-Instruct-hf' | 'cognitivecomputations/dolphin-2.6-mixtral-8x7b' | 'cognitivecomputations/dolphin-2.9.1-llama-3-70b' | 'databricks/dbrx-instruct' | 'deepinfra/airoboros-70b' | 'deepseek-ai/DeepSeek-V3' | 'google/codegemma-7b-it' | 'google/gemma-1.1-7b-it' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'lizpreciatior/lzlv_70b_fp16_hf' | 'mattshumer/Reflection-Llama-3.1-70B' | 'meta-llama/Llama-2-13b-chat-hf' | 'meta-llama/Llama-2-70b-chat-hf' | 'meta-llama/Llama-2-7b-chat-hf' | 'meta-llama/Llama-3.2-1B-Instruct' | 'meta-llama/Llama-3.2-3B-Instruct' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'microsoft/Phi-3-medium-4k-instruct' | 'microsoft/WizardLM-2-7B' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mistral-Nemo-Instruct-2407' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-v0.1' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'nvidia/Nemotron-4-340B-Instruct' | 'openbmb/MiniCPM-Llama3-V-2_5' | 'openchat/openchat-3.6-8b' | 'openchat/openchat_3.5' | (string & {});
+type DeepInfraChatModelId = 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8' | 'meta-llama/Llama-4-Scout-17B-16E-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct' | 'meta-llama/Meta-Llama-3.1-8B-Instruct' | 'meta-llama/Meta-Llama-3.1-405B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct' | 'Qwen/Qwen2.5-72B-Instruct' | 'meta-llama/Llama-3.2-90B-Vision-Instruct' | 'meta-llama/Llama-3.2-11B-Vision-Instruct' | 'microsoft/WizardLM-2-8x22B' | '01-ai/Yi-34B-Chat' | 'Austism/chronos-hermes-13b-v2' | 'Gryphe/MythoMax-L2-13b' | 'Gryphe/MythoMax-L2-13b-turbo' | 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1' | 'KoboldAI/LLaMA2-13B-Tiefighter' | 'NousResearch/Hermes-3-Llama-3.1-405B' | 'Phind/Phind-CodeLlama-34B-v2' | 'Qwen/Qwen2-72B-Instruct' | 'Qwen/Qwen2-7B-Instruct' | 'Qwen/Qwen2.5-7B-Instruct' | 'Qwen/Qwen2.5-Coder-7B' | 'Sao10K/L3-70B-Euryale-v2.1' | 'Sao10K/L3-8B-Lunaris-v1' | 'Sao10K/L3.1-70B-Euryale-v2.2' | 'bigcode/starcoder2-15b' | 'bigcode/starcoder2-15b-instruct-v0.1' | 'codellama/CodeLlama-34b-Instruct-hf' | 'codellama/CodeLlama-70b-Instruct-hf' | 'cognitivecomputations/dolphin-2.6-mixtral-8x7b' | 'cognitivecomputations/dolphin-2.9.1-llama-3-70b' | 'databricks/dbrx-instruct' | 'deepinfra/airoboros-70b' | 'deepseek-ai/DeepSeek-V3' | 'google/codegemma-7b-it' | 'google/gemma-1.1-7b-it' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'lizpreciatior/lzlv_70b_fp16_hf' | 'mattshumer/Reflection-Llama-3.1-70B' | 'meta-llama/Llama-2-13b-chat-hf' | 'meta-llama/Llama-2-70b-chat-hf' | 'meta-llama/Llama-2-7b-chat-hf' | 'meta-llama/Llama-3.2-1B-Instruct' | 'meta-llama/Llama-3.2-3B-Instruct' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'microsoft/Phi-3-medium-4k-instruct' | 'microsoft/WizardLM-2-7B' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mistral-Nemo-Instruct-2407' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-v0.1' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'nvidia/Nemotron-4-340B-Instruct' | 'openbmb/MiniCPM-Llama3-V-2_5' | 'openchat/openchat-3.6-8b' | 'openchat/openchat_3.5' | (string & {});
 interface DeepInfraChatSettings extends OpenAICompatibleChatSettings {
 }
 
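The only change to the public API surface in this release is the addition of the two Llama 4 IDs ('meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8' and 'meta-llama/Llama-4-Scout-17B-16E-Instruct') to the DeepInfraChatModelId union. A minimal sketch of how a consumer might pick one of them up, assuming the usual AI SDK generateText pattern, a DEEPINFRA_API_KEY in the environment, and an illustrative prompt:

```ts
// Sketch: calling one of the Llama 4 model IDs added in 0.2.7.
// Assumes DEEPINFRA_API_KEY is set; the default provider instance reads it from the environment.
import { generateText } from 'ai';
import { deepinfra } from '@ai-sdk/deepinfra';

const { text } = await generateText({
  // One of the two newly added IDs; the trailing `(string & {})` in the union
  // means arbitrary model ID strings still type-check as before.
  model: deepinfra('meta-llama/Llama-4-Scout-17B-16E-Instruct'),
  prompt: 'Summarize what a mixture-of-experts model is in one sentence.',
});

console.log(text);
```

Because the union keeps its `(string & {})` escape hatch, this is a non-breaking widening: existing code passing model IDs as plain strings continues to compile, and the new literals only improve editor autocompletion.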
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ai-sdk/deepinfra",
-  "version": "0.2.5",
+  "version": "0.2.7",
   "license": "Apache-2.0",
   "sideEffects": false,
   "main": "./dist/index.js",
@@ -19,9 +19,9 @@
     }
   },
   "dependencies": {
-    "@ai-sdk/openai-compatible": "0.2.
+    "@ai-sdk/openai-compatible": "0.2.6",
     "@ai-sdk/provider": "1.1.0",
-    "@ai-sdk/provider-utils": "2.2.
+    "@ai-sdk/provider-utils": "2.2.4"
   },
   "devDependencies": {
     "@types/node": "20.17.24",