@nocobase/plugin-ai 1.9.0-beta.17 → 1.9.0-beta.18

This diff reflects the contents of the two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -8,10 +8,10 @@
  */

  module.exports = {
- "@nocobase/client": "1.9.0-beta.17",
- "@nocobase/plugin-workflow": "1.9.0-beta.17",
- "@nocobase/utils": "1.9.0-beta.17",
- "@nocobase/server": "1.9.0-beta.17",
+ "@nocobase/client": "1.9.0-beta.18",
+ "@nocobase/plugin-workflow": "1.9.0-beta.18",
+ "@nocobase/utils": "1.9.0-beta.18",
+ "@nocobase/server": "1.9.0-beta.18",
  "react": "18.2.0",
  "@formily/antd-v5": "1.2.3",
  "@formily/react": "2.3.0",
@@ -19,8 +19,8 @@ module.exports = {
  "antd": "5.24.2",
  "@ant-design/icons": "5.6.1",
  "@formily/shared": "2.3.2",
- "@nocobase/database": "1.9.0-beta.17",
+ "@nocobase/database": "1.9.0-beta.18",
  "axios": "1.7.7",
- "@nocobase/resourcer": "1.9.0-beta.17",
+ "@nocobase/resourcer": "1.9.0-beta.18",
  "lodash": "4.17.21"
  };
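
The hunks above show every @nocobase/* dependency bumped in lockstep from 1.9.0-beta.17 to 1.9.0-beta.18, while third-party pins (react, antd, axios, lodash, and so on) stay untouched. A minimal sketch of that pattern in TypeScript; the file and function names are hypothetical illustrations, not NocoBase's actual release tooling:

// bump-nocobase-deps.ts -- hypothetical sketch, not NocoBase's release script.
// Pins every @nocobase/* entry in a dependency map to one target version,
// the lockstep pattern visible in the hunks above.
type DepsMap = Record<string, string>;

function bumpNocobaseDeps(deps: DepsMap, target: string): DepsMap {
  const out: DepsMap = {};
  for (const [name, version] of Object.entries(deps)) {
    // Only workspace packages move; third-party pins keep their published version.
    out[name] = name.startsWith("@nocobase/") ? target : version;
  }
  return out;
}

// Example: the react pin survives, the workspace package is bumped.
console.log(
  bumpNocobaseDeps(
    { "@nocobase/client": "1.9.0-beta.17", "react": "18.2.0" },
    "1.9.0-beta.18",
  ),
); // -> { '@nocobase/client': '1.9.0-beta.18', react: '18.2.0' }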
@@ -1,25 +1,25 @@
  {
  "AI integration": "KI-Integration",
- "LLM services": "LLM-Dienste",
- "LLM service": "LLM-Dienst",
- "Model": "Modell",
- "Messages": "Nachrichten",
- "Structured output": "Strukturierte Ausgabe",
- "Message": "Nachricht",
- "Role": "Rolle",
- "UID": "UID",
  "Add content": "Inhalt hinzufügen",
  "Add prompt": "Prompt hinzufügen",
- "Provider": "Anbieter",
- "Text": "Text",
- "Image": "Bild",
- "Timout (ms)": "Zeitüberschreitung (ms)",
- "Max retries": "Maximale Wiederholungen",
  "Frequency penalty description": "Zahl zwischen -2,0 und 2,0. Positive Werte bestrafen neue Tokens basierend auf ihrer vorhandenen Häufigkeit im bisherigen Text und verringern so die Wahrscheinlichkeit, dass das Modell dieselbe Zeile wörtlich wiederholt.",
+ "Get models list failed, you can enter a model name manually.": "Abrufen der Modellliste fehlgeschlagen, Sie können einen Modellnamen manuell eingeben.",
+ "Image": "Bild",
+ "LLM service": "LLM-Dienst",
+ "LLM services": "LLM-Dienste",
  "Max completion tokens description": "Eine Obergrenze für die Anzahl der Tokens, die für eine Vervollständigung generiert werden können, einschließlich sichtbarer Ausgabe-Tokens und Reasoning-Tokens.",
+ "Max retries": "Maximale Wiederholungen",
+ "Message": "Nachricht",
+ "Messages": "Nachrichten",
+ "Model": "Modell",
  "Presence penalty description": "Zahl zwischen -2,0 und 2,0. Positive Werte bestrafen neue Tokens basierend darauf, ob sie bisher im Text vorkommen, und erhöhen die Wahrscheinlichkeit, dass das Modell über neue Themen spricht.",
+ "Provider": "Anbieter",
  "Response format description": "Wichtig: Bei Verwendung des JSON-Modus müssen Sie das Modell auch selbst über eine System- oder Benutzernachricht anweisen, JSON zu erzeugen.",
+ "Role": "Rolle",
+ "Structured output": "Strukturierte Ausgabe",
  "Temperature description": "Welche Sampling-Temperatur verwendet werden soll, zwischen 0 und 2. Höhere Werte wie 0,8 machen die Ausgabe zufälliger, während niedrigere Werte wie 0,2 sie fokussierter und deterministischer machen.",
+ "Text": "Text",
+ "Timout (ms)": "Zeitüberschreitung (ms)",
  "Top P description": "Eine Alternative zum Sampling mit Temperatur, genannt Nucleus-Sampling, bei dem das Modell die Ergebnisse der Tokens mit der Wahrscheinlichkeitsmasse top_p berücksichtigt. 0,1 bedeutet also, dass nur die Tokens berücksichtigt werden, die die obersten 10% der Wahrscheinlichkeitsmasse ausmachen.",
- "Get models list failed, you can enter a model name manually.": "Abrufen der Modellliste fehlgeschlagen, Sie können einen Modellnamen manuell eingeben."
- }
+ "UID": "UID"
+ }
@@ -1,25 +1,25 @@
  {
  "AI integration": "AI integration",
- "LLM services": "LLM services",
- "LLM service": "LLM service",
- "Model": "Model",
- "Messages": "Messages",
- "Structured output": "Structured output",
- "Message": "Message",
- "Role": "Role",
- "UID": "UID",
  "Add content": "Add content",
  "Add prompt": "Add prompt",
- "Provider": "Provider",
- "Text": "Text",
- "Image": "Image",
- "Timout (ms)": "Timout (ms)",
- "Max retries": "Max retries",
  "Frequency penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "Image",
+ "LLM service": "LLM service",
+ "LLM services": "LLM services",
  "Max completion tokens description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.",
+ "Max retries": "Max retries",
+ "Message": "Message",
+ "Messages": "Messages",
+ "Model": "Model",
  "Presence penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+ "Provider": "Provider",
  "Response format description": "Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.",
+ "Role": "Role",
+ "Structured output": "Structured output",
  "Temperature description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
+ "Text": "Text",
+ "Timout (ms)": "Timout (ms)",
  "Top P description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
- "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually."
- }
+ "UID": "UID"
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI integration",
+ "Add content": "Add content",
+ "Add prompt": "Add prompt",
+ "Frequency penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "Image",
+ "LLM service": "LLM service",
+ "LLM services": "LLM services",
+ "Max completion tokens description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.",
+ "Max retries": "Max retries",
+ "Message": "Message",
+ "Messages": "Messages",
+ "Model": "Model",
+ "Presence penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+ "Provider": "Provider",
+ "Response format description": "Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.",
+ "Role": "Role",
+ "Structured output": "Structured output",
+ "Temperature description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
+ "Text": "Text",
+ "Timout (ms)": "Timout (ms)",
+ "Top P description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
+ "UID": "UID"
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI integration",
+ "Add content": "Add content",
+ "Add prompt": "Add prompt",
+ "Frequency penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "Image",
+ "LLM service": "LLM service",
+ "LLM services": "LLM services",
+ "Max completion tokens description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.",
+ "Max retries": "Max retries",
+ "Message": "Message",
+ "Messages": "Messages",
+ "Model": "Model",
+ "Presence penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+ "Provider": "Provider",
+ "Response format description": "Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.",
+ "Role": "Role",
+ "Structured output": "Structured output",
+ "Temperature description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
+ "Text": "Text",
+ "Timout (ms)": "Timout (ms)",
+ "Top P description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
+ "UID": "UID"
+ }
@@ -1,25 +1,25 @@
  {
  "AI integration": "Integrazione AI",
- "LLM services": "Servizi LLM",
- "LLM service": "Servizio LLM",
- "Model": "Modello",
- "Messages": "Messaggi",
- "Structured output": "Output strutturato",
- "Message": "Messaggio",
- "Role": "Ruolo",
- "UID": "UID",
  "Add content": "Aggiungi contenuto",
  "Add prompt": "Aggiungi prompt",
- "Provider": "Fornitore",
- "Text": "Testo",
- "Image": "Immagine",
- "Timout (ms)": "Timeout (ms)",
- "Max retries": "Tentativi massimi",
  "Frequency penalty description": "Un numero compreso tra -2,0 e 2,0. Se il valore è positivo, i nuovi token vengono penalizzati in base alla loro frequenza di occorrenza nel testo esistente, riducendo la probabilità che il modello ripeta lo stesso contenuto.",
+ "Get models list failed, you can enter a model name manually.": "Recupero dell'elenco dei modelli fallito, è possibile inserire manualmente il nome del modello.",
+ "Image": "Immagine",
+ "LLM service": "Servizio LLM",
+ "LLM services": "Servizi LLM",
  "Max completion tokens description": "Un limite superiore per il numero di token che possono essere generati per un completamento, inclusi i token di output visibili e i token di ragionamento.",
+ "Max retries": "Tentativi massimi",
+ "Message": "Messaggio",
+ "Messages": "Messaggi",
+ "Model": "Modello",
  "Presence penalty description": "Numero tra -2.0 e 2.0. I valori positivi penalizzano i nuovi token in base alla loro presenza o meno nel testo finora, aumentando la probabilità del modello di parlare di nuovi argomenti.",
+ "Provider": "Fornitore",
  "Response format description": "Importante: quando si utilizza la modalità JSON, è necessario anche istruire il modello a produrre JSON tramite un messaggio di sistema o messaggio utente.",
+ "Role": "Ruolo",
+ "Structured output": "Output strutturato",
  "Temperature description": "Quale temperatura di campionamento utilizzare, tra 0 e 2. Valori più alti come 0.8 renderanno l'output più casuale, mentre valori più bassi come 0.2 lo renderanno più focalizzato e deterministico.",
+ "Text": "Testo",
+ "Timout (ms)": "Timeout (ms)",
  "Top P description": "Un'alternativa al campionamento con temperatura, chiamata campionamento del nucleo, in cui il modello considera i risultati dei token con massa di probabilità top_p. Quindi 0.1 significa che vengono considerati solo i token che comprendono la massa di probabilità del 10% o superiore.",
- "Get models list failed, you can enter a model name manually.": "Recupero dell'elenco dei modelli fallito, è possibile inserire manualmente il nome del modello."
- }
+ "UID": "UID"
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI統合",
+ "Add content": "コンテンツ追加",
+ "Add prompt": "プロンプト追加",
+ "Frequency penalty description": "-2.0から2.0の数値。正の値を設定すると、テキスト内での既存の出現頻度に基づき新規トークンにペナルティを課し、モデルが同一表現を反復する可能性を低減します。",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "画像",
+ "LLM service": "LLMサービス",
+ "LLM services": "LLMサービス",
+ "Max completion tokens description": "生成可能なトークン数の上限値(可視出力トークンと推論トークンを含む)。入力トークンと出力トークンの総数はモデルのコンテキスト長制限に準拠します。",
+ "Max retries": "最大リトライ回数",
+ "Message": "メッセージ",
+ "Messages": "メッセージ",
+ "Model": "モデル",
+ "Presence penalty description": "-2.0から2.0の数値。正の値を設定すると、テキスト内での既存出現有無に基づき新規トークンにペナルティを課し、モデルが新規トピックを扱う可能性を増加させます。",
+ "Provider": "プロバイダ",
+ "Response format description": "重要:JSONモード使用時は、システムまたはユーザーメッセージを通じてモデルにJSON生成を明示的に指示する必要があります。",
+ "Role": "ロール",
+ "Structured output": "構造化出力",
+ "Temperature description": "サンプリング温度(0~2)。0.8などの高い値は出力のランダム性を増加させ、0.2などの低い値は出力を集中化・決定論的にします。",
+ "Text": "テキスト",
+ "Timout (ms)": "タイムアウト(ミリ秒)",
+ "Top P description": "温度サンプリングの代替手法である核サンプリング(top_p)を指定。0.1は上位10%確率質量のトークンのみを考慮対象とします。",
+ "UID": "UID"
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI 통합",
+ "Add content": "콘텐츠 추가",
+ "Add prompt": "프롬프트 추가",
+ "Frequency penalty description": "-2.0에서 2.0 사이의 숫자입니다. 양수일수록 이미 등장한 단어에 패널티를 주어, 같은 문장이 반복될 확률을 낮춥니다.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "이미지",
+ "LLM service": "LLM 서비스",
+ "LLM services": "LLM 서비스",
+ "Max completion tokens description": "완료에 대해 생성할 수 있는 토큰 수의 상한값으로, 표시 가능한 출력 토큰과 추론 토큰이 포함됩니다.",
+ "Max retries": "최대 재시도 횟수",
+ "Message": "메시지",
+ "Messages": "메시지",
+ "Model": "모델",
+ "Presence penalty description": "-2.0에서 2.0 사이의 숫자입니다. 양수 값은 텍스트에 등장 여부에 따라 새로운 토큰에 페널티를 부여하여 모델이 새로운 주제에 대해 이야기할 가능성을 높입니다.",
+ "Provider": "공급자",
+ "Response format description": "중요: JSON 모드를 사용하는 경우 시스템이나 사용자 메시지를 통해 모델이 직접 JSON을 생성하도록 지시해야 합니다.",
+ "Role": "역할",
+ "Structured output": "구조화된 출력",
+ "Temperature description": "0과 2 사이의 샘플링 온도를 사용하세요. 0.8과 같은 높은 값을 사용하면 출력이 더 무작위화되고, 0.2와 같은 낮은 값을 사용하면 출력이 더 집중적이고 결정론적으로 됩니다.",
+ "Text": "텍스트",
+ "Timout (ms)": "타임아웃 (밀리초)",
+ "Top P description": "온도 기반 샘플링의 대안으로, 핵 샘플링이라고 불리는데, 이 모델은 확률 질량이 상위 p인 토큰의 결과를 고려합니다. 따라서 0.1은 확률 질량이 상위 10%인 토큰만 고려한다는 것을 의미합니다.",
+ "UID": "UID"
+ }
@@ -1,26 +1,25 @@
  {
- "AI integration": "AI integratie",
- "LLM services": "LLM diensten",
- "LLM service": "LLM dienst",
- "Model": "Model",
- "Messages": "Berichten",
- "Structured output": "Gestructureerde output",
- "Message": "Bericht",
- "Role": "Rol",
- "UID": "UID",
- "Add content": "Voeg inhoud toe",
- "Add prompt": "Voeg prompt toe",
- "Provider": "Provider",
- "Text": "Tekst",
- "Image": "Afbeelding",
- "Timout (ms)": "Timout (ms)",
- "Max retries": "Max herhalingen",
- "Frequency penalty description": "Nummer tussen -2.0 en 2.0. Positieve waarden straffen nieuwe tokens op basis van hun bestaande frequentie in de tekst tot nu toe, waardoor de kans van het model om dezelfde regel letterlijk te herhalen afneemt.",
- "Max completion tokens description": "Een bovengrens voor het aantal tokens dat kan worden gegenereerd voor een voltooiing, inclusief zichtbare outputtokens en redeneertokens.",
- "Presence penalty description": "Nummer tussen -2.0 en 2.0. Positieve waarden straffen nieuwe tokens op basis van of ze tot nu toe in de tekst voorkomen, waardoor de kans van het model om over nieuwe onderwerpen te praten toeneemt.",
- "Response format description": "Belangrijk: bij gebruik van de JSON-modus moet u het model ook zelf opdracht geven om JSON te produceren via een systeem- of gebruikersbericht.",
- "Temperature description": "Welke bemonsteringstemperatuur te gebruiken, tussen 0 en 2. Hogere waarden zoals 0,8 maken de output willekeuriger, terwijl lagere waarden zoals 0,2 deze meer gefocust en deterministisch maken.",
- "Top P description": "Een alternatief voor bemonstering met temperatuur, genaamd nucleusbemonstering, waarbij het model de resultaten van de tokens met top_p waarschijnlijkheidsmassa overweegt. Dus 0,1 betekent dat alleen de tokens die de top 10% waarschijnlijkheidsmassa vormen, worden overwogen.",
- "Get models list failed, you can enter a model name manually.": "Het ophalen van de lijst met modellen is mislukt, u kunt een modelnaam handmatig invoeren."
- }
-
+ "AI integration": "AI integratie",
+ "Add content": "Voeg inhoud toe",
+ "Add prompt": "Voeg prompt toe",
+ "Frequency penalty description": "Nummer tussen -2.0 en 2.0. Positieve waarden straffen nieuwe tokens op basis van hun bestaande frequentie in de tekst tot nu toe, waardoor de kans van het model om dezelfde regel letterlijk te herhalen afneemt.",
+ "Get models list failed, you can enter a model name manually.": "Het ophalen van de lijst met modellen is mislukt, u kunt een modelnaam handmatig invoeren.",
+ "Image": "Afbeelding",
+ "LLM service": "LLM dienst",
+ "LLM services": "LLM diensten",
+ "Max completion tokens description": "Een bovengrens voor het aantal tokens dat kan worden gegenereerd voor een voltooiing, inclusief zichtbare outputtokens en redeneertokens.",
+ "Max retries": "Max herhalingen",
+ "Message": "Bericht",
+ "Messages": "Berichten",
+ "Model": "Model",
+ "Presence penalty description": "Nummer tussen -2.0 en 2.0. Positieve waarden straffen nieuwe tokens op basis van of ze tot nu toe in de tekst voorkomen, waardoor de kans van het model om over nieuwe onderwerpen te praten toeneemt.",
+ "Provider": "Provider",
+ "Response format description": "Belangrijk: bij gebruik van de JSON-modus moet u het model ook zelf opdracht geven om JSON te produceren via een systeem- of gebruikersbericht.",
+ "Role": "Rol",
+ "Structured output": "Gestructureerde output",
+ "Temperature description": "Welke bemonsteringstemperatuur te gebruiken, tussen 0 en 2. Hogere waarden zoals 0,8 maken de output willekeuriger, terwijl lagere waarden zoals 0,2 deze meer gefocust en deterministisch maken.",
+ "Text": "Tekst",
+ "Timout (ms)": "Timout (ms)",
+ "Top P description": "Een alternatief voor bemonstering met temperatuur, genaamd nucleusbemonstering, waarbij het model de resultaten van de tokens met top_p waarschijnlijkheidsmassa overweegt. Dus 0,1 betekent dat alleen de tokens die de top 10% waarschijnlijkheidsmassa vormen, worden overwogen.",
+ "UID": "UID"
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI integration",
+ "Add content": "Add content",
+ "Add prompt": "Add prompt",
+ "Frequency penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "Image",
+ "LLM service": "LLM service",
+ "LLM services": "LLM services",
+ "Max completion tokens description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.",
+ "Max retries": "Max retries",
+ "Message": "Message",
+ "Messages": "Messages",
+ "Model": "Model",
+ "Presence penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+ "Provider": "Provider",
+ "Response format description": "Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.",
+ "Role": "Role",
+ "Structured output": "Structured output",
+ "Temperature description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
+ "Text": "Text",
+ "Timout (ms)": "Timout (ms)",
+ "Top P description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
+ "UID": "UID"
+ }
@@ -1,7 +1,7 @@
  {
+ "AI integration": "Интеграция с ИИ",
  "Add content": "Добавить контент",
  "Add prompt": "Добавить запрос",
- "AI integration": "Интеграция с ИИ",
  "Frequency penalty description": "Число от -2.0 до 2.0. Положительные значения штрафуют новые токены в зависимости от их частоты в тексте, уменьшая вероятность повторения модели.",
  "Get models list failed, you can enter a model name manually.": "Не удалось получить список моделей, вы можете ввести имя модели вручную.",
  "Image": "Изображение",
@@ -22,4 +22,4 @@
  "Timout (ms)": "Тайм-аут (мс)",
  "Top P description": "Альтернатива выборке с температурой — ядерная выборка, где модель учитывает токены с массой вероятности top_p. Например, 0.1 означает учет только 10% наиболее вероятных токенов.",
  "UID": "UID"
- }
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI integration",
+ "Add content": "Add content",
+ "Add prompt": "Add prompt",
+ "Frequency penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "Image",
+ "LLM service": "LLM service",
+ "LLM services": "LLM services",
+ "Max completion tokens description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.",
+ "Max retries": "Max retries",
+ "Message": "Message",
+ "Messages": "Messages",
+ "Model": "Model",
+ "Presence penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+ "Provider": "Provider",
+ "Response format description": "Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.",
+ "Role": "Role",
+ "Structured output": "Structured output",
+ "Temperature description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
+ "Text": "Text",
+ "Timout (ms)": "Timout (ms)",
+ "Top P description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
+ "UID": "UID"
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI integration",
+ "Add content": "Add content",
+ "Add prompt": "Add prompt",
+ "Frequency penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "Image",
+ "LLM service": "LLM service",
+ "LLM services": "LLM services",
+ "Max completion tokens description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.",
+ "Max retries": "Max retries",
+ "Message": "Message",
+ "Messages": "Messages",
+ "Model": "Model",
+ "Presence penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+ "Provider": "Provider",
+ "Response format description": "Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.",
+ "Role": "Role",
+ "Structured output": "Structured output",
+ "Temperature description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
+ "Text": "Text",
+ "Timout (ms)": "Timout (ms)",
+ "Top P description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
+ "UID": "UID"
+ }
@@ -1,25 +1,25 @@
  {
  "AI integration": "AI 集成",
- "LLM services": "LLM 服务",
- "LLM service": "LLM 服务",
- "Model": "模型",
- "UID": "唯一标识",
- "Provider": "LLM 类型",
- "Messages": "消息",
- "Structured output": "结构化输出",
- "Message": "消息",
- "Role": "角色",
  "Add content": "添加内容",
  "Add prompt": "添加提示",
- "Text": "文本",
- "Image": "图片",
- "Timout (ms)": "超时时间(毫秒)",
- "Max retries": "最大重试次数",
  "Frequency penalty description": "介于 -2.0 和 2.0 之间的数字。如果该值为正,那么新 token 会根据其在已有文本中的出现频率受到相应的惩罚,降低模型重复相同内容的可能性。",
+ "Get models list failed, you can enter a model name manually.": "获取模型列表失败,你可以手动输入模型名称。",
+ "Image": "图片",
+ "LLM service": "LLM 服务",
+ "LLM services": "LLM 服务",
  "Max completion tokens description": "限制一次请求中模型生成 completion 的最大 token 数。输入 token 和输出 token 的总长度受模型的上下文长度的限制。",
+ "Max retries": "最大重试次数",
+ "Message": "消息",
+ "Messages": "消息",
+ "Model": "模型",
  "Presence penalty description": "介于 -2.0 和 2.0 之间的数字。如果该值为正,那么新 token 会根据其是否已在已有文本中出现受到相应的惩罚,从而增加模型谈论新主题的可能性。",
+ "Provider": "LLM 类型",
  "Response format description": "使用 JSON 模式时,你还必须通过系统或用户消息指示模型生成 JSON。",
+ "Role": "角色",
+ "Structured output": "结构化输出",
  "Temperature description": "采样温度,介于 0 和 2 之间。更高的值,如 0.8,会使输出更随机,而更低的值,如 0.2,会使其更加集中和确定。",
+ "Text": "文本",
+ "Timout (ms)": "超时时间(毫秒)",
  "Top P description": "作为调节采样温度的替代方案,模型会考虑前 top_p 概率的 token 的结果。所以 0.1 就意味着只有包括在最高 10% 概率中的 token 会被考虑。",
- "Get models list failed, you can enter a model name manually.": "获取模型列表失败,你可以手动输入模型名称。"
- }
+ "UID": "唯一标识"
+ }
@@ -0,0 +1,25 @@
+ {
+ "AI integration": "AI integration",
+ "Add content": "Add content",
+ "Add prompt": "Add prompt",
+ "Frequency penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+ "Get models list failed, you can enter a model name manually.": "Get models list failed, you can enter a model name manually.",
+ "Image": "Image",
+ "LLM service": "LLM service",
+ "LLM services": "LLM services",
+ "Max completion tokens description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.",
+ "Max retries": "Max retries",
+ "Message": "Message",
+ "Messages": "Messages",
+ "Model": "Model",
+ "Presence penalty description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+ "Provider": "Provider",
+ "Response format description": "Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.",
+ "Role": "Role",
+ "Structured output": "Structured output",
+ "Temperature description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
+ "Text": "Text",
+ "Timout (ms)": "Timout (ms)",
+ "Top P description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
+ "UID": "UID"
+ }
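
Across the locale hunks above, the translation keys are re-sorted into plain code-point (ASCII) order, and previously missing locales are seeded with the English source strings. A minimal sketch of that kind of normalization in TypeScript, assuming a flat key-to-string JSON file; the script name and path are hypothetical, not NocoBase's actual tooling:

// sort-locale-keys.ts -- hypothetical sketch, not NocoBase's actual tooling.
// Rewrites a flat locale JSON file with its keys in code-point order, the
// ordering visible above ("AI integration" sorts before "Add content"
// because "I" < "d" in ASCII).
import { readFileSync, writeFileSync } from "node:fs";

function sortLocaleFile(path: string): void {
  const messages = JSON.parse(readFileSync(path, "utf8")) as Record<string, string>;
  // Plain objects preserve insertion order for non-numeric string keys,
  // so re-inserting in sorted order fixes the serialized key order.
  const sorted = Object.fromEntries(
    Object.entries(messages).sort(([a], [b]) => (a < b ? -1 : a > b ? 1 : 0)),
  );
  writeFileSync(path, JSON.stringify(sorted, null, 2) + "\n");
}

sortLocaleFile("./src/locale/de-DE.json"); // hypothetical path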
@@ -1 +1 @@
- {"name":"@langchain/core","version":"0.3.42","description":"Core LangChain.js abstractions and schemas","type":"module","engines":{"node":">=18"},"main":"./index.js","types":"./index.d.ts","repository":{"type":"git","url":"git@github.com:langchain-ai/langchainjs.git"},"homepage":"https://github.com/langchain-ai/langchainjs/tree/main/langchain-core/","scripts":{"build":"yarn turbo:command build:internal --filter=@langchain/core","build:internal":"yarn lc_build --create-entrypoints --pre --tree-shaking","clean":"rm -rf .turbo dist/","lint:eslint":"NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/","lint:dpdm":"dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts","lint":"yarn lint:eslint && yarn lint:dpdm","lint:fix":"yarn lint:eslint --fix && yarn lint:dpdm","prepack":"yarn build","release":"release-it --only-version --config .release-it.json","test":"NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%","test:integration":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:watch":"NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts","test:single":"NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000","format":"prettier --config .prettierrc --write \"src\"","format:check":"prettier --config .prettierrc --check \"src\""},"author":"LangChain","license":"MIT","dependencies":{"@cfworker/json-schema":"^4.0.2","ansi-styles":"^5.0.0","camelcase":"6","decamelize":"1.2.0","js-tiktoken":"^1.0.12","langsmith":">=0.2.8 <0.4.0","mustache":"^4.2.0","p-queue":"^6.6.2","p-retry":"4","uuid":"^10.0.0","zod":"^3.22.4","zod-to-json-schema":"^3.22.3"},"devDependencies":{"@jest/globals":"^29.5.0","@langchain/scripts":">=0.1.0 <0.2.0","@swc/core":"^1.3.90","@swc/jest":"^0.2.29","@types/decamelize":"^1.2.0","@types/mustache":"^4","dpdm":"^3.12.0","eslint":"^8.33.0","eslint-config-airbnb-base":"^15.0.0","eslint-config-prettier":"^8.6.0","eslint-plugin-import":"^2.27.5","eslint-plugin-jest":"^27.6.0","eslint-plugin-no-instanceof":"^1.0.1","eslint-plugin-prettier":"^4.2.1","jest":"^29.5.0","jest-environment-node":"^29.6.4","ml-matrix":"^6.10.4","prettier":"^2.8.3","release-it":"^17.6.0","rimraf":"^5.0.1","ts-jest":"^29.1.0","typescript":"~5.1.6","web-streams-polyfill":"^4.0.0"},"publishConfig":{"access":"public"},"keywords":["llm","ai","gpt3","chain","prompt","prompt engineering","chatgpt","machine learning","ml","openai","embeddings","vectorstores"],"exports":{".":{"types":{"import":"./index.d.ts","require":"./index.d.cts","default":"./index.d.ts"},"import":"./index.js","require":"./index.cjs"},"./agents":{"types":{"import":"./agents.d.ts","require":"./agents.d.cts","default":"./agents.d.ts"},"import":"./agents.js","require":"./agents.cjs"},"./caches":{"types":{"import":"./caches.d.ts","require":"./caches.d.cts","default":"./caches.d.ts"},"import":"./caches.js","require":"./caches.cjs"},"./callbacks/base":{"types":{"import":"./callbacks/base.d.ts","require":"./callbacks/base.d.cts","default":"./callbacks/base.d.ts"},"import":"./callbacks/base.js","require":"./callbacks/base.cjs"},"./callbacks/dispatch":{"types":{"import":"./callbacks/dispatch.d.ts","require":"./callbacks/dispatch.d.cts","default":"./callbacks/dispatch.d.ts"},"import":"./callbacks/dispatch.js","require":"./callbacks/dispatch.cjs"},"./callbacks/dispatch/web":{"types":{"import":"./callbacks/dispatch/web.d.ts","require":"./callbacks/dispatch/web.d.cts","default":"./callbacks/dispatch/web.d.ts"},"import":"./callbacks/dispatch/web.js","require":"./callbacks/dispatch/web.cjs"},"./callbacks/manager":{"types":{"import":"./callbacks/manager.d.ts","require":"./callbacks/manager.d.cts","default":"./callbacks/manager.d.ts"},"import":"./callbacks/manager.js","require":"./callbacks/manager.cjs"},"./callbacks/promises":{"types":{"import":"./callbacks/promises.d.ts","require":"./callbacks/promises.d.cts","default":"./callbacks/promises.d.ts"},"import":"./callbacks/promises.js","require":"./callbacks/promises.cjs"},"./chat_history":{"types":{"import":"./chat_history.d.ts","require":"./chat_history.d.cts","default":"./chat_history.d.ts"},"import":"./chat_history.js","require":"./chat_history.cjs"},"./context":{"types":{"import":"./context.d.ts","require":"./context.d.cts","default":"./context.d.ts"},"import":"./context.js","require":"./context.cjs"},"./documents":{"types":{"import":"./documents.d.ts","require":"./documents.d.cts","default":"./documents.d.ts"},"import":"./documents.js","require":"./documents.cjs"},"./document_loaders/base":{"types":{"import":"./document_loaders/base.d.ts","require":"./document_loaders/base.d.cts","default":"./document_loaders/base.d.ts"},"import":"./document_loaders/base.js","require":"./document_loaders/base.cjs"},"./document_loaders/langsmith":{"types":{"import":"./document_loaders/langsmith.d.ts","require":"./document_loaders/langsmith.d.cts","default":"./document_loaders/langsmith.d.ts"},"import":"./document_loaders/langsmith.js","require":"./document_loaders/langsmith.cjs"},"./embeddings":{"types":{"import":"./embeddings.d.ts","require":"./embeddings.d.cts","default":"./embeddings.d.ts"},"import":"./embeddings.js","require":"./embeddings.cjs"},"./example_selectors":{"types":{"import":"./example_selectors.d.ts","require":"./example_selectors.d.cts","default":"./example_selectors.d.ts"},"import":"./example_selectors.js","require":"./example_selectors.cjs"},"./indexing":{"types":{"import":"./indexing.d.ts","require":"./indexing.d.cts","default":"./indexing.d.ts"},"import":"./indexing.js","require":"./indexing.cjs"},"./language_models/base":{"types":{"import":"./language_models/base.d.ts","require":"./language_models/base.d.cts","default":"./language_models/base.d.ts"},"import":"./language_models/base.js","require":"./language_models/base.cjs"},"./language_models/chat_models":{"types":{"import":"./language_models/chat_models.d.ts","require":"./language_models/chat_models.d.cts","default":"./language_models/chat_models.d.ts"},"import":"./language_models/chat_models.js","require":"./language_models/chat_models.cjs"},"./language_models/llms":{"types":{"import":"./language_models/llms.d.ts","require":"./language_models/llms.d.cts","default":"./language_models/llms.d.ts"},"import":"./language_models/llms.js","require":"./language_models/llms.cjs"},"./load":{"types":{"import":"./load.d.ts","require":"./load.d.cts","default":"./load.d.ts"},"import":"./load.js","require":"./load.cjs"},"./load/serializable":{"types":{"import":"./load/serializable.d.ts","require":"./load/serializable.d.cts","default":"./load/serializable.d.ts"},"import":"./load/serializable.js","require":"./load/serializable.cjs"},"./memory":{"types":{"import":"./memory.d.ts","require":"./memory.d.cts","default":"./memory.d.ts"},"import":"./memory.js","require":"./memory.cjs"},"./messages":{"types":{"import":"./messages.d.ts","require":"./messages.d.cts","default":"./messages.d.ts"},"import":"./messages.js","require":"./messages.cjs"},"./messages/tool":{"types":{"import":"./messages/tool.d.ts","require":"./messages/tool.d.cts","default":"./messages/tool.d.ts"},"import":"./messages/tool.js","require":"./messages/tool.cjs"},"./output_parsers":{"types":{"import":"./output_parsers.d.ts","require":"./output_parsers.d.cts","default":"./output_parsers.d.ts"},"import":"./output_parsers.js","require":"./output_parsers.cjs"},"./output_parsers/openai_tools":{"types":{"import":"./output_parsers/openai_tools.d.ts","require":"./output_parsers/openai_tools.d.cts","default":"./output_parsers/openai_tools.d.ts"},"import":"./output_parsers/openai_tools.js","require":"./output_parsers/openai_tools.cjs"},"./output_parsers/openai_functions":{"types":{"import":"./output_parsers/openai_functions.d.ts","require":"./output_parsers/openai_functions.d.cts","default":"./output_parsers/openai_functions.d.ts"},"import":"./output_parsers/openai_functions.js","require":"./output_parsers/openai_functions.cjs"},"./outputs":{"types":{"import":"./outputs.d.ts","require":"./outputs.d.cts","default":"./outputs.d.ts"},"import":"./outputs.js","require":"./outputs.cjs"},"./prompts":{"types":{"import":"./prompts.d.ts","require":"./prompts.d.cts","default":"./prompts.d.ts"},"import":"./prompts.js","require":"./prompts.cjs"},"./prompt_values":{"types":{"import":"./prompt_values.d.ts","require":"./prompt_values.d.cts","default":"./prompt_values.d.ts"},"import":"./prompt_values.js","require":"./prompt_values.cjs"},"./runnables":{"types":{"import":"./runnables.d.ts","require":"./runnables.d.cts","default":"./runnables.d.ts"},"import":"./runnables.js","require":"./runnables.cjs"},"./runnables/graph":{"types":{"import":"./runnables/graph.d.ts","require":"./runnables/graph.d.cts","default":"./runnables/graph.d.ts"},"import":"./runnables/graph.js","require":"./runnables/graph.cjs"},"./runnables/remote":{"types":{"import":"./runnables/remote.d.ts","require":"./runnables/remote.d.cts","default":"./runnables/remote.d.ts"},"import":"./runnables/remote.js","require":"./runnables/remote.cjs"},"./retrievers":{"types":{"import":"./retrievers.d.ts","require":"./retrievers.d.cts","default":"./retrievers.d.ts"},"import":"./retrievers.js","require":"./retrievers.cjs"},"./retrievers/document_compressors":{"types":{"import":"./retrievers/document_compressors.d.ts","require":"./retrievers/document_compressors.d.cts","default":"./retrievers/document_compressors.d.ts"},"import":"./retrievers/document_compressors.js","require":"./retrievers/document_compressors.cjs"},"./singletons":{"types":{"import":"./singletons.d.ts","require":"./singletons.d.cts","default":"./singletons.d.ts"},"import":"./singletons.js","require":"./singletons.cjs"},"./stores":{"types":{"import":"./stores.d.ts","require":"./stores.d.cts","default":"./stores.d.ts"},"import":"./stores.js","require":"./stores.cjs"},"./structured_query":{"types":{"import":"./structured_query.d.ts","require":"./structured_query.d.cts","default":"./structured_query.d.ts"},"import":"./structured_query.js","require":"./structured_query.cjs"},"./tools":{"types":{"import":"./tools.d.ts","require":"./tools.d.cts","default":"./tools.d.ts"},"import":"./tools.js","require":"./tools.cjs"},"./tracers/base":{"types":{"import":"./tracers/base.d.ts","require":"./tracers/base.d.cts","default":"./tracers/base.d.ts"},"import":"./tracers/base.js","require":"./tracers/base.cjs"},"./tracers/console":{"types":{"import":"./tracers/console.d.ts","require":"./tracers/console.d.cts","default":"./tracers/console.d.ts"},"import":"./tracers/console.js","require":"./tracers/console.cjs"},"./tracers/initialize":{"types":{"import":"./tracers/initialize.d.ts","require":"./tracers/initialize.d.cts","default":"./tracers/initialize.d.ts"},"import":"./tracers/initialize.js","require":"./tracers/initialize.cjs"},"./tracers/log_stream":{"types":{"import":"./tracers/log_stream.d.ts","require":"./tracers/log_stream.d.cts","default":"./tracers/log_stream.d.ts"},"import":"./tracers/log_stream.js","require":"./tracers/log_stream.cjs"},"./tracers/run_collector":{"types":{"import":"./tracers/run_collector.d.ts","require":"./tracers/run_collector.d.cts","default":"./tracers/run_collector.d.ts"},"import":"./tracers/run_collector.js","require":"./tracers/run_collector.cjs"},"./tracers/tracer_langchain":{"types":{"import":"./tracers/tracer_langchain.d.ts","require":"./tracers/tracer_langchain.d.cts","default":"./tracers/tracer_langchain.d.ts"},"import":"./tracers/tracer_langchain.js","require":"./tracers/tracer_langchain.cjs"},"./tracers/tracer_langchain_v1":{"types":{"import":"./tracers/tracer_langchain_v1.d.ts","require":"./tracers/tracer_langchain_v1.d.cts","default":"./tracers/tracer_langchain_v1.d.ts"},"import":"./tracers/tracer_langchain_v1.js","require":"./tracers/tracer_langchain_v1.cjs"},"./types/stream":{"types":{"import":"./types/stream.d.ts","require":"./types/stream.d.cts","default":"./types/stream.d.ts"},"import":"./types/stream.js","require":"./types/stream.cjs"},"./utils/async_caller":{"types":{"import":"./utils/async_caller.d.ts","require":"./utils/async_caller.d.cts","default":"./utils/async_caller.d.ts"},"import":"./utils/async_caller.js","require":"./utils/async_caller.cjs"},"./utils/chunk_array":{"types":{"import":"./utils/chunk_array.d.ts","require":"./utils/chunk_array.d.cts","default":"./utils/chunk_array.d.ts"},"import":"./utils/chunk_array.js","require":"./utils/chunk_array.cjs"},"./utils/env":{"types":{"import":"./utils/env.d.ts","require":"./utils/env.d.cts","default":"./utils/env.d.ts"},"import":"./utils/env.js","require":"./utils/env.cjs"},"./utils/event_source_parse":{"types":{"import":"./utils/event_source_parse.d.ts","require":"./utils/event_source_parse.d.cts","default":"./utils/event_source_parse.d.ts"},"import":"./utils/event_source_parse.js","require":"./utils/event_source_parse.cjs"},"./utils/function_calling":{"types":{"import":"./utils/function_calling.d.ts","require":"./utils/function_calling.d.cts","default":"./utils/function_calling.d.ts"},"import":"./utils/function_calling.js","require":"./utils/function_calling.cjs"},"./utils/hash":{"types":{"import":"./utils/hash.d.ts","require":"./utils/hash.d.cts","default":"./utils/hash.d.ts"},"import":"./utils/hash.js","require":"./utils/hash.cjs"},"./utils/json_patch":{"types":{"import":"./utils/json_patch.d.ts","require":"./utils/json_patch.d.cts","default":"./utils/json_patch.d.ts"},"import":"./utils/json_patch.js","require":"./utils/json_patch.cjs"},"./utils/json_schema":{"types":{"import":"./utils/json_schema.d.ts","require":"./utils/json_schema.d.cts","default":"./utils/json_schema.d.ts"},"import":"./utils/json_schema.js","require":"./utils/json_schema.cjs"},"./utils/math":{"types":{"import":"./utils/math.d.ts","require":"./utils/math.d.cts","default":"./utils/math.d.ts"},"import":"./utils/math.js","require":"./utils/math.cjs"},"./utils/stream":{"types":{"import":"./utils/stream.d.ts","require":"./utils/stream.d.cts","default":"./utils/stream.d.ts"},"import":"./utils/stream.js","require":"./utils/stream.cjs"},"./utils/testing":{"types":{"import":"./utils/testing.d.ts","require":"./utils/testing.d.cts","default":"./utils/testing.d.ts"},"import":"./utils/testing.js","require":"./utils/testing.cjs"},"./utils/tiktoken":{"types":{"import":"./utils/tiktoken.d.ts","require":"./utils/tiktoken.d.cts","default":"./utils/tiktoken.d.ts"},"import":"./utils/tiktoken.js","require":"./utils/tiktoken.cjs"},"./utils/types":{"types":{"import":"./utils/types.d.ts","require":"./utils/types.d.cts","default":"./utils/types.d.ts"},"import":"./utils/types.js","require":"./utils/types.cjs"},"./vectorstores":{"types":{"import":"./vectorstores.d.ts","require":"./vectorstores.d.cts","default":"./vectorstores.d.ts"},"import":"./vectorstores.js","require":"./vectorstores.cjs"},"./package.json":"./package.json"},"files":["dist/","agents.cjs","agents.js","agents.d.ts","agents.d.cts","caches.cjs","caches.js","caches.d.ts","caches.d.cts","callbacks/base.cjs","callbacks/base.js","callbacks/base.d.ts","callbacks/base.d.cts","callbacks/dispatch.cjs","callbacks/dispatch.js","callbacks/dispatch.d.ts","callbacks/dispatch.d.cts","callbacks/dispatch/web.cjs","callbacks/dispatch/web.js","callbacks/dispatch/web.d.ts","callbacks/dispatch/web.d.cts","callbacks/manager.cjs","callbacks/manager.js","callbacks/manager.d.ts","callbacks/manager.d.cts","callbacks/promises.cjs","callbacks/promises.js","callbacks/promises.d.ts","callbacks/promises.d.cts","chat_history.cjs","chat_history.js","chat_history.d.ts","chat_history.d.cts","context.cjs","context.js","context.d.ts","context.d.cts","documents.cjs","documents.js","documents.d.ts","documents.d.cts","document_loaders/base.cjs","document_loaders/base.js","document_loaders/base.d.ts","document_loaders/base.d.cts","document_loaders/langsmith.cjs","document_loaders/langsmith.js","document_loaders/langsmith.d.ts","document_loaders/langsmith.d.cts","embeddings.cjs","embeddings.js","embeddings.d.ts","embeddings.d.cts","example_selectors.cjs","example_selectors.js","example_selectors.d.ts","example_selectors.d.cts","indexing.cjs","indexing.js","indexing.d.ts","indexing.d.cts","language_models/base.cjs","language_models/base.js","language_models/base.d.ts","language_models/base.d.cts","language_models/chat_models.cjs","language_models/chat_models.js","language_models/chat_models.d.ts","language_models/chat_models.d.cts","language_models/llms.cjs","language_models/llms.js","language_models/llms.d.ts","language_models/llms.d.cts","load.cjs","load.js","load.d.ts","load.d.cts","load/serializable.cjs","load/serializable.js","load/serializable.d.ts","load/serializable.d.cts","memory.cjs","memory.js","memory.d.ts","memory.d.cts","messages.cjs","messages.js","messages.d.ts","messages.d.cts","messages/tool.cjs","messages/tool.js","messages/tool.d.ts","messages/tool.d.cts","output_parsers.cjs","output_parsers.js","output_parsers.d.ts","output_parsers.d.cts","output_parsers/openai_tools.cjs","output_parsers/openai_tools.js","output_parsers/openai_tools.d.ts","output_parsers/openai_tools.d.cts","output_parsers/openai_functions.cjs","output_parsers/openai_functions.js","output_parsers/openai_functions.d.ts","output_parsers/openai_functions.d.cts","outputs.cjs","outputs.js","outputs.d.ts","outputs.d.cts","prompts.cjs","prompts.js","prompts.d.ts","prompts.d.cts","prompt_values.cjs","prompt_values.js","prompt_values.d.ts","prompt_values.d.cts","runnables.cjs","runnables.js","runnables.d.ts","runnables.d.cts","runnables/graph.cjs","runnables/graph.js","runnables/graph.d.ts","runnables/graph.d.cts","runnables/remote.cjs","runnables/remote.js","runnables/remote.d.ts","runnables/remote.d.cts","retrievers.cjs","retrievers.js","retrievers.d.ts","retrievers.d.cts","retrievers/document_compressors.cjs","retrievers/document_compressors.js","retrievers/document_compressors.d.ts","retrievers/document_compressors.d.cts","singletons.cjs","singletons.js","singletons.d.ts","singletons.d.cts","stores.cjs","stores.js","stores.d.ts","stores.d.cts","structured_query.cjs","structured_query.js","structured_query.d.ts","structured_query.d.cts","tools.cjs","tools.js","tools.d.ts","tools.d.cts","tracers/base.cjs","tracers/base.js","tracers/base.d.ts","tracers/base.d.cts","tracers/console.cjs","tracers/console.js","tracers/console.d.ts","tracers/console.d.cts","tracers/initialize.cjs","tracers/initialize.js","tracers/initialize.d.ts","tracers/initialize.d.cts","tracers/log_stream.cjs","tracers/log_stream.js","tracers/log_stream.d.ts","tracers/log_stream.d.cts","tracers/run_collector.cjs","tracers/run_collector.js","tracers/run_collector.d.ts","tracers/run_collector.d.cts","tracers/tracer_langchain.cjs","tracers/tracer_langchain.js","tracers/tracer_langchain.d.ts","tracers/tracer_langchain.d.cts","tracers/tracer_langchain_v1.cjs","tracers/tracer_langchain_v1.js","tracers/tracer_langchain_v1.d.ts","tracers/tracer_langchain_v1.d.cts","types/stream.cjs","types/stream.js","types/stream.d.ts","types/stream.d.cts","utils/async_caller.cjs","utils/async_caller.js","utils/async_caller.d.ts","utils/async_caller.d.cts","utils/chunk_array.cjs","utils/chunk_array.js","utils/chunk_array.d.ts","utils/chunk_array.d.cts","utils/env.cjs","utils/env.js","utils/env.d.ts","utils/env.d.cts","utils/event_source_parse.cjs","utils/event_source_parse.js","utils/event_source_parse.d.ts","utils/event_source_parse.d.cts","utils/function_calling.cjs","utils/function_calling.js","utils/function_calling.d.ts","utils/function_calling.d.cts","utils/hash.cjs","utils/hash.js","utils/hash.d.ts","utils/hash.d.cts","utils/json_patch.cjs","utils/json_patch.js","utils/json_patch.d.ts","utils/json_patch.d.cts","utils/json_schema.cjs","utils/json_schema.js","utils/json_schema.d.ts","utils/json_schema.d.cts","utils/math.cjs","utils/math.js","utils/math.d.ts","utils/math.d.cts","utils/stream.cjs","utils/stream.js","utils/stream.d.ts","utils/stream.d.cts","utils/testing.cjs","utils/testing.js","utils/testing.d.ts","utils/testing.d.cts","utils/tiktoken.cjs","utils/tiktoken.js","utils/tiktoken.d.ts","utils/tiktoken.d.cts","utils/types.cjs","utils/types.js","utils/types.d.ts","utils/types.d.cts","vectorstores.cjs","vectorstores.js","vectorstores.d.ts","vectorstores.d.cts"],"_lastModified":"2025-11-21T09:46:01.665Z"}
+ {"name":"@langchain/core","version":"0.3.42","description":"Core LangChain.js abstractions and schemas","type":"module","engines":{"node":">=18"},"main":"./index.js","types":"./index.d.ts","repository":{"type":"git","url":"git@github.com:langchain-ai/langchainjs.git"},"homepage":"https://github.com/langchain-ai/langchainjs/tree/main/langchain-core/","scripts":{"build":"yarn turbo:command build:internal --filter=@langchain/core","build:internal":"yarn lc_build --create-entrypoints --pre --tree-shaking","clean":"rm -rf .turbo dist/","lint:eslint":"NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/","lint:dpdm":"dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts","lint":"yarn lint:eslint && yarn lint:dpdm","lint:fix":"yarn lint:eslint --fix && yarn lint:dpdm","prepack":"yarn build","release":"release-it --only-version --config .release-it.json","test":"NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%","test:integration":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:watch":"NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts","test:single":"NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000","format":"prettier --config .prettierrc --write \"src\"","format:check":"prettier --config .prettierrc --check \"src\""},"author":"LangChain","license":"MIT","dependencies":{"@cfworker/json-schema":"^4.0.2","ansi-styles":"^5.0.0","camelcase":"6","decamelize":"1.2.0","js-tiktoken":"^1.0.12","langsmith":">=0.2.8 <0.4.0","mustache":"^4.2.0","p-queue":"^6.6.2","p-retry":"4","uuid":"^10.0.0","zod":"^3.22.4","zod-to-json-schema":"^3.22.3"},"devDependencies":{"@jest/globals":"^29.5.0","@langchain/scripts":">=0.1.0 <0.2.0","@swc/core":"^1.3.90","@swc/jest":"^0.2.29","@types/decamelize":"^1.2.0","@types/mustache":"^4","dpdm":"^3.12.0","eslint":"^8.33.0","eslint-config-airbnb-base":"^15.0.0","eslint-config-prettier":"^8.6.0","eslint-plugin-import":"^2.27.5","eslint-plugin-jest":"^27.6.0","eslint-plugin-no-instanceof":"^1.0.1","eslint-plugin-prettier":"^4.2.1","jest":"^29.5.0","jest-environment-node":"^29.6.4","ml-matrix":"^6.10.4","prettier":"^2.8.3","release-it":"^17.6.0","rimraf":"^5.0.1","ts-jest":"^29.1.0","typescript":"~5.1.6","web-streams-polyfill":"^4.0.0"},"publishConfig":{"access":"public"},"keywords":["llm","ai","gpt3","chain","prompt","prompt engineering","chatgpt","machine learning","ml","openai","embeddings","vectorstores"],"exports":{".":{"types":{"import":"./index.d.ts","require":"./index.d.cts","default":"./index.d.ts"},"import":"./index.js","require":"./index.cjs"},"./agents":{"types":{"import":"./agents.d.ts","require":"./agents.d.cts","default":"./agents.d.ts"},"import":"./agents.js","require":"./agents.cjs"},"./caches":{"types":{"import":"./caches.d.ts","require":"./caches.d.cts","default":"./caches.d.ts"},"import":"./caches.js","require":"./caches.cjs"},"./callbacks/base":{"types":{"import":"./callbacks/base.d.ts","require":"./callbacks/base.d.cts","default":"./callbacks/base.d.ts"},"import":"./callbacks/base.js","require":"./callbacks/base.cjs"},"./callbacks/dispatch":{"types":{"import":"./callbacks/dispatch.d.ts","require":"./callbacks/dispatch.d.cts","default":"./callbacks/dispatch.d.ts"},"import":"./callbacks/dispatch.js","require":"./callbacks/dispatch.cjs"},"./callbacks/dispatch/web":{"types":{"import":"./callbacks/dispatch/web.d.ts","require":"./callbacks/dispatch/web.d.cts","default":"./callbacks/dispatch/web.d.ts"},"import":"./callbacks/dispatch/web.js","require":"./callbacks/dispatch/web.cjs"},"./callbacks/manager":{"types":{"import":"./callbacks/manager.d.ts","require":"./callbacks/manager.d.cts","default":"./callbacks/manager.d.ts"},"import":"./callbacks/manager.js","require":"./callbacks/manager.cjs"},"./callbacks/promises":{"types":{"import":"./callbacks/promises.d.ts","require":"./callbacks/promises.d.cts","default":"./callbacks/promises.d.ts"},"import":"./callbacks/promises.js","require":"./callbacks/promises.cjs"},"./chat_history":{"types":{"import":"./chat_history.d.ts","require":"./chat_history.d.cts","default":"./chat_history.d.ts"},"import":"./chat_history.js","require":"./chat_history.cjs"},"./context":{"types":{"import":"./context.d.ts","require":"./context.d.cts","default":"./context.d.ts"},"import":"./context.js","require":"./context.cjs"},"./documents":{"types":{"import":"./documents.d.ts","require":"./documents.d.cts","default":"./documents.d.ts"},"import":"./documents.js","require":"./documents.cjs"},"./document_loaders/base":{"types":{"import":"./document_loaders/base.d.ts","require":"./document_loaders/base.d.cts","default":"./document_loaders/base.d.ts"},"import":"./document_loaders/base.js","require":"./document_loaders/base.cjs"},"./document_loaders/langsmith":{"types":{"import":"./document_loaders/langsmith.d.ts","require":"./document_loaders/langsmith.d.cts","default":"./document_loaders/langsmith.d.ts"},"import":"./document_loaders/langsmith.js","require":"./document_loaders/langsmith.cjs"},"./embeddings":{"types":{"import":"./embeddings.d.ts","require":"./embeddings.d.cts","default":"./embeddings.d.ts"},"import":"./embeddings.js","require":"./embeddings.cjs"},"./example_selectors":{"types":{"import":"./example_selectors.d.ts","require":"./example_selectors.d.cts","default":"./example_selectors.d.ts"},"import":"./example_selectors.js","require":"./example_selectors.cjs"},"./indexing":{"types":{"import":"./indexing.d.ts","require":"./indexing.d.cts","default":"./indexing.d.ts"},"import":"./indexing.js","require":"./indexing.cjs"},"./language_models/base":{"types":{"import":"./language_models/base.d.ts","require":"./language_models/base.d.cts","default":"./language_models/base.d.ts"},"import":"./language_models/base.js","require":"./language_models/base.cjs"},"./language_models/chat_models":{"types":{"import":"./language_models/chat_models.d.ts","require":"./language_models/chat_models.d.cts","default":"./language_models/chat_models.d.ts"},"import":"./language_models/chat_models.js","require":"./language_models/chat_models.cjs"},"./language_models/llms":{"types":{"import":"./language_models/llms.d.ts","require":"./language_models/llms.d.cts","default":"./language_models/llms.d.ts"},"import":"./language_models/llms.js","require":"./language_models/llms.cjs"},"./load":{"types":{"import":"./load.d.ts","require":"./load.d.cts","default":"./load.d.ts"},"import":"./load.js","require":"./load.cjs"},"./load/serializable":{"types":{"import":"./load/serializable.d.ts","require":"./load/serializable.d.cts","default":"./load/serializable.d.ts"},"import":"./load/serializable.js","require":"./load/serializable.cjs"},"./memory":{"types":{"import":"./memory.d.ts","require":"./memory.d.cts","default":"./memory.d.ts"},"import":"./memory.js","require":"./memory.cjs"},"./messages":{"types":{"import":"./messages.d.ts","require":"./messages.d.cts","default":"./messages.d.ts"},"import":"./messages.js","require":"./messages.cjs"},"./messages/tool":{"types":{"import":"./messages/tool.d.ts","require":"./messages/tool.d.cts","default":"./messages/tool.d.ts"},"import":"./messages/tool.js","require":"./messages/tool.cjs"},"./output_parsers":{"types":{"import":"./output_parsers.d.ts","require":"./output_parsers.d.cts","default":"./output_parsers.d.ts"},"import":"./output_parsers.js","require":"./output_parsers.cjs"},"./output_parsers/openai_tools":{"types":{"import":"./output_parsers/openai_tools.d.ts","require":"./output_parsers/openai_tools.d.cts","default":"./output_parsers/openai_tools.d.ts"},"import":"./output_parsers/openai_tools.js","require":"./output_parsers/openai_tools.cjs"},"./output_parsers/openai_functions":{"types":{"import":"./output_parsers/openai_functions.d.ts","require":"./output_parsers/openai_functions.d.cts","default":"./output_parsers/openai_functions.d.ts"},"import":"./output_parsers/openai_functions.js","require":"./output_parsers/openai_functions.cjs"},"./outputs":{"types":{"import":"./outputs.d.ts","require":"./outputs.d.cts","default":"./outputs.d.ts"},"import":"./outputs.js","require":"./outputs.cjs"},"./prompts":{"types":{"import":"./prompts.d.ts","require":"./prompts.d.cts","default":"./prompts.d.ts"},"import":"./prompts.js","require":"./prompts.cjs"},"./prompt_values":{"types":{"import":"./prompt_values.d.ts","require":"./prompt_values.d.cts","default":"./prompt_values.d.ts"},"import":"./prompt_values.js","require":"./prompt_values.cjs"},"./runnables":{"types":{"import":"./runnables.d.ts","require":"./runnables.d.cts","default":"./runnables.d.ts"},"import":"./runnables.js","require":"./runnables.cjs"},"./runnables/graph":{"types":{"import":"./runnables/graph.d.ts","require":"./runnables/graph.d.cts","default":"./runnables/graph.d.ts"},"import":"./runnables/graph.js","require":"./runnables/graph.cjs"},"./runnables/remote":{"types":{"import":"./runnables/remote.d.ts","require":"./runnables/remote.d.cts","default":"./runnables/remote.d.ts"},"import":"./runnables/remote.js","require":"./runnables/remote.cjs"},"./retrievers":{"types":{"import":"./retrievers.d.ts","require":"./retrievers.d.cts","default":"./retrievers.d.ts"},"import":"./retrievers.js","require":"./retrievers.cjs"},"./retrievers/document_compressors":{"types":{"import":"./retrievers/document_compressors.d.ts","require":"./retrievers/document_compressors.d.cts","default":"./retrievers/document_compressors.d.ts"},"import":"./retrievers/document_compressors.js","require":"./retrievers/document_compressors.cjs"},"./singletons":{"types":{"import":"./singletons.d.ts","require":"./singletons.d.cts","default":"./singletons.d.ts"},"import":"./singletons.js","require":"./singletons.cjs"},"./stores":{"types":{"import":"./stores.d.ts","require":"./stores.d.cts","default":"./stores.d.ts"},"import":"./stores.js","require":"./stores.cjs"},"./structured_query":{"types":{"import":"./structured_query.d.ts","require":"./structured_query.d.cts","default":"./structured_query.d.ts"},"import":"./structured_query.js","require":"./structured_query.cjs"},"./tools":{"types":{"import":"./tools.d.ts","require":"./tools.d.cts","default":"./tools.d.ts"},"import":"./tools.js","require":"./tools.cjs"},"./tracers/base":{"types":{"import":"./tracers/base.d.ts","require":"./tracers/base.d.cts","default":"./tracers/base.d.ts"},"import":"./tracers/base.js","require":"./tracers/base.cjs"},"./tracers/console":{"types":{"import":"./tracers/console.d.ts","require":"./tracers/console.d.cts","default":"./tracers/console.d.ts"},"import":"./tracers/console.js","require":"./tracers/console.cjs"},"./tracers/initialize":{"types":{"import":"./tracers/initialize.d.ts","require":"./tracers/initialize.d.cts","default":"./tracers/initialize.d.ts"},"import":"./tracers/initialize.js","require":"./tracers/initialize.cjs"},"./tracers/log_stream":{"types":{"import":"./tracers/log_stream.d.ts","require":"./tracers/log_stream.d.cts","default":"./tracers/log_stream.d.ts"},"import":"./tracers/log_stream.js","require":"./tracers/log_stream.cjs"},"./tracers/run_collector":{"types":{"import":"./tracers/run_collector.d.ts","require":"./tracers/run_collector.d.cts","default":"./tracers/run_collector.d.ts"},"import":"./tracers/run_collector.js","require":"./tracers/run_collector.cjs"},"./tracers/tracer_langchain":{"types":{"import":"./tracers/tracer_langchain.d.ts","require":"./tracers/tracer_langchain.d.cts","default":"./tracers/tracer_langchain.d.ts"},"import":"./tracers/tracer_langchain.js","require":"./tracers/tracer_langchain.cjs"},"./tracers/tracer_langchain_v1":{"types":{"import":"./tracers/tracer_langchain_v1.d.ts","require":"./tracers/tracer_langchain_v1.d.cts","default":"./tracers/tracer_langchain_v1.d.ts"},"import":"./tracers/tracer_langchain_v1.js","require":"./tracers/tracer_langchain_v1.cjs"},"./types/stream":{"types":{"import":"./types/stream.d.ts","require":"./types/stream.d.cts","default":"./types/stream.d.ts"},"import":"./types/stream.js","require":"./types/stream.cjs"},"./utils/async_caller":{"types":{"import":"./utils/async_caller.d.ts","require":"./utils/async_caller.d.cts","default":"./utils/async_caller.d.ts"},"import":"./utils/async_caller.js","require":"./utils/async_caller.cjs"},"./utils/chunk_array":{"types":{"import":"./utils/chunk_array.d.ts","require":"./utils/chunk_array.d.cts","default":"./utils/chunk_array.d.ts"},"import":"./utils/chunk_array.js","require":"./utils/chunk_array.cjs"},"./utils/env":{"types":{"import":"./utils/env.d.ts","require":"./utils/env.d.cts","default":"./utils/env.d.ts"},"import":"./utils/env.js","require":"./utils/env.cjs"},"./utils/event_source_parse":{"types":{"import":"./utils/event_source_parse.d.ts","require":"./utils/event_source_parse.d.cts","default":"./utils/event_source_parse.d.ts"},"import":"./utils/event_source_parse.js","require":"./utils/event_source_parse.cjs"},"./utils/function_calling":{"types":{"import":"./utils/function_calling.d.ts","require":"./utils/function_calling.d.cts","default":"./utils/function_calling.d.ts"},"import":"./utils/function_calling.js","require":"./utils/function_calling.cjs"},"./utils/hash":{"type
s":{"import":"./utils/hash.d.ts","require":"./utils/hash.d.cts","default":"./utils/hash.d.ts"},"import":"./utils/hash.js","require":"./utils/hash.cjs"},"./utils/json_patch":{"types":{"import":"./utils/json_patch.d.ts","require":"./utils/json_patch.d.cts","default":"./utils/json_patch.d.ts"},"import":"./utils/json_patch.js","require":"./utils/json_patch.cjs"},"./utils/json_schema":{"types":{"import":"./utils/json_schema.d.ts","require":"./utils/json_schema.d.cts","default":"./utils/json_schema.d.ts"},"import":"./utils/json_schema.js","require":"./utils/json_schema.cjs"},"./utils/math":{"types":{"import":"./utils/math.d.ts","require":"./utils/math.d.cts","default":"./utils/math.d.ts"},"import":"./utils/math.js","require":"./utils/math.cjs"},"./utils/stream":{"types":{"import":"./utils/stream.d.ts","require":"./utils/stream.d.cts","default":"./utils/stream.d.ts"},"import":"./utils/stream.js","require":"./utils/stream.cjs"},"./utils/testing":{"types":{"import":"./utils/testing.d.ts","require":"./utils/testing.d.cts","default":"./utils/testing.d.ts"},"import":"./utils/testing.js","require":"./utils/testing.cjs"},"./utils/tiktoken":{"types":{"import":"./utils/tiktoken.d.ts","require":"./utils/tiktoken.d.cts","default":"./utils/tiktoken.d.ts"},"import":"./utils/tiktoken.js","require":"./utils/tiktoken.cjs"},"./utils/types":{"types":{"import":"./utils/types.d.ts","require":"./utils/types.d.cts","default":"./utils/types.d.ts"},"import":"./utils/types.js","require":"./utils/types.cjs"},"./vectorstores":{"types":{"import":"./vectorstores.d.ts","require":"./vectorstores.d.cts","default":"./vectorstores.d.ts"},"import":"./vectorstores.js","require":"./vectorstores.cjs"},"./package.json":"./package.json"},"files":["dist/","agents.cjs","agents.js","agents.d.ts","agents.d.cts","caches.cjs","caches.js","caches.d.ts","caches.d.cts","callbacks/base.cjs","callbacks/base.js","callbacks/base.d.ts","callbacks/base.d.cts","callbacks/dispatch.cjs","callbacks/dispatch.js","callbacks/dispatch.d.ts","callbacks/dispatch.d.cts","callbacks/dispatch/web.cjs","callbacks/dispatch/web.js","callbacks/dispatch/web.d.ts","callbacks/dispatch/web.d.cts","callbacks/manager.cjs","callbacks/manager.js","callbacks/manager.d.ts","callbacks/manager.d.cts","callbacks/promises.cjs","callbacks/promises.js","callbacks/promises.d.ts","callbacks/promises.d.cts","chat_history.cjs","chat_history.js","chat_history.d.ts","chat_history.d.cts","context.cjs","context.js","context.d.ts","context.d.cts","documents.cjs","documents.js","documents.d.ts","documents.d.cts","document_loaders/base.cjs","document_loaders/base.js","document_loaders/base.d.ts","document_loaders/base.d.cts","document_loaders/langsmith.cjs","document_loaders/langsmith.js","document_loaders/langsmith.d.ts","document_loaders/langsmith.d.cts","embeddings.cjs","embeddings.js","embeddings.d.ts","embeddings.d.cts","example_selectors.cjs","example_selectors.js","example_selectors.d.ts","example_selectors.d.cts","indexing.cjs","indexing.js","indexing.d.ts","indexing.d.cts","language_models/base.cjs","language_models/base.js","language_models/base.d.ts","language_models/base.d.cts","language_models/chat_models.cjs","language_models/chat_models.js","language_models/chat_models.d.ts","language_models/chat_models.d.cts","language_models/llms.cjs","language_models/llms.js","language_models/llms.d.ts","language_models/llms.d.cts","load.cjs","load.js","load.d.ts","load.d.cts","load/serializable.cjs","load/serializable.js","load/serializable.d.ts","load/serializable.d.cts","memory.cjs","memory.
js","memory.d.ts","memory.d.cts","messages.cjs","messages.js","messages.d.ts","messages.d.cts","messages/tool.cjs","messages/tool.js","messages/tool.d.ts","messages/tool.d.cts","output_parsers.cjs","output_parsers.js","output_parsers.d.ts","output_parsers.d.cts","output_parsers/openai_tools.cjs","output_parsers/openai_tools.js","output_parsers/openai_tools.d.ts","output_parsers/openai_tools.d.cts","output_parsers/openai_functions.cjs","output_parsers/openai_functions.js","output_parsers/openai_functions.d.ts","output_parsers/openai_functions.d.cts","outputs.cjs","outputs.js","outputs.d.ts","outputs.d.cts","prompts.cjs","prompts.js","prompts.d.ts","prompts.d.cts","prompt_values.cjs","prompt_values.js","prompt_values.d.ts","prompt_values.d.cts","runnables.cjs","runnables.js","runnables.d.ts","runnables.d.cts","runnables/graph.cjs","runnables/graph.js","runnables/graph.d.ts","runnables/graph.d.cts","runnables/remote.cjs","runnables/remote.js","runnables/remote.d.ts","runnables/remote.d.cts","retrievers.cjs","retrievers.js","retrievers.d.ts","retrievers.d.cts","retrievers/document_compressors.cjs","retrievers/document_compressors.js","retrievers/document_compressors.d.ts","retrievers/document_compressors.d.cts","singletons.cjs","singletons.js","singletons.d.ts","singletons.d.cts","stores.cjs","stores.js","stores.d.ts","stores.d.cts","structured_query.cjs","structured_query.js","structured_query.d.ts","structured_query.d.cts","tools.cjs","tools.js","tools.d.ts","tools.d.cts","tracers/base.cjs","tracers/base.js","tracers/base.d.ts","tracers/base.d.cts","tracers/console.cjs","tracers/console.js","tracers/console.d.ts","tracers/console.d.cts","tracers/initialize.cjs","tracers/initialize.js","tracers/initialize.d.ts","tracers/initialize.d.cts","tracers/log_stream.cjs","tracers/log_stream.js","tracers/log_stream.d.ts","tracers/log_stream.d.cts","tracers/run_collector.cjs","tracers/run_collector.js","tracers/run_collector.d.ts","tracers/run_collector.d.cts","tracers/tracer_langchain.cjs","tracers/tracer_langchain.js","tracers/tracer_langchain.d.ts","tracers/tracer_langchain.d.cts","tracers/tracer_langchain_v1.cjs","tracers/tracer_langchain_v1.js","tracers/tracer_langchain_v1.d.ts","tracers/tracer_langchain_v1.d.cts","types/stream.cjs","types/stream.js","types/stream.d.ts","types/stream.d.cts","utils/async_caller.cjs","utils/async_caller.js","utils/async_caller.d.ts","utils/async_caller.d.cts","utils/chunk_array.cjs","utils/chunk_array.js","utils/chunk_array.d.ts","utils/chunk_array.d.cts","utils/env.cjs","utils/env.js","utils/env.d.ts","utils/env.d.cts","utils/event_source_parse.cjs","utils/event_source_parse.js","utils/event_source_parse.d.ts","utils/event_source_parse.d.cts","utils/function_calling.cjs","utils/function_calling.js","utils/function_calling.d.ts","utils/function_calling.d.cts","utils/hash.cjs","utils/hash.js","utils/hash.d.ts","utils/hash.d.cts","utils/json_patch.cjs","utils/json_patch.js","utils/json_patch.d.ts","utils/json_patch.d.cts","utils/json_schema.cjs","utils/json_schema.js","utils/json_schema.d.ts","utils/json_schema.d.cts","utils/math.cjs","utils/math.js","utils/math.d.ts","utils/math.d.cts","utils/stream.cjs","utils/stream.js","utils/stream.d.ts","utils/stream.d.cts","utils/testing.cjs","utils/testing.js","utils/testing.d.ts","utils/testing.d.cts","utils/tiktoken.cjs","utils/tiktoken.js","utils/tiktoken.d.ts","utils/tiktoken.d.cts","utils/types.cjs","utils/types.js","utils/types.d.ts","utils/types.d.cts","vectorstores.cjs","vectorstores.js","vectorstores.d.ts","vectorstores
.d.cts"],"_lastModified":"2025-12-09T09:19:45.663Z"}
@@ -1 +1 @@
- {"name":"@langchain/deepseek","version":"0.0.1","description":"Deepseek integration for LangChain.js","type":"module","engines":{"node":">=18"},"main":"./index.js","types":"./index.d.ts","repository":{"type":"git","url":"git@github.com:langchain-ai/langchainjs.git"},"homepage":"https://github.com/langchain-ai/langchainjs/tree/main/libs/@langchain/deepseek","scripts":{"build":"yarn turbo:command build:internal --filter=@langchain/deepseek","build:internal":"yarn lc_build --create-entrypoints --pre --tree-shaking","lint:eslint":"NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/","lint:dpdm":"dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts","lint":"yarn lint:eslint && yarn lint:dpdm","lint:fix":"yarn lint:eslint --fix && yarn lint:dpdm","clean":"rm -rf .turbo dist/","prepack":"yarn build","test":"NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%","test:watch":"NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts","test:single":"NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000","test:int":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","format":"prettier --config .prettierrc --write \"src\"","format:check":"prettier --config .prettierrc --check \"src\""},"author":"LangChain","license":"MIT","dependencies":{"@langchain/openai":"^0.4.2","zod":"^3.24.1"},"peerDependencies":{"@langchain/core":">=0.3.0 <0.4.0"},"devDependencies":{"@jest/globals":"^29.5.0","@langchain/core":"workspace:*","@langchain/scripts":">=0.1.0 <0.2.0","@langchain/standard-tests":"workspace:*","@swc/core":"^1.3.90","@swc/jest":"^0.2.29","@tsconfig/recommended":"^1.0.3","@typescript-eslint/eslint-plugin":"^6.12.0","@typescript-eslint/parser":"^6.12.0","dotenv":"^16.3.1","dpdm":"^3.12.0","eslint":"^8.33.0","eslint-config-airbnb-base":"^15.0.0","eslint-config-prettier":"^8.6.0","eslint-plugin-import":"^2.27.5","eslint-plugin-no-instanceof":"^1.0.1","eslint-plugin-prettier":"^4.2.1","jest":"^29.5.0","jest-environment-node":"^29.6.4","prettier":"^2.8.3","release-it":"^15.10.1","rollup":"^4.5.2","ts-jest":"^29.1.0","typescript":"<5.2.0"},"publishConfig":{"access":"public"},"exports":{".":{"types":{"import":"./index.d.ts","require":"./index.d.cts","default":"./index.d.ts"},"import":"./index.js","require":"./index.cjs"},"./package.json":"./package.json"},"files":["dist/","index.cjs","index.js","index.d.ts","index.d.cts"],"_lastModified":"2025-11-21T09:45:57.356Z"}
+ {"name":"@langchain/deepseek","version":"0.0.1","description":"Deepseek integration for LangChain.js","type":"module","engines":{"node":">=18"},"main":"./index.js","types":"./index.d.ts","repository":{"type":"git","url":"git@github.com:langchain-ai/langchainjs.git"},"homepage":"https://github.com/langchain-ai/langchainjs/tree/main/libs/@langchain/deepseek","scripts":{"build":"yarn turbo:command build:internal --filter=@langchain/deepseek","build:internal":"yarn lc_build --create-entrypoints --pre --tree-shaking","lint:eslint":"NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/","lint:dpdm":"dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts","lint":"yarn lint:eslint && yarn lint:dpdm","lint:fix":"yarn lint:eslint --fix && yarn lint:dpdm","clean":"rm -rf .turbo dist/","prepack":"yarn build","test":"NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%","test:watch":"NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts","test:single":"NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000","test:int":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","format":"prettier --config .prettierrc --write \"src\"","format:check":"prettier --config .prettierrc --check \"src\""},"author":"LangChain","license":"MIT","dependencies":{"@langchain/openai":"^0.4.2","zod":"^3.24.1"},"peerDependencies":{"@langchain/core":">=0.3.0 <0.4.0"},"devDependencies":{"@jest/globals":"^29.5.0","@langchain/core":"workspace:*","@langchain/scripts":">=0.1.0 <0.2.0","@langchain/standard-tests":"workspace:*","@swc/core":"^1.3.90","@swc/jest":"^0.2.29","@tsconfig/recommended":"^1.0.3","@typescript-eslint/eslint-plugin":"^6.12.0","@typescript-eslint/parser":"^6.12.0","dotenv":"^16.3.1","dpdm":"^3.12.0","eslint":"^8.33.0","eslint-config-airbnb-base":"^15.0.0","eslint-config-prettier":"^8.6.0","eslint-plugin-import":"^2.27.5","eslint-plugin-no-instanceof":"^1.0.1","eslint-plugin-prettier":"^4.2.1","jest":"^29.5.0","jest-environment-node":"^29.6.4","prettier":"^2.8.3","release-it":"^15.10.1","rollup":"^4.5.2","ts-jest":"^29.1.0","typescript":"<5.2.0"},"publishConfig":{"access":"public"},"exports":{".":{"types":{"import":"./index.d.ts","require":"./index.d.cts","default":"./index.d.ts"},"import":"./index.js","require":"./index.cjs"},"./package.json":"./package.json"},"files":["dist/","index.cjs","index.js","index.d.ts","index.d.cts"],"_lastModified":"2025-12-09T09:19:41.231Z"}
@@ -1 +1 @@
- {"name":"@langchain/openai","version":"0.4.4","description":"OpenAI integrations for LangChain.js","type":"module","engines":{"node":">=18"},"main":"./index.js","types":"./index.d.ts","repository":{"type":"git","url":"git@github.com:langchain-ai/langchainjs.git"},"homepage":"https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-openai/","scripts":{"build":"yarn turbo:command build:internal --filter=@langchain/openai","build:internal":"yarn lc_build --create-entrypoints --pre --tree-shaking","lint:eslint":"NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/","lint:dpdm":"dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts","lint":"yarn lint:eslint && yarn lint:dpdm","lint:fix":"yarn lint:eslint --fix && yarn lint:dpdm","clean":"rm -rf .turbo dist/","prepack":"yarn build","test":"NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%","test:watch":"NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts","test:single":"NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000","test:int":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:standard:unit":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:standard:int":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:standard":"yarn test:standard:unit && yarn test:standard:int","format":"prettier --config .prettierrc --write \"src\"","format:check":"prettier --config .prettierrc --check \"src\""},"author":"LangChain","license":"MIT","dependencies":{"js-tiktoken":"^1.0.12","openai":"^4.77.0","zod":"^3.22.4","zod-to-json-schema":"^3.22.3"},"peerDependencies":{"@langchain/core":">=0.3.39 <0.4.0"},"devDependencies":{"@azure/identity":"^4.2.1","@jest/globals":"^29.5.0","@langchain/core":"workspace:*","@langchain/scripts":">=0.1.0 <0.2.0","@langchain/standard-tests":"0.0.0","@swc/core":"^1.3.90","@swc/jest":"^0.2.29","dpdm":"^3.12.0","eslint":"^8.33.0","eslint-config-airbnb-base":"^15.0.0","eslint-config-prettier":"^8.6.0","eslint-plugin-import":"^2.27.5","eslint-plugin-jest":"^27.6.0","eslint-plugin-no-instanceof":"^1.0.1","eslint-plugin-prettier":"^4.2.1","jest":"^29.5.0","jest-environment-node":"^29.6.4","prettier":"^2.8.3","release-it":"^17.6.0","rimraf":"^5.0.1","ts-jest":"^29.1.0","typescript":"~5.1.6"},"publishConfig":{"access":"public"},"keywords":["llm","ai","gpt3","chain","prompt","prompt engineering","chatgpt","machine learning","ml","openai","embeddings","vectorstores"],"exports":{".":{"types":{"import":"./index.d.ts","require":"./index.d.cts","default":"./index.d.ts"},"import":"./index.js","require":"./index.cjs"},"./package.json":"./package.json"},"files":["dist/","index.cjs","index.js","index.d.ts","index.d.cts"],"_lastModified":"2025-11-21T09:46:01.005Z"}
+ {"name":"@langchain/openai","version":"0.4.4","description":"OpenAI integrations for LangChain.js","type":"module","engines":{"node":">=18"},"main":"./index.js","types":"./index.d.ts","repository":{"type":"git","url":"git@github.com:langchain-ai/langchainjs.git"},"homepage":"https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-openai/","scripts":{"build":"yarn turbo:command build:internal --filter=@langchain/openai","build:internal":"yarn lc_build --create-entrypoints --pre --tree-shaking","lint:eslint":"NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/","lint:dpdm":"dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts","lint":"yarn lint:eslint && yarn lint:dpdm","lint:fix":"yarn lint:eslint --fix && yarn lint:dpdm","clean":"rm -rf .turbo dist/","prepack":"yarn build","test":"NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%","test:watch":"NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts","test:single":"NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000","test:int":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:standard:unit":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:standard:int":"NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%","test:standard":"yarn test:standard:unit && yarn test:standard:int","format":"prettier --config .prettierrc --write \"src\"","format:check":"prettier --config .prettierrc --check \"src\""},"author":"LangChain","license":"MIT","dependencies":{"js-tiktoken":"^1.0.12","openai":"^4.77.0","zod":"^3.22.4","zod-to-json-schema":"^3.22.3"},"peerDependencies":{"@langchain/core":">=0.3.39 <0.4.0"},"devDependencies":{"@azure/identity":"^4.2.1","@jest/globals":"^29.5.0","@langchain/core":"workspace:*","@langchain/scripts":">=0.1.0 <0.2.0","@langchain/standard-tests":"0.0.0","@swc/core":"^1.3.90","@swc/jest":"^0.2.29","dpdm":"^3.12.0","eslint":"^8.33.0","eslint-config-airbnb-base":"^15.0.0","eslint-config-prettier":"^8.6.0","eslint-plugin-import":"^2.27.5","eslint-plugin-jest":"^27.6.0","eslint-plugin-no-instanceof":"^1.0.1","eslint-plugin-prettier":"^4.2.1","jest":"^29.5.0","jest-environment-node":"^29.6.4","prettier":"^2.8.3","release-it":"^17.6.0","rimraf":"^5.0.1","ts-jest":"^29.1.0","typescript":"~5.1.6"},"publishConfig":{"access":"public"},"keywords":["llm","ai","gpt3","chain","prompt","prompt engineering","chatgpt","machine learning","ml","openai","embeddings","vectorstores"],"exports":{".":{"types":{"import":"./index.d.ts","require":"./index.d.cts","default":"./index.d.ts"},"import":"./index.js","require":"./index.cjs"},"./package.json":"./package.json"},"files":["dist/","index.cjs","index.js","index.d.ts","index.d.cts"],"_lastModified":"2025-12-09T09:19:44.874Z"}
package/package.json CHANGED
@@ -6,7 +6,7 @@
  "description": "Support integration with AI services, providing AI-related workflow nodes to enhance business processing capabilities.",
  "description.ru-RU": "Поддержка интеграции с AI-сервисами: предоставляются AI-узлы для рабочих процессов, расширяя возможности бизнес-обработки.",
  "description.zh-CN": "支持接入 AI 服务,提供 AI 相关的工作流节点,增强业务处理能力。",
- "version": "1.9.0-beta.17",
+ "version": "1.9.0-beta.18",
  "main": "dist/server/index.js",
  "homepage": "https://docs.nocobase.com/handbook/action-ai",
  "homepage.ru-RU": "https://docs-ru.nocobase.com/handbook/action-ai",
@@ -22,5 +22,5 @@
  "@langchain/deepseek": "^0.0.1",
  "@langchain/openai": "^0.4.3"
  },
- "gitHead": "4f95b676235fa3f7583493412279d8132a20c4d0"
+ "gitHead": "1c211ab8c30d9442c38d07e2ebef2c2935e0fbac"
  }
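Note that the plugin declares "@langchain/openai": "^0.4.3" while the bundled copy above reports 0.4.4; the caret range admits that patch release. A quick check with the semver package, as an illustrative snippet rather than part of this release:

import semver from "semver";

// "^0.4.3" means >=0.4.3 <0.5.0, so the bundled 0.4.4 satisfies it.
console.log(semver.satisfies("0.4.4", "^0.4.3")); // true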