@alpic80/rivet-core 1.24.2-aidon.5 → 1.24.2-aidon.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -53,7 +53,7 @@ class ChatAidonNodeImpl extends ChatNodeImpl {
  }
  return path;
  }
- async callToolGet(parsedArgs, schemaDetail, path, data) {
+ async callToolGet(schemaDetail, path, parsedArgs, data) {
  const queryParams = new URLSearchParams(parsedArgs.parameters).toString();
  const fullUrl = schemaDetail.url + path + (queryParams ? "?" + queryParams : "");
  let headers = {};
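The reorder gives callToolGet the same (schemaDetail, path, arguments, data) parameter order as callToolPost; the matching call-site update appears in the dispatch hunk further down. For reference, URLSearchParams serializes a plain object into the query string that the GET branch appends; a quick illustration with made-up values:

```js
// Illustrative values only; mirrors the queryParams/fullUrl logic above.
const queryParams = new URLSearchParams({ city: 'Montreal', units: 'metric' }).toString();
const fullUrl = 'https://api.example.com' + '/weather' + (queryParams ? '?' + queryParams : '');
// fullUrl === 'https://api.example.com/weather?city=Montreal&units=metric'
```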
@@ -112,8 +112,11 @@ class ChatAidonNodeImpl extends ChatNodeImpl {
  async process(inputs, context) {
  //make sure not to include functions if we have no way to run them after.
  inputs = this.removeInvalidInputs(inputs);
+ // const configData = this.data
+ // configData.frequencyPenalty = 2;
  // Call the parent class's process method to do its job
  let outputs = await super.process(inputs, context);
+ //Now check if the LLM wants us to do some tool calling
  const funcCallOutput = outputs['function-call'] ?? outputs['function-calls'];
  const funcCalls = funcCallOutput?.type === 'object[]'
  ? funcCallOutput.value
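Rivet node outputs are tagged data values of the form { type, value }, so the check above distinguishes a batch of tool calls (type 'object[]') from a single call. A sketch of the shapes being normalized, with invented field values:

```js
// Assumed shapes, following Rivet's { type, value } data-value convention.
const single = { type: 'object', value: { name: 'getWeather', arguments: '{"city":"Paris"}' } };
const batch = { type: 'object[]', value: [single.value] };
const funcCalls = batch.type === 'object[]' ? batch.value : [batch.value];
// funcCalls is always an array of { name, arguments } entries.
```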
@@ -135,14 +138,13 @@ class ChatAidonNodeImpl extends ChatNodeImpl {
  }
  const schemaDetail = this.convertToolSchemaToSchemaDetail(toolSchema);
  const path = this.extractPath(schemaDetail, functionCall.name, functionCall.arguments);
- // Determine if the request should be in the body or as a query
  let data = {};
- if (schemaDetail.requestInBody) {
- // If the type is set to body
+ // Determine if the request should be in the body or as a query
+ if (schemaDetail.requestInBody) { // If the type is set to body
  data = await this.callToolPost(schemaDetail, path, functionCall.arguments, data);
  }
  else { // If the type is set to query
- data = await this.callToolGet(functionCall.arguments, schemaDetail, path, data);
+ data = await this.callToolGet(schemaDetail, path, functionCall.arguments, data);
  }
  messages['value'].push({
  type: "function",
@@ -1,6 +1,6 @@
  import { nanoid } from 'nanoid/non-secure';
  import {} from '../../../index.js';
- import { HfInference, HfInferenceEndpoint } from '@huggingface/inference';
+ import { InferenceClient } from '@huggingface/inference';
  import { getInputOrData } from '../../../utils/inputs.js';
  import { coerceType } from '../../../utils/coerceType.js';
  import { dedent } from '../../../utils/misc.js';
@@ -206,7 +206,9 @@ export const ChatHuggingFaceNodeImpl = {
  const repetitionPenalty = getInputOrData(data, inputData, 'repetitionPenalty', 'number');
  const topP = getInputOrData(data, inputData, 'topP', 'number');
  const topK = getInputOrData(data, inputData, 'topK', 'number');
- const hf = endpoint ? new HfInferenceEndpoint(endpoint, accessToken) : new HfInference(accessToken);
+ const hf = endpoint
+ ? new InferenceClient(accessToken, { endpointUrl: endpoint })
+ : new InferenceClient(accessToken);
  const generationStream = hf.textGenerationStream({
  inputs: prompt,
  model,
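This follows the @huggingface/inference v3+ migration: HfInference and HfInferenceEndpoint were merged into a single InferenceClient, and a dedicated endpoint is selected with the endpointUrl option rather than a separate class. A minimal sketch, with placeholder token and URL:

```js
import { InferenceClient } from '@huggingface/inference';

// Serverless inference (formerly HfInference):
const hf = new InferenceClient('hf_xxx');
// Dedicated endpoint (formerly HfInferenceEndpoint):
const ep = new InferenceClient('hf_xxx', { endpointUrl: 'https://my-endpoint.endpoints.huggingface.cloud' });
```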
@@ -1,6 +1,6 @@
  import { nanoid } from 'nanoid/non-secure';
  import {} from '../../../index.js';
- import { HfInference, HfInferenceEndpoint } from '@huggingface/inference';
+ import { InferenceClient } from '@huggingface/inference';
  import { dedent } from 'ts-dedent';
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
  import { getInputOrData } from '../../../utils/inputs.js';
@@ -163,7 +163,9 @@ export const TextToImageHuggingFaceNodeImpl = {
  const negativePrompt = getInputOrData(data, inputData, 'negativePrompt') || undefined;
  const guidanceScale = getInputOrData(data, inputData, 'guidanceScale', 'number');
  const numInferenceSteps = getInputOrData(data, inputData, 'numInferenceSteps', 'number');
- const hf = endpoint ? new HfInferenceEndpoint(endpoint, accessToken) : new HfInference(accessToken);
+ const hf = endpoint
+ ? new InferenceClient(accessToken, { endpointUrl: endpoint })
+ : new InferenceClient(accessToken);
  const image = await hf.textToImage({
  inputs: prompt,
  model,
@@ -174,7 +176,7 @@
  guidance_scale: guidanceScale,
  num_inference_steps: numInferenceSteps,
  },
- });
+ }, { outputType: "blob" });
  return {
  ['output']: {
  type: 'image',
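The added options argument matters because recent @huggingface/inference releases may return a URL string from textToImage instead of raw bytes; outputType: "blob" pins the binary result this image node expects. A sketch of consuming it, with an illustrative model name:

```js
const image = await hf.textToImage(
  { inputs: 'a watercolor fox', model: 'black-forest-labs/FLUX.1-schnell' },
  { outputType: 'blob' },
);
const bytes = new Uint8Array(await image.arrayBuffer()); // raw image bytes, e.g. for base64 encoding
```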
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "name": "@alpic80/rivet-core",
  "license": "MIT",
  "repository": "https://github.com/castortech/rivet",
- "version": "1.24.2-aidon.5",
+ "version": "1.24.2-aidon.6",
  "packageManager": "yarn@3.5.0",
  "main": "dist/cjs/bundle.cjs",
  "module": "dist/esm/index.js",
@@ -47,8 +47,8 @@
  "@gentrace/core": "^2.2.5",
  "@google-cloud/vertexai": "^0.1.3",
  "@google/genai": "^0.12.0",
- "@huggingface/inference": "^2.6.4",
- "@ironclad/rivet-core": "npm:@alpic80/rivet-core@1.24.2-aidon.5",
+ "@huggingface/inference": "^4.13.0",
+ "@ironclad/rivet-core": "npm:@alpic80/rivet-core@1.24.2-aidon.6",
  "assemblyai": "^4.6.0",
  "autoevals": "^0.0.26",
  "cron-parser": "^4.9.0",
@@ -56,12 +56,12 @@
  "emittery": "^1.0.1",
  "emittery-0-13": "npm:emittery@^0.13.1",
  "gpt-tokenizer": "^2.1.2",
- "jsonpath-plus": "^10.2.0",
+ "jsonpath-plus": "^10.3.0",
  "lodash-es": "^4.17.21",
  "mdast-util-gfm-table": "^2.0.0",
  "mdast-util-to-markdown": "^2.1.2",
  "minimatch": "^9.0.3",
- "nanoid": "^3.3.6",
+ "nanoid": "^3.3.8",
  "openai": "^4.28.4",
  "p-queue": "^7.4.1",
  "p-queue-6": "npm:p-queue@^6.0.0",
@@ -83,7 +83,7 @@
  "@types/yaml": "^1.9.7",
  "@typescript-eslint/eslint-plugin": "^8.24.0",
  "@typescript-eslint/parser": "^8.24.0",
- "esbuild": "^0.19.5",
+ "esbuild": "^0.25.12",
  "eslint": "^9.20.1",
  "eslint-import-resolver-typescript": "^3.6.1",
  "eslint-plugin-import": "^2.31.0",