@alpic80/rivet-core 1.24.2-aidon.5 → 1.24.2-aidon.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16609,7 +16609,7 @@ var ChatAidonNodeImpl = class extends ChatNodeImpl {
16609
16609
  }
16610
16610
  return path;
16611
16611
  }
16612
- async callToolGet(parsedArgs, schemaDetail, path, data) {
16612
+ async callToolGet(schemaDetail, path, parsedArgs, data) {
16613
16613
  const queryParams = new URLSearchParams(
16614
16614
  parsedArgs.parameters
16615
16615
  ).toString();
@@ -16687,7 +16687,7 @@ var ChatAidonNodeImpl = class extends ChatNodeImpl {
16687
16687
  if (schemaDetail.requestInBody) {
16688
16688
  data = await this.callToolPost(schemaDetail, path, functionCall.arguments, data);
16689
16689
  } else {
16690
- data = await this.callToolGet(functionCall.arguments, schemaDetail, path, data);
16690
+ data = await this.callToolGet(schemaDetail, path, functionCall.arguments, data);
16691
16691
  }
16692
16692
  messages["value"].push({
16693
16693
  type: "function",
@@ -16763,6 +16763,40 @@ function chatAidonNode() {
16763
16763
  var aidonPlugin = {
16764
16764
  id: "aidon",
16765
16765
  name: "Aidon",
16766
+ configSpec: {
16767
+ aidonURL: {
16768
+ type: "string",
16769
+ label: "Aidon URL",
16770
+ description: "The URL for the Aidon application.",
16771
+ helperText: "Defaults to https://app.aidon.ai. URL for the Aidon application.",
16772
+ default: "https://app.aidon.ai"
16773
+ },
16774
+ aidonKey: {
16775
+ type: "secret",
16776
+ label: "Aidon API Key",
16777
+ description: "The API Key for the Aidon application.",
16778
+ helperText: "API Key for the Aidon application."
16779
+ },
16780
+ fileBrowserURL: {
16781
+ type: "string",
16782
+ label: "FileBrowser URL",
16783
+ description: "The URL for the FileBrowser service.",
16784
+ helperText: "Defaults to https://ai-fb.aidon.ai. URL for the FileBrowser service.",
16785
+ default: "https://ai-fb.aidon.ai"
16786
+ },
16787
+ fileBrowserUsername: {
16788
+ type: "string",
16789
+ label: "FileBrowser Username",
16790
+ description: "The username for the FileBrowser service.",
16791
+ helperText: "Enter the username given to access FileBrowser."
16792
+ },
16793
+ fileBrowserPassword: {
16794
+ type: "secret",
16795
+ label: "FileBrowser Password",
16796
+ description: "The password for the FileBrowser service.",
16797
+ helperText: "Enter the password given to access FileBrowser."
16798
+ }
16799
+ },
16766
16800
  register: (register) => {
16767
16801
  register(chatAidonNode());
16768
16802
  }
@@ -18928,7 +18962,7 @@ var ChatHuggingFaceNodeImpl = {
18928
18962
  const repetitionPenalty = getInputOrData(data, inputData, "repetitionPenalty", "number");
18929
18963
  const topP = getInputOrData(data, inputData, "topP", "number");
18930
18964
  const topK = getInputOrData(data, inputData, "topK", "number");
18931
- const hf = endpoint ? new import_inference.HfInferenceEndpoint(endpoint, accessToken) : new import_inference.HfInference(accessToken);
18965
+ const hf = endpoint ? new import_inference.InferenceClient(accessToken, { endpointUrl: endpoint }) : new import_inference.InferenceClient(accessToken);
18932
18966
  const generationStream = hf.textGenerationStream({
18933
18967
  inputs: prompt,
18934
18968
  model,
@@ -19126,18 +19160,21 @@ var TextToImageHuggingFaceNodeImpl = {
19126
19160
  const negativePrompt = getInputOrData(data, inputData, "negativePrompt") || void 0;
19127
19161
  const guidanceScale = getInputOrData(data, inputData, "guidanceScale", "number");
19128
19162
  const numInferenceSteps = getInputOrData(data, inputData, "numInferenceSteps", "number");
19129
- const hf = endpoint ? new import_inference2.HfInferenceEndpoint(endpoint, accessToken) : new import_inference2.HfInference(accessToken);
19130
- const image = await hf.textToImage({
19131
- inputs: prompt,
19132
- model,
19133
- parameters: {
19134
- width,
19135
- height,
19136
- negative_prompt: negativePrompt,
19137
- guidance_scale: guidanceScale,
19138
- num_inference_steps: numInferenceSteps
19139
- }
19140
- });
19163
+ const hf = endpoint ? new import_inference2.InferenceClient(accessToken, { endpointUrl: endpoint }) : new import_inference2.InferenceClient(accessToken);
19164
+ const image = await hf.textToImage(
19165
+ {
19166
+ inputs: prompt,
19167
+ model,
19168
+ parameters: {
19169
+ width,
19170
+ height,
19171
+ negative_prompt: negativePrompt,
19172
+ guidance_scale: guidanceScale,
19173
+ num_inference_steps: numInferenceSteps
19174
+ }
19175
+ },
19176
+ { outputType: "blob" }
19177
+ );
19141
19178
  return {
19142
19179
  ["output"]: {
19143
19180
  type: "image",