@marizmelo/llm-cli 0.0.4 → 0.0.6

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (2)
  1. package/bundle/gemini.js +62 -31
  2. package/package.json +1 -1
package/bundle/gemini.js CHANGED
@@ -139658,8 +139658,8 @@ var GIT_COMMIT_INFO, CLI_VERSION;
 var init_git_commit = __esm({
   "packages/core/dist/src/generated/git-commit.js"() {
     "use strict";
-    GIT_COMMIT_INFO = "55b76129";
-    CLI_VERSION = "0.0.4";
+    GIT_COMMIT_INFO = "4e5c77fc";
+    CLI_VERSION = "0.0.6";
   }
 });
 
@@ -141926,60 +141926,59 @@ var init_ollama_provider = __esm({
       name = "ollama";
       baseUrl;
       model;
+      toolsSupported = true;
       constructor(config, gcConfig) {
         this.baseUrl = config.baseUrl || "http://localhost:11434";
         this.model = config.model;
       }
       async generateContent(request3, userPromptId) {
         const messages = this.convertToOllamaMessages(request3);
-        const tools = this.convertToOllamaTools(request3);
+        const tools = this.toolsSupported ? this.convertToOllamaTools(request3) : [];
         const ollamaRequest = {
           model: this.model,
           messages,
           stream: false,
           ...tools.length > 0 ? { tools } : {},
-          options: {
-            temperature: request3.generationConfig?.temperature ?? request3.config?.temperature ?? 0,
-            top_p: request3.generationConfig?.topP ?? request3.config?.topP ?? 1,
-            num_predict: request3.generationConfig?.maxOutputTokens ?? request3.config?.maxOutputTokens
-          }
+          options: this.buildOptions(request3)
         };
         const response = await fetch(`${this.baseUrl}/api/chat`, {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json"
-          },
+          headers: { "Content-Type": "application/json" },
           body: JSON.stringify(ollamaRequest)
         });
         if (!response.ok) {
-          throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+          const body = await response.text().catch(() => "");
+          if (this.isToolsNotSupportedError(body) && tools.length > 0) {
+            this.toolsSupported = false;
+            return this.generateContent(request3, userPromptId);
+          }
+          throw new Error(`Ollama API error: ${response.status} ${response.statusText} - ${body}`);
         }
         const ollamaResponse = await response.json();
         return this.convertFromOllamaChatResponse(ollamaResponse);
       }
       async generateContentStream(request3, userPromptId) {
         const messages = this.convertToOllamaMessages(request3);
-        const tools = this.convertToOllamaTools(request3);
+        const tools = this.toolsSupported ? this.convertToOllamaTools(request3) : [];
         const ollamaRequest = {
           model: this.model,
           messages,
           stream: true,
           ...tools.length > 0 ? { tools } : {},
-          options: {
-            temperature: request3.generationConfig?.temperature ?? request3.config?.temperature ?? 0,
-            top_p: request3.generationConfig?.topP ?? request3.config?.topP ?? 1,
-            num_predict: request3.generationConfig?.maxOutputTokens ?? request3.config?.maxOutputTokens
-          }
+          options: this.buildOptions(request3)
         };
         const response = await fetch(`${this.baseUrl}/api/chat`, {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json"
-          },
+          headers: { "Content-Type": "application/json" },
           body: JSON.stringify(ollamaRequest)
         });
         if (!response.ok) {
-          throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+          const body = await response.text().catch(() => "");
+          if (this.isToolsNotSupportedError(body) && tools.length > 0) {
+            this.toolsSupported = false;
+            return this.generateContentStream(request3, userPromptId);
+          }
+          throw new Error(`Ollama API error: ${response.status} ${response.statusText} - ${body}`);
        }
         const reader = response.body?.getReader();
         if (!reader) {
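
Both methods above now share a one-shot capability fallback: when a request fails, the response body is inspected, and if it indicates the model cannot accept tools, `toolsSupported` is flipped off and the same call is retried without them. A minimal standalone sketch of that pattern follows; `chatOnce` and `TOOL_ERROR_MARKERS` are illustrative names, not part of the package:

// Hypothetical reduction of the retry-without-tools pattern shown above.
const TOOL_ERROR_MARKERS = ["does not support tools", "tool use is not supported"];
let toolsSupported = true;

async function chatOnce(baseUrl, request, tools) {
  // Only attach tools while the backend is still believed to support them.
  const payload = { ...request, ...(toolsSupported && tools.length > 0 ? { tools } : {}) };
  const res = await fetch(`${baseUrl}/api/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload)
  });
  if (!res.ok) {
    const body = await res.text().catch(() => "");
    // At most one retry: once the flag flips, the tools spread above is skipped.
    if (toolsSupported && tools.length > 0 && TOOL_ERROR_MARKERS.some((m) => body.toLowerCase().includes(m))) {
      toolsSupported = false;
      return chatOnce(baseUrl, request, tools);
    }
    throw new Error(`Ollama API error: ${res.status} ${res.statusText} - ${body}`);
  }
  return res.json();
}

Because the flag is an instance field in the real provider, the downgrade is sticky for the lifetime of the provider instance, so later calls skip tool conversion entirely.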
@@ -142027,6 +142026,37 @@ var init_ollama_provider = __esm({
       validateConfig(config) {
         return config.provider === "ollama" && !!config.model;
       }
+      isToolsNotSupportedError(body) {
+        const lower = body.toLowerCase();
+        return lower.includes("does not support tools") || lower.includes("tool use is not supported");
+      }
+      parseToolArgs(args) {
+        if (args === null || args === void 0)
+          return {};
+        if (typeof args === "object" && !Array.isArray(args))
+          return args;
+        if (typeof args === "string") {
+          try {
+            return JSON.parse(args);
+          } catch {
+            return {};
+          }
+        }
+        return {};
+      }
+      buildOptions(request3) {
+        const options2 = {};
+        const temp = request3.generationConfig?.temperature ?? request3.config?.temperature;
+        if (temp !== void 0)
+          options2.temperature = temp;
+        const topP = request3.generationConfig?.topP ?? request3.config?.topP;
+        if (topP !== void 0)
+          options2.top_p = topP;
+        const maxTokens = request3.generationConfig?.maxOutputTokens ?? request3.config?.maxOutputTokens;
+        if (maxTokens !== void 0)
+          options2.num_predict = maxTokens;
+        return options2;
+      }
       convertToOllamaMessages(request3) {
         const messages = [];
         if (request3.systemInstruction) {
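
Two of these helpers change behavior, not just shape. `buildOptions` no longer injects fallback values (the old inline object forced `temperature` to 0 and `top_p` to 1 when unset), so omitted settings now fall through to Ollama's own model defaults. And `parseToolArgs` replaces a bare `JSON.parse` that would throw on malformed argument strings. Its normalization in isolation looks like this (the sample calls are illustrative):

// parseToolArgs as added above, standalone; comments show the expected results.
function parseToolArgs(args) {
  if (args === null || args === undefined) return {};                // missing -> empty args
  if (typeof args === "object" && !Array.isArray(args)) return args; // already a plain object
  if (typeof args === "string") {
    try {
      return JSON.parse(args);                                       // valid JSON parses through
    } catch {
      return {};                                                     // malformed JSON no longer throws
    }
  }
  return {};                                                         // arrays, numbers, etc.
}

console.log(parseToolArgs({ path: "a.txt" }));  // { path: "a.txt" }
console.log(parseToolArgs('{"path":"a.txt"}')); // { path: "a.txt" }
console.log(parseToolArgs("not json"));         // {}
console.log(parseToolArgs(void 0));             // {}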
@@ -142107,12 +142137,11 @@ var init_ollama_provider = __esm({
         }
         if (message?.tool_calls) {
           for (const toolCall of message.tool_calls) {
-            if (toolCall.function) {
-              const args = typeof toolCall.function.arguments === "string" ? JSON.parse(toolCall.function.arguments) : toolCall.function.arguments || {};
+            if (toolCall.function?.name) {
               parts.push({
                 functionCall: {
                   name: toolCall.function.name,
-                  args
+                  args: this.parseToolArgs(toolCall.function.arguments)
                 }
               });
             }
@@ -142166,10 +142195,12 @@ var init_ollama_provider = __esm({
         }
         if (chunk.message?.tool_calls) {
           for (const toolCall of chunk.message.tool_calls) {
-            if (toolCall.function) {
-              const args = typeof toolCall.function.arguments === "string" ? JSON.parse(toolCall.function.arguments) : toolCall.function.arguments || {};
+            if (toolCall.function?.name) {
               accumulatedToolCalls.push({
-                function: { name: toolCall.function.name, arguments: args }
+                function: {
+                  name: toolCall.function.name,
+                  arguments: this.parseToolArgs(toolCall.function.arguments)
+                }
               });
             }
           }
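
Both the batch converter and this streaming converter now guard on `toolCall.function?.name` and route arguments through `parseToolArgs`, so nameless or malformed tool calls are skipped or defanged instead of crashing the parse. For context, a hedged sketch of how such calls would be collected from Ollama's newline-delimited JSON stream; `collectToolCalls` is an illustrative name, and it reuses the `parseToolArgs` helper from above:

// Illustrative accumulation loop over NDJSON chunks from POST /api/chat.
function collectToolCalls(ndjsonText) {
  const accumulated = [];
  for (const line of ndjsonText.split("\n")) {
    if (!line.trim()) continue;
    const chunk = JSON.parse(line);
    for (const toolCall of chunk.message?.tool_calls ?? []) {
      if (toolCall.function?.name) { // skip nameless entries, as the diff does
        accumulated.push({
          function: {
            name: toolCall.function.name,
            arguments: parseToolArgs(toolCall.function.arguments)
          }
        });
      }
    }
  }
  return accumulated;
}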
@@ -142365,7 +142396,7 @@ function createContentGeneratorConfig(config, authType) {
   return contentGeneratorConfig;
 }
 async function createContentGenerator(config, gcConfig, sessionId2) {
-  const version2 = "0.0.4";
+  const version2 = "0.0.6";
   const userAgent2 = `GeminiCLI/${version2} (${process.platform}; ${process.arch})`;
   const baseHeaders = {
     "User-Agent": userAgent2
@@ -274727,7 +274758,7 @@ async function getPackageJson() {
 // packages/cli/src/utils/version.ts
 async function getCliVersion() {
   const pkgJson = await getPackageJson();
-  return "0.0.4";
+  return "0.0.6";
 }
 
 // packages/cli/src/ui/commands/aboutCommand.ts
@@ -274779,7 +274810,7 @@ init_open();
 import process30 from "node:process";
 
 // packages/cli/src/generated/git-commit.ts
-var GIT_COMMIT_INFO2 = "55b76129";
+var GIT_COMMIT_INFO2 = "4e5c77fc";
 
 // packages/cli/src/ui/commands/bugCommand.ts
 init_dist3();
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@marizmelo/llm-cli",
-  "version": "0.0.4",
+  "version": "0.0.6",
   "engines": {
     "node": ">=20.0.0"
   },