modelfusion 0.66.1 → 0.67.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -72,6 +72,8 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  "system",
  "template",
  "context",
+ "format",
+ "raw",
  ];
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
  }
@@ -140,7 +142,7 @@ const ollamaTextStreamingResponseSchema = zod_1.z.discriminatedUnion("done", [
  context: zod_1.z.array(zod_1.z.number()),
  }),
  ]);
- async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration_js_1.OllamaApiConfiguration(), abortSignal, responseFormat, prompt, model, contextWindowSize, maxCompletionTokens, mirostat, mirostat_eta, mirostat_tau, num_gpu, num_gqa, num_threads, repeat_last_n, repeat_penalty, seed, stopSequences, temperature, tfs_z, top_k, top_p, system, template, context, }) {
+ async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration_js_1.OllamaApiConfiguration(), abortSignal, responseFormat, prompt, model, format, contextWindowSize, maxCompletionTokens, mirostat, mirostat_eta, mirostat_tau, num_gpu, num_gqa, num_threads, repeat_last_n, repeat_penalty, seed, stopSequences, temperature, tfs_z, top_k, top_p, system, template, context, raw, }) {
  return (0, postToApi_js_1.postJsonToApi)({
  url: api.assembleUrl(`/api/generate`),
  headers: api.headers,
@@ -148,6 +150,7 @@ async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration_js
  stream: responseFormat.stream,
  model,
  prompt,
+ format,
  options: {
  mirostat,
  mirostat_eta,
@@ -169,6 +172,7 @@ async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration_js
  system,
  template,
  context,
+ raw,
  },
  failedResponseHandler: OllamaError_js_1.failedOllamaCallResponseHandler,
  successfulResponseHandler: responseFormat.handler,
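
Taken together, these hunks thread the two new settings through to the Ollama request: `format` is sent next to `prompt`, and `raw` is sent alongside `system`, `template`, and `context`. A minimal sketch of the JSON body the updated call posts to `/api/generate` (field placement is inferred from the hunks above; the concrete values and the elided `options` entries are illustrative):

// Sketch of the request body assembled by callOllamaTextGenerationAPI after this change.
// Field placement follows the hunks above; values are made up for illustration.
const body = {
  stream: false,
  model: "mistral",
  prompt: "List three facts about Berlin as JSON.",
  format: "json", // new in 0.67.0: ask Ollama to return a JSON response
  options: {
    temperature: 0.8,
    // ...remaining sampling options unchanged from 0.66.x
  },
  system: undefined,
  template: undefined,
  context: undefined,
  raw: false, // new in 0.67.0: when true, skip Ollama's prompt templating
};
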
@@ -7,27 +7,104 @@ import { Delta } from "../../model-function/Delta.js";
  import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+ /**
+ * @see https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
+ */
  export interface OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
  api?: ApiConfiguration;
+ /**
+ * The name of the model to use. For example, 'mistral'.
+ *
+ * @see https://ollama.ai/library
+ */
  model: string;
+ /**
+ * The temperature of the model. Increasing the temperature will make the model
+ * answer more creatively. (Default: 0.8)
+ */
  temperature?: number;
  /**
  * Specify the context window size of the model that you have loaded in your
- * Ollama server.
+ * Ollama server. (Default: 2048)
  */
  contextWindowSize?: CONTEXT_WINDOW_SIZE;
+ /**
+ * Enable Mirostat sampling for controlling perplexity.
+ * (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)
+ */
  mirostat?: number;
+ /**
+ * Influences how quickly the algorithm responds to feedback from the generated text.
+ * A lower learning rate will result in slower adjustments,
+ * while a higher learning rate will make the algorithm more responsive. (Default: 0.1)
+ */
  mirostat_eta?: number;
+ /**
+ * Controls the balance between coherence and diversity of the output.
+ * A lower value will result in more focused and coherent text. (Default: 5.0)
+ */
  mirostat_tau?: number;
+ /**
+ * The number of GQA groups in the transformer layer. Required for some models,
+ * for example it is 8 for llama2:70b
+ */
  num_gqa?: number;
+ /**
+ * The number of layers to send to the GPU(s). On macOS it defaults to 1 to
+ * enable metal support, 0 to disable.
+ */
  num_gpu?: number;
+ /**
+ * Sets the number of threads to use during computation. By default, Ollama will
+ * detect this for optimal performance. It is recommended to set this value to the
+ * number of physical CPU cores your system has (as opposed to the logical number of cores).
+ */
  num_threads?: number;
+ /**
+ * Sets how far back for the model to look back to prevent repetition.
+ * (Default: 64, 0 = disabled, -1 = num_ctx)
+ */
  repeat_last_n?: number;
+ /**
+ * Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
+ * will penalize repetitions more strongly, while a lower value (e.g., 0.9)
+ * will be more lenient. (Default: 1.1)
+ */
  repeat_penalty?: number;
+ /**
+ * Sets the random number seed to use for generation. Setting this to a
+ * specific number will make the model generate the same text for the same prompt.
+ * (Default: 0)
+ */
  seed?: number;
+ /**
+ * Tail free sampling is used to reduce the impact of less probable tokens
+ * from the output. A higher value (e.g., 2.0) will reduce the impact more,
+ * while a value of 1.0 disables this setting. (default: 1)
+ */
  tfs_z?: number;
+ /**
+ * Reduces the probability of generating nonsense. A higher value (e.g. 100)
+ * will give more diverse answers, while a lower value (e.g. 10) will be more
+ * conservative. (Default: 40)
+ */
  top_k?: number;
+ /**
+ * Works together with top-k. A higher value (e.g., 0.95) will lead to more
+ * diverse text, while a lower value (e.g., 0.5) will generate more focused
+ * and conservative text. (Default: 0.9)
+ */
  top_p?: number;
+ /**
+ * When set to true, no formatting will be applied to the prompt and no context
+ * will be returned.
+ */
+ raw?: boolean;
+ /**
+ * The format to return a response in. Currently the only accepted value is 'json'.
+ * Leave undefined to return a string.
+ */
+ format?: "json";
  system?: string;
  template?: string;
  context?: number[];
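
The JSDoc additions mirror the Ollama completion API documentation, and the two new optional settings, `format` and `raw`, surface directly on `OllamaTextGenerationModelSettings`, so they can be passed when constructing the model. A minimal usage sketch, assuming the `generateText(model, prompt)` call style of the 0.6x release line (model name and prompt are placeholders):

import { generateText, OllamaTextGenerationModel } from "modelfusion";

// Assumption: generateText(model, prompt) as in the 0.6x examples;
// adjust to your version's call shape if it differs.
const model = new OllamaTextGenerationModel({
  model: "mistral",
  format: "json", // new: ask Ollama for a JSON-formatted response
  raw: true,      // new: bypass Ollama's prompt template; no context is returned
  temperature: 0.8,
});

const text = await generateText(
  model,
  "Describe the city of Berlin as a JSON object with keys name, country, population."
);
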
@@ -69,6 +69,8 @@ export class OllamaTextGenerationModel extends AbstractModel {
  "system",
  "template",
  "context",
+ "format",
+ "raw",
  ];
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
  }
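
As in the CJS build above, `eventSettingProperties` is the whitelist of settings copied into events, now including `"format"` and `"raw"`. A standalone sketch of the filter pattern (the leading array entries are elided in the diff, so only the visible tail is reproduced; the settings object is illustrative):

// Illustration of the whitelist filter used above; not an exported API of the library.
const eventSettingProperties = ["system", "template", "context", "format", "raw" /* , ... */];

const settings = { model: "mistral", format: "json", raw: true, template: "{{ .Prompt }}" };

// Keep only whitelisted keys when reporting settings.
const reported = Object.fromEntries(
  Object.entries(settings).filter(([key]) => eventSettingProperties.includes(key))
);
// reported => { format: "json", raw: true, template: "{{ .Prompt }}" }
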
@@ -136,7 +138,7 @@ const ollamaTextStreamingResponseSchema = z.discriminatedUnion("done", [
  context: z.array(z.number()),
  }),
  ]);
- async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration(), abortSignal, responseFormat, prompt, model, contextWindowSize, maxCompletionTokens, mirostat, mirostat_eta, mirostat_tau, num_gpu, num_gqa, num_threads, repeat_last_n, repeat_penalty, seed, stopSequences, temperature, tfs_z, top_k, top_p, system, template, context, }) {
+ async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration(), abortSignal, responseFormat, prompt, model, format, contextWindowSize, maxCompletionTokens, mirostat, mirostat_eta, mirostat_tau, num_gpu, num_gqa, num_threads, repeat_last_n, repeat_penalty, seed, stopSequences, temperature, tfs_z, top_k, top_p, system, template, context, raw, }) {
  return postJsonToApi({
  url: api.assembleUrl(`/api/generate`),
  headers: api.headers,
@@ -144,6 +146,7 @@ async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration(),
  stream: responseFormat.stream,
  model,
  prompt,
+ format,
  options: {
  mirostat,
  mirostat_eta,
@@ -165,6 +168,7 @@ async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration(),
  system,
  template,
  context,
+ raw,
  },
  failedResponseHandler: failedOllamaCallResponseHandler,
  successfulResponseHandler: responseFormat.handler,
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "modelfusion",
  "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
- "version": "0.66.1",
+ "version": "0.67.0",
  "author": "Lars Grammel",
  "license": "MIT",
  "keywords": [
@@ -8,9 +8,9 @@ function parseJsonStream({ schema, stream, process, onDone, }) {
  }
  return (async () => {
  try {
- let unprocessedText = "";
  const reader = new ReadableStreamDefaultReader(stream);
  const utf8Decoder = new TextDecoder("utf-8");
+ let unprocessedText = "";
  // eslint-disable-next-line no-constant-condition
  while (true) {
  const { value: chunk, done } = await reader.read();
@@ -18,8 +18,8 @@ function parseJsonStream({ schema, stream, process, onDone, }) {
  break;
  }
  unprocessedText += utf8Decoder.decode(chunk, { stream: true });
- const processableLines = unprocessedText.split(/\r\n|\n|\r/g);
- unprocessedText = processableLines.pop() || "";
+ const processableLines = unprocessedText.split("\n");
+ unprocessedText = processableLines.pop() ?? "";
  processableLines.forEach(processLine);
  }
  // processing remaining text:
@@ -5,9 +5,9 @@ export function parseJsonStream({ schema, stream, process, onDone, }) {
  }
  return (async () => {
  try {
- let unprocessedText = "";
  const reader = new ReadableStreamDefaultReader(stream);
  const utf8Decoder = new TextDecoder("utf-8");
+ let unprocessedText = "";
  // eslint-disable-next-line no-constant-condition
  while (true) {
  const { value: chunk, done } = await reader.read();
@@ -15,8 +15,8 @@ export function parseJsonStream({ schema, stream, process, onDone, }) {
  break;
  }
  unprocessedText += utf8Decoder.decode(chunk, { stream: true });
- const processableLines = unprocessedText.split(/\r\n|\n|\r/g);
- unprocessedText = processableLines.pop() || "";
+ const processableLines = unprocessedText.split("\n");
+ unprocessedText = processableLines.pop() ?? "";
  processableLines.forEach(processLine);
  }
  // processing remaining text:
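
The `parseJsonStream` change (identical in the CJS and ESM builds) moves the `unprocessedText` declaration next to the reader and decoder it belongs with, splits the newline-delimited JSON stream on `"\n"` instead of a general line-break regex, and uses `??` rather than `||` for the fallback. A condensed sketch of the revised loop with an illustrative callback (the real `parseJsonStream` takes `schema`, `stream`, `process`, and `onDone`, and validates each parsed line against `schema` before handing it to `process`):

// Condensed sketch of the revised newline-delimited JSON loop; readJsonLines and
// processLine are illustrative names, not part of the library's API.
async function readJsonLines(
  stream: ReadableStream<Uint8Array>,
  processLine: (line: string) => void
) {
  const reader = new ReadableStreamDefaultReader(stream);
  const utf8Decoder = new TextDecoder("utf-8");
  let unprocessedText = "";

  while (true) {
    const { value: chunk, done } = await reader.read();
    if (done) break;

    unprocessedText += utf8Decoder.decode(chunk, { stream: true });

    // Each complete line is one JSON object; the trailing partial line stays
    // buffered until the next chunk arrives.
    const processableLines = unprocessedText.split("\n");
    unprocessedText = processableLines.pop() ?? "";
    processableLines.forEach(processLine);
  }

  // Process any text still buffered after the stream ends.
  if (unprocessedText.length > 0) {
    processLine(unprocessedText);
  }
}
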