markpdfdown 0.2.1 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,20 +18,33 @@ class AnthropicClient extends LLMClient {
       model: modelName,
       messages: anthropicMessages,
       temperature: normalizedOptions.temperature ?? 0.7,
-      max_tokens: normalizedOptions.maxTokens,
       stream: normalizedOptions.stream || false
     };
+    if (typeof normalizedOptions.maxTokens === "number" && normalizedOptions.maxTokens > 0) {
+      requestBody.max_tokens = normalizedOptions.maxTokens;
+    }
     const systemMessage = normalizedOptions.messages.find((msg) => msg.role === "system");
+    let systemText = "";
     if (systemMessage) {
-      const systemContent = Array.isArray(systemMessage.content) ? systemMessage.content.filter((c) => c.type === "text").map((c) => c.text).join("\n") : systemMessage.content.type === "text" ? systemMessage.content.text : "";
-      if (systemContent) {
-        requestBody.system = systemContent;
+      systemText = Array.isArray(systemMessage.content) ? systemMessage.content.filter((c) => c.type === "text").map((c) => c.text).join("\n") : systemMessage.content.type === "text" ? systemMessage.content.text : "";
+    }
+    if (normalizedOptions.response_format?.type === "json_object") {
+      if (!systemText) {
+        systemText = "请以有效的JSON格式提供响应。";
+      } else {
+        systemText += "\n\n请以有效的JSON格式提供响应。";
       }
     }
-    if (normalizedOptions.response_format?.type === "json_object" && !requestBody.system) {
-      requestBody.system = "请以有效的JSON格式提供响应。";
-    } else if (normalizedOptions.response_format?.type === "json_object" && requestBody.system) {
-      requestBody.system += "\n\n请以有效的JSON格式提供响应。";
+    if (systemText) {
+      requestBody.system = [
+        {
+          type: "text",
+          text: systemText,
+          cache_control: {
+            type: "ephemeral"
+          }
+        }
+      ];
     }
     const response = await fetch(endpoint, {
       method: "POST",
@@ -16,10 +16,12 @@ class GeminiClient extends LLMClient {
       contents: geminiContents,
       generationConfig: {
         temperature: normalizedOptions.temperature ?? 0.7,
-        maxOutputTokens: normalizedOptions.maxTokens,
         topP: 0.95
       }
     };
+    if (typeof normalizedOptions.maxTokens === "number" && normalizedOptions.maxTokens > 0) {
+      requestBody.generationConfig.maxOutputTokens = normalizedOptions.maxTokens;
+    }
     if (normalizedOptions.response_format?.type === "json_object") {
       requestBody.generationConfig.response_mime_type = "application/json";
     }
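
The Gemini hunk applies the same positive-number guard, so maxOutputTokens only appears in generationConfig when a usable limit was supplied; the JSON-mode branch below it is unchanged. A hypothetical payload after the change, with made-up contents and maxTokens = 2048:

// Illustrative only: generationConfig when maxTokens = 2048 and JSON mode is requested.
const requestBody = {
  contents: [{ role: "user", parts: [{ text: "Convert this page to Markdown." }] }],
  generationConfig: {
    temperature: 0.7,
    topP: 0.95
  }
};
const maxTokens = 2048;
if (typeof maxTokens === "number" && maxTokens > 0) {
  requestBody.generationConfig.maxOutputTokens = maxTokens;              // added only for a positive number
}
requestBody.generationConfig.response_mime_type = "application/json";    // JSON mode, unchanged by the diff
console.log(JSON.stringify(requestBody.generationConfig));
// => {"temperature":0.7,"topP":0.95,"maxOutputTokens":2048,"response_mime_type":"application/json"}
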
@@ -20,7 +20,7 @@ class OllamaClient extends LLMClient {
     if (normalizedOptions.temperature !== void 0) {
       requestBody.options.temperature = normalizedOptions.temperature;
     }
-    if (normalizedOptions.maxTokens !== void 0) {
+    if (typeof normalizedOptions.maxTokens === "number" && normalizedOptions.maxTokens > 0) {
       requestBody.options.num_predict = normalizedOptions.maxTokens;
     }
     if (normalizedOptions.tools && normalizedOptions.tools.length > 0) {
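
The Ollama hunk tightens the existing check: in 0.2.1 any value other than undefined (including 0, a negative number, or a non-numeric value) was copied into options.num_predict; in 0.3.1 only a positive number is. The OpenAIClient and OpenAIResponsesClient hunks below adopt the same guard for max_tokens. A standalone sketch with made-up values:

// Hypothetical value illustrating the difference between the two guards.
const maxTokens = 0;   // e.g. an unset or "no limit" option

// 0.2.1 behaviour: anything other than undefined is forwarded.
const oldOptions = {};
if (maxTokens !== void 0) {
  oldOptions.num_predict = maxTokens;      // sends num_predict: 0 to Ollama
}

// 0.3.1 behaviour: only a positive number is forwarded.
const newOptions = {};
if (typeof maxTokens === "number" && maxTokens > 0) {
  newOptions.num_predict = maxTokens;      // skipped here, the key is omitted entirely
}

console.log(oldOptions, newOptions);       // { num_predict: 0 } {}
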
@@ -14,9 +14,11 @@ class OpenAIClient extends LLMClient {
       model: normalizedOptions.model || "gpt-3.5-turbo",
       messages: openaiMessages,
       temperature: normalizedOptions.temperature ?? 0.7,
-      max_tokens: normalizedOptions.maxTokens,
       stream: normalizedOptions.stream || false
     };
+    if (typeof normalizedOptions.maxTokens === "number" && normalizedOptions.maxTokens > 0) {
+      requestBody.max_tokens = normalizedOptions.maxTokens;
+    }
     if (normalizedOptions.tools && normalizedOptions.tools.length > 0) {
       requestBody.tools = normalizedOptions.tools;
       if (normalizedOptions.tool_choice) {
@@ -14,9 +14,11 @@ class OpenAIResponsesClient extends LLMClient {
       model: normalizedOptions.model || "gpt-4o",
       input,
       temperature: normalizedOptions.temperature ?? 0.7,
-      max_tokens: normalizedOptions.maxTokens,
       stream: normalizedOptions.stream || false
     };
+    if (typeof normalizedOptions.maxTokens === "number" && normalizedOptions.maxTokens > 0) {
+      requestBody.max_tokens = normalizedOptions.maxTokens;
+    }
     if (instructions) {
       requestBody.instructions = instructions;
     }