ace-llm 0.30.0
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.ace-defaults/llm/config.yml +31 -0
- data/.ace-defaults/llm/presets/claude/prompt.yml +5 -0
- data/.ace-defaults/llm/presets/claude/ro.yml +6 -0
- data/.ace-defaults/llm/presets/claude/rw.yml +4 -0
- data/.ace-defaults/llm/presets/claude/yolo.yml +3 -0
- data/.ace-defaults/llm/presets/codex/ro.yml +5 -0
- data/.ace-defaults/llm/presets/codex/rw.yml +3 -0
- data/.ace-defaults/llm/presets/codex/yolo.yml +3 -0
- data/.ace-defaults/llm/presets/gemini/ro.yml +4 -0
- data/.ace-defaults/llm/presets/gemini/rw.yml +4 -0
- data/.ace-defaults/llm/presets/gemini/yolo.yml +4 -0
- data/.ace-defaults/llm/presets/opencode/ro.yml +1 -0
- data/.ace-defaults/llm/presets/opencode/rw.yml +1 -0
- data/.ace-defaults/llm/presets/opencode/yolo.yml +3 -0
- data/.ace-defaults/llm/presets/pi/ro.yml +1 -0
- data/.ace-defaults/llm/presets/pi/rw.yml +1 -0
- data/.ace-defaults/llm/presets/pi/yolo.yml +1 -0
- data/.ace-defaults/llm/providers/anthropic.yml +34 -0
- data/.ace-defaults/llm/providers/google.yml +36 -0
- data/.ace-defaults/llm/providers/groq.yml +29 -0
- data/.ace-defaults/llm/providers/lmstudio.yml +24 -0
- data/.ace-defaults/llm/providers/mistral.yml +33 -0
- data/.ace-defaults/llm/providers/openai.yml +33 -0
- data/.ace-defaults/llm/providers/openrouter.yml +45 -0
- data/.ace-defaults/llm/providers/togetherai.yml +26 -0
- data/.ace-defaults/llm/providers/xai.yml +30 -0
- data/.ace-defaults/llm/providers/zai.yml +18 -0
- data/.ace-defaults/llm/thinking/claude/high.yml +3 -0
- data/.ace-defaults/llm/thinking/claude/low.yml +3 -0
- data/.ace-defaults/llm/thinking/claude/medium.yml +3 -0
- data/.ace-defaults/llm/thinking/claude/xhigh.yml +3 -0
- data/.ace-defaults/llm/thinking/codex/high.yml +3 -0
- data/.ace-defaults/llm/thinking/codex/low.yml +3 -0
- data/.ace-defaults/llm/thinking/codex/medium.yml +3 -0
- data/.ace-defaults/llm/thinking/codex/xhigh.yml +3 -0
- data/.ace-defaults/nav/protocols/guide-sources/ace-llm.yml +10 -0
- data/CHANGELOG.md +641 -0
- data/LICENSE +21 -0
- data/README.md +42 -0
- data/Rakefile +14 -0
- data/exe/ace-llm +25 -0
- data/handbook/guides/llm-query-tool-reference.g.md +683 -0
- data/handbook/templates/agent/plan-mode.template.md +48 -0
- data/lib/ace/llm/atoms/env_reader.rb +155 -0
- data/lib/ace/llm/atoms/error_classifier.rb +200 -0
- data/lib/ace/llm/atoms/http_client.rb +162 -0
- data/lib/ace/llm/atoms/provider_config_validator.rb +260 -0
- data/lib/ace/llm/atoms/xdg_directory_resolver.rb +189 -0
- data/lib/ace/llm/cli/commands/query.rb +280 -0
- data/lib/ace/llm/cli.rb +24 -0
- data/lib/ace/llm/configuration.rb +180 -0
- data/lib/ace/llm/models/fallback_config.rb +216 -0
- data/lib/ace/llm/molecules/client_registry.rb +336 -0
- data/lib/ace/llm/molecules/config_loader.rb +39 -0
- data/lib/ace/llm/molecules/fallback_orchestrator.rb +218 -0
- data/lib/ace/llm/molecules/file_io_handler.rb +158 -0
- data/lib/ace/llm/molecules/format_handlers.rb +183 -0
- data/lib/ace/llm/molecules/llm_alias_resolver.rb +50 -0
- data/lib/ace/llm/molecules/openai_compatible_params.rb +21 -0
- data/lib/ace/llm/molecules/preset_loader.rb +99 -0
- data/lib/ace/llm/molecules/provider_loader.rb +198 -0
- data/lib/ace/llm/molecules/provider_model_parser.rb +172 -0
- data/lib/ace/llm/molecules/thinking_level_loader.rb +83 -0
- data/lib/ace/llm/organisms/anthropic_client.rb +213 -0
- data/lib/ace/llm/organisms/base_client.rb +264 -0
- data/lib/ace/llm/organisms/google_client.rb +187 -0
- data/lib/ace/llm/organisms/groq_client.rb +197 -0
- data/lib/ace/llm/organisms/lmstudio_client.rb +146 -0
- data/lib/ace/llm/organisms/mistral_client.rb +180 -0
- data/lib/ace/llm/organisms/openai_client.rb +195 -0
- data/lib/ace/llm/organisms/openrouter_client.rb +216 -0
- data/lib/ace/llm/organisms/togetherai_client.rb +184 -0
- data/lib/ace/llm/organisms/xai_client.rb +213 -0
- data/lib/ace/llm/organisms/zai_client.rb +149 -0
- data/lib/ace/llm/query_interface.rb +455 -0
- data/lib/ace/llm/version.rb +7 -0
- data/lib/ace/llm.rb +61 -0
- metadata +318 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: 91f463c56920b3bdaf3db5b85526ee15e8b15f101bb027e2acaf17537b6980e6
+  data.tar.gz: 56287d9afefbaed517b9d0a450c2d9c31a3e5b6b05c752e3f94097e029615466
+SHA512:
+  metadata.gz: 3d4706d5e1120f324a5f02d85766ba5dd8f49506366ee92e85f25434348bcad680826a22f7abf0802cc8bb4e0feed12acb0692e3d68d8289418d4649a1b80804
+  data.tar.gz: 23a2fb71311c4c95713fa5f6e03c65fd0aec10dba13c2703f81fbec6f397c757bd61aee24b2cc06f565ebaef0e2f1db927779219944639dfb853820888157731
data/.ace-defaults/llm/config.yml
ADDED
@@ -0,0 +1,31 @@
+# ace-llm default configuration
+# Override in .ace/llm/config.yml for project-specific settings
+
+llm:
+  # Optional allow-list of active providers.
+  # When unset, null, or empty, all discovered providers remain available.
+  # Example:
+  # providers:
+  #   active:
+  #     - google
+  #     - anthropic
+  providers: {}
+
+  # Default timeout in seconds for LLM requests
+  # CLI providers (opencode, claude, codex, gemini) may need longer
+  # due to subprocess overhead and "thinking" phases
+  timeout: 120
+
+  # Global fallback policy shared by QueryInterface and ace-llm CLI query.
+  fallback:
+    enabled: true
+    retry_count: 3
+    retry_delay: 1.0
+    max_total_timeout: 30.0
+    chains: {}
+    providers: []
+
+  # Default context limit for unknown models (in tokens)
+  # Provider-specific limits are defined in providers/*.yml
+  context_limit:
+    default: 128000
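The fallback block above is the knob set shared by QueryInterface and the CLI query command: up to `retry_count` attempts per provider, `retry_delay` seconds between attempts, and a `max_total_timeout` budget across the whole chain. A minimal sketch of how such a policy could be applied — hypothetical shape only; the gem's actual FallbackOrchestrator (molecules/fallback_orchestrator.rb) may differ:

```ruby
# Hypothetical sketch of applying the fallback policy above; not the gem's
# actual FallbackOrchestrator.
POLICY = { retry_count: 3, retry_delay: 1.0, max_total_timeout: 30.0 }.freeze

def query_with_fallback(providers, prompt, policy: POLICY)
  now      = -> { Process.clock_gettime(Process::CLOCK_MONOTONIC) }
  deadline = now.call + policy[:max_total_timeout]
  providers.each do |provider|
    policy[:retry_count].times do
      raise "fallback budget exhausted" if now.call >= deadline
      begin
        return yield(provider, prompt)   # caller supplies the actual request
      rescue StandardError
        sleep policy[:retry_delay]       # fixed delay before the next attempt
      end
    end
  end
  raise "all providers in the chain failed"
end

# e.g. query_with_fallback(%w[anthropic google], "ping") { |prov, q| ... }
```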
data/.ace-defaults/llm/presets/opencode/ro.yml
ADDED
@@ -0,0 +1 @@
+timeout: 600

data/.ace-defaults/llm/presets/opencode/rw.yml
ADDED
@@ -0,0 +1 @@
+timeout: 600

data/.ace-defaults/llm/presets/pi/ro.yml
ADDED
@@ -0,0 +1 @@
+timeout: 600

data/.ace-defaults/llm/presets/pi/rw.yml
ADDED
@@ -0,0 +1 @@
+timeout: 600

data/.ace-defaults/llm/presets/pi/yolo.yml
ADDED
@@ -0,0 +1 @@
+timeout: 600
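These one-line presets (matched to the five `+1` files in the list above by line count) only raise the request timeout from the global default of 120s to 600s, in line with the config.yml comment about subprocess-based CLI providers. A hedged sketch of that overlay, assuming a shallow merge (the gem's PresetLoader may merge differently):

```ruby
require "yaml"

# Hedged sketch: overlay a one-line preset on the global defaults
# (the gem's PresetLoader may merge differently).
defaults  = { "timeout" => 120 }              # from llm/config.yml
preset    = YAML.safe_load("timeout: 600")    # e.g. presets/pi/ro.yml
effective = defaults.merge(preset)            # preset value wins
effective["timeout"]                          # => 600
```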
data/.ace-defaults/llm/providers/anthropic.yml
ADDED
@@ -0,0 +1,34 @@
+name: anthropic
+last_synced: 2026-03-23
+class: Ace::LLM::Organisms::AnthropicClient
+gem: ace-llm
+context_limit: 1000000 # Claude models have 1M context window
+models:
+  - claude-haiku-4-5
+  - claude-haiku-4-5-20251001
+  - claude-opus-4-5
+  - claude-opus-4-5-20251101
+  - claude-sonnet-4-5
+  - claude-sonnet-4-5-20250929
+aliases:
+  global:
+    opus: anthropic:o
+    sonnet: anthropic:s
+    haiku: anthropic:h
+    csonnet: anthropic:s
+  model:
+    s: claude-sonnet-4-5
+    o: claude-opus-4-5
+    h: claude-haiku-4-5
+api_key:
+  env: ANTHROPIC_API_KEY
+  required: true
+  description: Anthropic API key
+capabilities:
+  - text_generation
+  - streaming
+  - function_calling
+  - vision
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
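The aliases map above implies a two-level resolution scheme: a global alias (`opus`) expands to a `provider:model-alias` reference (`anthropic:o`), and the provider's `model` map then expands the short alias to a concrete model ID. A minimal sketch of that lookup with the tables inlined — hypothetical; the gem's LlmAliasResolver (molecules/llm_alias_resolver.rb) may differ:

```ruby
# Minimal sketch of the two-level alias resolution the YAML above implies
# (hypothetical; not the gem's actual LlmAliasResolver).
GLOBAL_ALIASES = { "opus" => "anthropic:o", "sonnet" => "anthropic:s", "haiku" => "anthropic:h" }
MODEL_ALIASES  = { "anthropic" => { "o" => "claude-opus-4-5",
                                    "s" => "claude-sonnet-4-5",
                                    "h" => "claude-haiku-4-5" } }

def resolve(name)
  ref = GLOBAL_ALIASES.fetch(name, name)   # "opus" -> "anthropic:o"
  provider, short = ref.split(":", 2)      # split only on the first colon
  model = MODEL_ALIASES.dig(provider, short) || short
  [provider, model]
end

resolve("opus")          # => ["anthropic", "claude-opus-4-5"]
resolve("anthropic:s")   # => ["anthropic", "claude-sonnet-4-5"]
```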
data/.ace-defaults/llm/providers/google.yml
ADDED
@@ -0,0 +1,36 @@
+name: google
+last_synced: 2026-02-27
+class: Ace::LLM::Organisms::GoogleClient
+gem: ace-llm
+context_limit: 1000000 # Gemini models have 1M+ context window
+models:
+  - gemini-2.5-flash
+  - gemini-2.5-flash-lite
+  - gemini-2.5-pro
+  - gemini-3.1-pro-preview
+  - gemini-3-flash-preview
+  - gemini-flash-latest
+  - gemini-flash-lite-latest
+aliases:
+  global:
+    gflash: google:flash
+    gpro: google:pro
+    glite: google:lite
+  model:
+    flash: gemini-flash-latest
+    lite: gemini-flash-lite-latest
+    pro: gemini-2.5-pro
+    pro-preview: gemini-3.1-pro-preview
+    flash-preview: gemini-3-flash-preview
+api_key:
+  env: GEMINI_API_KEY
+  required: true
+  description: Google AI Studio API key
+capabilities:
+  - text_generation
+  - streaming
+  - function_calling
+  - vision
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
data/.ace-defaults/llm/providers/groq.yml
ADDED
@@ -0,0 +1,29 @@
+name: groq
+class: Ace::LLM::Organisms::GroqClient
+gem: ace-llm
+context_limit: 128000 # Groq models typically have 128K context window
+models:
+  - openai/gpt-oss-120b
+  - openai/gpt-oss-20b
+  - moonshotai/kimi-k2-instruct-0905
+aliases:
+  global:
+    groq: groq:openai/gpt-oss-120b
+    groq-fast: groq:openai/gpt-oss-20b
+    groq-kimi: groq:moonshotai/kimi-k2-instruct-0905
+    groq-saba: groq:mistral-saba-24b
+  model:
+    gpt-oss: openai/gpt-oss-120b
+    gpt-oss-120b: openai/gpt-oss-120b
+    gpt-oss-20b: openai/gpt-oss-20b
+    kimi-k2: moonshotai/kimi-k2-instruct-0905
+    saba: mistral-saba-24b
+api_key:
+  env: GROQ_API_KEY
+  required: true
+  description: Groq API key
+capabilities:
+  - text_generation
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
data/.ace-defaults/llm/providers/lmstudio.yml
ADDED
@@ -0,0 +1,24 @@
+name: lmstudio
+last_synced: 2025-12-05
+class: Ace::LLM::Organisms::LMStudioClient
+gem: ace-llm
+context_limit: 128000 # Default context limit; varies by local model
+# Models are user-defined locally in LM Studio and not synced from models.dev.
+# This list is intentionally empty.
+models:
+aliases:
+  global:
+    local: lmstudio:local
+  model:
+    local: local-model
+    custom: custom-model
+endpoint: http://localhost:1234
+api_key:
+  required: false
+  description: LM Studio local server (no API key needed)
+capabilities:
+  - text_generation
+  - streaming
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
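Unlike the hosted providers, lmstudio points at a local `endpoint` and marks its `api_key` as not required, since LM Studio serves an OpenAI-compatible API on localhost. A minimal sketch of talking to that endpoint directly, assuming the standard `/v1/chat/completions` route (the gem's LMStudioClient may build the request differently):

```ruby
require "net/http"
require "json"

# Minimal sketch: LM Studio exposes an OpenAI-compatible local server, so no
# API key is needed. Assumes the standard /v1/chat/completions route.
uri  = URI("http://localhost:1234/v1/chat/completions")
body = { model: "local-model",   # matches the `local` model alias above
         messages: [{ role: "user", content: "Hello" }] }
res  = Net::HTTP.post(uri, body.to_json, "Content-Type" => "application/json")
puts JSON.parse(res.body).dig("choices", 0, "message", "content")
```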
data/.ace-defaults/llm/providers/mistral.yml
ADDED
@@ -0,0 +1,33 @@
+name: mistral
+last_synced: 2025-12-05
+class: Ace::LLM::Organisms::MistralClient
+gem: ace-llm
+context_limit: 128000 # Mistral Large has 128K context window
+models:
+  - codestral-latest
+  - devstral-medium-2507
+  - devstral-small-2507
+  - mistral-large-latest
+  - mistral-medium-2508
+  - mistral-medium-latest
+  - mistral-small-latest
+aliases:
+  global:
+    mistral-large: mistral:large
+    mistral-medium: mistral:medium
+  model:
+    large: mistral-large-latest
+    medium: mistral-medium-latest
+    small: mistral-small-latest
+    code: codestral-latest
+api_key:
+  env: MISTRAL_API_KEY
+  required: true
+  description: Mistral AI API key
+capabilities:
+  - text_generation
+  - streaming
+  - code_generation
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
data/.ace-defaults/llm/providers/openai.yml
ADDED
@@ -0,0 +1,33 @@
+name: openai
+last_synced: 2025-12-05
+class: Ace::LLM::Organisms::OpenAIClient
+gem: ace-llm
+context_limit: 1050000 # GPT-5.x has 1.05M context window
+models:
+  - gpt-5.1
+  - gpt-5.1-chat-latest
+  - gpt-5.1-codex
+  - gpt-5.1-codex-max
+  - gpt-5.1-codex-mini
+aliases:
+  global:
+    oagpt: openai:gpt
+    oacodex: openai:codex
+    oamax: openai:max
+  model:
+    gpt: gpt-5.1
+    codex: gpt-5.1-codex
+    max: gpt-5.1-codex-max
+api_key:
+  env: OPENAI_API_KEY
+  required: true
+  description: OpenAI API key
+capabilities:
+  - text_generation
+  - streaming
+  - function_calling
+  - vision
+  - embeddings
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
data/.ace-defaults/llm/providers/openrouter.yml
ADDED
@@ -0,0 +1,45 @@
+name: openrouter
+class: Ace::LLM::Organisms::OpenRouterClient
+gem: ace-llm
+context_limit: 128000 # Default context limit; varies by model
+models:
+  # Fast inference (:nitro = throughput priority → Groq/Cerebras)
+  - openai/gpt-oss-120b:nitro
+  - openai/gpt-oss-20b:nitro
+  - moonshotai/kimi-k2-0905:nitro
+  - qwen/qwen3-max
+  # DeepSeek (exclusive to OpenRouter)
+  - deepseek/deepseek-v3.2
+  # Kimi/Moonshot
+  - moonshotai/kimi-k2-thinking
+  # Qwen
+  - qwen/qwen3-coder
+  # Others
+  - z-ai/glm-4.6
+  - minimax/minimax-m1
+aliases:
+  models:
+    # Fast inference (nitro = throughput)
+    oss-nitro: openrouter:openai/gpt-oss-120b:nitro
+    ss-small-nitro: openrouter:openai/gpt-oss-20b:nitro
+    kimi-nitro: openrouter:moonshotai/kimi-k2-0905:nitro
+    qwen3: openrouter:qwen/qwen3-max
+    # Kimi
+    kimi: openrouter:moonshotai/kimi-k2-0905
+    kimi-think: openrouter:moonshotai/kimi-k2-thinking
+    # DeepSeek
+    deepseek: openrouter:deepseek/deepseek-v3.2
+    # Qwen
+    qwen-coder: openrouter:qwen/qwen3-coder
+    # Others
+    glm: openrouter:z-ai/glm-4.6
+    minimax: openrouter:minimax/minimax-m1
+api_key:
+  env: OPENROUTER_API_KEY
+  required: true
+  description: OpenRouter API key
+capabilities:
+  - text_generation
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
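Note that OpenRouter model IDs themselves contain `/` and, for the `:nitro` variants, a second `:`, so a `provider:model` reference can only be split on the first colon. A hedged sketch of that parse (the gem's ProviderModelParser may handle more cases):

```ruby
# Split a provider:model reference only on the first colon, since model IDs
# may contain "/" and a trailing ":nitro" variant (hedged sketch; not the
# gem's actual ProviderModelParser).
def parse_ref(ref)
  provider, model = ref.split(":", 2)
  { provider: provider, model: model }
end

parse_ref("openrouter:openai/gpt-oss-120b:nitro")
# => { provider: "openrouter", model: "openai/gpt-oss-120b:nitro" }
```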
data/.ace-defaults/llm/providers/togetherai.yml
ADDED
@@ -0,0 +1,26 @@
+name: togetherai
+last_synced: 2025-12-05
+class: Ace::LLM::Organisms::TogetherAIClient
+gem: ace-llm
+context_limit: 128000 # Default context limit; varies by model
+models:
+  - Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8
+  - deepseek-ai/DeepSeek-V3
+  - moonshotai/Kimi-K2-Instruct
+  - openai/gpt-oss-120b
+aliases:
+  model:
+    qwen: Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8
+    deepseek: deepseek-ai/DeepSeek-V3
+    kimi: moonshotai/Kimi-K2-Instruct
+    oss: openai/gpt-oss-120b
+api_key:
+  env: TOGETHER_API_KEY
+  required: true
+  description: Together AI API key
+capabilities:
+  - text_generation
+  - streaming
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
data/.ace-defaults/llm/providers/xai.yml
ADDED
@@ -0,0 +1,30 @@
+name: xai
+last_synced: 2025-12-06
+class: Ace::LLM::Organisms::XAIClient
+gem: ace-llm
+context_limit: 131072 # Grok models have 131K context window
+models:
+  - grok-4
+  - grok-4-1-fast
+  - grok-4-1-fast-non-reasoning
+  - grok-4-fast-non-reasoning
+  - grok-code-fast-1
+aliases:
+  global:
+    grok: xai:grok-4
+    grokfast: xai:grok-4-1-fast
+    grokcode: xai:grok-code-fast-1
+  model:
+    grok: grok-4
+    fast: grok-4-1-fast
+    fast-non: grok-4-1-fast-non-reasoning
+    code: grok-code-fast-1
+api_key:
+  env: XAI_API_KEY
+  required: true
+  description: x.ai API key
+capabilities:
+  - text_generation
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
data/.ace-defaults/llm/providers/zai.yml
ADDED
@@ -0,0 +1,18 @@
+name: zai
+last_synced: 2026-02-27
+class: Ace::LLM::Organisms::ZaiClient
+gem: ace-llm
+context_limit: 128000
+models:
+  - glm-4.7-flashx
+  - glm-4.7
+  - glm-5
+api_key:
+  env: ZAI_API_KEY
+  required: true
+  description: Z.AI API key
+capabilities:
+  - text_generation
+default_options:
+  temperature: 0.7
+  max_tokens: 16384
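Every hosted provider above declares its key via `api_key.env` plus a `required` flag; lmstudio is the only one with `required: false`. A hypothetical sketch of the presence check those fields imply (not the gem's actual EnvReader or ProviderConfigValidator):

```ruby
# Hypothetical sketch of the api_key check each provider file implies
# (not the gem's actual EnvReader / ProviderConfigValidator).
def api_key_for(config)
  spec = config.fetch("api_key", {})
  return nil unless spec["required"]   # e.g. lmstudio: required: false
  ENV.fetch(spec["env"]) do
    raise KeyError, "#{config['name']}: set #{spec['env']} (#{spec['description']})"
  end
end

api_key_for("name" => "zai",
            "api_key" => { "env" => "ZAI_API_KEY", "required" => true,
                           "description" => "Z.AI API key" })
```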