agentscope-runtime 0.1.5b2__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- agentscope_runtime/engine/agents/agentscope_agent.py +447 -0
- agentscope_runtime/engine/agents/agno_agent.py +19 -18
- agentscope_runtime/engine/agents/autogen_agent.py +13 -8
- agentscope_runtime/engine/agents/utils.py +53 -0
- agentscope_runtime/engine/deployers/__init__.py +0 -13
- agentscope_runtime/engine/deployers/local_deployer.py +501 -356
- agentscope_runtime/engine/helpers/helper.py +60 -41
- agentscope_runtime/engine/runner.py +11 -36
- agentscope_runtime/engine/schemas/agent_schemas.py +2 -70
- agentscope_runtime/engine/services/sandbox_service.py +62 -70
- agentscope_runtime/engine/services/tablestore_memory_service.py +304 -0
- agentscope_runtime/engine/services/tablestore_rag_service.py +143 -0
- agentscope_runtime/engine/services/tablestore_session_history_service.py +293 -0
- agentscope_runtime/engine/services/utils/tablestore_service_utils.py +352 -0
- agentscope_runtime/sandbox/__init__.py +2 -0
- agentscope_runtime/sandbox/box/base/__init__.py +4 -0
- agentscope_runtime/sandbox/box/base/base_sandbox.py +4 -3
- agentscope_runtime/sandbox/box/browser/__init__.py +4 -0
- agentscope_runtime/sandbox/box/browser/browser_sandbox.py +8 -13
- agentscope_runtime/sandbox/box/dummy/__init__.py +4 -0
- agentscope_runtime/sandbox/box/filesystem/__init__.py +4 -0
- agentscope_runtime/sandbox/box/filesystem/filesystem_sandbox.py +8 -6
- agentscope_runtime/sandbox/box/gui/__init__.py +4 -0
- agentscope_runtime/sandbox/box/gui/gui_sandbox.py +80 -0
- agentscope_runtime/sandbox/box/sandbox.py +5 -2
- agentscope_runtime/sandbox/box/shared/routers/generic.py +20 -1
- agentscope_runtime/sandbox/box/training_box/__init__.py +4 -0
- agentscope_runtime/sandbox/box/training_box/training_box.py +10 -15
- agentscope_runtime/sandbox/build.py +143 -58
- agentscope_runtime/sandbox/client/http_client.py +43 -49
- agentscope_runtime/sandbox/client/training_client.py +0 -1
- agentscope_runtime/sandbox/constant.py +24 -1
- agentscope_runtime/sandbox/custom/custom_sandbox.py +5 -5
- agentscope_runtime/sandbox/custom/example.py +2 -2
- agentscope_runtime/sandbox/enums.py +1 -0
- agentscope_runtime/sandbox/manager/collections/in_memory_mapping.py +11 -6
- agentscope_runtime/sandbox/manager/collections/redis_mapping.py +25 -9
- agentscope_runtime/sandbox/manager/container_clients/__init__.py +0 -10
- agentscope_runtime/sandbox/manager/container_clients/agentrun_client.py +1098 -0
- agentscope_runtime/sandbox/manager/container_clients/docker_client.py +33 -205
- agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +8 -555
- agentscope_runtime/sandbox/manager/sandbox_manager.py +187 -88
- agentscope_runtime/sandbox/manager/server/app.py +82 -14
- agentscope_runtime/sandbox/manager/server/config.py +50 -3
- agentscope_runtime/sandbox/model/container.py +6 -23
- agentscope_runtime/sandbox/model/manager_config.py +93 -5
- agentscope_runtime/sandbox/tools/gui/__init__.py +7 -0
- agentscope_runtime/sandbox/tools/gui/tool.py +77 -0
- agentscope_runtime/sandbox/tools/mcp_tool.py +6 -2
- agentscope_runtime/sandbox/utils.py +124 -0
- agentscope_runtime/version.py +1 -1
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/METADATA +168 -77
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/RECORD +59 -78
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/entry_points.txt +0 -1
- agentscope_runtime/engine/agents/agentscope_agent/__init__.py +0 -6
- agentscope_runtime/engine/agents/agentscope_agent/agent.py +0 -401
- agentscope_runtime/engine/agents/agentscope_agent/hooks.py +0 -169
- agentscope_runtime/engine/agents/llm_agent.py +0 -51
- agentscope_runtime/engine/deployers/adapter/responses/response_api_adapter_utils.py +0 -2886
- agentscope_runtime/engine/deployers/adapter/responses/response_api_agent_adapter.py +0 -51
- agentscope_runtime/engine/deployers/adapter/responses/response_api_protocol_adapter.py +0 -314
- agentscope_runtime/engine/deployers/cli_fc_deploy.py +0 -184
- agentscope_runtime/engine/deployers/kubernetes_deployer.py +0 -265
- agentscope_runtime/engine/deployers/modelstudio_deployer.py +0 -677
- agentscope_runtime/engine/deployers/utils/deployment_modes.py +0 -14
- agentscope_runtime/engine/deployers/utils/docker_image_utils/__init__.py +0 -8
- agentscope_runtime/engine/deployers/utils/docker_image_utils/docker_image_builder.py +0 -429
- agentscope_runtime/engine/deployers/utils/docker_image_utils/dockerfile_generator.py +0 -240
- agentscope_runtime/engine/deployers/utils/docker_image_utils/runner_image_factory.py +0 -297
- agentscope_runtime/engine/deployers/utils/package_project_utils.py +0 -932
- agentscope_runtime/engine/deployers/utils/service_utils/__init__.py +0 -9
- agentscope_runtime/engine/deployers/utils/service_utils/fastapi_factory.py +0 -504
- agentscope_runtime/engine/deployers/utils/service_utils/fastapi_templates.py +0 -157
- agentscope_runtime/engine/deployers/utils/service_utils/process_manager.py +0 -268
- agentscope_runtime/engine/deployers/utils/service_utils/service_config.py +0 -75
- agentscope_runtime/engine/deployers/utils/service_utils/service_factory.py +0 -220
- agentscope_runtime/engine/deployers/utils/wheel_packager.py +0 -389
- agentscope_runtime/engine/helpers/agent_api_builder.py +0 -651
- agentscope_runtime/engine/llms/__init__.py +0 -3
- agentscope_runtime/engine/llms/base_llm.py +0 -60
- agentscope_runtime/engine/llms/qwen_llm.py +0 -47
- agentscope_runtime/engine/schemas/embedding.py +0 -37
- agentscope_runtime/engine/schemas/modelstudio_llm.py +0 -310
- agentscope_runtime/engine/schemas/oai_llm.py +0 -538
- agentscope_runtime/engine/schemas/realtime.py +0 -254
- /agentscope_runtime/engine/{deployers/adapter/responses → services/utils}/__init__.py +0 -0
- /agentscope_runtime/{engine/deployers/utils → sandbox/box/gui/box}/__init__.py +0 -0
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/WHEEL +0 -0
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/top_level.txt +0 -0
agentscope_runtime/engine/llms/qwen_llm.py (file removed):

```diff
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-
-from openai import Client, AsyncClient
-
-from .base_llm import BaseLLM
-
-
-class QwenLLM(BaseLLM):
-    """
-    QwenLLM is a class that provides a wrapper around the Qwen LLM model.
-    """
-
-    base_url = None
-
-    def __init__(
-        self,
-        model_name: str = "qwen-turbo",
-        api_key: str = None,
-        **kwargs,
-    ):
-        """
-        Initialize the QwenLLM class.
-
-        Args:
-            model_name (str): The name of the Qwen LLM model to use.
-                Defaults to "qwen-turbo".
-            api_key (str): The API key for Qwen service.
-                If None, will read from DASHSCOPE_API_KEY environment variable.
-        """
-        super().__init__(model_name, **kwargs)
-
-        if api_key is None:
-            api_key = os.getenv("DASHSCOPE_API_KEY")
-        if self.base_url is None:
-            default_base_url = (
-                "https://dashscope.aliyuncs.com/compatible-mode/v1"
-            )
-            self.base_url = os.getenv("DASHSCOPE_BASE_URL", default_base_url)
-        self.client = Client(
-            api_key=api_key,
-            base_url=self.base_url,
-        )
-        self.async_client = AsyncClient(
-            api_key=api_key,
-            base_url=self.base_url,
-        )
```
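
The removed `QwenLLM` wrapper did little beyond pointing OpenAI-compatible clients at the DashScope endpoint. For code that depended on it, a minimal stand-in can be built directly on the `openai` SDK; the sketch below reuses the fallback URL and environment variables from the deleted code, while the helper name `make_qwen_clients` is hypothetical and not part of agentscope-runtime.

```python
# Hypothetical stand-in for the removed QwenLLM wrapper. Reads
# DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL exactly as the deleted code did.
import os
from typing import Optional, Tuple

from openai import Client, AsyncClient


def make_qwen_clients(
    api_key: Optional[str] = None,
) -> Tuple[Client, AsyncClient]:
    api_key = api_key or os.getenv("DASHSCOPE_API_KEY")
    base_url = os.getenv(
        "DASHSCOPE_BASE_URL",
        "https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    # Sync and async clients share credentials and endpoint, mirroring
    # self.client / self.async_client in the removed class.
    return (
        Client(api_key=api_key, base_url=base_url),
        AsyncClient(api_key=api_key, base_url=base_url),
    )
```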
agentscope_runtime/engine/schemas/embedding.py (file removed):

```diff
@@ -1,37 +0,0 @@
-# -*- coding: utf-8 -*-
-from typing import List, Optional, Literal
-
-from openai.types import Embedding
-from pydantic import BaseModel
-
-
-class Usage(BaseModel):
-    prompt_tokens: Optional[int] = None
-    """The number of tokens used by the prompt."""
-
-    total_tokens: Optional[int] = None
-    """The total number of tokens used by the request."""
-
-    input_tokens: Optional[int] = None
-
-    text_count: Optional[int] = None
-
-    image_count: Optional[int] = None
-
-    video_count: Optional[int] = None
-
-    duration: Optional[float] = None
-
-
-class EmbeddingResponse(BaseModel):
-    data: List[Embedding]
-    """The list of embeddings generated by the model."""
-
-    model: str
-    """The name of the model used to generate the embedding."""
-
-    object: Literal["list"]
-    """The object type, which is always "list"."""
-
-    usage: Usage
-    """The usage information for the request."""
```
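
`EmbeddingResponse` mirrored the OpenAI embeddings list response while extending `Usage` with multimodal counters (`text_count`, `image_count`, `video_count`). Downstream code that still needs this shape can copy the deleted schema verbatim; below is a construction sketch, assuming the two classes above are in scope (the model name and vector values are placeholders).

```python
# Illustrative construction of the removed EmbeddingResponse schema.
# Assumes Usage and EmbeddingResponse above were copied into scope.
from openai.types import Embedding

resp = EmbeddingResponse(
    data=[Embedding(embedding=[0.1, 0.2, 0.3], index=0, object="embedding")],
    model="my-embedding-model",  # placeholder model id
    object="list",
    usage=Usage(prompt_tokens=4, total_tokens=4, text_count=1),
)
assert resp.usage.total_tokens == 4
```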
agentscope_runtime/engine/schemas/modelstudio_llm.py (file removed):

```diff
@@ -1,310 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-from typing import List, Literal, Optional, Union
-
-from openai.types.chat import ChatCompletion, ChatCompletionChunk
-from pydantic import (
-    BaseModel,
-    StrictInt,
-    field_validator,
-    Field,
-)
-
-from .oai_llm import (
-    Parameters,
-    OpenAIMessage,
-)
-
-
-class KnowledgeHolder(BaseModel):
-    source: str
-    """The source identifier or URL where the knowledge was retrieved from."""
-
-    content: str
-    """The actual content or knowledge text retrieved from the source."""
-
-
-class IntentionOptions(BaseModel):
-    white_list: List[str] = Field(default_factory=list)
-    """A list of allowed intentions that can be processed."""
-
-    black_list: List[str] = Field(default_factory=list)
-    """A list of blocked intentions that should not be processed."""
-
-    search_model: str = "search_v6"
-    """The searches model version to use for intentions recognition."""
-
-    intensity: Optional[int] = None
-    """The intensity level for intentions matching and processing."""
-
-    scene_id: Optional[str] = None
-    """The scene identifier for context-aware intentions processing."""
-
-
-class SearchOptions(BaseModel):
-    """
-    Search Options on Modelstudio platform for knowledge retrieval and web
-    searches.
-    """
-
-    enable_source: bool = False
-    """Whether to include source information in searches results."""
-
-    enable_citation: bool = False
-    """Whether to include citation information for retrieved content."""
-
-    enable_readpage: bool = False
-    """Whether to enable full page reading for web content."""
-
-    enable_online_read: bool = False
-    """Whether to enable online reading capabilities for real-time content."""
-
-    citation_format: str = "[<number>]"
-    """The format string for citations in the response."""
-
-    search_strategy: Literal[
-        "standard",
-        "pro_ultra",
-        "pro",
-        "lite",
-        "pro_max",
-        "image",
-        "turbo",
-        "max",
-    ] = "turbo"
-    """The searches strategy to use ('standard', 'pro_ultra',
-    'pro', 'lite','pro_max', 'image','turbo','max'). """
-
-    forced_search: bool = False
-    """Whether to force searches even when cached results are available."""
-
-    prepend_search_result: bool = False
-    """Whether to prepend searches results to the response."""
-
-    enable_search_extension: bool = False
-    """Whether to enable extended searches capabilities."""
-
-    item_cnt: int = 20000
-    """The maximum number of items to retrieve in searches results."""
-
-    top_n: int = 0
-    """The number of top results to return (0 means return all)."""
-
-    intention_options: Union[IntentionOptions, None] = IntentionOptions()
-    """Options for intentions recognition and processing during searches."""
-
-
-# maximum chunk size from knowledge base [1, 20]
-PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN = int(
-    os.getenv(
-        "PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN",
-        "1",
-    ),
-)
-PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX = int(
-    os.getenv(
-        "PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX",
-        "20",
-    ),
-)
-
-
-class RagOptions(BaseModel):
-    model_config = {"populate_by_name": True}
-
-    class FallbackOptions(BaseModel):
-        default_response_type: Optional[str] = "llm"
-        """The type of default response when RAG fails ('llm', 'template',
-        'none'). """
-
-        default_response: Optional[str] = ""
-        """The default response text to use when RAG fails."""
-
-    class RewriteOptions(BaseModel):
-        model_name: Optional[str] = None
-        """The model name to use for rewriting."""
-
-        class_name: Optional[str] = None
-        """The class name to use for rewriting."""
-
-    class RerankOptions(BaseModel):
-        model_name: Optional[str] = None
-        """The model name to use for reranking."""
-
-    workspace_id: Optional[str] = ""
-    """The modelstudio workspace id"""
-
-    replaced_word: str = "${documents}"
-    """The placeholder word in prompts that will be replaced with retrieved
-    documents. """
-
-    index_names: Optional[List[str]] = Field(default_factory=list)
-    """List of index names to use for document processing and retrieval."""
-
-    pipeline_ids: Optional[List[str]] = Field(default_factory=list)
-    """List of pipeline IDs to use for document processing and retrieval."""
-
-    file_ids: Optional[List[str]] = Field(
-        default_factory=list,
-        alias="file_id_list",
-    )
-    """List of specific file IDs to searches within."""
-
-    prompt_strategy: Optional[str] = Field(
-        default="topK",
-        alias="prompt_strategy_name",
-    )
-    """The strategy for selecting and organizing retrieved content in
-    prompts. """
-
-    maximum_allowed_chunk_num: Optional[int] = 5
-    """The maximum number of document chunks to include in the context."""
-
-    maximum_allowed_length: Optional[int] = 2000
-    """The maximum total length of retrieved content in characters."""
-
-    enable_citation: bool = Field(
-        default=False,
-        alias="prompt_enable_citation",
-    )
-    """Whether to include citation information for retrieved documents."""
-
-    fallback_options: Optional[FallbackOptions] = None
-    """Options for handling cases when RAG retrieval fails."""
-
-    enable_web_search: bool = False
-    """Whether to enable web searches as part of the RAG pipeline."""
-
-    session_file_ids: Optional[List[str]] = Field(default_factory=list)
-    """List of file IDs that are specific to the current session."""
-
-    dense_similarity_top_k: Optional[int] = 100
-    """The number of most similar dense vectors to retrieve."""
-
-    sparse_similarity_top_k: Optional[int] = 100
-    """The number of most similar sparse vectors to retrieve."""
-
-    enable_rewrite: Optional[bool] = None
-    """Whether to enable content rewrite during RAG."""
-
-    rewrite: Optional[List[RewriteOptions]] = None
-    """Options for content rewrite."""
-
-    enable_reranking: Optional[bool] = None
-    """Whether to enable content reranking."""
-
-    rerank_min_score: Optional[float] = None
-    """The minimum score threshold for content reranking."""
-
-    rerank_top_n: Optional[int] = None
-    """The number of top results to return for content reranking."""
-
-    rerank: Optional[List[RerankOptions]] = None
-
-    enable_reject_filter: Optional[bool] = None
-    """Whether to enable content rejection filtering."""
-
-    reject_filter_type: Optional[str] = None
-    """The type of content rejection filter to use."""
-
-    reject_filter_model_name: Optional[str] = None
-    """The name of the model to use for content rejection filtering."""
-
-    reject_filter_prompt: Optional[str] = None
-    """The prompt to use for content rejection filtering."""
-
-    enable_agg_search: Optional[bool] = None
-    """Whether to enable aggregation searches."""
-
-    enable_hybrid_gen: Optional[bool] = None
-    """Whether to enable hybrid generations."""
-
-    @field_validator("prompt_strategy")
-    def prompt_strategy_check(self, value: str) -> str:
-        if value:
-            value = value.lower()
-            if value in ["topk", "top_k"]:
-                return "topK"
-        return value
-
-    @field_validator("maximum_allowed_chunk_num")
-    def maximum_allowed_chunk_num_check(self, value: int) -> int:
-        if value < int(PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN) or value > int(
-            PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX,
-        ):
-            raise KeyError(
-                f"Range of maximum_allowed_chunk_num should be "
-                f"[{PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN}, "
-                f"{PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX}]",
-            )
-        return value
-
-
-class ModelstudioParameters(Parameters):
-    """
-    Parameters for Modelstudio platform, extending the base Parameters with
-    Modelstudio-specific options.
-    """
-
-    repetition_penalty: Union[float, None] = None
-    """Penalty for repeating tokens. Higher values reduce repetition."""
-
-    length_penalty: Union[float, None] = None
-    """Penalty applied to longer sequences. Affects the length of generated
-    text. """
-
-    top_k: Union[StrictInt, None] = None
-    """The number of highest probability vocabulary tokens to keep for top-k
-    filtering."""
-
-    min_tokens: Optional[int] = None
-    """The minimum number of tokens to generate before stopping."""
-
-    result_format: Literal["text", "message"] = "message"
-    """The format of the response ('text' for plain text, 'message' for
-    structured message) """
-
-    incremental_output: bool = False
-    """Whether to return incremental output during generations."""
-
-    # Search
-    enable_search: bool = False
-    """Whether to enable searches capabilities for knowledge retrieval."""
-
-    search_options: Optional[SearchOptions] = SearchOptions()
-    """Configuration options for searches functionality."""
-
-    # RAG
-    enable_rag: bool = False  # RAGs of modelstudio assistant service
-    """Whether to enable Retrieval-Augmented Generation (RAG) for the
-    Modelstudio assistant service. """
-
-    rag_options: Union[RagOptions, None] = None
-    """Configuration options for RAG functionality."""
-
-    selected_model: Optional[str] = "qwen-max"
-    """The selected model name to use for generations."""
-
-    # Intention
-    intention_options: Optional[IntentionOptions] = None
-    """Options for intentions recognition and processing."""
-
-    # MCP Servers
-    mcp_config_file: Optional[str] = None
-    """Path to the MCP (Model Context Protocol) configuration file."""
-
-
-class ModelstudioChatRequest(ModelstudioParameters):
-    messages: List[OpenAIMessage]
-    """A list of messages comprising the conversation so far."""
-
-    model: str
-    """ID of the model to use for the chat completion."""
-
-
-class ModelstudioChatResponse(ChatCompletion):
-    pass
-
-
-class ModelstudioChatCompletionChunk(ChatCompletionChunk):
-    pass
```