@cozeloop/loop-lng 0.0.0-beta-1766569788904
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +2 -0
- package/dist/index.js +36 -0
- package/dist/index.js.map +1 -0
- package/dist/locales/auth/en-US.json +28 -0
- package/dist/locales/auth/zh-CN.json +28 -0
- package/dist/locales/base/en-US.json +12 -0
- package/dist/locales/base/zh-CN.json +12 -0
- package/dist/locales/common/en-US.json +156 -0
- package/dist/locales/common/zh-CN.json +156 -0
- package/dist/locales/components/en-US.json +72 -0
- package/dist/locales/components/zh-CN.json +72 -0
- package/dist/locales/evaluate/en-US.json +671 -0
- package/dist/locales/evaluate/zh-CN.json +671 -0
- package/dist/locales/observation/en-US.json +325 -0
- package/dist/locales/observation/zh-CN.json +325 -0
- package/dist/locales/prompt/en-US.json +346 -0
- package/dist/locales/prompt/zh-CN.json +346 -0
- package/dist/locales/tag/en-US.json +67 -0
- package/dist/locales/tag/zh-CN.json +67 -0
- package/dist/tsconfig.build.tsbuildinfo +1 -0
- package/package.json +33 -0
|
@@ -0,0 +1,346 @@
|
|
|
1
|
+
{
|
|
2
|
+
"add_control_group": "Add control group",
|
|
3
|
+
"add_message": "Add message",
|
|
4
|
+
"add_prompt_tpl_or_input_question": "Please add a Prompt template or enter your question",
|
|
5
|
+
"assistant_role": "Assistant",
|
|
6
|
+
"clear_history_messages": "Clear historical messages.",
|
|
7
|
+
"close_enable_function": "Turn off the enabling function.",
|
|
8
|
+
"collapse_model_and_var_area": "Collapse the model configuration and variable area",
|
|
9
|
+
"collapse_preview_and_debug": "Collapse the preview and debug",
|
|
10
|
+
"confirm_delete_current_prompt_template": "Are you sure you want to delete the current Prompt template?",
|
|
11
|
+
"confirm_delete_function": "Are you sure you want to delete this function?",
|
|
12
|
+
"confirm_delete_message": "Are you sure to delete this message?",
|
|
13
|
+
"confirm_delete_var_in_tpl": "This variable will be removed from the Prompt template. Confirm deletion?",
|
|
14
|
+
"confirm_version_difference": "Confirm the version differences",
|
|
15
|
+
"confirm_version_info": "Confirm the version information",
|
|
16
|
+
"copy_prompt_key": "Copy Prompt Key",
|
|
17
|
+
"copy_trace_id": "Copy the Trace ID",
|
|
18
|
+
"copy_variable_name": "Copy the variable name",
|
|
19
|
+
"cozeloop_sdk_data_report_observation": "Connect to the CozeLoop SDK to enable data reporting and monitoring.",
|
|
20
|
+
"create_copy": "Copy Prompt",
|
|
21
|
+
"create_prompt": "Create Prompt",
|
|
22
|
+
"debug_history": "Debug history",
|
|
23
|
+
"deep_thinking": "Deep in thought",
|
|
24
|
+
"deeply_thought": "Thought deeply",
|
|
25
|
+
"default_mock_value": "Default mock value",
|
|
26
|
+
"delete_control_group": "Delete the control group.",
|
|
27
|
+
"delete_function": "Delete function",
|
|
28
|
+
"delete_message": "Delete the message",
|
|
29
|
+
"delete_prompt": "Delete the Prompt",
|
|
30
|
+
"delete_prompt_template": "Delete the Prompt template",
|
|
31
|
+
"delete_success": "Deletion successful",
|
|
32
|
+
"delete_variable": "Delete variable",
|
|
33
|
+
"draft_auto_saved_in": "The draft has been automatically saved in ",
|
|
34
|
+
"draft_saving": "Draft is being saved...",
|
|
35
|
+
"edit_placeholder": "Edit Placeholder",
|
|
36
|
+
"edit_prompt": "Edit Prompt",
|
|
37
|
+
"enable_function": "Enable",
|
|
38
|
+
"enter_free_comparison_mode": "Comparison mode",
|
|
39
|
+
"exit_free_comparison_mode": "Exit the free comparison mode",
|
|
40
|
+
"expand_model_and_var_area": "Expand the model configuration and variable area",
|
|
41
|
+
"expand_preview_and_debug": "Expand preview and debug",
|
|
42
|
+
"fornax_prompt_current_status": "Current status:",
|
|
43
|
+
"fornax_prompt_disable_model_func_google_on": "After enabling Google search, the model's function capabilities will be disabled",
|
|
44
|
+
"fornax_prompt_documentation": "Documentation",
|
|
45
|
+
"fornax_prompt_enable": "Enable",
|
|
46
|
+
"fornax_prompt_fps_influence_on_video_understanding_and_token_usage": "The higher the FPS, the more detailed the video understanding and the greater the token usage.",
|
|
47
|
+
"fornax_prompt_frame_extraction_config": "Frame Extraction Configuration",
|
|
48
|
+
"fornax_prompt_manual_config_video_sample_params": "Manually configure video variable sampling parameters, see details at",
|
|
49
|
+
"fornax_prompt_model_built_in_methods": "Built-in methods of the model",
|
|
50
|
+
"fornax_prompt_please_enter_frame_extraction_config": "Please enter the frame extraction configuration.",
|
|
51
|
+
"fornax_prompt_video_manual_uniform_sampling_support": "Video supports manual configuration for uniform sampling over time, i.e., setting frames processed per second (FPS) at fixed time intervals. See details at",
|
|
52
|
+
"fornax_prompt_video_sampling_config": "Video Sampling Configuration",
|
|
53
|
+
"frequency_penalty": "Frequency penalty",
|
|
54
|
+
"function_call": "Function call",
|
|
55
|
+
"go_immediately": "Go now",
|
|
56
|
+
"historical_data_has_empty_content": "The historical data contains empty content.",
|
|
57
|
+
"image_size_not_exceed_num_mb": "The size of the picture cannot exceed {num}MB.",
|
|
58
|
+
"image_upload_error": "Image upload failed. Please try again later.",
|
|
59
|
+
"incorrect_version_number": "The version number format is incorrect.",
|
|
60
|
+
"input_mock_value_here": "Enter simulation values here to simulate the return values of the function.",
|
|
61
|
+
"input_prompt_key_to_delete": "Enter the Prompt Key you want to delete:",
|
|
62
|
+
"input_question_tip": "Please enter a question to test the large language model's response. Press Enter to send, and Shift+Enter for a new line.",
|
|
63
|
+
"input_version_number": "Please enter the version number in the format a.b.c, where each segment ranges from 0 to 9999.",
|
|
64
|
+
"insert_template": "Insert template",
|
|
65
|
+
"insert_variable": "Insert variables",
|
|
66
|
+
"load_more": "Load more",
|
|
67
|
+
"max_tokens": "Max tokens",
|
|
68
|
+
"max_upload_picture_num": "Upload a maximum of {num} pictures.",
|
|
69
|
+
"method_exists": "The current method already exists. Please rename it.",
|
|
70
|
+
"method_name_rule": "The method name can only contain letters (a-z, A-Z), digits (0-9), underscores (_), and hyphens (-), with a maximum length of 64 characters.",
|
|
71
|
+
"mock_message": "Mock message",
|
|
72
|
+
"mock_message_group": "Mock message group - {key}",
|
|
73
|
+
"mock_value": "Mock value",
|
|
74
|
+
"model_config": "Model config",
|
|
75
|
+
"model_id": "Model ID",
|
|
76
|
+
"model_name": "Model name",
|
|
77
|
+
"model_not_support": "Not support",
|
|
78
|
+
"model_not_support_multimodal": "The selected model does not support multimodality. Please adjust the variable type or change the model.",
|
|
79
|
+
"model_not_support_picture": "The model does not support images",
|
|
80
|
+
"model_run_error": "An error occurred during the model operation.",
|
|
81
|
+
"model_runtime_error": "Model operation error",
|
|
82
|
+
"new_function": "New function",
|
|
83
|
+
"no_debug_record": "No debug records now.",
|
|
84
|
+
"no_draft_change": "Currently no draft changes",
|
|
85
|
+
"no_longer_notify": "no longer prompt",
|
|
86
|
+
"no_prompt": "No Prompt",
|
|
87
|
+
"no_variable": "No variable",
|
|
88
|
+
"num_words": "Number of characters: {num}",
|
|
89
|
+
"open_enable_function": "Turn on the enabling function.",
|
|
90
|
+
"param_value": "Parameter value",
|
|
91
|
+
"parameter_config": "Parameter config",
|
|
92
|
+
"placeholder_format": "Only English letters and underscores are allowed, and the first character must be a letter",
|
|
93
|
+
"placeholder_name_exists": "The text variable name already exists. Please modify the Placeholder variable name.",
|
|
94
|
+
"placeholder_var_create_error": "The Placeholder variable name does not exist or is named incorrectly, so creation failed.",
|
|
95
|
+
"placeholder_var_error": "The Placeholder variable does not exist or is named incorrectly.",
|
|
96
|
+
"placeholder_var_execute_error": "The Placeholder variable name does not exist or is named incorrectly",
|
|
97
|
+
"placeholder_var_name": "Placeholder variable name",
|
|
98
|
+
"please_input_with_vars": "Please enter content and use variables such as '{{USER_NAME}}",
|
|
99
|
+
"presence_penalty": "Presence penalty",
|
|
100
|
+
"preview_and_debug": "Preview and Debug",
|
|
101
|
+
"prompt_add_function": "Add function",
|
|
102
|
+
"prompt_add_multi_modal_variable": "Add multi-modal variable",
|
|
103
|
+
"prompt_add_new_multi_modal_variable": "Add new multi-modal variable",
|
|
104
|
+
"prompt_add_variable": "Add variable",
|
|
105
|
+
"prompt_additional_configuration": "Additional Configuration",
|
|
106
|
+
"prompt_ak_ak_get_label": "获取 AK/SK",
|
|
107
|
+
"prompt_ak_ak_get_tip": "点击后前往空间管理页面获取",
|
|
108
|
+
"prompt_all_creators": "All creators",
|
|
109
|
+
"prompt_being_deprecated": "Being deprecated",
|
|
110
|
+
"prompt_blank_prompt": "Blank Prompt",
|
|
111
|
+
"prompt_call_records": "Call records",
|
|
112
|
+
"prompt_call_stream": "package main\n\nimport (\n \"context\"\n \"fmt\"\n \"io\"\n\n \"github.com/coze-dev/cozeloop-go\"\n \"github.com/coze-dev/cozeloop-go/entity\"\n \"github.com/coze-dev/cozeloop-go/internal/util\"\n)\n\nfunc main() {\n // 1.Create a prompt on the platform\n // Create a Prompt on the platform's Prompt development page (set Prompt Key to 'ptaas_demo'),\n // add the following messages to the template, submit a version.\n // System: You are a helpful assistant for {{topic}}.\n // User: Please help me with {{user_request}}\n ctx := context.Background()\n\n // Set the following environment variables first.\n // COZELOOP_WORKSPACE_ID=your workspace id\n // COZELOOP_API_TOKEN=your token\n // 2.New loop client\n client, err := cozeloop.NewClient()\n if err != nil {\n panic(err)\n }\n defer client.Close(ctx)\n\n ctx, span := client.StartSpan(ctx, \"root_span\", \"custom\")\n defer span.Finish(ctx)\n\n // 3. Execute prompt\n executeRequest := &entity.ExecuteParam{\n PromptKey: \"CozeLoop_Oncall_Master\",\n Version: \"0.0.1\",\n VariableVals: map[string]any{\n \"topic\": \"artificial intelligence\",\n \"user_request\": \"explain what is machine learning\",\n },\n // You can also append messages to the prompt.\n Messages: []*entity.Message{\n {\n Role: entity.RoleUser,\n Content: util.Ptr(\"Keep the answer brief.\"),\n },\n },\n }\n // 3.2 stream\n stream(ctx, client, executeRequest)\n client.Flush(ctx)\n}\n\nfunc stream(ctx context.Context, client cozeloop.Client, executeRequest *entity.ExecuteParam) {\n streamReader, err := client.ExecuteStreaming(ctx, executeRequest)\n if err != nil {\n panic(err)\n }\n for {\n result, err := streamReader.Recv()\n if err != nil {\n if err == io.EOF {\n fmt.Println(\"\\nStream finished.\")\n break\n }\n panic(err)\n }\n printExecuteResult(result)\n }\n}\n\nfunc printExecuteResult(result entity.ExecuteResult) {\n if result.Message != nil {\n fmt.Printf(\"Message: %s\\n\", util.ToJSON(result.Message))\n }\n if 
util.PtrValue(result.FinishReason) != \"\" {\n fmt.Printf(\"FinishReason: %s\\n\", util.PtrValue(result.FinishReason))\n }\n if result.Usage != nil {\n fmt.Printf(\"Usage: %s\\n\", util.ToJSON(result.Usage))\n }\n}",
|
|
113
|
+
"prompt_cannot_add_check_form_data": "Cannot add, please check form data",
|
|
114
|
+
"prompt_cannot_delete_snippet_variables": "Only variables inside Prompt can be deleted; variables in snippets cannot be deleted",
|
|
115
|
+
"prompt_case_sensitive": "Case sensitive",
|
|
116
|
+
"prompt_choose_language": "Select language",
|
|
117
|
+
"prompt_click_to_view": "Click to view",
|
|
118
|
+
"prompt_common_configuration": "Common Configuration",
|
|
119
|
+
"prompt_comparable_versions": "Comparable Versions",
|
|
120
|
+
"prompt_compare_mode": "Compare Mode",
|
|
121
|
+
"prompt_compare_versions": "Compare Versions",
|
|
122
|
+
"prompt_config_change_count": "Configuration Changes ({configTypeDiffCount})",
|
|
123
|
+
"prompt_confirm_delete": "Confirm Deletion",
|
|
124
|
+
"prompt_confirm_delete_section": "Are you sure you want to delete this section?",
|
|
125
|
+
"prompt_confirm_deletion_input_prompt_key": "If confirmed, please enter the Prompt Key you want to delete:",
|
|
126
|
+
"prompt_copy_draft_not_supported": "Copying draft versions is not supported yet, please submit the version first",
|
|
127
|
+
"prompt_copy_prompt": "Copy Prompt",
|
|
128
|
+
"prompt_copy_prompt_snippet": "Copy Prompt Snippet",
|
|
129
|
+
"prompt_create_custom_tag": "Create custom tag",
|
|
130
|
+
"prompt_create_prompt_snippet": "Create Prompt Snippet",
|
|
131
|
+
"prompt_create_version_tag_success": "Version tag created successfully",
|
|
132
|
+
"prompt_current_message_not_support_multi_modal": "Current message does not support multi-modal, please adjust variable types or change message type",
|
|
133
|
+
"prompt_current_submission_draft": "This submission is a draft version",
|
|
134
|
+
"prompt_current_version": "Current Version",
|
|
135
|
+
"prompt_currently_editing_version": "Version Currently being Edited",
|
|
136
|
+
"prompt_data_cannot_recover_after_deletion": "Data cannot be recovered after deletion",
|
|
137
|
+
"prompt_data_observation": "Data observation",
|
|
138
|
+
"prompt_debug_data_refresh_retry": "Prompt debug data is on the way. Please refresh and try again.",
|
|
139
|
+
"prompt_deep_thinking": "Deep Thinking",
|
|
140
|
+
"prompt_deep_thinking_degree": "Deep Thinking Degree",
|
|
141
|
+
"prompt_deep_thinking_description": "When enabled, the model will perform deep thinking based on the input question to generate more accurate answers.",
|
|
142
|
+
"prompt_deep_thinking_length": "Deep Thinking Length",
|
|
143
|
+
"prompt_deep_thinking_switch": "Deep Thinking Switch",
|
|
144
|
+
"prompt_delete_prompt": "Delete Prompt",
|
|
145
|
+
"prompt_description": "Prompt description",
|
|
146
|
+
"prompt_edit_prompt_snippet": "Edit Prompt Snippet",
|
|
147
|
+
"prompt_edit_variable": "Edit variable",
|
|
148
|
+
"prompt_effect_evaluation": "Conduct a performance evaluation of the prompt to improve application effectiveness.",
|
|
149
|
+
"prompt_enter_compare_mode": "Enter Compare Mode",
|
|
150
|
+
"prompt_enter_diff": "Enter Diff",
|
|
151
|
+
"prompt_enter_fullscreen": "Enter Fullscreen",
|
|
152
|
+
"prompt_evaluate_prompt_improve_performance": "Evaluate Prompt effects to improve application performance",
|
|
153
|
+
"prompt_exit_compare_mode": "Exit Compare Mode",
|
|
154
|
+
"prompt_exit_diff": "Exit Diff",
|
|
155
|
+
"prompt_exit_fullscreen": "Exit Fullscreen",
|
|
156
|
+
"prompt_expand_nested_content": "Expand Nested Content",
|
|
157
|
+
"prompt_free_comparison_mode": "Free comparison mode",
|
|
158
|
+
"prompt_full_process_prompt_support": "Provides full-process support from writing, debugging, optimizing to version management of prompts; click to create",
|
|
159
|
+
"prompt_gotemplate_engine": "GoTemplate Engine",
|
|
160
|
+
"prompt_historical_image_message_expires_1day": "Historical image messages expire after 1 day; you can view Trace or go to the Prompt development page to debug and retrieve image information",
|
|
161
|
+
"prompt_id_inconsistent": "The prompt IDs are inconsistent",
|
|
162
|
+
"prompt_image_size_max_MAX_FILE_SIZE_MB_MB": "Image size cannot exceed {MAX_FILE_SIZE_MB}MB",
|
|
163
|
+
"prompt_input_multi_modal_variable_name": "Enter multi-modal variable name",
|
|
164
|
+
"prompt_insert_snippet": "Insert Snippet",
|
|
165
|
+
"prompt_install_sdk": "Install SDK",
|
|
166
|
+
"prompt_integrate_llm_capabilities": "Integrate LLM capabilities",
|
|
167
|
+
"prompt_jinja2_template_engine": "Jinja2 template engine",
|
|
168
|
+
"prompt_key": "Prompt key",
|
|
169
|
+
"prompt_key_again_confirm": "Please enter the Prompt Key to confirm again.",
|
|
170
|
+
"prompt_key_format": "Only English letters, digits, '_' and '.' are allowed, and the name must start with a letter.",
|
|
171
|
+
"prompt_kouzi_compass_no_skill_reference_debug": "Kouzi Compass temporarily does not support skill reference and debugging",
|
|
172
|
+
"prompt_latest_committer": "Latest Committer",
|
|
173
|
+
"prompt_loading_status": "Loading",
|
|
174
|
+
"prompt_manual_add_delete_variables_complex_logic": "Manually add and delete variables, supports complex logic",
|
|
175
|
+
"prompt_mark_version_feature_sdk_fetch": "Mark version features, you can use the tag to fetch specific Prompt versions via SDK. See details",
|
|
176
|
+
"prompt_max_select_MAX_SELECT_COUNT_tags": "You can select up to {MAX_SELECT_COUNT} tags",
|
|
177
|
+
"prompt_max_tokens_description": "- **max_tokens**: Controls the maximum token length of the model output. Typically, 100 tokens roughly equal 150 Chinese characters.",
|
|
178
|
+
"prompt_max_upload_MAX_IMAGE_FILE_images": "You can upload up to {MAX_IMAGE_FILE} images",
|
|
179
|
+
"prompt_may_cause_variable_render_failure": "May cause existing variable rendering failure, please operate carefully.",
|
|
180
|
+
"prompt_modal_title_confirm": "{modalTitle} - Confirm",
|
|
181
|
+
"prompt_model_id": "Model ID",
|
|
182
|
+
"prompt_model_not_support_multimodal_image": "The current model does not support uploading multimodal images",
|
|
183
|
+
"prompt_model_not_support_multimodal_video": "The current model does not support uploading multimodal videos",
|
|
184
|
+
"prompt_model_not_support_this_video_type": "The current model does not support uploading this video type",
|
|
185
|
+
"prompt_model_output_multi_response_for_stability_test": "Model outputs multiple responses simultaneously each time, facilitating testing of response stability",
|
|
186
|
+
"prompt_model_output_single_response": "Model outputs only one response each time",
|
|
187
|
+
"prompt_model_settings": "Model settings",
|
|
188
|
+
"prompt_modify_version_tag": "Modify version tag",
|
|
189
|
+
"prompt_multi_modal_variable_name_conflict": "Multi-modal variable name conflict",
|
|
190
|
+
"prompt_multi_turn_conversation": "Multi-turn Conversation",
|
|
191
|
+
"prompt_multiple_runs": "Multiple runs",
|
|
192
|
+
"prompt_name": "Prompt name",
|
|
193
|
+
"prompt_name_format": "Only English letters, digits, Chinese characters, '-', '_', and '.' are supported, and it must start with English letters, digits, or Chinese characters.",
|
|
194
|
+
"prompt_name_query": "Name Query",
|
|
195
|
+
"prompt_new_message": "New Message",
|
|
196
|
+
"prompt_new_version_released_features": "The new version has been successfully released, you can continue to use the following features:",
|
|
197
|
+
"prompt_next_diff": "Next Diff",
|
|
198
|
+
"prompt_next_match": "Next match",
|
|
199
|
+
"prompt_no_change_info": "No change information available",
|
|
200
|
+
"prompt_no_content": "No content available",
|
|
201
|
+
"prompt_no_delete_permission": "No permission to delete",
|
|
202
|
+
"prompt_no_prompt_snippet": "No Prompt Snippets Available",
|
|
203
|
+
"prompt_no_results": "No results",
|
|
204
|
+
"prompt_no_submitted_versions_no_compare": "No submitted versions available, comparison is not supported",
|
|
205
|
+
"prompt_normal_template_engine": "Normal template engine",
|
|
206
|
+
"prompt_normal_template_var_intro": "Normal template can create variables by entering {{}} in Prompt, please create variables manually for other templates",
|
|
207
|
+
"prompt_number_of_projects_referencing": "{placeholder1} projects referencing",
|
|
208
|
+
"prompt_number_of_snippets": "{placeholder1} snippets",
|
|
209
|
+
"prompt_obtain_prompt_config": "Obtain Prompt configuration",
|
|
210
|
+
"prompt_open_version_history": "Open Version History",
|
|
211
|
+
"prompt_performance_evaluation": "Performance evaluation",
|
|
212
|
+
"prompt_placeholder_variable_name_duplication": "Placeholder variable name cannot duplicate other variable types, please modify it",
|
|
213
|
+
"prompt_placeholder_variable_name_not_empty": "Placeholder variable name cannot be empty",
|
|
214
|
+
"prompt_playground_mock_system": "# 角色\n你是一个专业的旅游规划助手,能够根据用户的具体需求和偏好,迅速且精准地为用户生成全面、详细且个性化的旅游规划文档。\n\n## 技能:制定旅游规划方案\n为用户量身制定合理且舒适的行程安排和贴心的旅行指引。对于不同主题,需要能够体现对应主题的特色、需求或注意事项等。如亲子游,需要体现带小孩旅行途中要注意的内容,用户的预算和偏好等。 \n回复使用以下格式(内容可以合理使用 emoji 表情,让内容更生动):\n\n## 输出格式\n#### 基本信息\n- 🛫 出发地:{{departure}} <如未提供,则不展示此信息>\n- 🎯 目的地:{{destination}}\n- 🫂 人数:{{people_num}}人\n- 📅 天数:{{days_num}}天\n- 🎨 主题:{{travel_theme}} \n##### <目的地>简介\n<介目的地的基本信息,约100字>\n<描述天气状况、穿衣指南,约100字>\n<描述当地特色饮食、风俗习惯等,约100字>\n#### Checklist\n- 手机、充电器\n<需要携带的物品或准备事项,按需求生成>\n#### 行程安排\n<根据用户期望天数({{days_num}}天)安排每日行程>\n##### 第一天、地点1 - 地点2 - ...\n###### 行程1:地点1\n<地点的景点简介,约100字>\n<地点的交通方式,提供合理的交通方式及使用时间信息>\n<地点的游玩方式,提供推荐游玩时长、游玩方式、注意事项、预定信息等,约100字>\n<如果 {{days_num}}超过1天,则继续按照第一天格式生成>\n#### 注意事项\n<根据以上日程安排信息,提供一些目的地旅行的注意事项>\n\n\n## 限制:\n- 所输出的内容必须按照给定的格式进行组织,不能偏离框架要求。",
|
|
215
|
+
"prompt_playground_mock_user": "## 用户需求\n- 出发地:{{departure}} \n- 目的地:{{destination}}\n- 人数:{{people_num}}\n- 天数:{{days_num}}\n- 主题:{{travel_theme}} ",
|
|
216
|
+
"prompt_please_accept_run_recommendation": "Please accept the run suggestions first",
|
|
217
|
+
"prompt_please_input_content_variable_format": "Please enter content, variables are supported in this format: {{USER_NAME}}",
|
|
218
|
+
"prompt_please_input_float_max_4_decimal": "Please enter a floating-point number, up to 4 decimal places",
|
|
219
|
+
"prompt_please_input_prompt_description": "Please enter Prompt description",
|
|
220
|
+
"prompt_please_input_prompt_key": "Please enter Prompt key",
|
|
221
|
+
"prompt_please_input_prompt_key_caps": "Please enter Prompt Key",
|
|
222
|
+
"prompt_please_input_prompt_name": "Please enter Prompt name",
|
|
223
|
+
"prompt_please_input_simulated_message": "Please enter simulated message",
|
|
224
|
+
"prompt_please_input_simulated_value": "Please enter simulated value",
|
|
225
|
+
"prompt_please_input_variable_name": "Please enter variable name",
|
|
226
|
+
"prompt_please_input_variable_value": "Please enter variable value",
|
|
227
|
+
"prompt_please_input_version_tag": "Please enter version tag",
|
|
228
|
+
"prompt_please_select_a_model": "Please select a model",
|
|
229
|
+
"prompt_please_select_variable_data_type": "Please select variable data type",
|
|
230
|
+
"prompt_previous_diff": "Previous Diff",
|
|
231
|
+
"prompt_previous_match": "Previous match",
|
|
232
|
+
"prompt_prompt_change_count": "Prompt Changes ({promptTypeDiffCount})",
|
|
233
|
+
"prompt_prompt_configuration": "Prompt Configuration",
|
|
234
|
+
"prompt_prompt_contains_mismatched_snippet": "Prompt contains snippets that do not match the template type",
|
|
235
|
+
"prompt_prompt_debug_data_loading_refresh": "Prompt debug data is on the way, please refresh and try again",
|
|
236
|
+
"prompt_prompt_diff_change_info": "Prompt Diff Change Information",
|
|
237
|
+
"prompt_prompt_invocation": "Prompt invocation",
|
|
238
|
+
"prompt_prompt_key_length_limit": "Prompt Key length cannot exceed 100 characters",
|
|
239
|
+
"prompt_prompt_name_length_limit": "{promptNameLabel} length cannot exceed 100 characters",
|
|
240
|
+
"prompt_prompt_snippet": "Prompt Snippet",
|
|
241
|
+
"prompt_prompt_snippet_description": "Prompt Snippet Description",
|
|
242
|
+
"prompt_prompt_snippet_name": "Prompt Snippet Name",
|
|
243
|
+
"prompt_prompt_snippet_nesting_support": "Prompt snippets can be nested and reused in different Prompt Templates",
|
|
244
|
+
"prompt_prompt_snippet_reuse_support": "Prompt snippets support reuse in different Prompt Templates",
|
|
245
|
+
"prompt_prompt_submit": "Submit Prompt",
|
|
246
|
+
"prompt_prompthub_features_and_integration": "PromptHub allows business parties to pull Prompts hosted on the CozeLoop platform into their services by integrating the CozeLoop SDK. They can obtain detailed Prompt Template content and either invoke models for inference within their services or integrate with agent frameworks like Eino.",
|
|
247
|
+
"prompt_provided_by_placeholder1": "Provided by {placeholder1}",
|
|
248
|
+
"prompt_ptaas_overview_and_limitations": "PTaaS (Prompt As a Service) publishes hosted Prompts as callable APIs. By integrating the CozeLoop SDK, these APIs can be directly and quickly invoked in business workflows, enabling independent iteration and tuning of Prompts. PTaaS currently does not support independent deployment for business parties; model invocation capabilities are provided by CozeLoop services.",
|
|
249
|
+
"prompt_reference_project": "Referencing Project",
|
|
250
|
+
"prompt_reference_snippet_prompt_version": "Referencing this snippet's Prompt version",
|
|
251
|
+
"prompt_release_successful": "Release successful",
|
|
252
|
+
"prompt_repetition_penalty": "Repetition penalty",
|
|
253
|
+
"prompt_replace_all": "Replace all",
|
|
254
|
+
"prompt_request_start_time": "Request start time:",
|
|
255
|
+
"prompt_response_randomness": "Response randomness",
|
|
256
|
+
"prompt_run_group_count": "Number of run groups",
|
|
257
|
+
"prompt_run_mode": "Run mode",
|
|
258
|
+
"prompt_sdk_data_reporting_and_observation": "Access SDK to report data and perform data observation",
|
|
259
|
+
"prompt_selected_tags": "Selected tags",
|
|
260
|
+
"prompt_service_call_sync": "package main\n\nimport (\n \"context\"\n \"fmt\"\n\n \"github.com/coze-dev/cozeloop-go\"\n \"github.com/coze-dev/cozeloop-go/entity\"\n \"github.com/coze-dev/cozeloop-go/internal/util\"\n)\n\nfunc main() {\n // 1.Create a prompt on the platform\n // Create a Prompt on the platform's Prompt development page (set Prompt Key to 'ptaas_demo'),\n // add the following messages to the template, submit a version.\n // System: You are a helpful assistant for {{topic}}.\n // User: Please help me with {{user_request}}\n ctx := context.Background()\n\n // Set the following environment variables first.\n // COZELOOP_WORKSPACE_ID=your workspace id\n // COZELOOP_API_TOKEN=your token\n // 2.New loop client\n client, err := cozeloop.NewClient()\n if err != nil {\n panic(err)\n }\n defer client.Close(ctx)\n\n ctx, span := client.StartSpan(ctx, \"root_span\", \"custom\")\n defer span.Finish(ctx)\n\n // 3. Execute prompt\n executeRequest := &entity.ExecuteParam{\n PromptKey: \"CozeLoop_Oncall_Master\",\n Version: \"0.0.1\",\n VariableVals: map[string]any{\n \"topic\": \"artificial intelligence\",\n \"user_request\": \"explain what is machine learning\",\n },\n // You can also append messages to the prompt.\n Messages: []*entity.Message{\n {\n Role: entity.RoleUser,\n Content: util.Ptr(\"Keep the answer brief.\"),\n },\n },\n }\n // 3.1 non stream\n nonStream(ctx, client, executeRequest)\n client.Flush(ctx)\n}\n\nfunc nonStream(ctx context.Context, client cozeloop.Client, executeRequest *entity.ExecuteParam) {\n result, err := client.Execute(ctx, executeRequest)\n if err != nil {\n panic(err)\n }\n printExecuteResult(result)\n}\n\nfunc printExecuteResult(result entity.ExecuteResult) {\n if result.Message != nil {\n fmt.Printf(\"Message: %s\\n\", util.ToJSON(result.Message))\n }\n if util.PtrValue(result.FinishReason) != \"\" {\n fmt.Printf(\"FinishReason: %s\\n\", util.PtrValue(result.FinishReason))\n }\n if result.Usage != nil {\n fmt.Printf(\"Usage: 
%s\\n\", util.ToJSON(result.Usage))\n }\n}",
|
|
261
|
+
"prompt_service_config_eg": "package main\n\nimport (\n \"context\"\n \"encoding/json\"\n \"fmt\"\n\n \"github.com/coze-dev/cozeloop-go\"\n \"github.com/coze-dev/cozeloop-go/entity\"\n)\n\nfunc main() {\n // 1.Create a prompt on the platform\n // You can create a Prompt on the platform's Prompt development page (set Prompt Key to 'prompt_hub_demo'), add the following messages to the template, and submit a version.\n // System: You are a helpful bot, the conversation topic is {{var1}}.\n // Placeholder: placeholder1\n // User: My question is {{var2}}\n // Placeholder: placeholder2\n\n ctx := context.Background()\n\n // Set the following environment variables first.\n // COZELOOP_WORKSPACE_ID=your workspace id\n // COZELOOP_API_TOKEN=your token\n // 2.New loop client\n client, err := cozeloop.NewClient(\n // Set whether to report a trace span when get or format prompt.\n // Default value is false.\n cozeloop.WithPromptTrace(true))\n if err != nil {\n panic(err)\n }\n\n llmRunner := llmRunner{\n client: client,\n }\n\n // 1. start root span\n ctx, span := llmRunner.client.StartSpan(ctx, \"root_span\", \"main_span\", nil)\n\n // 2. 
Get the prompt\n prompt, err := llmRunner.client.GetPrompt(ctx, cozeloop.GetPromptParam{\n PromptKey: \"prompt_hub_demo\",\n // If version is not specified, the latest version of the corresponding prompt will be obtained\n Version: \"0.0.1\",\n })\n if err != nil {\n fmt.Printf(\"get prompt failed: %v\\n\", err)\n return\n }\n if prompt != nil {\n // Get messages of the prompt\n if prompt.PromptTemplate != nil {\n messages, err := json.Marshal(prompt.PromptTemplate.Messages)\n if err != nil {\n fmt.Printf(\"json marshal failed: %v\\n\", err)\n return\n }\n fmt.Printf(\"prompt messages=%s\\n\", string(messages))\n }\n // Get llm config of the prompt\n if prompt.LLMConfig != nil {\n llmConfig, err := json.Marshal(prompt.LLMConfig)\n if err != nil {\n fmt.Printf(\"json marshal failed: %v\\n\", err)\n }\n fmt.Printf(\"prompt llm config=%s\\n\", llmConfig)\n }\n\n // 3. Format messages of the prompt\n userMessageContent := \"Hello!\"\n assistantMessageContent := \"Hello!\"\n messages, err := llmRunner.client.PromptFormat(ctx, prompt, map[string]any{\n // Normal variable type should be string\n \"var1\": \"artificial intelligence\",\n // Placeholder variable type should be entity.Message/*entity.Message/[]entity.Message/[]*entity.Message\n \"placeholder1\": []*entity.Message{\n {\n Role: entity.RoleUser,\n Content: &userMessageContent,\n },\n {\n Role: entity.RoleAssistant,\n Content: &assistantMessageContent,\n },\n },\n // Other variables in the prompt template that are not provided with corresponding values will be considered as empty values\n })\n if err != nil {\n fmt.Printf(\"prompt format failed: %v\\n\", err)\n return\n }\n data, err := json.Marshal(messages)\n if err != nil {\n fmt.Printf(\"json marshal failed: %v\\n\", err)\n return\n }\n fmt.Printf(\"formatted messages=%s\\n\", string(data))\n\n if err != nil {\n return\n }\n }\n\n // 4. span finish\n span.Finish(ctx)\n\n // 5. 
(optional) flush or close\n // -- force flush, report all traces in the queue\n // Warning! In general, this method is not needed to be call, as spans will be automatically reported in batches.\n // Note that flush will block and wait for the report to complete, and it may cause frequent reporting,\n // affecting performance.\n llmRunner.client.Flush(ctx)\n}\n\ntype llmRunner struct {\n client cozeloop.Client\n}",
|
|
262
|
+
"prompt_service_install_eg": "go get github.com/coze-dev/cozeloop-go",
|
|
263
|
+
"prompt_simulated_value_colon": "Simulated value:",
|
|
264
|
+
"prompt_single_run": "Single run",
|
|
265
|
+
"prompt_single_turn_conversation": "Single-turn Conversation",
|
|
266
|
+
"prompt_snippet_reference_records": "Records of snippet referencing",
|
|
267
|
+
"prompt_snippet_variables_no_delete": "Variables inside snippets cannot be deleted",
|
|
268
|
+
"prompt_snippet_version": "Snippet Version",
|
|
269
|
+
"prompt_source_version": "Source Version",
|
|
270
|
+
"prompt_step_placeholder1": "Step {placeholder1}",
|
|
271
|
+
"prompt_stop_all_responses": "Stop all responses",
|
|
272
|
+
"prompt_streaming_call": "Streaming call",
|
|
273
|
+
"prompt_submit_new_version": "Submit New Version",
|
|
274
|
+
"prompt_submit_no_version_diff_confirm": "There are no version differences in this submission, are you sure you want to submit?",
|
|
275
|
+
"prompt_support_multi_modal_in_prompt_via_variable": "Support multi-modal information in prompts through variables",
|
|
276
|
+
"prompt_switch_template_engine": "Switch template engine",
|
|
277
|
+
"prompt_synchronous_call": "Synchronous call",
|
|
278
|
+
"prompt_tag_allows_lowercase_num_underscore": "Tag only allows lowercase letters, numbers and underscores",
|
|
279
|
+
"prompt_tag_already_exists": "Tag already exists",
|
|
280
|
+
"prompt_tag_effect_other_versions_submission_success": "Already effective in other versions of the current Prompt. After successful submission, the tag will be effective only in the current version.",
|
|
281
|
+
"prompt_tag_exists_in_promptVersion": "The current tag already exists in version {promptVersion}",
|
|
282
|
+
"prompt_tag_length_max_50_chars": "Tag length cannot exceed 50 characters",
|
|
283
|
+
"prompt_template": "Prompt template",
|
|
284
|
+
"prompt_template_engine": "Template engine",
|
|
285
|
+
"prompt_template_type": "Template Type",
|
|
286
|
+
"prompt_text_md_toggle": "Toggle Text/Markdown",
|
|
287
|
+
"prompt_ticket_link": "Ticket link",
|
|
288
|
+
"prompt_time_and_tokens_info": "Time consumed: {placeholder1} | Tokens:",
|
|
289
|
+
"prompt_time_consumed": "Time consumed:",
|
|
290
|
+
"prompt_total_reference_projects": "Total {totalReferenceCount} projects",
|
|
291
|
+
"prompt_triple_braces_variable_recognition": "Triple braces {{{}}} recognize variables",
|
|
292
|
+
"prompt_type_change": "Type change",
|
|
293
|
+
"prompt_usage_call_title": "Call configuration",
|
|
294
|
+
"prompt_use_js_configuration": "import { PromptHub } from '@cozeloop/ai'; \n \nconst hub = new PromptHub({ \n /** workspace id, use process.env.COZELOOP_WORKSPACE_ID when unprovided */ \n // workspaceId: 'your_workspace_id', \n apiClient: { \n // baseURL: 'api_base_url', \n // token: 'your_api_token', \n }, \n}); \n \n// get prompt with `beta` label \n// - prompt_key: xxx \n// - version: undefined \n// - label: beta \nconst prompt = await hub.getPrompt('xxx', undefined, 'beta'); \n \n// format prompt with variables \nconst messages = hub.formatPrompt(prompt, { \n var1: 'value_of_var1', \n var2: 'value_of_var2', \n var3: 'value_of_var3', \n placeholder1: { role: 'assistant', content: 'user' }, \n});",
|
|
295
|
+
"prompt_use_js_installation": "npm i @cozeloop/ai",
|
|
296
|
+
"prompt_use_js_streaming_call": "import { ApiClient, PromptAsAService } from '@cozeloop/ai'; \n \nconst apiClient = new ApiClient({ \n token: 'pat_xxx', \n}); \n \nconst model = new PromptAsAService({ \n // or set it as process.env.COZELOOP_WORKSPACE_ID, \n workspaceId: 'your_workspace_id', \n // prompt to invoke as a service \n prompt: { \n prompt_key: 'ptaas_demo', \n version: '0.0.1', \n }, \n apiClient, \n}); \n \nconst replyStream = await model.stream({ \n messages: [{ role: 'user', content: 'Keep the answer brief.' }], \n variables: { \n topic: 'artificial intelligence', \n user_request: 'explain what is machine learning', \n }, \n}); \n \nfor await (const chunk of replyStream) { \n console.info(chunk); \n}",
|
|
297
|
+
"prompt_use_js_synchronous_call": "import { ApiClient, PromptAsAService } from '@cozeloop/ai'; \n \nconst apiClient = new ApiClient({ \n token: 'pat_xxx', \n}); \n \nconst model = new PromptAsAService({ \n // or set it as process.env.COZELOOP_WORKSPACE_ID, \n workspaceId: 'your_workspace_id', \n // prompt to invoke as a service \n prompt: { \n prompt_key: 'ptaas_demo', \n version: '0.0.1', \n }, \n apiClient, \n}); \n \nconst reply = await model.invoke({ \n messages: [{ role: 'user', content: 'Keep the answer brief.' }], \n variables: { \n topic: 'artificial intelligence', \n user_request: 'explain what is machine learning', \n }, \n}); \n \nconsole.info(reply);",
|
|
298
|
+
"prompt_use_python_configuration": "if __name__ == '__main__':\n # 1.Create a prompt on the platform\n # You can create a Prompt on the platform's Prompt development page (set Prompt Key to 'prompt_hub_demo'),\n # add the following messages to the template, and submit a version.\n # System: You are a helpful bot, the conversation topic is {{var1}}.\n # Placeholder: placeholder1\n # User: My question is {{var2}}\n # Placeholder: placeholder2\n\n # Set the following environment variables first.\n # COZELOOP_WORKSPACE_ID=your workspace id\n # COZELOOP_API_TOKEN=your token\n # 2.New loop client\n client = cozeloop.new_client(\n # Set whether to report a trace span when get or format prompt.\n # Default value is false.\n prompt_trace=True)\n\n # 3. new root span\n rootSpan = client.start_span(\"root_span\", \"main_span\")\n\n # 4. Get the prompt\n # If no specific version is specified, the latest version of the corresponding prompt will be obtained\n prompt = client.get_prompt(prompt_key=\"prompt_hub_demo\", version=\"0.0.1\")\n if prompt is not None:\n # Get messages of the prompt\n if prompt.prompt_template is not None:\n messages = prompt.prompt_template.messages\n print(\n f\"prompt messages: {json.dumps([message.model_dump(exclude_none=True) for message in messages], ensure_ascii=False)}\")\n # Get llm config of the prompt\n if prompt.llm_config is not None:\n llm_config = prompt.llm_config\n print(f\"prompt llm_config: {llm_config.model_dump_json(exclude_none=True)}\")\n\n # 5.Format messages of the prompt\n formatted_messages = client.prompt_format(prompt, {\n # Normal variable type should be string\n \"var1\": \"artificial intelligence\",\n # Placeholder variable type should be Message/List[Message]\n \"placeholder1\": [Message(role=Role.USER, content=\"Hello!\"),\n Message(role=Role.ASSISTANT, content=\"Hello!\")]\n # Other variables in the prompt template that are not provided with corresponding values will be\n # considered as empty values.\n })\n print(\n 
f\"formatted_messages: {json.dumps([message.model_dump(exclude_none=True) for message in formatted_messages], ensure_ascii=False)}\")\n\n rootSpan.finish()\n # 6. (optional) flush or close\n # -- force flush, report all traces in the queue\n # Warning! In general, this method is not needed to be call, as spans will be automatically reported in batches.\n # Note that flush will block and wait for the report to complete, and it may cause frequent reporting,\n # affecting performance.\n client.flush()",
|
|
299
|
+
"prompt_use_python_installation": "pip install CozeLoop",
|
|
300
|
+
"prompt_use_python_simultaneous_call": "import asyncio\nimport os\n\nfrom anyio import sleep\n\nfrom cozeloop import new_client, Client\nfrom cozeloop.entities.prompt import Message, Role, ExecuteResult\n\n\ndef setup_client() -> Client:\n \"\"\"\n Unified client setup function\n \n Environment variables:\n - COZELOOP_WORKSPACE_ID: workspace ID\n - COZELOOP_API_TOKEN: API token\n \"\"\"\n # Set the following environment variables first.\n # COZELOOP_WORKSPACE_ID=your workspace id\n # COZELOOP_API_TOKEN=your token\n client = new_client(\n api_base_url=os.getenv(\"COZELOOP_API_BASE_URL\"),\n workspace_id=os.getenv(\"COZELOOP_WORKSPACE_ID\"),\n api_token=os.getenv(\"COZELOOP_API_TOKEN\"),\n )\n return client\n\n\ndef print_execute_result(result: ExecuteResult) -> None:\n \"\"\"Unified result printing function, consistent with Go version format\"\"\"\n if result.message:\n print(f\"Message: {result.message}\")\n if result.finish_reason:\n print(f\"FinishReason: {result.finish_reason}\")\n if result.usage:\n print(f\"Usage: {result.usage}\")\n\n\ndef sync_non_stream_example(client: Client) -> None:\n \"\"\"Sync non-stream call example\"\"\"\n print(\"=== Sync Non-Stream Example ===\")\n \n # 1. 
Create a prompt on the platform\n # Create a Prompt on the platform's Prompt development page (set Prompt Key to 'ptaas_demo'),\n # add the following messages to the template, submit a version.\n # System: You are a helpful assistant for {{topic}}.\n # User: Please help me with {{user_request}}\n \n result = client.execute_prompt(\n prompt_key=\"ptaas_demo\",\n version=\"0.0.1\",\n variable_vals={\n \"topic\": \"artificial intelligence\",\n \"user_request\": \"explain what is machine learning\"\n },\n # You can also append messages to the prompt.\n messages=[\n Message(role=Role.USER, content=\"Keep the answer brief.\")\n ],\n stream=False\n )\n print_execute_result(result)\n\n\nasync def async_non_stream_example(client: Client) -> None:\n \"\"\"Async non-stream call example\"\"\"\n print(\"=== Async Non-Stream Example ===\")\n \n result = await client.aexecute_prompt(\n prompt_key=\"ptaas_demo\",\n version=\"0.0.1\",\n variable_vals={\n \"topic\": \"artificial intelligence\",\n \"user_request\": \"explain what is machine learning\"\n },\n messages=[\n Message(role=Role.USER, content=\"Keep the answer brief.\")\n ],\n stream=False\n )\n print_execute_result(result)\n\n\nasync def main():\n \"\"\"Main function\"\"\"\n client = setup_client()\n\n root_span = client.start_span(\"root\", \"custom\")\n try:\n # Sync non-stream call\n sync_non_stream_example(client)\n\n # Async non-stream call\n await async_non_stream_example(client)\n\n finally:\n # Close client\n root_span.finish()\n if hasattr(client, 'close'):\n client.close()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())",
|
|
301
|
+
"prompt_use_python_streaming_call": "import asyncio\nimport os\n\nfrom anyio import sleep\n\nfrom cozeloop import new_client, Client\nfrom cozeloop.entities.prompt import Message, Role, ExecuteResult\n\n\ndef setup_client() -> Client:\n \"\"\"\n Unified client setup function\n \n Environment variables:\n - COZELOOP_WORKSPACE_ID: workspace ID\n - COZELOOP_API_TOKEN: API token\n \"\"\"\n # Set the following environment variables first.\n # COZELOOP_WORKSPACE_ID=your workspace id\n # COZELOOP_API_TOKEN=your token\n client = new_client(\n api_base_url=os.getenv(\"COZELOOP_API_BASE_URL\"),\n workspace_id=os.getenv(\"COZELOOP_WORKSPACE_ID\"),\n api_token=os.getenv(\"COZELOOP_API_TOKEN\"),\n )\n return client\n\n\ndef print_execute_result(result: ExecuteResult) -> None:\n \"\"\"Unified result printing function, consistent with Go version format\"\"\"\n if result.message:\n print(f\"Message: {result.message}\")\n if result.finish_reason:\n print(f\"FinishReason: {result.finish_reason}\")\n if result.usage:\n print(f\"Usage: {result.usage}\")\n\n\ndef sync_stream_example(client: Client) -> None:\n \"\"\"Sync stream call example\"\"\"\n print(\"=== Sync Stream Example ===\")\n \n stream_reader = client.execute_prompt(\n prompt_key=\"ptaas_demo\",\n version=\"0.0.1\",\n variable_vals={\n \"topic\": \"artificial intelligence\",\n \"user_request\": \"explain what is machine learning\"\n },\n messages=[\n Message(role=Role.USER, content=\"Keep the answer brief.\")\n ],\n stream=True\n )\n \n for result in stream_reader:\n print_execute_result(result)\n \n print(\"\\nStream finished.\")\n\n\nasync def async_stream_example(client: Client) -> None:\n \"\"\"Async stream call example\"\"\"\n print(\"=== Async Stream Example ===\")\n \n stream_reader = await client.aexecute_prompt(\n prompt_key=\"ptaas_demo\",\n version=\"0.0.1\",\n variable_vals={\n \"topic\": \"artificial intelligence\",\n \"user_request\": \"explain what is machine learning\"\n },\n messages=[\n 
Message(role=Role.USER, content=\"Keep the answer brief.\")\n ],\n stream=True\n )\n \n async for result in stream_reader:\n print_execute_result(result)\n \n print(\"\\nStream finished.\")\n\n\nasync def main():\n \"\"\"Main function\"\"\"\n client = setup_client()\n\n root_span = client.start_span(\"root\", \"custom\")\n try:\n # Sync stream call\n sync_stream_example(client)\n\n # Async stream call\n await async_stream_example(client)\n \n finally:\n # Close client\n root_span.finish()\n if hasattr(client, 'close'):\n client.close()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())",
|
|
302
|
+
"prompt_use_sdk": "Use SDK",
|
|
303
|
+
"prompt_user_manual": "User manual",
|
|
304
|
+
"prompt_user_or_model_contains_risky_content": "User input or model output contains risky content",
|
|
305
|
+
"prompt_var_format": "Starts with letter, supports letters and underscores",
|
|
306
|
+
"prompt_variable": "Prompt variable",
|
|
307
|
+
"prompt_variable_name": "Variable name",
|
|
308
|
+
"prompt_variable_name_cannot_start_space": "Variable name cannot start with a space",
|
|
309
|
+
"prompt_variable_name_duplicate": "Variable name already exists",
|
|
310
|
+
"prompt_variable_name_exists": "Variable name already exists",
|
|
311
|
+
"prompt_variable_name_format_rule": "Variable name format only supports letters, numbers, underscores, hyphens, and cannot start with a digit",
|
|
312
|
+
"prompt_variable_name_rule_letters_numbers_underscore": "Can only contain letters, numbers, or underscores and must start with a letter",
|
|
313
|
+
"prompt_variable_referenced_in_snippets": "This variable is referenced in {snippetNames} snippets",
|
|
314
|
+
"prompt_variable_value": "Variable value",
|
|
315
|
+
"prompt_version_contains_first_level_nesting": "This version already contains first-level nesting",
|
|
316
|
+
"prompt_version_empty_submitted": "No version has been submitted yet",
|
|
317
|
+
"prompt_version_inconsistent_with_prompt_template": "This version is inconsistent with the current Prompt template type",
|
|
318
|
+
"prompt_version_number_needed": "The Prompt version number needs to be provided.",
|
|
319
|
+
"prompt_version_tag": "Version tag",
|
|
320
|
+
"prompt_version_tag_duplicates_exist": "Duplicate version tags exist",
|
|
321
|
+
"prompt_view_detailed_instructions": "View detailed instructions",
|
|
322
|
+
"prompt_view_documentation": "View Documentation",
|
|
323
|
+
"prompt_whole_word_match": "Whole word match",
|
|
324
|
+
"quick_create": "Quick creation",
|
|
325
|
+
"recent_submission_time": "Recent submission time",
|
|
326
|
+
"request_initiation_time": "Request initiation time",
|
|
327
|
+
"restore_to_this_version": "Restore to this version.",
|
|
328
|
+
"restore_version_tip": "Restoring will overwrite the latest prompt. Confirm restore to this version?",
|
|
329
|
+
"revert_draft_version": "Revert to the draft version",
|
|
330
|
+
"rollback_success": "Rollback succeeded.",
|
|
331
|
+
"search_prompt_key_or_prompt_name": "Search for Prompt Key or Prompt name",
|
|
332
|
+
"set_to_reference_group": "Set as the reference group.",
|
|
333
|
+
"single_step_debugging": "Step Debug",
|
|
334
|
+
"stop_respond": "Stop responding",
|
|
335
|
+
"submission_no_version_diff": "There is no version difference in this submission.",
|
|
336
|
+
"system_role": "System",
|
|
337
|
+
"task_delete_title": "Hint",
|
|
338
|
+
"time_consumed": "Time cost",
|
|
339
|
+
"top_k": "Top K",
|
|
340
|
+
"top_p": "Top P",
|
|
341
|
+
"trace_id": "Trace ID",
|
|
342
|
+
"user_role": "User",
|
|
343
|
+
"variable_setting": "Variable setting",
|
|
344
|
+
"version_number_lt_error": "The version number cannot be less than the current version.",
|
|
345
|
+
"x_step": "Step {num}"
|
|
346
|
+
}
|