codegnipy 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codegnipy/__init__.py +190 -0
- codegnipy/cli.py +153 -0
- codegnipy/decorator.py +151 -0
- codegnipy/determinism.py +631 -0
- codegnipy/memory.py +276 -0
- codegnipy/providers.py +1160 -0
- codegnipy/reflection.py +244 -0
- codegnipy/runtime.py +197 -0
- codegnipy/scheduler.py +498 -0
- codegnipy/streaming.py +387 -0
- codegnipy/tools.py +481 -0
- codegnipy/transformer.py +155 -0
- codegnipy/validation.py +961 -0
- codegnipy-0.0.1.dist-info/METADATA +417 -0
- codegnipy-0.0.1.dist-info/RECORD +19 -0
- codegnipy-0.0.1.dist-info/WHEEL +5 -0
- codegnipy-0.0.1.dist-info/entry_points.txt +2 -0
- codegnipy-0.0.1.dist-info/licenses/LICENSE +21 -0
- codegnipy-0.0.1.dist-info/top_level.txt +1 -0
codegnipy/__init__.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
# Codegnipy - AI-native Python language extension
"""
Codegnipy makes non-deterministic AI capabilities first-class citizens of Python.

Core features:
- `~"prompt"` operator: embed natural-language prompts directly in code
- `@cognitive` decorator: let a function be implemented by an LLM
- Memory store: session-level memory management
- Reflection loop: LLM self-checking and self-correction
- Async scheduling: high-performance concurrent calls
- Determinism guarantees: type constraints, hallucination detection
- Streaming responses: real-time output support
- Tool invocation: function-calling support
- Multiple providers: OpenAI, Anthropic, and more
- Hybrid execution model: seamless cooperation of deterministic logic and fuzzy intent
"""

__version__ = "0.0.1"

from .runtime import cognitive_call, CognitiveContext
from .decorator import cognitive
from .memory import (
    MemoryStore,
    InMemoryStore,
    FileStore,
    Message,
    MessageRole,
    ContextCompressor
)
from .reflection import (
    Reflector,
    ReflectionResult,
    ReflectionStatus,
    with_reflection,
    ReflectiveCognitiveCall
)
from .scheduler import (
    CognitiveScheduler,
    ScheduledTask,
    TaskStatus,
    Priority,
    SchedulerConfig,
    RetryPolicy,
    async_cognitive_call,
    batch_call,
    run_async
)
from .determinism import (
    TypeConstraint,
    PrimitiveConstraint,
    EnumConstraint,
    SchemaConstraint,
    ListConstraint,
    ValidationStatus,
    ValidationResult,
    SimulationMode,
    Simulator,
    HallucinationDetector,
    HallucinationCheck,
    deterministic_call
)
from .streaming import (
    StreamStatus,
    StreamChunk,
    StreamResult,
    stream_call,
    stream_call_async,
    stream_iter,
    stream_iter_async
)
from .tools import (
    ToolType,
    ToolParameter,
    ToolDefinition,
    ToolCall,
    ToolResult,
    ToolRegistry,
    tool,
    call_with_tools,
    register_tool,
    get_global_registry
)
from .providers import (
    ProviderType,
    ProviderConfig,
    BaseProvider,
    OpenAIProvider,
    AnthropicProvider,
    OllamaProvider,
    TransformersProvider,
    ProviderFactory,
    create_provider
)
from .validation import (
    ExternalValidationStatus,
    Evidence,
    ExternalValidationResult,
    BaseValidator,
    WebSearchValidator,
    KnowledgeGraphValidator,
    FactCheckValidator,
    CompositeValidator,
    create_default_validator,
    verify_claim,
    verify_claim_async
)

__all__ = [
    # Core
    "cognitive_call",
    "CognitiveContext",
    "cognitive",
    # Memory
    "MemoryStore",
    "InMemoryStore",
    "FileStore",
    "Message",
    "MessageRole",
    "ContextCompressor",
    # Reflection
    "Reflector",
    "ReflectionResult",
    "ReflectionStatus",
    "with_reflection",
    "ReflectiveCognitiveCall",
    # Scheduler
    "CognitiveScheduler",
    "ScheduledTask",
    "TaskStatus",
    "Priority",
    "SchedulerConfig",
    "RetryPolicy",
    "async_cognitive_call",
    "batch_call",
    "run_async",
    # Determinism
    "TypeConstraint",
    "PrimitiveConstraint",
    "EnumConstraint",
    "SchemaConstraint",
    "ListConstraint",
    "ValidationStatus",
    "ValidationResult",
    "SimulationMode",
    "Simulator",
    "HallucinationDetector",
    "HallucinationCheck",
    "deterministic_call",
    # Streaming
    "StreamStatus",
    "StreamChunk",
    "StreamResult",
    "stream_call",
    "stream_call_async",
    "stream_iter",
    "stream_iter_async",
    # Tools
    "ToolType",
    "ToolParameter",
    "ToolDefinition",
    "ToolCall",
    "ToolResult",
    "ToolRegistry",
    "tool",
    "call_with_tools",
    "register_tool",
    "get_global_registry",
    # Providers
    "ProviderType",
    "ProviderConfig",
    "BaseProvider",
    "OpenAIProvider",
    "AnthropicProvider",
    "OllamaProvider",
    "TransformersProvider",
    "ProviderFactory",
    "create_provider",
    # Validation
    "ExternalValidationStatus",
    "Evidence",
    "ExternalValidationResult",
    "BaseValidator",
    "WebSearchValidator",
    "KnowledgeGraphValidator",
    "FactCheckValidator",
    "CompositeValidator",
    "create_default_validator",
    "verify_claim",
    "verify_claim_async",
]
|
codegnipy/cli.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Codegnipy 命令行接口
|
|
3
|
+
|
|
4
|
+
提供 `codegnipy run` 和 `codegnipy repl` 命令。
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import argparse
|
|
8
|
+
import sys
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Optional
|
|
11
|
+
|
|
12
|
+
from .transformer import transform_code
|
|
13
|
+
from .runtime import CognitiveContext
|
|
14
|
+
import codegnipy
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def create_parser() -> argparse.ArgumentParser:
    """Build the top-level ``codegnipy`` argument parser.

    Three sub-commands are defined: ``run`` (execute a file),
    ``repl`` (interactive session) and ``version`` (print version).
    """
    parser = argparse.ArgumentParser(
        prog="codegnipy",
        description="Codegnipy - AI 原生的 Python 语言扩展",
    )
    commands = parser.add_subparsers(dest="command", help="可用命令")

    # codegnipy run FILE [-m MODEL] [-k KEY]
    run_cmd = commands.add_parser("run", help="运行 .py 文件")
    run_cmd.add_argument("file", help="要运行的 Python 文件")
    run_cmd.add_argument(
        "--model", "-m",
        default="gpt-4o-mini",
        help="使用的 LLM 模型",
    )
    run_cmd.add_argument(
        "--api-key", "-k",
        help="API 密钥(也可通过环境变量 OPENAI_API_KEY 设置)",
    )

    # codegnipy repl [-m MODEL]
    repl_cmd = commands.add_parser("repl", help="启动交互式 REPL")
    repl_cmd.add_argument(
        "--model", "-m",
        default="gpt-4o-mini",
        help="使用的 LLM 模型",
    )

    # codegnipy version
    commands.add_parser("version", help="显示版本信息")

    return parser
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def run_file(filepath: str, model: str, api_key: Optional[str] = None):
    """Execute a Codegnipy source file inside a cognitive context.

    Args:
        filepath: Path to the script to run.
        model: LLM model name for the context.
        api_key: Optional API key (environment variables may also supply it).
    """
    path = Path(filepath)

    # Guard clauses: missing file is fatal, an odd extension is only a warning.
    if not path.exists():
        print(f"错误: 文件不存在: {filepath}", file=sys.stderr)
        sys.exit(1)
    if path.suffix != ".py":
        print(f"警告: 文件扩展名不是 .py: {filepath}", file=sys.stderr)

    source = path.read_text(encoding="utf-8")

    with CognitiveContext(api_key=api_key, model=model):
        # Execution namespace mimicking a normal `python file.py` run,
        # plus the codegnipy module and the active cognitive context.
        exec_globals = {
            "__name__": "__main__",
            "__file__": str(path.absolute()),
            "codegnipy": codegnipy,
            "__cognitive_context__": CognitiveContext.get_current(),
        }

        # Rewrite Codegnipy syntax into plain Python, then compile.
        tree = transform_code(source, str(path))
        code_obj = compile(tree, str(path), "exec")

        try:
            exec(code_obj, exec_globals)
        except Exception as exc:
            print(f"错误: {exc}", file=sys.stderr)
            sys.exit(1)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def start_repl(model: str):
    """Start an interactive REPL whose input is run through the
    Codegnipy source transformer before execution.

    Args:
        model: Name of the LLM model the session's context should use.
    """
    import code

    print("Codegnipy REPL")
    print(f"模型: {model}")
    print("输入 Python 代码,~\"prompt\" 语法将调用 LLM")
    print("输入 exit() 或 Ctrl+D 退出\n")

    # Enter the cognitive context manually so the console can reference it.
    ctx = CognitiveContext(model=model)
    ctx.__enter__()
    try:
        # REPL namespace: expose the package and the active context.
        local_vars = {
            "codegnipy": codegnipy,
            "__cognitive_context__": ctx
        }

        class CognitiveConsole(code.InteractiveConsole):
            """InteractiveConsole that transforms each input before compiling."""

            def runsource(self, source, filename="<input>", symbol="single"):
                try:
                    # Rewrite Codegnipy syntax, then compile for the REPL.
                    tree = transform_code(source, filename)
                    code_obj = compile(tree, filename, symbol)
                except (SyntaxError, OverflowError):
                    # FIX: showsyntaxerror() takes a *filename*, not the
                    # exception — it reads the exception from sys.exc_info().
                    # Passing the exception object corrupted the report.
                    self.showsyntaxerror(filename)
                    return False

                try:
                    exec(code_obj, self.locals)
                except SystemExit:
                    raise
                except Exception:
                    self.showtraceback()

                # NOTE(review): always returning False means multi-line input
                # is never continued; incomplete statements show as errors.
                return False

        console = CognitiveConsole(local_vars)
        console.interact(banner="", exitmsg="再见!")
    finally:
        # FIX: always close the context, even if interact() raises.
        ctx.__exit__(None, None, None)
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def main():
    """CLI entry point: parse arguments and dispatch to the sub-command."""
    parser = create_parser()
    args = parser.parse_args()
    command = args.command

    if command == "version":
        print(f"Codegnipy v{codegnipy.__version__}")
    elif command == "run":
        run_file(args.file, args.model, args.api_key)
    elif command == "repl":
        start_repl(args.model)
    else:
        # No sub-command supplied: print usage and exit with failure.
        parser.print_help()
        sys.exit(1)


if __name__ == "__main__":
    main()
|
codegnipy/decorator.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
"""
|
|
2
|
+
@cognitive 装饰器模块
|
|
3
|
+
|
|
4
|
+
让函数由 LLM 实现,而非手写逻辑。
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from functools import wraps
|
|
8
|
+
from typing import Callable, get_type_hints, Any, Optional
|
|
9
|
+
import inspect
|
|
10
|
+
|
|
11
|
+
from .runtime import cognitive_call
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def cognitive(func: Optional[Callable] = None, *, model: Optional[str] = None) -> Callable:
    """
    Decorator: mark a function as cognitive, implemented by an LLM.

    The docstring acts as the prompt template (``{param}`` placeholders
    are filled from the bound arguments) and the signature drives both
    argument binding and result conversion.

    Args:
        func: The function being decorated (when used without parentheses)
        model: Optional model name forwarded to the LLM call

    Examples:
        @cognitive
        def summarize(text: str) -> str:
            '''Summarize the key points of this text in no more than 3 sentences.'''
            pass

        @cognitive(model="gpt-4")
        def translate(text: str, target_lang: str = "English") -> str:
            '''Translate the text to {target_lang}.'''
            pass
    """
    def decorator(fn: Callable) -> Callable:
        # Capture signature, hints and prompt template once, at decoration time.
        signature = inspect.signature(fn)
        type_hints = get_type_hints(fn) if hasattr(fn, '__annotations__') else {}
        template = fn.__doc__ or f"执行函数 {fn.__name__}"

        @wraps(fn)
        def wrapper(*args, **kwargs):
            bound_args = signature.bind(*args, **kwargs)
            bound_args.apply_defaults()

            # Fill the template with the caller's arguments and ask the LLM.
            prompt = _build_prompt(template, bound_args.arguments)
            answer = cognitive_call(prompt, model=model)

            # Coerce the textual answer when a non-str return type is declared.
            expected = type_hints.get('return')
            if expected and expected is not str:
                answer = _convert_result(answer, expected)
            return answer

        # Flags letting other machinery recognize cognitive functions.
        wrapper._is_cognitive = True  # type: ignore[attr-defined]
        wrapper._original_func = fn  # type: ignore[attr-defined]
        return wrapper

    # Support both @cognitive and @cognitive(...) spellings.
    return decorator(func) if func is not None else decorator
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def _build_prompt(docstring: str, arguments: dict) -> str:
|
|
74
|
+
"""
|
|
75
|
+
根据文档字符串和参数构建提示。
|
|
76
|
+
|
|
77
|
+
支持 {param} 占位符语法。
|
|
78
|
+
"""
|
|
79
|
+
prompt = docstring
|
|
80
|
+
|
|
81
|
+
# 替换占位符
|
|
82
|
+
for key, value in arguments.items():
|
|
83
|
+
placeholder = "{" + key + "}"
|
|
84
|
+
if placeholder in prompt:
|
|
85
|
+
prompt = prompt.replace(placeholder, str(value))
|
|
86
|
+
|
|
87
|
+
# 如果没有占位符,将参数附加到提示后
|
|
88
|
+
if "{" not in docstring and arguments:
|
|
89
|
+
args_str = "\n".join(f"- {k}: {v}" for k, v in arguments.items())
|
|
90
|
+
prompt = f"{docstring}\n\n参数:\n{args_str}"
|
|
91
|
+
|
|
92
|
+
return prompt
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def _convert_result(result: str, target_type: type) -> Any:
|
|
96
|
+
"""
|
|
97
|
+
将 LLM 结果转换为目标类型。
|
|
98
|
+
|
|
99
|
+
目前支持:
|
|
100
|
+
- str: 直接返回
|
|
101
|
+
- int/float: 尝试解析数字
|
|
102
|
+
- bool: 解析布尔值
|
|
103
|
+
- list: 尝试解析 JSON 数组
|
|
104
|
+
- dict: 尝试解析 JSON 对象
|
|
105
|
+
"""
|
|
106
|
+
if target_type is str:
|
|
107
|
+
return result
|
|
108
|
+
|
|
109
|
+
if target_type is int:
|
|
110
|
+
try:
|
|
111
|
+
return int(result.strip())
|
|
112
|
+
except ValueError:
|
|
113
|
+
# 尝试提取数字
|
|
114
|
+
import re
|
|
115
|
+
match = re.search(r'-?\d+', result)
|
|
116
|
+
if match:
|
|
117
|
+
return int(match.group())
|
|
118
|
+
raise ValueError(f"无法将结果转换为整数: {result}")
|
|
119
|
+
|
|
120
|
+
if target_type is float:
|
|
121
|
+
try:
|
|
122
|
+
return float(result.strip())
|
|
123
|
+
except ValueError:
|
|
124
|
+
import re
|
|
125
|
+
match = re.search(r'-?\d+\.?\d*', result)
|
|
126
|
+
if match:
|
|
127
|
+
return float(match.group())
|
|
128
|
+
raise ValueError(f"无法将结果转换为浮点数: {result}")
|
|
129
|
+
|
|
130
|
+
if target_type is bool:
|
|
131
|
+
lower = result.strip().lower()
|
|
132
|
+
if lower in ('true', 'yes', '是', '1', '真'):
|
|
133
|
+
return True
|
|
134
|
+
if lower in ('false', 'no', '否', '0', '假'):
|
|
135
|
+
return False
|
|
136
|
+
raise ValueError(f"无法将结果转换为布尔值: {result}")
|
|
137
|
+
|
|
138
|
+
if target_type in (list, dict):
|
|
139
|
+
import json
|
|
140
|
+
try:
|
|
141
|
+
return json.loads(result)
|
|
142
|
+
except json.JSONDecodeError:
|
|
143
|
+
# 尝试提取 JSON
|
|
144
|
+
import re
|
|
145
|
+
json_match = re.search(r'[\[{].*[\]}]', result, re.DOTALL)
|
|
146
|
+
if json_match:
|
|
147
|
+
return json.loads(json_match.group())
|
|
148
|
+
raise ValueError(f"无法将结果解析为 JSON: {result}")
|
|
149
|
+
|
|
150
|
+
# 未知类型,返回原字符串
|
|
151
|
+
return result
|