vectorvein 0.2.52__py3-none-any.whl → 0.2.54__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/anthropic_client.py +120 -4
- vectorvein/chat_clients/utils.py +1 -1
- vectorvein/types/llm_parameters.py +4 -0
- vectorvein/workflow/utils/check.py +4 -2
- {vectorvein-0.2.52.dist-info → vectorvein-0.2.54.dist-info}/METADATA +1 -1
- {vectorvein-0.2.52.dist-info → vectorvein-0.2.54.dist-info}/RECORD +8 -8
- {vectorvein-0.2.52.dist-info → vectorvein-0.2.54.dist-info}/WHEEL +0 -0
- {vectorvein-0.2.52.dist-info → vectorvein-0.2.54.dist-info}/entry_points.txt +0 -0
vectorvein/chat_clients/anthropic_client.py
CHANGED
@@ -44,8 +44,10 @@ from anthropic.types import (
     MessageParam,
     ToolUseBlock,
     ThinkingConfigParam,
+    RawMessageStopEvent,
     RawMessageDeltaEvent,
     RawMessageStartEvent,
+    RawContentBlockStopEvent,
     RawContentBlockStartEvent,
     RawContentBlockDeltaEvent,
 )
@@ -623,13 +625,16 @@ class AnthropicChatClient(BaseChatClient):
                 raise APIConnectionError(message=e.message, request=e.request)

             def generator():
-                result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
+                result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": [], "raw_content": []}
                 for chunk in stream_response:
                     message = {"content": "", "tool_calls": []}
                     if isinstance(chunk, RawMessageStartEvent):
                         result["usage"] = {"prompt_tokens": chunk.message.usage.input_tokens}
                         continue
                     elif isinstance(chunk, RawContentBlockStartEvent):
+                        content_block = chunk.content_block.model_dump()
+                        result["raw_content"].append(content_block)
+
                         if chunk.content_block.type == "tool_use":
                             result["tool_calls"] = message["tool_calls"] = [
                                 {
@@ -646,14 +651,40 @@ class AnthropicChatClient(BaseChatClient):
                             message["content"] = chunk.content_block.text
                         elif chunk.content_block.type == "thinking":
                             message["reasoning_content"] = chunk.content_block.thinking
+                        elif chunk.content_block.type == "redacted_thinking":
+                            pass
+
+                        message["raw_content"] = content_block
                         yield ChatCompletionDeltaMessage(**message)
                     elif isinstance(chunk, RawContentBlockDeltaEvent):
                         if chunk.delta.type == "text_delta":
                             message["content"] = chunk.delta.text
                             result["content"] += chunk.delta.text
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "text":
+                                    result["raw_content"][i]["text"] += chunk.delta.text
+                                    break
                         elif chunk.delta.type == "thinking_delta":
                             message["reasoning_content"] = chunk.delta.thinking
                             result["reasoning_content"] += chunk.delta.thinking
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "thinking":
+                                    result["raw_content"][i]["thinking"] += chunk.delta.thinking
+                                    break
+                        elif chunk.delta.type == "signature_delta":
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "thinking":
+                                    if "signature" not in result["raw_content"][i]:
+                                        result["raw_content"][i]["signature"] = ""
+                                    result["raw_content"][i]["signature"] += chunk.delta.signature
+                                    break
+                        elif chunk.delta.type == "citations_delta":
+                            citation_data = chunk.delta.citation.model_dump()
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if "citations" not in result["raw_content"][i]:
+                                    result["raw_content"][i]["citations"] = []
+                                result["raw_content"][i]["citations"].append(citation_data)
+                                break
                         elif chunk.delta.type == "input_json_delta":
                             result["tool_calls"][0]["function"]["arguments"] += chunk.delta.partial_json
                             message["tool_calls"] = [
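The reverse scans added above always fold a delta into the most recently opened block of the matching type, which is what keeps interleaved thinking, text, and tool_use blocks separate while streaming. A minimal standalone sketch of that pattern (the function name and sample data are illustrative, not part of the package):

```python
# Reverse-scan accumulation: a delta always lands on the newest block
# of its type, mirroring how the generator above maintains raw_content.
def apply_text_delta(raw_content: list[dict], delta_text: str) -> None:
    for i in range(len(raw_content) - 1, -1, -1):
        if raw_content[i]["type"] == "text":
            raw_content[i]["text"] += delta_text
            break

blocks = [{"type": "thinking", "thinking": "..."}, {"type": "text", "text": "Hel"}]
apply_text_delta(blocks, "lo")
assert blocks[-1]["text"] == "Hello"
```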
@@ -667,6 +698,29 @@ class AnthropicChatClient(BaseChatClient):
                                     "type": "function",
                                 }
                             ]
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "tool_use":
+                                    if "input" not in result["raw_content"][i]:
+                                        result["raw_content"][i]["input"] = {}
+                                    try:
+                                        if result["tool_calls"][0]["function"]["arguments"]:
+                                            result["raw_content"][i]["input"] = json.loads(
+                                                result["tool_calls"][0]["function"]["arguments"]
+                                            )
+                                        else:
+                                            result["raw_content"][i]["input"] = {}
+                                    except json.JSONDecodeError:
+                                        pass
+                                    break
+                        elif chunk.delta.type == "redacted_thinking_delta":
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "redacted_thinking":
+                                    if "data" not in result["raw_content"][i]:
+                                        result["raw_content"][i]["data"] = ""
+                                    result["raw_content"][i]["data"] += chunk.delta.data
+                                    break
+
+                        message["raw_content"] = chunk.delta.model_dump()
                         yield ChatCompletionDeltaMessage(**message)
                     elif isinstance(chunk, RawMessageDeltaEvent):
                         result["usage"]["completion_tokens"] = chunk.usage.output_tokens
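For tool_use blocks the generator re-parses the entire accumulated partial_json buffer on every chunk and swallows json.JSONDecodeError, because early chunks are almost always truncated JSON. A self-contained sketch of that approach (the helper name is hypothetical):

```python
import json

def try_parse_tool_input(arguments: str) -> dict | None:
    """Return the parsed buffer, {} for an empty buffer, or None while the
    buffer is not yet valid JSON (the caller keeps the previous value)."""
    if not arguments:
        return {}
    try:
        return json.loads(arguments)
    except json.JSONDecodeError:
        return None

buffer, tool_input = "", {}
for piece in ('{"city": "Par', 'is"}'):
    buffer += piece
    parsed = try_parse_tool_input(buffer)
    if parsed is not None:
        tool_input = parsed
assert tool_input == {"city": "Paris"}
```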
@@ -678,8 +732,12 @@ class AnthropicChatClient(BaseChatClient):
                                 prompt_tokens=result["usage"]["prompt_tokens"],
                                 completion_tokens=result["usage"]["completion_tokens"],
                                 total_tokens=result["usage"]["total_tokens"],
-                            )
+                            ),
                         )
+                    elif isinstance(chunk, RawMessageStopEvent):
+                        pass
+                    elif isinstance(chunk, RawContentBlockStopEvent):
+                        pass

             return generator()
         else:
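With these changes every streamed ChatCompletionDeltaMessage may carry a raw_content dict: the opened content block for RawContentBlockStartEvent chunks, or the raw delta payload for RawContentBlockDeltaEvent chunks. A consumption sketch with simulated deltas rather than a live client, assuming the model's other fields are optional:

```python
from vectorvein.types.llm_parameters import ChatCompletionDeltaMessage

# Simulated deltas shaped like those generator() yields above.
stream = [
    ChatCompletionDeltaMessage(raw_content={"type": "thinking", "thinking": ""}),
    ChatCompletionDeltaMessage(content="Hi", raw_content={"type": "text_delta", "text": "Hi"}),
]
for delta in stream:
    if delta.raw_content is not None:
        print(delta.raw_content["type"])  # "thinking", then "text_delta"
```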
@@ -704,6 +762,7 @@ class AnthropicChatClient(BaseChatClient):
             result = {
                 "content": "",
                 "reasoning_content": "",
+                "raw_content": [content_block.model_dump() for content_block in response.content],
                 "usage": {
                     "prompt_tokens": response.usage.input_tokens,
                     "completion_tokens": response.usage.output_tokens,
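In the non-streaming path, raw_content is simply every content block of the Anthropic response serialized with model_dump(). An illustration of the resulting shape (all values are invented):

```python
# Possible shape of result["raw_content"] after a non-streaming call:
raw_content = [
    {"type": "thinking", "thinking": "Compare both options...", "signature": "EqQB..."},
    {"type": "text", "text": "Option A is cheaper."},
    {"type": "tool_use", "id": "toolu_01...", "name": "lookup_price", "input": {"item": "A"}},
]
```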
@@ -1172,13 +1231,16 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                 raise APIConnectionError(message=e.message, request=e.request)

             async def generator():
-                result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
+                result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": [], "raw_content": []}
                 async for chunk in stream_response:
                     message = {"content": "", "tool_calls": []}
                     if isinstance(chunk, RawMessageStartEvent):
                         result["usage"] = {"prompt_tokens": chunk.message.usage.input_tokens}
                         continue
                     elif isinstance(chunk, RawContentBlockStartEvent):
+                        content_block = chunk.content_block.model_dump()
+                        result["raw_content"].append(content_block)
+
                         if chunk.content_block.type == "tool_use":
                             result["tool_calls"] = message["tool_calls"] = [
                                 {
@@ -1195,14 +1257,40 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                             message["content"] = chunk.content_block.text
                         elif chunk.content_block.type == "thinking":
                             message["reasoning_content"] = chunk.content_block.thinking
+                        elif chunk.content_block.type == "redacted_thinking":
+                            pass
+
+                        message["raw_content"] = content_block
                         yield ChatCompletionDeltaMessage(**message)
                     elif isinstance(chunk, RawContentBlockDeltaEvent):
                         if chunk.delta.type == "text_delta":
                             message["content"] = chunk.delta.text
                             result["content"] += chunk.delta.text
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "text":
+                                    result["raw_content"][i]["text"] += chunk.delta.text
+                                    break
                         elif chunk.delta.type == "thinking_delta":
                             message["reasoning_content"] = chunk.delta.thinking
                             result["reasoning_content"] += chunk.delta.thinking
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "thinking":
+                                    result["raw_content"][i]["thinking"] += chunk.delta.thinking
+                                    break
+                        elif chunk.delta.type == "signature_delta":
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "thinking":
+                                    if "signature" not in result["raw_content"][i]:
+                                        result["raw_content"][i]["signature"] = ""
+                                    result["raw_content"][i]["signature"] += chunk.delta.signature
+                                    break
+                        elif chunk.delta.type == "citations_delta":
+                            citation_data = chunk.delta.citation.model_dump()
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if "citations" not in result["raw_content"][i]:
+                                    result["raw_content"][i]["citations"] = []
+                                result["raw_content"][i]["citations"].append(citation_data)
+                                break
                         elif chunk.delta.type == "input_json_delta":
                             result["tool_calls"][0]["function"]["arguments"] += chunk.delta.partial_json
                             message["tool_calls"] = [
@@ -1216,6 +1304,29 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                                     "type": "function",
                                 }
                             ]
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "tool_use":
+                                    if "input" not in result["raw_content"][i]:
+                                        result["raw_content"][i]["input"] = {}
+                                    try:
+                                        if result["tool_calls"][0]["function"]["arguments"]:
+                                            result["raw_content"][i]["input"] = json.loads(
+                                                result["tool_calls"][0]["function"]["arguments"]
+                                            )
+                                        else:
+                                            result["raw_content"][i]["input"] = {}
+                                    except json.JSONDecodeError:
+                                        pass
+                                    break
+                        elif chunk.delta.type == "redacted_thinking_delta":
+                            for i in range(len(result["raw_content"]) - 1, -1, -1):
+                                if result["raw_content"][i]["type"] == "redacted_thinking":
+                                    if "data" not in result["raw_content"][i]:
+                                        result["raw_content"][i]["data"] = ""
+                                    result["raw_content"][i]["data"] += chunk.delta.data
+                                    break
+
+                        message["raw_content"] = chunk.delta.model_dump()
                         yield ChatCompletionDeltaMessage(**message)
                     elif isinstance(chunk, RawMessageDeltaEvent):
                         result["usage"]["completion_tokens"] = chunk.usage.output_tokens
@@ -1227,8 +1338,12 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
                                 prompt_tokens=result["usage"]["prompt_tokens"],
                                 completion_tokens=result["usage"]["completion_tokens"],
                                 total_tokens=result["usage"]["total_tokens"],
-                            )
+                            ),
                         )
+                    elif isinstance(chunk, RawMessageStopEvent):
+                        pass
+                    elif isinstance(chunk, RawContentBlockStopEvent):
+                        pass

             return generator()
         else:
@@ -1253,6 +1368,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
             result = {
                 "content": "",
                 "reasoning_content": "",
+                "raw_content": [content_block.model_dump() for content_block in response.content],
                 "usage": {
                     "prompt_tokens": response.usage.input_tokens,
                     "completion_tokens": response.usage.output_tokens,
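AsyncAnthropicChatClient mirrors the synchronous changes with an async generator, so consumers iterate with async for. A self-contained sketch (the stream is faked to keep it runnable):

```python
import asyncio

async def fake_stream():
    # Stand-in for the async delta stream the client returns when streaming.
    for part in ("Hel", "lo"):
        yield {"content": part, "raw_content": {"type": "text_delta", "text": part}}

async def main() -> None:
    text = ""
    async for delta in fake_stream():
        text += delta["content"]
    print(text)  # Hello

asyncio.run(main())
```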
vectorvein/chat_clients/utils.py
CHANGED
@@ -126,7 +126,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
         text = str(text)
     if model == "gpt-3.5-turbo":
         return len(get_gpt_35_encoding().encode(text))
-    elif model.startswith(("gpt-4o", "o1-")):
+    elif model.startswith(("gpt-4o", "o1-", "o3-")):
         return len(get_gpt_4o_encoding().encode(text))
     elif model.startswith(("abab", "MiniMax")):
         backend_setting = settings.get_backend(BackendType.MiniMax).models[model]
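The utils.py change routes o3-prefixed model names into the same tokenizer branch as gpt-4o and o1-. A hedged usage sketch, assuming settings are already configured and the token server is bypassed:

```python
from vectorvein.chat_clients.utils import get_token_counts

# "o3-mini" now hits the gpt-4o encoding branch instead of the generic
# fallback, so its counts match gpt-4o tokenization.
print(get_token_counts("Hello, world!", model="o3-mini", use_token_server_first=False))
```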
vectorvein/types/llm_parameters.py
CHANGED
@@ -149,6 +149,8 @@ class ChatCompletionMessage(BaseModel):

     reasoning_content: Optional[str] = None

+    raw_content: Optional[List[Dict]] = None
+
     tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
     """The tool calls generated by the model, such as function calls."""

@@ -162,6 +164,8 @@ class ChatCompletionDeltaMessage(BaseModel):

     reasoning_content: Optional[str] = None

+    raw_content: Optional[Dict] = None
+
     tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
     """The tool calls generated by the model, such as function calls."""
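The two new fields are typed differently on purpose: a full message aggregates a list of provider content blocks, while a stream delta carries at most one dict. A minimal construction sketch, assuming the models' remaining fields are optional:

```python
from vectorvein.types.llm_parameters import (
    ChatCompletionDeltaMessage,
    ChatCompletionMessage,
)

full = ChatCompletionMessage(content="Hi", raw_content=[{"type": "text", "text": "Hi"}])
delta = ChatCompletionDeltaMessage(raw_content={"type": "text_delta", "text": "Hi"})
print(type(full.raw_content), type(delta.raw_content))  # list vs. dict
```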
vectorvein/workflow/utils/check.py
CHANGED
@@ -139,10 +139,12 @@ def check_ui(workflow: "Workflow") -> UIWarning:
         # Check whether this is an output node
         if hasattr(node, "category") and node.category == "outputs":
             warnings["has_output_nodes"] = True
+            continue  # Output nodes are exempt from the connection-coverage check

         # Check the node's input ports
-
-
+        ports = node.ports if hasattr(node, "ports") else {}
+        for port_name in ports.keys():
+            port = ports.get(port_name)
             # Make sure it is an input port that is set to be shown
             if hasattr(port, "show") and getattr(port, "show", False) and isinstance(port, InputPort):
                 has_shown_input_ports = True
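A standalone illustration of the adjusted check_ui loop, using hypothetical node objects rather than the package's real classes: output nodes set the flag and skip the port scan, while other nodes have their visible input ports inspected.

```python
from types import SimpleNamespace

nodes = [
    SimpleNamespace(category="outputs", ports={}),
    SimpleNamespace(category="llms", ports={"prompt": SimpleNamespace(show=True)}),
]
has_output_nodes = False
for node in nodes:
    if hasattr(node, "category") and node.category == "outputs":
        has_output_nodes = True
        continue  # output nodes are exempt from the coverage check
    ports = node.ports if hasattr(node, "ports") else {}
    for port_name in ports.keys():
        port = ports.get(port_name)
        print(port_name, "shown:", getattr(port, "show", False))
assert has_output_nodes
```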
{vectorvein-0.2.52.dist-info → vectorvein-0.2.54.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
-vectorvein-0.2.
-vectorvein-0.2.
-vectorvein-0.2.
+vectorvein-0.2.54.dist-info/METADATA,sha256=b-MXVu9Jj3l07gGdq2ZyjImBBXXEZFTiCY826pJWT3U,4570
+vectorvein-0.2.54.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+vectorvein-0.2.54.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
 vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
 vectorvein/api/exceptions.py,sha256=uS_PAdx0ksC0r3dgfSGWdbLMZm4qdLeWSSqCv1g3_Gc,772
 vectorvein/api/models.py,sha256=xtPWMsB0yIJI7i-gY4B6MtvXv0ZIXnoeKspmeInH6fU,1449
 vectorvein/chat_clients/__init__.py,sha256=UIytpIgwo8qkZpIyrHVxLYTyliUOTp4J7C4iHRjbtWE,23850
-vectorvein/chat_clients/anthropic_client.py,sha256=
+vectorvein/chat_clients/anthropic_client.py,sha256=lvnE7IrtRehjiuXirhy09VH1NqiV5RFakfG8JAb9WNk,68362
 vectorvein/chat_clients/baichuan_client.py,sha256=CVMvpgjdrZGv0BWnTOBD-f2ufZ3wq3496wqukumsAr4,526
 vectorvein/chat_clients/base_client.py,sha256=p7s-G4Wh9MSpDKEfG8wuFAeWy5DGvj5Go31hqrpQPhM,38817
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
@@ -23,7 +23,7 @@ vectorvein/chat_clients/openai_compatible_client.py,sha256=3MEyKqlf1BtzbNpmqAHQq
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
-vectorvein/chat_clients/utils.py,sha256=
+vectorvein/chat_clients/utils.py,sha256=G2GBFfDHBf5Z_vR2tXNVb1di5G8KKm2RoYlJpey2oxU,29481
 vectorvein/chat_clients/xai_client.py,sha256=eLFJJrNRJ-ni3DpshODcr3S1EJQLbhVwxyO1E54LaqM,491
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
@@ -35,7 +35,7 @@ vectorvein/types/__init__.py,sha256=ypg8c8AwF49FrFBMqmgH_eIBH4LFf0KN4kjqQa7zrvM,
 vectorvein/types/defaults.py,sha256=MpDvT9vNwIqwHFKXTlBq3FY02PLr4BncCdvNhfbxM60,27563
 vectorvein/types/enums.py,sha256=LplSVkXLBK-t8TWtJKj_f7ktWTd6CSHWRLb67XKMm54,1716
 vectorvein/types/exception.py,sha256=KtnqZ-1DstHm95SZAyZdHhkGq1bJ4A9Aw3Zfdu-VIFo,130
-vectorvein/types/llm_parameters.py,sha256=
+vectorvein/types/llm_parameters.py,sha256=Bdz9E_x0G96rvJ5TnEFPrU5QV4I2y0YFv7dY4Pq-MuU,7933
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/types/settings.py,sha256=zneZcebts-MHcYS1DXakulA_f0fGPzidOo1s9yl51zI,4794
 vectorvein/utilities/media_processing.py,sha256=7KtbLFzOYIn1e9QTN9G6C76NH8CBlV9kfAgiRKEIeXY,6263
@@ -61,7 +61,7 @@ vectorvein/workflow/nodes/triggers.py,sha256=BolH4X6S8HSuU2kwHmYKr-ozHbgKBmdZRcn
 vectorvein/workflow/nodes/vector_db.py,sha256=t6I17q6iR3yQreiDHpRrksMdWDPIvgqJs076z-7dlQQ,5712
 vectorvein/workflow/nodes/video_generation.py,sha256=qmdg-t_idpxq1veukd-jv_ChICMOoInKxprV9Z4Vi2w,4118
 vectorvein/workflow/nodes/web_crawlers.py,sha256=BhJBX1AZH7-22Gu95Ox4qJqmH5DU-m4dbUb5N5DTA-M,5559
-vectorvein/workflow/utils/check.py,sha256=
+vectorvein/workflow/utils/check.py,sha256=sirsyg4io5BzgGM0VdEq5s9hyJk0z80Y_mNwCqMc6UI,6168
 vectorvein/workflow/utils/json_to_code.py,sha256=F7dhDy8kGc8ndOeihGLRLGFGlquoxVlb02ENtxnQ0C8,5914
 vectorvein/workflow/utils/layout.py,sha256=j0bRD3uaXu40xCS6U6BGahBI8FrHa5MiF55GbTrZ1LM,4565
-vectorvein-0.2.
+vectorvein-0.2.54.dist-info/RECORD,,
{vectorvein-0.2.52.dist-info → vectorvein-0.2.54.dist-info}/WHEEL: file without changes
{vectorvein-0.2.52.dist-info → vectorvein-0.2.54.dist-info}/entry_points.txt: file without changes