aient 1.1.61.tar.gz → 1.1.63.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {aient-1.1.61 → aient-1.1.63}/PKG-INFO +1 -1
- {aient-1.1.61 → aient-1.1.63}/aient/models/base.py +31 -15
- {aient-1.1.61 → aient-1.1.63}/aient/models/chatgpt.py +79 -185
- {aient-1.1.61 → aient-1.1.63}/aient/utils/scripts.py +2 -0
- {aient-1.1.61 → aient-1.1.63}/aient.egg-info/PKG-INFO +1 -1
- {aient-1.1.61 → aient-1.1.63}/pyproject.toml +1 -1
- {aient-1.1.61 → aient-1.1.63}/LICENSE +0 -0
- {aient-1.1.61 → aient-1.1.63}/README.md +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/__init__.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/__init__.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/log_config.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/models.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/request.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/response.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/test/test_base_api.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/test/test_geminimask.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/test/test_image.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/test/test_payload.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/core/utils.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/models/__init__.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/models/audio.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/__init__.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/arXiv.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/config.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/excute_command.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/get_time.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/image.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/list_directory.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/read_file.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/read_image.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/readonly.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/registry.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/run_python.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/websearch.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/plugins/write_file.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/utils/__init__.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient/utils/prompt.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient.egg-info/SOURCES.txt +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient.egg-info/dependency_links.txt +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient.egg-info/requires.txt +0 -0
- {aient-1.1.61 → aient-1.1.63}/aient.egg-info/top_level.txt +0 -0
- {aient-1.1.61 → aient-1.1.63}/setup.cfg +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_Web_crawler.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_ddg_search.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_google_search.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_ollama.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_plugin.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_search.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_url.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_whisper.py +0 -0
- {aient-1.1.61 → aient-1.1.63}/test/test_yjh.py +0 -0
aient/models/base.py

@@ -53,20 +53,10 @@ class BaseLLM:
                 "https": proxy,
             },
         )
-
-
-
-
-                self.aclient = httpx.AsyncClient(
-                    follow_redirects=True,
-                    proxies=proxy,
-                    timeout=timeout,
-                )
-        else:
-            self.aclient = httpx.AsyncClient(
-                follow_redirects=True,
-                timeout=timeout,
-            )
+        self._aclient = None
+        self._proxy = proxy
+        self._timeout = timeout
+        self._loop = None
 
         self.conversation: dict[str, list[dict]] = {
             "default": [
@@ -83,6 +73,33 @@ class BaseLLM:
         self.use_plugins = use_plugins
         self.print_log: bool = print_log
 
+    def _get_aclient(self):
+        """
+        Lazily initialize and return the httpx.AsyncClient.
+        This method ensures the client is always bound to a running event loop.
+        """
+        import asyncio
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        if self._aclient is None or self._aclient.is_closed or self._loop is not loop:
+            self._loop = loop
+            proxy = self._proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
+            proxies = proxy if proxy and "socks5h" not in proxy else None
+            self._aclient = httpx.AsyncClient(
+                follow_redirects=True,
+                proxy=proxies,
+                timeout=self._timeout,
+            )
+        return self._aclient
+
+    @property
+    def aclient(self):
+        return self._get_aclient()
+
     def add_to_conversation(
         self,
         message: list,
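Note: the lazy `aclient` property added above addresses a common asyncio pitfall: an `httpx.AsyncClient` is bound to the event loop that was current when it was created, so a client built eagerly in `__init__` breaks once `asyncio.run()` disposes of that loop and a later call runs on a fresh one. A minimal standalone sketch of the same lazy-rebind pattern (the `Client` class and `fetch` helper are illustrative, not aient APIs):

import asyncio
import httpx

class Client:
    def __init__(self, timeout: float = 20.0):
        self._aclient = None          # created lazily, never in __init__
        self._loop = None
        self._timeout = timeout

    @property
    def aclient(self) -> httpx.AsyncClient:
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        # Rebuild if never created, already closed, or bound to a different loop.
        if self._aclient is None or self._aclient.is_closed or self._loop is not loop:
            self._loop = loop
            self._aclient = httpx.AsyncClient(follow_redirects=True, timeout=self._timeout)
        return self._aclient

async def fetch(client: Client, url: str) -> int:
    return (await client.aclient.get(url)).status_code

c = Client()
# Each asyncio.run() creates and closes its own loop; the property
# transparently rebuilds the client for the second run.
print(asyncio.run(fetch(c, "https://example.com")))
print(asyncio.run(fetch(c, "https://example.com")))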
@@ -196,7 +213,6 @@ class BaseLLM:
         **kwargs,
     ):
             response += chunk
-        # full_response: str = "".join([r async for r in response])
         full_response: str = "".join(response)
         return full_response
 
aient/models/chatgpt.py

@@ -676,148 +676,7 @@ class chatgpt(BaseLLM):
         self.conversation[convo_id].pop(-1)
         self.conversation[convo_id].pop(-1)
 
-    def ask_stream(
-        self,
-        prompt: list,
-        role: str = "user",
-        convo_id: str = "default",
-        model: str = "",
-        pass_history: int = 9999,
-        function_name: str = "",
-        total_tokens: int = 0,
-        function_arguments: str = "",
-        function_call_id: str = "",
-        language: str = "English",
-        system_prompt: str = None,
-        stream: bool = True,
-        **kwargs,
-    ):
-        """
-        Ask a question (同步流式响应)
-        """
-        # 准备会话
-        self.system_prompt = system_prompt or self.system_prompt
-        if convo_id not in self.conversation or pass_history <= 2:
-            self.reset(convo_id=convo_id, system_prompt=system_prompt)
-        self.add_to_conversation(prompt, role, convo_id=convo_id, function_name=function_name, total_tokens=total_tokens, function_arguments=function_arguments, function_call_id=function_call_id, pass_history=pass_history)
-
-        # 获取请求体
-        json_post = None
-        async def get_post_body_async():
-            nonlocal json_post
-            url, headers, json_post, engine_type = await self.get_post_body(prompt, role, convo_id, model, pass_history, stream=stream, **kwargs)
-            return url, headers, json_post, engine_type
-
-        # 替换原来的获取请求体的代码
-        # json_post = next(async_generator_to_sync(get_post_body_async()))
-        try:
-            url, headers, json_post, engine_type = asyncio.run(get_post_body_async())
-        except RuntimeError:
-            # 如果已经在事件循环中,则使用不同的方法
-            loop = asyncio.get_event_loop()
-            url, headers, json_post, engine_type = loop.run_until_complete(get_post_body_async())
-
-        self.truncate_conversation(convo_id=convo_id)
-
-        # 打印日志
-        if self.print_log:
-            self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
-            self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
-
-        # 发送请求并处理响应
-        for _ in range(3):
-            if self.print_log:
-                replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
-                replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
-                self.logger.info(f"Request Body:\n{replaced_text_str}")
-
-            try:
-                # 改进处理方式,创建一个内部异步函数来处理异步调用
-                async def process_async():
-                    # 异步调用 fetch_response_stream
-                    # self.logger.info("--------------------------------")
-                    # self.logger.info(prompt)
-                    # self.logger.info(parse_function_xml(prompt))
-                    # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
-                    # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
-                    # self.logger.info("--------------------------------")
-                    if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
-                        tmp_response = {
-                            "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
-                            "object": "chat.completion.chunk",
-                            "created": 1754588695,
-                            "model": "gemini-2.5-flash",
-                            "choices": [
-                                {
-                                    "index": 0,
-                                    "delta": {
-                                        "role": "assistant",
-                                        "content": prompt
-                                    },
-                                    "finish_reason": "stop"
-                                }
-                            ],
-                            "system_fingerprint": "fp_d576307f90"
-                        }
-                        async def _mock_response_generator():
-                            yield f"data: {json.dumps(tmp_response)}\n\n"
-                        async_generator = _mock_response_generator()
-                    else:
-                        if stream:
-                            async_generator = fetch_response_stream(
-                                self.aclient,
-                                url,
-                                headers,
-                                json_post,
-                                engine_type,
-                                model or self.engine,
-                            )
-                        else:
-                            async_generator = fetch_response(
-                                self.aclient,
-                                url,
-                                headers,
-                                json_post,
-                                engine_type,
-                                model or self.engine,
-                            )
-                    # 异步处理响应流
-                    async for chunk in self._process_stream_response(
-                        async_generator,
-                        convo_id=convo_id,
-                        function_name=function_name,
-                        total_tokens=total_tokens,
-                        function_arguments=function_arguments,
-                        function_call_id=function_call_id,
-                        model=model,
-                        language=language,
-                        system_prompt=system_prompt,
-                        pass_history=pass_history,
-                        is_async=True,
-                        **kwargs
-                    ):
-                        yield chunk
-
-                # 将异步函数转换为同步生成器
-                return async_generator_to_sync(process_async())
-            except ConnectionError:
-                self.logger.error("连接错误,请检查服务器状态或网络连接。")
-                return
-            except requests.exceptions.ReadTimeout:
-                self.logger.error("请求超时,请检查网络连接或增加超时时间。")
-                return
-            except httpx.RemoteProtocolError:
-                continue
-            except Exception as e:
-                self.logger.error(f"发生了未预料的错误:{e}")
-                if "Invalid URL" in str(e):
-                    e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
-                raise Exception(f"{e}")
-            # 最后一次重试失败,向上抛出异常
-            if _ == 2:
-                raise Exception(f"{e}")
-
-    async def ask_stream_async(
+    async def _ask_stream_handler(
         self,
         prompt: list,
         role: str = "user",
@@ -834,7 +693,7 @@ class chatgpt(BaseLLM):
         **kwargs,
     ):
         """
-        Ask a question (异步流式响应)
+        Unified stream handler (async)
         """
         # 准备会话
         self.system_prompt = system_prompt or self.system_prompt
@@ -848,41 +707,31 @@ class chatgpt(BaseLLM):
 
         # 打印日志
         if self.print_log:
-            self.logger.info(f"api_url: {url}")
+            self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
             self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
 
         # 发送请求并处理响应
-        for _ in range(3):
+        for i in range(3):
             if self.print_log:
                 replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
                 replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
                 self.logger.info(f"Request Body:\n{replaced_text_str}")
 
             try:
-                # 使用fetch_response_stream处理响应
-                # self.logger.info("--------------------------------")
-                # self.logger.info(prompt)
-                # self.logger.info(parse_function_xml(prompt))
-                # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
-                # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
-                # self.logger.info("--------------------------------")
                 if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
                     tmp_response = {
                         "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
                         "object": "chat.completion.chunk",
                         "created": 1754588695,
-                        "model": "gemini-2.5-flash",
+                        "model": model or self.engine,
                         "choices": [
                             {
-                                "index": 0,
-                                "delta": {
-                                    "role": "assistant",
-                                    "content": prompt
-                                },
-                                "finish_reason": "stop"
+                                "index": 0,
+                                "delta": {"role": "assistant", "content": prompt},
+                                "finish_reason": "stop",
                             }
                         ],
-                        "system_fingerprint": "fp_d576307f90"
+                        "system_fingerprint": "fp_d576307f90",
                     }
                     async def _mock_response_generator():
                         yield f"data: {json.dumps(tmp_response)}\n\n"
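Note: the branch above short-circuits the network call when the prompt is already a well-formed XML tool-call block, emitting a single OpenAI-style `chat.completion.chunk` framed as a server-sent event (a `data: <json>` line followed by a blank line). A self-contained sketch of producing and consuming that framing (`mock_sse` and `collect` are illustrative names, not aient functions):

import asyncio
import json

async def mock_sse(content: str):
    # One OpenAI-style streaming chunk, SSE-framed like the mock generator above.
    chunk = {
        "object": "chat.completion.chunk",
        "choices": [{
            "index": 0,
            "delta": {"role": "assistant", "content": content},
            "finish_reason": "stop",
        }],
    }
    yield f"data: {json.dumps(chunk)}\n\n"

async def collect(gen) -> str:
    parts = []
    async for event in gen:
        payload = event.removeprefix("data: ").strip()
        delta = json.loads(payload)["choices"][0]["delta"]
        parts.append(delta.get("content", ""))
    return "".join(parts)

print(asyncio.run(collect(mock_sse("<tool_call>...</tool_call>"))))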
@@ -890,42 +739,27 @@ class chatgpt(BaseLLM):
                 else:
                     if stream:
                         generator = fetch_response_stream(
-                            self.aclient,
-                            url,
-                            headers,
-                            json_post,
-                            engine_type,
-                            model or self.engine,
+                            self.aclient, url, headers, json_post, engine_type, model or self.engine,
                         )
                     else:
                         generator = fetch_response(
-                            self.aclient,
-                            url,
-                            headers,
-                            json_post,
-                            engine_type,
-                            model or self.engine,
+                            self.aclient, url, headers, json_post, engine_type, model or self.engine,
                         )
 
                 # 处理正常响应
                 async for processed_chunk in self._process_stream_response(
-                    generator,
-                    convo_id=convo_id,
-                    function_name=function_name,
-                    total_tokens=total_tokens,
-                    function_arguments=function_arguments,
-                    function_call_id=function_call_id,
-                    model=model,
-                    language=language,
-                    system_prompt=system_prompt,
-                    pass_history=pass_history,
-                    is_async=True,
-                    **kwargs
+                    generator, convo_id=convo_id, function_name=function_name,
+                    total_tokens=total_tokens, function_arguments=function_arguments,
+                    function_call_id=function_call_id, model=model, language=language,
+                    system_prompt=system_prompt, pass_history=pass_history, is_async=True, **kwargs
                 ):
                     yield processed_chunk
 
                 # 成功处理,跳出重试循环
                 break
+            except (httpx.ConnectError, httpx.ReadTimeout):
+                self.logger.error("连接或读取超时错误,请检查服务器状态或网络连接。")
+                return # Stop iteration
             except httpx.RemoteProtocolError:
                 continue
             except Exception as e:
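Note: the new except clauses above replace the old `ConnectionError`/`requests.exceptions.ReadTimeout` pair with their httpx equivalents, matching what the handler can actually raise now that all I/O goes through `httpx.AsyncClient`. A hedged standalone sketch of the resulting retry policy (up to 3 attempts, abort on connect/read failures, retry transient protocol errors, propagate the rest; `fetch_with_retry` is an illustrative name):

import httpx

async def fetch_with_retry(client: httpx.AsyncClient, url: str) -> str | None:
    for i in range(3):
        try:
            resp = await client.get(url)
            return resp.text
        except (httpx.ConnectError, httpx.ReadTimeout):
            return None        # network-level failure: give up
        except httpx.RemoteProtocolError:
            if i == 2:
                raise          # last attempt failed: propagate
            continue           # transient protocol error: retry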
@@ -936,9 +770,69 @@ class chatgpt(BaseLLM):
                     e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
                 raise Exception(f"{e}")
             # 最后一次重试失败,向上抛出异常
-            if _ == 2:
+            if i == 2:
                 raise Exception(f"{e}")
 
+    def ask_stream(
+        self,
+        prompt: list,
+        role: str = "user",
+        convo_id: str = "default",
+        model: str = "",
+        pass_history: int = 9999,
+        function_name: str = "",
+        total_tokens: int = 0,
+        function_arguments: str = "",
+        function_call_id: str = "",
+        language: str = "English",
+        system_prompt: str = None,
+        stream: bool = True,
+        **kwargs,
+    ):
+        """
+        Ask a question (同步流式响应)
+        """
+        try:
+            loop = asyncio.get_event_loop()
+            if loop.is_closed():
+                loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(loop)
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        async_gen = self._ask_stream_handler(
+            prompt, role, convo_id, model, pass_history, function_name, total_tokens,
+            function_arguments, function_call_id, language, system_prompt, stream, **kwargs
+        )
+        for chunk in async_generator_to_sync(async_gen):
+            yield chunk
+
+    async def ask_stream_async(
+        self,
+        prompt: list,
+        role: str = "user",
+        convo_id: str = "default",
+        model: str = "",
+        pass_history: int = 9999,
+        function_name: str = "",
+        total_tokens: int = 0,
+        function_arguments: str = "",
+        function_call_id: str = "",
+        language: str = "English",
+        system_prompt: str = None,
+        stream: bool = True,
+        **kwargs,
+    ):
+        """
+        Ask a question (异步流式响应)
+        """
+        async for chunk in self._ask_stream_handler(
+            prompt, role, convo_id, model, pass_history, function_name, total_tokens,
+            function_arguments, function_call_id, language, system_prompt, stream, **kwargs
+        ):
+            yield chunk
+
     async def ask_async(
         self,
         prompt: str,
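Note: `ask_stream` is now a thin synchronous wrapper that drives the shared `_ask_stream_handler` coroutine through `async_generator_to_sync`, so the sync and async entry points share one code path instead of two near-duplicate method bodies. A minimal bridge in the same spirit (a sketch, not the package's exact implementation, which lives in aient/utils/scripts.py):

import asyncio

async def chunks():
    # Stand-in for an async chunk stream such as _ask_stream_handler.
    for n in range(3):
        await asyncio.sleep(0)
        yield n

def to_sync(async_gen):
    # Drive an async generator to completion from synchronous code,
    # pumping one item per run_until_complete call.
    loop = asyncio.new_event_loop()
    try:
        while True:
            try:
                yield loop.run_until_complete(async_gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()

print(list(to_sync(chunks())))  # -> [0, 1, 2]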
aient/utils/scripts.py

@@ -212,6 +212,8 @@ def async_generator_to_sync(async_gen):
         # 清理所有待处理的任务
         tasks = [t for t in asyncio.all_tasks(loop) if not t.done()]
         if tasks:
+            for task in tasks:
+                task.cancel()
             loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
         loop.run_until_complete(loop.shutdown_asyncgens())
         loop.close()
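Note: the two added lines fix a potential hang in the bridge's cleanup path. Gathering tasks that are still pending and will never finish on their own blocks `run_until_complete` indefinitely; cancelling them first turns each into a `CancelledError` that `gather(return_exceptions=True)` absorbs, so the loop can close cleanly. A standalone sketch of that shutdown sequence:

import asyncio

async def hang():
    await asyncio.Event().wait()   # pending forever unless cancelled

loop = asyncio.new_event_loop()
task = loop.create_task(hang())
loop.run_until_complete(asyncio.sleep(0))   # let the task start

pending = [t for t in asyncio.all_tasks(loop) if not t.done()]
for t in pending:
    t.cancel()   # without this, the gather below would never return
loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
print("clean shutdown")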