wolfhece 2.2.46__py3-none-any.whl → 2.2.47__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- wolfhece/PyCrosssections.py +14 -2
- wolfhece/apps/version.py +1 -1
- wolfhece/apps/wolf.ico +0 -0
- wolfhece/apps/wolf2.png +0 -0
- {wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/METADATA +1 -1
- {wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/RECORD +9 -8
- wolfhece/ChatwWOLF.py +0 -200
- {wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/WHEEL +0 -0
- {wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/entry_points.txt +0 -0
- {wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/top_level.txt +0 -0
wolfhece/PyCrosssections.py
CHANGED
@@ -2395,7 +2395,7 @@ class crosssections(Element_To_Draw):
     ['right_down']: wolfvertex
     ['cs'] : profile (overload of vector)
 
-    For the moment, it is possible to read the files and perform
+    For the moment, it is possible to read the files and perform some processing (sorting along a vector, gltf export...).
 
     An instance of this object can be added to a WOLF graphical instance for display.
     To do so:
@@ -2408,7 +2408,7 @@ class crosssections(Element_To_Draw):
 
     """
 
-    myprofiles:dict['cs':profile, 'index':int, 'left':wolfvertex, 'bed':wolfvertex, 'right':wolfvertex, 'left_down':wolfvertex, 'right_down':wolfvertex]
+    myprofiles:dict[str | int: dict['cs':profile, 'index':int, 'left':wolfvertex, 'bed':wolfvertex, 'right':wolfvertex, 'left_down':wolfvertex, 'right_down':wolfvertex]]
    mygenprofiles:dict
 
     def __init__(self,
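The annotation fix above documents that myprofiles is not a single record but a mapping from a profile name (str) or index (int) to a record holding the cross-section and its characteristic vertices. A minimal sketch of that shape; the key 'XS_001' and the None placeholders are illustrative stand-ins for wolfhece profile and wolfvertex instances:

    # Illustrative shape of `myprofiles` as documented by the new annotation.
    myprofiles = {
        'XS_001': {              # key: profile name (str) or index (int)
            'index': 0,          # int index of the profile
            'cs': None,          # `profile` instance (overload of vector)
            'left': None,        # characteristic `wolfvertex` instances
            'bed': None,
            'right': None,
            'left_down': None,
            'right_down': None,
        },
    }

    record = myprofiles['XS_001']    # one record per cross-section
    section = record['cs']

Note that dict[str | int: dict[...]] is informal docstring notation rather than valid typing syntax; a typing.TypedDict would be the formal equivalent of the record type.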
@@ -3395,6 +3395,10 @@ class crosssections(Element_To_Draw):
 
         curprof = self[0]
 
+        if curprof.up is None:
+            logging.warning(_('No upstream profile defined for profile %s.')%curprof.myname)
+            return self.myprofiles[curprof.myname]
+
         while curprof.up is not curprof:
             curprof = curprof.up
 
@@ -3405,6 +3409,10 @@ class crosssections(Element_To_Draw):
 
         curprof = self[0]
 
+        if curprof.down is None:
+            logging.warning(_('No downstream profile defined for profile %s.')%curprof.myname)
+            return self.myprofiles[curprof.myname]
+
         while curprof.down is not curprof:
             curprof = curprof.down
 
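Both guards above fix the same failure mode, once for the upstream chain and once for the downstream chain: profiles are linked through their up/down attributes and the chain terminates at a profile that points to itself, so a profile whose link was never set (left as None) made the old while loop raise an AttributeError. A simplified, self-contained sketch of the upstream case; Node stands in for profile, and the real methods return an entry of self.myprofiles rather than the node itself:

    import logging

    class Node:
        def __init__(self, name: str):
            self.myname = name
            self.up = None              # None until the chain has been linked

    def uppermost(node: Node) -> Node:
        # Guard added in 2.2.47: an unlinked profile now logs a warning
        # and returns the starting profile instead of crashing.
        if node.up is None:
            logging.warning('No upstream profile defined for profile %s.', node.myname)
            return node
        while node.up is not node:      # the uppermost node points to itself
            node = node.up
        return node

    a, b = Node('A'), Node('B')
    a.up, b.up = b, b                   # B is uppermost: it points to itself
    assert uppermost(a).myname == 'B'
    assert uppermost(Node('C')).myname == 'C'   # unlinked: warns, no crash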
@@ -3649,6 +3657,10 @@ class crosssections(Element_To_Draw):
         if to_destroy:
             destroy_prepared(vecsupport)
 
+        if len(mysorted)==0:
+            logging.warning(_('No cross-section intersects the support vector!'))
+            return 0
+
         # sort the result in place
         mysorted.sort(key=lambda x:x.s)
 
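The last hunk guards the sorting step itself: when the support vector crosses no cross-section, mysorted stays empty, and the method now warns and returns 0 instead of continuing with an empty result. A minimal sketch of the pattern, assuming only what the hunk shows, namely that each intersection carries a curvilinear abscissa s along the support vector (the final return len(...) is illustrative, not the real method's return value):

    import logging
    from dataclasses import dataclass

    @dataclass
    class Crossing:
        s: float    # curvilinear abscissa along the support vector

    def sort_crossings(mysorted: list) -> int:
        # Guard added in 2.2.47: empty intersection -> warn and bail out.
        if len(mysorted) == 0:
            logging.warning('No cross-section intersects the support vector!')
            return 0
        mysorted.sort(key=lambda x: x.s)    # sort the result in place
        return len(mysorted)

    print(sort_crossings([]))                               # 0, with a warning
    print(sort_crossings([Crossing(2.5), Crossing(0.8)]))   # 2, sorted by s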
wolfhece/apps/version.py
CHANGED
wolfhece/apps/wolf.ico
ADDED
Binary file
wolfhece/apps/wolf2.png
ADDED
Binary file
{wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/RECORD
CHANGED
@@ -1,4 +1,3 @@
-wolfhece/ChatwWOLF.py,sha256=B7MkwZiLYjR3OUNBcTIxDnYZzOBFDe52k880KTLUotc,10135
 wolfhece/Coordinates_operations.py,sha256=DSkzJ1Rm4y89I9tuyyAA9mp-EHp9vl5w2qGpNJ-e9qs,8215
 wolfhece/CpGrid.py,sha256=_piG1u-ua7NzWh_PHJYTmxuPJ43ZfeYKNEQgZIJwDJ8,10660
 wolfhece/GraphNotebook.py,sha256=_VZfakR5eXBZE-4Ztv2n12ZDO8zESoeDfCz_9k__T20,31509
@@ -9,7 +8,7 @@ wolfhece/Model1D.py,sha256=-2ibQLscVUsXlcnJWixCIScrBPqJ9BTirmwtGXEKI-4,571155
 wolfhece/MulticriteriAnalysis.py,sha256=vGmkzYagZohNe0XjwGJ6VUXcDPjOt80lNFthXpzxCF0,59572
 wolfhece/PandasGrid.py,sha256=etfVhIHzja4Z1EUY6BcDOKX-w7V-Xou1yaf0NMqmclo,4599
 wolfhece/PyConfig.py,sha256=13DDWjJdohYHwn1uRVHB0s8Jcwq_b9pwcwbAr8NlZyc,19667
-wolfhece/PyCrosssections.py,sha256=
+wolfhece/PyCrosssections.py,sha256=kCZBlX3idRY-QqJRHE6MPm4Qxz0AOVublV-JSJYUab8,184622
 wolfhece/PyDraw.py,sha256=Wby6vq-xxQUIF-PoNNBJ4GNgrP6Ug5RX5A3eylYEDP8,744285
 wolfhece/PyGui.py,sha256=z8m4M4Q7DVnRt_bEpEDjUl5x0FMkmsEnH2XHnGKWo14,185336
 wolfhece/PyGuiHydrology.py,sha256=dmBlRO8AljsvCPH6eVt0l9ZLx7g5j7Ubl9Srk7ECwyA,34693
@@ -97,8 +96,10 @@ wolfhece/apps/curvedigitizer.py,sha256=lEJJwgAfulrrWQc-U6ij6sj59hWN3SZl4Yu1kQxVz
 wolfhece/apps/hydrometry.py,sha256=lhhJsFeb4zGL4bNQTs0co85OQ_6ssL1Oy0OUJCzhfYE,656
 wolfhece/apps/isocurrent.py,sha256=dagmGR8ja9QQ1gwz_8fU-N052hIw-W0mWGVkzLu6C7I,4247
 wolfhece/apps/splashscreen.py,sha256=EdGDN9NhudIiP7c3gVqj7dp4MWFB8ySizM_tpMnsgpE,3091
-wolfhece/apps/version.py,sha256=
+wolfhece/apps/version.py,sha256=yOcKnPSbBIuPo1EH2qe5sbCPJ9lY1HrQWjZYeyCTgb0,388
+wolfhece/apps/wolf.ico,sha256=ej-kLodlUrQLsFDxpDhu2Mak7BTuWw6c9NCJaCBOkHI,238323
 wolfhece/apps/wolf.py,sha256=mRnjYsUu4KIsRuamdQWAINFMuwN4eJgMo9erG-hkZ70,729
+wolfhece/apps/wolf2.png,sha256=YXd-MbtlxWRq38l5L6ky4Lg80DFuoZEKdbDwmeSsWqo,24714
 wolfhece/apps/wolf2D.py,sha256=4z_OPQ3IgaLtjexjMKX9ppvqEYyjFLt1hcfFABy3-jU,703
 wolfhece/apps/wolf_logo.bmp,sha256=ruJ4MA51CpGO_AYUp_dB4SWKHelvhOvd7Q8NrVOjDJk,3126
 wolfhece/apps/wolf_logo2.bmp,sha256=pCJFVDn_-rHru6fumazVNM4BqAaobM0Xg0zI0DFWMSQ,5830
@@ -323,8 +324,8 @@ wolfhece/ui/wolf_multiselection_collapsiblepane.py,sha256=u4C7CXe_bUyGKx7c_Bi0x9
 wolfhece/ui/wolf_times_selection_comparison_models.py,sha256=ORy7fz4dcp691qKzaOZHrRLZ0uXNhL-LIHxmpDGL6BI,5007
 wolfhece/wintab/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 wolfhece/wintab/wintab.py,sha256=8A-JNONV6ujgsgG3lM5Uw-pVgglPATwKs86oBzzljoc,7179
-wolfhece-2.2.
-wolfhece-2.2.
-wolfhece-2.2.
-wolfhece-2.2.
-wolfhece-2.2.
+wolfhece-2.2.47.dist-info/METADATA,sha256=E2D4W6SdupEluafVKCe7arDTdJcFpE5BniIeE35l0AU,2792
+wolfhece-2.2.47.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+wolfhece-2.2.47.dist-info/entry_points.txt,sha256=Jr187pyvA3EeJiQLjZK9yo6mJX7IAn6ygZU9T8qF_gQ,658
+wolfhece-2.2.47.dist-info/top_level.txt,sha256=EfqZXMVCn7eILUzx9xsEu2oBbSo9liWPFWjIHik0iCI,9
+wolfhece-2.2.47.dist-info/RECORD,,
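Each RECORD line has the form path,sha256=<digest>,<size>: the digest is the urlsafe-base64-encoded SHA-256 of the file contents with the trailing '=' padding stripped, per the wheel RECORD convention (the removed-side hashes above appear truncated in the source). A short sketch to recompute such an entry for any file:

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: Path) -> str:
        # Wheel RECORD convention: urlsafe base64 SHA-256, '=' padding stripped.
        data = path.read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=')
        return f"{path.as_posix()},sha256={digest.decode('ascii')},{len(data)}"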
wolfhece/ChatwWOLF.py
DELETED
@@ -1,200 +0,0 @@
-# Data preparation for the ChatWOLF model, a conversational machine specialized in questions about WOLF.
-# The data is mainly extracted from the rst files of the online help, but also from the py files of the API.
-import torch
-print(torch.cuda.is_available())
-
-# Import the required modules
-import os
-import re
-import json
-from pathlib import Path
-from typing import List, Dict, Any
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.docstore.document import Document
-from langchain_huggingface import HuggingFaceEmbeddings
-from langchain_community.vectorstores import FAISS
-from langchain.chains.retrieval_qa.base import RetrievalQA
-from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
-from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
-from sklearn.model_selection import train_test_split
-from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
-import torch
-from datasets import load_dataset, Dataset
-from transformers import Trainer, TrainingArguments
-import logging
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-rst_directory = Path("D:/ProgrammationGitLab/HECEPython/docs/source")
-py_directory = Path("D:/ProgrammationGitLab/HECEPython/wolfhece")
-output_directory = Path("D:/ProgrammationGitLab/HECEPython/wolfhece/models/chatwolf")
-output_directory.mkdir(parents=True, exist_ok=True)
-
-# Function to extract the text from rst files
-def extract_text_from_rst(file_path: Path) -> str:
-    with open(file_path, 'r', encoding='utf-8') as file:
-        text = file.read()
-    # Clean up the text
-    text = re.sub(r'\.\. _.*?:', '', text) # Remove references
-    text = re.sub(r'\.\. note::.*?\n\n', '', text, flags=re.DOTALL) # Remove notes
-    text = re.sub(r'\.\. warning::.*?\n\n', '', text, flags=re.DOTALL) # Remove warnings
-    text = re.sub(r'\.\. code-block::.*?\n\n', '', text, flags=re.DOTALL) # Remove code blocks
-    text = re.sub(r'\.\. image::.*?\n\n', '', text, flags=re.DOTALL) # Remove images
-    text = re.sub(r'\.\. figure::.*?\n\n', '', text, flags=re.DOTALL) # Remove figures
-    text = re.sub(r'\.\. table::.*?\n\n', '', text, flags=re.DOTALL) # Remove tables
-    text = re.sub(r'\.\. rubric::.*?\n\n', '', text, flags=re.DOTALL) # Remove rubrics
-    text = re.sub(r'\.\. sidebar::.*?\n\n', '', text, flags=re.DOTALL) # Remove sidebars
-    text = re.sub(r'\.\. literalinclude::.*?\n\n', '', text, flags=re.DOTALL) # Remove literal includes
-    text = re.sub(r'\.\. math::.*?\n\n', '', text, flags=re.DOTALL) # Remove math
-    text = re.sub(r'\.\. raw::.*?\n\n', '', text, flags=re.DOTALL) # Remove raw
-    text = re.sub(r'\.\. toctree::.*?\n\n', '', text, flags=re.DOTALL) # Remove toctree
-    text = re.sub(r'\.\. index::.*?\n\n', '', text, flags=re.DOTALL) # Remove index
-    text = re.sub(r'\.\. glossary::.*?\n\n', '', text, flags=re.DOTALL) # Remove glossary
-    text = re.sub(r'\.\. footnote::.*?\n\n', '', text, flags=re.DOTALL) # Remove footnotes
-    text = re.sub(r'\.\. citation::.*?\n\n', '', text, flags=re.DOTALL) # Remove citations
-    text = re.sub(r'\.\. epigraph::.*?\n\n', '', text, flags=re.DOTALL) # Remove epigraphs
-    text = re.sub(r'\.\. highlight::.*?\n\n', '', text, flags=re.DOTALL) # Remove highlights
-    text = re.sub(r'\.\. hlist::.*?\n\n', '', text, flags=re.DOTALL) # Remove hlists
-    text = re.sub(r'\.\. csv-table::.*?\n\n', '', text, flags=re.DOTALL) # Remove csv-tables
-    text = re.sub(r'\.\. list-table::.*?\n\n', '', text, flags=re.DOTALL) # Remove list-tables
-    text = re.sub(r'\.\. contents::.*?\n\n', '', text, flags=re.DOTALL) # Remove contents
-    text = re.sub(r'\.\. include::.*?\n\n', '', text, flags=re.DOTALL) # Remove includes
-    text = re.sub(r'\.\. admonition::.*?\n\n', '', text, flags=re.DOTALL) # Remove admonitions
-    text = re.sub(r'\.\. note::.*?\n\n', '', text, flags=re.DOTALL) # Remove notes
-    text = re.sub(r'\.\. tip::.*?\n\n', '', text, flags=re.DOTALL) # Remove tips
-    text = re.sub(r'\.\. important::.*?\n\n', '', text, flags=re.DOTALL) # Remove importants
-    text = re.sub(r'\.\. caution::.*?\n\n', '', text, flags=re.DOTALL) # Remove cautions
-    text = re.sub(r'\.\. seealso::.*?\n\n', '', text, flags=re.DOTALL) # Remove seealso
-
-    return text
-
-def scan_files() -> List[Path]:
-    # Scan all files and extract text
-    documents = []
-    for rst_file in rst_directory.rglob("*.rst"):
-        text = extract_text_from_rst(rst_file)
-        if text.strip(): # Only add non-empty documents
-            documents.append(Document(page_content=text, metadata={"source": str(rst_file)}))
-            logger.info(f"Extracted text from {rst_file}")
-    for py_file in py_directory.rglob("*.py"):
-        with open(py_file, 'r', encoding='utf-8') as file:
-            text = file.read()
-        if text.strip(): # Only add non-empty documents
-            documents.append(Document(page_content=text, metadata={"source": str(py_file)}))
-            logger.info(f"Extracted text from {py_file}")
-    logger.info(f"Total documents extracted: {len(documents)}")
-    return documents
-
-def split_and_prepare_data(documents: List[Document]) -> None:
-    # Split documents into smaller chunks
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=1000,
-        chunk_overlap=100,
-        length_function=len
-    )
-    texts = text_splitter.split_documents(documents)
-    logger.info(f"Total text chunks created: {len(texts)}")
-    # Save texts to JSONL for dataset creation
-    jsonl_path = output_directory / "chatwolf_data.jsonl"
-    with open(jsonl_path, 'w', encoding='utf-8') as f:
-        for text in texts:
-            json.dump({"text": text.page_content}, f)
-            f.write('\n')
-    logger.info(f"Saved text chunks to {jsonl_path}")
-    return texts, jsonl_path
-
-def train_model():
-    # Load dataset
-    dataset = load_dataset('json', data_files=str(jsonl_path))['train']
-    # Split dataset into training and validation sets
-    train_test_split = dataset.train_test_split(test_size=0.1)
-    train_dataset = train_test_split['train']
-    eval_dataset = train_test_split['test']
-    logger.info(f"Training dataset size: {len(train_dataset)}")
-    logger.info(f"Validation dataset size: {len(eval_dataset)}")
-    # Define model and tokenizer
-    model_name = "gpt2"
-    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
-    if tokenizer.pad_token is None:
-        tokenizer.pad_token = tokenizer.eos_token
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    # Define training arguments
-    training_args = TrainingArguments(
-        output_dir=output_directory / "output",
-        eval_strategy="epoch",
-        num_train_epochs=3,
-        per_device_train_batch_size=1,
-        per_device_eval_batch_size=1,
-        save_strategy="epoch",
-        logging_dir=output_directory / "logs",
-        logging_steps=10,
-        save_total_limit=2,
-        fp16=False, # Set to False to avoid FP16 errors on unsupported hardware
-        load_best_model_at_end=True,
-    )
-    # Define data collator
-    def tokenize_function(examples):
-        return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=512)
-    train_dataset = train_dataset.map(tokenize_function, batched=True)
-    eval_dataset = eval_dataset.map(tokenize_function, batched=True)
-    train_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
-    eval_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
-    # Define data collator for causal language modeling
-    from transformers import DataCollatorForLanguageModeling
-    data_collator = DataCollatorForLanguageModeling(
-        tokenizer=tokenizer,
-        mlm=False,
-    )
-    # Initialize Trainer
-    trainer = Trainer(
-        model=model,
-        args=training_args,
-        train_dataset=train_dataset,
-        eval_dataset=eval_dataset,
-        tokenizer=tokenizer,
-        data_collator=data_collator,
-    )
-    # Train the model
-    trainer.train()
-    # Save the fine-tuned model
-    trainer.save_model(output_directory / "chatwolf_model")
-    logger.info(f"Saved fine-tuned model to {output_directory / 'chatwolf_model'}")
-    return model, tokenizer
-
-def load_model_and_tokenizer():
-    model = AutoModelForCausalLM.from_pretrained(output_directory / "chatwolf_model")
-    tokenizer = AutoTokenizer.from_pretrained("gpt2", use_fast=True)
-    if tokenizer.pad_token is None:
-        tokenizer.pad_token = tokenizer.eos_token
-    return model, tokenizer
-
-documents = scan_files()
-texts, jsonl_path = split_and_prepare_data(documents)
-
-if False:
-    model, tokenizer = train_model()
-else:
-    model, tokenizer = load_model_and_tokenizer()
-
-
-# Create embeddings and vector store
-embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
-vector_store = FAISS.from_documents(texts, embeddings)
-vector_store.save_local(str(output_directory / "faiss_index"))
-logger.info(f"Saved FAISS index to {output_directory / 'faiss_index'}")
-# Create retrieval QA chain
-llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.7, top_p=0.9, repetition_penalty=1.2)
-hf_llm = HuggingFacePipeline(pipeline=llm_pipeline)
-qa_chain = RetrievalQA.from_chain_type(llm=hf_llm, chain_type="stuff", retriever=vector_store.as_retriever())
-# Save the QA chain
-import pickle
-with open(output_directory / "qa_chain.pkl", 'wb') as f:
-    pickle.dump(qa_chain, f)
-logger.info(f"Saved QA chain to {output_directory / 'qa_chain.pkl'}")
-# Example usage of the QA chain
-def answer_question(question: str) -> str:
-    return qa_chain.run(question)
-example_question = "How to create a new map in WOLF?"
-answer = answer_question(example_question)
-logger.info(f"Question: {example_question}\nAnswer: {answer}")
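The deleted script saves its FAISS index with save_local but then pickles the entire RetrievalQA chain, which ties the artifact to exact library versions. Reloading the saved index and rebuilding the retriever is the more usual route; a hedged sketch, assuming the same langchain_community / langchain_huggingface packages the script imported (recent langchain_community releases require allow_dangerous_deserialization=True because the saved store is pickle-based; the relative path is shortened here for illustration):

    from langchain_huggingface import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS

    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vector_store = FAISS.load_local(
        "models/chatwolf/faiss_index",   # path written by the deleted script
        embeddings,
        allow_dangerous_deserialization=True,
    )
    docs = vector_store.as_retriever().invoke("How to create a new map in WOLF?")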
{wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/WHEEL
File without changes
{wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/entry_points.txt
File without changes
{wolfhece-2.2.46.dist-info → wolfhece-2.2.47.dist-info}/top_level.txt
File without changes