jarvis-ai-assistant 0.1.106__py3-none-any.whl → 0.1.108__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_codebase/main.py +125 -53
- jarvis/jarvis_rag/main.py +282 -260
- {jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/METADATA +3 -3
- {jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/RECORD +9 -9
- {jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/entry_points.txt +0 -0
- {jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/top_level.txt +0 -0
jarvis/__init__.py
CHANGED
jarvis/jarvis_codebase/main.py
CHANGED
@@ -648,7 +648,7 @@ Note: Only include files that have a strong connection to the query."""
             return []
 
     def _generate_query_variants(self, query: str) -> List[str]:
-        """Generate different expressions of the query
+        """Generate different expressions of the query optimized for vector search
 
         Args:
            query: Original query
@@ -657,14 +657,44 @@ Note: Only include files that have a strong connection to the query."""
             List[str]: The query variants list
         """
         model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
-        prompt = f"""Please generate 3 different expressions based on the following query
+        prompt = f"""Please generate 3 different expressions optimized for vector search based on the following query. Each expression should:
+
+1. Focus on key technical concepts and terminology
+2. Use clear and specific language
+3. Include important contextual terms
+4. Avoid general or ambiguous words
+5. Maintain semantic similarity with original query
+6. Be suitable for embedding-based search
+
 Original query: {query}
 
-
+Example transformations:
+Query: "How to handle user login?"
+Output format:
+<QUESTION>
+- user authentication implementation and flow
+- login system architecture and components
+- credential validation and session management
+- ...
+</QUESTION>
+
+Please provide 10 search-optimized expressions in the specified format.
 """
-
-
-
+        response = model.chat_until_success(prompt)
+
+        # Parse the response using YAML format
+        import yaml
+        variants = []
+        question_match = re.search(r'<QUESTION>\n(.*?)</QUESTION>', response, re.DOTALL)
+        if question_match:
+            try:
+                variants = yaml.safe_load(question_match.group(1))
+            except Exception as e:
+                PrettyOutput.print(f"Failed to parse variants: {str(e)}", OutputType.ERROR)
+
+        # Add original query
+        variants.append(query)
+        return variants if variants else [query]
 
     def _vector_search(self, query_variants: List[str], top_k: int) -> Dict[str, Tuple[str, float, str]]:
         """Use vector search to find related files
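As a side note, the parsing step added above is a thin wrapper around a YAML list: the model is asked to emit its variants between <QUESTION> tags, and the dash list inside is loaded with yaml.safe_load. Below is a minimal standalone sketch of that path, using a hypothetical response string in place of the chat_until_success output:

import re
import yaml

# Hypothetical model output in the format the new prompt requests.
response = """<QUESTION>
- user authentication implementation and flow
- login system architecture and components
- credential validation and session management
</QUESTION>"""

variants = []
match = re.search(r'<QUESTION>\n(.*?)</QUESTION>', response, re.DOTALL)
if match:
    # The dash list inside the tags is valid YAML, so it loads as a list of strings.
    variants = yaml.safe_load(match.group(1))
variants.append("How to handle user login?")  # the original query is always appended
print(variants)

Note that the pattern requires a newline immediately after the opening tag; a response with the list on the same line would fall through to the [query] fallback.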
@@ -699,86 +729,128 @@ Please output 3 expressions directly, separated by two line breaks, without numb
 
 
     def search_similar(self, query: str, top_k: int = 30) -> List[str]:
-        """Search related files"""
+        """Search related files with optimized retrieval"""
         try:
             self.generate_codebase()
             if self.index is None:
-                return []
-
+                return []
+
+            # Generate query variants for better coverage
             query_variants = self._generate_query_variants(query)
 
-            #
-
-
-            results = list(vector_results.values())
-            results.sort(key=lambda x: x[1], reverse=True)
-
-            # Take the top top_k results for reordering
-            initial_results = results[:top_k]
+            # Collect results from all variants
+            all_results = []
+            seen_files = set()
 
-
-
+            for variant in query_variants:
+                # Get vector for each variant
+                query_vector = self.get_embedding(variant)
+                query_vector = query_vector.reshape(1, -1)
+
+                # Search with current variant
+                initial_k = min(top_k * 2, len(self.file_paths))
+                distances, indices = self.index.search(query_vector, initial_k)  # type: ignore
+
+                # Process results
+                for idx, dist in zip(indices[0], distances[0]):
+                    if idx != -1:
+                        file_path = self.file_paths[idx]
+                        if file_path not in seen_files:
+                            similarity = 1.0 / (1.0 + float(dist))
+                            if similarity > 0.3:  # Lower threshold for better recall
+                                seen_files.add(file_path)
+                                all_results.append((file_path, similarity, self.vector_cache[file_path]["description"]))
+
+            if not all_results:
                 return []
+
+            # Sort by similarity and take top_k
+            all_results.sort(key=lambda x: x[1], reverse=True)
+            results = all_results[:top_k]
 
-            #
-            initial_results = [(path, score, desc) for path, score, desc in initial_results if score >= 0.5]
-
+            # Display results with scores
             message = "Found related files:\n"
-            for path, score, _ in
-                message += f"File: {path}
+            for path, score, _ in results:
+                message += f"File: {path} (Score: {score:.3f})\n"
             PrettyOutput.print(message.rstrip(), output_type=OutputType.INFO, lang="markdown")
-
-
-            return self.pick_results(query, [path for path, _, _ in initial_results])
+
+            return [path for path, _, _ in results]
 
         except Exception as e:
             PrettyOutput.print(f"Failed to search: {str(e)}", output_type=OutputType.ERROR)
             return []
 
     def ask_codebase(self, query: str, top_k: int=20) -> str:
-        """Query the codebase"""
-
+        """Query the codebase with enhanced context building"""
+        files_from_codebase = self.search_similar(query, top_k)
 
         from jarvis.jarvis_code_agent.relevant_files import find_relevant_files_from_agent
-
+        files_from_agent = find_relevant_files_from_agent(query, files_from_codebase)
 
-        if not
+        if not files_from_agent:
             PrettyOutput.print("No related files found", output_type=OutputType.WARNING)
             return ""
 
-
-        for path in
-
-            PrettyOutput.print(
+        output = "Found related files:\n"
+        for path in files_from_agent:
+            output += f"- {path}\n"
+        PrettyOutput.print(output, output_type=OutputType.INFO, lang="markdown")
 
-
+        # Build enhanced prompt
+        prompt = f"""Based on the following code files, please provide a comprehensive and accurate answer to the user's question.
+
+Important guidelines:
+1. Focus on code-specific details and implementation
+2. Explain technical concepts clearly
+3. Include relevant code snippets when helpful
+4. If the code doesn't fully answer the question, indicate what's missing
+
+Question: {query}
+
+Relevant code files (ordered by relevance):
 """
-
+        # Add context with length control
+        available_length = self.max_context_length - len(prompt) - 1000  # Reserve space for answer
+        current_length = 0
+
+        for path in files_from_agent:
             try:
-                if len(prompt) > self.max_context_length:
-                    PrettyOutput.print(f"Avoid context overflow, discard low-related file: {path}", OutputType.WARNING)
-                    continue
                 content = open(path, "r", encoding="utf-8").read()
-
-                File
-
+                file_content = f"""
+File: {path}
+Content:
 {content}
-
+----------------------------------------
 """
+                if current_length + len(file_content) > available_length:
+                    PrettyOutput.print(
+                        "Due to context length limit, some files were omitted",
+                        output_type=OutputType.WARNING
+                    )
+                    break
+
+                prompt += file_content
+                current_length += len(file_content)
+
             except Exception as e:
                 PrettyOutput.print(f"Failed to read file {path}: {str(e)}",
-
+                                   output_type=OutputType.ERROR)
                 continue
-
-            prompt +=
-
-
-
-
-
+
+        prompt += """
+Please structure your answer as follows:
+1. Direct answer to the question
+2. Relevant code explanations
+3. Implementation details
+4. Any missing information or limitations
+5. Add reference files and code snippets at the end of the answer.
+
+Answer in Chinese using professional language.
 """
-
+
+        model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
         response = model.chat_until_success(prompt)
+
         return response
 
     def is_index_generated(self) -> bool:
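The reworked search_similar above boils down to: embed each query variant, run one FAISS lookup per variant, convert L2 distance to a similarity with 1 / (1 + d), and deduplicate hits by file path before sorting. A toy sketch of that merge loop with random stand-in embeddings (faiss-cpu and numpy are assumed; the embedding function and file list are fakes, and the 0.3 threshold is the value used in the diff):

import numpy as np
import faiss  # faiss-cpu

dim, top_k = 8, 5
file_paths = [f"src/file_{i}.py" for i in range(20)]                # stand-in corpus
index = faiss.IndexFlatL2(dim)
index.add(np.random.rand(len(file_paths), dim).astype("float32"))   # stand-in vectors

def fake_embedding(text: str) -> np.ndarray:
    # Stand-in for self.get_embedding(): deterministic pseudo-vector per text.
    rng = np.random.default_rng(abs(hash(text)) % (2**32))
    return rng.random(dim, dtype=np.float32)

all_results, seen_files = [], set()
for variant in ["user login flow", "authentication implementation", "session handling"]:
    query_vector = fake_embedding(variant).reshape(1, -1)
    distances, indices = index.search(query_vector, min(top_k * 2, len(file_paths)))
    for idx, dist in zip(indices[0], distances[0]):
        if idx == -1:
            continue
        path = file_paths[idx]
        similarity = 1.0 / (1.0 + float(dist))   # distance -> similarity, as in the diff
        if path not in seen_files and similarity > 0.3:
            seen_files.add(path)
            all_results.append((path, similarity))

all_results.sort(key=lambda x: x[1], reverse=True)
print(all_results[:top_k])

Deduplicating across variants before the final sort is what keeps one strongly matching file from appearing several times with slightly different scores.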
jarvis/jarvis_rag/main.py
CHANGED
@@ -16,6 +16,8 @@ from datetime import datetime
 import lzma  # add lzma import
 from concurrent.futures import ThreadPoolExecutor
 from threading import Lock
+import concurrent.futures
+import re
 
 @dataclass
 class Document:
@@ -148,7 +150,7 @@ class RAGTool:
         self.max_context_length = int(get_max_context_length() * 0.8)
 
         # Initialize data directory
-        self.data_dir = os.path.join(self.root_dir, ".jarvis
+        self.data_dir = os.path.join(self.root_dir, ".jarvis/rag")
         if not os.path.exists(self.data_dir):
             os.makedirs(self.data_dir)
 
@@ -182,6 +184,52 @@ class RAGTool:
         self.thread_count = get_thread_count()
         self.vector_lock = Lock()  # Protect vector list concurrency
 
+        # Initialize GPU memory configuration
+        self.gpu_config = self._init_gpu_config()
+
+    def _init_gpu_config(self) -> Dict:
+        """Initialize GPU configuration based on available hardware
+
+        Returns:
+            Dict: GPU configuration including memory sizes and availability
+        """
+        config = {
+            "has_gpu": False,
+            "shared_memory": 0,
+            "device_memory": 0,
+            "memory_fraction": 0.8  # use 80% of available memory by default
+        }
+
+        try:
+            import torch
+            if torch.cuda.is_available():
+                # Get GPU information
+                gpu_mem = torch.cuda.get_device_properties(0).total_memory
+                config["has_gpu"] = True
+                config["device_memory"] = gpu_mem
+
+                # Estimate shared memory (usually a portion of system memory)
+                import psutil
+                system_memory = psutil.virtual_memory().total
+                config["shared_memory"] = min(system_memory * 0.5, gpu_mem * 2)  # smaller of 50% of system memory or 2x GPU memory
+
+                # Configure CUDA memory allocation
+                torch.cuda.set_per_process_memory_fraction(config["memory_fraction"])
+                torch.cuda.empty_cache()
+
+                PrettyOutput.print(
+                    f"GPU initialized: {torch.cuda.get_device_name(0)}\n"
+                    f"Device Memory: {gpu_mem / 1024**3:.1f}GB\n"
+                    f"Shared Memory: {config['shared_memory'] / 1024**3:.1f}GB",
+                    output_type=OutputType.SUCCESS
+                )
+            else:
+                PrettyOutput.print("No GPU available, using CPU mode", output_type=OutputType.WARNING)
+        except Exception as e:
+            PrettyOutput.print(f"GPU initialization failed: {str(e)}", output_type=OutputType.WARNING)
+
+        return config
+
     def _load_cache(self):
         """Load cache data"""
         if os.path.exists(self.cache_path):
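For reference, the GPU setup added above reduces to a small probe: if torch sees a CUDA device, record its memory, derive a shared-memory budget from system RAM through psutil, and cap the per-process allocation. A condensed sketch with the same constants as the diff (80% memory fraction, min of 50% of RAM and 2x device memory); it quietly falls back to a CPU-only config when torch, psutil, or a GPU is missing:

def probe_gpu(memory_fraction: float = 0.8) -> dict:
    config = {"has_gpu": False, "shared_memory": 0, "device_memory": 0,
              "memory_fraction": memory_fraction}
    try:
        import torch
        import psutil
        if torch.cuda.is_available():
            gpu_mem = torch.cuda.get_device_properties(0).total_memory
            system_mem = psutil.virtual_memory().total
            config.update(
                has_gpu=True,
                device_memory=gpu_mem,
                shared_memory=min(system_mem * 0.5, gpu_mem * 2),  # same rule as the diff
            )
            torch.cuda.set_per_process_memory_fraction(memory_fraction)
            torch.cuda.empty_cache()
    except Exception as exc:
        print(f"GPU probe failed, falling back to CPU: {exc}")
    return config

print(probe_gpu())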
@@ -323,45 +371,102 @@
                                                   show_progress_bar=False)
             return np.array(embedding, dtype=np.float32)
 
-    def _get_embedding_batch(self, texts: List[str]) -> np.ndarray:
-        """Get
-
-        Args:
-            texts: Text list
-
-        Returns:
-            np.ndarray: Vector representation array
-        """
+    def _get_embedding_batch(self, texts: List[str], batch_size: int = 32) -> np.ndarray:
+        """Get embeddings for a batch of texts efficiently"""
         try:
-
-
-
-
-
+            if self.gpu_config["has_gpu"]:
+                import torch
+                torch.cuda.empty_cache()
+
+                # Use a smaller batch size
+                optimal_batch_size = min(16, len(texts))
+                all_embeddings = []
+
+                with tqdm(total=len(texts), desc="Vectorizing") as pbar:
+                    for i in range(0, len(texts), optimal_batch_size):
+                        try:
+                            batch = texts[i:i + optimal_batch_size]
+                            embeddings = self.embedding_model.encode(
+                                batch,
+                                normalize_embeddings=True,
+                                show_progress_bar=False,
+                                batch_size=4,  # reduce the internal batch size
+                                convert_to_tensor=True
+                            )
+                            # Move to CPU immediately
+                            embeddings = embeddings.cpu().numpy()
+                            all_embeddings.append(embeddings)
+                            pbar.update(len(batch))
+
+                            # Clear the GPU cache
+                            torch.cuda.empty_cache()
+
+                        except RuntimeError as e:
+                            if "out of memory" in str(e):
+                                # If out of memory, reduce the batch size and retry
+                                if optimal_batch_size > 4:
+                                    optimal_batch_size //= 2
+                                    PrettyOutput.print(
+                                        f"CUDA out of memory, reducing batch size to {optimal_batch_size}",
+                                        OutputType.WARNING
+                                    )
+                                    i -= optimal_batch_size  # retry the current batch
+                                    continue
+                            raise
+
+                return np.vstack(all_embeddings)
+            else:
+                # CPU mode
+                return self.embedding_model.encode(
+                    texts,
+                    normalize_embeddings=True,
+                    show_progress_bar=True,
+                    batch_size=8,
+                    convert_to_tensor=False
+                )
+
         except Exception as e:
-            PrettyOutput.print(f"
-                             output_type=OutputType.ERROR)
+            PrettyOutput.print(f"Batch embedding failed: {str(e)}", OutputType.ERROR)
             return np.zeros((len(texts), self.vector_dim), dtype=np.float32)  # type: ignore
 
-    def _process_document_batch(self, documents: List[Document]) ->
-        """Process a batch of documents
+    def _process_document_batch(self, documents: List[Document]) -> np.ndarray:
+        """Process a batch of documents using shared memory
 
         Args:
-            documents:
+            documents: List of documents to process
 
         Returns:
-
+            np.ndarray: Document vectors
         """
-
-
-
-
-
-
-        """
-            texts.append(combined_text)
+        try:
+            import torch
+
+            # Estimate memory requirements
+            total_content_size = sum(len(doc.content) for doc in documents)
+            est_memory_needed = total_content_size * 4  # rough estimate
 
-
+            # If the estimated memory exceeds the shared-memory limit, process in batches
+            if est_memory_needed > self.gpu_config["shared_memory"] * 0.7:
+                batch_size = max(1, int(len(documents) * (self.gpu_config["shared_memory"] * 0.7 / est_memory_needed)))
+
+                all_vectors = []
+                for i in range(0, len(documents), batch_size):
+                    batch = documents[i:i + batch_size]
+                    vectors = self._process_document_batch(batch)
+                    all_vectors.append(vectors)
+                return np.vstack(all_vectors)
+
+            # Process a single batch normally
+            texts = []
+            for doc in documents:
+                combined_text = f"File:{doc.metadata['file_path']} Content:{doc.content}"
+                texts.append(combined_text)
+
+            return self._get_embedding_batch(texts)
+
+        except Exception as e:
+            PrettyOutput.print(f"Batch processing failed: {str(e)}", OutputType.ERROR)
+            return np.zeros((0, self.vector_dim), dtype=np.float32)  # type: ignore
 
     def _process_file(self, file_path: str) -> List[Document]:
         """Process a single file"""
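The batching logic above encodes texts in small chunks, moves each result to the CPU, and halves the chunk size when CUDA reports an out-of-memory error. A sketch of that backoff written with a while loop so the failed slice really is re-run (the encoder is assumed to be a sentence-transformers model, as elsewhere in this module; model and texts are supplied by the caller):

import numpy as np

def encode_with_backoff(model, texts, chunk_size=16, min_chunk=4):
    """Encode texts in chunks, halving the chunk size on CUDA OOM."""
    out, i = [], 0
    while i < len(texts):
        batch = texts[i:i + chunk_size]
        try:
            emb = model.encode(batch, normalize_embeddings=True,
                               show_progress_bar=False, convert_to_tensor=True)
            out.append(emb.cpu().numpy())
            i += len(batch)
        except RuntimeError as exc:
            if "out of memory" in str(exc) and chunk_size > min_chunk:
                chunk_size //= 2   # retry the same slice with a smaller chunk
                continue
            raise
    return np.vstack(out)

In the context of this class it would be called roughly as encode_with_backoff(self.embedding_model, texts) (hypothetical usage).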
@@ -419,15 +524,18 @@ Content: {doc.content}
             return []
 
     def build_index(self, dir: str):
-        """Build document index"""
+        """Build document index with optimized processing"""
         # Get all files
         all_files = []
         for root, _, files in os.walk(dir):
-
+            # Skip .jarvis directories and other ignored paths
+            if any(ignored in root for ignored in ['.git', '__pycache__', 'node_modules', '.jarvis']) or \
                any(part.startswith('.jarvis-') for part in root.split(os.sep)):
                 continue
+
             for file in files:
-
+                # Skip .jarvis files
+                if '.jarvis' in root:
                     continue
 
                 file_path = os.path.join(root, file)
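The directory filter added to build_index tests each root path for ignored substrings. An equivalent, slightly cheaper pattern is to prune the directory list in place so os.walk never descends into ignored trees at all; a small sketch of that alternative (the ignore set mirrors the one in the diff):

import os

IGNORED = {'.git', '__pycache__', 'node_modules', '.jarvis'}

def iter_source_files(root_dir):
    for root, dirs, files in os.walk(root_dir):
        # Prune in place: os.walk will not enter directories removed from dirs.
        dirs[:] = [d for d in dirs
                   if d not in IGNORED and not d.startswith('.jarvis-')]
        for name in files:
            yield os.path.join(root, name)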
@@ -464,209 +572,132 @@ Content: {doc.content}
         unchanged_documents = [doc for doc in self.documents
                                if doc.metadata['file_path'] in unchanged_files]
 
-        # Process
-        new_documents = []
+        # Process files in parallel with optimized vectorization
         if files_to_process:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            # Merge new and old vectors
-            if self.flat_index is not None:
-                # Get vectors for unchanged documents
-                unchanged_vectors = []
-                for doc in unchanged_documents:
-                    # Get vectors from existing index
-                    doc_idx = next((i for i, d in enumerate(self.documents)
-                                    if d.metadata['file_path'] == doc.metadata['file_path']), None)
-                    if doc_idx is not None:
-                        # Reconstruct vectors from flat index
-                        vector = np.zeros((1, self.vector_dim), dtype=np.float32)  # type: ignore
-                        self.flat_index.reconstruct(doc_idx, vector.ravel())
-                        unchanged_vectors.append(vector)
+            PrettyOutput.print(f"Processing {len(files_to_process)} files...", OutputType.INFO)
+
+            # Step 1: extract text content in parallel
+            documents_to_process = []
+            with ThreadPoolExecutor(max_workers=self.thread_count) as executor:
+                futures = {
+                    executor.submit(self._process_file, file_path): file_path
+                    for file_path in files_to_process
+                }
+
+                with tqdm(total=len(files_to_process), desc="Extracting text") as pbar:
+                    for future in concurrent.futures.as_completed(futures):
+                        try:
+                            docs = future.result()
+                            if docs:
+                                documents_to_process.extend(docs)
+                            pbar.update(1)
+                        except Exception as e:
+                            PrettyOutput.print(f"File processing failed: {str(e)}", OutputType.ERROR)
+                            pbar.update(1)
+
+            # Step 2: optimized batch vectorization
+            if documents_to_process:
+                PrettyOutput.print(f"Vectorizing {len(documents_to_process)} documents...", OutputType.INFO)
+
+                # Prepare texts for vectorization
+                texts_to_vectorize = []
+                for doc in documents_to_process:
+                    # Combine text compactly to reduce memory usage
+                    combined_text = f"File:{doc.metadata['file_path']} Content:{doc.content}"
+                    texts_to_vectorize.append(combined_text)
+
+                # Use a smaller initial batch size
+                initial_batch_size = min(
+                    32,  # maximum batch size
+                    max(4, len(texts_to_vectorize) // 8),  # batch size based on document count
+                    len(texts_to_vectorize)  # no more than the total document count
+                )
 
-
-
-
+                # Vectorize in batches
+                vectors = self._get_embedding_batch(texts_to_vectorize, initial_batch_size)
+
+                # Update documents and index
+                self.documents.extend(documents_to_process)
+
+                # Build the final index
+                if self.flat_index is not None:
+                    # Get vectors for unchanged documents
+                    unchanged_vectors = self._get_unchanged_vectors(unchanged_documents)
+                    if unchanged_vectors is not None:
+                        final_vectors = np.vstack([unchanged_vectors, vectors])
+                    else:
+                        final_vectors = vectors
                 else:
-
-            else:
-                vectors = np.vstack(new_vectors)
+                    final_vectors = vectors
 
-
-
-
-
-
-
-
+                # Build the index and save the cache
+                self._build_index(final_vectors)
+                self._save_cache(final_vectors)
+
+                PrettyOutput.print(
+                    f"Indexed {len(self.documents)} documents "
+                    f"(New/Modified: {len(documents_to_process)}, "
+                    f"Unchanged: {len(unchanged_documents)})",
+                    OutputType.SUCCESS
+                )
+
+    def _get_unchanged_vectors(self, unchanged_documents: List[Document]) -> Optional[np.ndarray]:
+        """Get vectors for unchanged documents from existing index"""
+        try:
+            if not unchanged_documents or self.flat_index is None:
+                return None
+
+            unchanged_vectors = []
+            for doc in unchanged_documents:
+                doc_idx = next((i for i, d in enumerate(self.documents)
+                                if d.metadata['file_path'] == doc.metadata['file_path']), None)
+                if doc_idx is not None:
+                    vector = np.zeros((1, self.vector_dim), dtype=np.float32)  # type: ignore
+                    self.flat_index.reconstruct(doc_idx, vector.ravel())
+                    unchanged_vectors.append(vector)
+
+            return np.vstack(unchanged_vectors) if unchanged_vectors else None
+
+        except Exception as e:
+            PrettyOutput.print(f"Failed to get unchanged vectors: {str(e)}", OutputType.ERROR)
+            return None
 
     def search(self, query: str, top_k: int = 30) -> List[Tuple[Document, float]]:
-        """
+        """Search documents using vector similarity
+
+        Args:
+            query: Search query
+            top_k: Number of results to return
+        """
         if not self.index:
             PrettyOutput.print("Index not built, building...", output_type=OutputType.INFO)
             self.build_index(self.root_dir)
-
-        # Implement MMR (Maximal Marginal Relevance) to increase result diversity
-        def mmr(query_vec, doc_vecs, doc_ids, lambda_param=0.5, n_docs=top_k):
-            selected = []
-            selected_ids = []
-
-            while len(selected) < n_docs and len(doc_ids) > 0:
-                best_score = -1
-                best_idx = -1
-
-                for i, (doc_vec, doc_id) in enumerate(zip(doc_vecs, doc_ids)):
-                    # Calculate similarity with query
-                    query_sim = float(np.dot(query_vec, doc_vec))
-
-                    # Calculate maximum similarity with selected documents
-                    if selected:
-                        doc_sims = [float(np.dot(doc_vec, selected_doc)) for selected_doc in selected]
-                        max_doc_sim = max(doc_sims)
-                    else:
-                        max_doc_sim = 0
-
-                    # MMR score
-                    score = lambda_param * query_sim - (1 - lambda_param) * max_doc_sim
-
-                    if score > best_score:
-                        best_score = score
-                        best_idx = i
-
-                if best_idx == -1:
-                    break
-
-                selected.append(doc_vecs[best_idx])
-                selected_ids.append(doc_ids[best_idx])
-                doc_vecs = np.delete(doc_vecs, best_idx, axis=0)
-                doc_ids = np.delete(doc_ids, best_idx)
 
-            return selected_ids
-
         # Get query vector
         query_vector = self._get_embedding(query)
         query_vector = query_vector.reshape(1, -1)
 
-        #
-        initial_k = min(top_k *
+        # Search with more candidates
+        initial_k = min(top_k * 4, len(self.documents))
         distances, indices = self.index.search(query_vector, initial_k)  # type: ignore
 
-        #
-        valid_indices = indices[0][indices[0] != -1]
-        valid_vectors = np.vstack([self._get_embedding(self.documents[idx].content) for idx in valid_indices])
-
-        # Apply MMR
-        final_indices = mmr(query_vector[0], valid_vectors, valid_indices, n_docs=top_k)
-
-        # Build results
+        # Process results
         results = []
-
-
-
-
+        seen_files = set()
+        for idx, dist in zip(indices[0], distances[0]):
+            if idx != -1:
+                doc = self.documents[idx]
+                similarity = 1.0 / (1.0 + float(dist))
+                if similarity > 0.3:  # lower the filter threshold to get more results
+                    file_path = doc.metadata['file_path']
+                    if file_path not in seen_files:
+                        seen_files.add(file_path)
+                        results.append((doc, similarity))
+                        if len(results) >= top_k:
+                            break
 
         return results
 
-    def _rerank_results(self, query: str, initial_results: List[Tuple[Document, float]]) -> List[Tuple[Document, float]]:
-        """Use rerank model to rerank search results"""
-        try:
-            import torch
-            model, tokenizer = load_rerank_model()
-
-            # Prepare data
-            pairs = []
-            for doc, _ in initial_results:
-                # Combine document information
-                doc_content = f"""
-File: {doc.metadata['file_path']}
-Content: {doc.content}
-"""
-                pairs.append([query, doc_content])
-
-            # Score each document pair
-            scores = []
-            batch_size = 8
-
-            with torch.no_grad():
-                for i in range(0, len(pairs), batch_size):
-                    batch_pairs = pairs[i:i + batch_size]
-                    encoded = tokenizer(
-                        batch_pairs,
-                        padding=True,
-                        truncation=True,
-                        max_length=512,
-                        return_tensors='pt'
-                    )
-
-                    if torch.cuda.is_available():
-                        encoded = {k: v.cuda() for k, v in encoded.items()}
-
-                    outputs = model(**encoded)
-                    batch_scores = outputs.logits.squeeze(-1).cpu().numpy()
-                    scores.extend(batch_scores.tolist())
-
-            # Normalize scores to 0-1 range
-            if scores:
-                min_score = min(scores)
-                max_score = max(scores)
-                if max_score > min_score:
-                    scores = [(s - min_score) / (max_score - min_score) for s in scores]
-
-            # Combine scores with documents and sort
-            scored_results = []
-            for (doc, _), score in zip(initial_results, scores):
-                if score >= 0.5:  # Only keep results with a score greater than 0.5
-                    scored_results.append((doc, float(score)))
-
-            # Sort by score in descending order
-            scored_results.sort(key=lambda x: x[1], reverse=True)
-
-            return scored_results
-
-        except Exception as e:
-            PrettyOutput.print(f"Failed to rerank, using original sorting: {str(e)}", output_type=OutputType.WARNING)
-            return initial_results
-
-    def is_index_built(self):
-        """Check if index is built"""
-        return self.index is not None
-
     def query(self, query: str) -> List[Document]:
         """Query related documents
 
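The new _get_unchanged_vectors above avoids re-embedding unchanged documents by reading their vectors back out of the existing flat index with faiss's reconstruct, the same call pattern the diff uses. A toy round trip showing that call (it works on index types that store raw vectors, such as IndexFlatL2):

import numpy as np
import faiss

dim = 4
vectors = np.arange(12, dtype="float32").reshape(3, dim)
index = faiss.IndexFlatL2(dim)
index.add(vectors)

# Read vector 1 back out of the index instead of recomputing its embedding.
recovered = np.zeros((1, dim), dtype="float32")
index.reconstruct(1, recovered.ravel())
assert np.allclose(recovered, vectors[1])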
@@ -674,82 +705,73 @@ Content: {doc.content}
             query: Query text
 
         Returns:
-            List[Document]: Related documents
+            List[Document]: Related documents
         """
         results = self.search(query)
         return [doc for doc, _ in results]
 
     def ask(self, question: str) -> Optional[str]:
-        """Ask about documents
-
-        Args:
-            question: User question
-
-        Returns:
-            Model answer, return None if failed
-        """
+        """Ask questions about documents with enhanced context building"""
         try:
-            #
-            results = self.
+            # Search for relevant documents
+            results = self.search(question)
             if not results:
                 return None
 
-            #
-            for doc in results:
-                output = f"""File: {doc.metadata['file_path']}\n"""
+            # Display the documents found
+            for doc, score in results:
+                output = f"""File: {doc.metadata['file_path']} (Score: {score:.3f})\n"""
                 output += f"""Fragment {doc.metadata['chunk_index'] + 1}/{doc.metadata['total_chunks']}\n"""
                 output += f"""Content:\n{doc.content}\n"""
                 PrettyOutput.print(output, output_type=OutputType.INFO, lang="markdown")
+
+            # Build the prompt
+            prompt = f"""Based on the following document fragments, please answer the user's question accurately and comprehensively.
 
-
-            base_prompt = f"""Please answer the user's question based on the following document fragments. If the document content is not sufficient to answer the question completely, please clearly indicate.
-
-User question: {question}
+Question: {question}
 
-
+Relevant documents (ordered by relevance):
 """
-
-
-            # Calculate the maximum length that can be used for document content
-            # Leave some space for the model's answer
-            available_length = self.max_context_length - len(base_prompt) - len(end_prompt) - 500
-
-            # Build context, while controlling the total length
-            context = []
+            # Add context while controlling length
+            available_length = self.max_context_length - len(prompt) - 1000
             current_length = 0
 
-            for doc in results:
-                # Calculate the length of this document fragment's content
+            for doc, score in results:
                 doc_content = f"""
-
-Content:
+[Score: {score:.3f}] {doc.metadata['file_path']}:
 {doc.content}
 ---
 """
-
-
-
-
-
-                        output_type=OutputType.WARNING)
+                if current_length + len(doc_content) > available_length:
+                    PrettyOutput.print(
+                        "Due to context length limit, some fragments were omitted",
+                        output_type=OutputType.WARNING
+                    )
                     break
 
-
-                current_length +=
-
-
-            prompt = base_prompt + ''.join(context) + end_prompt
+                prompt += doc_content
+                current_length += len(doc_content)
+
+            prompt += "\nIf the documents don't fully answer the question, please indicate what information is missing."
 
-            #
+            # Use the normal platform for document Q&A
             model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
             response = model.chat_until_success(prompt)
 
             return response
 
         except Exception as e:
-            PrettyOutput.print(f"Failed to answer: {str(e)}",
+            PrettyOutput.print(f"Failed to answer: {str(e)}", OutputType.ERROR)
             return None
 
+    def is_index_built(self) -> bool:
+        """Check if the index is built and valid
+
+        Returns:
+            bool: True if index is built and valid
+        """
+        return self.index is not None and len(self.documents) > 0
+
 def main():
     """Main function"""
     import argparse
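Both the reworked ask here and ask_codebase in jarvis_codebase/main.py assemble their prompts the same way: append fragments in relevance order and stop once a character budget derived from max_context_length is spent. A generic sketch of that accumulation loop (the 1000-character reserve is the value used in the diff; the fragment strings are whatever the caller formats):

def build_context(prompt: str, fragments, max_context_length: int, reserve: int = 1000) -> str:
    """Append fragments in order until the character budget runs out."""
    available = max_context_length - len(prompt) - reserve
    used = 0
    for fragment in fragments:
        if used + len(fragment) > available:
            break  # remaining, lower-ranked fragments are dropped
        prompt += fragment
        used += len(fragment)
    return prompt

# Hypothetical usage:
# prompt = build_context(header, [f"[{score:.3f}] {path}:\n{text}\n---\n"
#                                 for path, score, text in ranked], 32000)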
{jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: jarvis-ai-assistant
-Version: 0.1.106
+Version: 0.1.108
 Summary: Jarvis: An AI assistant that uses tools to interact with the system
 Home-page: https://github.com/skyfireitdiy/Jarvis
 Author: skyfire
@@ -154,8 +154,8 @@ Jarvis supports configuration through environment variables that can be set in t
 | JARVIS_NEED_SUMMARY | Generate summaries | true | No |
 | JARVIS_DONT_USE_LOCAL_MODEL | Avoid using local models | false | No |
 | OPENAI_API_KEY | API key for OpenAI platform | - | Required for OpenAI |
-| OPENAI_API_BASE | Base URL for OpenAI API | https://api.
-| OPENAI_MODEL_NAME | Model name for OpenAI |
+| OPENAI_API_BASE | Base URL for OpenAI API | https://api.openai.com | No |
+| OPENAI_MODEL_NAME | Model name for OpenAI | gpt-4o | No |
 | AI8_API_KEY | API key for AI8 platform | - | Required for AI8 |
 | KIMI_API_KEY | API key for Kimi platform | - | Required for Kimi |
 | OYI_API_KEY | API key for OYI platform | - | Required for OYI |
{jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-jarvis/__init__.py,sha256=
+jarvis/__init__.py,sha256=hCWwxdx-HHSIwE8vf6Yfn-___pFPgINrCmQcUL8j_XI,51
 jarvis/agent.py,sha256=7FDrJc2_JlY9u7TRfeHKZRQ0PrY04r-0w4H64eGcbUM,22626
 jarvis/utils.py,sha256=0w1rYsSovS7vgbHNdfdzpo3zSb3y-KWM7RvYMqBhDnM,22086
 jarvis/jarvis_code_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -7,7 +7,7 @@ jarvis/jarvis_code_agent/file_select.py,sha256=KNxalhepCM2e-V__ca8ErmbXSXHP_1xmd
 jarvis/jarvis_code_agent/patch.py,sha256=bbNB8k8mebjPVsNdI8aT3oOyjLyAhUQbKmX54tyziDk,4034
 jarvis/jarvis_code_agent/relevant_files.py,sha256=PxSKQyHfCe6878bDqP6XyQd_jwcvNK4a9YKTfpLImRI,6160
 jarvis/jarvis_codebase/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jarvis/jarvis_codebase/main.py,sha256=
+jarvis/jarvis_codebase/main.py,sha256=UfFLwOZQoOKSKnpk_Ive1azEYO4-Cm8fICxULGAvqUU,39889
 jarvis/jarvis_lsp/base.py,sha256=_7pdbMKjdtYBW0DsRbjIodDHM3J7df-YgXHejN_WIrU,4490
 jarvis/jarvis_lsp/cpp.py,sha256=F7Zo3BErkvtWS1_H9zQO83pX_FUmnijux-2SjhWzKCE,4985
 jarvis/jarvis_lsp/go.py,sha256=p8LULiFdq4qjDYQzXFlzH0-FQZ3IyfiwN_sbO9i0L_A,5310
@@ -25,7 +25,7 @@ jarvis/jarvis_platform/registry.py,sha256=9QLoihcnkYckrCzgNnlTqaLn_z_HMhaxMSyUNb
 jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jarvis/jarvis_platform_manager/main.py,sha256=17607aNAStqJ1sOQLTGi6Tnv-cIQme_r5YvbB_S3enc,4985
 jarvis/jarvis_rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jarvis/jarvis_rag/main.py,sha256=
+jarvis/jarvis_rag/main.py,sha256=3BpRz4XrdSLUmH1IENXoKAFqjMxckwtWrGrhdUxOsNw,34626
 jarvis/jarvis_smart_shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jarvis/jarvis_smart_shell/main.py,sha256=VdUR-x932OccEwU0pcQM_pb_I4yfrAutE3hfm6jf5es,3955
 jarvis/jarvis_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -54,9 +54,9 @@ jarvis/jarvis_tools/read_webpage.py,sha256=JCReSXhkDHDkQ606sZYIKG1Itlprjpmu1sSbF
 jarvis/jarvis_tools/registry.py,sha256=mkAQ1NDPwDy1ESAaAFnzSxAJRrhce3NO3E7cWkb-quA,11732
 jarvis/jarvis_tools/search.py,sha256=PLSSNETyajpqDoStCTfkoy-D41IMNudTuVzonMlT6Aw,9225
 jarvis/jarvis_tools/select_code_files.py,sha256=bjJGwCNw0Ue_8jW60K1gcy1rUgKqoHihicu5SS58WNk,1890
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
+jarvis_ai_assistant-0.1.108.dist-info/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
+jarvis_ai_assistant-0.1.108.dist-info/METADATA,sha256=OWN4NKRNvFiqIc7mL9XN4-a488mjwbV_TCUX5bhyFkQ,14153
+jarvis_ai_assistant-0.1.108.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+jarvis_ai_assistant-0.1.108.dist-info/entry_points.txt,sha256=UYj4FYvOH8jJ0GgCJTA_TAmJ3wvikos-hUVbCwt_KOc,480
+jarvis_ai_assistant-0.1.108.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
+jarvis_ai_assistant-0.1.108.dist-info/RECORD,,
File without changes
|
|
File without changes
|
{jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/entry_points.txt
RENAMED
|
File without changes
|
{jarvis_ai_assistant-0.1.106.dist-info → jarvis_ai_assistant-0.1.108.dist-info}/top_level.txt
RENAMED
|
File without changes
|