jarvis-ai-assistant 0.1.109-py3-none-any.whl → 0.1.110-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of jarvis-ai-assistant might be problematic.

jarvis/__init__.py CHANGED
@@ -1,3 +1,3 @@
  """Jarvis AI Assistant"""
 
- __version__ = "0.1.109"
+ __version__ = "0.1.110"
jarvis/agent.py CHANGED
@@ -8,7 +8,7 @@ import yaml
  from jarvis.jarvis_platform.base import BasePlatform
  from jarvis.jarvis_platform.registry import PlatformRegistry
  from jarvis.jarvis_tools.registry import ToolRegistry, tool_call_help
- from jarvis.utils import PrettyOutput, OutputType, get_context_token_count, is_auto_complete, is_need_summary, is_record_methodology, load_methodology, add_agent, delete_current_agent, get_max_context_length, get_multiline_input, init_env, is_use_methodology
+ from jarvis.utils import PrettyOutput, OutputType, get_context_token_count, is_auto_complete, is_need_summary, is_record_methodology, load_methodology, add_agent, delete_current_agent, get_max_token_count, get_multiline_input, init_env, is_use_methodology
  import os
 
  class Agent:
@@ -89,7 +89,7 @@ class Agent:
  Please describe in concise bullet points, highlighting important information.
  """
 
- self.max_context_length = max_context_length if max_context_length is not None else get_max_context_length()
+ self.max_token_count = max_context_length if max_context_length is not None else get_max_token_count()
 
  self.auto_complete = auto_complete if auto_complete is not None else is_auto_complete()
 
@@ -325,7 +325,7 @@ Please continue the task based on the above information.
  self.conversation_length += get_context_token_count(self.prompt)
 
  # If the conversation history length exceeds the limit, add a reminder to the prompt
- if self.conversation_length > self.max_context_length:
+ if self.conversation_length > self.max_token_count:
  current_response = self._summarize_and_clear_history()
  continue
  else:
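The hunk above only renames the budget attribute; the underlying pattern is to keep a running token count for the conversation and summarize once it crosses the configured ceiling. Below is a minimal, hypothetical sketch of that pattern in plain Python; count_tokens and the Conversation class are illustrative stand-ins, not Jarvis APIs.

def count_tokens(text):
    # crude stand-in: roughly one token per four characters
    return max(1, len(text) // 4)

class Conversation:
    def __init__(self, max_token_count=131072):
        self.max_token_count = max_token_count  # mirrors the JARVIS_MAX_TOKEN_COUNT default
        self.history = []
        self.conversation_length = 0

    def add(self, message):
        self.history.append(message)
        self.conversation_length += count_tokens(message)
        # same check as the hunk above: summarize once the budget is exceeded
        if self.conversation_length > self.max_token_count:
            self._summarize_and_clear_history()

    def _summarize_and_clear_history(self):
        # Jarvis asks an LLM for a summary here; this sketch just keeps the last message
        self.history = self.history[-1:]
        self.conversation_length = sum(count_tokens(m) for m in self.history)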
jarvis/jarvis_codebase/main.py CHANGED
@@ -7,7 +7,7 @@ from typing import List, Tuple, Optional, Dict
  from jarvis.jarvis_platform.registry import PlatformRegistry
  import concurrent.futures
  from concurrent.futures import ThreadPoolExecutor
- from jarvis.utils import OutputType, PrettyOutput, find_git_root, get_context_token_count, get_embedding, get_file_md5, get_max_context_length, get_thread_count, load_embedding_model, user_confirm
+ from jarvis.utils import OutputType, PrettyOutput, find_git_root, get_context_token_count, get_embedding, get_file_md5, get_max_token_count, get_thread_count, load_embedding_model, user_confirm
  from jarvis.utils import init_env
  import argparse
  import pickle
@@ -21,7 +21,7 @@ class CodeBase:
  self.root_dir = root_dir
  os.chdir(self.root_dir)
  self.thread_count = get_thread_count()
- self.max_context_length = get_max_context_length()
+ self.max_token_count = get_max_token_count()
  self.index = None
 
  # Initialize data directory
@@ -218,7 +218,7 @@ Code content:
  return cached_vector
 
  # Read the file content and combine information
- content = open(file_path, "r", encoding="utf-8").read()[:self.max_context_length] # Limit the file content length
+ content = open(file_path, "r", encoding="utf-8").read()[:self.max_token_count] # Limit the file content length
 
  # Combine file information, including file content
  combined_text = f"""
@@ -541,7 +541,7 @@ Content: {content}
  PrettyOutput.print(f"Picking results for query: \n" + "\n".join(query), output_type=OutputType.INFO)
 
  # Maximum content length per batch
- max_batch_length = self.max_context_length - 1000 # Reserve space for prompt
+ max_batch_length = self.max_token_count - 1000 # Reserve space for prompt
  max_file_length = max_batch_length // 3 # Limit individual file size
 
  # Process files in batches
@@ -805,7 +805,7 @@ Question: {query}
  Relevant code files (ordered by relevance):
  """
  # Add context with length control
- available_count = self.max_context_length - get_context_token_count(prompt) - 1000 # Reserve space for answer
+ available_count = self.max_token_count - get_context_token_count(prompt) - 1000 # Reserve space for answer
  current_count = 0
 
  for path in files_from_codebase:
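The available_count change above is again just the rename; the surrounding logic budgets tokens when assembling code context for a question, reserving room for the prompt and the answer. A rough sketch of that budgeting pattern, with a hypothetical count_tokens helper standing in for get_context_token_count:

from pathlib import Path

def count_tokens(text):
    return max(1, len(text) // 4)  # rough stand-in for a real tokenizer

def build_context(prompt, files, max_token_count=131072, answer_reserve=1000):
    # Reserve room for the prompt itself and the model's answer, as the diff does.
    available = max_token_count - count_tokens(prompt) - answer_reserve
    parts, used = [], 0
    for path in files:
        content = Path(path).read_text(encoding="utf-8", errors="ignore")
        cost = count_tokens(content)
        if used + cost > available:
            break  # budget exhausted; remaining files are skipped
        parts.append(f"File: {path}\n{content}")
        used += cost
    return prompt + "\n\n" + "\n\n".join(parts)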
jarvis/jarvis_platform/oyi.py CHANGED
@@ -2,7 +2,7 @@ import mimetypes
  import os
  from typing import Dict, List, Tuple
  from jarvis.jarvis_platform.base import BasePlatform
- from jarvis.utils import PrettyOutput, OutputType, get_max_context_length
+ from jarvis.utils import PrettyOutput, OutputType
  import requests
  import json
 
jarvis/jarvis_rag/main.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
  import faiss
  from typing import List, Tuple, Optional, Dict
  import pickle
- from jarvis.utils import OutputType, PrettyOutput, get_context_token_count, get_embedding, get_embedding_batch, get_file_md5, get_max_context_length, get_max_paragraph_length, get_min_paragraph_length, get_thread_count, init_gpu_config, load_embedding_model
+ from jarvis.utils import OutputType, PrettyOutput, get_context_token_count, get_embedding, get_embedding_batch, get_file_md5, get_max_token_count, get_max_paragraph_length, get_min_paragraph_length, get_thread_count, init_gpu_config, load_embedding_model
  from jarvis.utils import init_env
  from dataclasses import dataclass
  from tqdm import tqdm
@@ -143,7 +143,7 @@ class RAGTool:
  self.min_paragraph_length = get_min_paragraph_length() # Minimum paragraph length
  self.max_paragraph_length = get_max_paragraph_length() # Maximum paragraph length
  self.context_window = 5 # Fixed context window size
- self.max_context_length = int(get_max_context_length() * 0.8)
+ self.max_token_count = int(get_max_token_count() * 0.8)
 
  # Initialize data directory
  self.data_dir = os.path.join(self.root_dir, ".jarvis/rag")
@@ -655,7 +655,7 @@ Question: {question}
  Relevant documents (ordered by relevance):
  """
  # Add context with length control
- available_count = self.max_context_length - get_context_token_count(prompt) - 1000
+ available_count = self.max_token_count - get_context_token_count(prompt) - 1000
  current_count = 0
 
  for doc, score in results:
jarvis/jarvis_tools/registry.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Callable, Dict, List, Optional
 
  from jarvis.jarvis_platform.registry import PlatformRegistry
  from jarvis.jarvis_tools.base import Tool
- from jarvis.utils import OutputType, PrettyOutput, get_context_token_count, get_max_context_length
+ from jarvis.utils import OutputType, PrettyOutput, get_context_token_count, get_max_token_count
 
 
  tool_call_help = """## Tool Usage Format
@@ -64,8 +64,8 @@ class ToolRegistry:
  # Load built-in tools and external tools
  self._load_builtin_tools()
  self._load_external_tools()
- # Ensure max_context_length is an integer
- self.max_context_length = int(get_max_context_length() * 0.8)
+ # Ensure max_token_count is an integer
+ self.max_token_count = int(get_max_token_count() * 0.8)
 
  def use_tools(self, name: List[str]):
  """Use specified tools"""
@@ -248,13 +248,13 @@ arguments:
  PrettyOutput.section("Execution successful", OutputType.SUCCESS)
 
  # If the output exceeds 4k characters, use a large model to summarize
- if get_context_token_count(output) > self.max_context_length:
+ if get_context_token_count(output) > self.max_token_count:
  try:
  PrettyOutput.print("Output is too long, summarizing...", OutputType.PROGRESS)
  model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
 
  # If the output exceeds the maximum context length, only take the last part
- max_count = self.max_context_length
+ max_count = self.max_token_count
  if get_context_token_count(output) > max_count:
  output_to_summarize = output[-max_count:]
  truncation_notice = f"\n(Note: Due to the length of the output, only the last {max_count} characters are summarized)"
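The registry hunks rename the same attribute inside the long-output handling: when a tool's output exceeds the token budget, only the tail is kept and passed to a model for summarization, with a truncation notice appended. A hedged sketch of that flow, where llm_summarize is a placeholder rather than the actual platform call:

def llm_summarize(text):
    # placeholder for the platform model call Jarvis uses; not a real API
    return text[:200] + "..."

def condense_output(output, max_token_count):
    # The diff compares token counts but slices by characters; this sketch does the same.
    if len(output) <= max_token_count:
        return output
    tail = output[-max_token_count:]  # keep only the most recent part of the output
    notice = ("\n(Note: Due to the length of the output, only the last "
              f"{max_token_count} characters are summarized)")
    return llm_summarize(tail) + notice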
jarvis/utils.py CHANGED
@@ -463,8 +463,8 @@ def load_rerank_model():
 
  def is_long_context(files: list) -> bool:
  """Check if the file list belongs to a long context (total characters exceed 80% of the maximum context length)"""
- max_length = get_max_context_length()
- threshold = max_length * 0.8
+ max_token_count = get_max_token_count()
+ threshold = max_token_count * 0.8
  total_tokens = 0
 
  for file_path in files:
@@ -674,8 +674,8 @@ def get_embedding_batch(embedding_model: Any, texts: List[str]) -> np.ndarray:
 
 
 
- def get_max_context_length():
-     return int(os.getenv('JARVIS_MAX_CONTEXT_LENGTH', '131072')) # default 128k
+ def get_max_token_count():
+     return int(os.getenv('JARVIS_MAX_TOKEN_COUNT', '131072')) # default 128k
 
  def get_thread_count():
      return int(os.getenv('JARVIS_THREAD_COUNT', '1'))
@@ -774,8 +774,8 @@ def get_context_token_count(text: str) -> int:
  try:
  # Use a fast tokenizer that's good at general text
  tokenizer = load_tokenizer()
- tokens = tokenizer.encode(text)
- return len(tokens)
+ chunks = split_text_into_chunks(text, 512)
+ return sum([len(tokenizer.encode(chunk)) for chunk in chunks])
 
  except Exception as e:
  PrettyOutput.print(f"Error counting tokens: {str(e)}", OutputType.WARNING)
jarvis_ai_assistant-0.1.109.dist-info/METADATA → jarvis_ai_assistant-0.1.110.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: jarvis-ai-assistant
- Version: 0.1.109
+ Version: 0.1.110
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
  Home-page: https://github.com/skyfireitdiy/Jarvis
  Author: skyfire
jarvis_ai_assistant-0.1.109.dist-info/RECORD → jarvis_ai_assistant-0.1.110.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
- jarvis/__init__.py,sha256=CB3MYpgrHjKZT-LJIoysQYNgLyXYOJSKmxopMqJsAmU,51
- jarvis/agent.py,sha256=x6LmdW5k50nhAK6hdb8V1Vvkt64szwrnaewAto8murg,22690
- jarvis/utils.py,sha256=_GQY1UlxzhKM0DwdnuTUeSmaae8dV83SffsjMPPXFM0,26889
+ jarvis/__init__.py,sha256=0rw1RD8VrgfjPxXtqbnGEx3Aj9K4mgrYWuVSzACO3Ic,51
+ jarvis/agent.py,sha256=eV2Bgm5Q6gnQb2QeEo9bHCDaLY0v3RSV8Ylm_gS2_Yc,22678
+ jarvis/utils.py,sha256=7v9hs9Tlyi9XMLYkPUzbzzMXGuAlJAx5999O2d7kP9A,26945
  jarvis/jarvis_code_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jarvis/jarvis_code_agent/code_agent.py,sha256=nigsmCK6D2z0dFU_1HFNYEvXr3lWdl0rm6p4VgiOk6o,5980
  jarvis/jarvis_code_agent/file_select.py,sha256=1kOVRLPS1GZcDyGpCW9hOPbfCEwF8f0-qVPaRZPHzoM,8154
  jarvis/jarvis_code_agent/patch.py,sha256=bOhegGKs4JEmJJOZfUlmwzGI6kakMyi2Q62HADJ7Npk,4594
  jarvis/jarvis_code_agent/relevant_files.py,sha256=Q4nI45zuyWt5aKuc4OR7-a6UbOXOym3oEzQJvqxkF8Q,946
  jarvis/jarvis_codebase/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- jarvis/jarvis_codebase/main.py,sha256=7SwL0QPeojsC95SIlbKjq9FcvKnvPBbtH1kv3SjPUBY,39701
+ jarvis/jarvis_codebase/main.py,sha256=40ySTIrQld5uW60vW9pawZopjDNVvtmlW27oNVaJXH0,39683
  jarvis/jarvis_lsp/base.py,sha256=_7pdbMKjdtYBW0DsRbjIodDHM3J7df-YgXHejN_WIrU,4490
  jarvis/jarvis_lsp/cpp.py,sha256=F7Zo3BErkvtWS1_H9zQO83pX_FUmnijux-2SjhWzKCE,4985
  jarvis/jarvis_lsp/go.py,sha256=p8LULiFdq4qjDYQzXFlzH0-FQZ3IyfiwN_sbO9i0L_A,5310
@@ -20,12 +20,12 @@ jarvis/jarvis_platform/base.py,sha256=nQ-rsJL1Z-gMev3TPoY7tYdwxhCJY8LG6_gtJ-maiW
  jarvis/jarvis_platform/kimi.py,sha256=3yiOL2PsEcKEL0Yj0Hm3lTg9M0Ahy0Ou1AUnJ0AS0Ss,15768
  jarvis/jarvis_platform/ollama.py,sha256=9Ptu-UzRMnNxqFlx9uDpHO0_Imrzf0Wfw9sZqnv2wRI,5681
  jarvis/jarvis_platform/openai.py,sha256=NYAIaQbFH9Usg5ZxkBSek1F0imu-pDB9Qf6Am0AtU0s,4130
- jarvis/jarvis_platform/oyi.py,sha256=mV8tsQty2Htz--DNemBAnCiauih3JQ4jSyuZi5L4WQo,15089
+ jarvis/jarvis_platform/oyi.py,sha256=11WcpJu0rsQfcHP1SVVwpbOjBoJzvQ6LELHWQMCvyzw,15065
  jarvis/jarvis_platform/registry.py,sha256=9QLoihcnkYckrCzgNnlTqaLn_z_HMhaxMSyUNb8IEys,8538
  jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jarvis/jarvis_platform_manager/main.py,sha256=17607aNAStqJ1sOQLTGi6Tnv-cIQme_r5YvbB_S3enc,4985
  jarvis/jarvis_rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- jarvis/jarvis_rag/main.py,sha256=ZdZZnOyQX8q1mNUeMymb9qg1l77JnG6WBz_KDov0NLQ,31366
+ jarvis/jarvis_rag/main.py,sha256=Lr3b2eTB9TXZGZGdG4Sl9bdtE5NFRbv_bRysxeWNCEo,31354
  jarvis/jarvis_smart_shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jarvis/jarvis_smart_shell/main.py,sha256=VdUR-x932OccEwU0pcQM_pb_I4yfrAutE3hfm6jf5es,3955
  jarvis/jarvis_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -51,12 +51,12 @@ jarvis/jarvis_tools/methodology.py,sha256=RFqcVjKuj8ESGmNYcQz_HyphsitDvF3XtqgGaq
  jarvis/jarvis_tools/rag.py,sha256=2fQHqc4bw8JM-OxGTsHobLIOTo8Mip3rdtJCmAoY8XU,4952
  jarvis/jarvis_tools/read_code.py,sha256=5DGmeXTgumAiG0RP1xB4sF4NdmBm5BEGjRRlIBzjGnQ,4002
  jarvis/jarvis_tools/read_webpage.py,sha256=JCReSXhkDHDkQ606sZYIKG1Itlprjpmu1sSbF-Ed-jI,2478
- jarvis/jarvis_tools/registry.py,sha256=mYx7q78BQ9El68c6DPbbeMyRp0Dr9GnZoozjhSViLPo,11859
+ jarvis/jarvis_tools/registry.py,sha256=OR-BxSVfI3ER_1rAPMZfLf45E2YpheeS01j8MJ8RGso,11841
  jarvis/jarvis_tools/search.py,sha256=PLSSNETyajpqDoStCTfkoy-D41IMNudTuVzonMlT6Aw,9225
  jarvis/jarvis_tools/select_code_files.py,sha256=bjJGwCNw0Ue_8jW60K1gcy1rUgKqoHihicu5SS58WNk,1890
- jarvis_ai_assistant-0.1.109.dist-info/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
- jarvis_ai_assistant-0.1.109.dist-info/METADATA,sha256=pxorU-ZxDXj6dNQOZfIh285QvRa9hUxXmKGVbgotBTc,14392
- jarvis_ai_assistant-0.1.109.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- jarvis_ai_assistant-0.1.109.dist-info/entry_points.txt,sha256=UYj4FYvOH8jJ0GgCJTA_TAmJ3wvikos-hUVbCwt_KOc,480
- jarvis_ai_assistant-0.1.109.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
- jarvis_ai_assistant-0.1.109.dist-info/RECORD,,
+ jarvis_ai_assistant-0.1.110.dist-info/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
+ jarvis_ai_assistant-0.1.110.dist-info/METADATA,sha256=0Ogu7gge_EAhRT6p7HXEqeGNb4uAFMa6MedSJQsp0yY,14392
+ jarvis_ai_assistant-0.1.110.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ jarvis_ai_assistant-0.1.110.dist-info/entry_points.txt,sha256=UYj4FYvOH8jJ0GgCJTA_TAmJ3wvikos-hUVbCwt_KOc,480
+ jarvis_ai_assistant-0.1.110.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
+ jarvis_ai_assistant-0.1.110.dist-info/RECORD,,