jarvis-ai-assistant 0.1.32__py3-none-any.whl → 0.1.33__py3-none-any.whl
- jarvis/__init__.py +1 -1
- jarvis/__pycache__/__init__.cpython-313.pyc +0 -0
- jarvis/__pycache__/main.cpython-313.pyc +0 -0
- jarvis/main.py +9 -3
- jarvis/models/__pycache__/ai8.cpython-313.pyc +0 -0
- jarvis/models/__pycache__/oyi.cpython-313.pyc +0 -0
- jarvis/models/ai8.py +277 -0
- jarvis/models/oyi.py +28 -0
- {jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/METADATA +1 -1
- {jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/RECORD +14 -12
- {jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/entry_points.txt +0 -0
- {jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/top_level.txt +0 -0
jarvis/__init__.py
CHANGED

jarvis/__pycache__/__init__.cpython-313.pyc
CHANGED
Binary file

jarvis/__pycache__/main.cpython-313.pyc
CHANGED
Binary file

jarvis/main.py
CHANGED
@@ -103,19 +103,25 @@ def main():
     parser = argparse.ArgumentParser(description='Jarvis AI Assistant')
     parser.add_argument('-f', '--files', nargs='*', help='List of files to process')
     parser.add_argument('--keep-history', action='store_true', help='Keep chat history (do not delete chat session)')
-    parser.add_argument('-m', '--model', default='
+    parser.add_argument('-m', '--model', default='', help='选择模型')
     args = parser.parse_args()

     load_env_from_file()

-
+    model = args.model if args.model else os.getenv('JARVIS_MODEL')
+
+    if not model:
+        PrettyOutput.print("未指定模型,请使用 -m 参数或者设置 JARVIS_MODEL 环境变量", OutputType.ERROR)
+        return 1
+
+    ModelRegistry.get_model_registry().set_global_model(model)

     try:
         # 获取全局模型实例
         agent = Agent()

         # 欢迎信息
-        PrettyOutput.print(f"Jarvis 已初始化 - With {
+        PrettyOutput.print(f"Jarvis 已初始化 - With {model}", OutputType.SYSTEM)
         if args.keep_history:
             PrettyOutput.print("已启用历史保留模式", OutputType.INFO)

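The change above makes model selection explicit: -m/--model now defaults to an empty string and main() falls back to the JARVIS_MODEL environment variable, exiting with an error when neither is set. A minimal sketch of that resolution order; the resolve_model helper is illustrative only and not part of the diff:

# Sketch of the selection order introduced above: the CLI value wins,
# otherwise JARVIS_MODEL is consulted. resolve_model is a hypothetical helper.
import os

def resolve_model(cli_model: str) -> str | None:
    return cli_model or os.getenv("JARVIS_MODEL")

os.environ["JARVIS_MODEL"] = "kimi"
assert resolve_model("") == "kimi"      # falls back to the environment variable
assert resolve_model("ai8") == "ai8"    # explicit -m value takes precedence
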
jarvis/models/__pycache__/ai8.cpython-313.pyc
ADDED
Binary file

jarvis/models/__pycache__/oyi.cpython-313.pyc
CHANGED
Binary file

jarvis/models/ai8.py
ADDED
@@ -0,0 +1,277 @@
+import os
+from typing import Dict, List
+from jarvis.models.base import BaseModel
+from jarvis.utils import PrettyOutput, OutputType
+import requests
+import json
+import base64
+
+class AI8Model(BaseModel):
+    """AI8 model implementation"""
+
+    model_name = "ai8"
+    BASE_URL = "https://ai8.rcouyi.com"
+
+    def __init__(self):
+        """Initialize model"""
+        PrettyOutput.section("支持的模型", OutputType.SUCCESS)
+
+        PrettyOutput.print("gpt-3.5-turbo", OutputType.INFO)
+        PrettyOutput.print("gpt-4-turbo", OutputType.INFO)
+        PrettyOutput.print("gpt-4o", OutputType.INFO)
+        PrettyOutput.print("gpt-4o-mini", OutputType.INFO)
+        PrettyOutput.print("o1-mini", OutputType.INFO)
+        PrettyOutput.print("gpt-4-vision-preview", OutputType.INFO)
+        PrettyOutput.print("gpt-4-turbo-preview", OutputType.INFO)
+        PrettyOutput.print("o1-mini-all", OutputType.INFO)
+        PrettyOutput.print("gpt-4o-all", OutputType.INFO)
+        PrettyOutput.print("o1-preview", OutputType.INFO)
+        PrettyOutput.print("claude-3-5-sonnet-20241022", OutputType.INFO)
+        PrettyOutput.print("claude-3-opus-20240229", OutputType.INFO)
+        PrettyOutput.print("claude-3-haiku-20240307", OutputType.INFO)
+        PrettyOutput.print("claude-3-5-sonnet-20240620", OutputType.INFO)
+        PrettyOutput.print("deepseek-chat", OutputType.INFO)
+        PrettyOutput.print("deepseek-coder", OutputType.INFO)
+        PrettyOutput.print("glm-4-flash", OutputType.INFO)
+        PrettyOutput.print("glm-4-air", OutputType.INFO)
+        PrettyOutput.print("glm-4v-flash", OutputType.INFO)
+        PrettyOutput.print("qwen-plus", OutputType.INFO)
+        PrettyOutput.print("qwen-vl-max", OutputType.INFO)
+        PrettyOutput.print("qwen-turbo", OutputType.INFO)
+        PrettyOutput.print("lite", OutputType.INFO)
+        PrettyOutput.print("generalv3.5", OutputType.INFO)
+        PrettyOutput.print("yi-lightning", OutputType.INFO)
+        PrettyOutput.print("yi-vision", OutputType.INFO)
+        PrettyOutput.print("yi-spark", OutputType.INFO)
+        PrettyOutput.print("yi-medium", OutputType.INFO)
+        PrettyOutput.print("Doubao-lite-4k", OutputType.INFO)
+        PrettyOutput.print("Doubao-lite-32k", OutputType.INFO)
+        PrettyOutput.print("Doubao-pro-4k", OutputType.INFO)
+        PrettyOutput.print("Doubao-pro-32k", OutputType.INFO)
+        PrettyOutput.print("step-1-flash", OutputType.INFO)
+        PrettyOutput.print("step-1v-8k", OutputType.INFO)
+        PrettyOutput.print("Baichuan4-Air", OutputType.INFO)
+        PrettyOutput.print("Baichuan4-Turbo", OutputType.INFO)
+        PrettyOutput.print("moonshot-v1-8k", OutputType.INFO)
+        PrettyOutput.print("moonshot-v1-32k", OutputType.INFO)
+        PrettyOutput.print("moonshot-v1-128k", OutputType.INFO)
+        PrettyOutput.print("ERNIE-Speed-128K", OutputType.INFO)
+        PrettyOutput.print("ERNIE-3.5-128K", OutputType.INFO)
+
+
+        PrettyOutput.print("使用AI8_MODEL配置模型", OutputType.SUCCESS)
+
+        self.system_message = ""
+        self.conversation = None
+        self.files = []
+        self.model = os.getenv("AI8_MODEL") or "deepseek-chat"
+        self.token = os.getenv("AI8_API_KEY")
+        if not all([self.model, self.token]):
+            raise Exception("AI8_MODEL or AI8_API_KEY is not set")
+
+    def create_conversation(self) -> bool:
+        """Create a new conversation"""
+        try:
+            headers = {
+                'Authorization': self.token,
+                'Content-Type': 'application/json',
+                'Accept': 'application/json, text/plain, */*',
+                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
+                'X-APP-VERSION': '2.2.2',
+                'Origin': self.BASE_URL,
+                'Referer': f'{self.BASE_URL}/chat?_userMenuKey=chat'
+            }
+
+            # 1. 创建会话
+            response = requests.post(
+                f"{self.BASE_URL}/api/chat/session",
+                headers=headers
+            )
+
+            if response.status_code != 200:
+                PrettyOutput.print(f"创建会话失败: {response.status_code}", OutputType.ERROR)
+                return False
+
+            data = response.json()
+            if data['code'] != 0:
+                PrettyOutput.print(f"创建会话失败: {data.get('msg', '未知错误')}", OutputType.ERROR)
+                return False
+
+            self.conversation = data['data']
+            PrettyOutput.print(f"创建会话成功: {data['data']['id']}", OutputType.SUCCESS)
+
+            # 2. 更新会话设置
+            session_data = {
+                **self.conversation,
+                "contextCount": 1024,
+                "prompt": self.system_message,
+                "plugins": ["tavily_search"],
+                "localPlugins": None,
+                "useAppId": 0
+            }
+
+            response = requests.put(
+                f"{self.BASE_URL}/api/chat/session/{self.conversation['id']}",
+                headers=headers,
+                json=session_data
+            )
+
+            if response.status_code == 200:
+                data = response.json()
+                if data['code'] == 0:
+                    self.conversation = data['data']
+                    PrettyOutput.print("会话设置更新成功", OutputType.SUCCESS)
+                    return True
+                else:
+                    PrettyOutput.print(f"更新会话设置失败: {data.get('msg', '未知错误')}", OutputType.ERROR)
+                    return False
+            else:
+                PrettyOutput.print(f"更新会话设置失败: {response.status_code}", OutputType.ERROR)
+                return False
+
+        except Exception as e:
+            PrettyOutput.print(f"创建会话异常: {str(e)}", OutputType.ERROR)
+            return False
+
+    def upload_files(self, file_list: List[str]) -> List[Dict]:
+        for file_path in file_list:
+            name = os.path.basename(file_path)
+            with open(file_path, 'rb') as f:
+                file_data = f.read()
+                base64_data = base64.b64encode(file_data).decode('utf-8')
+                self.files.append({
+                    "name": name,
+                    "data": f"data:image/png;base64,{base64_data}"
+                })
+                PrettyOutput.print(f"文件 {name} 已准备好发送", OutputType.SUCCESS)
+
+    def set_system_message(self, message: str):
+        """Set system message"""
+        self.system_message = message
+
+    def chat(self, message: str) -> str:
+        """Execute chat with the model
+
+        Args:
+            message: User input message
+
+        Returns:
+            str: Model response
+        """
+        try:
+            # 确保有会话ID
+            if not self.conversation:
+                if not self.create_conversation():
+                    raise Exception("Failed to create conversation")
+
+            headers = {
+                'Authorization': self.token,
+                'Content-Type': 'application/json',
+                'Accept': 'text/event-stream',
+                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
+                'X-APP-VERSION': '2.2.2',
+                'Origin': self.BASE_URL,
+                'Referer': f'{self.BASE_URL}/chat?_userMenuKey=chat'
+            }
+
+            payload = {
+                "text": message,
+                "sessionId": self.conversation['id'],
+                "files": []
+            }
+
+            # 如果有文件需要发送
+            if self.files:
+                for file_data in self.files:
+                    payload["files"].append({
+                        "name": file_data["name"],
+                        "data": file_data["data"]
+                    })
+                self.files = []  # 清空已使用的文件
+
+            response = requests.post(
+                f"{self.BASE_URL}/api/chat/completions",
+                headers=headers,
+                json=payload,
+                stream=True
+            )
+
+            if response.status_code != 200:
+                error_msg = f"聊天请求失败: {response.status_code}"
+                PrettyOutput.print(error_msg, OutputType.ERROR)
+                raise Exception(error_msg)
+
+            # 处理流式响应
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode('utf-8')
+                    if line.startswith('data: '):
+                        try:
+                            data = json.loads(line[6:])
+                            if data.get('type') == 'string':
+                                chunk = data.get('data', '')
+                                if chunk:
+                                    full_response += chunk
+                                    PrettyOutput.print_stream(chunk)
+
+                        except json.JSONDecodeError:
+                            continue
+
+            PrettyOutput.print_stream_end()
+
+            return full_response
+
+        except Exception as e:
+            PrettyOutput.print(f"聊天异常: {str(e)}", OutputType.ERROR)
+            raise e
+
+    def name(self) -> str:
+        """Return model name"""
+        return self.model_name
+
+    def reset(self):
+        """Reset model state"""
+        self.conversation = None
+        self.files = []  # 清空文件列表
+
+    def delete_chat(self) -> bool:
+        """Delete current chat session"""
+        try:
+            if not self.conversation:
+                return True
+
+            headers = {
+                'Authorization': self.token,
+                'Content-Type': 'application/json',
+                'Accept': 'application/json, text/plain, */*',
+                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
+                'X-APP-VERSION': '2.2.2',
+                'Origin': self.BASE_URL,
+                'Referer': f'{self.BASE_URL}/chat?_userMenuKey=chat'
+            }
+
+            response = requests.delete(
+                f"{self.BASE_URL}/api/chat/session/{self.conversation['id']}",
+                headers=headers
+            )
+
+            if response.status_code == 200:
+                data = response.json()
+                if data['code'] == 0:
+                    PrettyOutput.print("会话删除成功", OutputType.SUCCESS)
+                    self.reset()
+                    return True
+                else:
+                    error_msg = f"删除会话失败: {data.get('msg', '未知错误')}"
+                    PrettyOutput.print(error_msg, OutputType.ERROR)
+                    return False
+            else:
+                error_msg = f"删除会话请求失败: {response.status_code}"
+                PrettyOutput.print(error_msg, OutputType.ERROR)
+                return False
+
+        except Exception as e:
+            PrettyOutput.print(f"删除会话异常: {str(e)}", OutputType.ERROR)
+            return False
+
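The new AI8Model reads its configuration from AI8_API_KEY and AI8_MODEL (defaulting to deepseek-chat), creates a session lazily on the first chat() call, streams the reply, and can delete the remote session afterwards. A minimal usage sketch, assuming a valid AI8 account; the token below is a placeholder:

# Usage sketch for the new AI8Model; the API key is a placeholder and the
# model name can be any entry from the list printed by __init__.
import os
from jarvis.models.ai8 import AI8Model

os.environ["AI8_API_KEY"] = "your-ai8-token"   # placeholder, not a real key
os.environ["AI8_MODEL"] = "deepseek-chat"

model = AI8Model()
model.set_system_message("You are a helpful assistant.")
reply = model.chat("Hello")   # creates the session on first use and streams the answer
model.delete_chat()           # remove the remote session when done
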
jarvis/models/oyi.py
CHANGED
@@ -14,6 +14,34 @@ class OyiModel(BaseModel):

     def __init__(self):
         """Initialize model"""
+        PrettyOutput.section("支持的模型", OutputType.SUCCESS)
+        PrettyOutput.print("gpt-4o-mini", OutputType.INFO)
+        PrettyOutput.print("gpt-3.5-turbo", OutputType.INFO)
+        PrettyOutput.print("gpt-4o", OutputType.INFO)
+        PrettyOutput.print("gpt-4o-2024-11-20", OutputType.INFO)
+        PrettyOutput.print("o1-mini", OutputType.INFO)
+        PrettyOutput.print("o1-mini-2024-09-12", OutputType.INFO)
+        PrettyOutput.print("gpt-4o-all", OutputType.INFO)
+        PrettyOutput.print("claude-3-5-sonnet-20240620", OutputType.INFO)
+        PrettyOutput.print("claude-3-opus-20240229", OutputType.INFO)
+        PrettyOutput.print("deepseek-chat", OutputType.INFO)
+        PrettyOutput.print("deepseek-coder", OutputType.INFO)
+        PrettyOutput.print("glm-4-flash", OutputType.INFO)
+        PrettyOutput.print("glm-4-air", OutputType.INFO)
+        PrettyOutput.print("qwen-plus", OutputType.INFO)
+        PrettyOutput.print("qwen-turbo", OutputType.INFO)
+        PrettyOutput.print("Doubao-lite-4k", OutputType.INFO)
+        PrettyOutput.print("Doubao-pro-4k", OutputType.INFO)
+        PrettyOutput.print("yi-lightning", OutputType.INFO)
+        PrettyOutput.print("step-1-flash", OutputType.INFO)
+        PrettyOutput.print("moonshot-v1-8k", OutputType.INFO)
+        PrettyOutput.print("lite", OutputType.INFO)
+        PrettyOutput.print("generalv3.5", OutputType.INFO)
+        PrettyOutput.print("gemini-pro", OutputType.INFO)
+        PrettyOutput.print("llama3-70b-8192", OutputType.INFO)
+        PrettyOutput.print("使用OYI_MODEL配置模型", OutputType.SUCCESS)
+
+
         self.messages = []
         self.system_message = ""
         self.conversation = None
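The Oyi backend gets the same treatment: __init__ now prints the supported models and points at the OYI_MODEL environment variable. A short sketch of selecting one of the listed models; the rest of OyiModel's configuration handling (such as its API key) is outside this hunk and may require additional environment variables:

# Pick one of the models listed above before constructing OyiModel.
# OYI_MODEL is the variable named in the printed hint; the lookup itself
# lives in parts of oyi.py not shown in this diff.
import os
from jarvis.models.oyi import OyiModel

os.environ["OYI_MODEL"] = "gpt-4o-mini"   # any entry from the printed list
model = OyiModel()
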
{jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/RECORD
RENAMED
@@ -1,25 +1,27 @@
-jarvis/__init__.py,sha256=
+jarvis/__init__.py,sha256=K3XebXHZE6kSmgNBnsXUeI2ljKwqHTsmFycr1tKnbP4,50
 jarvis/agent.py,sha256=QR5nwej7LKYg2s9q7lVG1R7C62t8OcJz0PTuIFncDB8,11805
-jarvis/main.py,sha256=
+jarvis/main.py,sha256=FOfc2v7iX9my5USyf4oxcGzyJe4AH4TwIEQk2WdNWrI,5768
 jarvis/utils.py,sha256=JlkuC9RtspXH2VWDmj9nR0vnb8ie1gIsKc4vC7WRco8,7321
-jarvis/__pycache__/__init__.cpython-313.pyc,sha256=
+jarvis/__pycache__/__init__.cpython-313.pyc,sha256=WcNiIdZTVZKVlyxKWve-Trc49FuiqOMMe4g_fMxKlDA,209
 jarvis/__pycache__/agent.cpython-313.pyc,sha256=cgbX5L0T16_ZBYw1K3wHGQ8UT7khEn5V7AlOGFkoN68,15200
-jarvis/__pycache__/main.cpython-313.pyc,sha256=
+jarvis/__pycache__/main.cpython-313.pyc,sha256=KKVD1bs_4MDOdq2f04rsJpI8Osp959Fo5KOsUUBvrDM,7985
 jarvis/__pycache__/models.cpython-313.pyc,sha256=uWuRIjGrY4YDB3dGW5PGDLWaS03et8g11O725TjY_eU,5960
 jarvis/__pycache__/tools.cpython-313.pyc,sha256=lAD4LrnnWzNZQmHXGfZ_2l7oskOpr2_2OC-gdFhxQY8,33933
 jarvis/__pycache__/utils.cpython-313.pyc,sha256=eXXM-V-2ax7qBNxktdUrEIwhAXPQHAlI7gLGewlKOj4,10276
 jarvis/__pycache__/zte_llm.cpython-313.pyc,sha256=kMm9IGundGmOPqjsgrm9oIaWLDagYGCPRAaE3ipkc-0,5662
 jarvis/models/__init__.py,sha256=Lqb1NWFIfq7HlZIsJ7eUGyGjdYyaJqOoOf7cG_yo73A,57
+jarvis/models/ai8.py,sha256=mXa_fE_tHztK9SP3gUOFoSl6A-ceUkNsTVC6flEXlow,11457
 jarvis/models/base.py,sha256=dNkYPg9ISrHGEpmQLN9kxCDU-kqJAJlm_owdDC302Dk,1132
 jarvis/models/kimi.py,sha256=iI8mBzUxiyxa_bzDG9uwE3BZtreEUt0EJOIP_l2rSDM,16788
 jarvis/models/openai.py,sha256=aFpRH6K0YG6suCRGlJLw2JzLh2Ftpn6AYhdnKtMQQlY,3940
-jarvis/models/oyi.py,sha256=
+jarvis/models/oyi.py,sha256=DzRKD0jVYvU_O5dXKlOmy83j8tPIcGGHVJcV90QEmxs,12219
 jarvis/models/registry.py,sha256=ecIo3a0G-pRPw4eg77ozzbGVh6vy93DHF8oAnU2g51w,7511
 jarvis/models/__pycache__/__init__.cpython-313.pyc,sha256=hD4Uui0EPCTfoPOasTYzIi46Kv_q7OI8m-Lck-nX4zM,220
+jarvis/models/__pycache__/ai8.cpython-313.pyc,sha256=xexoy9Qgy0BYAfKY85Yy3oj2B-Sw9q2TXxeTmeK6tvQ,14775
 jarvis/models/__pycache__/base.cpython-313.pyc,sha256=9VvOXFPYOrB-2pO2py7dWOVbimODnXQJFLlFbyF7-LI,2207
 jarvis/models/__pycache__/kimi.cpython-313.pyc,sha256=FGtHoTv747oNY4Lqnwf5BkGYKnevHOlIEDIlbsY7va0,20893
 jarvis/models/__pycache__/openai.cpython-313.pyc,sha256=CU3KaUA0XcOK55sexF7OxfQ6_jdofABsufmFxm0T3mk,6004
-jarvis/models/__pycache__/oyi.cpython-313.pyc,sha256=
+jarvis/models/__pycache__/oyi.cpython-313.pyc,sha256=AzChNnqApFVNNXVi77uw7GwL7EfWnXp0T8G7_VUPpyY,14037
 jarvis/models/__pycache__/registry.cpython-313.pyc,sha256=jUZUyHyfzeQtjCdk2NCZGTsTUsvKyIlnZVDzZY1gLuU,9985
 jarvis/tools/__init__.py,sha256=Kj1bKj34lwRDKMKHLOrLyQElf2lHbqA2tDgP359eaDo,71
 jarvis/tools/base.py,sha256=EGRGbdfbLXDLwtyoWdvp9rlxNX7bzc20t0Vc2VkwIEY,652
@@ -47,9 +49,9 @@ jarvis/tools/__pycache__/user_confirmation.cpython-313.pyc,sha256=wK3Ev10lHSUSRv
 jarvis/tools/__pycache__/user_input.cpython-313.pyc,sha256=JjTFOhObKsKF4Pn8KBRuKfV1_Ssj083fjU7Mfc_5z7c,2531
 jarvis/tools/__pycache__/user_interaction.cpython-313.pyc,sha256=RuVZ-pmiPBDywY3efgXSfohMAciC1avMGPmBK5qlnew,3305
 jarvis/tools/__pycache__/webpage.cpython-313.pyc,sha256=BjzSfnNzsKCrLETCcWjt32lNDLzwnjqcVGg4JfWd9OM,3008
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
+jarvis_ai_assistant-0.1.33.dist-info/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
+jarvis_ai_assistant-0.1.33.dist-info/METADATA,sha256=WLKoychbzXs8Mom2ctlWUnrXnxTMn5BFzKBzf2ME3SE,9765
+jarvis_ai_assistant-0.1.33.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+jarvis_ai_assistant-0.1.33.dist-info/entry_points.txt,sha256=iKu7OMfew9dtfGhW71gIMTg4wvafuPqKb4wyQOnMAGU,44
+jarvis_ai_assistant-0.1.33.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
+jarvis_ai_assistant-0.1.33.dist-info/RECORD,,
{jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/LICENSE
RENAMED
File without changes

{jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/WHEEL
RENAMED
File without changes

{jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/entry_points.txt
RENAMED
File without changes

{jarvis_ai_assistant-0.1.32.dist-info → jarvis_ai_assistant-0.1.33.dist-info}/top_level.txt
RENAMED
File without changes