jarvis-ai-assistant 0.1.182__py3-none-any.whl → 0.1.183__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jarvis-ai-assistant might be problematic.

jarvis/__init__.py CHANGED
@@ -1,4 +1,4 @@
  # -*- coding: utf-8 -*-
  """Jarvis AI Assistant"""

- __version__ = "0.1.182"
+ __version__ = "0.1.183"
jarvis/jarvis_platform/kimi.py CHANGED
@@ -315,7 +315,7 @@ class KimiModel(BasePlatform):

  def name(self) -> str:
  """Model name"""
- return "kimi"
+ return self.model_name

  def support_web(self) -> bool:
  """Kimi平台支持web功能"""
jarvis/jarvis_platform/registry.py CHANGED
@@ -19,7 +19,6 @@ REQUIRED_METHODS = [
  ('set_system_message', ['message']),
  ('set_model_name', ['model_name']),
  ('get_model_list', []),
- ('set_suppress_output', ['suppress']),
  ('upload_files', ['file_list']),
  ]

jarvis/jarvis_platform/tongyi.py ADDED
@@ -0,0 +1,428 @@
+ # -*- coding: utf-8 -*-
+ import json
+ import os
+ from typing import Any, Dict, Generator, List, Tuple
+ import uuid
+
+ import requests
+
+ from jarvis.jarvis_platform.base import BasePlatform
+ from jarvis.jarvis_utils.output import PrettyOutput, OutputType
+ from jarvis.jarvis_utils.utils import while_success
+
+
+ class TongyiPlatform(BasePlatform):
+     """Tongyi platform implementation"""
+
+     platform_name = "tongyi"
+
+     def __init__(self):
+         """Initialize Tongyi platform"""
+         super().__init__()
+         self.session_id = ""
+         self.cookies = os.getenv("TONGYI_COOKIES", "")
+         self.request_id = ""
+         self.msg_id = ""
+         self.model_name = ""
+         self.uploaded_file_info = []
+
+
+     def _get_base_headers(self):
+         return {
+             "Host": "api.tongyi.com",
+             "Connection": "keep-alive",
+             "X-Platform": "pc_tongyi",
+             "sec-ch-ua-platform": "Windows",
+             "sec-ch-ua": '"Chromium";v="136", "Microsoft Edge";v="136", "Not.A/Brand";v="99"',
+             "sec-ch-ua-mobile": "?0",
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0",
+             "accept": "application/json, text/plain, */*",
+             "DNT": "1",
+             "Content-Type": "application/json",
+             "Origin": "https://www.tongyi.com",
+             "Sec-Fetch-Site": "same-site",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Dest": "empty",
+             "Referer": "https://www.tongyi.com/qianwen",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
+             "Cookie": self.cookies
+         }
+
+     def set_model_name(self, model_name: str):
+         """Set model name
+
+         Args:
+             model_name: Model name to use
+         """
+         self.model_name = model_name
+
+     def _generate_request_id(self):
+         self.request_id = str(uuid.uuid4()).replace("-", "")
+
+     def chat(self, message: str) -> Generator[str, None, None]:
+         if not self.request_id:
+             self._generate_request_id()
+         url = "https://api.tongyi.com/dialog/conversation"
+         headers = self._get_base_headers()
+
+         headers["accept"] = "text/event-stream"
+
+         # Prepare contents array with message
+         contents = [{
+             "content": message,
+             "contentType": "text",
+             "role": "user",
+             "ext": {
+                 "searchType": "",
+                 "pptGenerate": False,
+                 "deepThink": False,
+                 "deepResearch": False
+             }
+         }]
+
+         # Add uploaded files to contents if available and clear after use
+         if self.uploaded_file_info:
+             for file_info in self.uploaded_file_info:
+                 contents.append({
+                     "role": "user",
+                     "contentType": "file",
+                     "content": file_info["url"],
+                     "ext": {
+                         "fileSize": file_info.get("fileSize", 0),
+                         "batchId": file_info.get("batchId", ""),
+                         "docId": file_info.get("docId", "")
+                     }
+                 })
+             # Clear uploaded file info after using it
+             self.uploaded_file_info = []
+
+         payload = {
+             "model": "",
+             "action": "next",
+             "mode": "chat",
+             "userAction": "new_top",
+             "requestId": self.request_id,
+             "sessionId": self.session_id,
+             "sessionType": "text_chat",
+             "parentMsgId": self.msg_id,
+             "params": {
+                 "agentId": "",
+                 "searchType": "",
+                 "pptGenerate": False,
+                 "bizScene": "code_chat" if self.model_name == "Code-Chat" else "",
+                 "bizSceneInfo": {},
+                 "specifiedModel": "",
+                 "deepThink": True if self.model_name == "Thinking" else False,
+                 "deepResearch": False,
+                 "fileUploadBatchId": self.uploaded_file_info[0]["batchId"] if self.uploaded_file_info else ""
+             },
+             "contents": contents
+         }
+
+         try:
+             response = while_success(lambda: requests.post(url, headers=headers, json=payload, stream=True), sleep_time=5)
+             if response.status_code != 200:
+                 raise Exception(f"HTTP {response.status_code}: {response.text}")
+             msg_id = ""
+             session_id = ""
+             thinking_content = ""
+             text_content = ""
+             in_thinking = False
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+                 line_str = line.decode('utf-8')
+                 if not line_str.startswith("data: "):
+                     continue
+
+                 try:
+                     data = json.loads(line_str[6:])
+                     # 记录消息ID和会话ID
+                     if "msgId" in data:
+                         msg_id = data["msgId"]
+                     if "sessionId" in data:
+                         session_id = data["sessionId"]
+
+                     if "contents" in data and len(data["contents"]) > 0:
+                         for content in data["contents"]:
+                             if content.get("contentType") == "think":
+                                 if not in_thinking:
+                                     yield "<think>\n\n"
+                                     in_thinking = True
+                                 if content.get("incremental"):
+                                     tmp_content = json.loads(content.get("content"))["content"]
+                                     thinking_content += tmp_content
+                                     yield tmp_content
+                                 else:
+                                     tmp_content = json.loads(content.get("content"))["content"]
+                                     if len(thinking_content) < len(tmp_content):
+                                         yield tmp_content[len(thinking_content):]
+                                         thinking_content = tmp_content
+                                     else:
+                                         # thinking_content = "aaa</thi"
+                                         # tmp_content = "aaa"
+                                         # 应该yield nk>
+                                         # print("\n")
+                                         # print(len(thinking_content))
+                                         # print(len(tmp_content))
+                                         # print("--------------------------------")
+                                         # print(thinking_content)
+                                         # print("--------------------------------")
+                                         # print(tmp_content)
+                                         # print("--------------------------------")
+                                         yield "\r\n</think>\n"[len(thinking_content)-len(tmp_content):]
+                                         thinking_content = tmp_content
+                                         in_thinking = False
+                             elif content.get("contentType") == "text":
+                                 if in_thinking:
+                                     continue
+                                 if content.get("incremental"):
+                                     tmp_content = content.get("content")
+                                     text_content += tmp_content
+                                     yield tmp_content
+                                 else:
+                                     tmp_content = content.get("content")
+                                     if len(text_content) < len(tmp_content):
+                                         yield tmp_content[len(text_content):]
+                                         text_content = tmp_content
+
+
+                 except json.JSONDecodeError:
+                     continue
+
+             self.msg_id = msg_id
+             self.session_id = session_id
+
+             return None
+
+         except Exception as e:
+             raise Exception(f"Chat failed: {str(e)}")
+
+     def _get_upload_token(self) -> Dict[str, Any]:
+         """Get upload token from Tongyi API
+
+         Returns:
+             Dict[str, Any]: Upload token information including accessId, bucketName, etc.
+         """
+         url = "https://api.tongyi.com/dialog/uploadToken"
+         headers = self._get_base_headers()
+         payload = {}
+
+         try:
+             response = while_success(lambda: requests.post(url, headers=headers, json=payload), sleep_time=5)
+             if response.status_code != 200:
+                 raise Exception(f"HTTP {response.status_code}: {response.text}")
+
+             result = response.json()
+             if not result.get("success"):
+                 raise Exception(f"Failed to get upload token: {result.get('errorMsg')}")
+
+             return result.get("data", {})
+
+         except Exception as e:
+             raise Exception(f"Failed to get upload token: {str(e)}")
+
+
+     def upload_files(self, file_list: List[str]) -> bool:
+         """Upload files to Tongyi platform and get download links
+
+         Args:
+             file_list: List of file paths to upload
+
+         Returns:
+             List[Dict[str, str]]: List of dictionaries containing file info and download URLs
+         """
+         try:
+             upload_token = self._get_upload_token()
+             uploaded_files = []
+
+             for file_path in file_list:
+                 if not os.path.exists(file_path):
+                     PrettyOutput.print(f"File not found: {file_path}", OutputType.ERROR)
+                     return False
+
+                 # Get file name and content type
+                 file_name = os.path.basename(file_path)
+                 content_type = self._get_content_type(file_path)
+
+                 # Prepare form data
+                 form_data = {
+                     'OSSAccessKeyId': upload_token['accessId'],
+                     'policy': upload_token['policy'],
+                     'signature': upload_token['signature'],
+                     'key': f"{upload_token['dir']}{file_name}",
+                     'dir': upload_token['dir'],
+                     'success_action_status': '200'
+                 }
+
+                 # Prepare files
+                 files = {
+                     'file': (file_name, open(file_path, 'rb'), content_type)
+                 }
+
+                 # Upload file
+                 response = requests.post(
+                     upload_token['host'],
+                     data=form_data,
+                     files=files
+                 )
+
+                 if response.status_code != 200:
+                     PrettyOutput.print(f"Failed to upload {file_name}: HTTP {response.status_code}", OutputType.ERROR)
+                     return False
+
+                 uploaded_files.append({
+                     'fileKey': file_name,
+                     'fileType': 'file',
+                     'dir': upload_token['dir']
+                 })
+
+             # Get download links for uploaded files
+             url = "https://api.tongyi.com/dialog/downloadLink/batch"
+             headers = self._get_base_headers()
+             payload = {
+                 "fileKeys": [f['fileKey'] for f in uploaded_files],
+                 "fileType": "file",
+                 "dir": upload_token['dir']
+             }
+
+             response = requests.post(url, headers=headers, json=payload)
+             if response.status_code != 200:
+                 PrettyOutput.print(f"Failed to get download links: HTTP {response.status_code}", OutputType.ERROR)
+                 return False
+
+             result = response.json()
+             if not result.get("success"):
+                 PrettyOutput.print(f"Failed to get download links: {result.get('errorMsg')}", OutputType.ERROR)
+                 return False
+
+             # Add files to chat
+             self.uploaded_file_info = result.get("data", {}).get("results", [])
+             for file_info in self.uploaded_file_info:
+                 add_url = "https://api.tongyi.com/assistant/api/chat/file/add"
+                 add_payload = {
+                     "workSource": "chat",
+                     "terminal": "web",
+                     "workCode": "0",
+                     "channel": "home",
+                     "workType": "file",
+                     "module": "uploadhistory",
+                     "workName": file_info["fileKey"],
+                     "workId": file_info["docId"],
+                     "workResourcePath": file_info["url"],
+                     "sessionId": "",
+                     "batchId": str(uuid.uuid4()).replace('-', '')[:32],  # Generate random batchId
+                     "fileSize": os.path.getsize(file_path)
+                 }
+
+                 add_response = requests.post(add_url, headers=headers, json=add_payload)
+                 if add_response.status_code != 200:
+                     PrettyOutput.print(f"Failed to add file to chat: HTTP {add_response.status_code}", OutputType.ERROR)
+                     continue
+
+                 add_result = add_response.json()
+                 if not add_result.get("success"):
+                     PrettyOutput.print(f"Failed to add file to chat: {add_result.get('errorMsg')}", OutputType.ERROR)
+                     continue
+
+                 file_info.update(add_result.get("data", {}))
+
+             return True
+
+         except Exception as e:
+             PrettyOutput.print(f"Error uploading files: {str(e)}", OutputType.ERROR)
+             return False
+
+     def _get_content_type(self, file_path: str) -> str:
+         """Get content type for file
+
+         Args:
+             file_path: Path to file
+
+         Returns:
+             str: Content type
+         """
+         ext = os.path.splitext(file_path)[1].lower()
+         content_types = {
+             '.txt': 'text/plain',
+             '.md': 'text/markdown',
+             '.doc': 'application/msword',
+             '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
+             '.xls': 'application/vnd.ms-excel',
+             '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+             '.pdf': 'application/pdf',
+             '.png': 'image/png',
+             '.jpg': 'image/jpeg',
+             '.jpeg': 'image/jpeg',
+             '.gif': 'image/gif',
+             '.mp4': 'video/mp4',
+             '.mp3': 'audio/mpeg',
+             '.wav': 'audio/wav'
+         }
+         return content_types.get(ext, 'application/octet-stream')
+
+     def name(self) -> str:
+         """Get platform name
+
+         Returns:
+             str: Platform name
+         """
+         return self.model_name
+
+     def delete_chat(self) -> bool:
+         """Delete chat history
+
+         Returns:
+             bool: True if deletion successful, False otherwise
+         """
+         if not self.session_id:
+             return True
+
+         url = "https://api.tongyi.com/dialog/session/delete"
+         headers = self._get_base_headers()
+         payload = {
+             "sessionId": self.session_id
+         }
+
+         try:
+             response = while_success(lambda: requests.post(url, headers=headers, json=payload), sleep_time=5)
+             if response.status_code != 200:
+                 PrettyOutput.print(f"Failed to delete chat: HTTP {response.status_code}", OutputType.ERROR)
+                 return False
+             self.request_id = ""
+             self.session_id = ""
+             self.msg_id = ""
+             return True
+         except Exception as e:
+             PrettyOutput.print(f"Error deleting chat: {str(e)}", OutputType.ERROR)
+             return False
+
+     def set_system_message(self, message: str):
+         """Set system message
+
+         Args:
+             message: System message to set
+         """
+         self.system_message = message
+
+     def get_model_list(self) -> List[Tuple[str, str]]:
+         """Get available model list
+
+         Returns:
+             List[Tuple[str, str]]: List of (model_id, model_name) tuples
+         """
+         return [
+             ("Normal", "Normal"),
+             ("Thinking", "Thinking"),
+             ("Deep-Research", "Deep-Research"),
+             ("Code-Chat", "Code-Chat"),
+         ]
+
+     def support_web(self) -> bool:
+         """Check if platform supports web functionality
+
+         Returns:
+             bool: True if web is supported, False otherwise
+         """
+         return True
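
For orientation, the following is a minimal, hypothetical usage sketch of the TongyiPlatform class added above; it is not part of the package. It assumes only what the diff shows: the module path jarvis/jarvis_platform/tongyi.py, that TONGYI_COOKIES is read from the environment in __init__, that set_model_name() selects one of the documented models, and that chat() is a generator yielding response chunks (with reasoning wrapped in <think> tags when the Thinking model is used).

```python
# Hypothetical usage sketch based on the diff above; not shipped with the package.
import os

from jarvis.jarvis_platform.tongyi import TongyiPlatform

# Assumption: the Cookie header value has been copied from the browser beforehand.
os.environ["TONGYI_COOKIES"] = "<Cookie header value from www.tongyi.com>"

platform = TongyiPlatform()          # __init__ reads TONGYI_COOKIES via os.getenv
platform.set_model_name("Thinking")  # documented models: Normal, Thinking, Deep-Research, Code-Chat

# chat() is a generator; stream the reply chunk by chunk.
for chunk in platform.chat("Summarize what you can do in one sentence."):
    print(chunk, end="", flush=True)

platform.delete_chat()  # deletes the remote session and resets session/request/msg ids
```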
jarvis/jarvis_platform/yuanbao.py CHANGED
@@ -430,6 +430,8 @@ class YuanbaoPlatform(BasePlatform):
  if hasattr(response, 'text'):
  error_msg += f", 响应: {response.text}"
  raise Exception(error_msg)
+
+ in_thinking = False

  # 处理SSE流响应
  for line in response.iter_lines():
@@ -446,12 +448,18 @@ class YuanbaoPlatform(BasePlatform):

  # 处理文本类型的消息
  if data.get("type") == "text":
+ if in_thinking:
+ yield "</think>\n"
+ in_thinking = False
  msg = data.get("msg", "")
  if msg:
  yield msg

  # 处理思考中的消息
  elif data.get("type") == "think":
+ if not in_thinking:
+ yield "<think>\n"
+ in_thinking = True
  think_content = data.get("content", "")
  if think_content:
  yield think_content
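
Both the yuanbao change above and the new tongyi platform wrap streamed reasoning in literal <think> … </think> markers. Below is a small, hypothetical consumer-side sketch (not from the package) of separating reasoning from the visible answer, assuming the markers arrive complete and balanced in the joined stream.

```python
# Hypothetical helper; assumes reasoning is delimited by literal "<think>"/"</think>"
# markers somewhere in the streamed text, as the hunks above emit them.
def split_thinking(stream):
    """Join a streamed reply and return (reasoning, answer) as two strings."""
    text = "".join(stream)
    reasoning_parts, answer_parts = [], []
    in_think = False
    # Normalize both markers to one delimiter so alternating segments can be scanned.
    for piece in text.replace("</think>", "<think>").split("<think>"):
        (reasoning_parts if in_think else answer_parts).append(piece)
        in_think = not in_think
    return "".join(reasoning_parts), "".join(answer_parts)

# Example: reasoning, answer = split_thinking(platform.chat("..."))
```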
jarvis_ai_assistant-0.1.183.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: jarvis-ai-assistant
- Version: 0.1.182
+ Version: 0.1.183
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
  Home-page: https://github.com/skyfireitdiy/Jarvis
  Author: skyfire
@@ -153,6 +153,35 @@ Kimi API Key获取方式:
  删除Bearer前缀,剩下的内容就是Kimi API Key。


+ #### 通义千问
+ ```yaml
+ JARVIS_PLATFORM: tongyi
+ JARVIS_MODEL: Normal # 可选模型:Normal, Thinking, Deep-Research, Code-Chat
+ JARVIS_THINKING_PLATFORM: tongyi
+ JARVIS_THINKING_MODEL: Thinking
+
+ ENV:
+ TONGYI_COOKIES: <通义千问cookies>
+ ```
+
+ 通义千问cookies获取方式:
+
+ ![通义千问cookies获取方式](docs/images/tongyi.png)
+
+ 1. 登录[通义千问](https://www.tongyi.com/qianwen)
+ 2. 打开浏览器开发者工具(F12)
+ 3. 在Network标签页中找到任意请求
+ 4. 在请求头中找到Cookie字段,复制其值
+
+ 配置说明:
+ 1. `TONGYI_COOKIES`: 必填,用于身份验证
+ 2. 支持的模型:
+ - `Normal`: 标准对话模型
+ - `Thinking`: 深度思考模型
+ - `Deep-Research`: 深度研究模型
+ - `Code-Chat`: 代码对话模型
+
+
  #### OpenAI
  ```yaml
  JARVIS_PLATFORM: openai
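
The README section added above walks through copying the Cookie header from the browser. The sketch below is a quick, hypothetical sanity check (not part of the package) that the copied value is accepted, reusing the uploadToken endpoint and headers that the new TongyiPlatform calls internally.

```python
# Hypothetical check; the endpoint, headers, and "success" field are taken from
# the tongyi.py diff above. Run after exporting TONGYI_COOKIES.
import os

import requests

resp = requests.post(
    "https://api.tongyi.com/dialog/uploadToken",
    headers={
        "Content-Type": "application/json",
        "Origin": "https://www.tongyi.com",
        "Referer": "https://www.tongyi.com/qianwen",
        "Cookie": os.getenv("TONGYI_COOKIES", ""),
    },
    json={},
    timeout=10,
)
print("Cookie accepted" if resp.ok and resp.json().get("success") else "Cookie rejected")
```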
jarvis_ai_assistant-0.1.183.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- jarvis/__init__.py,sha256=0L3xbrhIunJRm7imt5CFTJQk-Ck5Or4XgYyygPYg_Dg,74
+ jarvis/__init__.py,sha256=KysQNyO0AUjUMLzRISa3Wf1q9ffBDepVKiOh2ztirYg,74
  jarvis/jarvis_agent/__init__.py,sha256=AxT_2n-IQkbtoQlAS3SJ0tsvcUenWD7_Xrc-RZZCWiA,30352
  jarvis/jarvis_agent/builtin_input_handler.py,sha256=f4DaEHPakXcAbgykFP-tiOQP6fh_yGFlZx_h91_j2tQ,1529
  jarvis/jarvis_agent/file_input_handler.py,sha256=LDNXoTtyjhyBmfzDnAdbWZ2BWdu4q-r6thSKRK8Iwjk,4187
@@ -49,10 +49,11 @@ jarvis/jarvis_multi_agent/main.py,sha256=KeGv8sdpSgTjW6VE4-tQ8BWDC_a0aE_4R3OqzPB
  jarvis/jarvis_platform/__init__.py,sha256=0YnsUoM4JkIBOtImFdjfuDbrqQZT3dEaAwSJ62DrpCc,104
  jarvis/jarvis_platform/base.py,sha256=HbE7BVh8F5F38rr9K9281h6Q11XyWgDGzyPXe-e_Th0,7086
  jarvis/jarvis_platform/human.py,sha256=xwaTZ1zdrAYZZFXxkbHvUdECwCGsic0kgAFUncUr45g,2567
- jarvis/jarvis_platform/kimi.py,sha256=k0dYwuRf-snmJF206D7inahUcZUZG0VqOmhphj09NzQ,11969
+ jarvis/jarvis_platform/kimi.py,sha256=b3EpnmHseZwrfCc8sMmvwLJ6Jg2FWf8ATItSDz5G3eQ,11978
  jarvis/jarvis_platform/openai.py,sha256=VyX3bR1rGxrJdWOtUBf8PgSL9n06KaNbOewL1urzOnk,4741
- jarvis/jarvis_platform/registry.py,sha256=CxAELjDrc-KKPPKdP71E_qaFisfQztvwc_tdf3WpOt8,7863
- jarvis/jarvis_platform/yuanbao.py,sha256=vKb6oy5cTMQCwqcqpaVur7BFtQwX1Cv-mYnswP-L4mA,20291
+ jarvis/jarvis_platform/registry.py,sha256=3djxE8AB4gwrdAOvRSL0612Rt_CcsaZhzZ0_oXHu6xk,7820
+ jarvis/jarvis_platform/tongyi.py,sha256=m44aZHZ1oCbYdlSMuG3qYPFZbHW4e3VlaFZ2i3H7xrE,16927
+ jarvis/jarvis_platform/yuanbao.py,sha256=FDi-D9Jnw_MiwI0skPNMYz874o6GhWhdNRdZg-ECoUA,20632
  jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jarvis/jarvis_platform_manager/main.py,sha256=OXWj18SqiV0Gl75YT6D9wspCCB4Nes04EY-ShI9kbpU,25677
  jarvis/jarvis_smart_shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -90,9 +91,9 @@ jarvis/jarvis_utils/methodology.py,sha256=A8pE8ZqNHvGKaDO4TFtg7Oz-hAXPBcQfhmSPWM
  jarvis/jarvis_utils/output.py,sha256=QboL42GtG_dnvd1O64sl8o72mEBhXNRADPXQMXgDE7Q,9661
  jarvis/jarvis_utils/tag.py,sha256=YJHmuedLb7_AiqvKQetHr4R1FxyzIh7HN0RRkWMmYbU,429
  jarvis/jarvis_utils/utils.py,sha256=atSK-2cUr7_tOIFsQzJnuQxebi7aFN4jtmaoXEaV4jM,10692
- jarvis_ai_assistant-0.1.182.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
- jarvis_ai_assistant-0.1.182.dist-info/METADATA,sha256=Op8V2ma_T4C2rrhuEXgSn0qN7lZEfeiozSES036fdak,15059
- jarvis_ai_assistant-0.1.182.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
- jarvis_ai_assistant-0.1.182.dist-info/entry_points.txt,sha256=Gy3DOP1PYLMK0GCj4rrP_9lkOyBQ39EK_lKGUSwn41E,869
- jarvis_ai_assistant-0.1.182.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
- jarvis_ai_assistant-0.1.182.dist-info/RECORD,,
+ jarvis_ai_assistant-0.1.183.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
+ jarvis_ai_assistant-0.1.183.dist-info/METADATA,sha256=RRkBRiDEeBnEfQQFAufn9jE2RV7BA90P5o5G_PnJUp0,15836
+ jarvis_ai_assistant-0.1.183.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+ jarvis_ai_assistant-0.1.183.dist-info/entry_points.txt,sha256=Gy3DOP1PYLMK0GCj4rrP_9lkOyBQ39EK_lKGUSwn41E,869
+ jarvis_ai_assistant-0.1.183.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
+ jarvis_ai_assistant-0.1.183.dist-info/RECORD,,