jarvis-ai-assistant 0.1.115__py3-none-any.whl → 0.1.116__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. jarvis/__init__.py +1 -1
  2. jarvis/{agent.py → jarvis_agent/__init__.py} +35 -159
  3. jarvis/jarvis_agent/output_handler.py +23 -0
  4. jarvis/jarvis_code_agent/code_agent.py +11 -11
  5. jarvis/jarvis_code_agent/file_select.py +28 -7
  6. jarvis/jarvis_code_agent/patch.py +25 -2
  7. jarvis/jarvis_code_agent/relevant_files.py +1 -1
  8. jarvis/jarvis_codebase/main.py +2 -2
  9. jarvis/jarvis_lsp/cpp.py +1 -1
  10. jarvis/jarvis_lsp/go.py +1 -1
  11. jarvis/jarvis_lsp/registry.py +1 -1
  12. jarvis/jarvis_lsp/rust.py +1 -1
  13. jarvis/jarvis_multi_agent/__init__.py +147 -0
  14. jarvis/jarvis_platform/ai8.py +2 -2
  15. jarvis/jarvis_platform/base.py +14 -4
  16. jarvis/jarvis_platform/kimi.py +2 -2
  17. jarvis/jarvis_platform/ollama.py +1 -1
  18. jarvis/jarvis_platform/openai.py +1 -1
  19. jarvis/jarvis_platform/oyi.py +1 -1
  20. jarvis/jarvis_platform/registry.py +1 -1
  21. jarvis/jarvis_platform_manager/main.py +422 -6
  22. jarvis/jarvis_platform_manager/openai_test.py +139 -0
  23. jarvis/jarvis_rag/main.py +2 -2
  24. jarvis/jarvis_smart_shell/main.py +17 -16
  25. jarvis/jarvis_tools/ask_codebase.py +1 -1
  26. jarvis/jarvis_tools/ask_user.py +1 -1
  27. jarvis/jarvis_tools/chdir.py +1 -1
  28. jarvis/jarvis_tools/code_review.py +3 -3
  29. jarvis/jarvis_tools/create_code_agent.py +1 -1
  30. jarvis/jarvis_tools/create_sub_agent.py +2 -2
  31. jarvis/jarvis_tools/execute_shell.py +1 -1
  32. jarvis/jarvis_tools/file_operation.py +16 -14
  33. jarvis/jarvis_tools/git_commiter.py +2 -2
  34. jarvis/jarvis_tools/methodology.py +1 -1
  35. jarvis/jarvis_tools/rag.py +1 -1
  36. jarvis/jarvis_tools/read_code.py +19 -8
  37. jarvis/jarvis_tools/read_webpage.py +1 -1
  38. jarvis/jarvis_tools/registry.py +53 -6
  39. jarvis/jarvis_tools/search.py +1 -1
  40. jarvis/jarvis_tools/select_code_files.py +1 -1
  41. jarvis/{utils.py → jarvis_utils/__init__.py} +69 -53
  42. {jarvis_ai_assistant-0.1.115.dist-info → jarvis_ai_assistant-0.1.116.dist-info}/METADATA +1 -1
  43. jarvis_ai_assistant-0.1.116.dist-info/RECORD +64 -0
  44. {jarvis_ai_assistant-0.1.115.dist-info → jarvis_ai_assistant-0.1.116.dist-info}/WHEEL +1 -1
  45. {jarvis_ai_assistant-0.1.115.dist-info → jarvis_ai_assistant-0.1.116.dist-info}/entry_points.txt +1 -2
  46. jarvis/jarvis_dev/main.py +0 -664
  47. jarvis/multi_agent.py +0 -76
  48. jarvis/utils/date_utils.py +0 -19
  49. jarvis_ai_assistant-0.1.115.dist-info/RECORD +0 -64
  50. {jarvis_ai_assistant-0.1.115.dist-info → jarvis_ai_assistant-0.1.116.dist-info}/LICENSE +0 -0
  51. {jarvis_ai_assistant-0.1.115.dist-info → jarvis_ai_assistant-0.1.116.dist-info}/top_level.txt +0 -0
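The headline change is a package-layout refactor: jarvis/utils.py becomes the jarvis/jarvis_utils package and jarvis/agent.py becomes jarvis/jarvis_agent, so any downstream code importing the old module paths must be updated. A minimal sketch of the migration, using only symbols that appear in the hunks below:

# 0.1.115 and earlier
from jarvis.utils import PrettyOutput, OutputType, init_env, get_multiline_input

# 0.1.116
from jarvis.jarvis_utils import PrettyOutput, OutputType, init_env, get_multiline_input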
jarvis/jarvis_platform_manager/main.py CHANGED
@@ -1,5 +1,17 @@
  from jarvis.jarvis_platform.registry import PlatformRegistry
- from jarvis.utils import PrettyOutput, OutputType, init_env, get_multiline_input
+ from jarvis.jarvis_utils import PrettyOutput, OutputType, init_env, get_multiline_input
+ import asyncio
+ from fastapi import FastAPI, HTTPException
+ from fastapi.responses import StreamingResponse
+ from pydantic import BaseModel, Field
+ from typing import List, Dict, Any, Optional
+ import uvicorn
+ import io
+ from contextlib import redirect_stdout
+ import json
+ import os
+ from datetime import datetime
+ from fastapi.middleware.cors import CORSMiddleware
 
  def list_platforms():
      """List all supported platforms and models"""
@@ -93,17 +105,412 @@ def chat_with_model(platform_name: str, model_name: str):
          except:
              pass
 
- def info_command(args):
-     """Process info subcommand"""
-     list_platforms()
+ # Helper function for platform and model validation
+ def validate_platform_model(args):
+     if not args.platform or not args.model:
+         PrettyOutput.print("Please specify a platform and model. Use 'jarvis info' to view available platforms and models.", OutputType.ERROR)
+         return False
+     return True
 
  def chat_command(args):
      """Process chat subcommand"""
-     if not args.platform or not args.model:
-         PrettyOutput.print("Please specify a platform and model. Use 'jarvis info' to view available platforms and models.", OutputType.ERROR)
+     if not validate_platform_model(args):
          return
      chat_with_model(args.platform, args.model)
 
+ def info_command(args):
+     """Process info subcommand"""
+     list_platforms()
+
+ # New models for OpenAI-compatible API
+ class ChatMessage(BaseModel):
+     role: str
+     content: str
+
+ class ChatCompletionRequest(BaseModel):
+     model: str
+     messages: List[ChatMessage]
+     stream: bool = False
+     temperature: Optional[float] = None
+     max_tokens: Optional[int] = None
+
+ class ChatCompletionChoice(BaseModel):
+     index: int
+     message: ChatMessage
+     finish_reason: str = "stop"
+
+ class ChatCompletionChunk(BaseModel):
+     id: str
+     object: str = "chat.completion.chunk"
+     created: int
+     model: str
+     choices: List[Dict[str, Any]]
+
+ class ChatCompletionResponse(BaseModel):
+     id: str
+     object: str = "chat.completion"
+     created: int
+     model: str
+     choices: List[ChatCompletionChoice]
+     usage: Dict[str, int] = Field(default_factory=lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0})
+
+ def service_command(args):
+     """Process service subcommand - start OpenAI-compatible API server"""
+     import time
+     import uuid
+     import json
+     import os
+     from datetime import datetime
+
+     host = args.host
+     port = args.port
+     default_platform = args.platform
+     default_model = args.model
+
+     # Create logs directory if it doesn't exist
+     logs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
+     os.makedirs(logs_dir, exist_ok=True)
+
+     app = FastAPI(title="Jarvis API Server")
+
+     # Add CORS middleware
+     app.add_middleware(
+         CORSMiddleware,
+         allow_origins=["*"],  # Allow all origins; production deployments should be stricter
+         allow_credentials=True,
+         allow_methods=["*"],  # Allow all methods
+         allow_headers=["*"],  # Allow all headers
+     )
+
+     registry = PlatformRegistry.get_global_platform_registry()
+
+     PrettyOutput.print(f"Starting Jarvis API server on {host}:{port}", OutputType.SUCCESS)
+     PrettyOutput.print("This server provides an OpenAI-compatible API", OutputType.INFO)
+
+     if default_platform and default_model:
+         PrettyOutput.print(f"Default platform: {default_platform}, model: {default_model}", OutputType.INFO)
+
+     PrettyOutput.print("Available platforms:", OutputType.INFO)
+
+     # Print available platforms and models
+     platforms = registry.get_available_platforms()
+     list_platforms()
+
+     # Platform and model cache
+     platform_instances = {}
+
+     # Chat history storage
+     chat_histories = {}
+
+     def get_platform_instance(platform_name: str, model_name: str):
+         """Get or create a platform instance"""
+         key = f"{platform_name}:{model_name}"
+         if key not in platform_instances:
+             platform = registry.create_platform(platform_name)
+             if not platform:
+                 raise HTTPException(status_code=400, detail=f"Platform {platform_name} not found")
+
+             platform.set_model_name(model_name)
+             platform.set_suppress_output(True)  # Suppress console output in server mode
+             platform_instances[key] = platform
+
+         return platform_instances[key]
+
+     def log_conversation(conversation_id, messages, model, response=None):
+         """Log conversation to file in plain text format."""
+         timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+         log_file = os.path.join(logs_dir, f"conversation_{conversation_id}_{timestamp}.txt")
+
+         with open(log_file, "w", encoding="utf-8") as f:
+             f.write(f"Conversation ID: {conversation_id}\n")
+             f.write(f"Timestamp: {timestamp}\n")
+             f.write(f"Model: {model}\n\n")
+             f.write("Messages:\n")
+             for message in messages:
+                 f.write(f"{message['role']}: {message['content']}\n")
+             if response:
+                 f.write(f"\nResponse:\n{response}\n")
+
+         PrettyOutput.print(f"Conversation logged to {log_file}", OutputType.INFO)
+
+     @app.get("/v1/models")
+     async def list_models():
+         """List available models for the specified platform in OpenAI-compatible format"""
+         model_list = []
+
+         # Only get models for the currently set platform
+         if default_platform:
+             try:
+                 platform = registry.create_platform(default_platform)
+                 if platform:
+                     models = platform.get_model_list()
+                     if models:
+                         for model_name, _ in models:
+                             full_name = f"{default_platform}/{model_name}"
+                             model_list.append({
+                                 "id": full_name,
+                                 "object": "model",
+                                 "created": int(time.time()),
+                                 "owned_by": default_platform
+                             })
+             except Exception as e:
+                 print(f"Error getting models for {default_platform}: {str(e)}")
+
+         # Return model list
+         return {"object": "list", "data": model_list}
+
+     @app.post("/v1/chat/completions")
+     @app.options("/v1/chat/completions")  # Add OPTIONS method support
+     async def create_chat_completion(request: ChatCompletionRequest):
+         """Create a chat completion in OpenAI-compatible format"""
+         model = request.model
+         messages = request.messages
+         stream = request.stream
+
+         # Generate a conversation ID if this is a new conversation
+         conversation_id = str(uuid.uuid4())
+
+         # Extract platform and model name
+         if "/" in model:
+             platform_name, model_name = model.split("/", 1)
+         else:
+             # Use default platform and model if not specified
+             if default_platform and default_model:
+                 platform_name, model_name = default_platform, default_model
+             else:
+                 platform_name, model_name = "oyi", model  # Default to OYI platform
+
+         # Get platform instance
+         platform = get_platform_instance(platform_name, model_name)
+
+         # Convert messages to text format for the platform
+         message_text = ""
+         for msg in messages:
+             role = msg.role
+             content = msg.content
+
+             if role == "system":
+                 message_text += f"System: {content}\n\n"
+             elif role == "user":
+                 message_text += f"User: {content}\n\n"
+             elif role == "assistant":
+                 message_text += f"Assistant: {content}\n\n"
+
+         # Store messages in chat history
+         chat_histories[conversation_id] = {
+             "model": model,
+             "messages": [{"role": m.role, "content": m.content} for m in messages]
+         }
+
+         # Log the conversation
+         log_conversation(conversation_id,
+                          [{"role": m.role, "content": m.content} for m in messages],
+                          model)
+
+         if stream:
+             # Return streaming response
+             return StreamingResponse(
+                 stream_chat_response(platform, message_text, model),
+                 media_type="text/event-stream"
+             )
+         else:
+             # Get chat response
+             try:
+                 response_text = platform.chat_until_success(message_text)
+
+                 # Create response in OpenAI format
+                 completion_id = f"chatcmpl-{str(uuid.uuid4())}"
+
+                 # Update chat history with response
+                 if conversation_id in chat_histories:
+                     chat_histories[conversation_id]["messages"].append({
+                         "role": "assistant",
+                         "content": response_text
+                     })
+
+                     # Log the conversation with response
+                     log_conversation(conversation_id,
+                                      chat_histories[conversation_id]["messages"],
+                                      model,
+                                      response_text)
+
+                 return {
+                     "id": completion_id,
+                     "object": "chat.completion",
+                     "created": int(time.time()),
+                     "model": model,
+                     "choices": [
+                         {
+                             "index": 0,
+                             "message": {
+                                 "role": "assistant",
+                                 "content": response_text
+                             },
+                             "finish_reason": "stop"
+                         }
+                     ],
+                     "usage": {
+                         "prompt_tokens": len(message_text) // 4,  # Rough estimate
+                         "completion_tokens": len(response_text) // 4,  # Rough estimate
+                         "total_tokens": (len(message_text) + len(response_text)) // 4  # Rough estimate
+                     }
+                 }
+             except Exception as e:
+                 raise HTTPException(status_code=500, detail=str(e))
+
+     async def stream_chat_response(platform, message, model_name):
+         """Stream chat response in OpenAI-compatible format"""
+         import time
+         import json
+         import uuid
+         from datetime import datetime
+         import os
+
+         completion_id = f"chatcmpl-{str(uuid.uuid4())}"
+         created_time = int(time.time())
+         conversation_id = str(uuid.uuid4())
+
+         # Create logs directory if it doesn't exist
+         logs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
+         os.makedirs(logs_dir, exist_ok=True)
+
+         # Format of the first streamed chunk
+         initial_data = {
+             'id': completion_id,
+             'object': 'chat.completion.chunk',
+             'created': created_time,
+             'model': model_name,
+             'choices': [{
+                 'index': 0,
+                 'delta': {'role': 'assistant'},
+                 'finish_reason': None
+             }]
+         }
+         res = json.dumps(initial_data)
+         yield f"data: {res}\n\n"
+
+         try:
+             # Get the chat response directly instead of trying to capture stdout
+             response = platform.chat_until_success(message)
+
+             # Track the full response
+             full_response = ""
+
+             # If there is a response, send it in chunks
+             if response:
+                 # Split into small chunks for a better streaming experience
+                 chunk_size = 4  # Characters per chunk
+                 for i in range(0, len(response), chunk_size):
+                     chunk = response[i:i+chunk_size]
+                     full_response += chunk
+
+                     # Build and send the chunk
+                     chunk_data = {
+                         'id': completion_id,
+                         'object': 'chat.completion.chunk',
+                         'created': created_time,
+                         'model': model_name,
+                         'choices': [{
+                             'index': 0,
+                             'delta': {'content': chunk},
+                             'finish_reason': None
+                         }]
+                     }
+
+                     yield f"data: {json.dumps(chunk_data)}\n\n"
+
+                     # Small delay to simulate streaming
+                     await asyncio.sleep(0.01)
+             else:
+                 # If there is no output, send a placeholder content chunk
+                 chunk_data = {
+                     'id': completion_id,
+                     'object': 'chat.completion.chunk',
+                     'created': created_time,
+                     'model': model_name,
+                     'choices': [{
+                         'index': 0,
+                         'delta': {'content': "No response from model."},
+                         'finish_reason': None
+                     }]
+                 }
+                 yield f"data: {json.dumps(chunk_data)}\n\n"
+                 full_response = "No response from model."
+
+             # Format of the final streamed chunk
+             final_data = {
+                 'id': completion_id,
+                 'object': 'chat.completion.chunk',
+                 'created': created_time,
+                 'model': model_name,
+                 'choices': [{
+                     'index': 0,
+                     'delta': {},
+                     'finish_reason': 'stop'
+                 }]
+             }
+             yield f"data: {json.dumps(final_data)}\n\n"
+
+             # Send the [DONE] marker
+             yield "data: [DONE]\n\n"
+
+             # Log the conversation to a file
+             timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+             log_file = os.path.join(logs_dir, f"stream_conversation_{conversation_id}_{timestamp}.json")
+
+             log_data = {
+                 "conversation_id": conversation_id,
+                 "timestamp": timestamp,
+                 "model": model_name,
+                 "message": message,
+                 "response": full_response
+             }
+
+             with open(log_file, "w", encoding="utf-8") as f:
+                 json.dump(log_data, f, ensure_ascii=False, indent=2)
+
+             PrettyOutput.print(f"Stream conversation logged to {log_file}", OutputType.INFO)
+
+         except Exception as e:
+             # Send an error message
+             error_msg = f"Error: {str(e)}"
+             print(f"Streaming error: {error_msg}")
+
+             res = json.dumps({
+                 'id': completion_id,
+                 'object': 'chat.completion.chunk',
+                 'created': created_time,
+                 'model': model_name,
+                 'choices': [{
+                     'index': 0,
+                     'delta': {'content': error_msg},
+                     'finish_reason': 'stop'
+                 }]
+             })
+             yield f"data: {res}\n\n"
+             yield f"data: {json.dumps({'error': {'message': error_msg, 'type': 'server_error'}})}\n\n"
+             yield "data: [DONE]\n\n"
+
+             # Log the error to a file
+             timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+             log_file = os.path.join(logs_dir, f"stream_error_{conversation_id}_{timestamp}.json")
+
+             log_data = {
+                 "conversation_id": conversation_id,
+                 "timestamp": timestamp,
+                 "model": model_name,
+                 "message": message,
+                 "error": error_msg
+             }
+
+             with open(log_file, "w", encoding="utf-8") as f:
+                 json.dump(log_data, f, ensure_ascii=False, indent=2)
+
+             PrettyOutput.print(f"Stream error logged to {log_file}", OutputType.ERROR)
+
+     # Run the server
+     uvicorn.run(app, host=host, port=port)
+
  def main():
      """Main function"""
      import argparse
@@ -121,12 +528,21 @@ def main():
      chat_parser.add_argument('--platform', '-p', help='Specify the platform to use')
      chat_parser.add_argument('--model', '-m', help='Specify the model to use')
 
+     # service subcommand
+     service_parser = subparsers.add_parser('service', help='Start an OpenAI-compatible API service')
+     service_parser.add_argument('--host', default='127.0.0.1', help='Service host address (default: 127.0.0.1)')
+     service_parser.add_argument('--port', type=int, default=8000, help='Service port (default: 8000)')
+     service_parser.add_argument('--platform', '-p', help='Default platform, used when the client does not specify one')
+     service_parser.add_argument('--model', '-m', help='Default model, used when the client does not specify one')
+
      args = parser.parse_args()
 
      if args.command == 'info':
          info_command(args)
      elif args.command == 'chat':
          chat_command(args)
+     elif args.command == 'service':
+         service_command(args)
      else:
          parser.print_help()
 
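For reference, the stream_chat_response generator above emits standard OpenAI-style server-sent events: one chunk announcing the assistant role, 4-character content deltas, a final empty delta with finish_reason "stop", then the [DONE] sentinel. A stream looks roughly like the following transcript (the id, timestamp, and model values are illustrative placeholders):

data: {"id": "chatcmpl-1234", "object": "chat.completion.chunk", "created": 1700000000, "model": "oyi/example-model", "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": null}]}

data: {"id": "chatcmpl-1234", "object": "chat.completion.chunk", "created": 1700000000, "model": "oyi/example-model", "choices": [{"index": 0, "delta": {"content": "Hell"}, "finish_reason": null}]}

data: {"id": "chatcmpl-1234", "object": "chat.completion.chunk", "created": 1700000000, "model": "oyi/example-model", "choices": [{"index": 0, "delta": {"content": "o!"}, "finish_reason": null}]}

data: {"id": "chatcmpl-1234", "object": "chat.completion.chunk", "created": 1700000000, "model": "oyi/example-model", "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]}

data: [DONE]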
jarvis/jarvis_platform_manager/openai_test.py ADDED
@@ -0,0 +1,139 @@
+ #!/usr/bin/env python3
+ """
+ Test script for Jarvis OpenAI-compatible API service.
+ """
+
+ import argparse
+ import os
+ import sys
+ from openai import OpenAI
+
+ def test_chat(api_base, model, stream=False, interactive=False):
+     """Test chat completion with the API."""
+     client = OpenAI(
+         api_key="dummy-key",  # Not actually used by our service
+         base_url=f"{api_base}/v1"
+     )
+
+     print(f"Testing chat with model: {model}, stream={stream}")
+     print("=" * 50)
+
+     try:
+         # First, list available models
+         print("Available models:")
+         models = client.models.list()
+         for m in models.data:
+             print(f" - {m.id}")
+         print()
+
+         if interactive:
+             # Interactive chat mode
+             messages = [
+                 {"role": "system", "content": "You are a helpful assistant."}
+             ]
+
+             print("Interactive chat mode. Type 'exit' to quit.")
+             print("=" * 50)
+
+             while True:
+                 # Get user input
+                 user_input = input("You: ")
+                 if user_input.lower() in ['exit', 'quit', 'bye']:
+                     break
+
+                 # Add user message to history
+                 messages.append({"role": "user", "content": user_input})
+
+                 # Get response
+                 print("Assistant: ", end="", flush=True)
+
+                 if stream:
+                     response = client.chat.completions.create(
+                         model=model,
+                         messages=messages,  # type: ignore
+                         stream=True
+                     )  # type: ignore
+
+                     # Process the streaming response
+                     assistant_response = ""
+                     for chunk in response:
+                         if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
+                             content = chunk.choices[0].delta.content
+                             assistant_response += content
+                             print(content, end="", flush=True)
+                     print()
+                 else:
+                     response = client.chat.completions.create(
+                         model=model,
+                         messages=messages  # type: ignore
+                     )
+                     assistant_response = response.choices[0].message.content
+                     print(assistant_response)
+
+                 # Add assistant response to history
+                 messages.append({"role": "assistant", "content": assistant_response})  # type: ignore
+                 print()
+
+             print("=" * 50)
+             print("Chat session ended.")
+
+         else:
+             # Single request mode
+             print("Sending chat request...")
+             messages = [
+                 {"role": "system", "content": "You are a helpful assistant."},
+                 {"role": "user", "content": "Hello! Tell me a short joke."}
+             ]
+
+             if stream:
+                 print("Response (streaming):")
+
+                 # Use the OpenAI client for streaming
+                 response = client.chat.completions.create(
+                     model=model,
+                     messages=messages,  # type: ignore
+                     stream=True
+                 )  # type: ignore
+
+                 # Process the streaming response
+                 full_content = ""
+                 for chunk in response:
+                     if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
+                         content = chunk.choices[0].delta.content
+                         full_content += content
+                         print(content, end="", flush=True)
+
+                 print("\n")
+                 print(f"Full response: {full_content}")
+             else:
+                 print("Response:")
+                 response = client.chat.completions.create(
+                     model=model,
+                     messages=messages  # type: ignore
+                 )
+                 print(response.choices[0].message.content)
+
+             print("=" * 50)
+             print("Test completed successfully!")
+
+     except Exception as e:
+         print(f"Error: {str(e)}")
+         import traceback
+         traceback.print_exc()
+         return 1
+
+     return 0
+
+ def main():
+     parser = argparse.ArgumentParser(description="Test Jarvis OpenAI-compatible API")
+     parser.add_argument("--api-base", default="http://localhost:8000", help="API base URL")
+     parser.add_argument("--model", default="gpt-3.5-turbo", help="Model to test (default: gpt-3.5-turbo)")
+     parser.add_argument("--stream", action="store_true", help="Test streaming mode")
+     parser.add_argument("--interactive", "-i", action="store_true", help="Interactive chat mode")
+
+     args = parser.parse_args()
+
+     return test_chat(args.api_base, args.model, args.stream, args.interactive)
+
+ if __name__ == "__main__":
+     sys.exit(main())
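Because the service speaks the OpenAI wire protocol, the stock openai client works against it unchanged; openai_test.py above is the full harness, but the core usage distills to a few lines. A minimal sketch, assuming the service is running on its default 127.0.0.1:8000 (the model id is a placeholder in the platform/model form the server parses; plain names fall back to the service's --platform/--model defaults):

from openai import OpenAI

# Point the stock OpenAI client at the local Jarvis service.
client = OpenAI(
    api_key="dummy-key",  # the service does not validate the key
    base_url="http://127.0.0.1:8000/v1",
)

# "platform/model" selects the backend explicitly; this id is a placeholder.
response = client.chat.completions.create(
    model="oyi/example-model",
    messages=[{"role": "user", "content": "Hello! Tell me a short joke."}],
)
print(response.choices[0].message.content)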
jarvis/jarvis_rag/main.py CHANGED
@@ -3,8 +3,8 @@ import numpy as np
  import faiss
  from typing import List, Tuple, Optional, Dict
  import pickle
- from jarvis.utils import OutputType, PrettyOutput, get_context_token_count, get_embedding, get_embedding_batch, get_file_md5, get_max_token_count, get_max_paragraph_length, get_min_paragraph_length, get_thread_count, init_gpu_config, load_embedding_model
- from jarvis.utils import init_env
+ from jarvis.jarvis_utils import OutputType, PrettyOutput, get_context_token_count, get_embedding, get_embedding_batch, get_file_md5, get_max_token_count, get_max_paragraph_length, get_min_paragraph_length, get_thread_count, init_gpu_config, load_embedding_model
+ from jarvis.jarvis_utils import init_env
  from dataclasses import dataclass
  from tqdm import tqdm
  import fitz  # PyMuPDF for PDF files
jarvis/jarvis_smart_shell/main.py CHANGED
@@ -8,7 +8,7 @@ from yaspin import yaspin # type: ignore
  from yaspin.spinners import Spinners  # type: ignore
 
  from jarvis.jarvis_platform.registry import PlatformRegistry
- from jarvis.utils import PrettyOutput, OutputType, get_shell_name, init_env
+ from jarvis.jarvis_utils import PrettyOutput, OutputType, get_multiline_input, get_shell_name, init_env
 
  def execute_command(command: str) -> None:
      """Show command and allow user to edit, then execute, Ctrl+C to cancel"""
@@ -70,19 +70,14 @@ Remember: Only return the command itself, without any additional content.
          prefix = f"Current path: {current_path}\n"
          prefix += f"Current shell: {shell}\n"
 
-         # Use yaspin to show a Thinking spinner
-         with yaspin(Spinners.dots, text="Thinking", color="yellow") as spinner:
-             # Process the request
-             result = model.chat_until_success(prefix + request)
-
-             # Extract the command
-             if result and isinstance(result, str):
-                 command = result.strip()
-                 spinner.ok("✓")
-                 return command
-
-             spinner.fail("✗")
-             return None
+         result = model.chat_until_success(prefix + request)
+
+         # Extract the command
+         if result and isinstance(result, str):
+             command = result.strip()
+             return command
+
+         return None
 
      except Exception as e:
          PrettyOutput.print(f"Failed to process request: {str(e)}", OutputType.WARNING)
@@ -101,15 +96,21 @@ Example:
      %(prog)s "Find documents modified in the last week"
  """)
 
-     # Add arguments
+     # Make the argument optional and support reading from stdin
      parser.add_argument(
          "request",
-         help="Describe the operation to perform, in natural language"
+         nargs='?',  # Optional positional argument
+         help="Describe the operation to perform (in natural language); read from standard input if not provided"
      )
 
      # Parse arguments
      args = parser.parse_args()
 
+     # Handle standard input
+     if not args.request:
+         # Check whether we are running in an interactive terminal
+         args.request = get_multiline_input(tip="Enter the operation you want to perform:")
+
      # Process the request
      command = process_request(args.request)
 
jarvis/jarvis_tools/ask_codebase.py CHANGED
@@ -1,5 +1,5 @@
  from typing import Dict, Any
- from jarvis.utils import OutputType, PrettyOutput, dont_use_local_model, find_git_root
+ from jarvis.jarvis_utils import OutputType, PrettyOutput, dont_use_local_model, find_git_root
  from jarvis.jarvis_codebase.main import CodeBase
 
  class AskCodebaseTool:
jarvis/jarvis_tools/ask_user.py CHANGED
@@ -1,6 +1,6 @@
  from typing import Dict, Any
  from jarvis.jarvis_tools.base import Tool
- from jarvis.utils import get_multiline_input, PrettyOutput, OutputType
+ from jarvis.jarvis_utils import get_multiline_input, PrettyOutput, OutputType
 
  class AskUserTool:
      name="ask_user"