jarvis-ai-assistant 0.1.207__py3-none-any.whl → 0.1.209__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +63 -103
- jarvis/jarvis_agent/edit_file_handler.py +43 -47
- jarvis/jarvis_agent/jarvis.py +33 -39
- jarvis/jarvis_code_agent/code_agent.py +74 -30
- jarvis/jarvis_code_agent/lint.py +6 -6
- jarvis/jarvis_code_analysis/code_review.py +164 -175
- jarvis/jarvis_data/config_schema.json +0 -25
- jarvis/jarvis_git_utils/git_commiter.py +148 -153
- jarvis/jarvis_methodology/main.py +70 -81
- jarvis/jarvis_platform/base.py +21 -17
- jarvis/jarvis_platform/kimi.py +59 -64
- jarvis/jarvis_platform/tongyi.py +118 -131
- jarvis/jarvis_platform/yuanbao.py +117 -122
- jarvis/jarvis_platform_manager/main.py +102 -502
- jarvis/jarvis_platform_manager/service.py +432 -0
- jarvis/jarvis_smart_shell/main.py +99 -33
- jarvis/jarvis_tools/ask_user.py +0 -1
- jarvis/jarvis_tools/edit_file.py +64 -55
- jarvis/jarvis_tools/file_analyzer.py +17 -28
- jarvis/jarvis_tools/read_code.py +80 -81
- jarvis/jarvis_utils/builtin_replace_map.py +1 -36
- jarvis/jarvis_utils/config.py +13 -48
- jarvis/jarvis_utils/embedding.py +6 -51
- jarvis/jarvis_utils/git_utils.py +93 -43
- jarvis/jarvis_utils/http.py +104 -0
- jarvis/jarvis_utils/methodology.py +12 -17
- jarvis/jarvis_utils/utils.py +186 -63
- {jarvis_ai_assistant-0.1.207.dist-info → jarvis_ai_assistant-0.1.209.dist-info}/METADATA +4 -19
- {jarvis_ai_assistant-0.1.207.dist-info → jarvis_ai_assistant-0.1.209.dist-info}/RECORD +34 -40
- {jarvis_ai_assistant-0.1.207.dist-info → jarvis_ai_assistant-0.1.209.dist-info}/entry_points.txt +1 -1
- jarvis/jarvis_data/huggingface.tar.gz +0 -0
- jarvis/jarvis_dev/main.py +0 -1247
- jarvis/jarvis_tools/chdir.py +0 -72
- jarvis/jarvis_tools/code_plan.py +0 -218
- jarvis/jarvis_tools/create_code_agent.py +0 -95
- jarvis/jarvis_tools/create_sub_agent.py +0 -82
- jarvis/jarvis_tools/file_operation.py +0 -238
- jarvis/jarvis_utils/jarvis_history.py +0 -98
- {jarvis_ai_assistant-0.1.207.dist-info → jarvis_ai_assistant-0.1.209.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.207.dist-info → jarvis_ai_assistant-0.1.209.dist-info}/licenses/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.207.dist-info → jarvis_ai_assistant-0.1.209.dist-info}/top_level.txt +0 -0
jarvis/jarvis_platform_manager/service.py
@@ -0,0 +1,432 @@
+# -*- coding: utf-8 -*-
+"""Jarvis Platform Manager Service Module.
+
+This module provides an OpenAI-compatible API service for the Jarvis platform.
+"""
+import asyncio
+import json
+import os
+import time
+import uuid
+from datetime import datetime
+from typing import Any, Dict, List, Optional, Union
+
+import uvicorn
+from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel, Field
+from starlette.responses import JSONResponse, Response
+
+from jarvis.jarvis_platform.registry import PlatformRegistry
+from jarvis.jarvis_utils.output import OutputType, PrettyOutput
+
+
+class ChatMessage(BaseModel):
+    """Represents a chat message with role and content."""
+
+    role: str
+    content: str
+
+
+class ChatCompletionRequest(BaseModel):
+    """Request model for chat completion."""
+
+    model: str
+    messages: List[ChatMessage]
+    stream: bool = False
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = None
+
+
+class ChatCompletionChoice(BaseModel):
+    """Represents a choice in chat completion response."""
+
+    index: int
+    message: ChatMessage
+    finish_reason: str = "stop"
+
+
+class ChatCompletionResponse(BaseModel):
+    """Response model for chat completion."""
+
+    id: str
+    object: str = "chat.completion"
+    created: int
+    model: str
+    choices: List[ChatCompletionChoice]
+    usage: Dict[str, int] = Field(
+        default_factory=lambda: {
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "total_tokens": 0,
+        }
+    )
+
+
+def start_service(
+    host: str,
+    port: int,
+    default_platform: Optional[str] = None,
+    default_model: Optional[str] = None,
+) -> None:
+    """Start OpenAI-compatible API server."""
+    # Create logs directory if it doesn't exist
+    logs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
+    os.makedirs(logs_dir, exist_ok=True)
+
+    app = FastAPI(title="Jarvis API Server")
+
+    # Add CORS middleware
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=["*"],
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
+    registry = PlatformRegistry.get_global_platform_registry()
+
+    PrettyOutput.print(
+        f"Starting Jarvis API server on {host}:{port}", OutputType.SUCCESS
+    )
+    PrettyOutput.print("This server provides an OpenAI-compatible API", OutputType.INFO)
+
+    if default_platform and default_model:
+        PrettyOutput.print(
+            f"Default platform: {default_platform}, model: {default_model}",
+            OutputType.INFO,
+        )
+
+    PrettyOutput.print("Available platforms:", OutputType.INFO)
+
+    # Platform and model cache
+    platform_instances: Dict[str, Any] = {}
+
+    # Chat history storage
+    chat_histories: Dict[str, Dict[str, Any]] = {}
+
+    def get_platform_instance(platform_name: str, model_name: str) -> Any:
+        """Get or create a platform instance."""
+        key = f"{platform_name}:{model_name}"
+        if key not in platform_instances:
+            platform = registry.create_platform(platform_name)
+            if not platform:
+                raise HTTPException(
+                    status_code=400, detail=f"Platform {platform_name} not found"
+                )
+
+            platform.set_model_name(model_name)
+            platform_instances[key] = platform
+
+        return platform_instances[key]
+
+    def log_conversation(
+        conversation_id: str,
+        messages: List[Dict[str, str]],
+        model: str,
+        response: Optional[str] = None,
+    ) -> None:
+        """Log conversation to file in plain text format."""
+        timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+        log_file = os.path.join(
+            logs_dir, f"conversation_{conversation_id}_{timestamp}.txt"
+        )
+
+        with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
+            f.write(f"Conversation ID: {conversation_id}\n")
+            f.write(f"Timestamp: {timestamp}\n")
+            f.write(f"Model: {model}\n\n")
+            f.write("Messages:\n")
+            for message in messages:
+                f.write(f"{message['role']}: {message['content']}\n")
+            if response:
+                f.write(f"\nResponse:\n{response}\n")
+
+        PrettyOutput.print(f"Conversation logged to {log_file}", OutputType.INFO)
+
+    @app.get("/v1/models")
+    async def list_models() -> Dict[str, Any]:
+        """List available models for the specified platform in OpenAI-compatible format."""
+        model_list = []
+
+        # Only get models for the currently set platform
+        if default_platform:
+            try:
+                platform = registry.create_platform(default_platform)
+                if platform:
+                    models = platform.get_model_list()
+                    if models:
+                        for model_name, _ in models:
+                            full_name = f"{default_platform}/{model_name}"
+                            model_list.append(
+                                {
+                                    "id": full_name,
+                                    "object": "model",
+                                    "created": int(time.time()),
+                                    "owned_by": default_platform,
+                                }
+                            )
+            except Exception as exc:
+                print(f"Error getting models for {default_platform}: {str(exc)}")
+
+        # Return model list
+        return {"object": "list", "data": model_list}
+
+    @app.post("/v1/chat/completions", response_model=None)
+    @app.options("/v1/chat/completions")
+    async def create_chat_completion(
+        request: ChatCompletionRequest,
+    ) -> Response:
+        """Create a chat completion in OpenAI-compatible format.
+
+        Returns:
+            Response: Either a JSONResponse or StreamingResponse depending on the request.
+        """
+        model = request.model
+        messages = request.messages
+        stream = request.stream
+
+        # Generate a conversation ID if this is a new conversation
+        conversation_id = str(uuid.uuid4())
+
+        # Extract platform and model name
+        if "/" in model:
+            platform_name, model_name = model.split("/", 1)
+        else:
+            # Use default platform and model if not specified
+            if default_platform and default_model:
+                platform_name, model_name = default_platform, default_model
+            else:
+                platform_name, model_name = "oyi", model  # Default to OYI platform
+
+        # Get platform instance
+        platform = get_platform_instance(platform_name, model_name)
+
+        # Convert messages to text format for the platform
+        message_text = ""
+        for msg in messages:
+            role = msg.role
+            content = msg.content
+
+            if role == "system":
+                message_text += f"System: {content}\n\n"
+            elif role == "user":
+                message_text += f"User: {content}\n\n"
+            elif role == "assistant":
+                message_text += f"Assistant: {content}\n\n"
+
+        # Store messages in chat history
+        chat_histories[conversation_id] = {
+            "model": model,
+            "messages": [{"role": m.role, "content": m.content} for m in messages],
+        }
+
+        # Log the conversation
+        log_conversation(
+            conversation_id,
+            [{"role": m.role, "content": m.content} for m in messages],
+            model,
+        )
+
+        if stream:
+            # Return streaming response
+            return StreamingResponse(
+                stream_chat_response(platform, message_text, model),
+                media_type="text/event-stream",
+            )
+
+        # Get chat response
+        try:
+            response_text = platform.chat_until_success(message_text)
+
+            # Create response in OpenAI format
+            completion_id = f"chatcmpl-{str(uuid.uuid4())}"
+
+            # Update chat history with response
+            if conversation_id in chat_histories:
+                chat_histories[conversation_id]["messages"].append(
+                    {"role": "assistant", "content": response_text}
+                )
+
+            # Log the conversation with response
+            log_conversation(
+                conversation_id,
+                chat_histories[conversation_id]["messages"],
+                model,
+                response_text,
+            )
+
+            return JSONResponse({
+                "id": completion_id,
+                "object": "chat.completion",
+                "created": int(time.time()),
+                "model": model,
+                "choices": [
+                    {
+                        "index": 0,
+                        "message": {"role": "assistant", "content": response_text},
+                        "finish_reason": "stop",
+                    }
+                ],
+                "usage": {
+                    "prompt_tokens": len(message_text) // 4,
+                    "completion_tokens": len(response_text) // 4,
+                    "total_tokens": (len(message_text) + len(response_text)) // 4,
+                },
+            })
+        except Exception as exc:
+            raise HTTPException(status_code=500, detail=str(exc))
+
+    async def stream_chat_response(
+        platform: Any, message: str, model_name: str
+    ) -> Any:
+        """Stream chat response in OpenAI-compatible format."""
+        completion_id = f"chatcmpl-{str(uuid.uuid4())}"
+        created_time = int(time.time())
+        conversation_id = str(uuid.uuid4())
+
+        # Modify first yield statement format
+        initial_data = {
+            "id": completion_id,
+            "object": "chat.completion.chunk",
+            "created": created_time,
+            "model": model_name,
+            "choices": [
+                {"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}
+            ],
+        }
+        yield f"data: {json.dumps(initial_data)}\n\n"
+
+        try:
+            # Get chat response directly
+            response = platform.chat_until_success(message)
+
+            # Record full response
+            full_response = ""
+
+            # If there is a response, send it in chunks
+            if response:
+                # Split into small chunks for better streaming experience
+                chunk_size = 4
+                for i in range(0, len(response), chunk_size):
+                    chunk = response[i : i + chunk_size]
+                    full_response += chunk
+
+                    # Create and send chunk
+                    chunk_data = {
+                        "id": completion_id,
+                        "object": "chat.completion.chunk",
+                        "created": created_time,
+                        "model": model_name,
+                        "choices": [
+                            {
+                                "index": 0,
+                                "delta": {"content": chunk},
+                                "finish_reason": None,
+                            }
+                        ],
+                    }
+
+                    yield f"data: {json.dumps(chunk_data)}\n\n"
+
+                    # Small delay to simulate streaming
+                    await asyncio.sleep(0.01)
+            else:
+                # If no output, send an empty content chunk
+                chunk_data = {
+                    "id": completion_id,
+                    "object": "chat.completion.chunk",
+                    "created": created_time,
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "delta": {"content": "No response from model."},
+                            "finish_reason": None,
+                        }
+                    ],
+                }
+                yield f"data: {json.dumps(chunk_data)}\n\n"
+                full_response = "No response from model."
+
+            # Modify final yield statement format
+            final_data = {
+                "id": completion_id,
+                "object": "chat.completion.chunk",
+                "created": created_time,
+                "model": model_name,
+                "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
+            }
+            yield f"data: {json.dumps(final_data)}\n\n"
+
+            # Send [DONE] marker
+            yield "data: [DONE]\n\n"
+
+            # Log conversation to file
+            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+            log_file = os.path.join(
+                logs_dir, f"stream_conversation_{conversation_id}_{timestamp}.json"
+            )
+
+            log_data = {
+                "conversation_id": conversation_id,
+                "timestamp": timestamp,
+                "model": model_name,
+                "message": message,
+                "response": full_response,
+            }
+
+            with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
+                json.dump(log_data, f, ensure_ascii=False, indent=2)
+
+            PrettyOutput.print(
+                f"Stream conversation logged to {log_file}", OutputType.INFO
+            )
+
+        except Exception as exc:
+            # Send error message
+            error_msg = f"Error: {str(exc)}"
+            print(f"Streaming error: {error_msg}")
+
+            res = json.dumps(
+                {
+                    "id": completion_id,
+                    "object": "chat.completion.chunk",
+                    "created": created_time,
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "delta": {"content": error_msg},
+                            "finish_reason": "stop",
+                        }
+                    ],
+                }
+            )
+            yield f"data: {res}\n\n"
+            yield f"data: {json.dumps({'error': {'message': error_msg, 'type': 'server_error'}})}\n\n"
+            yield "data: [DONE]\n\n"

+            # Log error to file
+            timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+            log_file = os.path.join(
+                logs_dir, f"stream_error_{conversation_id}_{timestamp}.json"
+            )
+
+            log_data = {
+                "conversation_id": conversation_id,
+                "timestamp": timestamp,
+                "model": model_name,
+                "message": message,
+                "error": error_msg,
+            }
+
+            with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
+                json.dump(log_data, f, ensure_ascii=False, indent=2)
+
+            PrettyOutput.print(f"Stream error logged to {log_file}", OutputType.ERROR)
+
+    # Run the server
+    uvicorn.run(app, host=host, port=port)
jarvis/jarvis_smart_shell/main.py
@@ -3,7 +3,7 @@
 import argparse
 import os
 import sys
-from typing import Optional
+from typing import Optional, Tuple
 
 from jarvis.jarvis_platform.registry import PlatformRegistry
 from jarvis.jarvis_utils.config import get_shell_name, set_config
@@ -18,54 +18,105 @@ def execute_command(command: str, should_run: bool) -> None:
     os.system(command)
 
 
-def
-    """
+def _check_fish_shell() -> bool:
+    """Check if current shell is fish
+
+    Returns:
+        bool: True if fish shell, False otherwise
+    """
+    return get_shell_name() == "fish"
+
+def _get_config_file() -> str:
+    """Get fish config file path
+
+    Returns:
+        str: Path to fish config file
+    """
+    return os.path.expanduser("~/.config/fish/config.fish")
+
+def _get_markers() -> Tuple[str, str]:
+    """Get start and end markers for JSS completion
+
+    Returns:
+        Tuple[str, str]: (start_marker, end_marker)
+    """
+    return (
+        "# ===== JARVIS JSS FISH COMPLETION START =====",
+        "# ===== JARVIS JSS FISH COMPLETION END ====="
+    )
 
+def install_jss_completion() -> int:
+    """Install JSS fish shell command completion
+
     Returns:
         int: 0 if success, 1 if failed
     """
-    if
+    if not _check_fish_shell():
         print("当前不是fish shell,无需安装")
         return 0
 
-
-
-    result = os.popen(f"fish -c '{check_cmd}'").read().strip()
+    config_file = _get_config_file()
+    start_marker, end_marker = _get_markers()
 
-    if
-    print("
-
-
-    config_file = os.path.expanduser("~/.config/fish/config.fish")
+    if not os.path.exists(config_file):
+        print("未找到config.fish文件,将创建新文件")
+        os.makedirs(os.path.dirname(config_file), exist_ok=True)
 
-
-
-    with open(config_file, "r") as config:
-        if "function fish_command_not_found" in config.read():
-            print(
-                "fish_command_not_found函数已定义但未加载,请执行: source ~/.config/fish/config.fish"
-            )
-            return 0
+    with open(config_file, "r") as f:
+        content = f.read()
 
-
-
+    if start_marker in content:
+        print("JSS fish completion已安装,请执行: source ~/.config/fish/config.fish")
+        return 0
 
-
-
-
-    """
+    with open(config_file, "a") as f:
+        f.write(f"""
+{start_marker}
 function fish_command_not_found
+    if test (string length "$argv") -lt 10
+        return
+    end
     commandline -r (jss request "$argv")
 end
 
 function __fish_command_not_found_handler --on-event fish_command_not_found
     fish_command_not_found "$argv"
 end
-
-
-    print(
-
-
+{end_marker}
+""")
+    print("JSS fish completion已安装,请执行: source ~/.config/fish/config.fish")
+    return 0
+
+def uninstall_jss_completion() -> int:
+    """Uninstall JSS fish shell command completion
+
+    Returns:
+        int: 0 if success, 1 if failed
+    """
+    if not _check_fish_shell():
+        print("当前不是fish shell,无需卸载")
+        return 0
+
+    config_file = _get_config_file()
+    start_marker, end_marker = _get_markers()
+
+    if not os.path.exists(config_file):
+        print("未找到JSS fish completion配置,无需卸载")
+        return 0
+
+    with open(config_file, "r") as f:
+        content = f.read()
+
+    if start_marker not in content:
+        print("未找到JSS fish completion配置,无需卸载")
+        return 0
+
+    new_content = content.split(start_marker)[0] + content.split(end_marker)[-1]
+
+    with open(config_file, "w") as f:
+        f.write(new_content)
+
+    print("JSS fish completion已卸载,请执行: source ~/.config/fish/config.fish")
     return 0
 
 
@@ -149,11 +200,19 @@ Example:
 
     # install子命令
     install_parser = subparsers.add_parser(
-        "install", help="安装fish shell
+        "install", help="安装JSS fish shell命令补全功能"
     )
     install_parser.add_argument(
         "--shell", choices=["fish"], default="fish", help="指定shell类型(仅支持fish)"
     )
+
+    # 添加uninstall子命令
+    uninstall_parser = subparsers.add_parser(
+        "uninstall", help="卸载JSS fish shell命令补全功能"
+    )
+    uninstall_parser.add_argument(
+        "--shell", choices=["fish"], default="fish", help="指定shell类型(仅支持fish)"
+    )
 
     # 解析参数
     args = parser.parse_args()
@@ -165,8 +224,15 @@ Example:
         if args.shell != "fish":
            print(f"错误: 不支持的shell类型: {args.shell}, 仅支持fish")
             return 1
-        return
+        return install_jss_completion()
 
+    # 处理uninstall命令
+    if args.command == "uninstall":
+        if args.shell != "fish":
+            print(f"错误: 不支持的shell类型: {args.shell}, 仅支持fish")
+            return 1
+        return uninstall_jss_completion()
+
     # 处理request命令
     if not args.request:
         # 检查是否在交互式终端中运行