jarvis-ai-assistant 0.1.222__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162)
  1. jarvis/__init__.py +1 -1
  2. jarvis/jarvis_agent/__init__.py +1143 -245
  3. jarvis/jarvis_agent/agent_manager.py +97 -0
  4. jarvis/jarvis_agent/builtin_input_handler.py +12 -10
  5. jarvis/jarvis_agent/config_editor.py +57 -0
  6. jarvis/jarvis_agent/edit_file_handler.py +392 -99
  7. jarvis/jarvis_agent/event_bus.py +48 -0
  8. jarvis/jarvis_agent/events.py +157 -0
  9. jarvis/jarvis_agent/file_context_handler.py +79 -0
  10. jarvis/jarvis_agent/file_methodology_manager.py +117 -0
  11. jarvis/jarvis_agent/jarvis.py +1117 -147
  12. jarvis/jarvis_agent/main.py +78 -34
  13. jarvis/jarvis_agent/memory_manager.py +195 -0
  14. jarvis/jarvis_agent/methodology_share_manager.py +174 -0
  15. jarvis/jarvis_agent/prompt_manager.py +82 -0
  16. jarvis/jarvis_agent/prompts.py +46 -9
  17. jarvis/jarvis_agent/protocols.py +4 -1
  18. jarvis/jarvis_agent/rewrite_file_handler.py +141 -0
  19. jarvis/jarvis_agent/run_loop.py +146 -0
  20. jarvis/jarvis_agent/session_manager.py +9 -9
  21. jarvis/jarvis_agent/share_manager.py +228 -0
  22. jarvis/jarvis_agent/shell_input_handler.py +23 -3
  23. jarvis/jarvis_agent/stdio_redirect.py +295 -0
  24. jarvis/jarvis_agent/task_analyzer.py +212 -0
  25. jarvis/jarvis_agent/task_manager.py +154 -0
  26. jarvis/jarvis_agent/task_planner.py +496 -0
  27. jarvis/jarvis_agent/tool_executor.py +8 -4
  28. jarvis/jarvis_agent/tool_share_manager.py +139 -0
  29. jarvis/jarvis_agent/user_interaction.py +42 -0
  30. jarvis/jarvis_agent/utils.py +54 -0
  31. jarvis/jarvis_agent/web_bridge.py +189 -0
  32. jarvis/jarvis_agent/web_output_sink.py +53 -0
  33. jarvis/jarvis_agent/web_server.py +751 -0
  34. jarvis/jarvis_c2rust/__init__.py +26 -0
  35. jarvis/jarvis_c2rust/cli.py +613 -0
  36. jarvis/jarvis_c2rust/collector.py +258 -0
  37. jarvis/jarvis_c2rust/library_replacer.py +1122 -0
  38. jarvis/jarvis_c2rust/llm_module_agent.py +1300 -0
  39. jarvis/jarvis_c2rust/optimizer.py +960 -0
  40. jarvis/jarvis_c2rust/scanner.py +1681 -0
  41. jarvis/jarvis_c2rust/transpiler.py +2325 -0
  42. jarvis/jarvis_code_agent/build_validation_config.py +133 -0
  43. jarvis/jarvis_code_agent/code_agent.py +1605 -178
  44. jarvis/jarvis_code_agent/code_analyzer/__init__.py +62 -0
  45. jarvis/jarvis_code_agent/code_analyzer/base_language.py +74 -0
  46. jarvis/jarvis_code_agent/code_analyzer/build_validator/__init__.py +44 -0
  47. jarvis/jarvis_code_agent/code_analyzer/build_validator/base.py +102 -0
  48. jarvis/jarvis_code_agent/code_analyzer/build_validator/cmake.py +59 -0
  49. jarvis/jarvis_code_agent/code_analyzer/build_validator/detector.py +125 -0
  50. jarvis/jarvis_code_agent/code_analyzer/build_validator/fallback.py +69 -0
  51. jarvis/jarvis_code_agent/code_analyzer/build_validator/go.py +38 -0
  52. jarvis/jarvis_code_agent/code_analyzer/build_validator/java_gradle.py +44 -0
  53. jarvis/jarvis_code_agent/code_analyzer/build_validator/java_maven.py +38 -0
  54. jarvis/jarvis_code_agent/code_analyzer/build_validator/makefile.py +50 -0
  55. jarvis/jarvis_code_agent/code_analyzer/build_validator/nodejs.py +93 -0
  56. jarvis/jarvis_code_agent/code_analyzer/build_validator/python.py +129 -0
  57. jarvis/jarvis_code_agent/code_analyzer/build_validator/rust.py +54 -0
  58. jarvis/jarvis_code_agent/code_analyzer/build_validator/validator.py +154 -0
  59. jarvis/jarvis_code_agent/code_analyzer/build_validator.py +43 -0
  60. jarvis/jarvis_code_agent/code_analyzer/context_manager.py +363 -0
  61. jarvis/jarvis_code_agent/code_analyzer/context_recommender.py +18 -0
  62. jarvis/jarvis_code_agent/code_analyzer/dependency_analyzer.py +132 -0
  63. jarvis/jarvis_code_agent/code_analyzer/file_ignore.py +330 -0
  64. jarvis/jarvis_code_agent/code_analyzer/impact_analyzer.py +781 -0
  65. jarvis/jarvis_code_agent/code_analyzer/language_registry.py +185 -0
  66. jarvis/jarvis_code_agent/code_analyzer/language_support.py +89 -0
  67. jarvis/jarvis_code_agent/code_analyzer/languages/__init__.py +31 -0
  68. jarvis/jarvis_code_agent/code_analyzer/languages/c_cpp_language.py +231 -0
  69. jarvis/jarvis_code_agent/code_analyzer/languages/go_language.py +183 -0
  70. jarvis/jarvis_code_agent/code_analyzer/languages/python_language.py +219 -0
  71. jarvis/jarvis_code_agent/code_analyzer/languages/rust_language.py +209 -0
  72. jarvis/jarvis_code_agent/code_analyzer/llm_context_recommender.py +451 -0
  73. jarvis/jarvis_code_agent/code_analyzer/symbol_extractor.py +77 -0
  74. jarvis/jarvis_code_agent/code_analyzer/tree_sitter_extractor.py +48 -0
  75. jarvis/jarvis_code_agent/lint.py +275 -13
  76. jarvis/jarvis_code_agent/utils.py +142 -0
  77. jarvis/jarvis_code_analysis/checklists/loader.py +20 -6
  78. jarvis/jarvis_code_analysis/code_review.py +583 -548
  79. jarvis/jarvis_data/config_schema.json +339 -28
  80. jarvis/jarvis_git_squash/main.py +22 -13
  81. jarvis/jarvis_git_utils/git_commiter.py +171 -55
  82. jarvis/jarvis_mcp/sse_mcp_client.py +22 -15
  83. jarvis/jarvis_mcp/stdio_mcp_client.py +4 -4
  84. jarvis/jarvis_mcp/streamable_mcp_client.py +36 -16
  85. jarvis/jarvis_memory_organizer/memory_organizer.py +753 -0
  86. jarvis/jarvis_methodology/main.py +48 -63
  87. jarvis/jarvis_multi_agent/__init__.py +302 -43
  88. jarvis/jarvis_multi_agent/main.py +70 -24
  89. jarvis/jarvis_platform/ai8.py +40 -23
  90. jarvis/jarvis_platform/base.py +210 -49
  91. jarvis/jarvis_platform/human.py +11 -1
  92. jarvis/jarvis_platform/kimi.py +82 -76
  93. jarvis/jarvis_platform/openai.py +73 -1
  94. jarvis/jarvis_platform/registry.py +8 -15
  95. jarvis/jarvis_platform/tongyi.py +115 -101
  96. jarvis/jarvis_platform/yuanbao.py +89 -63
  97. jarvis/jarvis_platform_manager/main.py +194 -132
  98. jarvis/jarvis_platform_manager/service.py +122 -86
  99. jarvis/jarvis_rag/cli.py +156 -53
  100. jarvis/jarvis_rag/embedding_manager.py +155 -12
  101. jarvis/jarvis_rag/llm_interface.py +10 -13
  102. jarvis/jarvis_rag/query_rewriter.py +63 -12
  103. jarvis/jarvis_rag/rag_pipeline.py +222 -40
  104. jarvis/jarvis_rag/reranker.py +26 -3
  105. jarvis/jarvis_rag/retriever.py +270 -14
  106. jarvis/jarvis_sec/__init__.py +3605 -0
  107. jarvis/jarvis_sec/checkers/__init__.py +32 -0
  108. jarvis/jarvis_sec/checkers/c_checker.py +2680 -0
  109. jarvis/jarvis_sec/checkers/rust_checker.py +1108 -0
  110. jarvis/jarvis_sec/cli.py +116 -0
  111. jarvis/jarvis_sec/report.py +257 -0
  112. jarvis/jarvis_sec/status.py +264 -0
  113. jarvis/jarvis_sec/types.py +20 -0
  114. jarvis/jarvis_sec/workflow.py +219 -0
  115. jarvis/jarvis_smart_shell/main.py +405 -137
  116. jarvis/jarvis_stats/__init__.py +13 -0
  117. jarvis/jarvis_stats/cli.py +387 -0
  118. jarvis/jarvis_stats/stats.py +711 -0
  119. jarvis/jarvis_stats/storage.py +612 -0
  120. jarvis/jarvis_stats/visualizer.py +282 -0
  121. jarvis/jarvis_tools/ask_user.py +1 -0
  122. jarvis/jarvis_tools/base.py +18 -2
  123. jarvis/jarvis_tools/clear_memory.py +239 -0
  124. jarvis/jarvis_tools/cli/main.py +220 -144
  125. jarvis/jarvis_tools/execute_script.py +52 -12
  126. jarvis/jarvis_tools/file_analyzer.py +17 -12
  127. jarvis/jarvis_tools/generate_new_tool.py +46 -24
  128. jarvis/jarvis_tools/read_code.py +277 -18
  129. jarvis/jarvis_tools/read_symbols.py +141 -0
  130. jarvis/jarvis_tools/read_webpage.py +86 -13
  131. jarvis/jarvis_tools/registry.py +294 -90
  132. jarvis/jarvis_tools/retrieve_memory.py +227 -0
  133. jarvis/jarvis_tools/save_memory.py +194 -0
  134. jarvis/jarvis_tools/search_web.py +62 -28
  135. jarvis/jarvis_tools/sub_agent.py +205 -0
  136. jarvis/jarvis_tools/sub_code_agent.py +217 -0
  137. jarvis/jarvis_tools/virtual_tty.py +330 -62
  138. jarvis/jarvis_utils/builtin_replace_map.py +4 -5
  139. jarvis/jarvis_utils/clipboard.py +90 -0
  140. jarvis/jarvis_utils/config.py +607 -50
  141. jarvis/jarvis_utils/embedding.py +3 -0
  142. jarvis/jarvis_utils/fzf.py +57 -0
  143. jarvis/jarvis_utils/git_utils.py +251 -29
  144. jarvis/jarvis_utils/globals.py +174 -17
  145. jarvis/jarvis_utils/http.py +58 -79
  146. jarvis/jarvis_utils/input.py +899 -153
  147. jarvis/jarvis_utils/methodology.py +210 -83
  148. jarvis/jarvis_utils/output.py +220 -137
  149. jarvis/jarvis_utils/utils.py +1906 -135
  150. jarvis_ai_assistant-0.7.0.dist-info/METADATA +465 -0
  151. jarvis_ai_assistant-0.7.0.dist-info/RECORD +192 -0
  152. {jarvis_ai_assistant-0.1.222.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/entry_points.txt +8 -2
  153. jarvis/jarvis_git_details/main.py +0 -265
  154. jarvis/jarvis_platform/oyi.py +0 -357
  155. jarvis/jarvis_tools/edit_file.py +0 -255
  156. jarvis/jarvis_tools/rewrite_file.py +0 -195
  157. jarvis_ai_assistant-0.1.222.dist-info/METADATA +0 -767
  158. jarvis_ai_assistant-0.1.222.dist-info/RECORD +0 -110
  159. /jarvis/{jarvis_git_details → jarvis_memory_organizer}/__init__.py +0 -0
  160. {jarvis_ai_assistant-0.1.222.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/WHEEL +0 -0
  161. {jarvis_ai_assistant-0.1.222.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/licenses/LICENSE +0 -0
  162. {jarvis_ai_assistant-0.1.222.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/top_level.txt +0 -0
@@ -7,9 +7,10 @@ import asyncio
  import json
  import os
  import time
+ import threading
  import uuid
  from datetime import datetime
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional

  import uvicorn
  from fastapi import FastAPI, HTTPException
@@ -72,8 +73,16 @@ def start_service(
  ) -> None:
  """Start OpenAI-compatible API server."""
  # Create logs directory if it doesn't exist
- logs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
- os.makedirs(logs_dir, exist_ok=True)
+ # Prefer environment variable, then user directory, fall back to CWD
+ logs_dir = os.environ.get("JARVIS_LOG_DIR")
+ if not logs_dir:
+ logs_dir = os.path.join(os.path.expanduser("~"), ".jarvis", "logs")
+ try:
+ os.makedirs(logs_dir, exist_ok=True)
+ except Exception:
+ # As a last resort, use current working directory
+ logs_dir = os.path.join(os.getcwd(), "logs")
+ os.makedirs(logs_dir, exist_ok=True)

  app = FastAPI(title="Jarvis API Server")

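The log directory is now resolved in a fixed order: the JARVIS_LOG_DIR environment variable, then `~/.jarvis/logs`, then `./logs` if the user directory cannot be created. A minimal sketch of redirecting the logs via that environment variable, assuming this hunk belongs to `jarvis/jarvis_platform_manager/service.py` (listed above) and that `start_service` takes the host/port arguments suggested by the hunk header; both are inferences, not confirmed by this diff:

```python
import os

# Illustrative only: choose a custom log location before the server
# resolves JARVIS_LOG_DIR in start_service().
os.environ["JARVIS_LOG_DIR"] = "/var/log/jarvis"

# from jarvis.jarvis_platform_manager.service import start_service  # assumed module path
# start_service(host="0.0.0.0", port=8000)                          # assumed signature
```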
@@ -81,7 +90,7 @@ def start_service(
  app.add_middleware(
  CORSMiddleware,
  allow_origins=["*"],
- allow_credentials=True,
+ allow_credentials=False,
  allow_methods=["*"],
  allow_headers=["*"],
  )
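The substantive change here is `allow_credentials`: browsers refuse any credentialed cross-origin response whose `Access-Control-Allow-Origin` is the wildcard `*`, so a wildcard origin list only makes sense with credentials disabled. A hedged sketch of the alternative configuration, if cookie-based credentials were ever needed (the explicit origin is illustrative, not taken from the package):

```python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI(title="Jarvis API Server")

# Illustrative alternative: credentialed requests require an explicit
# origin list instead of the wildcard used above.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],  # hypothetical front-end origin
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
```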
@@ -91,7 +100,7 @@ def start_service(
  PrettyOutput.print(
  f"Starting Jarvis API server on {host}:{port}", OutputType.SUCCESS
  )
- PrettyOutput.print("This server provides an OpenAI-compatible API", OutputType.INFO)
+ PrettyOutput.print("本服务提供与 OpenAI 兼容的 API", OutputType.INFO)

  if default_platform and default_model:
  PrettyOutput.print(
@@ -99,8 +108,6 @@ def start_service(
  OutputType.INFO,
  )

- PrettyOutput.print("Available platforms:", OutputType.INFO)
-
  # Platform and model cache
  platform_instances: Dict[str, Any] = {}

@@ -144,7 +151,7 @@ def start_service(
  if response:
  f.write(f"\nResponse:\n{response}\n")

- PrettyOutput.print(f"Conversation logged to {log_file}", OutputType.INFO)
+ PrettyOutput.print(f"会话已记录到 {log_file}", OutputType.INFO)

  @app.get("/v1/models")
  async def list_models() -> Dict[str, Any]:
@@ -169,7 +176,10 @@ def start_service(
  }
  )
  except Exception as exc:
- print(f"Error getting models for {default_platform}: {str(exc)}")
+ PrettyOutput.print(
+ f"Error getting models for {default_platform}: {str(exc)}",
+ OutputType.ERROR,
+ )

  # Return model list
  return {"object": "list", "data": model_list}
@@ -195,11 +205,16 @@ def start_service(
  if "/" in model:
  platform_name, model_name = model.split("/", 1)
  else:
- # Use default platform and model if not specified
- if default_platform and default_model:
- platform_name, model_name = default_platform, default_model
+ # Use default platform if not specified in the model name
+ if default_platform:
+ platform_name = default_platform
+ model_name = model
  else:
- platform_name, model_name = "oyi", model # Default to OYI platform
+ raise HTTPException(
+ status_code=400,
+ detail="Model name must be in 'platform/model_name' format "
+ "or a default platform must be set.",
+ )

  # Get platform instance
  platform = get_platform_instance(platform_name, model_name)
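With the OYI fallback removed, a request must either name the platform explicitly as `platform/model_name` or rely on a configured default platform; anything else now returns HTTP 400. A minimal client-side sketch against the OpenAI-compatible endpoint, assuming the server listens on localhost:8000; `kimi` matches a platform module listed above, while the model name `k1` is purely illustrative:

```python
from openai import OpenAI

# The server exposes the OpenAI chat-completions API, so the standard
# client works once base_url points at it. The API key is not checked
# by this server, but the client requires some value.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-used")

resp = client.chat.completions.create(
    model="kimi/k1",  # "platform/model_name"; a bare name would use the default platform
    messages=[{"role": "user", "content": "hello"}],
)
print(resp.choices[0].message.content)
```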
@@ -223,23 +238,23 @@ def start_service(
  "messages": [{"role": m.role, "content": m.content} for m in messages],
  }

- # Log the conversation
- log_conversation(
- conversation_id,
- [{"role": m.role, "content": m.content} for m in messages],
- model,
- )
+ # Logging moved to post-response to avoid duplicates

  if stream:
  # Return streaming response
  return StreamingResponse(
  stream_chat_response(platform, message_text, model), # type: ignore
  media_type="text/event-stream",
+ headers={"Cache-Control": "no-cache", "Connection": "keep-alive"},
  )

  # Get chat response
  try:
- response_text = platform.chat_until_success(message_text)
+ # Run potentially blocking call in a thread to avoid blocking the event loop
+ loop = asyncio.get_running_loop()
+ response_text = await loop.run_in_executor(
+ None, lambda: platform.chat_until_success(message_text)
+ )

  # Create response in OpenAI format
  completion_id = f"chatcmpl-{str(uuid.uuid4())}"
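The non-streaming path previously called `platform.chat_until_success` directly inside the `async def` route, which stalls the whole event loop for the duration of the model call; `run_in_executor` moves it onto a worker thread instead. A minimal standalone sketch of the same pattern (the names are placeholders, not the package's API):

```python
import asyncio
import time


def slow_blocking_call(prompt: str) -> str:
    # Stand-in for a synchronous SDK call such as chat_until_success().
    time.sleep(2)
    return f"echo: {prompt}"


async def handle_request(prompt: str) -> str:
    loop = asyncio.get_running_loop()
    # None selects the default ThreadPoolExecutor; the event loop keeps
    # serving other coroutines while the worker thread blocks.
    return await loop.run_in_executor(None, lambda: slow_blocking_call(prompt))


async def main() -> None:
    # The two requests overlap instead of running back to back.
    results = await asyncio.gather(handle_request("a"), handle_request("b"))
    print(results)


if __name__ == "__main__":
    asyncio.run(main())
```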
@@ -282,11 +297,31 @@ def start_service(
  raise HTTPException(status_code=500, detail=str(exc))

  async def stream_chat_response(platform: Any, message: str, model_name: str) -> Any:
- """Stream chat response in OpenAI-compatible format."""
+ """Stream chat response in OpenAI-compatible format without blocking the event loop."""
  completion_id = f"chatcmpl-{str(uuid.uuid4())}"
  created_time = int(time.time())
  conversation_id = str(uuid.uuid4())

+ loop = asyncio.get_running_loop()
+ queue: asyncio.Queue = asyncio.Queue()
+ SENTINEL = object()
+
+ def producer() -> None:
+ try:
+ for chunk in platform.chat(message):
+ if chunk:
+ asyncio.run_coroutine_threadsafe(queue.put(chunk), loop)
+ except Exception as exc:
+ # Use a special dict to pass error across thread boundary
+ asyncio.run_coroutine_threadsafe(
+ queue.put({"__error__": str(exc)}), loop
+ )
+ finally:
+ asyncio.run_coroutine_threadsafe(queue.put(SENTINEL), loop)
+
+ # Start producer thread
+ threading.Thread(target=producer, daemon=True).start()
+
  # Send the initial chunk with the role
  initial_data = {
  "id": completion_id,
@@ -299,36 +334,20 @@ def start_service(
  }
  yield f"data: {json.dumps(initial_data)}\n\n"

- try:
- # Use the streaming-capable chat method
- response_generator = platform.chat(message)
-
- full_response = ""
- has_content = False
-
- # Iterate over the generator and stream chunks
- for chunk in response_generator:
- if chunk:
- has_content = True
- full_response += chunk
- chunk_data = {
- "id": completion_id,
- "object": "chat.completion.chunk",
- "created": created_time,
- "model": model_name,
- "choices": [
- {
- "index": 0,
- "delta": {"content": chunk},
- "finish_reason": None,
- }
- ],
- }
- yield f"data: {json.dumps(chunk_data)}\n\n"
+ full_response = ""
+ has_content = False
+
+ while True:
+ item = await queue.get()
+ if item is SENTINEL:
+ break
+
+ if isinstance(item, dict) and "__error__" in item:
+ error_msg = f"Error during streaming: {item['__error__']}"
+ PrettyOutput.print(error_msg, OutputType.ERROR)

- if not has_content:
- no_response_message = "No response from model."
- chunk_data = {
+ # Send error information in the stream
+ error_chunk = {
  "id": completion_id,
  "object": "chat.completion.chunk",
  "created": created_time,
@@ -336,41 +355,45 @@ def start_service(
  "choices": [
  {
  "index": 0,
- "delta": {"content": no_response_message},
- "finish_reason": None,
+ "delta": {"content": error_msg},
+ "finish_reason": "stop",
  }
  ],
  }
- yield f"data: {json.dumps(chunk_data)}\n\n"
- full_response = no_response_message
+ yield f"data: {json.dumps(error_chunk)}\n\n"
+ yield "data: [DONE]\n\n"
+
+ # Log the error
+ log_conversation(
+ conversation_id,
+ [{"role": "user", "content": message}],
+ model_name,
+ response=f"ERROR: {error_msg}",
+ )
+ return

- # Send the final chunk with finish_reason
- final_data = {
+ # Normal chunk
+ chunk = item
+ has_content = True
+ full_response += chunk
+ chunk_data = {
  "id": completion_id,
  "object": "chat.completion.chunk",
  "created": created_time,
  "model": model_name,
- "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
+ "choices": [
+ {
+ "index": 0,
+ "delta": {"content": chunk},
+ "finish_reason": None,
+ }
+ ],
  }
- yield f"data: {json.dumps(final_data)}\n\n"
-
- # Send the [DONE] marker
- yield "data: [DONE]\n\n"
+ yield f"data: {json.dumps(chunk_data)}\n\n"

- # Log the full conversation
- log_conversation(
- conversation_id,
- [{"role": "user", "content": message}],
- model_name,
- full_response,
- )
-
- except Exception as exc:
- error_msg = f"Error during streaming: {str(exc)}"
- PrettyOutput.print(error_msg, OutputType.ERROR)
-
- # Send error information in the stream
- error_chunk = {
+ if not has_content:
+ no_response_message = "No response from model."
+ chunk_data = {
  "id": completion_id,
  "object": "chat.completion.chunk",
  "created": created_time,
@@ -378,21 +401,34 @@ def start_service(
  "choices": [
  {
  "index": 0,
- "delta": {"content": error_msg},
- "finish_reason": "stop",
+ "delta": {"content": no_response_message},
+ "finish_reason": None,
  }
  ],
  }
- yield f"data: {json.dumps(error_chunk)}\n\n"
- yield "data: [DONE]\n\n"
+ yield f"data: {json.dumps(chunk_data)}\n\n"
+ full_response = no_response_message

- # Log the error
- log_conversation(
- conversation_id,
- [{"role": "user", "content": message}],
- model_name,
- response=f"ERROR: {error_msg}",
- )
+ # Send the final chunk with finish_reason
+ final_data = {
+ "id": completion_id,
+ "object": "chat.completion.chunk",
+ "created": created_time,
+ "model": model_name,
+ "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
+ }
+ yield f"data: {json.dumps(final_data)}\n\n"
+
+ # Send the [DONE] marker
+ yield "data: [DONE]\n\n"
+
+ # Log the full conversation
+ log_conversation(
+ conversation_id,
+ [{"role": "user", "content": message}],
+ model_name,
+ full_response,
+ )

  # Run the server
  uvicorn.run(app, host=host, port=port)
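The reworked `stream_chat_response` drives the synchronous chunk generator from a daemon thread and hands chunks to the async generator through an `asyncio.Queue`, using `run_coroutine_threadsafe` for the thread-to-loop crossing and a sentinel object to signal completion. A self-contained sketch of that pattern, separate from the FastAPI/SSE wrapping (`sync_chunks` is a placeholder for `platform.chat`):

```python
import asyncio
import threading
import time
from typing import AsyncIterator, Iterator


def sync_chunks(message: str) -> Iterator[str]:
    # Placeholder for a blocking, chunk-yielding SDK call such as platform.chat().
    for word in message.split():
        time.sleep(0.1)
        yield word + " "


async def stream(message: str) -> AsyncIterator[str]:
    loop = asyncio.get_running_loop()
    queue: asyncio.Queue = asyncio.Queue()
    sentinel = object()

    def producer() -> None:
        try:
            for chunk in sync_chunks(message):
                # Schedule queue.put() on the event loop from this worker thread.
                asyncio.run_coroutine_threadsafe(queue.put(chunk), loop)
        finally:
            asyncio.run_coroutine_threadsafe(queue.put(sentinel), loop)

    threading.Thread(target=producer, daemon=True).start()

    while True:
        item = await queue.get()
        if item is sentinel:
            break
        yield item


async def main() -> None:
    async for chunk in stream("streaming without blocking the event loop"):
        print(chunk, end="", flush=True)
    print()


if __name__ == "__main__":
    asyncio.run(main())
```

`asyncio.Queue` is not thread-safe, so the worker thread never touches it directly; every put is scheduled onto the event loop with `run_coroutine_threadsafe`, which is the same crossing the diff uses.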