mcli-framework 7.5.0__py3-none-any.whl → 7.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic.
- mcli/app/completion_helpers.py +4 -13
- mcli/app/main.py +13 -25
- mcli/app/model_cmd.py +119 -9
- mcli/lib/custom_commands.py +12 -1
- mcli/{app → self}/completion_cmd.py +6 -6
- mcli/self/self_cmd.py +29 -0
- mcli/test/test_cmd.py +30 -0
- mcli/workflow/model_service/openai_adapter.py +343 -0
- {mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/METADATA +1 -1
- {mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/RECORD +18 -16
- /mcli/{app → self}/logs_cmd.py +0 -0
- /mcli/{app → self}/redis_cmd.py +0 -0
- /mcli/{app → self}/visual_cmd.py +0 -0
- /mcli/{app → test}/cron_test_cmd.py +0 -0
- {mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/WHEEL +0 -0
- {mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/top_level.txt +0 -0
mcli/app/completion_helpers.py
CHANGED
@@ -183,22 +183,13 @@ class CompletionAwareLazyGroup(click.Group):
         group = self._load_group()
         return group.list_commands(ctx)
 
-    def shell_complete(self, ctx, incomplete):
+    def shell_complete(self, ctx, param, incomplete):
         """Provide shell completion using static data when possible."""
-        #
-
-        data = LAZY_COMMAND_COMPLETIONS[self.name]
-        if "subcommands" in data:
-            items = []
-            for subcommand in data["subcommands"]:
-                if subcommand.startswith(incomplete):
-                    items.append(CompletionItem(subcommand))
-            return items
-
-        # Fallback to loading the actual group
+        # Load the actual group to get proper completion for nested commands
+        # This ensures file path completion works for subcommands
         group = self._load_group()
        if hasattr(group, "shell_complete"):
-            return group.shell_complete(ctx, incomplete)
+            return group.shell_complete(ctx, param, incomplete)
         return []
 
     def get_params(self, ctx):
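The substance of this change is twofold: the static completion shortcut is dropped in favor of always loading the real group, and the `param` argument is now threaded through the delegated call. A minimal self-contained sketch of the same delegation pattern, with illustrative names (not the package's actual helpers):

import importlib

import click


class LazyProxyGroup(click.Group):
    """Defer importing a heavy command group until Click actually needs it."""

    def __init__(self, name, import_path, **kwargs):
        super().__init__(name=name, **kwargs)
        self._import_path = import_path  # e.g. "mypkg.cmds.model"
        self._real = None

    def _load_group(self):
        # Import "pkg.module.attr" on first use and cache the result
        if self._real is None:
            module_path, attr = self._import_path.rsplit(".", 1)
            self._real = getattr(importlib.import_module(module_path), attr)
        return self._real

    def list_commands(self, ctx):
        return self._load_group().list_commands(ctx)

    def get_command(self, ctx, name):
        return self._load_group().get_command(ctx, name)

    def shell_complete(self, ctx, param, incomplete):
        # Pass `param` through unchanged -- dropping it was the bug fixed above
        group = self._load_group()
        if hasattr(group, "shell_complete"):
            return group.shell_complete(ctx, param, incomplete)
        return []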
mcli/app/main.py
CHANGED
@@ -255,9 +255,11 @@ class LazyCommand(click.Command):
     def shell_complete(self, ctx, param, incomplete):
         """Provide shell completion for the lazily loaded command."""
         cmd = self._load_command()
+        # Delegate to the loaded command's completion
         if hasattr(cmd, "shell_complete"):
             return cmd.shell_complete(ctx, param, incomplete)
-
+        # Fallback to default Click completion
+        return super().shell_complete(ctx, param, incomplete) if hasattr(super(), "shell_complete") else []
 
 
 class LazyGroup(click.Group):
@@ -309,9 +311,11 @@ class LazyGroup(click.Group):
     def shell_complete(self, ctx, param, incomplete):
         """Provide shell completion for the lazily loaded group."""
         group = self._load_group()
+        # Delegate to the loaded group's completion
         if hasattr(group, "shell_complete"):
             return group.shell_complete(ctx, param, incomplete)
-
+        # Fallback to default Click completion
+        return super().shell_complete(ctx, param, incomplete) if hasattr(super(), "shell_complete") else []
 
 
 def _add_lazy_commands(app: click.Group):
@@ -334,14 +338,14 @@ def _add_lazy_commands(app: click.Group):
     except Exception as e:
         logger.debug(f"Could not load self commands: {e}")
 
-    #
+    # Test group - load immediately for testing commands
     try:
-        from mcli.
+        from mcli.test.test_cmd import test_group
 
-        app.add_command(
-        logger.debug("Added
-    except
-        logger.debug(f"Could not load
+        app.add_command(test_group, name="test")
+        logger.debug("Added test group commands")
+    except Exception as e:
+        logger.debug(f"Could not load test commands: {e}")
 
     # Add workflow with completion-aware lazy loading
     try:
@@ -374,22 +378,6 @@ def _add_lazy_commands(app: click.Group):
             "import_path": "mcli.app.model_cmd.model",
             "help": "Model management commands for offline and online model usage",
         },
-        "cron-test": {
-            "import_path": "mcli.app.cron_test_cmd.cron_test",
-            "help": "🕒 Validate and test MCLI cron/scheduler functionality with comprehensive tests.",
-        },
-        "visual": {
-            "import_path": "mcli.app.visual_cmd.visual",
-            "help": "🎨 Visual effects and enhancements showcase",
-        },
-        "redis": {
-            "import_path": "mcli.app.redis_cmd.redis_group",
-            "help": "🗄️ Manage Redis cache service for performance optimization",
-        },
-        "logs": {
-            "import_path": "mcli.app.logs_cmd.logs_group",
-            "help": "📋 Stream and manage MCLI log files with real-time updates",
-        },
     }
 
     for cmd_name, cmd_info in lazy_commands.items():
@@ -397,7 +385,7 @@ def _add_lazy_commands(app: click.Group):
         if cmd_name == "workflow":
            continue
 
-        if cmd_name in ["model"
+        if cmd_name in ["model"]:
            # Use completion-aware LazyGroup for commands that have subcommands
            try:
                from mcli.app.completion_helpers import create_completion_aware_lazy_group
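The `lazy_commands` registry above maps each top-level command to a dotted `import_path` that is only resolved when the command is actually invoked or completed. A sketch of the resolution step under the same "module.attr" convention (the helper name is illustrative, not from the package):

import importlib


def resolve_import_path(import_path: str):
    """Resolve "pkg.module.attr" to the attribute, importing lazily.

    Example: "mcli.app.model_cmd.model" -> the `model` click group.
    """
    module_path, _, attr = import_path.rpartition(".")
    return getattr(importlib.import_module(module_path), attr)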
mcli/app/model_cmd.py
CHANGED
@@ -18,6 +18,86 @@ from mcli.workflow.model_service.lightweight_model_server import (
 logger = get_logger(__name__)
 
 
+def _start_openai_server(server, host: str, port: int, api_key: Optional[str], model: str):
+    """Start FastAPI server with OpenAI compatibility"""
+    try:
+        from fastapi import FastAPI
+        from fastapi.middleware.cors import CORSMiddleware
+        import uvicorn
+
+        from mcli.workflow.model_service.openai_adapter import create_openai_adapter
+
+        # Create FastAPI app
+        app = FastAPI(
+            title="MCLI Model Service (OpenAI Compatible)",
+            description="OpenAI-compatible API for MCLI lightweight models",
+            version="1.0.0",
+        )
+
+        # Add CORS middleware
+        app.add_middleware(
+            CORSMiddleware,
+            allow_origins=["*"],
+            allow_credentials=True,
+            allow_methods=["*"],
+            allow_headers=["*"],
+        )
+
+        # Create OpenAI adapter
+        require_auth = api_key is not None
+        adapter = create_openai_adapter(server, require_auth=require_auth)
+
+        # Add API key if provided
+        if api_key:
+            adapter.api_key_manager.add_key(api_key, name="default")
+            click.echo(f"🔐 API key authentication enabled")
+
+        # Include OpenAI routes
+        app.include_router(adapter.router)
+
+        # Add health check endpoint
+        @app.get("/health")
+        async def health():
+            return {"status": "healthy", "model": model}
+
+        # Display server info
+        click.echo(f"\n📝 Server running at:")
+        click.echo(f"   - Base URL: http://{host}:{port}")
+        click.echo(f"   - OpenAI API: http://{host}:{port}/v1")
+        click.echo(f"   - Models: http://{host}:{port}/v1/models")
+        click.echo(f"   - Chat: http://{host}:{port}/v1/chat/completions")
+        click.echo(f"   - Health: http://{host}:{port}/health")
+
+        if require_auth:
+            click.echo(f"\n🔐 Authentication: Required")
+            click.echo(f"   Use: Authorization: Bearer {api_key}")
+        else:
+            click.echo(f"\n⚠️  Authentication: Disabled (not recommended for public access)")
+
+        if host == "0.0.0.0":
+            click.echo(f"\n⚠️  Server is publicly accessible on all interfaces!")
+
+        click.echo(f"\n📚 For aider, use:")
+        if require_auth:
+            click.echo(f"   export OPENAI_API_KEY={api_key}")
+        click.echo(f"   export OPENAI_API_BASE=http://{host}:{port}/v1")
+        click.echo(f"   aider --model {model}")
+
+        click.echo(f"\n   Press Ctrl+C to stop the server")
+
+        # Start server
+        uvicorn.run(app, host=host, port=port, log_level="info")
+
+    except ImportError as e:
+        click.echo(f"❌ Missing dependencies for OpenAI-compatible server: {e}")
+        click.echo(f"   Install with: pip install fastapi uvicorn")
+        sys.exit(1)
+    except Exception as e:
+        click.echo(f"❌ Failed to start OpenAI-compatible server: {e}")
+        logger.error(f"Server error: {e}", exc_info=True)
+        sys.exit(1)
+
+
 @click.group()
 def model():
     """Model management commands for offline and online model usage."""
@@ -103,13 +183,34 @@ def download(model_name: str):
 @click.option(
     "--port", "-p", default=None, help="Port to run server on (default: from config or 51234)"
 )
+@click.option(
+    "--host", "-h", default="localhost", help="Host to bind to (use 0.0.0.0 for public access)"
+)
 @click.option(
     "--auto-download",
     is_flag=True,
     default=True,
     help="Automatically download model if not available",
 )
-def start(model: Optional[str], port: Optional[int], auto_download: bool):
+@click.option(
+    "--openai-compatible",
+    is_flag=True,
+    default=False,
+    help="Enable OpenAI-compatible API endpoints",
+)
+@click.option(
+    "--api-key",
+    default=None,
+    help="API key for authentication (if not set, auth is disabled)",
+)
+def start(
+    model: Optional[str],
+    port: Optional[int],
+    host: str,
+    auto_download: bool,
+    openai_compatible: bool,
+    api_key: Optional[str],
+):
     """Start the lightweight model server."""
     # Load port from config if not specified
     if port is None:
@@ -155,15 +256,24 @@ def start(model: Optional[str], port: Optional[int], auto_download: bool):
         click.echo(f"❌ Failed to load {model}")
         sys.exit(1)
 
-    # Start server
-
-
+    # Start server with OpenAI compatibility if requested
+    if openai_compatible:
+        click.echo(f"🚀 Starting OpenAI-compatible server on {host}:{port}...")
+        _start_openai_server(server, host, port, api_key, model)
+    else:
+        click.echo(f"🚀 Starting lightweight server on {host}:{port}...")
+        server.start_server()
+
+        click.echo(f"\n📝 Server running at:")
+        click.echo(f"   - API: http://{host}:{port}")
+        click.echo(f"   - Health: http://{host}:{port}/health")
+        click.echo(f"   - Models: http://{host}:{port}/models")
+
+        if host == "0.0.0.0":
+            click.echo(f"\n⚠️  Server is publicly accessible!")
+            click.echo(f"   Consider using --openai-compatible with --api-key for security")
 
-
-    click.echo(f"   - API: http://localhost:{port}")
-    click.echo(f"   - Health: http://localhost:{port}/health")
-    click.echo(f"   - Models: http://localhost:{port}/models")
-    click.echo(f"\n   Press Ctrl+C to stop the server")
+        click.echo(f"\n   Press Ctrl+C to stop the server")
 
     try:
         # Keep server running
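Once started with `--openai-compatible`, the service speaks plain HTTP, so any OpenAI-style client can exercise it. A hedged sketch of a raw client, assuming the server runs on the default port 51234 with an illustrative `--api-key secret123`:

import requests  # third-party: pip install requests

BASE = "http://localhost:51234"  # assumed default port from the config
HEADERS = {"Authorization": "Bearer secret123"}  # illustrative key

# List the models the adapter exposes
models = requests.get(f"{BASE}/v1/models", headers=HEADERS).json()
print([m["id"] for m in models["data"]])

# Non-streaming chat completion against the OpenAI-compatible route
resp = requests.post(
    f"{BASE}/v1/chat/completions",
    headers=HEADERS,
    json={
        "model": models["data"][0]["id"],
        "messages": [{"role": "user", "content": "Hello"}],
    },
)
print(resp.json()["choices"][0]["message"]["content"])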
mcli/lib/custom_commands.py
CHANGED
@@ -274,12 +274,23 @@ class CustomCommandManager:
             spec.loader.exec_module(module)
 
             # Look for a command or command group in the module
+            # Prioritize Groups over Commands to handle commands with subcommands correctly
             command_obj = None
+            found_commands = []
+
             for attr_name in dir(module):
                 attr = getattr(module, attr_name)
-                if isinstance(attr,
+                if isinstance(attr, click.Group):
+                    # Found a group - this takes priority
                     command_obj = attr
                     break
+                elif isinstance(attr, click.Command):
+                    # Store command for fallback
+                    found_commands.append(attr)
+
+            # If no group found, use the first command
+            if not command_obj and found_commands:
+                command_obj = found_commands[0]
 
             if command_obj:
                 # Register with the target group
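Because `click.Group` subclasses `click.Command`, the old single isinstance check (truncated in this view) treated the first match in `dir()` order as the entry point, so a bare command could shadow a module's real group. A small illustrative module showing what the new scan picks (the names are made up for the example):

import sys

import click


@click.command()
def status():
    """A bare command; with the fix it is only a fallback."""
    click.echo("status")


@click.group()
def tools():
    """A group; discovered ahead of any bare command."""


@tools.command()
def clean():
    click.echo("cleaning")


def pick_command_obj(module):
    """Mimic the manager's scan: groups win, first bare command is fallback."""
    found = []
    for name in dir(module):
        attr = getattr(module, name)
        if isinstance(attr, click.Group):
            return attr
        elif isinstance(attr, click.Command):
            found.append(attr)
    return found[0] if found else None


print(pick_command_obj(sys.modules[__name__]).name)  # -> "tools"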
mcli/{app → self}/completion_cmd.py
RENAMED
@@ -27,7 +27,7 @@ def bash_completion(ctx):
 
     # Get the root CLI app
     app = ctx.find_root().command
-    complete = BashComplete(app, {}, "mcli", "
+    complete = BashComplete(app, {}, "mcli", "_MCLI_COMPLETE")
     script = complete.source()
 
     click.echo("# Bash completion script for MCLI")
@@ -44,7 +44,7 @@ def zsh_completion(ctx):
 
     # Get the root CLI app
     app = ctx.find_root().command
-    complete = ZshComplete(app, {}, "mcli", "
+    complete = ZshComplete(app, {}, "mcli", "_MCLI_COMPLETE")
     script = complete.source()
 
     click.echo("# Zsh completion script for MCLI")
@@ -61,7 +61,7 @@ def fish_completion(ctx):
 
     # Get the root CLI app
     app = ctx.find_root().command
-    complete = FishComplete(app, {}, "mcli", "
+    complete = FishComplete(app, {}, "mcli", "_MCLI_COMPLETE")
     script = complete.source()
 
     click.echo("# Fish completion script for MCLI")
@@ -101,7 +101,7 @@ def install_completion(ctx, shell):
     if shell == "bash":
         from click.shell_completion import BashComplete
 
-        complete = BashComplete(app, {}, "mcli", "
+        complete = BashComplete(app, {}, "mcli", "_MCLI_COMPLETE")
         script = complete.source()
 
         # Install to bash completion directory
@@ -130,7 +130,7 @@ def install_completion(ctx, shell):
     elif shell == "zsh":
         from click.shell_completion import ZshComplete
 
-        complete = ZshComplete(app, {}, "mcli", "
+        complete = ZshComplete(app, {}, "mcli", "_MCLI_COMPLETE")
         script = complete.source()
 
         # Install to zsh completion directory
@@ -161,7 +161,7 @@ def install_completion(ctx, shell):
     elif shell == "fish":
         from click.shell_completion import FishComplete
 
-        complete = FishComplete(app, {}, "mcli", "
+        complete = FishComplete(app, {}, "mcli", "_MCLI_COMPLETE")
         script = complete.source()
 
         # Install to fish completion directory
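The restored `complete_var` follows Click's convention of naming the completion environment variable after the program (`_MCLI_COMPLETE` for an `mcli` executable). A sketch reproducing the script generation the same way these commands do, with a stand-in root group:

import click
from click.shell_completion import BashComplete


@click.group()
def cli():
    """Stand-in for the root mcli app."""


# Same constructor shape as in the diff: (cli, ctx_args, prog_name, complete_var)
complete = BashComplete(cli, {}, "mcli", "_MCLI_COMPLETE")
print(complete.source())  # emits the bash completion script to stdout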
mcli/self/self_cmd.py
CHANGED
@@ -1265,6 +1265,35 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
 # Register the plugin group with self_app
 self_app.add_command(plugin)
 
+# Import and register new commands that have been moved to self
+try:
+    from mcli.self.completion_cmd import completion
+    self_app.add_command(completion, name="completion")
+    logger.debug("Added completion command to self group")
+except ImportError as e:
+    logger.debug(f"Could not load completion command: {e}")
+
+try:
+    from mcli.self.logs_cmd import logs_group
+    self_app.add_command(logs_group, name="logs")
+    logger.debug("Added logs command to self group")
+except ImportError as e:
+    logger.debug(f"Could not load logs command: {e}")
+
+try:
+    from mcli.self.redis_cmd import redis_group
+    self_app.add_command(redis_group, name="redis")
+    logger.debug("Added redis command to self group")
+except ImportError as e:
+    logger.debug(f"Could not load redis command: {e}")
+
+try:
+    from mcli.self.visual_cmd import visual
+    self_app.add_command(visual, name="visual")
+    logger.debug("Added visual command to self group")
+except ImportError as e:
+    logger.debug(f"Could not load visual command: {e}")
+
 # This part is important to make the command available to the CLI
 if __name__ == "__main__":
     self_app()
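A quick way to confirm the relocated commands surface under the `self` group, sketched with Click's test runner (assumes `self_app` imports cleanly in your environment):

from click.testing import CliRunner

from mcli.self.self_cmd import self_app

runner = CliRunner()
result = runner.invoke(self_app, ["--help"])
# The help listing should now include completion, logs, redis, and visual.
print(result.output)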
mcli/test/test_cmd.py
ADDED
@@ -0,0 +1,30 @@
+"""
+Test command group for mcli.
+Contains testing and validation utilities.
+"""
+
+import click
+
+from mcli.lib.logger.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+@click.group(name="test")
+def test_group():
+    """Testing and validation commands"""
+    pass
+
+
+# Import and register subcommands
+try:
+    from mcli.test.cron_test_cmd import cron_test
+
+    test_group.add_command(cron_test, name="cron")
+    logger.debug("Added cron test command to test group")
+except ImportError as e:
+    logger.debug(f"Could not load cron test command: {e}")
+
+
+if __name__ == "__main__":
+    test_group()
mcli/workflow/model_service/openai_adapter.py
ADDED
@@ -0,0 +1,343 @@
+"""
+OpenAI API Adapter for MCLI Model Service
+
+Provides OpenAI-compatible endpoints for tools like aider.
+"""
+
+import json
+import time
+import uuid
+from datetime import datetime
+from typing import Any, AsyncGenerator, Dict, List, Optional
+
+from fastapi import APIRouter, Depends, HTTPException, Header, Request, status
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel, Field
+
+from mcli.lib.logger.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+class Message(BaseModel):
+    """OpenAI message format"""
+
+    role: str
+    content: str
+    name: Optional[str] = None
+
+
+class ChatCompletionRequest(BaseModel):
+    """OpenAI chat completion request"""
+
+    model: str
+    messages: List[Message]
+    temperature: Optional[float] = 0.7
+    top_p: Optional[float] = 0.9
+    n: Optional[int] = 1
+    stream: Optional[bool] = False
+    stop: Optional[List[str]] = None
+    max_tokens: Optional[int] = 2048
+    presence_penalty: Optional[float] = 0.0
+    frequency_penalty: Optional[float] = 0.0
+    user: Optional[str] = None
+
+
+class ChatCompletionChoice(BaseModel):
+    """Chat completion choice"""
+
+    index: int
+    message: Message
+    finish_reason: str
+
+
+class Usage(BaseModel):
+    """Token usage information"""
+
+    prompt_tokens: int
+    completion_tokens: int
+    total_tokens: int
+
+
+class ChatCompletionResponse(BaseModel):
+    """OpenAI chat completion response"""
+
+    id: str
+    object: str = "chat.completion"
+    created: int
+    model: str
+    choices: List[ChatCompletionChoice]
+    usage: Usage
+
+
+class ModelInfo(BaseModel):
+    """Model information"""
+
+    id: str
+    object: str = "model"
+    created: int
+    owned_by: str = "mcli"
+
+
+class ModelListResponse(BaseModel):
+    """Model list response"""
+
+    object: str = "list"
+    data: List[ModelInfo]
+
+
+class APIKeyManager:
+    """Manages API key authentication"""
+
+    def __init__(self):
+        self.valid_keys: Dict[str, Dict[str, Any]] = {}
+
+    def add_key(self, key: str, name: str = "default", metadata: Optional[Dict] = None):
+        """Add a valid API key"""
+        self.valid_keys[key] = {
+            "name": name,
+            "created_at": datetime.now().isoformat(),
+            "metadata": metadata or {},
+            "usage_count": 0,
+        }
+
+    def validate_key(self, key: str) -> bool:
+        """Validate an API key"""
+        if key in self.valid_keys:
+            self.valid_keys[key]["usage_count"] += 1
+            return True
+        return False
+
+    def remove_key(self, key: str):
+        """Remove an API key"""
+        if key in self.valid_keys:
+            del self.valid_keys[key]
+
+    def list_keys(self) -> List[Dict[str, Any]]:
+        """List all API keys (without showing the actual key)"""
+        return [
+            {"name": info["name"], "created_at": info["created_at"], "usage_count": info["usage_count"]}
+            for info in self.valid_keys.values()
+        ]
+
+
+class OpenAIAdapter:
+    """Adapter to make MCLI model service OpenAI-compatible"""
+
+    def __init__(self, model_manager, require_auth: bool = True):
+        self.model_manager = model_manager
+        self.require_auth = require_auth
+        self.api_key_manager = APIKeyManager()
+        self.router = APIRouter(prefix="/v1")
+
+        # Setup routes
+        self._setup_routes()
+
+    def _setup_routes(self):
+        """Setup OpenAI-compatible routes"""
+
+        @self.router.get("/models", response_model=ModelListResponse)
+        async def list_models(api_key: str = Depends(self.verify_api_key)):
+            """List available models (OpenAI compatible)"""
+            models = []
+
+            # Get loaded models from model manager
+            if hasattr(self.model_manager, "loaded_models"):
+                for model_name in self.model_manager.loaded_models.keys():
+                    models.append(
+                        ModelInfo(
+                            id=model_name,
+                            object="model",
+                            created=int(time.time()),
+                            owned_by="mcli",
+                        )
+                    )
+
+            # If no models loaded, return available lightweight models
+            if not models:
+                from .lightweight_model_server import LIGHTWEIGHT_MODELS
+
+                for model_key in LIGHTWEIGHT_MODELS.keys():
+                    models.append(
+                        ModelInfo(
+                            id=model_key,
+                            object="model",
+                            created=int(time.time()),
+                            owned_by="mcli",
+                        )
+                    )
+
+            return ModelListResponse(object="list", data=models)
+
+        @self.router.post("/chat/completions")
+        async def create_chat_completion(
+            request: ChatCompletionRequest, api_key: str = Depends(self.verify_api_key)
+        ):
+            """Create a chat completion (OpenAI compatible)"""
+            try:
+                # Extract the conversation history
+                messages = request.messages
+                prompt = self._messages_to_prompt(messages)
+
+                # Generate response using the model
+                if request.stream:
+                    return StreamingResponse(
+                        self._generate_stream(request, prompt), media_type="text/event-stream"
+                    )
+                else:
+                    response_text = await self._generate_response(request, prompt)
+
+                    # Create OpenAI-compatible response
+                    completion_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
+                    response = ChatCompletionResponse(
+                        id=completion_id,
+                        object="chat.completion",
+                        created=int(time.time()),
+                        model=request.model,
+                        choices=[
+                            ChatCompletionChoice(
+                                index=0,
+                                message=Message(role="assistant", content=response_text),
+                                finish_reason="stop",
+                            )
+                        ],
+                        usage=Usage(
+                            prompt_tokens=len(prompt.split()),
+                            completion_tokens=len(response_text.split()),
+                            total_tokens=len(prompt.split()) + len(response_text.split()),
+                        ),
+                    )
+
+                    return response
+
+            except Exception as e:
+                logger.error(f"Error in chat completion: {e}")
+                raise HTTPException(status_code=500, detail=str(e))
+
+    def _messages_to_prompt(self, messages: List[Message]) -> str:
+        """Convert OpenAI messages format to a simple prompt"""
+        prompt_parts = []
+
+        for message in messages:
+            role = message.role
+            content = message.content
+
+            if role == "system":
+                prompt_parts.append(f"System: {content}")
+            elif role == "user":
+                prompt_parts.append(f"User: {content}")
+            elif role == "assistant":
+                prompt_parts.append(f"Assistant: {content}")
+
+        return "\n\n".join(prompt_parts)
+
+    async def _generate_response(self, request: ChatCompletionRequest, prompt: str) -> str:
+        """Generate a response from the model"""
+        try:
+            # Use the lightweight model server if available
+            if hasattr(self.model_manager, "loaded_models"):
+                # Get the first loaded model or the requested model
+                model_name = request.model
+                available_models = list(self.model_manager.loaded_models.keys())
+
+                if not available_models:
+                    # Try to auto-load the requested model
+                    from .lightweight_model_server import LIGHTWEIGHT_MODELS
+
+                    if model_name in LIGHTWEIGHT_MODELS:
+                        logger.info(f"Auto-loading model: {model_name}")
+                        success = self.model_manager.download_and_load_model(model_name)
+                        if not success:
+                            raise HTTPException(
+                                status_code=500, detail=f"Failed to load model: {model_name}"
+                            )
+                    else:
+                        raise HTTPException(
+                            status_code=404,
+                            detail=f"Model {model_name} not found. Available models: {list(LIGHTWEIGHT_MODELS.keys())}",
+                        )
+
+                # Generate response (placeholder - would use actual model inference)
+                response = f"This is a response from MCLI model service using {model_name}. In a production environment, this would use the actual model for inference.\n\nYour prompt was: {prompt[:100]}..."
+
+                return response
+            else:
+                return "Model manager not properly initialized"
+
+        except Exception as e:
+            logger.error(f"Error generating response: {e}")
+            raise HTTPException(status_code=500, detail=str(e))
+
+    async def _generate_stream(
+        self, request: ChatCompletionRequest, prompt: str
+    ) -> AsyncGenerator[str, None]:
+        """Generate a streaming response"""
+        completion_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
+
+        # Generate response
+        response_text = await self._generate_response(request, prompt)
+
+        # Stream the response word by word
+        words = response_text.split()
+        for i, word in enumerate(words):
+            chunk = {
+                "id": completion_id,
+                "object": "chat.completion.chunk",
+                "created": int(time.time()),
+                "model": request.model,
+                "choices": [
+                    {
+                        "index": 0,
+                        "delta": {"content": word + " " if i < len(words) - 1 else word},
+                        "finish_reason": None if i < len(words) - 1 else "stop",
+                    }
+                ],
+            }
+
+            yield f"data: {json.dumps(chunk)}\n\n"
+
+        # Send final message
+        yield "data: [DONE]\n\n"
+
+    async def verify_api_key(self, authorization: Optional[str] = Header(None)) -> str:
+        """Verify API key from Authorization header"""
+        if not self.require_auth:
+            return "no-auth-required"
+
+        if not authorization:
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Missing API key",
+                headers={"WWW-Authenticate": "Bearer"},
+            )
+
+        # Extract API key from "Bearer <key>" format
+        try:
+            scheme, key = authorization.split()
+            if scheme.lower() != "bearer":
+                raise HTTPException(
+                    status_code=status.HTTP_401_UNAUTHORIZED,
+                    detail="Invalid authentication scheme",
+                    headers={"WWW-Authenticate": "Bearer"},
+                )
+        except ValueError:
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Invalid authorization header format",
+                headers={"WWW-Authenticate": "Bearer"},
+            )
+
+        # Validate the API key
+        if not self.api_key_manager.validate_key(key):
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Invalid API key",
+                headers={"WWW-Authenticate": "Bearer"},
+            )
+
+        return key
+
+
+def create_openai_adapter(model_manager, require_auth: bool = True) -> OpenAIAdapter:
+    """Create an OpenAI adapter instance"""
+    return OpenAIAdapter(model_manager, require_auth)
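Since the routes and response models mirror the OpenAI API, the official client works unmodified. A sketch with the `openai` Python package (>=1.0), assuming the server from `model start --openai-compatible` is running locally; the port, key, and model id below are illustrative:

from openai import OpenAI  # pip install openai

client = OpenAI(
    base_url="http://localhost:51234/v1",  # assumed default port
    api_key="secret123",                   # must match --api-key
)

resp = client.chat.completions.create(
    model="qwen-0.5b",  # illustrative; pick an id from GET /v1/models
    messages=[{"role": "user", "content": "Say hello"}],
)
print(resp.choices[0].message.content)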
{mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcli-framework
-Version: 7.5.0
+Version: 7.6.0
 Summary: 🚀 High-performance CLI framework with Rust extensions, AI chat, and stunning visuals
 Author-email: Luis Fernandez de la Vara <luis@lefv.io>
 Maintainer-email: Luis Fernandez de la Vara <luis@lefv.io>
{mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/RECORD
CHANGED
@@ -2,14 +2,9 @@ mcli/cli.py,sha256=6KTyXn-pmVkAbCDu59PbiNKBwNra5su31ujFFZ6CBOM,389
 mcli/config.toml,sha256=263yEVvP_W9F2zOLssUBgy7amKaRAFQuBrfxcMhKxaQ,1706
 mcli/app/chat_cmd.py,sha256=OJK91iX-f-CL5-m3ECG4IlcbLbsWQpPp7XWuGnb0cVQ,1552
 mcli/app/commands_cmd.py,sha256=0A54cXazYZRe0Zs_tvBRcqDNtyY22WCxSJ4qfxyue2s,32338
-mcli/app/
-mcli/app/
-mcli/app/
-mcli/app/logs_cmd.py,sha256=SCzZ4VZs6p42hksun_w4WN33xIZgmq7RjdWX8P2WcT4,15056
-mcli/app/main.py,sha256=axQx8HjH5iTg2E4vyPNXngTQS4zOyZAs_G4tG-k_IOU,19350
-mcli/app/model_cmd.py,sha256=D-_HckRSI6ly8GwIHflK5S92xitlbLtkpSEO7a6x5xs,12889
-mcli/app/redis_cmd.py,sha256=Cl0LQ3Mqt27gLeb542_xw6bJBbIE-CBmWyMmaUTSk8c,9426
-mcli/app/visual_cmd.py,sha256=jXighahHxeM9HANQ2Brk6nKFgi2ZuQBOBH7PE5xhebk,9428
+mcli/app/completion_helpers.py,sha256=e62C6w2N-XoD66GYYHgtvKKoD3kYMuIeBBGzVKbuL04,7497
+mcli/app/main.py,sha256=1IgnVmEkONVAB2VgXAdjPSDtbfrk-zb5uR2P6tAHLJY,18967
+mcli/app/model_cmd.py,sha256=2Rn6KqEfGbQ5VQxNH0-6FgArZMmCTGiRmCtTs80mBGk,16741
 mcli/app/model/model.py,sha256=EUGu_td-hRlbf4OElkdk1-0p7WyuG7sZmb-Ux2-J9KY,39061
 mcli/app/video/video.py,sha256=3TL8vG3XSKzH_Iyy-IHPZOwNtT7js0VqVVNwIgfDvpk,41910
 mcli/chat/chat.py,sha256=tk4laKe2uSVg9JukacSNTQhCFRlzYbaz1Qdkg8Mu_Bw,102138
@@ -17,7 +12,7 @@ mcli/chat/command_rag.py,sha256=Ee8usPyRDRYDWpQ79dI7xbxM8Ljxsy_ym_MnL37nPAo,1936
 mcli/chat/enhanced_chat.py,sha256=e3odh5klewDHIjfNOyvifLzCdHrysDc2IvNVHzTPIh4,27072
 mcli/chat/system_controller.py,sha256=SuGvnIh2QObvM1DMicF3gGyeBkbz_xXS-hOOHjWx5j4,39114
 mcli/chat/system_integration.py,sha256=xQ11thOUswPg8r1HZkId6U3bTCOtMYngt0-mUYYXpt4,40196
-mcli/lib/custom_commands.py,sha256
+mcli/lib/custom_commands.py,sha256=ss-rr0i_Gk-MgGpqyQ9hMvsvoyRNQadxH0Y8S9ujpmY,14587
 mcli/lib/lib.py,sha256=mlp2INx-UKTOECcA7Kens9yNt2gJi7GbKWFmf4cxj0c,632
 mcli/lib/paths.py,sha256=k6sDwvD8QRzBkBOllvXkokameumpTjpJ7pQrP7z1en0,2455
 mcli/lib/api/api.py,sha256=sPgAIYC8Z7AWV2TCBssNSKotbRggBqNLsbfzbjkhmUY,18558
@@ -147,8 +142,14 @@ mcli/ml/training/train_model.py,sha256=vXgRMsG1jdr9rG7x6uv98y22fbRFtbESox1RmTVEP
 mcli/mygroup/test_cmd.py,sha256=WjzgoH1WFa79wc8A7O6UMuJfookLfgciUNcCMbKHAQQ,21
 mcli/public/public.py,sha256=t9BkO1XV7s3YcoH0bbIpyjZ05UX_vBjaKtKkuDX7wZ0,114
 mcli/public/oi/oi.py,sha256=SQabQWQ1pE67pWYEHwIDc3R93DARJfB6VHk7qxWx9xo,308
-mcli/self/
+mcli/self/completion_cmd.py,sha256=FKNVc_4ikWTGbDHybiNZGdxrggvt6A6q1rnzuyFVzVM,7754
+mcli/self/logs_cmd.py,sha256=SCzZ4VZs6p42hksun_w4WN33xIZgmq7RjdWX8P2WcT4,15056
+mcli/self/redis_cmd.py,sha256=Cl0LQ3Mqt27gLeb542_xw6bJBbIE-CBmWyMmaUTSk8c,9426
+mcli/self/self_cmd.py,sha256=ZSms98HrNWlwp-WammkyTct-eYpMBkLFGA9VteHKZh8,46503
 mcli/self/test_cmd.py,sha256=WjzgoH1WFa79wc8A7O6UMuJfookLfgciUNcCMbKHAQQ,21
+mcli/self/visual_cmd.py,sha256=jXighahHxeM9HANQ2Brk6nKFgi2ZuQBOBH7PE5xhebk,9428
+mcli/test/cron_test_cmd.py,sha256=Ai4Smg2WxULeiMD5s2m_S_fXdMAAQsKHpSc4iJGSnwI,26156
+mcli/test/test_cmd.py,sha256=tk4U1Kyt6t9IW4xvXM0JxFlAeu5mJ6bXAiryGhc108U,600
 mcli/workflow/lsh_integration.py,sha256=jop80DUjdOSxmqPb-gX_OBep5f1twViv-pXmkcFqBPY,13314
 mcli/workflow/workflow.py,sha256=P_W5LOB3lowvvlfEp3mGwS3eNq4tpbiUY-poFulAF9E,393
 mcli/workflow/daemon/async_command_database.py,sha256=pvfKYjt0Jg1EPwJ1p2C0M3bsBWvjEs4Ok-Y6-jY0qVI,24873
@@ -171,6 +172,7 @@ mcli/workflow/model_service/lightweight_model_server.py,sha256=VyBrUba1maPHNDzO_
 mcli/workflow/model_service/lightweight_test.py,sha256=J1--5Q8YvJT3Wzhqf0ElDAostRVA2yDs1LkVwkDMf5M,7330
 mcli/workflow/model_service/model_service.py,sha256=pAxxHw4tShkrnWWFBnzsrGGKEFa8ZEv4Mza_C8clK38,69729
 mcli/workflow/model_service/ollama_efficient_runner.py,sha256=fFZLoIfmgbQnMLwnXuH2ya5o_Tu6ww3ERtXg58HbamE,14340
+mcli/workflow/model_service/openai_adapter.py,sha256=TsI8wvrIwv7TlrzK5DVxVy4WVhDDqZztX_MIeetk4Es,11858
 mcli/workflow/model_service/pdf_processor.py,sha256=YgxGPd37L8OUoocrolYKJTwk_VPknbvTtQnOnfpXMYU,13621
 mcli/workflow/model_service/test_efficient_runner.py,sha256=whu93oXrM0Z8HEzcKCc1DlAVY6CeGDXURAsQ0V-gSjo,6355
 mcli/workflow/model_service/test_example.py,sha256=aMSNRyjz5ian6cmHQt_sE5-q10V82iYJDjeAV1rsfJs,9515
@@ -204,9 +206,9 @@ mcli/workflow/scheduler/persistence.py,sha256=SU8-F5wTpTercZvTeAXKlGI7gwHyfmYDhX
 mcli/workflow/scheduler/scheduler.py,sha256=1Ujq9VgL1rSTCAtshuLA2_sodW6HOj0MEZem7Ga-kic,23351
 mcli/workflow/sync/test_cmd.py,sha256=neVgs9zEnKSxlvzDpFkuCGucqnzjrShm2OvJtHibslg,10009
 mcli/workflow/wakatime/wakatime.py,sha256=sEjsUKa3-XyE8Ni6sAb_D3GAY5jDcA30KknW9YTbLTA,142
-mcli_framework-7.
-mcli_framework-7.
-mcli_framework-7.
-mcli_framework-7.
-mcli_framework-7.
-mcli_framework-7.
+mcli_framework-7.6.0.dist-info/licenses/LICENSE,sha256=sahwAMfrJv2-V66HNPTp7A9UmMjxtyejwTZZoWQvEcI,1075
+mcli_framework-7.6.0.dist-info/METADATA,sha256=w8eFipi2tKKMyNkEmJxvQkuGNQ1UxJ8XU0bga9YAvlA,14802
+mcli_framework-7.6.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mcli_framework-7.6.0.dist-info/entry_points.txt,sha256=dYrZbDIm-KUPsl1wfv600Kx_8sMy89phMkCihbDRgP8,261
+mcli_framework-7.6.0.dist-info/top_level.txt,sha256=_bnO8J2EUkliWivey_1le0UrnocFKmyVMQjbQ8iVXjc,5
+mcli_framework-7.6.0.dist-info/RECORD,,
/mcli/{app → self}/logs_cmd.py
RENAMED
File without changes

/mcli/{app → self}/redis_cmd.py
RENAMED
File without changes

/mcli/{app → self}/visual_cmd.py
RENAMED
File without changes

/mcli/{app → test}/cron_test_cmd.py
RENAMED
File without changes

{mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/WHEEL
RENAMED
File without changes

{mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/entry_points.txt
RENAMED
File without changes

{mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/licenses/LICENSE
RENAMED
File without changes

{mcli_framework-7.5.0.dist-info → mcli_framework-7.6.0.dist-info}/top_level.txt
RENAMED
File without changes