lyceum-cli 1.0.28__py3-none-any.whl → 1.0.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lyceum/external/compute/inference/models.py CHANGED
@@ -1,67 +1,52 @@
 """Model discovery and information commands for AI inference"""
 
-
 import httpx
 import typer
 from rich.console import Console
-from rich.panel import Panel
 from rich.table import Table
 
 from ....shared.config import config
 
 console = Console()
 
-models_app = typer.Typer(name="models", help="Model discovery and information")
-
-
-@models_app.command("list")
-def list_models(
-    model_type: str | None = typer.Option(
-        None, "--type", "-t", help="Filter by model type (text, image, multimodal, etc.)"
-    ),
-    available_only: bool = typer.Option(
-        False, "--available", "-a", help="Show only available models"
-    ),
-    sync_only: bool = typer.Option(
-        False, "--sync", help="Show only models that support synchronous inference"
-    ),
-    async_only: bool = typer.Option(
-        False, "--async", help="Show only models that support async/batch inference"
-    ),
-):
-    """List all available AI models"""
+def models_cmd():
+    """Retrieve available models"""
     try:
-        # Determine the endpoint based on filters
-        if sync_only:
-            endpoint = "/api/v2/external/models/sync"
-        elif async_only:
-            endpoint = "/api/v2/external/models/async"
-        elif model_type:
-            endpoint = f"/api/v2/external/models/type/{model_type}"
-        else:
-            endpoint = "/api/v2/external/models/"
+        config.get_client()
+
+        endpoint = "/api/v2/external/models/"
 
         url = f"{config.base_url}{endpoint}"
-        headers = {"Authorization": f"Bearer {config.api_key}"}
+        headers = {
+            "Authorization": f"Bearer {config.api_key}",
+            "ngrok-skip-browser-warning": "true",
+        }
 
-        console.print("[dim]🔍 Fetching available models...[/dim]")
+        console.print("[dim]Fetching available models...[/dim]")
 
         with httpx.Client() as http_client:
            response = http_client.get(url, headers=headers, timeout=10.0)
 
            if response.status_code != 200:
-                console.print(f"[red]Error: HTTP {response.status_code}[/red]")
-                console.print(f"[red]{response.text}[/red]")
+                console.print(f"[red]Error: HTTP {response.status_code}[/red]")
+                try:
+                    console.print(f"[red]{response.text}[/red]")
+                except:
+                    pass
                raise typer.Exit(1)
 
            models = response.json()
 
-            # Filter by availability if requested
-            if available_only:
-                models = [m for m in models if m.get('available', False)]
+            if not models:
+                console.print("[yellow]No models found matching your criteria[/yellow]")
+                return
+
+            # Filter to only available models
+            models = [m for m in models if m.get('available', False)]
 
            if not models:
-                console.print("[yellow]⚠️ No models found matching your criteria[/yellow]")
+                console.print("[yellow]No available models found.[/yellow]")
+                console.print("[dim]Deploy a new model with: lyceum infer deploy <model_id>[/dim]")
                return
 
            # Create a detailed table
@@ -69,180 +54,22 @@ def list_models(
            table.add_column("Model ID", style="cyan", no_wrap=True)
            table.add_column("Name", style="white")
            table.add_column("Type", style="magenta")
-            table.add_column("Provider", style="blue")
-            table.add_column("Status", justify="center")
-            table.add_column("Price/1K", justify="right", style="green")
-            table.add_column("Sync", justify="center", style="yellow")
-            table.add_column("Async", justify="center", style="yellow")
 
-            # Sort models: available first, then by type, then by name
+            # Sort models by type, then by name
            sorted_models = sorted(models, key=lambda m: (
-                not m.get('available', False),
                m.get('type', 'text'),
                m.get('model_id', '')
            ))
 
            for model in sorted_models:
-                # Status with emoji
-                status = "🟢 Yes" if model.get('available') else "🔴 No"
-
-                # Sync/Async support
-                sync_support = "✓" if model.get('supports_sync', True) else "✗"
-                async_support = "✓" if model.get('supports_async', True) else "✗"
-
-                # Price
-                price = model.get('price_per_1k_tokens', 0)
-                price_str = f"${price:.4f}" if price > 0 else "Free"
-
                table.add_row(
                    model.get('model_id', 'Unknown'),
                    model.get('name', 'Unknown'),
-                    model.get('type', 'text').title(),
-                    model.get('provider', 'unknown').title(),
-                    status,
-                    price_str,
-                    sync_support,
-                    async_support
-                )
-
-            console.print(table)
-
-            # Show summary
-            available_count = sum(1 for m in models if m.get('available'))
-            total_count = len(models)
-            console.print(f"\n[dim]📊 {available_count}/{total_count} models available[/dim]")
-
-    except Exception as e:
-        console.print(f"[red]❌ Error: {e}[/red]")
-        raise typer.Exit(1)
-
-
-@models_app.command("info")
-def get_model_info(
-    model_id: str = typer.Argument(..., help="Model ID to get information about"),
-):
-    """Get detailed information about a specific model"""
-    try:
-        url = f"{config.base_url}/api/v2/external/models/{model_id}"
-        headers = {"Authorization": f"Bearer {config.api_key}"}
-
-        console.print(f"[dim]🔍 Fetching info for model: {model_id}...[/dim]")
-
-        with httpx.Client() as http_client:
-            response = http_client.get(url, headers=headers, timeout=10.0)
-
-            if response.status_code == 404:
-                console.print(f"[red]❌ Model '{model_id}' not found[/red]")
-                raise typer.Exit(1)
-            elif response.status_code != 200:
-                console.print(f"[red]❌ Error: HTTP {response.status_code}[/red]")
-                console.print(f"[red]{response.text}[/red]")
-                raise typer.Exit(1)
-
-            model = response.json()
-
-            # Build detailed info display
-            status_color = "green" if model.get('available') else "red"
-            status_text = "Available ✓" if model.get('available') else "Unavailable ✗"
-
-            info_lines = [
-                f"[bold cyan]Model ID:[/bold cyan] {model.get('model_id', 'Unknown')}",
-                f"[bold]Name:[/bold] {model.get('name', 'Unknown')}",
-                f"[bold]Description:[/bold] {model.get('description', 'No description available')}",
-                "",
-                f"[bold]Type:[/bold] {model.get('type', 'text').title()}",
-                f"[bold]Provider:[/bold] {model.get('provider', 'unknown').title()}",
-                f"[bold]Version:[/bold] {model.get('version', 'N/A')}",
-                f"[bold]Status:[/bold] [{status_color}]{status_text}[/{status_color}]",
-                "",
-                "[bold yellow]Capabilities:[/bold yellow]",
-                f" • Synchronous inference: {'Yes ✓' if model.get('supports_sync', True) else 'No ✗'}",
-                f" • Asynchronous/Batch: {'Yes ✓' if model.get('supports_async', True) else 'No ✗'}",
-                f" • GPU Required: {'Yes' if model.get('gpu_required', False) else 'No'}",
-                "",
-                "[bold green]Input/Output:[/bold green]",
-                f" • Input types: {', '.join(model.get('input_types', []))}",
-                f" • Output types: {', '.join(model.get('output_types', []))}",
-                f" • Max input tokens: {model.get('max_input_tokens', 'N/A'):,}",
-                f" • Max output tokens: {model.get('max_output_tokens', 'N/A'):,}",
-                "",
-                "[bold green]Pricing:[/bold green]",
-                f" • Base price: ${model.get('price_per_1k_tokens', 0):.4f} per 1K tokens",
-                f" • Batch discount: {model.get('batch_pricing_discount', 0.5) * 100:.0f}% off",
-                "",
-                "[bold blue]Performance:[/bold blue]",
-                f" • Estimated latency: {model.get('estimated_latency_ms', 0):,} ms",
-            ]
-
-            panel = Panel(
-                "\n".join(info_lines),
-                title=f"[bold white]Model Information: {model.get('name', model_id)}[/bold white]",
-                border_style="cyan",
-                padding=(1, 2)
-            )
-
-            console.print(panel)
-
-    except Exception as e:
-        console.print(f"[red]❌ Error: {e}[/red]")
-        raise typer.Exit(1)
-
-
-@models_app.command("instances")
-def list_model_instances(
-    model_id: str = typer.Argument(..., help="Model ID to list instances for"),
-):
-    """List running instances for a specific model"""
-    try:
-        url = f"{config.base_url}/api/v2/external/models/{model_id}/instances"
-        headers = {"Authorization": f"Bearer {config.api_key}"}
-
-        console.print(f"[dim]🔍 Fetching instances for model: {model_id}...[/dim]")
-
-        with httpx.Client() as http_client:
-            response = http_client.get(url, headers=headers, timeout=10.0)
-
-            if response.status_code == 404:
-                console.print(f"[red]❌ Model '{model_id}' not found[/red]")
-                raise typer.Exit(1)
-            elif response.status_code != 200:
-                console.print(f"[red]❌ Error: HTTP {response.status_code}[/red]")
-                console.print(f"[red]{response.text}[/red]")
-                raise typer.Exit(1)
-
-            instances = response.json()
-
-            if not instances:
-                console.print(f"[yellow]⚠️ No instances found for model '{model_id}'[/yellow]")
-                return
-
-            # Create table for instances
-            table = Table(title=f"Instances for {model_id}", show_header=True, header_style="bold cyan")
-            table.add_column("Instance ID", style="cyan", no_wrap=True)
-            table.add_column("Instance URL", style="blue")
-            table.add_column("Status", justify="center", style="green")
-            table.add_column("Node ID", style="magenta")
-            table.add_column("Last Health Check", style="dim")
-
-            for instance in instances:
-                # Truncate instance ID for display
-                instance_id = instance.get('id', 'Unknown')
-                short_id = instance_id[:12] + "..." if len(instance_id) > 12 else instance_id
-
-                status = instance.get('status', 'unknown')
-                status_emoji = "🟢" if status == "running" else "🔴"
-
-                table.add_row(
-                    short_id,
-                    instance.get('instance_url', 'N/A'),
-                    f"{status_emoji} {status}",
-                    instance.get('node_id', 'N/A') or 'N/A',
-                    instance.get('last_health_check', 'N/A') or 'N/A'
+                    model.get('type', 'text').title()
                )
 
            console.print(table)
-            console.print(f"\n[dim]📊 Total instances: {len(instances)}[/dim]")
 
    except Exception as e:
-        console.print(f"[red]Error: {e}[/red]")
+        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
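For context on the new models_cmd: it drops the old type/sync/async filters and keeps only the availability filter plus a (type, model_id) sort before rendering the three-column table. A minimal sketch of that logic run against an illustrative payload (the sample records below are invented stand-ins for the /api/v2/external/models/ response, not real API output):

# Sketch of the filtering and sorting now done in models_cmd();
# the sample records are hypothetical.
sample_models = [
    {"model_id": "model-b", "name": "Model B", "type": "text", "available": True},
    {"model_id": "model-a", "name": "Model A", "type": "image", "available": False},
]

# Keep only available models (filtering is now unconditional)
available = [m for m in sample_models if m.get("available", False)]

# Sort by type, then model_id (the old code also pushed unavailable models last)
sorted_models = sorted(available, key=lambda m: (m.get("type", "text"), m.get("model_id", "")))

for m in sorted_models:
    print(m.get("model_id", "Unknown"), m.get("name", "Unknown"), m.get("type", "text").title())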
lyceum/main.py CHANGED
@@ -14,7 +14,9 @@ from .external.compute.execution.docker import docker_app
 from .external.compute.execution.docker_compose import compose_app
 from .external.compute.execution.workloads import workloads_app
 from .external.compute.execution.notebook import notebook_app
-from .external.vms.management import vms_app
+from .external.compute.execution.gpu_selection import gpu_selection_app
+# from .external.compute.inference.infer import infer_app
+# from .external.vms.management import vms_app
 
 app = typer.Typer(
     name="lyceum",
@@ -31,6 +33,9 @@ app.add_typer(docker_app, name="docker")
 app.add_typer(compose_app, name="compose")
 app.add_typer(workloads_app, name="workloads")
 app.add_typer(notebook_app, name="notebook")
+app.add_typer(gpu_selection_app, name="gpu-selection")
+# app.add_typer(infer_app, name="infer")
+# app.add_typer(vms_app, name="vms")
 
 
 
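The root CLI composes its command groups as Typer sub-applications: each module exposes a typer.Typer instance and main.py attaches it with app.add_typer, which is how the new gpu-selection group is wired in here while the infer and vms groups stay commented out. A small self-contained sketch of that pattern (the "example" group below is hypothetical, not part of the package):

import typer

# A hypothetical command group, mirroring how gpu_selection_app and the
# other *_app modules are structured.
example_app = typer.Typer(name="example", help="Hypothetical command group")


@example_app.command("hello")
def hello(name: str = typer.Argument("world")):
    """Print a greeting."""
    typer.echo(f"hello {name}")


# Root app, as in lyceum/main.py, with the group mounted under a subcommand name.
app = typer.Typer(name="lyceum")
app.add_typer(example_app, name="example")

if __name__ == "__main__":
    app()  # e.g. `python cli.py example hello Ada`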
lyceum/shared/config.py CHANGED
@@ -16,9 +16,12 @@ console = Console()
 CONFIG_DIR = Path.home() / ".lyceum"
 CONFIG_FILE = CONFIG_DIR / "config.json"
 
-# Supabase configuration - these are public anon keys safe for client-side use
+# Supabase configuration from environment variables
 SUPABASE_URL = os.getenv("SUPABASE_URL", "https://tqcebgbexyszvqhnwnhh.supabase.co")
-SUPABASE_ANON_KEY = os.getenv("SUPABASE_ANON_KEY", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InRxY2ViZ2JleHlzenZxaG53bmhoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDcxNTQ0NzEsImV4cCI6MjA2MjczMDQ3MX0.VvPV8oRJUSGadaPf9RwTn6URuC_5oL7KulsJgE2vpH4")
+SUPABASE_ANON_KEY = os.getenv(
+    "SUPABASE_ANON_KEY",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InRxY2ViZ2JleHlzenZxaG53bmhoIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NDcxNTQ0NzEsImV4cCI6MjA2MjczMDQ3MX0.VvPV8oRJUSGadaPf9RwTn6URuC_5oL7KulsJgE2vpH4"
+)
 
 
 class _Config:
@@ -88,13 +91,6 @@ class _Config:
            return False
 
        try:
-            # Check if Supabase credentials are configured
-            if not SUPABASE_URL or not SUPABASE_ANON_KEY:
-                console.print(
-                    "[red]Error: SUPABASE_URL and SUPABASE_ANON_KEY environment variables must be set[/red]"
-                )
-                return False
-
            # Use Supabase's refresh_session method with the public anon key
            # This is safe for client-side use and properly scoped for user token refresh
            supabase = create_client(SUPABASE_URL, SUPABASE_ANON_KEY)
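Functionally the Supabase block is unchanged: both values still come from os.getenv with baked-in public defaults (the anon-key default is merely reflowed), which is presumably why the empty-credential check inside _Config was dropped. A tiny sketch of the override behaviour (the override URL below is hypothetical):

import os

# An environment variable, when set, overrides the baked-in public default;
# otherwise the default value is used. The URL here is hypothetical.
os.environ["SUPABASE_URL"] = "https://example-project.supabase.co"

SUPABASE_URL = os.getenv("SUPABASE_URL", "https://tqcebgbexyszvqhnwnhh.supabase.co")
print(SUPABASE_URL)  # -> https://example-project.supabase.co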
lyceum/shared/streaming.py CHANGED
@@ -60,7 +60,7 @@ def normalize_newlines(text: str) -> str:
     return re.sub(r'\n{2,}', '\n', text)
 
 
-def stream_execution_output(execution_id: str, streaming_url: str = None) -> bool:
+def stream_execution_output(execution_id: str, streaming_url: str = None, status: StatusLine = None) -> bool:
     """Stream execution output in real-time. Returns True if successful, False if failed."""
     if not streaming_url:
         # Fallback to stream endpoint if no streaming URL provided
@@ -69,15 +69,25 @@ def stream_execution_output(execution_id: str, streaming_url: str = None) -> boo
         stream_url = streaming_url
 
     try:
-        console.print("[dim]Connecting to execution stream...[/dim]")
-
-        headers = {"Authorization": f"Bearer {config.api_key}"}
-        with httpx.stream("GET", stream_url, headers=headers, timeout=600.0) as response:
+        if status:
+            status.update("Connecting to stream...")
+
+        headers = {
+            "Accept": "text/event-stream",
+            "Cache-Control": "no-cache",
+        }
+        with httpx.stream("POST", stream_url, headers=headers, timeout=600.0) as response:
            if response.status_code != 200:
-                console.print(f"[red]Stream failed: HTTP {response.status_code}[/red]")
+                if status:
+                    status.stop()
+                if response.status_code == 404:
+                    console.print("[yellow]Stream not found - execution may have already completed[/yellow]")
+                else:
+                    console.print(f"[red]Stream failed: HTTP {response.status_code}[/red]")
                return False
 
-            console.print("[dim]Streaming output...[/dim]")
+            if status:
+                status.update("Waiting for output...")
 
            first_output = True
            for line in response.iter_lines():
@@ -93,6 +103,10 @@ def stream_execution_output(execution_id: str, streaming_url: str = None) -> boo
                        output_data = data["output"]
                        content = output_data.get("content", "")
                        if content:
+                            # Stop status spinner on first output
+                            if first_output and status:
+                                status.stop()
+                            first_output = False
                            clean_output = strip_ansi_codes(content)
                            # Normalize newlines to avoid excessive blank lines
                            clean_output = normalize_newlines(clean_output)
@@ -100,6 +114,8 @@ def stream_execution_output(execution_id: str, streaming_url: str = None) -> boo
 
                    # Handle job finished event
                    elif "jobFinished" in data:
+                        if status:
+                            status.stop()
                        job_data = data["jobFinished"]
                        job = job_data.get("job", {})
                        result = job.get("result", {})
@@ -110,6 +126,7 @@ def stream_execution_output(execution_id: str, streaming_url: str = None) -> boo
                        if not first_output:
                            print()
 
+                        # Check for system failure (error field present)
                        if error:
                            console.print(f"[red]{error}[/red]")
                            return False
@@ -126,34 +143,46 @@ def stream_execution_output(execution_id: str, streaming_url: str = None) -> boo
                    if event_type == "output":
                        output = data.get("content", "")
                        if output:
+                            if first_output and status:
+                                status.stop()
+                            first_output = False
                            clean_output = strip_ansi_codes(output)
                            clean_output = normalize_newlines(clean_output)
                            print(clean_output, end="", flush=True)
 
                    elif event_type == "completed":
-                        status = data.get("status", "unknown")
-                        if status == "completed":
-                            console.print("\n[green]Execution completed successfully[/green]")
+                        if status:
+                            status.stop()
+                        status_val = data.get("status", "unknown")
+                        if not first_output:
+                            print()
+                        if status_val == "completed":
                            return True
                        else:
-                            console.print(f"\n[red]Execution failed: {status}[/red]")
+                            console.print(f"[red]Failed: {status_val}[/red]")
                            return False
 
                    elif event_type == "error":
+                        if status:
+                            status.stop()
                        error_msg = data.get("message", "Unknown error")
-                        console.print(f"\n[red]Error: {error_msg}[/red]")
+                        console.print(f"[red]Error: {error_msg}[/red]")
                        return False
 
                except json.JSONDecodeError:
                    # Skip malformed JSON
                    continue
 
-        console.print("\n[yellow]Stream ended without completion signal[/yellow]")
+        if status:
+            status.stop()
+        console.print("[yellow]Stream ended without completion signal[/yellow]")
        # Fallback: poll execution status
        return check_execution_status(execution_id)
 
    except Exception as e:
-        console.print(f"\n[red]Streaming error: {e}[/red]")
+        if status:
+            status.stop()
+        console.print(f"[red]Streaming error: {e}[/red]")
        # Fallback: poll execution status
        return check_execution_status(execution_id)
 
@@ -177,16 +206,15 @@ def check_execution_status(execution_id: str) -> bool:
            status = data.get('status', 'unknown')
 
            if status == 'completed':
-                console.print("[green]Execution completed successfully[/green]")
                return True
            elif status in ['failed_user', 'failed_system', 'failed']:
-                console.print(f"[red]Execution failed: {status}[/red]")
+                console.print(f"[red]Failed: {status}[/red]")
                errors = data.get('errors')
                if errors:
                    console.print(f"[red]Error: {errors}[/red]")
                return False
            elif status in ['timeout', 'cancelled']:
-                console.print(f"[yellow]Execution {status}[/yellow]")
+                console.print(f"[yellow]{status.capitalize()}[/yellow]")
                return False
            elif status in ['running', 'pending', 'queued']:
                # Still running, continue polling
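The streaming changes thread an optional spinner through stream_execution_output: it is updated while connecting and waiting, and stopped on first output, on completion, and on every error path. The StatusLine type itself is defined elsewhere in the package and is not part of this diff; the stand-in below is only an assumption about its minimal interface (backed here by rich's Status), showing the update()/stop() contract the new code relies on:

from rich.console import Console


class StatusLine:
    """Hypothetical minimal stand-in for the StatusLine used by streaming.py."""

    def __init__(self, text: str, console: Console | None = None):
        # rich's Console.status() returns a Status spinner we can drive manually.
        self._status = (console or Console()).status(text)
        self._status.start()
        self._stopped = False

    def update(self, text: str) -> None:
        # Change the spinner message in place (e.g. "Waiting for output...").
        if not self._stopped:
            self._status.update(text)

    def stop(self) -> None:
        # Idempotent: the streaming code may call stop() on several paths.
        if not self._stopped:
            self._status.stop()
            self._stopped = True

Under that assumption, a caller would construct something like StatusLine("Connecting to stream...") and pass it as the new third argument.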
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lyceum-cli
-Version: 1.0.28
+Version: 1.0.29
 Summary: Command-line interface for Lyceum Cloud Execution API
 Home-page: https://lyceum.technology
 Author: Lyceum Team
@@ -1,5 +1,5 @@
 lyceum/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-lyceum/main.py,sha256=s4ZvNPGMeGoV9AIq68hEwfo9ioFfu2qIoRXb2upjNDk,1076
+lyceum/main.py,sha256=8outCbb7DSurGzwG1EX_5k89l85OeBgxUUedbjj6UIQ,1341
 lyceum/external/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 lyceum/external/auth/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 lyceum/external/auth/login.py,sha256=-yJ0aEV8_vDXiT6BXzjpqZ2uDdnTnkop4qhagw2dSZA,23447
@@ -8,27 +8,29 @@ lyceum/external/compute/execution/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZ
 lyceum/external/compute/execution/config.py,sha256=6JJgLJnDPTwevEaNdB1nEICih_qbBmws5u5_S9gj7k0,8866
 lyceum/external/compute/execution/docker.py,sha256=0Y6lxJAm56Jrl0HxeNz1mX6DGs556i2iMN9_U1JQP0c,9635
 lyceum/external/compute/execution/docker_compose.py,sha256=YsWPnw5nB1ZpqjU9X8o_klT78I5m46PapVwVEeWra_8,9189
+lyceum/external/compute/execution/gpu_selection.py,sha256=fzYW6k8exMbGnYpZTDKpALRrlKAO5RrppLUq6z2UWoo,37059
 lyceum/external/compute/execution/notebook.py,sha256=Gw9UhJ-UjYhpjdIYQ4IMYhVjhSkAFpOQ9aFYj1qOeww,7542
 lyceum/external/compute/execution/python.py,sha256=8Y9ElWs9RdauQbhECKcBPSoT0XZeGhXZ_pkEpr3sGro,12878
 lyceum/external/compute/execution/workloads.py,sha256=4fsRWbYGmsQMGPPIN1jUG8cG5NPG9yV26ANJ-DtaXqc,5844
 lyceum/external/compute/inference/__init__.py,sha256=4YLoUKDEzitexynJv_Q5O0w1lty8CJ6uyRxuc1LiaBw,89
-lyceum/external/compute/inference/batch.py,sha256=mgEndr02UM1j00o-iRLUpDqS5KFvyg0Htc0Gg0s3hTU,11394
-lyceum/external/compute/inference/chat.py,sha256=hITj_UGLaxCJQskU-YbeaEerM5Xt_eJpEsYrTJoUpk4,8485
-lyceum/external/compute/inference/models.py,sha256=BkCEdvyliezGOUulj557e-Eoif0_HKR3CxqpEhdAZaA,10339
+lyceum/external/compute/inference/batch.py,sha256=F4MlR2IEi1X7qUo3UDUVRWclHLLmmjqu7KZ3RkHIjNM,4349
+lyceum/external/compute/inference/chat.py,sha256=vGjRLYcR31jHCpHKhlZHJB9obboScz_RcuyHZ_yW1Ts,5301
+lyceum/external/compute/inference/infer.py,sha256=vf0jXN3Hb7H_dfI6dqZrp9oJiwutznHamFnhqa_Izig,3829
+lyceum/external/compute/inference/models.py,sha256=fZDZb4YRbzqEIU3CGfGD6t8BiSUJD-voMqtBCiwsVDE,2438
 lyceum/external/general/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 lyceum/external/vms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lyceum/external/vms/instances.py,sha256=8DKpI8PbyZFzk5RT-IPgoMDjkf_-HC-2pJKuSFs-5BA,11007
 lyceum/external/vms/management.py,sha256=dYEkN5Qiur-SG4G5CLOk2Rbr0HW3rK1BROSp0K6KxC8,15405
 lyceum/shared/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-lyceum/shared/config.py,sha256=XGMmoGVzmcxSvS-s1kDbOkp_LEGY-wEenKZ8cb4NBBM,5656
+lyceum/shared/config.py,sha256=gEXf_hEmgUq3rSxU2SY75qg32mFS6qsruox7uvQlf3I,5334
 lyceum/shared/display.py,sha256=-VSAfoa0yivTvxRrN2RYr2Sq1x_msZqENjnkSedmbhQ,4444
 lyceum/shared/imports.py,sha256=wEG4wfVTIqJ6MBWDRAN96iGmVCb9ST2aOqSjkbvajug,11768
-lyceum/shared/streaming.py,sha256=x3zA8Pn9ia06t8nKJfP6hztxOVKPUC3Nk3qmAiIsl9M,8194
+lyceum/shared/streaming.py,sha256=wFb7w7fra63y8WWaIA8_E1Z6Sx_6G-0J53Zh010eZgk,9355
 lyceum_cloud_execution_api_client/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 lyceum_cloud_execution_api_client/api/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 lyceum_cloud_execution_api_client/models/__init__.py,sha256=AMlb9R9O9aNC9hvKz_8TFpEfOolYC3VtFS5JX17kYks,4888
-lyceum_cli-1.0.28.dist-info/METADATA,sha256=gsm3uSzI8owqGaoCDqiHeYCAHYmCr-CVafDknHw1KUA,1482
-lyceum_cli-1.0.28.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-lyceum_cli-1.0.28.dist-info/entry_points.txt,sha256=Oq-9wDkxVd6MHgNiUTYwXI9SGhvR3VkD7Mvk0xhiUZo,43
-lyceum_cli-1.0.28.dist-info/top_level.txt,sha256=CR7FEMloAXgLsHUR6ti3mWNcpgje27HRHSfq8doIils,41
-lyceum_cli-1.0.28.dist-info/RECORD,,
+lyceum_cli-1.0.29.dist-info/METADATA,sha256=3ltYxfRInW369XotoAGtTzZ7j8uj5i0vPDpu6hI8rJM,1482
+lyceum_cli-1.0.29.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+lyceum_cli-1.0.29.dist-info/entry_points.txt,sha256=Oq-9wDkxVd6MHgNiUTYwXI9SGhvR3VkD7Mvk0xhiUZo,43
+lyceum_cli-1.0.29.dist-info/top_level.txt,sha256=CR7FEMloAXgLsHUR6ti3mWNcpgje27HRHSfq8doIils,41
+lyceum_cli-1.0.29.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.10.1)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any