fluxloop_cli-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fluxloop-cli might be problematic.
- fluxloop_cli/__init__.py +9 -0
- fluxloop_cli/arg_binder.py +219 -0
- fluxloop_cli/commands/__init__.py +5 -0
- fluxloop_cli/commands/config.py +355 -0
- fluxloop_cli/commands/generate.py +304 -0
- fluxloop_cli/commands/init.py +225 -0
- fluxloop_cli/commands/parse.py +293 -0
- fluxloop_cli/commands/run.py +310 -0
- fluxloop_cli/commands/status.py +227 -0
- fluxloop_cli/config_loader.py +159 -0
- fluxloop_cli/constants.py +12 -0
- fluxloop_cli/input_generator.py +158 -0
- fluxloop_cli/llm_generator.py +417 -0
- fluxloop_cli/main.py +97 -0
- fluxloop_cli/project_paths.py +80 -0
- fluxloop_cli/runner.py +634 -0
- fluxloop_cli/target_loader.py +95 -0
- fluxloop_cli/templates.py +277 -0
- fluxloop_cli/validators.py +31 -0
- fluxloop_cli-0.1.0.dist-info/METADATA +86 -0
- fluxloop_cli-0.1.0.dist-info/RECORD +24 -0
- fluxloop_cli-0.1.0.dist-info/WHEEL +5 -0
- fluxloop_cli-0.1.0.dist-info/entry_points.txt +2 -0
- fluxloop_cli-0.1.0.dist-info/top_level.txt +1 -0
fluxloop_cli/llm_generator.py
ADDED

@@ -0,0 +1,417 @@
"""LLM-backed input generation utilities."""

from __future__ import annotations

import asyncio
import hashlib
import inspect
import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional, Protocol, Sequence, Tuple

import httpx
from rich.console import Console
from rich.progress import (
    Progress,
    SpinnerColumn,
    TextColumn,
    BarColumn,
    TimeRemainingColumn,
)

from fluxloop.schemas import (
    ExperimentConfig,
    InputGenerationMode,
    LLMGeneratorConfig,
    PersonaConfig,
    VariationStrategy,
)

logger = logging.getLogger(__name__)


DEFAULT_STRATEGIES: Sequence[VariationStrategy] = (
    VariationStrategy.REPHRASE,
    VariationStrategy.VERBOSE,
    VariationStrategy.CONCISE,
)


class LLMGenerationError(RuntimeError):
    """Raised when LLM-backed generation fails."""


@dataclass
class LLMGenerationContext:
    """Context data passed into prompt templates."""

    base_input: Dict[str, Any]
    persona: Optional[PersonaConfig]
    strategy: VariationStrategy
    iteration: int


class LLMClient(Protocol):
    """Protocol describing asynchronous LLM client implementations."""

    async def generate(
        self,
        *,
        prompts: Sequence[Tuple[str, Dict[str, Any]]],
        config: ExperimentConfig,
        llm_config: LLMGeneratorConfig,
    ) -> List[Dict[str, Any]]:
        ...


def _ensure_llm_mode(config: ExperimentConfig) -> LLMGeneratorConfig:
    generation = config.input_generation
    if generation.mode != InputGenerationMode.LLM:
        raise LLMGenerationError(
            "LLM input generation requested but configuration mode is not set to 'llm'"
        )
    if not generation.llm.enabled:
        raise LLMGenerationError("LLM input generation is disabled in configuration")
    return generation.llm


def _hash_prompt(prompt: str) -> str:
    return hashlib.sha256(prompt.encode("utf-8")).hexdigest()[:16]


async def _generate_one_variation_openai(
    client: httpx.AsyncClient,
    llm_config: LLMGeneratorConfig,
    prompt_text: str,
    metadata: Dict[str, Any],
) -> Dict[str, Any]:
    """Generate a single input variation via OpenAI API."""
    messages: List[Dict[str, str]] = []
    if llm_config.system_prompt:
        messages.append({"role": "system", "content": llm_config.system_prompt})
    messages.append({"role": "user", "content": prompt_text})

    payload = {
        "model": llm_config.model,
        "messages": messages,
    }

    # Add GPT-5 specific controls if they exist on the config object
    # if hasattr(llm_config, "reasoning_effort") and llm_config.reasoning_effort:
    #     payload["reasoning"] = {"effort": llm_config.reasoning_effort}

    # if hasattr(llm_config, "text_verbosity") and llm_config.text_verbosity:
    #     payload["text"] = {"verbosity": llm_config.text_verbosity}

    response = await _request_openai(client, config=llm_config, payload=payload)

    text = None
    if "choices" in response:
        choices = response.get("choices", [])
        if choices:
            message = choices[0].get("message") or {}
            text = message.get("content")

    if text is None:
        error_details = json.dumps(response, indent=2)
        raise LLMGenerationError(
            f"OpenAI response did not contain content. Full response:\n{error_details}"
        )

    return {
        "input": text.strip(),
        "metadata": {
            **metadata,
            "model": llm_config.model,
            "provider": llm_config.provider,
            "prompt_hash": _hash_prompt(prompt_text),
            "prompt": prompt_text,
        },
    }


async def _request_openai(
    client: httpx.AsyncClient,
    *,
    config: LLMGeneratorConfig,
    payload: Dict[str, Any],
) -> Dict[str, Any]:
    endpoint = "https://api.openai.com/v1/chat/completions"
    headers = {}
    if config.api_key:
        headers["Authorization"] = f"Bearer {config.api_key}"

    try:
        response = await client.post(
            endpoint,
            headers=headers,
            timeout=config.request_timeout,
            json=payload,
        )
    except httpx.HTTPError as exc:
        raise LLMGenerationError(f"OpenAI request failed: {exc}") from exc

    if response.status_code >= 400:
        raise LLMGenerationError(
            f"OpenAI API error {response.status_code}: {response.text}"
        )

    return response.json()


async def _generate_variations_openai(
    *,
    client: httpx.AsyncClient,
    config: ExperimentConfig,
    llm_config: LLMGeneratorConfig,
    prompts: Sequence[Tuple[str, Dict[str, Any]]],
    progress: Progress,
    task_id: Any,
) -> List[Dict[str, Any]]:
    tasks = [
        _generate_one_variation_openai(client, llm_config, prompt_text, metadata)
        for prompt_text, metadata in prompts
    ]

    results: List[Dict[str, Any]] = []
    for future in asyncio.as_completed(tasks):
        try:
            result = await future
            results.append(result)
        except LLMGenerationError as exc:
            logger.warning(f"Failed to generate one variation: {exc}")
        except Exception as exc:
            logger.error(f"An unexpected error occurred during generation: {exc}")
        finally:
            progress.update(task_id, advance=1)

    return results


async def _generate_variations_mock(
    *,
    config: ExperimentConfig,
    llm_config: LLMGeneratorConfig,
    prompts: Sequence[Tuple[str, Dict[str, Any]]],
    progress: Progress,
    task_id: Any,
) -> List[Dict[str, Any]]:
    """Produce deterministic placeholder variations without calling an external API."""
    # config is accepted only for signature parity with the other providers.
    # Results mirror the shape of the OpenAI path so downstream handling is identical.
    results: List[Dict[str, Any]] = []
    for prompt_text, metadata in prompts:
        results.append(
            {
                "input": f"Mock variation ({metadata.get('strategy')}, base {metadata.get('base_index')})",
                "metadata": {
                    **metadata,
                    "model": llm_config.model,
                    "provider": llm_config.provider,
                    "prompt_hash": _hash_prompt(prompt_text),
                    "prompt": prompt_text,
                },
            }
        )
        progress.update(task_id, advance=1)

    return results


def _format_prompt(
    config: ExperimentConfig,
    llm_config: LLMGeneratorConfig,
    context: LLMGenerationContext,
) -> Tuple[str, Dict[str, Any]]:
    template = llm_config.user_prompt_template or (
        """You are an expert in creating high-quality datasets for testing AI agents.
Your task is to generate a single, realistic user message that a person would type into a support chat or search box. This message will be used as an input in an automated simulation to test an AI agent's performance.

**Instructions:**
1. Read the Base Input, Strategy, and Persona details carefully.
2. Generate a new user message that modifies the Base Input according to the given Strategy and reflects the Persona.
3. **Keep the message concise and natural.** Even when the strategy is "verbose," it should still be a realistic user query, not a lengthy technical specification. A verbose user might ask multiple related questions in one message, but they would not write an essay.
4. **Your output must ONLY be the generated user message text.** Do not include any prefixes, quotation marks, explanations, or formatting like bullet points. It should be a single block of text.

---
**Base Input:**
{input}

**Strategy to Apply (how the user words their request):**
{strategy}

**User Persona Profile:**
{persona}
---

Generated User Message:"""
    )

    optional_persona = (
        context.persona.to_prompt() if context.persona else "Generic user"
    )
    strategy_prompt = llm_config.strategy_prompts.get(
        context.strategy.value,
        context.strategy.value,
    )

    prompt_text = template.format(
        input=context.base_input.get("input", ""),
        persona=optional_persona,
        strategy=strategy_prompt,
        metadata=json.dumps(context.base_input, ensure_ascii=False),
    )

    metadata = {
        "strategy": context.strategy.value,
        "base_index": context.iteration,
        "persona": context.persona.name if context.persona else None,
        "persona_description": context.persona.description if context.persona else None,
    }

    return prompt_text, metadata


async def _generate_with_client(
    *,
    config: ExperimentConfig,
    llm_config: LLMGeneratorConfig,
    prompts: Sequence[Tuple[str, Dict[str, Any]]],
    progress: Progress,
    task_id: Any,
) -> List[Dict[str, Any]]:
    if llm_config.provider == "mock":
        return await _generate_variations_mock(
            config=config,
            llm_config=llm_config,
            prompts=prompts,
            progress=progress,
            task_id=task_id,
        )

    if llm_config.provider == "openai":
        async with httpx.AsyncClient() as client:
            return await _generate_variations_openai(
                client=client,
                config=config,
                llm_config=llm_config,
                prompts=prompts,
                progress=progress,
                task_id=task_id,
            )

    raise LLMGenerationError(f"Unsupported LLM provider: {llm_config.provider}")


def _collect_prompts(
    *,
    config: ExperimentConfig,
    strategies: Sequence[VariationStrategy],
    limit: Optional[int],
) -> List[Tuple[str, Dict[str, Any]]]:
    llm_config = _ensure_llm_mode(config)
    prompts: List[Tuple[str, Dict[str, Any]]] = []

    personas: Iterable[Optional[PersonaConfig]] = config.personas or [None]

    for index, base_input in enumerate(config.base_inputs):
        if not base_input.get("input"):
            continue
        for persona in personas:
            for strategy in strategies:
                context = LLMGenerationContext(
                    base_input=base_input,
                    persona=persona,
                    strategy=strategy,
                    iteration=index,
                )
                prompts.append(_format_prompt(config, llm_config, context))

                if limit is not None and len(prompts) >= limit:
                    return prompts

    return prompts


def generate_llm_inputs(
    *,
    config: ExperimentConfig,
    strategies: Sequence[VariationStrategy],
    settings,
) -> List[Dict[str, Any]]:
    """Generate inputs using an LLM provider."""

    llm_config = _ensure_llm_mode(config)

    if settings.llm_api_key_override:
        llm_config = llm_config.model_copy(update={"api_key": settings.llm_api_key_override})

    prompts = _collect_prompts(
        config=config,
        strategies=strategies,
        limit=settings.limit,
    )

    if not prompts:
        raise LLMGenerationError("No prompts generated from base inputs")

    async def _run_generation(progress: Progress, task_id: Any) -> List[Dict[str, Any]]:
        if settings.llm_client:
            # Note: Custom clients do not support progress bars currently
            result = settings.llm_client.generate(
                prompts=prompts,
                config=config,
                llm_config=llm_config,
            )
            if inspect.isawaitable(result):
                return await result
            return result

        return await _generate_with_client(
            config=config,
            llm_config=llm_config,
            prompts=prompts,
            progress=progress,
            task_id=task_id,
        )

    console = Console()
    console.print(f"🧠 Generating [bold cyan]{len(prompts)}[/bold cyan] variations using LLM...")

    results: List[Dict[str, Any]] = []
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        BarColumn(),
        TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
        TimeRemainingColumn(),
        TextColumn("({task.completed} of {task.total})"),
        console=console,
    ) as progress:
        generation_task = progress.add_task("[green]Generating...", total=len(prompts))
        try:
            results = asyncio.run(_run_generation(progress, generation_task))
        except RuntimeError:
            try:
                loop = asyncio.get_running_loop()
            except RuntimeError:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)

            if loop.is_running():
                raise LLMGenerationError(
                    "LLM generation cannot run inside an active asyncio event loop"
                )

            results = loop.run_until_complete(_run_generation(progress, generation_task))

    if len(results) < len(prompts):
        console.print(
            f"[yellow]Warning:[/yellow] {len(prompts) - len(results)} variations failed to generate."
        )

    return results
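The LLMClient protocol above is the hook that generate_llm_inputs uses when settings.llm_client is set; because the return value is passed through inspect.isawaitable, both async and plain-list-returning implementations are accepted. A minimal sketch of a conforming client follows (not part of the released wheel; the EchoLLMClient name and its echo behaviour are illustrative assumptions):

from typing import Any, Dict, List, Sequence, Tuple


class EchoLLMClient:
    """Toy LLMClient: returns each rendered prompt back as the generated input."""

    async def generate(
        self,
        *,
        prompts: Sequence[Tuple[str, Dict[str, Any]]],
        config: Any,       # ExperimentConfig in real usage
        llm_config: Any,   # LLMGeneratorConfig in real usage
    ) -> List[Dict[str, Any]]:
        # Keep the result shape used elsewhere in this module: input text plus metadata.
        return [
            {"input": prompt_text, "metadata": dict(metadata)}
            for prompt_text, metadata in prompts
        ]

Supplying such an object via settings.llm_client bypasses the provider dispatch entirely and, as the comment in _run_generation notes, also bypasses the Rich progress bar.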
fluxloop_cli/main.py
ADDED
@@ -0,0 +1,97 @@
"""
Main CLI application entry point.
"""

import sys
from pathlib import Path
from typing import Optional

import typer
from rich.console import Console
from rich.panel import Panel

from . import __version__
from .commands import config, generate, init, parse, run, status

# Create the main Typer app
app = typer.Typer(
    name="fluxloop",
    help="FluxLoop CLI - Run simulations and manage experiments for AI agents",
    add_completion=True,
    rich_markup_mode="rich",
)

# Create console for rich output
console = Console()

# Add subcommands
app.add_typer(init.app, name="init", help="Initialize a new FluxLoop project")
app.add_typer(run.app, name="run", help="Run simulations and experiments")
app.add_typer(status.app, name="status", help="Check status and view results")
app.add_typer(config.app, name="config", help="Manage configuration")
app.add_typer(generate.app, name="generate", help="Generate input datasets")
app.add_typer(parse.app, name="parse", help="Parse experiments into readable files")


def version_callback(value: bool):
    """Show version and exit."""
    if value:
        console.print(f"[bold blue]FluxLoop CLI[/bold blue] version [green]{__version__}[/green]")
        raise typer.Exit()


@app.callback()
def main(
    ctx: typer.Context,
    version: Optional[bool] = typer.Option(
        None,
        "--version",
        "-v",
        help="Show version and exit",
        callback=version_callback,
        is_eager=True,
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        help="Enable verbose output",
    ),
    debug: bool = typer.Option(
        False,
        "--debug",
        help="Enable debug mode",
    ),
):
    """
    FluxLoop CLI - Simulation and observability for AI agents.

    Use [bold]fluxloop --help[/bold] to see available commands.
    """
    # Store global options in context
    ctx.ensure_object(dict)
    ctx.obj["verbose"] = verbose
    ctx.obj["debug"] = debug

    if debug:
        console.print("[dim]Debug mode enabled[/dim]")


@app.command()
def hello(
    name: str = typer.Argument("World", help="Name to greet"),
):
    """
    Simple hello command to test the CLI.
    """
    console.print(
        Panel(
            f"[bold green]Hello, {name}![/bold green]\n\n"
            f"Welcome to FluxLoop CLI v{__version__}",
            title="[bold blue]FluxLoop[/bold blue]",
            border_style="blue",
        )
    )


if __name__ == "__main__":
    app()
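A short sketch of exercising the Typer app above in-process with Typer's test runner; it assumes the wheel is installed so that fluxloop_cli.main is importable, and the outputs shown in comments are indicative only:

from typer.testing import CliRunner

from fluxloop_cli.main import app

runner = CliRunner()

# The eager --version option triggers version_callback and exits immediately.
result = runner.invoke(app, ["--version"])
print(result.output)  # e.g. "FluxLoop CLI version 0.1.0"

# The hello command renders a Rich panel greeting.
result = runner.invoke(app, ["hello", "Ada"])
print(result.output)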
fluxloop_cli/project_paths.py
ADDED

@@ -0,0 +1,80 @@
"""Utilities for resolving FluxLoop project directories."""

from pathlib import Path
from typing import Optional

from .constants import DEFAULT_CONFIG_FILENAME, DEFAULT_CONFIG_PATH, DEFAULT_ROOT_DIR_NAME


def _normalize_path(path: Path) -> Path:
    return path.expanduser().resolve()


def resolve_root_dir(root: Optional[Path]) -> Path:
    """Resolve the FluxLoop root directory."""
    base = root if root is not None else Path(DEFAULT_ROOT_DIR_NAME)
    if base.is_absolute():
        return _normalize_path(base)
    return _normalize_path(Path.cwd() / base)


def resolve_project_dir(project: str, root: Optional[Path]) -> Path:
    """Resolve the directory for a specific project.

    If project is None, fall back to current working directory.
    """

    root_dir = resolve_root_dir(root)
    return _normalize_path(root_dir / project)


def resolve_project_relative(path: Path, project: Optional[str], root: Optional[Path]) -> Path:
    """Resolve a path relative to the project directory (if provided)."""

    if path.is_absolute():
        return _normalize_path(path)

    if project:
        return _normalize_path(resolve_project_dir(project, root) / path)

    return _normalize_path(Path.cwd() / path)


def resolve_config_path(config_file: Path, project: Optional[str], root: Optional[Path]) -> Path:
    """Resolve the path to a configuration file, honoring project/root settings."""

    if project:
        if config_file == DEFAULT_CONFIG_PATH:
            target = Path(DEFAULT_CONFIG_FILENAME)
        else:
            target = config_file
        return resolve_project_relative(target, project, root)

    if config_file.is_absolute():
        return _normalize_path(config_file)

    return _normalize_path(Path.cwd() / config_file)


def resolve_env_path(env_file: Path, project: Optional[str], root: Optional[Path]) -> Path:
    """Resolve the path for environment variable files."""

    if env_file.is_absolute() and env_file != Path(".env"):
        return _normalize_path(env_file)

    root_dir = resolve_root_dir(root)
    root_env = root_dir / ".env"

    if env_file != Path(".env"):
        return resolve_project_relative(env_file, project, root)

    if project:
        project_dir = resolve_project_dir(project, root)
        project_env = project_dir / ".env"
        return _normalize_path(project_env)

    if root_env.exists():
        return _normalize_path(root_env)

    return _normalize_path(Path.cwd() / env_file)
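A usage sketch for the resolvers above; the "demo" project name and "experiment.yaml" file name are illustrative, and the concrete default values come from fluxloop_cli/constants.py, which this diff section does not show:

from pathlib import Path

from fluxloop_cli.project_paths import (
    resolve_config_path,
    resolve_env_path,
    resolve_root_dir,
)

# With no explicit root, the FluxLoop root is DEFAULT_ROOT_DIR_NAME under the CWD.
root = resolve_root_dir(None)

# With a project, relative config paths resolve inside <root>/<project>/.
config_path = resolve_config_path(Path("experiment.yaml"), project="demo", root=None)

# .env lookup prefers <root>/<project>/.env when a project is given; otherwise it
# falls back to <root>/.env if that file exists, else to the CWD.
env_path = resolve_env_path(Path(".env"), project="demo", root=None)

print(root, config_path, env_path)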