flamecli 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cli/repl.py +324 -0
- flame/__init__.py +1 -0
- flame/api/__init__.py +2 -0
- flame/api/async_client.py +43 -0
- flame/api/client.py +156 -0
- flame/cli/__init__.py +2 -0
- flame/cli/executor.py +515 -0
- flame/cli/repl.py +342 -0
- flame/main.py +105 -0
- flame/tools/__init__.py +0 -0
- flame/tools/base.py +110 -0
- flame/tools/fs.py +89 -0
- flame/tools/registry.py +57 -0
- flame/tools/system.py +13 -0
- flame/utils/__init__.py +2 -0
- flame/utils/context.py +168 -0
- flame/utils/logger.py +11 -0
- flame/utils/prompts.py +42 -0
- flamecli-0.1.2.dist-info/METADATA +431 -0
- flamecli-0.1.2.dist-info/RECORD +24 -0
- flamecli-0.1.2.dist-info/WHEEL +5 -0
- flamecli-0.1.2.dist-info/entry_points.txt +2 -0
- flamecli-0.1.2.dist-info/licenses/LICENSE +22 -0
- flamecli-0.1.2.dist-info/top_level.txt +2 -0
cli/repl.py
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
1
|
+
"""Interactive REPL for chat loop with streaming responses."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import signal
|
|
5
|
+
import sys
|
|
6
|
+
import os
|
|
7
|
+
import re
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Optional
|
|
11
|
+
from rich.console import Console
|
|
12
|
+
from rich.panel import Panel
|
|
13
|
+
from rich.markdown import Markdown
|
|
14
|
+
from prompt_toolkit import PromptSession
|
|
15
|
+
from prompt_toolkit.history import FileHistory
|
|
16
|
+
from prompt_toolkit.formatted_text import HTML
|
|
17
|
+
|
|
18
|
+
from api.client import APIClient
|
|
19
|
+
from utils.context import SystemContext
|
|
20
|
+
from cli.executor import FileExecutor, CommandExecutor
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class REPL:
|
|
24
|
+
"""Interactive read-eval-print loop for AI coding assistant."""
|
|
25
|
+
|
|
26
|
+
def __init__(
|
|
27
|
+
self,
|
|
28
|
+
api_client: APIClient,
|
|
29
|
+
working_dir: Optional[str] = None,
|
|
30
|
+
history_file: Optional[str] = None,
|
|
31
|
+
):
|
|
32
|
+
"""Initialize REPL.
|
|
33
|
+
|
|
34
|
+
Args:
|
|
35
|
+
api_client: APIClient instance
|
|
36
|
+
working_dir: Working directory for file operations
|
|
37
|
+
history_file: Path to save command history
|
|
38
|
+
"""
|
|
39
|
+
self.client = api_client
|
|
40
|
+
self.console = Console()
|
|
41
|
+
self.working_dir = Path(working_dir or os.getcwd())
|
|
42
|
+
self.system_context = SystemContext(working_dir=str(self.working_dir))
|
|
43
|
+
self.file_executor = FileExecutor(base_dir=str(self.working_dir), console=self.console)
|
|
44
|
+
self.command_executor = CommandExecutor(console=self.console)
|
|
45
|
+
|
|
46
|
+
# Message history for multi-turn conversation
|
|
47
|
+
self.messages = []
|
|
48
|
+
self.add_system_message()
|
|
49
|
+
|
|
50
|
+
# Setup command history
|
|
51
|
+
history_file = history_file or Path.home() / ".flame_history"
|
|
52
|
+
self.prompt_session = PromptSession(history=FileHistory(str(history_file)))
|
|
53
|
+
|
|
54
|
+
    def add_system_message(self):
        """Add system context as initial message.

        Seeds ``self.messages`` with a single system-role message combining
        the gathered project context and the tool-use protocol (/create,
        /edit, /read, /run) the model is instructed to emit.
        """
        context_prompt = self.system_context.get_context_prompt()
        # NOTE: the command syntax described below is parsed back out of the
        # model's replies by _process_assistant_commands — keep them in sync.
        self.messages.append({
            "role": "system",
            "content": (
                f"{context_prompt}\n\n"
                "CRITICAL: You are an agent that can directly interact with the user's terminal and filesystem.\n"
                "To perform actions, you MUST use the following commands in your output:\n\n"
                "1. `/create <filepath>` - Use this to create a NEW file.\n"
                " Followed by a markdown code block containing the content. Example:\n"
                " /create hello.py\n"
                " ```python\n"
                " print('hello')\n"
                " ```\n\n"
                "2. `/edit <filepath>` - Use this to EDIT an existing file.\n"
                " Followed by a markdown code block containing the FULL new content. Example:\n"
                " /edit existing.txt\n"
                " ```\n"
                " This is the new content.\n"
                " ```\n\n"
                "3. `/read <filepath>` - Use this to READ a file's content automatically (No user approval needed for reading). Example:\n"
                " /read src/main.py\n\n"
                "4. `/run <command>` - Use this to run a shell command. Example:\n"
                " /run pip install requests\n\n"
                "Do NOT just explain how to do it. ACT by emitting these commands.\n"
                "IMPORTANT: After you emit a command, the system will provide the output in the NEXT turn as 'Action results:'. Use this feedback to continue your task.\n"
                "CRITICAL: Always wrap file content in triple backticks (```). "
                "Any text OUTSIDE of the /command line and the code block will be treated as conversational notes and NOT part of the file.\n"
                "File creation, editing, and command execution require user approval (y/n), but READING is automatic.\n"
                "If the user asks to 'test a shell command' or 'make a file', use the /run or /create commands respectively."
            ),
        })
|
|
87
|
+
|
|
88
|
+
def print_welcome(self):
|
|
89
|
+
"""Print welcome message."""
|
|
90
|
+
self.console.print("\n")
|
|
91
|
+
self.console.print(
|
|
92
|
+
Panel.fit(
|
|
93
|
+
"[bold cyan]🔥 Flame - AI Coding Assistant[/bold cyan]\n"
|
|
94
|
+
"Powered by API",
|
|
95
|
+
border_style="cyan",
|
|
96
|
+
)
|
|
97
|
+
)
|
|
98
|
+
self.console.print("[dim]Type 'help' for commands, 'exit' to quit\n[/dim]")
|
|
99
|
+
|
|
100
|
+
    def print_help(self):
        """Print help message."""
        # NOTE(review): the text mixes rich markup tags ([cyan], [bold])
        # into a string rendered through Markdown() — confirm the tags
        # actually render as intended rather than appearing literally.
        help_text = """
[bold]Available Commands:[/bold]

[cyan]help[/cyan] - Show this help message
[cyan]context[/cyan] - Show current system context
[cyan]clear[/cyan] - Clear conversation history
[cyan]exit[/cyan] - Exit the program
[cyan]/read <file>[/cyan] - Read a file's content (auto-approved)
[cyan]/run <cmd>[/cyan] - Suggest running a command (AI-aware)
[cyan]/edit <file>[/cyan] - Suggest editing a file
[cyan]/create <file>[/cyan] - Suggest creating a file

[bold]Tips:[/bold]
• Type normally to chat with the AI
• The AI has context about your project
• All file/command operations require your approval
• Multi-line input: Ctrl+Enter (or continue on new line)
"""
        self.console.print(Markdown(help_text))
|
|
121
|
+
|
|
122
|
+
    def print_context(self):
        """Print current system context."""
        # Header first, then the full context block gathered at startup.
        self.console.print("\n[cyan]📍 System Context:[/cyan]\n")
        self.console.print(self.system_context.get_full_context())
|
|
126
|
+
|
|
127
|
+
def handle_command(self, user_input: str) -> Optional[str]:
|
|
128
|
+
"""Handle special commands before sending to AI.
|
|
129
|
+
|
|
130
|
+
Args:
|
|
131
|
+
user_input: User input text
|
|
132
|
+
|
|
133
|
+
Returns:
|
|
134
|
+
Modified input to send to AI, or None if command was handled
|
|
135
|
+
"""
|
|
136
|
+
stripped = user_input.strip().lower()
|
|
137
|
+
|
|
138
|
+
if stripped == "help":
|
|
139
|
+
self.print_help()
|
|
140
|
+
return None
|
|
141
|
+
elif stripped == "context":
|
|
142
|
+
self.print_context()
|
|
143
|
+
return None
|
|
144
|
+
elif stripped == "clear":
|
|
145
|
+
self.messages = []
|
|
146
|
+
self.add_system_message()
|
|
147
|
+
self.console.print("[green]✅ Conversation history cleared[/green]")
|
|
148
|
+
return None
|
|
149
|
+
elif stripped == "exit":
|
|
150
|
+
raise KeyboardInterrupt()
|
|
151
|
+
elif stripped.startswith("/read "):
|
|
152
|
+
filepath = user_input[6:].strip()
|
|
153
|
+
if filepath:
|
|
154
|
+
content = self.file_executor.read_file(filepath)
|
|
155
|
+
self.console.print(Panel(content, title=f"Content of {filepath}"))
|
|
156
|
+
return None
|
|
157
|
+
elif stripped.startswith("/run "):
|
|
158
|
+
command = user_input[5:].strip()
|
|
159
|
+
if command:
|
|
160
|
+
success, output = self.command_executor.suggest_command(
|
|
161
|
+
command,
|
|
162
|
+
description="Suggested by AI assistant",
|
|
163
|
+
)
|
|
164
|
+
return None
|
|
165
|
+
elif stripped.startswith("/create "):
|
|
166
|
+
filepath = user_input[8:].strip()
|
|
167
|
+
if filepath:
|
|
168
|
+
self.console.print(
|
|
169
|
+
"[yellow]Note: Use AI to help generate file content, "
|
|
170
|
+
"then approve creation[/yellow]"
|
|
171
|
+
)
|
|
172
|
+
return None
|
|
173
|
+
elif stripped.startswith("/edit "):
|
|
174
|
+
filepath = user_input[6:].strip()
|
|
175
|
+
if filepath:
|
|
176
|
+
self.console.print(
|
|
177
|
+
"[yellow]Note: Use AI to help generate edits, "
|
|
178
|
+
"then approve changes[/yellow]"
|
|
179
|
+
)
|
|
180
|
+
return None
|
|
181
|
+
|
|
182
|
+
return user_input
|
|
183
|
+
|
|
184
|
+
def _process_assistant_commands(self, response: str):
|
|
185
|
+
"""Parse and execute commands from AI response and return feedback."""
|
|
186
|
+
feedback = []
|
|
187
|
+
|
|
188
|
+
# Regex for block commands (/create, /edit)
|
|
189
|
+
block_cmd_pattern = r"/(create|edit)\s+([^\n]+)\s*\n\s*```[^\n]*\n(.*?)\n\s*```"
|
|
190
|
+
|
|
191
|
+
# First, find all block commands
|
|
192
|
+
for match in re.finditer(block_cmd_pattern, response, re.DOTALL):
|
|
193
|
+
cmd = match.group(1)
|
|
194
|
+
filepath = match.group(2).strip()
|
|
195
|
+
content = match.group(3)
|
|
196
|
+
|
|
197
|
+
if cmd == "create":
|
|
198
|
+
success = self.file_executor.suggest_file_creation(filepath, content, description="AI suggestion")
|
|
199
|
+
if success:
|
|
200
|
+
feedback.append(f"File '{filepath}' created successfully.")
|
|
201
|
+
else:
|
|
202
|
+
feedback.append(f"Creation of file '{filepath}' was rejected or failed.")
|
|
203
|
+
elif cmd == "edit":
|
|
204
|
+
target_path = self.working_dir / filepath
|
|
205
|
+
old_content = ""
|
|
206
|
+
if target_path.exists():
|
|
207
|
+
old_content = target_path.read_text(encoding="utf-8")
|
|
208
|
+
|
|
209
|
+
if old_content:
|
|
210
|
+
success = self.file_executor.suggest_file_edit(filepath, old_content, content, description="AI suggestion")
|
|
211
|
+
if success:
|
|
212
|
+
feedback.append(f"File '{filepath}' updated successfully.")
|
|
213
|
+
else:
|
|
214
|
+
feedback.append(f"Edit of file '{filepath}' was rejected or failed.")
|
|
215
|
+
else:
|
|
216
|
+
success = self.file_executor.suggest_file_creation(filepath, content, description="AI suggestion")
|
|
217
|
+
if success:
|
|
218
|
+
feedback.append(f"File '{filepath}' created (as it didn't exist).")
|
|
219
|
+
else:
|
|
220
|
+
feedback.append(f"Creation of file '{filepath}' was rejected.")
|
|
221
|
+
|
|
222
|
+
# Pattern for simple commands (/read, /run)
|
|
223
|
+
# Avoid matching /read or /run if they are inside a markdown block that was already processed
|
|
224
|
+
remaining_response = response
|
|
225
|
+
for match in re.finditer(block_cmd_pattern, response, re.DOTALL):
|
|
226
|
+
remaining_response = remaining_response.replace(match.group(0), "")
|
|
227
|
+
|
|
228
|
+
simple_cmd_pattern = r"/(read|run)\s+([^\n]+)"
|
|
229
|
+
for match in re.finditer(simple_cmd_pattern, remaining_response):
|
|
230
|
+
cmd = match.group(1)
|
|
231
|
+
arg = match.group(2).strip()
|
|
232
|
+
|
|
233
|
+
if cmd == "read":
|
|
234
|
+
content = self.file_executor.read_file(arg)
|
|
235
|
+
feedback.append(f"Content of '{arg}':\n{content}")
|
|
236
|
+
elif cmd == "run":
|
|
237
|
+
success, output = self.command_executor.suggest_command(arg, description="AI suggestion")
|
|
238
|
+
if success:
|
|
239
|
+
feedback.append(f"Command '{arg}' executed.\nOutput:\n{output or '(No output)'}")
|
|
240
|
+
else:
|
|
241
|
+
feedback.append(f"Command '{arg}' failed or was rejected.")
|
|
242
|
+
|
|
243
|
+
return "\n".join(feedback)
|
|
244
|
+
|
|
245
|
+
def run_conversation_step(self, user_message: str):
|
|
246
|
+
"""Run a interaction turn and process any commands."""
|
|
247
|
+
|
|
248
|
+
# Add user response to history
|
|
249
|
+
self.messages.append({
|
|
250
|
+
"role": "user",
|
|
251
|
+
"content": user_message,
|
|
252
|
+
})
|
|
253
|
+
|
|
254
|
+
# Get streaming response
|
|
255
|
+
self.console.print("\n[cyan]🤖 Flame:[/cyan] ", end="", highlight=False)
|
|
256
|
+
|
|
257
|
+
full_response = ""
|
|
258
|
+
try:
|
|
259
|
+
for chunk in self.client.chat_stream(self.messages):
|
|
260
|
+
full_response += chunk
|
|
261
|
+
self.console.print(chunk, end="", highlight=False)
|
|
262
|
+
sys.stdout.flush()
|
|
263
|
+
except Exception as e:
|
|
264
|
+
self.console.print(f"\n[red]Error: {e}[/red]")
|
|
265
|
+
return
|
|
266
|
+
|
|
267
|
+
self.console.print("\n")
|
|
268
|
+
|
|
269
|
+
# Process commands and get feedback
|
|
270
|
+
feedback = self._process_assistant_commands(full_response)
|
|
271
|
+
|
|
272
|
+
# Add assistant response to history
|
|
273
|
+
self.messages.append({
|
|
274
|
+
"role": "assistant",
|
|
275
|
+
"content": full_response,
|
|
276
|
+
})
|
|
277
|
+
|
|
278
|
+
# If there's feedback, automatically start a new turn
|
|
279
|
+
if feedback:
|
|
280
|
+
self.console.print(f"\n[dim]🔄 Processing action results...[/dim]")
|
|
281
|
+
self.run_conversation_step(f"Action results:\n{feedback}")
|
|
282
|
+
|
|
283
|
+
def run(self):
|
|
284
|
+
"""Start the interactive REPL loop."""
|
|
285
|
+
self.print_welcome()
|
|
286
|
+
|
|
287
|
+
# Setup signal handlers for clean exit
|
|
288
|
+
def signal_handler(sig, frame):
|
|
289
|
+
self.console.print("\n[yellow]👋 Goodbye![/yellow]")
|
|
290
|
+
sys.exit(0)
|
|
291
|
+
|
|
292
|
+
signal.signal(signal.SIGINT, signal_handler)
|
|
293
|
+
|
|
294
|
+
try:
|
|
295
|
+
while True:
|
|
296
|
+
try:
|
|
297
|
+
# Get user input
|
|
298
|
+
user_input = self.prompt_session.prompt(
|
|
299
|
+
HTML("<cyan>You:</cyan> "),
|
|
300
|
+
multiline=False,
|
|
301
|
+
).strip()
|
|
302
|
+
|
|
303
|
+
if not user_input:
|
|
304
|
+
continue
|
|
305
|
+
|
|
306
|
+
# Handle special commands
|
|
307
|
+
processed_input = self.handle_command(user_input)
|
|
308
|
+
if processed_input is None:
|
|
309
|
+
continue
|
|
310
|
+
|
|
311
|
+
# Run interaction loop
|
|
312
|
+
self.run_conversation_step(processed_input)
|
|
313
|
+
|
|
314
|
+
except KeyboardInterrupt:
|
|
315
|
+
self.console.print("\n[yellow]👋 Goodbye![/yellow]")
|
|
316
|
+
break
|
|
317
|
+
except EOFError:
|
|
318
|
+
self.console.print("\n[yellow]👋 Goodbye![/yellow]")
|
|
319
|
+
break
|
|
320
|
+
|
|
321
|
+
except Exception as e:
|
|
322
|
+
self.console.print(f"[red]Unexpected error: {e}[/red]")
|
|
323
|
+
raise
|
|
324
|
+
|
flame/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.1.0"
|
flame/api/async_client.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import json
import os
from typing import AsyncIterator, Optional

import httpx
from rich.console import Console
|
|
5
|
+
|
|
6
|
+
class AsyncAPIClient:
    """Minimal asynchronous client for an OpenAI-compatible chat endpoint."""

    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        model: Optional[str] = None,
    ):
        """Read configuration from arguments, falling back to environment.

        Args:
            api_key: API key (falls back to FLAME_API_KEY).
            base_url: Endpoint base URL (falls back to FLAME_API_BASE_URL).
            model: Model identifier (falls back to FLAME_MODEL).

        Raises:
            ValueError: If no API key is available.
        """
        self.api_key = api_key or os.getenv("FLAME_API_KEY")
        self.base_url = base_url or os.getenv("FLAME_API_BASE_URL", "https://api.example.com/proxy/v1")
        self.model = model or os.getenv("FLAME_MODEL", "qwen/qwen3-32b")

        if not self.api_key:
            raise ValueError("FLAME_API_KEY not found.")

    @staticmethod
    def _extract_content(payload: str) -> Optional[str]:
        """Return the delta text from one SSE JSON payload, or None.

        Malformed or content-free payloads yield None rather than raising,
        since a stream may interleave keep-alives or partial lines.
        """
        try:
            event = json.loads(payload)
        except ValueError:
            return None
        choices = event.get("choices") or []
        if not choices:
            return None
        return choices[0].get("delta", {}).get("content") or None

    async def chat_stream(self, messages: list[dict]) -> AsyncIterator[str]:
        """Stream assistant text chunks for the given conversation.

        Fixes over the previous version: SSE payloads are JSON-decoded and
        only the delta text is yielded (the old code yielded raw JSON strings
        and carried a "use json.loads here" note), and HTTP error statuses
        now raise instead of being streamed as if they were data.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        payload = {
            "model": self.model,
            "messages": messages,
            "stream": True
        }

        async with httpx.AsyncClient() as client:
            async with client.stream("POST", f"{self.base_url}/chat/completions", json=payload, headers=headers) as response:
                # Fail fast on 4xx/5xx instead of parsing an error body as SSE.
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line.startswith("data: "):
                        continue
                    data = line[len("data: "):]
                    if data == "[DONE]":
                        break
                    content = self._extract_content(data)
                    if content:
                        yield content
|
|
41
|
+
|
|
42
|
+
# NOTE(review): the two lines below are markdown prose that leaked into this
# Python source file (an AI-generated-patch artifact) and would be a
# SyntaxError at import time; commented out here, and they should be removed
# from the published package entirely.
# ### Step 4: Update the Test
# Now that we changed the structure, we need to update the test I created earlier.
|
flame/api/client.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
"""API wrapper for streaming and non-streaming responses."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import time
|
|
5
|
+
import random
|
|
6
|
+
from typing import Iterator, Optional, Any, Callable
|
|
7
|
+
from openrouter import OpenRouter
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class APIClient:
|
|
12
|
+
"""Wrapper around OpenRouter SDK for API."""
|
|
13
|
+
|
|
14
|
+
def __init__(
|
|
15
|
+
self,
|
|
16
|
+
api_key: Optional[str] = None,
|
|
17
|
+
base_url: Optional[str] = None,
|
|
18
|
+
model: Optional[str] = None,
|
|
19
|
+
console: Optional[Console] = None,
|
|
20
|
+
):
|
|
21
|
+
"""Initialize the API client.
|
|
22
|
+
|
|
23
|
+
Args:
|
|
24
|
+
api_key: API key for API (defaults to FLAME_API_KEY env var)
|
|
25
|
+
base_url: API base URL (defaults to FLAME_API_BASE_URL env var)
|
|
26
|
+
model: Model name (defaults to FLAME_MODEL env var)
|
|
27
|
+
console: Rich console for output (optional)
|
|
28
|
+
"""
|
|
29
|
+
self.api_key = api_key or os.getenv("FLAME_API_KEY")
|
|
30
|
+
self.base_url = base_url or os.getenv("FLAME_API_BASE_URL", "https://api.example.com/proxy/v1")
|
|
31
|
+
self.model = model or os.getenv("FLAME_MODEL", "google/gemini-3-flash-preview")
|
|
32
|
+
self.console = console or Console()
|
|
33
|
+
|
|
34
|
+
if not self.api_key:
|
|
35
|
+
raise ValueError(
|
|
36
|
+
"FLAME_API_KEY not found. Please set it in .env or pass it as an argument."
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
self.client = OpenRouter(
|
|
40
|
+
api_key=self.api_key,
|
|
41
|
+
server_url=self.base_url,
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
def chat_stream(
|
|
45
|
+
self,
|
|
46
|
+
messages: list[dict],
|
|
47
|
+
temperature: float = 0.7,
|
|
48
|
+
max_tokens: int = 2048,
|
|
49
|
+
model: Optional[str] = None,
|
|
50
|
+
) -> Iterator[str]:
|
|
51
|
+
"""Stream chat response from API.
|
|
52
|
+
|
|
53
|
+
Args:
|
|
54
|
+
messages: List of message dicts with 'role' and 'content'
|
|
55
|
+
temperature: Temperature for response generation
|
|
56
|
+
max_tokens: Maximum tokens in response
|
|
57
|
+
model: Model name to use (overrides default)
|
|
58
|
+
|
|
59
|
+
Yields:
|
|
60
|
+
Streamed response chunks as strings
|
|
61
|
+
"""
|
|
62
|
+
def _attempt():
|
|
63
|
+
response = self.client.chat.send(
|
|
64
|
+
model=model or self.model,
|
|
65
|
+
messages=messages,
|
|
66
|
+
stream=True,
|
|
67
|
+
)
|
|
68
|
+
for chunk in response:
|
|
69
|
+
if isinstance(chunk, dict):
|
|
70
|
+
if 'choices' in chunk and chunk['choices']:
|
|
71
|
+
delta = chunk['choices'][0].get('delta', {})
|
|
72
|
+
content = delta.get('content', '')
|
|
73
|
+
if content:
|
|
74
|
+
yield content
|
|
75
|
+
elif hasattr(chunk, 'choices') and chunk.choices:
|
|
76
|
+
delta = chunk.choices[0].delta
|
|
77
|
+
content = getattr(delta, "content", "")
|
|
78
|
+
if content:
|
|
79
|
+
yield content
|
|
80
|
+
|
|
81
|
+
return self._with_retry(_attempt)
|
|
82
|
+
|
|
83
|
+
def chat_complete(
|
|
84
|
+
self,
|
|
85
|
+
messages: list[dict],
|
|
86
|
+
temperature: float = 0.7,
|
|
87
|
+
max_tokens: int = 2048,
|
|
88
|
+
model: Optional[str] = None,
|
|
89
|
+
) -> str:
|
|
90
|
+
"""Get non-streaming chat response from API.
|
|
91
|
+
|
|
92
|
+
Args:
|
|
93
|
+
messages: List of message dicts with 'role' and 'content'
|
|
94
|
+
temperature: Temperature for response generation
|
|
95
|
+
max_tokens: Maximum tokens in response
|
|
96
|
+
model: Model name to use (overrides default)
|
|
97
|
+
|
|
98
|
+
Returns:
|
|
99
|
+
Complete response text
|
|
100
|
+
"""
|
|
101
|
+
def _attempt():
|
|
102
|
+
response = self.client.chat.send(
|
|
103
|
+
model=model or self.model,
|
|
104
|
+
messages=messages,
|
|
105
|
+
stream=False,
|
|
106
|
+
)
|
|
107
|
+
# Handle list response or single object
|
|
108
|
+
if isinstance(response, list):
|
|
109
|
+
if response and hasattr(response[0], 'choices'):
|
|
110
|
+
return response[0].choices[0].message.content
|
|
111
|
+
return ""
|
|
112
|
+
|
|
113
|
+
if hasattr(response, 'choices'):
|
|
114
|
+
return response.choices[0].message.content
|
|
115
|
+
|
|
116
|
+
return str(response)
|
|
117
|
+
|
|
118
|
+
return self._with_retry(_attempt)
|
|
119
|
+
|
|
120
|
+
def _with_retry(self, func: Callable, max_retries: int = 3, base_delay: float = 1.0) -> Any:
|
|
121
|
+
"""Retry a function with exponential backoff."""
|
|
122
|
+
last_exception = None
|
|
123
|
+
for attempt in range(max_retries):
|
|
124
|
+
try:
|
|
125
|
+
return func()
|
|
126
|
+
except Exception as e:
|
|
127
|
+
last_exception = e
|
|
128
|
+
# Only retry on certain errors (simplified here to retry on most unless it's a clear config error)
|
|
129
|
+
if "Authentication" in str(e) or "401" in str(e):
|
|
130
|
+
raise
|
|
131
|
+
|
|
132
|
+
if attempt < max_retries - 1:
|
|
133
|
+
delay = base_delay * (2 ** attempt) + random.uniform(0, 0.1)
|
|
134
|
+
if self.console:
|
|
135
|
+
self.console.print(f"[yellow]⚠️ API error: {e}. Retrying in {delay:.1f}s... (Attempt {attempt + 1}/{max_retries})[/yellow]")
|
|
136
|
+
time.sleep(delay)
|
|
137
|
+
else:
|
|
138
|
+
if self.console:
|
|
139
|
+
self.console.print(f"[red]❌ Max retries reached. Last error: {e}[/red]")
|
|
140
|
+
raise last_exception
|
|
141
|
+
|
|
142
|
+
def validate_connection(self) -> bool:
|
|
143
|
+
"""Test connection to API.
|
|
144
|
+
|
|
145
|
+
Returns:
|
|
146
|
+
True if connection successful, False otherwise
|
|
147
|
+
"""
|
|
148
|
+
try:
|
|
149
|
+
response = self.chat_complete(
|
|
150
|
+
messages=[{"role": "user", "content": "say 'ok'"}],
|
|
151
|
+
max_tokens=10,
|
|
152
|
+
)
|
|
153
|
+
return bool(response)
|
|
154
|
+
except Exception:
|
|
155
|
+
return False
|
|
156
|
+
|
flame/cli/__init__.py
ADDED