empathy-framework 4.1.1-py3-none-any.whl → 4.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {empathy_framework-4.1.1.dist-info → empathy_framework-4.4.0.dist-info}/METADATA +77 -12
  2. {empathy_framework-4.1.1.dist-info → empathy_framework-4.4.0.dist-info}/RECORD +45 -14
  3. empathy_os/cli_unified.py +13 -0
  4. empathy_os/memory/long_term.py +5 -0
  5. empathy_os/memory/unified.py +149 -9
  6. empathy_os/meta_workflows/__init__.py +74 -0
  7. empathy_os/meta_workflows/agent_creator.py +254 -0
  8. empathy_os/meta_workflows/builtin_templates.py +567 -0
  9. empathy_os/meta_workflows/cli_meta_workflows.py +1551 -0
  10. empathy_os/meta_workflows/form_engine.py +304 -0
  11. empathy_os/meta_workflows/intent_detector.py +298 -0
  12. empathy_os/meta_workflows/models.py +567 -0
  13. empathy_os/meta_workflows/pattern_learner.py +754 -0
  14. empathy_os/meta_workflows/session_context.py +398 -0
  15. empathy_os/meta_workflows/template_registry.py +229 -0
  16. empathy_os/meta_workflows/workflow.py +980 -0
  17. empathy_os/orchestration/execution_strategies.py +888 -1
  18. empathy_os/orchestration/pattern_learner.py +699 -0
  19. empathy_os/socratic/__init__.py +273 -0
  20. empathy_os/socratic/ab_testing.py +969 -0
  21. empathy_os/socratic/blueprint.py +532 -0
  22. empathy_os/socratic/cli.py +689 -0
  23. empathy_os/socratic/collaboration.py +1112 -0
  24. empathy_os/socratic/domain_templates.py +916 -0
  25. empathy_os/socratic/embeddings.py +734 -0
  26. empathy_os/socratic/engine.py +729 -0
  27. empathy_os/socratic/explainer.py +663 -0
  28. empathy_os/socratic/feedback.py +767 -0
  29. empathy_os/socratic/forms.py +624 -0
  30. empathy_os/socratic/generator.py +716 -0
  31. empathy_os/socratic/llm_analyzer.py +635 -0
  32. empathy_os/socratic/mcp_server.py +751 -0
  33. empathy_os/socratic/session.py +306 -0
  34. empathy_os/socratic/storage.py +635 -0
  35. empathy_os/socratic/success.py +719 -0
  36. empathy_os/socratic/visual_editor.py +812 -0
  37. empathy_os/socratic/web_ui.py +925 -0
  38. empathy_os/workflows/manage_documentation.py +18 -2
  39. empathy_os/workflows/release_prep_crew.py +16 -1
  40. empathy_os/workflows/test_coverage_boost_crew.py +16 -1
  41. empathy_os/workflows/test_maintenance_crew.py +18 -1
  42. {empathy_framework-4.1.1.dist-info → empathy_framework-4.4.0.dist-info}/WHEEL +0 -0
  43. {empathy_framework-4.1.1.dist-info → empathy_framework-4.4.0.dist-info}/entry_points.txt +0 -0
  44. {empathy_framework-4.1.1.dist-info → empathy_framework-4.4.0.dist-info}/licenses/LICENSE +0 -0
  45. {empathy_framework-4.1.1.dist-info → empathy_framework-4.4.0.dist-info}/top_level.txt +0 -0
empathy_os/socratic/llm_analyzer.py
@@ -0,0 +1,635 @@
+"""LLM-Powered Goal Analysis
+
+Uses LLM calls to provide sophisticated goal understanding, ambiguity detection,
+and intelligent question generation.
+
+Copyright 2026 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+import re
+from dataclasses import dataclass, field
+from typing import Any
+
+from .forms import (
+    FieldOption,
+    FieldType,
+    FieldValidation,
+    Form,
+    FormField,
+)
+from .session import SocraticSession
+
+logger = logging.getLogger(__name__)
+
+
+# =============================================================================
+# LLM PROMPTS
+# =============================================================================
+
+
+GOAL_ANALYSIS_PROMPT = """You are an expert at understanding user requirements for software development workflows.
+
+Analyze the following goal statement and extract structured information:
+
+<goal>
+{goal}
+</goal>
+
+Provide your analysis in the following JSON format:
+{{
+    "intent": "A clear, concise statement of what the user wants to achieve",
+    "domain": "One of: code_review, security, testing, documentation, performance, refactoring, general",
+    "confidence": 0.0 to 1.0 indicating how well you understand the goal,
+    "ambiguities": ["List of unclear aspects that need clarification"],
+    "assumptions": ["List of assumptions you're making about their needs"],
+    "constraints": ["Any constraints mentioned or implied"],
+    "keywords": ["Important technical keywords extracted"],
+    "suggested_agents": ["List of agent types that would help: security_reviewer, code_quality_reviewer, performance_analyzer, test_generator, documentation_writer, style_enforcer"],
+    "suggested_questions": [
+        {{
+            "id": "unique_id",
+            "question": "The clarifying question to ask",
+            "type": "single_select or multi_select or text",
+            "options": ["option1", "option2"] // for select types
+        }}
+    ]
+}}
+
+Focus on:
+1. Identifying the core intent behind potentially vague statements
+2. Detecting missing information that would affect implementation
+3. Suggesting specific questions that would help refine the requirements
+4. Recommending appropriate agent types for the task
+
+Respond ONLY with valid JSON, no additional text."""
+
+
+QUESTION_REFINEMENT_PROMPT = """Based on the user's goal and their previous answers, generate the next round of clarifying questions.
+
+<goal>
+{goal}
+</goal>
+
+<previous_answers>
+{previous_answers}
+</previous_answers>
+
+<current_requirements>
+{requirements}
+</current_requirements>
+
+<remaining_ambiguities>
+{ambiguities}
+</remaining_ambiguities>
+
+Generate 2-4 focused questions that will help clarify the remaining ambiguities.
+Prioritize questions that will have the biggest impact on the workflow design.
+
+Respond in JSON format:
+{{
+    "questions": [
+        {{
+            "id": "unique_id",
+            "question": "The clarifying question",
+            "type": "single_select or multi_select or text or boolean",
+            "options": ["option1", "option2"],
+            "category": "technical or quality or scope or preferences",
+            "priority": 1 to 5 (5 being most important)
+        }}
+    ],
+    "confidence_after_answers": 0.0 to 1.0,
+    "ready_to_generate": true/false,
+    "reasoning": "Brief explanation of why these questions are important"
+}}
+
+Respond ONLY with valid JSON."""
+
+
+AGENT_RECOMMENDATION_PROMPT = """Based on the user's requirements, recommend the optimal agent configuration.
+
+<goal>
+{goal}
+</goal>
+
+<requirements>
+{requirements}
+</requirements>
+
+<available_agent_templates>
+- security_reviewer: Security vulnerability detection, OWASP expertise
+- code_quality_reviewer: Code quality, maintainability, best practices
+- performance_analyzer: Performance bottlenecks, optimization opportunities
+- test_generator: Unit test generation, coverage improvement
+- documentation_writer: Documentation, docstrings, README generation
+- style_enforcer: Code style, formatting, conventions
+- result_synthesizer: Aggregates and reports findings from other agents
+</available_agent_templates>
+
+Recommend agents and their configuration:
+{{
+    "agents": [
+        {{
+            "template_id": "security_reviewer",
+            "priority": 1,
+            "customizations": {{
+                "focus_areas": ["injection", "auth"],
+                "model_tier": "capable"
+            }},
+            "reasoning": "Why this agent is needed"
+        }}
+    ],
+    "workflow_stages": [
+        {{
+            "name": "Analysis",
+            "agents": ["security_reviewer", "code_quality_reviewer"],
+            "parallel": true
+        }},
+        {{
+            "name": "Synthesis",
+            "agents": ["result_synthesizer"],
+            "parallel": false
+        }}
+    ],
+    "estimated_cost_tier": "cheap or moderate or expensive",
+    "estimated_duration": "fast (<1min) or moderate (1-5min) or slow (>5min)"
+}}
+
+Respond ONLY with valid JSON."""
+
+
+# =============================================================================
+# LLM ANALYZER
+# =============================================================================
+
+
+@dataclass
+class LLMAnalysisResult:
+    """Result from LLM goal analysis."""
+
+    intent: str
+    domain: str
+    confidence: float
+    ambiguities: list[str]
+    assumptions: list[str]
+    constraints: list[str]
+    keywords: list[str]
+    suggested_agents: list[str]
+    suggested_questions: list[dict[str, Any]]
+    raw_response: str = ""
+    secondary_domains: list[str] = field(default_factory=list)
+    detected_requirements: list[str] = field(default_factory=list)
+
+    @property
+    def primary_domain(self) -> str:
+        """Alias for domain (for MCP server compatibility)."""
+        return self.domain
+
+
+@dataclass
+class LLMQuestionResult:
+    """Result from LLM question generation."""
+
+    questions: list[dict[str, Any]]
+    confidence_after_answers: float
+    ready_to_generate: bool
+    reasoning: str
+
+
+@dataclass
+class LLMAgentRecommendation:
+    """Result from LLM agent recommendation."""
+
+    agents: list[dict[str, Any]]
+    workflow_stages: list[dict[str, Any]]
+    estimated_cost_tier: str
+    estimated_duration: str
+
+
+class LLMGoalAnalyzer:
+    """Uses LLM to analyze goals and generate questions.
+
+    Supports two modes:
+    1. Direct Anthropic API (preferred when api_key is provided)
+    2. EmpathyLLMExecutor integration (fallback)
+
+    Example:
+        >>> analyzer = LLMGoalAnalyzer(api_key="sk-...")
+        >>> result = await analyzer.analyze_goal("I want to automate code reviews")
+        >>> print(result.domain) # "code_review"
+        >>> print(result.suggested_questions)
+    """
+
+    # Model selection by tier
+    MODELS = {
+        "cheap": "claude-3-5-haiku-20241022",
+        "capable": "claude-sonnet-4-5-20250514",
+        "premium": "claude-opus-4-5-20251101",
+    }
+
+    def __init__(
+        self,
+        api_key: str | None = None,
+        provider: str = "anthropic",
+        model_tier: str = "capable",
+    ):
+        """Initialize the analyzer.
+
+        Args:
+            api_key: Anthropic API key (enables direct API access)
+            provider: LLM provider to use (for executor mode)
+            model_tier: Model tier (cheap, capable, premium)
+        """
+        import os
+        self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
+        self.provider = provider
+        self.model_tier = model_tier
+        self._client = None
+        self._executor = None
+
+    def _get_client(self):
+        """Lazy-load the Anthropic client for direct API access."""
+        if self._client is None and self.api_key:
+            try:
+                import anthropic
+                self._client = anthropic.Anthropic(api_key=self.api_key)
+            except ImportError:
+                logger.warning("anthropic package not installed")
+        return self._client
+
+    async def _get_executor(self):
+        """Get or create LLM executor (fallback mode)."""
+        if self._executor is None:
+            try:
+                from ..models.empathy_executor import EmpathyLLMExecutor
+                self._executor = EmpathyLLMExecutor(provider=self.provider)
+            except ImportError:
+                logger.warning("EmpathyLLMExecutor not available, using mock")
+                self._executor = MockLLMExecutor()
+        return self._executor
+
+    async def _call_llm(
+        self,
+        prompt: str,
+        system: str,
+        max_tokens: int = 2000,
+    ) -> str:
+        """Call LLM using direct API or executor.
+
+        Args:
+            prompt: User prompt
+            system: System prompt
+            max_tokens: Maximum tokens in response
+
+        Returns:
+            Response content as string
+        """
+        # Try direct Anthropic API first (preferred)
+        client = self._get_client()
+        if client:
+            try:
+                model = self.MODELS.get(self.model_tier, self.MODELS["capable"])
+                response = client.messages.create(
+                    model=model,
+                    max_tokens=max_tokens,
+                    system=system,
+                    messages=[{"role": "user", "content": prompt}],
+                )
+                return response.content[0].text if response.content else "{}"
+            except Exception as e:
+                logger.warning(f"Direct API call failed: {e}")
+
+        # Fall back to executor
+        executor = await self._get_executor()
+        response = await executor.run(
+            task_type="analysis",
+            prompt=prompt,
+            system=system,
+        )
+        return response.content if hasattr(response, 'content') else str(response)
+
+    async def analyze_goal(self, goal: str) -> LLMAnalysisResult:
+        """Analyze a goal using LLM.
+
+        Args:
+            goal: The user's goal statement
+
+        Returns:
+            LLMAnalysisResult with structured analysis
+        """
+        prompt = GOAL_ANALYSIS_PROMPT.format(goal=goal)
+        system = "You are an expert requirements analyst. Respond only with valid JSON."
+
+        try:
+            content = await self._call_llm(prompt, system)
+            data = self._parse_json_response(content)
+
+            return LLMAnalysisResult(
+                intent=data.get("intent", ""),
+                domain=data.get("domain", "general"),
+                confidence=float(data.get("confidence", 0.5)),
+                ambiguities=data.get("ambiguities", []),
+                assumptions=data.get("assumptions", []),
+                constraints=data.get("constraints", []),
+                keywords=data.get("keywords", []),
+                suggested_agents=data.get("suggested_agents", []),
+                suggested_questions=data.get("suggested_questions", []),
+                raw_response=content,
+            )
+
+        except Exception as e:
+            logger.warning(f"LLM analysis failed, using fallback: {e}")
+            return self._fallback_analysis(goal)
+
+    async def generate_questions(
+        self,
+        session: SocraticSession,
+    ) -> LLMQuestionResult:
+        """Generate follow-up questions using LLM.
+
+        Args:
+            session: Current Socratic session
+
+        Returns:
+            LLMQuestionResult with questions
+        """
+        # Gather context
+        previous_answers = {}
+        for round_data in session.question_rounds:
+            previous_answers.update(round_data.get("answers", {}))
+
+        requirements = {
+            "must_have": session.requirements.must_have,
+            "technical": session.requirements.technical_constraints,
+            "quality": session.requirements.quality_attributes,
+        }
+
+        ambiguities = []
+        if session.goal_analysis:
+            ambiguities = session.goal_analysis.ambiguities
+
+        prompt = QUESTION_REFINEMENT_PROMPT.format(
+            goal=session.goal,
+            previous_answers=json.dumps(previous_answers, indent=2),
+            requirements=json.dumps(requirements, indent=2),
+            ambiguities=json.dumps(ambiguities, indent=2),
+        )
+        system = "You are an expert at gathering requirements. Respond only with valid JSON."
+
+        try:
+            content = await self._call_llm(prompt, system)
+            data = self._parse_json_response(content)
+
+            return LLMQuestionResult(
+                questions=data.get("questions", []),
+                confidence_after_answers=float(data.get("confidence_after_answers", 0.7)),
+                ready_to_generate=data.get("ready_to_generate", False),
+                reasoning=data.get("reasoning", ""),
+            )
+
+        except Exception as e:
+            logger.warning(f"LLM question generation failed: {e}")
+            return LLMQuestionResult(
+                questions=[],
+                confidence_after_answers=0.5,
+                ready_to_generate=False,
+                reasoning="Fallback due to LLM error",
+            )
+
+    async def recommend_agents(
+        self,
+        session: SocraticSession,
+    ) -> LLMAgentRecommendation:
+        """Get agent recommendations using LLM.
+
+        Args:
+            session: Current Socratic session
+
+        Returns:
+            LLMAgentRecommendation with agent configuration
+        """
+        requirements = {
+            "must_have": session.requirements.must_have,
+            "technical": session.requirements.technical_constraints,
+            "quality": session.requirements.quality_attributes,
+            "preferences": session.requirements.preferences,
+            "domain_specific": session.requirements.domain_specific,
+        }
+
+        prompt = AGENT_RECOMMENDATION_PROMPT.format(
+            goal=session.goal,
+            requirements=json.dumps(requirements, indent=2),
+        )
+        system = "You are an expert at designing agent workflows. Respond only with valid JSON."
+
+        try:
+            content = await self._call_llm(prompt, system)
+            data = self._parse_json_response(content)
+
+            return LLMAgentRecommendation(
+                agents=data.get("agents", []),
+                workflow_stages=data.get("workflow_stages", []),
+                estimated_cost_tier=data.get("estimated_cost_tier", "moderate"),
+                estimated_duration=data.get("estimated_duration", "moderate"),
+            )
+
+        except Exception as e:
+            logger.warning(f"LLM agent recommendation failed: {e}")
+            return self._fallback_agent_recommendation(session)
+
+    def _parse_json_response(self, content: str) -> dict[str, Any]:
+        """Parse JSON from LLM response, handling common issues."""
+        # Try direct parse
+        try:
+            return json.loads(content)
+        except json.JSONDecodeError:
+            pass
+
+        # Try to extract JSON from markdown code block
+        json_match = re.search(r"```(?:json)?\s*([\s\S]*?)```", content)
+        if json_match:
+            try:
+                return json.loads(json_match.group(1))
+            except json.JSONDecodeError:
+                pass
+
+        # Try to find JSON object in content
+        json_match = re.search(r"\{[\s\S]*\}", content)
+        if json_match:
+            try:
+                return json.loads(json_match.group(0))
+            except json.JSONDecodeError:
+                pass
+
+        logger.warning(f"Could not parse JSON from: {content[:200]}")
+        return {}
+
+    def _fallback_analysis(self, goal: str) -> LLMAnalysisResult:
+        """Fallback analysis when LLM is unavailable."""
+        from .engine import (
+            detect_domain,
+            extract_keywords,
+            identify_ambiguities,
+            identify_assumptions,
+        )
+
+        domain, confidence = detect_domain(goal)
+        keywords = extract_keywords(goal)
+        ambiguities = identify_ambiguities(goal, domain)
+        assumptions = identify_assumptions(goal, domain)
+
+        # Map domain to suggested agents
+        domain_agents = {
+            "code_review": ["code_quality_reviewer", "result_synthesizer"],
+            "security": ["security_reviewer", "result_synthesizer"],
+            "testing": ["test_generator", "result_synthesizer"],
+            "documentation": ["documentation_writer"],
+            "performance": ["performance_analyzer", "result_synthesizer"],
+            "refactoring": ["code_quality_reviewer", "result_synthesizer"],
+            "general": ["code_quality_reviewer"],
+        }
+
+        return LLMAnalysisResult(
+            intent=f"Automated {domain.replace('_', ' ')}",
+            domain=domain,
+            confidence=confidence,
+            ambiguities=ambiguities,
+            assumptions=assumptions,
+            constraints=[],
+            keywords=keywords,
+            suggested_agents=domain_agents.get(domain, ["code_quality_reviewer"]),
+            suggested_questions=[],
+        )
+
+    def _fallback_agent_recommendation(
+        self,
+        session: SocraticSession,
+    ) -> LLMAgentRecommendation:
+        """Fallback agent recommendation."""
+        domain = session.goal_analysis.domain if session.goal_analysis else "general"
+
+        # Default configurations by domain
+        configs = {
+            "security": {
+                "agents": [
+                    {"template_id": "security_reviewer", "priority": 1},
+                    {"template_id": "result_synthesizer", "priority": 2},
+                ],
+                "stages": [
+                    {"name": "Analysis", "agents": ["security_reviewer"], "parallel": False},
+                    {"name": "Synthesis", "agents": ["result_synthesizer"], "parallel": False},
+                ],
+            },
+            "code_review": {
+                "agents": [
+                    {"template_id": "code_quality_reviewer", "priority": 1},
+                    {"template_id": "result_synthesizer", "priority": 2},
+                ],
+                "stages": [
+                    {"name": "Analysis", "agents": ["code_quality_reviewer"], "parallel": False},
+                    {"name": "Synthesis", "agents": ["result_synthesizer"], "parallel": False},
+                ],
+            },
+            "testing": {
+                "agents": [
+                    {"template_id": "test_generator", "priority": 1},
+                ],
+                "stages": [
+                    {"name": "Generation", "agents": ["test_generator"], "parallel": False},
+                ],
+            },
+        }
+
+        config = configs.get(domain, configs["code_review"])
+
+        return LLMAgentRecommendation(
+            agents=config["agents"],
+            workflow_stages=config["stages"],
+            estimated_cost_tier="moderate",
+            estimated_duration="moderate",
+        )
+
+
+class MockLLMExecutor:
+    """Mock executor for testing without LLM."""
+
+    async def run(self, **kwargs) -> Any:
+        """Return mock response."""
+        @dataclass
+        class MockResponse:
+            content: str = "{}"
+
+        return MockResponse()
+
+
+def llm_questions_to_form(
+    questions: list[dict[str, Any]],
+    round_number: int,
+    session: SocraticSession,
+) -> Form:
+    """Convert LLM-generated questions to a Form.
+
+    Args:
+        questions: Questions from LLM
+        round_number: Current round number
+        session: Current session
+
+    Returns:
+        Form ready for display
+    """
+    fields = []
+
+    for q in questions:
+        q_id = q.get("id", f"q_{len(fields)}")
+        q_type = q.get("type", "text")
+        q_options = q.get("options", [])
+
+        # Map type to FieldType
+        field_type_map = {
+            "single_select": FieldType.SINGLE_SELECT,
+            "multi_select": FieldType.MULTI_SELECT,
+            "text": FieldType.TEXT,
+            "text_area": FieldType.TEXT_AREA,
+            "boolean": FieldType.BOOLEAN,
+        }
+        field_type = field_type_map.get(q_type, FieldType.TEXT)
+
+        # Build options
+        options = []
+        for opt in q_options:
+            if isinstance(opt, str):
+                options.append(FieldOption(value=opt.lower().replace(" ", "_"), label=opt))
+            elif isinstance(opt, dict):
+                options.append(FieldOption(
+                    value=opt.get("value", opt.get("label", "").lower().replace(" ", "_")),
+                    label=opt.get("label", ""),
+                    description=opt.get("description", ""),
+                ))
+
+        fields.append(FormField(
+            id=q_id,
+            field_type=field_type,
+            label=q.get("question", ""),
+            options=options,
+            validation=FieldValidation(required=q.get("required", False)),
+            category=q.get("category", "general"),
+            order=q.get("priority", 5),
+        ))
+
+    # Sort by priority (higher priority = lower order number = appears first)
+    fields.sort(key=lambda f: f.order, reverse=True)
+
+    progress = min(0.3 + (round_number * 0.15), 0.9)
+
+    return Form(
+        id=f"llm_round_{round_number}",
+        title="A Few More Questions",
+        description=f"Help us understand your needs better. (Round {round_number})",
+        fields=fields,
+        round_number=round_number,
+        progress=progress,
+    )
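
The new empathy_os.socratic.llm_analyzer module is easiest to evaluate end to end. The sketch below is a minimal illustrative driver, not an example shipped with the package: LLMGoalAnalyzer, analyze_goal, and llm_questions_to_form come straight from the diff above, while the SocraticSession(goal=...) constructor call and the import paths are assumptions inferred from the file layout in the change list.

    # Illustrative driver for the new module (a sketch; assumed constructor noted below).
    import asyncio

    from empathy_os.socratic.llm_analyzer import LLMGoalAnalyzer, llm_questions_to_form
    from empathy_os.socratic.session import SocraticSession  # constructor signature assumed

    async def main() -> None:
        # With api_key=None the analyzer falls back to the ANTHROPIC_API_KEY env var,
        # and to EmpathyLLMExecutor (or the mock) if no Anthropic client is available.
        analyzer = LLMGoalAnalyzer(model_tier="cheap")

        analysis = await analyzer.analyze_goal("I want to automate code reviews for my Python repo")
        print(analysis.domain, analysis.confidence, analysis.suggested_agents)

        # suggested_questions follows the JSON schema in GOAL_ANALYSIS_PROMPT, so it can be
        # rendered directly as a form for the first clarification round.
        session = SocraticSession(goal="Automate code reviews")  # assumed constructor
        form = llm_questions_to_form(analysis.suggested_questions, round_number=1, session=session)
        print(form.title, [f.label for f in form.fields])

    asyncio.run(main())

Because every public call wraps its LLM round-trip in a try/except, a missing anthropic package or API key degrades to the executor or mock path rather than raising.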
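
_parse_json_response is the piece that keeps the three LLM calls robust: it accepts bare JSON, JSON wrapped in a Markdown code fence, or JSON embedded in surrounding prose, and returns an empty dict otherwise. Below is a self-contained sketch of that same recovery strategy; extract_json is a hypothetical name used only for illustration, not an empathy-framework API.

    # Standalone sketch of the JSON-recovery strategy used by _parse_json_response.
    import json
    import re
    from typing import Any

    def extract_json(content: str) -> dict[str, Any]:  # hypothetical helper, for illustration
        # 1. Bare JSON response.
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            pass
        # 2. JSON inside a ``` or ```json fence.
        fenced = re.search(r"```(?:json)?\s*([\s\S]*?)```", content)
        if fenced:
            try:
                return json.loads(fenced.group(1))
            except json.JSONDecodeError:
                pass
        # 3. Widest {...} span embedded in prose (greedy, so nested braces stay inside).
        embedded = re.search(r"\{[\s\S]*\}", content)
        if embedded:
            try:
                return json.loads(embedded.group(0))
            except json.JSONDecodeError:
                pass
        return {}

    assert extract_json('{"domain": "testing"}') == {"domain": "testing"}
    assert extract_json('Sure!\n```json\n{"domain": "security"}\n```') == {"domain": "security"}
    assert extract_json("no JSON here") == {}

The greedy third pattern mirrors the original code's choice: it prefers capturing too much over truncating a nested object, and any remaining failure falls through to the rule-based _fallback_analysis path.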