@lore-ai/cli 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/README.md +178 -0
  2. package/dist/bin/lore.js +14666 -0
  3. package/dist/bin/lore.js.map +1 -0
  4. package/dist/ui/assets/Analytics-W2ANIC2s.js +1 -0
  5. package/dist/ui/assets/ConversationDetail-Ct-hROwS.js +5 -0
  6. package/dist/ui/assets/Conversations-iK7E6GEl.js +1 -0
  7. package/dist/ui/assets/MarkdownPreview-2zDiish4.js +17 -0
  8. package/dist/ui/assets/MarkdownPreview-ZgkIHsf0.css +1 -0
  9. package/dist/ui/assets/Mcps-CCT1FQ4H.js +1 -0
  10. package/dist/ui/assets/Overview-B_jOY8il.js +1 -0
  11. package/dist/ui/assets/PhaseReview-B_DDY9YB.js +1 -0
  12. package/dist/ui/assets/RepoScans-FxiMynYO.js +2 -0
  13. package/dist/ui/assets/RepoSelector-DmPRS8kf.js +1 -0
  14. package/dist/ui/assets/ResizablePanels-Bbb4S6Ss.js +1 -0
  15. package/dist/ui/assets/Review-KjvS-DNP.js +3 -0
  16. package/dist/ui/assets/ScanConversation-BonEB7pv.js +1 -0
  17. package/dist/ui/assets/Scans-DXf2sNms.js +1 -0
  18. package/dist/ui/assets/Skills-MvVWWoB2.js +1 -0
  19. package/dist/ui/assets/ToolUsage-DA5MJNwl.js +33 -0
  20. package/dist/ui/assets/Vetting-BRGVrtOA.js +1 -0
  21. package/dist/ui/assets/index-CVSL0ryk.js +12 -0
  22. package/dist/ui/assets/index-DYKYIfPr.css +1 -0
  23. package/dist/ui/assets/markdown-CZuQZQX5.js +35 -0
  24. package/dist/ui/index.html +15 -0
  25. package/package.json +96 -0
  26. package/prompts/analyze-feedback.md +67 -0
  27. package/prompts/apply-refs-update.md +149 -0
  28. package/prompts/apply-skill-update.md +151 -0
  29. package/prompts/check-relevance.md +137 -0
  30. package/prompts/classify-conversations.md +78 -0
  31. package/prompts/cluster-repo-summaries.md +76 -0
  32. package/prompts/detect-staleness.md +42 -0
  33. package/prompts/distill-changes.md +62 -0
  34. package/prompts/distill-decisions.md +48 -0
  35. package/prompts/distill-patterns.md +39 -0
  36. package/prompts/generate-changelog.md +42 -0
  37. package/prompts/generate-references.md +192 -0
  38. package/prompts/generate-repo-skill.md +387 -0
  39. package/prompts/global-summary.md +55 -0
  40. package/prompts/orchestrate-merge.md +70 -0
  41. package/prompts/pr-description.md +49 -0
  42. package/prompts/research-repo.md +121 -0
  43. package/prompts/summarize-conversation.md +64 -0
  44. package/prompts/test-mcp.md +62 -0
  45. package/prompts/test-skill.md +72 -0
package/prompts/summarize-conversation.md
@@ -0,0 +1,64 @@
+ <role>
+ You are a conversation analysis engine. You extract structured metadata from AI coding conversation transcripts.
+
+ CRITICAL: Output ONLY a single JSON object. No commentary, no explanation, no markdown fences. Raw JSON only.
+ </role>
+
+ <task>
+ Analyze the conversation transcript below and extract a structured summary.
+
+ Focus on:
+ 1. **Summary**: What was the conversation about? What was being built/fixed/discussed?
+ 2. **Decisions**: Architectural, tooling, or design decisions made during the conversation. Include what was decided, what alternatives were considered, and why this option was chosen.
+ 3. **Patterns**: Coding patterns established, discussed, or adopted.
+ 4. **Tools used**: Which AI tools were invoked (e.g. Bash, Read, Write, StrReplace, Grep, Shell).
+ 5. **MCPs used**: Which MCP servers were referenced (e.g. octocode, cursor-ide-browser, figma).
+ 6. **Docs referenced**: Documentation files mentioned (README.md, CONTRIBUTING.md, etc.).
+ 7. **Skills referenced**: Any .cursor/skills/ or SKILL.md files mentioned.
+ 8. **Files modified**: Files that were created, edited, or deleted.
+ 9. **Important notes**: Gotchas, warnings, constraints, performance issues, or discoveries.
+ 10. **Outcome**: Was the task completed, partially done, or abandoned?
+ </task>
+
+ <mechanical_hints>
+ The following were mechanically extracted from the transcript. Use them as a starting point but verify against the actual content:
+ {{MECHANICAL_HINTS}}
+ </mechanical_hints>
+
+ <conversation>
+ Source: {{SOURCE}}
+ Title: {{TITLE}}
+ Date: {{DATE}}
+ Messages: {{MESSAGE_COUNT}}
+
+ {{TRANSCRIPT}}
+ </conversation>
+
+ <output_format>
+ {
+ "summary": "2-3 sentence overview of the conversation",
+ "decisions": [
+ { "decision": "what was decided", "context": "why", "date": "when (optional)" }
+ ],
+ "patterns": [
+ { "pattern": "pattern name or description", "example": "code example or file (optional)" }
+ ],
+ "toolsUsed": ["Bash", "Read", "Write"],
+ "mcpsUsed": ["octocode"],
+ "docsReferenced": ["README.md"],
+ "skillsReferenced": [".cursor/skills/foo"],
+ "filesModified": ["src/app.ts", "package.json"],
+ "importantNotes": ["any gotchas or warnings"],
+ "outcome": "completed | partial | abandoned | unknown"
+ }
+ </output_format>
+
+ <rules>
+ - Output ONLY the JSON object. No markdown, no fences, no text before or after.
+ - Every field must be present. Use empty arrays [] if no data.
+ - For "summary", write 2-3 concise sentences.
+ - For "decisions", only include real architectural/design decisions, not trivial code changes. Include alternatives considered and rationale when visible in the conversation.
+ - For "outcome": use "completed" if the task was finished, "partial" if partially done, "abandoned" if given up, "unknown" if unclear.
+ - Keep file paths relative when possible.
+ - Do NOT hallucinate tools or files not present in the transcript.
+ </rules>
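
The summarize-conversation.md prompt above demands a bare JSON object with every field present. As a hypothetical illustration only (not part of the package), a TypeScript sketch of how a caller might type and defensively parse that output, assuming the response may occasionally arrive wrapped in code fences despite the rules:

// Hypothetical shape of the summarize-conversation output (mirrors <output_format> above).
interface ConversationSummary {
  summary: string;
  decisions: { decision: string; context: string; date?: string }[];
  patterns: { pattern: string; example?: string }[];
  toolsUsed: string[];
  mcpsUsed: string[];
  docsReferenced: string[];
  skillsReferenced: string[];
  filesModified: string[];
  importantNotes: string[];
  outcome: "completed" | "partial" | "abandoned" | "unknown";
}

// Strip stray fences (the prompt forbids them, but models sometimes add them anyway) and parse.
function parseConversationSummary(raw: string): ConversationSummary {
  const cleaned = raw.trim().replace(/^```(?:json)?\s*/i, "").replace(/\s*```$/, "");
  const parsed = JSON.parse(cleaned) as Record<string, unknown>;
  const required = [
    "summary", "decisions", "patterns", "toolsUsed", "mcpsUsed",
    "docsReferenced", "skillsReferenced", "filesModified", "importantNotes", "outcome",
  ];
  for (const key of required) {
    if (!(key in parsed)) throw new Error(`Missing field in summary JSON: ${key}`);
  }
  return parsed as unknown as ConversationSummary;
}
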
package/prompts/test-mcp.md
@@ -0,0 +1,62 @@
+ <role>
+ You are an MCP server evaluator. You test MCP tool configurations by analyzing their tool definitions and simulating invocations.
+ </role>
+
+ <task>
+ Evaluate the provided MCP server configuration by analyzing its tools, generating test invocations, and scoring reliability.
+ </task>
+
+ <mcp_config>
+ {mcp_config}
+ </mcp_config>
+
+ <usage_data>
+ {usage_data}
+ </usage_data>
+
+ <instructions>
+ 1. List all tools exposed by the MCP server
+ 2. For each tool, generate a basic invocation test case and an error case
+ 3. Evaluate whether the tool's input schema is well-defined
+ 4. Check if the tool produces useful, structured output based on usage data
+ 5. Score the server on reliability, tool coverage, and documentation quality
+ </instructions>
+
+ <output_format>
+ {
+ "serverName": "...",
+ "toolCount": 0,
+ "scores": {
+ "reliability": 0,
+ "toolCoverage": 0,
+ "documentation": 0,
+ "total": 0
+ },
+ "toolTests": [
+ {
+ "tool": "tool_name",
+ "testType": "basic|error",
+ "input": {},
+ "expectedBehavior": "...",
+ "assessment": "...",
+ "passed": true
+ }
+ ],
+ "strengths": ["..."],
+ "weaknesses": [
+ {
+ "area": "reliability|toolCoverage|documentation",
+ "description": "...",
+ "suggestedFix": "...",
+ "severity": "minor|structural"
+ }
+ ]
+ }
+ </output_format>
+
+ <rules>
+ - Respond ONLY with valid JSON matching the schema above
+ - Do not wrap the response in markdown code fences
+ - Score each dimension 0-33 (total 0-100)
+ - Be specific about which tools have issues
+ </rules>
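
The test-mcp.md prompt scores three dimensions at 0-33 each. As a hypothetical sketch only (not shipped with the package), a TypeScript check of the returned scores, assuming the total is meant to be the sum of the three dimensions:

// Hypothetical shape of the MCP evaluation scores (mirrors <output_format> above).
interface McpScores {
  reliability: number;
  toolCoverage: number;
  documentation: number;
  total: number;
}

// Verify each dimension lies in 0-33 and the total matches their sum (assumption: total = sum).
function checkMcpScores(scores: McpScores): boolean {
  const dims = [scores.reliability, scores.toolCoverage, scores.documentation];
  const inRange = dims.every((d) => Number.isInteger(d) && d >= 0 && d <= 33);
  const sum = dims.reduce((a, b) => a + b, 0);
  return inRange && scores.total === sum;
}
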
package/prompts/test-skill.md
@@ -0,0 +1,72 @@
+ <role>
+ You are a skill evaluator for Cursor IDE skills. You test skills by simulating realistic usage scenarios and scoring the results.
+ </role>
+
+ <task>
+ Evaluate the provided SKILL.md by generating test scenarios, simulating execution, and scoring each rubric dimension.
+ </task>
+
+ <skill_content>
+ {skill_content}
+ </skill_content>
+
+ <other_skills>
+ {other_skills}
+ </other_skills>
+
+ <user_rules>
+ {user_rules}
+ </user_rules>
+
+ <instructions>
+ 1. Parse the SKILL.md frontmatter and body completely
+ 2. Generate 3-5 test scenarios covering happy path, edge case, boundary, negative, and error recovery
+ 3. For each scenario, evaluate whether the skill's instructions would guide Claude to produce correct output
+ 4. Score each rubric dimension (Clarity, Trigger Accuracy, Output Quality, Instruction Completeness) from 0-25
+ 5. Check for conflicts with other installed skills and user rules
+ 6. Identify specific strengths and weaknesses
+ </instructions>
+
+ <output_format>
+ {
+ "scores": {
+ "clarity": 0,
+ "triggerAccuracy": 0,
+ "outputQuality": 0,
+ "instructionCompleteness": 0,
+ "total": 0
+ },
+ "testResults": [
+ {
+ "scenario": "name",
+ "input": "simulated user message",
+ "expectedBehavior": "what should happen",
+ "actualAssessment": "what would likely happen given the skill's instructions",
+ "passed": true
+ }
+ ],
+ "strengths": ["..."],
+ "weaknesses": [
+ {
+ "area": "clarity|triggerAccuracy|outputQuality|instructionCompleteness",
+ "description": "...",
+ "suggestedFix": "...",
+ "severity": "minor|structural"
+ }
+ ],
+ "conflicts": [
+ {
+ "conflictsWith": "skill or rule name",
+ "description": "...",
+ "resolution": "..."
+ }
+ ]
+ }
+ </output_format>
+
+ <rules>
+ - Respond ONLY with valid JSON matching the schema above
+ - Do not wrap the response in markdown code fences
+ - Be specific in weakness descriptions -- reference exact lines or sections
+ - Score conservatively: only give 25 if the dimension is truly excellent
+ </rules>
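
The test-skill.md prompt scores four rubric dimensions at 0-25 each, alongside per-scenario test results. A hypothetical TypeScript sketch (not part of the package) of recomputing the rubric total and the test pass rate from that JSON:

// Hypothetical rubric and test-result shapes (mirror <output_format> above).
interface SkillScores {
  clarity: number;
  triggerAccuracy: number;
  outputQuality: number;
  instructionCompleteness: number;
  total: number;
}

interface SkillTestResult {
  scenario: string;
  passed: boolean;
}

// Recompute the 0-100 total from the four 0-25 dimensions and summarize the pass rate.
function summarizeSkillEval(scores: SkillScores, tests: SkillTestResult[]): string {
  const sum =
    scores.clarity + scores.triggerAccuracy + scores.outputQuality + scores.instructionCompleteness;
  const passed = tests.filter((t) => t.passed).length;
  return `rubric ${sum}/100, tests ${passed}/${tests.length} passed`;
}
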