vibefast-cli 0.2.3 → 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.wrangler/state/v3/r2/miniflare-R2BucketObject/d1cc388a1a0ef44dd5669fd1a165d168b61362136c8b5fa50aefd96c72688e54.sqlite +0 -0
- package/.wrangler/state/v3/r2/miniflare-R2BucketObject/d1cc388a1a0ef44dd5669fd1a165d168b61362136c8b5fa50aefd96c72688e54.sqlite-shm +0 -0
- package/.wrangler/state/v3/r2/miniflare-R2BucketObject/d1cc388a1a0ef44dd5669fd1a165d168b61362136c8b5fa50aefd96c72688e54.sqlite-wal +0 -0
- package/.wrangler/state/v3/r2/vibefast-recipes/blobs/177b5d7279681c1bec396cafe63779a2d89eaf538109e55733147727276e2b9f0000019a81f04ba2 +0 -0
- package/.wrangler/state/v3/r2/vibefast-recipes/blobs/4fe398bba6e2d5f13b569bc1be4244e696d86caa04c323db2d9fb0b9381c508f0000019a81f0503f +0 -0
- package/.wrangler/state/v3/r2/vibefast-recipes/blobs/f68f19a655380ac7fb575eb49c0623cde74046261ed89c498ba5107b8aacde9d0000019a81f05484 +0 -0
- package/package.json +1 -1
- package/recipes/chatbot/packages/backend/convex/agents.ts +116 -0
- package/recipes/chatbot/packages/backend/convex/chatbotAgent.ts +1085 -0
- package/recipes/chatbot/packages/backend/convex/chatbotHistory.ts +307 -0
- package/recipes/chatbot/packages/backend/convex/lib/rateLimit.ts +100 -0
- package/recipes/chatbot/packages/backend/convex/lib/telemetry.ts +29 -0
- package/recipes/chatbot/packages/backend/convex/ragKnowledge.ts +714 -0
- package/recipes/chatbot/packages/backend/convex/tools/index.ts +18 -0
- package/recipes/chatbot/packages/backend/convex/tools/knowledgeRetrieval.ts +92 -0
- package/recipes/chatbot/packages/backend/convex/tools/tavilySearch.ts +83 -0
- package/recipes/chatbot/packages/backend/convex/tools/userProfile.ts +72 -0
- package/recipes/chatbot/recipe.json +89 -1
- package/recipes/chatbot@latest.zip +0 -0
- package/recipes/image-generator/packages/backend/convex/imageGeneration/index.ts +12 -0
- package/recipes/image-generator/packages/backend/convex/imageGeneratorFunctions.ts +290 -0
- package/recipes/image-generator/recipe.json +41 -1
- package/recipes/image-generator@latest.zip +0 -0
- package/recipes/voice-bot/packages/backend/convex/router.ts +81 -0
- package/recipes/voice-bot/recipe.json +48 -1
- package/recipes/voice-bot@latest.zip +0 -0
Binary files (listed above): no textual diff shown.
package/package.json
CHANGED

package/recipes/chatbot/packages/backend/convex/agents.ts
ADDED
@@ -0,0 +1,116 @@
+import { createGoogleGenerativeAI } from '@ai-sdk/google';
+import { openai } from '@ai-sdk/openai';
+import type { LanguageModelV1 } from '@ai-sdk/provider';
+import { Agent } from '@convex-dev/agent';
+
+import { components } from './_generated/api';
+import {
+  knowledgeRetrievalTool,
+  tavilySearchTool,
+  userProfileTool,
+} from './tools';
+
+const google = createGoogleGenerativeAI();
+
+type ModelProvider = 'openai' | 'google';
+
+type ModelResolver = () => {
+  model: LanguageModelV1;
+  provider: ModelProvider;
+  modelId: string;
+};
+
+const PROVIDER_CHAT_FACTORIES: Record<
+  ModelProvider,
+  (modelId: string) => LanguageModelV1
+> = {
+  openai: (modelId) => openai.chat(modelId),
+  google: (modelId) => google.chat(modelId),
+};
+
+const MODEL_CONFIGS = {
+  'gpt-4.1-nano': { provider: 'openai', modelId: 'gpt-4.1-nano' },
+  'gpt-5': { provider: 'openai', modelId: 'gpt-5' },
+  'gpt-5-nano': { provider: 'openai', modelId: 'gpt-5-nano' },
+  'gpt-4o-mini': { provider: 'openai', modelId: 'gpt-4o-mini' },
+  'gpt-4o': { provider: 'openai', modelId: 'gpt-4o' },
+  'gemini-2.0-flash': {
+    provider: 'google',
+    modelId: 'models/gemini-2.0-flash',
+  },
+  'gemini-1.5-flash': {
+    provider: 'google',
+    modelId: 'models/gemini-1.5-flash',
+  },
+  'gemini-1.5-pro': { provider: 'google', modelId: 'models/gemini-1.5-pro' },
+  'gemini-2.5-flash': {
+    provider: 'google',
+    modelId: 'models/gemini-2.5-flash-preview-05-20',
+  },
+  'gemini-2.5-pro': {
+    provider: 'google',
+    modelId: 'models/gemini-2.5-pro-preview-06-05',
+  },
+} satisfies Record<string, { provider: ModelProvider; modelId: string }>;
+
+const MODEL_RESOLVERS = Object.fromEntries(
+  Object.entries(MODEL_CONFIGS).map(([key, config]) => [
+    key,
+    () => ({
+      model: PROVIDER_CHAT_FACTORIES[config.provider](config.modelId),
+      provider: config.provider,
+      modelId: config.modelId,
+    }),
+  ]),
+) as Record<string, ModelResolver>;
+
+const DEFAULT_MODEL: keyof typeof MODEL_CONFIGS = 'gpt-4.1-nano';
+
+export function resolveChatModel(preferredModel?: string) {
+  const modelKey =
+    preferredModel && MODEL_RESOLVERS[preferredModel]
+      ? preferredModel
+      : DEFAULT_MODEL;
+
+  const resolver = MODEL_RESOLVERS[modelKey] ?? MODEL_RESOLVERS[DEFAULT_MODEL];
+
+  try {
+    return resolver();
+  } catch (error) {
+    if (modelKey !== DEFAULT_MODEL) {
+      return MODEL_RESOLVERS[DEFAULT_MODEL]();
+    }
+    throw error;
+  }
+}
+
+export const agent: Agent = new Agent(components.agent, {
+  name: 'VibeFast Assistant',
+  chat: MODEL_RESOLVERS[DEFAULT_MODEL]().model,
+  instructions: `
+You are VibeFast, a proactive mobile AI assistant. Produce concise, friendly responses with clear structure.
+
+- Use markdown for readability (lists, bold keywords, inline code where appropriate).
+- When a question depends on current or niche information, call the tavilySearch tool first and cite the findings.
+- When the user asks about their account, credits, or profile details, call the userProfile tool before answering.
+- When the user references uploaded files or knowledge base material, call the knowledgeRetrieval tool with relevant filters to ground your answer.
+- For image attachments, describe what you see before reasoning about it.
+- If a user asks for something you cannot do, gracefully explain the limitation and offer an alternative.
+- Keep responses focused on the user's goal; avoid unnecessary chatter.
+  `.trim(),
+  tools: {
+    knowledgeRetrieval: knowledgeRetrievalTool,
+    tavilySearch: tavilySearchTool,
+    userProfile: userProfileTool,
+  },
+  maxSteps: 6,
+  contextOptions: {
+    recentMessages: 50,
+  },
+  storageOptions: {
+    saveMessages: 'promptAndOutput',
+  },
+});
+
+export const SUPPORTED_MODEL_KEYS = Object.keys(MODEL_RESOLVERS);
+export { DEFAULT_MODEL };
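
For orientation, here is a minimal usage sketch of the exports above. It is not part of the published package: pickModel and the relative import path './agents' are assumptions for illustration. It shows the behavior of resolveChatModel(), which returns the DEFAULT_MODEL ('gpt-4.1-nano') resolution whenever the preferred key is missing from MODEL_CONFIGS or its provider factory throws.

// Hypothetical caller, not shipped in vibefast-cli.
import { DEFAULT_MODEL, SUPPORTED_MODEL_KEYS, resolveChatModel } from './agents';

export function pickModel(preferredModel?: string) {
  // Unknown keys (or resolvers that throw) fall back to DEFAULT_MODEL.
  const { model, provider, modelId } = resolveChatModel(preferredModel);
  return {
    model,      // LanguageModelV1 instance to hand to the agent
    provider,   // 'openai' | 'google'
    modelId,    // e.g. 'gpt-4.1-nano' or 'models/gemini-2.0-flash'
    supportedModels: SUPPORTED_MODEL_KEYS,
    defaultModel: DEFAULT_MODEL,
  };
}

Returning provider and modelId alongside the model mirrors the ModelResolver shape, so a caller can log which model actually served a request after a silent fallback.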