khoj 1.30.11.dev55__py3-none-any.whl → 1.30.11.dev62__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- khoj/configure.py +3 -3
- khoj/database/adapters/__init__.py +15 -15
- khoj/database/admin.py +8 -7
- khoj/database/migrations/0076_rename_openaiprocessorconversationconfig_aimodelapi_and_more.py +26 -0
- khoj/database/models/__init__.py +32 -12
- khoj/interface/compiled/404/index.html +1 -1
- khoj/interface/compiled/_next/static/chunks/app/agents/{page-2ffa7560aebff9a1.js → page-8eead7920b0ff92a.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/automations/{page-b0a6a6ed2267c1a2.js → page-b5800b5286306140.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/layout-3435e55ef2f45d88.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/chat/{page-02f8616bba3e449e.js → page-d7d2ab93e519f0b2.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/{page-3ffd8f0934b896f3.js → page-3c32ad5472f75965.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/search/layout-2677a9f48ccd7b38.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/search/{page-059f237514f77628.js → page-faa998c71eb7ca8e.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/settings/{page-32e9423bede5b4a1.js → page-cbe7f56b1f87d77a.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-dc97434f0354a74e.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-0b8d90dc57dbc1d8.js → page-cd5757199539bbf2.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{webpack-56eb35ef7ce6af2e.js → webpack-e220e21d9a8c0d5e.js} +1 -1
- khoj/interface/compiled/_next/static/css/33e9fb1e7a15c208.css +1 -0
- khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +1 -0
- khoj/interface/compiled/agents/index.html +1 -1
- khoj/interface/compiled/agents/index.txt +2 -2
- khoj/interface/compiled/automations/index.html +1 -1
- khoj/interface/compiled/automations/index.txt +2 -2
- khoj/interface/compiled/chat/index.html +1 -1
- khoj/interface/compiled/chat/index.txt +2 -2
- khoj/interface/compiled/index.html +1 -1
- khoj/interface/compiled/index.txt +2 -2
- khoj/interface/compiled/search/index.html +1 -1
- khoj/interface/compiled/search/index.txt +2 -2
- khoj/interface/compiled/settings/index.html +1 -1
- khoj/interface/compiled/settings/index.txt +2 -2
- khoj/interface/compiled/share/chat/index.html +1 -1
- khoj/interface/compiled/share/chat/index.txt +2 -2
- khoj/migrations/migrate_server_pg.py +3 -9
- khoj/processor/conversation/openai/utils.py +1 -6
- khoj/processor/conversation/prompts.py +1 -1
- khoj/processor/conversation/utils.py +13 -14
- khoj/processor/image/generate.py +2 -2
- khoj/routers/api.py +4 -5
- khoj/routers/helpers.py +11 -11
- khoj/utils/helpers.py +2 -2
- khoj/utils/initialization.py +14 -12
- {khoj-1.30.11.dev55.dist-info → khoj-1.30.11.dev62.dist-info}/METADATA +1 -1
- {khoj-1.30.11.dev55.dist-info → khoj-1.30.11.dev62.dist-info}/RECORD +49 -48
- khoj/interface/compiled/_next/static/chunks/app/chat/layout-9219a85f3477e722.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/search/layout-2ca475462c0b2176.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-f662c9e5091603cf.js +0 -1
- khoj/interface/compiled/_next/static/css/34f2d177dfee2dd8.css +0 -1
- khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +0 -1
- /khoj/interface/compiled/_next/static/{WZaSBOrll6IpdAZVluZmu → FJZukpb_TPhFjZ5RC7rGN}/_buildManifest.js +0 -0
- /khoj/interface/compiled/_next/static/{WZaSBOrll6IpdAZVluZmu → FJZukpb_TPhFjZ5RC7rGN}/_ssgManifest.js +0 -0
- {khoj-1.30.11.dev55.dist-info → khoj-1.30.11.dev62.dist-info}/WHEEL +0 -0
- {khoj-1.30.11.dev55.dist-info → khoj-1.30.11.dev62.dist-info}/entry_points.txt +0 -0
- {khoj-1.30.11.dev55.dist-info → khoj-1.30.11.dev62.dist-info}/licenses/LICENSE +0 -0
khoj/interface/compiled/share/chat/index.txt
CHANGED
@@ -1,7 +1,7 @@
 2:I[66513,[],"ClientPageRoot"]
-3:I[5506,["3954","static/chunks/d3ac728e-a9e3522eef9b6b28.js","3072","static/chunks/3072-be830e4f8412b9d2.js","4752","static/chunks/4752-554a3db270186ce3.js","7592","static/chunks/7592-a09c39a38e60634b.js","3690","static/chunks/3690-51312931ba1eae30.js","3463","static/chunks/3463-081c031e873b7966.js","1603","static/chunks/1603-e40aadd1e56ab030.js","8423","static/chunks/8423-1dda16bc56236523.js","5538","static/chunks/5538-0ea2d3944ca051e1.js","3111","static/chunks/app/share/chat/page-
+3:I[5506,["3954","static/chunks/d3ac728e-a9e3522eef9b6b28.js","3072","static/chunks/3072-be830e4f8412b9d2.js","4752","static/chunks/4752-554a3db270186ce3.js","7592","static/chunks/7592-a09c39a38e60634b.js","3690","static/chunks/3690-51312931ba1eae30.js","3463","static/chunks/3463-081c031e873b7966.js","1603","static/chunks/1603-e40aadd1e56ab030.js","8423","static/chunks/8423-1dda16bc56236523.js","5538","static/chunks/5538-0ea2d3944ca051e1.js","3111","static/chunks/app/share/chat/page-cd5757199539bbf2.js"],"default",1]
 4:I[39275,[],""]
 5:I[61343,[],""]
-0:["
+0:["FJZukpb_TPhFjZ5RC7rGN",[[["",{"children":["share",{"children":["chat",{"children":["__PAGE__",{}]}]}]},"$undefined","$undefined",true],["",{"children":["share",{"children":["chat",{"children":["__PAGE__",{},[["$L1",["$","$L2",null,{"props":{"params":{},"searchParams":{}},"Component":"$3"}],[["$","link","0",{"rel":"stylesheet","href":"/_next/static/css/3cf13271869a4aeb.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","1",{"rel":"stylesheet","href":"/_next/static/css/33e9fb1e7a15c208.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","2",{"rel":"stylesheet","href":"/_next/static/css/1f293605f2871853.css","precedence":"next","crossOrigin":"$undefined"}]]],null],null]},[[null,["$","html",null,{"lang":"en","className":"__variable_f36179 __variable_702545","children":[["$","meta",null,{"httpEquiv":"Content-Security-Policy","content":"default-src 'self' https://assets.khoj.dev; media-src * blob:; script-src 'self' https://assets.khoj.dev https://app.chatwoot.com 'unsafe-inline' 'unsafe-eval'; connect-src 'self' blob: https://ipapi.co/json ws://localhost:42110; style-src 'self' https://assets.khoj.dev 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: blob: https://*.khoj.dev https://*.googleusercontent.com https://*.google.com/ https://*.gstatic.com; font-src 'self' https://assets.khoj.dev https://fonts.gstatic.com; child-src 'self' https://app.chatwoot.com; object-src 'none';"}],["$","body",null,{"children":[["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children","share","children","chat","children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":"$undefined","notFoundStyles":"$undefined"}],["$","script",null,{"dangerouslySetInnerHTML":{"__html":"window.EXCALIDRAW_ASSET_PATH = 'https://assets.khoj.dev/@excalidraw/excalidraw/dist/';"}}]]}]]}]],null],null]},[null,["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children","share","children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":"$undefined","notFoundStyles":"$undefined"}]],null]},[[[["$","link","0",{"rel":"stylesheet","href":"/_next/static/css/089de1d8526b96e9.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","1",{"rel":"stylesheet","href":"/_next/static/css/3c34171b174cc381.css","precedence":"next","crossOrigin":"$undefined"}]],["$","html",null,{"lang":"en","className":"__variable_f36179 __variable_702545","children":[["$","meta",null,{"httpEquiv":"Content-Security-Policy","content":"default-src 'self' https://assets.khoj.dev; media-src * blob:; script-src 'self' https://assets.khoj.dev https://app.chatwoot.com 'unsafe-inline' 'unsafe-eval'; connect-src 'self' blob: https://ipapi.co/json ws://localhost:42110; style-src 'self' https://assets.khoj.dev 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: blob: https://*.khoj.dev https://*.googleusercontent.com https://*.google.com/ https://*.gstatic.com; font-src 'self' https://assets.khoj.dev https://fonts.gstatic.com; child-src 'self' https://app.chatwoot.com; object-src 'none';"}],["$","body",null,{"children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[]}]}]]}]],null],null],["$L6",null]]]]
 6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"Khoj AI - Chat"}],["$","meta","3",{"name":"description","content":"Use this page to view a chat with Khoj AI."}],["$","link","4",{"rel":"manifest","href":"/static/khoj.webmanifest","crossOrigin":"use-credentials"}],["$","meta","5",{"property":"og:title","content":"Khoj AI"}],["$","meta","6",{"property":"og:description","content":"Your Second Brain."}],["$","meta","7",{"property":"og:url","content":"https://app.khoj.dev/"}],["$","meta","8",{"property":"og:site_name","content":"Khoj AI"}],["$","meta","9",{"property":"og:image","content":"https://assets.khoj.dev/khoj_lantern_256x256.png"}],["$","meta","10",{"property":"og:image:width","content":"256"}],["$","meta","11",{"property":"og:image:height","content":"256"}],["$","meta","12",{"property":"og:image","content":"https://assets.khoj.dev/khoj_lantern_logomarktype_1200x630.png"}],["$","meta","13",{"property":"og:image:width","content":"1200"}],["$","meta","14",{"property":"og:image:height","content":"630"}],["$","meta","15",{"property":"og:type","content":"website"}],["$","meta","16",{"name":"twitter:card","content":"summary_large_image"}],["$","meta","17",{"name":"twitter:title","content":"Khoj AI"}],["$","meta","18",{"name":"twitter:description","content":"Your Second Brain."}],["$","meta","19",{"name":"twitter:image","content":"https://assets.khoj.dev/khoj_lantern_256x256.png"}],["$","meta","20",{"name":"twitter:image:width","content":"256"}],["$","meta","21",{"name":"twitter:image:height","content":"256"}],["$","meta","22",{"name":"twitter:image","content":"https://assets.khoj.dev/khoj_lantern_logomarktype_1200x630.png"}],["$","meta","23",{"name":"twitter:image:width","content":"1200"}],["$","meta","24",{"name":"twitter:image:height","content":"630"}],["$","link","25",{"rel":"icon","href":"/static/assets/icons/khoj_lantern.ico"}],["$","link","26",{"rel":"apple-touch-icon","href":"/static/assets/icons/khoj_lantern_256x256.png"}],["$","meta","27",{"name":"next-size-adjust"}]]
 1:null
khoj/migrations/migrate_server_pg.py
CHANGED
@@ -60,11 +60,7 @@ import logging
 
 from packaging import version
 
-from khoj.database.models import (
-    ChatModelOptions,
-    OpenAIProcessorConversationConfig,
-    SearchModelConfig,
-)
+from khoj.database.models import AiModelApi, ChatModelOptions, SearchModelConfig
 from khoj.utils.yaml import load_config_from_file, save_config_to_file
 
 logger = logging.getLogger(__name__)
@@ -121,16 +121,14 @@ def migrate_server_pg(args):
         if openai.get("chat-model") is None:
             openai["chat-model"] = "gpt-3.5-turbo"
 
-
-            api_key=openai.get("api-key"), name="default"
-        )
+        openai_model_api = AiModelApi.objects.create(api_key=openai.get("api-key"), name="default")
 
         ChatModelOptions.objects.create(
             chat_model=openai.get("chat-model"),
             tokenizer=processor_conversation.get("tokenizer"),
             max_prompt_size=processor_conversation.get("max-prompt-size"),
             model_type=ChatModelOptions.ModelType.OPENAI,
-
+            ai_model_api=openai_model_api,
         )
 
         save_config_to_file(raw_config, args.config_file)
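The new migration 0076_rename_openaiprocessorconversationconfig_aimodelapi_and_more.py is listed above but its body is not part of this diff. For orientation only, a hypothetical sketch of the shape such a Django migration usually takes, assuming the change is a plain model rename plus renaming the foreign keys that pointed at the old model (the previous-migration name and the old field name are placeholders, not taken from the package):

```python
# Hypothetical sketch; the actual 0076 migration shipped in the wheel may differ.
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ("database", "0075_previous_migration"),  # placeholder dependency
    ]

    operations = [
        # Rename the provider credentials model away from its OpenAI-specific name.
        migrations.RenameModel(
            old_name="OpenAIProcessorConversationConfig",
            new_name="AiModelApi",
        ),
        # Rename the foreign key on chat model options to match (old field name assumed).
        migrations.RenameField(
            model_name="chatmodeloptions",
            old_name="openai_config",
            new_name="ai_model_api",
        ),
    ]
```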
khoj/processor/conversation/openai/utils.py
CHANGED
@@ -19,12 +19,7 @@ from khoj.processor.conversation.utils import (
     ThreadedGenerator,
     commit_conversation_trace,
 )
-from khoj.utils import
-from khoj.utils.helpers import (
-    get_chat_usage_metrics,
-    in_debug_mode,
-    is_promptrace_enabled,
-)
+from khoj.utils.helpers import get_chat_usage_metrics, is_promptrace_enabled
 
 logger = logging.getLogger(__name__)
 
khoj/processor/conversation/prompts.py
CHANGED
@@ -850,7 +850,7 @@ python_code_generation_prompt = PromptTemplate.from_template(
 You are Khoj, an advanced python programmer. You are tasked with constructing a python program to best answer the user query.
 - The python program will run in a pyodide python sandbox with no network access.
 - You can write programs to run complex calculations, analyze data, create charts, generate documents to meticulously answer the query.
-- The sandbox has access to the standard library, matplotlib, panda, numpy, scipy, bs4
+- The sandbox has access to the standard library, matplotlib, panda, numpy, scipy, bs4 and sympy packages. The requests, torch, catboost, tensorflow and tkinter packages are not available.
 - List known file paths to required user documents in "input_files" and known links to required documents from the web in the "input_links" field.
 - The python program should be self-contained. It can only read data generated by the program itself and from provided input_files, input_links by their basename (i.e filename excluding file path).
 - Do not try display images or plots in the code directly. The code should save the image or plot to a file instead.
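The updated prompt spells out which packages the Pyodide sandbox exposes and that nothing may be displayed directly. A minimal illustrative program that stays inside those constraints (made-up data; only numpy and matplotlib from the allowed set, chart written to a file):

```python
# Illustrative only: uses packages the prompt lists as available and saves output
# to a file instead of displaying it, since the sandbox has no display or network.
import numpy as np
import matplotlib

matplotlib.use("Agg")  # headless backend
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 200)
y = np.sin(x)

plt.plot(x, y)
plt.title("Example chart")
plt.savefig("chart.png")  # save the plot, rather than calling plt.show()
```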
khoj/processor/conversation/utils.py
CHANGED
@@ -433,20 +433,19 @@ def generate_chatml_messages_with_context(
             reconstructed_context_message = ChatMessage(content=message_context, role="user")
             chatml_messages.insert(0, reconstructed_context_message)
 
-        if chat.get("images"):
-
-
-
-
-
-
-
-
-
-
-
-        )
+        if chat.get("images") and role == "assistant":
+            # Issue: the assistant role cannot accept an image as a message content, so send it in a separate user message.
+            file_attachment_message = construct_structured_message(
+                message=prompts.generated_image_attachment.format(),
+                images=chat.get("images"),
+                model_type=model_type,
+                vision_enabled=vision_enabled,
+            )
+            chatml_messages.append(ChatMessage(content=file_attachment_message, role="user"))
+
+        message_content = construct_structured_message(
+            chat_message, chat.get("images") if role == "user" else [], model_type, vision_enabled
+        )
 
         reconstructed_message = ChatMessage(content=message_content, role=role)
         chatml_messages.insert(0, reconstructed_message)
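The rewritten block works around a constraint of chat-completion APIs: an assistant turn cannot carry image parts, so images produced by a past assistant reply are re-sent as a separate user message before that turn's text is added. A simplified, self-contained sketch of the idea; ChatMessage and attach_images below are stand-ins, not khoj's actual classes or helpers:

```python
from dataclasses import dataclass


@dataclass
class ChatMessage:
    content: object  # plain text or a structured list of text/image parts
    role: str        # "user" or "assistant"


def attach_images(message: str, images: list[str]) -> list[dict]:
    # Stand-in for khoj's construct_structured_message(): one text part plus one part per image.
    return [{"type": "text", "text": message}] + [
        {"type": "image_url", "image_url": {"url": image}} for image in images
    ]


def rebuild_turn(text: str, role: str, images: list[str]) -> list[ChatMessage]:
    messages = []
    if images and role == "assistant":
        # Assistant turns cannot hold images, so ship them in a separate user message.
        messages.append(ChatMessage(content=attach_images("Image generated earlier", images), role="user"))
        images = []
    content = attach_images(text, images) if images else text
    messages.append(ChatMessage(content=content, role=role))
    return messages
```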
khoj/processor/image/generate.py
CHANGED
@@ -124,8 +124,8 @@ def generate_image_with_openai(
     # Get the API key from the user's configuration
     if text_to_image_config.api_key:
         api_key = text_to_image_config.api_key
-    elif text_to_image_config.
-        api_key = text_to_image_config.
+    elif text_to_image_config.ai_model_api:
+        api_key = text_to_image_config.ai_model_api.api_key
     elif state.openai_client:
         api_key = state.openai_client.api_key
     auth_header = {"Authorization": f"Bearer {api_key}"} if api_key else {}
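After the rename, the key lookup order in this function is: the text-to-image config's own key, then the key on its linked AiModelApi, then the server's default OpenAI client. A tiny hypothetical helper capturing that precedence (not part of khoj; argument shapes assumed):

```python
from typing import Optional


def resolve_image_api_key(text_to_image_config, openai_client=None) -> Optional[str]:
    # Mirrors the precedence in generate_image_with_openai above; illustrative only.
    if getattr(text_to_image_config, "api_key", None):
        return text_to_image_config.api_key
    if getattr(text_to_image_config, "ai_model_api", None):
        return text_to_image_config.ai_model_api.api_key
    if openai_client is not None:
        return openai_client.api_key
    return None
```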
khoj/routers/api.py
CHANGED
@@ -430,9 +430,8 @@ async def extract_references_and_questions(
            tracer=tracer,
        )
    elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
-
-
-        base_url = openai_chat_config.api_base_url
+        api_key = conversation_config.ai_model_api.api_key
+        base_url = conversation_config.ai_model_api.api_base_url
        chat_model = conversation_config.chat_model
        inferred_queries = extract_questions(
            defiltered_query,
@@ -449,7 +448,7 @@ async def extract_references_and_questions(
            tracer=tracer,
        )
    elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        chat_model = conversation_config.chat_model
        inferred_queries = extract_questions_anthropic(
            defiltered_query,
@@ -465,7 +464,7 @@ async def extract_references_and_questions(
            tracer=tracer,
        )
    elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
-        api_key = conversation_config.ai_model_api.api_key
+        api_key = conversation_config.ai_model_api.api_key
        chat_model = conversation_config.chat_model
        inferred_queries = extract_questions_gemini(
            defiltered_query,
khoj/routers/helpers.py
CHANGED
@@ -136,7 +136,7 @@ def validate_conversation_config(user: KhojUser):
     if default_config is None:
         raise HTTPException(status_code=500, detail="Contact the server administrator to add a chat model.")
 
-    if default_config.model_type == "openai" and not default_config.
+    if default_config.model_type == "openai" and not default_config.ai_model_api:
         raise HTTPException(status_code=500, detail="Contact the server administrator to add a chat model.")
 
 
@@ -163,7 +163,7 @@ async def is_ready_to_chat(user: KhojUser):
                ChatModelOptions.ModelType.GOOGLE,
            ]
        )
-        and user_conversation_config.
+        and user_conversation_config.ai_model_api
    ):
        return True
 
@@ -990,7 +990,7 @@ async def send_message_to_model_wrapper(
        )
 
    elif model_type == ChatModelOptions.ModelType.OPENAI:
-        openai_chat_config = conversation_config.
+        openai_chat_config = conversation_config.ai_model_api
        api_key = openai_chat_config.api_key
        api_base_url = openai_chat_config.api_base_url
        truncated_messages = generate_chatml_messages_with_context(
@@ -1015,7 +1015,7 @@ async def send_message_to_model_wrapper(
            tracer=tracer,
        )
    elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        truncated_messages = generate_chatml_messages_with_context(
            user_message=query,
            context_message=context,
@@ -1037,7 +1037,7 @@ async def send_message_to_model_wrapper(
            tracer=tracer,
        )
    elif model_type == ChatModelOptions.ModelType.GOOGLE:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        truncated_messages = generate_chatml_messages_with_context(
            user_message=query,
            context_message=context,
@@ -1102,7 +1102,7 @@ def send_message_to_model_wrapper_sync(
        )
 
    elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        truncated_messages = generate_chatml_messages_with_context(
            user_message=message,
            system_message=system_message,
@@ -1124,7 +1124,7 @@ def send_message_to_model_wrapper_sync(
        return openai_response
 
    elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        truncated_messages = generate_chatml_messages_with_context(
            user_message=message,
            system_message=system_message,
@@ -1144,7 +1144,7 @@ def send_message_to_model_wrapper_sync(
        )
 
    elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        truncated_messages = generate_chatml_messages_with_context(
            user_message=message,
            system_message=system_message,
@@ -1255,7 +1255,7 @@ def generate_chat_response(
        )
 
    elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
-        openai_chat_config = conversation_config.
+        openai_chat_config = conversation_config.ai_model_api
        api_key = openai_chat_config.api_key
        chat_model = conversation_config.chat_model
        chat_response = converse(
@@ -1285,7 +1285,7 @@ def generate_chat_response(
        )
 
    elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        chat_response = converse_anthropic(
            compiled_references,
            query_to_run,
@@ -1311,7 +1311,7 @@ def generate_chat_response(
            tracer=tracer,
        )
    elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
-        api_key = conversation_config.
+        api_key = conversation_config.ai_model_api.api_key
        chat_response = converse_gemini(
            compiled_references,
            query_to_run,
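Every branch above now reads credentials through the renamed ai_model_api relation rather than an OpenAI-specific config object. A small hypothetical helper showing the pattern these call sites share (the attribute names come from the diff; the helper itself is not in khoj):

```python
from typing import Optional, Tuple


def resolve_model_credentials(conversation_config) -> Tuple[Optional[str], Optional[str]]:
    """Return (api_key, api_base_url) from a chat model config's linked AiModelApi, if any."""
    ai_model_api = getattr(conversation_config, "ai_model_api", None)
    if ai_model_api is None:
        return None, None
    return ai_model_api.api_key, getattr(ai_model_api, "api_base_url", None)
```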
khoj/utils/helpers.py
CHANGED
@@ -366,7 +366,7 @@ tool_descriptions_for_llm = {
     ConversationCommand.Notes: "To search the user's personal knowledge base. Especially helpful if the question expects context from the user's notes or documents.",
     ConversationCommand.Online: "To search for the latest, up-to-date information from the internet. Note: **Questions about Khoj should always use this data source**",
     ConversationCommand.Webpage: "To use if the user has directly provided the webpage urls or you are certain of the webpage urls to read.",
-    ConversationCommand.Code: "To run Python code in a Pyodide sandbox with no network access. Helpful when need to parse information, run complex calculations, create plaintext documents, and create charts with quantitative data.
+    ConversationCommand.Code: "To run Python code in a Pyodide sandbox with no network access. Helpful when need to parse complex information, run complex calculations, create plaintext documents, and create charts with quantitative data. Only matplotlib, panda, numpy, scipy, bs4 and sympy external packages are available.",
     ConversationCommand.Summarize: "To retrieve an answer that depends on the entire document or a large text.",
 }
 
@@ -374,7 +374,7 @@ function_calling_description_for_llm = {
     ConversationCommand.Notes: "To search the user's personal knowledge base. Especially helpful if the question expects context from the user's notes or documents.",
     ConversationCommand.Online: "To search the internet for information. Useful to get a quick, broad overview from the internet. Provide all relevant context to ensure new searches, not in previous iterations, are performed.",
     ConversationCommand.Webpage: "To extract information from webpages. Useful for more detailed research from the internet. Usually used when you know the webpage links to refer to. Share the webpage links and information to extract in your query.",
-    ConversationCommand.Code: "To run Python code in a Pyodide sandbox with no network access. Helpful when need to parse information, run complex calculations, create plaintext documents, and create charts with quantitative data.
+    ConversationCommand.Code: "To run Python code in a Pyodide sandbox with no network access. Helpful when need to parse complex information, run complex calculations, create plaintext documents, and create charts with quantitative data. Only matplotlib, panda, numpy, scipy, bs4 and sympy external packages are available.",
 }
 
 mode_descriptions_for_llm = {
khoj/utils/initialization.py
CHANGED
@@ -6,9 +6,9 @@ import openai
 
 from khoj.database.adapters import ConversationAdapters
 from khoj.database.models import (
+    AiModelApi,
     ChatModelOptions,
     KhojUser,
-    OpenAIProcessorConversationConfig,
     SpeechToTextModelOptions,
     TextToImageModelConfig,
 )
@@ -98,7 +98,7 @@ def initialization(interactive: bool = True):
        TextToImageModelConfig.objects.create(
            model_name=openai_text_to_image_model,
            model_type=TextToImageModelConfig.ModelType.OPENAI,
-
+            ai_model_api=openai_provider,
        )
 
    # Set up Google's Gemini online chat models
@@ -177,7 +177,7 @@ def initialization(interactive: bool = True):
        vision_enabled: bool = False,
        is_offline: bool = False,
        provider_name: str = None,
-    ) -> Tuple[bool,
+    ) -> Tuple[bool, AiModelApi]:
        supported_vision_models = (
            default_openai_chat_models + default_anthropic_chat_models + default_gemini_chat_models
        )
@@ -192,16 +192,14 @@ def initialization(interactive: bool = True):
 
        logger.info(f"️💬 Setting up your {provider_name} chat configuration")
 
-
+        ai_model_api = None
        if not is_offline:
            if interactive:
                user_api_key = input(f"Enter your {provider_name} API key (default: {default_api_key}): ")
                api_key = user_api_key if user_api_key != "" else default_api_key
            else:
                api_key = default_api_key
-
-                api_key=api_key, name=provider_name, api_base_url=api_base_url
-            )
+            ai_model_api = AiModelApi.objects.create(api_key=api_key, name=provider_name, api_base_url=api_base_url)
 
        if interactive:
            chat_model_names = input(
@@ -223,19 +221,23 @@ def initialization(interactive: bool = True):
            "max_prompt_size": default_max_tokens,
            "vision_enabled": vision_enabled,
            "tokenizer": default_tokenizer,
-            "
+            "ai_model_api": ai_model_api,
        }
 
        ChatModelOptions.objects.create(**chat_model_options)
 
        logger.info(f"🗣️ {provider_name} chat model configuration complete")
-        return True,
+        return True, ai_model_api
 
    def _update_chat_model_options():
        """Update available chat models for OpenAI-compatible APIs"""
        try:
            # Get OpenAI configs with custom base URLs
-            custom_configs =
+            custom_configs = AiModelApi.objects.exclude(api_base_url__isnull=True)
+
+            # Only enable for whitelisted provider names (i.e Ollama) for now
+            # TODO: This is hacky. Will be replaced with more robust solution based on provider type enum
+            custom_configs = custom_configs.filter(name__in=["Ollama"])
 
            for config in custom_configs:
                try:
@@ -247,7 +249,7 @@ def initialization(interactive: bool = True):
 
                    # Get existing chat model options for this config
                    existing_models = ChatModelOptions.objects.filter(
-
+                        ai_model_api=config, model_type=ChatModelOptions.ModelType.OPENAI
                    )
 
                    # Add new models
@@ -259,7 +261,7 @@ def initialization(interactive: bool = True):
                        max_prompt_size=model_to_prompt_size.get(model),
                        vision_enabled=model in default_openai_chat_models,
                        tokenizer=model_to_tokenizer.get(model),
-
+                        ai_model_api=config,
                    )
 
                    # Remove models that are no longer available
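The renamed model itself is declared in khoj/database/models/__init__.py, which this diff lists but does not display. Based only on the fields exercised above (name, api_key, api_base_url), a rough sketch of what AiModelApi plausibly looks like; field types, lengths, and defaults are assumptions:

```python
# Assumed shape; the real model in khoj.database.models may use different field options.
from django.db import models


class AiModelApi(models.Model):
    name = models.CharField(max_length=200)
    api_key = models.CharField(max_length=4000)
    api_base_url = models.URLField(max_length=400, blank=True, null=True, default=None)

    def __str__(self):
        return self.name
```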