khoj-1.31.1.dev1-py3-none-any.whl → khoj-1.31.1.dev9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- khoj/database/adapters/__init__.py +51 -51
- khoj/database/admin.py +8 -8
- khoj/database/migrations/0077_chatmodel_alter_agent_chat_model_and_more.py +62 -0
- khoj/database/models/__init__.py +7 -7
- khoj/interface/compiled/404/index.html +1 -1
- khoj/interface/compiled/_next/static/chunks/app/agents/{page-2a0b821cf69bdf06.js → page-22cff208006a7f6a.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/automations/{page-ffa30be1dda97643.js → page-6ed169c6f570a6e2.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/layout-1072c3b0ab136e74.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/chat/{page-c2c62ae6f013443c.js → page-a92692cd2b735a82.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/{page-083f798a7562cda5.js → page-25b44557720dc93a.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/search/layout-cae84c87073877f0.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/search/{page-845fe099f1f4375e.js → page-4e3bb682be861aa7.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/settings/{page-3257ef0146ab18da.js → page-142e854abace0750.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-3b0c60bc13a963db.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-f625859c1a122441.js → page-2518c42e2d59b121.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{webpack-8baab5e194ab74e6.js → webpack-3a2dfd74acf6e193.js} +1 -1
- khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +1 -0
- khoj/interface/compiled/_next/static/css/ae6bc2d6cdba8da7.css +1 -0
- khoj/interface/compiled/agents/index.html +1 -1
- khoj/interface/compiled/agents/index.txt +2 -2
- khoj/interface/compiled/automations/index.html +1 -1
- khoj/interface/compiled/automations/index.txt +2 -2
- khoj/interface/compiled/chat/index.html +1 -1
- khoj/interface/compiled/chat/index.txt +2 -2
- khoj/interface/compiled/index.html +1 -1
- khoj/interface/compiled/index.txt +2 -2
- khoj/interface/compiled/search/index.html +1 -1
- khoj/interface/compiled/search/index.txt +2 -2
- khoj/interface/compiled/settings/index.html +1 -1
- khoj/interface/compiled/settings/index.txt +2 -2
- khoj/interface/compiled/share/chat/index.html +1 -1
- khoj/interface/compiled/share/chat/index.txt +2 -2
- khoj/migrations/migrate_server_pg.py +7 -7
- khoj/processor/conversation/anthropic/anthropic_chat.py +3 -3
- khoj/processor/conversation/google/gemini_chat.py +3 -3
- khoj/processor/conversation/offline/chat_model.py +3 -3
- khoj/processor/conversation/openai/gpt.py +3 -3
- khoj/processor/conversation/utils.py +4 -4
- khoj/routers/api.py +22 -27
- khoj/routers/api_agents.py +4 -4
- khoj/routers/api_chat.py +6 -6
- khoj/routers/api_model.py +4 -4
- khoj/routers/helpers.py +106 -102
- khoj/utils/initialization.py +17 -17
- {khoj-1.31.1.dev1.dist-info → khoj-1.31.1.dev9.dist-info}/METADATA +1 -1
- {khoj-1.31.1.dev1.dist-info → khoj-1.31.1.dev9.dist-info}/RECORD +51 -50
- khoj/interface/compiled/_next/static/chunks/app/chat/layout-9219a85f3477e722.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/search/layout-2ca475462c0b2176.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-592e8c470f2c2084.js +0 -1
- khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +0 -1
- khoj/interface/compiled/_next/static/css/f172a0fb3eb177e1.css +0 -1
- /khoj/interface/compiled/_next/static/{dgvSdwrDoCLqxLqsw0wnz → RbKhSKUaO-yxcUXaKm6x4}/_buildManifest.js +0 -0
- /khoj/interface/compiled/_next/static/{dgvSdwrDoCLqxLqsw0wnz → RbKhSKUaO-yxcUXaKm6x4}/_ssgManifest.js +0 -0
- {khoj-1.31.1.dev1.dist-info → khoj-1.31.1.dev9.dist-info}/WHEEL +0 -0
- {khoj-1.31.1.dev1.dist-info → khoj-1.31.1.dev9.dist-info}/entry_points.txt +0 -0
- {khoj-1.31.1.dev1.dist-info → khoj-1.31.1.dev9.dist-info}/licenses/LICENSE +0 -0
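The headline change in this file list is the new migration, 0077_chatmodel_alter_agent_chat_model_and_more.py, together with the router and conversation-processor edits below that swap the ChatModelOptions model for a ChatModel model. The migration file itself is not shown in this diff; the following is only a minimal sketch of what a rename migration of this shape typically looks like in Django — the dependency name and exact operations are assumptions, not the packaged file's contents.

# Hypothetical sketch only: the real 0077_chatmodel_alter_agent_chat_model_and_more.py
# is not included in this diff.
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ("database", "0076_..."),  # placeholder; the actual predecessor migration is not shown
    ]

    operations = [
        # Rename the model in place; the underlying table data is preserved.
        migrations.RenameModel(old_name="ChatModelOptions", new_name="ChatModel"),
        # The "alter_agent_chat_model" part of the migration name suggests the
        # Agent.chat_model foreign key is also re-pointed at the renamed model.
    ]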
khoj/interface/compiled/share/chat/index.txt
CHANGED
@@ -1,7 +1,7 @@
 2:I[66513,[],"ClientPageRoot"]
-3:I[5506,["3954","static/chunks/d3ac728e-a9e3522eef9b6b28.js","3072","static/chunks/3072-be830e4f8412b9d2.js","4200","static/chunks/4200-ea75740bb3c6ae60.js","7592","static/chunks/7592-a09c39a38e60634b.js","3690","static/chunks/3690-51312931ba1eae30.js","3463","static/chunks/3463-081c031e873b7966.js","1603","static/chunks/1603-f8ef9930c1f4eaef.js","8423","static/chunks/8423-1dda16bc56236523.js","5538","static/chunks/5538-0ea2d3944ca051e1.js","3111","static/chunks/app/share/chat/page-
+3:I[5506,["3954","static/chunks/d3ac728e-a9e3522eef9b6b28.js","3072","static/chunks/3072-be830e4f8412b9d2.js","4200","static/chunks/4200-ea75740bb3c6ae60.js","7592","static/chunks/7592-a09c39a38e60634b.js","3690","static/chunks/3690-51312931ba1eae30.js","3463","static/chunks/3463-081c031e873b7966.js","1603","static/chunks/1603-f8ef9930c1f4eaef.js","8423","static/chunks/8423-1dda16bc56236523.js","5538","static/chunks/5538-0ea2d3944ca051e1.js","3111","static/chunks/app/share/chat/page-2518c42e2d59b121.js"],"default",1]
 4:I[39275,[],""]
 5:I[61343,[],""]
-0:["
+0:["RbKhSKUaO-yxcUXaKm6x4",[[["",{"children":["share",{"children":["chat",{"children":["__PAGE__",{}]}]}]},"$undefined","$undefined",true],["",{"children":["share",{"children":["chat",{"children":["__PAGE__",{},[["$L1",["$","$L2",null,{"props":{"params":{},"searchParams":{}},"Component":"$3"}],[["$","link","0",{"rel":"stylesheet","href":"/_next/static/css/3cf13271869a4aeb.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","1",{"rel":"stylesheet","href":"/_next/static/css/ae6bc2d6cdba8da7.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","2",{"rel":"stylesheet","href":"/_next/static/css/1f293605f2871853.css","precedence":"next","crossOrigin":"$undefined"}]]],null],null]},[[null,["$","html",null,{"lang":"en","className":"__variable_f36179 __variable_702545","children":[["$","meta",null,{"httpEquiv":"Content-Security-Policy","content":"default-src 'self' https://assets.khoj.dev; media-src * blob:; script-src 'self' https://assets.khoj.dev https://app.chatwoot.com 'unsafe-inline' 'unsafe-eval'; connect-src 'self' blob: https://ipapi.co/json ws://localhost:42110; style-src 'self' https://assets.khoj.dev 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: blob: https://*.khoj.dev https://*.googleusercontent.com https://*.google.com/ https://*.gstatic.com; font-src 'self' https://assets.khoj.dev https://fonts.gstatic.com; child-src 'self' https://app.chatwoot.com; object-src 'none';"}],["$","body",null,{"children":[["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children","share","children","chat","children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":"$undefined","notFoundStyles":"$undefined"}],["$","script",null,{"dangerouslySetInnerHTML":{"__html":"window.EXCALIDRAW_ASSET_PATH = 'https://assets.khoj.dev/@excalidraw/excalidraw/dist/';"}}]]}]]}]],null],null]},[null,["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children","share","children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":"$undefined","notFoundStyles":"$undefined"}]],null]},[[[["$","link","0",{"rel":"stylesheet","href":"/_next/static/css/089de1d8526b96e9.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","1",{"rel":"stylesheet","href":"/_next/static/css/fd628f01a581ec3c.css","precedence":"next","crossOrigin":"$undefined"}]],["$","html",null,{"lang":"en","className":"__variable_f36179 __variable_702545","children":[["$","meta",null,{"httpEquiv":"Content-Security-Policy","content":"default-src 'self' https://assets.khoj.dev; media-src * blob:; script-src 'self' https://assets.khoj.dev https://app.chatwoot.com 'unsafe-inline' 'unsafe-eval'; connect-src 'self' blob: https://ipapi.co/json ws://localhost:42110; style-src 'self' https://assets.khoj.dev 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: blob: https://*.khoj.dev https://*.googleusercontent.com https://*.google.com/ https://*.gstatic.com; font-src 'self' https://assets.khoj.dev https://fonts.gstatic.com; child-src 'self' https://app.chatwoot.com; object-src 'none';"}],["$","body",null,{"children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[]}]}]]}]],null],null],["$L6",null]]]]
 6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"Khoj AI - Chat"}],["$","meta","3",{"name":"description","content":"Use this page to view a chat with Khoj AI."}],["$","link","4",{"rel":"manifest","href":"/static/khoj.webmanifest","crossOrigin":"use-credentials"}],["$","meta","5",{"property":"og:title","content":"Khoj AI"}],["$","meta","6",{"property":"og:description","content":"Your Second Brain."}],["$","meta","7",{"property":"og:url","content":"https://app.khoj.dev/"}],["$","meta","8",{"property":"og:site_name","content":"Khoj AI"}],["$","meta","9",{"property":"og:image","content":"https://assets.khoj.dev/khoj_lantern_256x256.png"}],["$","meta","10",{"property":"og:image:width","content":"256"}],["$","meta","11",{"property":"og:image:height","content":"256"}],["$","meta","12",{"property":"og:image","content":"https://assets.khoj.dev/khoj_lantern_logomarktype_1200x630.png"}],["$","meta","13",{"property":"og:image:width","content":"1200"}],["$","meta","14",{"property":"og:image:height","content":"630"}],["$","meta","15",{"property":"og:type","content":"website"}],["$","meta","16",{"name":"twitter:card","content":"summary_large_image"}],["$","meta","17",{"name":"twitter:title","content":"Khoj AI"}],["$","meta","18",{"name":"twitter:description","content":"Your Second Brain."}],["$","meta","19",{"name":"twitter:image","content":"https://assets.khoj.dev/khoj_lantern_256x256.png"}],["$","meta","20",{"name":"twitter:image:width","content":"256"}],["$","meta","21",{"name":"twitter:image:height","content":"256"}],["$","meta","22",{"name":"twitter:image","content":"https://assets.khoj.dev/khoj_lantern_logomarktype_1200x630.png"}],["$","meta","23",{"name":"twitter:image:width","content":"1200"}],["$","meta","24",{"name":"twitter:image:height","content":"630"}],["$","link","25",{"rel":"icon","href":"/static/assets/icons/khoj_lantern.ico"}],["$","link","26",{"rel":"apple-touch-icon","href":"/static/assets/icons/khoj_lantern_256x256.png"}],["$","meta","27",{"name":"next-size-adjust"}]]
 1:null
khoj/migrations/migrate_server_pg.py
CHANGED
@@ -60,7 +60,7 @@ import logging
 
 from packaging import version
 
-from khoj.database.models import AiModelApi,
+from khoj.database.models import AiModelApi, ChatModel, SearchModelConfig
 from khoj.utils.yaml import load_config_from_file, save_config_to_file
 
 logger = logging.getLogger(__name__)
@@ -98,11 +98,11 @@ def migrate_server_pg(args)
 
     if "offline-chat" in raw_config["processor"]["conversation"]:
         offline_chat = raw_config["processor"]["conversation"]["offline-chat"]
-
-
+        ChatModel.objects.create(
+            name=offline_chat.get("chat-model"),
             tokenizer=processor_conversation.get("tokenizer"),
             max_prompt_size=processor_conversation.get("max-prompt-size"),
-            model_type=
+            model_type=ChatModel.ModelType.OFFLINE,
         )
 
     if (
@@ -119,11 +119,11 @@ def migrate_server_pg(args)
 
         openai_model_api = AiModelApi.objects.create(api_key=openai.get("api-key"), name="default")
 
-
-
+        ChatModel.objects.create(
+            name=openai.get("chat-model"),
             tokenizer=processor_conversation.get("tokenizer"),
             max_prompt_size=processor_conversation.get("max-prompt-size"),
-            model_type=
+            model_type=ChatModel.ModelType.OPENAI,
             ai_model_api=openai_model_api,
         )
 
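The two hunks above port chat settings out of the legacy YAML config into ChatModel rows. For orientation, the relevant slice of that legacy config has roughly the shape sketched below: the key names are the ones read by the code above, the values are invented for illustration, and the exact location of the openai block is an assumption.

# Illustrative shape of the legacy config consumed by migrate_server_pg.
# Keys mirror the .get() calls in the diff above; values are made up.
raw_config = {
    "processor": {
        "conversation": {
            "tokenizer": None,                 # -> ChatModel.tokenizer
            "max-prompt-size": None,           # -> ChatModel.max_prompt_size
            "offline-chat": {
                "chat-model": "some-local-model.gguf",  # -> ChatModel(name=..., model_type=OFFLINE)
            },
            "openai": {                        # assumed to sit alongside offline-chat
                "api-key": "sk-...",           # -> AiModelApi(api_key=..., name="default")
                "chat-model": "gpt-4o-mini",   # -> ChatModel(name=..., model_type=OPENAI, ai_model_api=...)
            },
        }
    }
}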
khoj/processor/conversation/anthropic/anthropic_chat.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, List, Optional
 import pyjson5
 from langchain.schema import ChatMessage
 
-from khoj.database.models import Agent,
+from khoj.database.models import Agent, ChatModel, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.anthropic.utils import (
     anthropic_chat_completion_with_backoff,
@@ -85,7 +85,7 @@ def extract_questions_anthropic(
     prompt = construct_structured_message(
         message=prompt,
         images=query_images,
-        model_type=
+        model_type=ChatModel.ModelType.ANTHROPIC,
         vision_enabled=vision_enabled,
         attached_file_context=query_files,
     )
@@ -218,7 +218,7 @@ def converse_anthropic(
         tokenizer_name=tokenizer_name,
         query_images=query_images,
         vision_enabled=vision_available,
-        model_type=
+        model_type=ChatModel.ModelType.ANTHROPIC,
         query_files=query_files,
         generated_files=generated_files,
         generated_asset_results=generated_asset_results,
khoj/processor/conversation/google/gemini_chat.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, List, Optional
 import pyjson5
 from langchain.schema import ChatMessage
 
-from khoj.database.models import Agent,
+from khoj.database.models import Agent, ChatModel, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.google.utils import (
     format_messages_for_gemini,
@@ -86,7 +86,7 @@ def extract_questions_gemini(
     prompt = construct_structured_message(
         message=prompt,
         images=query_images,
-        model_type=
+        model_type=ChatModel.ModelType.GOOGLE,
         vision_enabled=vision_enabled,
         attached_file_context=query_files,
     )
@@ -229,7 +229,7 @@ def converse_gemini(
         tokenizer_name=tokenizer_name,
         query_images=query_images,
         vision_enabled=vision_available,
-        model_type=
+        model_type=ChatModel.ModelType.GOOGLE,
         query_files=query_files,
         generated_files=generated_files,
         generated_asset_results=generated_asset_results,
khoj/processor/conversation/offline/chat_model.py
CHANGED
@@ -9,7 +9,7 @@ import pyjson5
 from langchain.schema import ChatMessage
 from llama_cpp import Llama
 
-from khoj.database.models import Agent,
+from khoj.database.models import Agent, ChatModel, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.offline.utils import download_model
 from khoj.processor.conversation.utils import (
@@ -96,7 +96,7 @@ def extract_questions_offline(
         model_name=model,
         loaded_model=offline_chat_model,
         max_prompt_size=max_prompt_size,
-        model_type=
+        model_type=ChatModel.ModelType.OFFLINE,
         query_files=query_files,
     )
 
@@ -232,7 +232,7 @@ def converse_offline(
         loaded_model=offline_chat_model,
         max_prompt_size=max_prompt_size,
         tokenizer_name=tokenizer_name,
-        model_type=
+        model_type=ChatModel.ModelType.OFFLINE,
         query_files=query_files,
         generated_files=generated_files,
         generated_asset_results=generated_asset_results,
khoj/processor/conversation/openai/gpt.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, List, Optional
 import pyjson5
 from langchain.schema import ChatMessage
 
-from khoj.database.models import Agent,
+from khoj.database.models import Agent, ChatModel, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.openai.utils import (
     chat_completion_with_backoff,
@@ -83,7 +83,7 @@ def extract_questions(
     prompt = construct_structured_message(
         message=prompt,
         images=query_images,
-        model_type=
+        model_type=ChatModel.ModelType.OPENAI,
         vision_enabled=vision_enabled,
         attached_file_context=query_files,
     )
@@ -220,7 +220,7 @@ def converse_openai(
         tokenizer_name=tokenizer_name,
         query_images=query_images,
         vision_enabled=vision_available,
-        model_type=
+        model_type=ChatModel.ModelType.OPENAI,
         query_files=query_files,
         generated_files=generated_files,
         generated_asset_results=generated_asset_results,
khoj/processor/conversation/utils.py
CHANGED
@@ -24,7 +24,7 @@ from llama_cpp.llama import Llama
 from transformers import AutoTokenizer
 
 from khoj.database.adapters import ConversationAdapters
-from khoj.database.models import
+from khoj.database.models import ChatModel, ClientApplication, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.offline.utils import download_model, infer_max_tokens
 from khoj.search_filter.base_filter import BaseFilter
@@ -330,9 +330,9 @@ def construct_structured_message(
     Format messages into appropriate multimedia format for supported chat model types
     """
     if model_type in [
-
-
-
+        ChatModel.ModelType.OPENAI,
+        ChatModel.ModelType.GOOGLE,
+        ChatModel.ModelType.ANTHROPIC,
     ]:
         if not attached_file_context and not (vision_enabled and images):
             return message
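Every hunk above references ChatModel.ModelType, but its definition lives in khoj/database/models/__init__.py (listed at the top with +7 -7) and is not part of the rendered hunks. From the members and fields used in this diff it is presumably a Django choices enum roughly like the sketch below; field types, options, and anything not referenced above are assumptions.

# Sketch of the ChatModel members implied by their usage in this diff; not the
# actual khoj/database/models/__init__.py source.
from django.db import models


class ChatModel(models.Model):
    class ModelType(models.TextChoices):
        OPENAI = "openai"
        OFFLINE = "offline"
        ANTHROPIC = "anthropic"
        GOOGLE = "google"

    # Only fields referenced in this diff are listed; types and options are guesses.
    name = models.CharField(max_length=200)
    model_type = models.CharField(max_length=200, choices=ModelType.choices)
    max_prompt_size = models.IntegerField(null=True, blank=True)
    tokenizer = models.CharField(max_length=200, null=True, blank=True)
    vision_enabled = models.BooleanField(default=False)
    ai_model_api = models.ForeignKey("AiModelApi", on_delete=models.CASCADE, null=True, blank=True)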
khoj/routers/api.py
CHANGED
@@ -28,12 +28,7 @@ from khoj.database.adapters (
     get_default_search_model,
     get_user_photo,
 )
-from khoj.database.models import
-    Agent,
-    ChatModelOptions,
-    KhojUser,
-    SpeechToTextModelOptions,
-)
+from khoj.database.models import Agent, ChatModel, KhojUser, SpeechToTextModelOptions
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.anthropic.anthropic_chat import (
     extract_questions_anthropic,
@@ -404,15 +399,15 @@ async def extract_references_and_questions(
     # Infer search queries from user message
     with timer("Extracting search queries took", logger):
         # If we've reached here, either the user has enabled offline chat or the openai model is enabled.
-
-        vision_enabled =
+        chat_model = await ConversationAdapters.aget_default_chat_model(user)
+        vision_enabled = chat_model.vision_enabled
 
-        if
+        if chat_model.model_type == ChatModel.ModelType.OFFLINE:
             using_offline_chat = True
-
-            max_tokens =
+            chat_model_name = chat_model.name
+            max_tokens = chat_model.max_prompt_size
             if state.offline_chat_processor_config is None:
-                state.offline_chat_processor_config = OfflineChatProcessorModel(
+                state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model_name, max_tokens)
 
             loaded_model = state.offline_chat_processor_config.loaded_model
 
@@ -424,18 +419,18 @@ async def extract_references_and_questions(
                 should_extract_questions=True,
                 location_data=location_data,
                 user=user,
-                max_prompt_size=
+                max_prompt_size=chat_model.max_prompt_size,
                 personality_context=personality_context,
                 query_files=query_files,
                 tracer=tracer,
             )
-        elif
-            api_key =
-            base_url =
-
+        elif chat_model.model_type == ChatModel.ModelType.OPENAI:
+            api_key = chat_model.ai_model_api.api_key
+            base_url = chat_model.ai_model_api.api_base_url
+            chat_model_name = chat_model.name
             inferred_queries = extract_questions(
                 defiltered_query,
-                model=
+                model=chat_model_name,
                 api_key=api_key,
                 api_base_url=base_url,
                 conversation_log=meta_log,
@@ -447,13 +442,13 @@ async def extract_references_and_questions(
                 query_files=query_files,
                 tracer=tracer,
             )
-        elif
-            api_key =
-
+        elif chat_model.model_type == ChatModel.ModelType.ANTHROPIC:
+            api_key = chat_model.ai_model_api.api_key
+            chat_model_name = chat_model.name
             inferred_queries = extract_questions_anthropic(
                 defiltered_query,
                 query_images=query_images,
-                model=
+                model=chat_model_name,
                 api_key=api_key,
                 conversation_log=meta_log,
                 location_data=location_data,
@@ -463,17 +458,17 @@ async def extract_references_and_questions(
                 query_files=query_files,
                 tracer=tracer,
             )
-        elif
-            api_key =
-
+        elif chat_model.model_type == ChatModel.ModelType.GOOGLE:
+            api_key = chat_model.ai_model_api.api_key
+            chat_model_name = chat_model.name
             inferred_queries = extract_questions_gemini(
                 defiltered_query,
                 query_images=query_images,
-                model=
+                model=chat_model_name,
                 api_key=api_key,
                 conversation_log=meta_log,
                 location_data=location_data,
-                max_tokens=
+                max_tokens=chat_model.max_prompt_size,
                 user=user,
                 vision_enabled=vision_enabled,
                 personality_context=personality_context,
khoj/routers/api_agents.py
CHANGED
@@ -62,7 +62,7 @@ async def all_agents(
             "color": agent.style_color,
             "icon": agent.style_icon,
             "privacy_level": agent.privacy_level,
-            "chat_model": agent.chat_model.
+            "chat_model": agent.chat_model.name,
             "files": file_names,
             "input_tools": agent.input_tools,
             "output_modes": agent.output_modes,
@@ -150,7 +150,7 @@ async def get_agent(
         "color": agent.style_color,
         "icon": agent.style_icon,
         "privacy_level": agent.privacy_level,
-        "chat_model": agent.chat_model.
+        "chat_model": agent.chat_model.name,
         "files": file_names,
         "input_tools": agent.input_tools,
         "output_modes": agent.output_modes,
@@ -225,7 +225,7 @@ async def create_agent(
         "color": agent.style_color,
         "icon": agent.style_icon,
         "privacy_level": agent.privacy_level,
-        "chat_model": agent.chat_model.
+        "chat_model": agent.chat_model.name,
         "files": body.files,
         "input_tools": agent.input_tools,
         "output_modes": agent.output_modes,
@@ -286,7 +286,7 @@ async def update_agent(
         "color": agent.style_color,
         "icon": agent.style_icon,
         "privacy_level": agent.privacy_level,
-        "chat_model": agent.chat_model.
+        "chat_model": agent.chat_model.name,
         "files": body.files,
         "input_tools": agent.input_tools,
         "output_modes": agent.output_modes,
khoj/routers/api_chat.py
CHANGED
@@ -58,7 +58,7 @@ from khoj.routers.helpers import (
     is_ready_to_chat,
     read_chat_stream,
     update_telemetry_state,
-
+    validate_chat_model,
 )
 from khoj.routers.research import (
     InformationCollectionIteration,
@@ -205,7 +205,7 @@ def chat_history(
     n: Optional[int] = None,
 ):
     user = request.user.object
-
+    validate_chat_model(user)
 
     # Load Conversation History
     conversation = ConversationAdapters.get_conversation_by_user(
@@ -898,10 +898,10 @@ async def chat(
         custom_filters = []
         if conversation_commands == [ConversationCommand.Help]:
             if not q:
-
-                if
-
-                model_type =
+                chat_model = await ConversationAdapters.aget_user_chat_model(user)
+                if chat_model == None:
+                    chat_model = await ConversationAdapters.aget_default_chat_model(user)
+                model_type = chat_model.model_type
                 formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
             async for result in send_llm_response(formatted_help, tracer.get("usage")):
                 yield result
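The help-command hunk above resolves which model to report by trying the user's configured chat model first and falling back to the server default. Pulled out on its own, that lookup is just the two adapter calls shown in the diff; the resolve_chat_model wrapper below is a hypothetical name used only for this sketch.

# Two-step model resolution used by the help path above (sketch; helper name is hypothetical).
from khoj.database.adapters import ConversationAdapters


async def resolve_chat_model(user):
    # Prefer the model the user picked in settings ...
    chat_model = await ConversationAdapters.aget_user_chat_model(user)
    # ... otherwise fall back to the server-wide default.
    if chat_model is None:
        chat_model = await ConversationAdapters.aget_default_chat_model(user)
    return chat_model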
khoj/routers/api_model.py
CHANGED
@@ -24,7 +24,7 @@ def get_chat_model_options(
 
     all_conversation_options = list()
     for conversation_option in conversation_options:
-        all_conversation_options.append({"chat_model": conversation_option.
+        all_conversation_options.append({"chat_model": conversation_option.name, "id": conversation_option.id})
 
     return Response(content=json.dumps(all_conversation_options), media_type="application/json", status_code=200)
 
@@ -37,12 +37,12 @@ def get_user_chat_model(
 ):
     user = request.user.object
 
-    chat_model = ConversationAdapters.
+    chat_model = ConversationAdapters.get_chat_model(user)
 
     if chat_model is None:
-        chat_model = ConversationAdapters.
+        chat_model = ConversationAdapters.get_default_chat_model(user)
 
-    return Response(status_code=200, content=json.dumps({"id": chat_model.id, "chat_model": chat_model.
+    return Response(status_code=200, content=json.dumps({"id": chat_model.id, "chat_model": chat_model.name}))
 
 
 @api_model.post("/chat", status_code=200)