khoj 1.36.7.dev18__py3-none-any.whl → 1.36.7.dev22__py3-none-any.whl
This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the registry.
- khoj/interface/compiled/404/index.html +2 -2
- khoj/interface/compiled/_next/static/chunks/{2327-02e86a50c65e575a.js → 2327-36d17f2483e80f60.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{8155-ad130153ddcc930f.js → 8155-87b4d2ea2cf725cc.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/agents/{layout-4a0e32561d6b1e27.js → layout-447b58869479276c.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/{layout-9e151fb837f53026.js → layout-4d0b1ba93124fccb.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/{page-24bf7a5c917dbaff.js → page-4108f46796c1c606.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/{layout-26139159e500852a.js → layout-6dba801826c4fe59.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/{page-528e96d17e520304.js → page-f91e6a6a849baf5e.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/search/{layout-66f736b858b38c2c.js → layout-ab5dbb69fb914900.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-975934b12916f3d4.js → page-ffcb3ce5c6af9988.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{webpack-32b7cc428f0d015b.js → webpack-f83765a7accac982.js} +1 -1
- khoj/interface/compiled/_next/static/css/7889a30fe9c83846.css +1 -0
- khoj/interface/compiled/_next/static/css/8051073dc55b92b3.css +1 -0
- khoj/interface/compiled/_next/static/css/b15666ef52060cd0.css +1 -0
- khoj/interface/compiled/_next/static/media/2aa11a72f7f24b58-s.woff2 +0 -0
- khoj/interface/compiled/_next/static/media/383a65b63658737d-s.woff2 +0 -0
- khoj/interface/compiled/_next/static/media/40381518f67e6cb9-s.p.woff2 +0 -0
- khoj/interface/compiled/_next/static/media/85fe2766c5e6072a-s.woff2 +0 -0
- khoj/interface/compiled/_next/static/media/8a6e4d7cd15e805a-s.woff2 +0 -0
- khoj/interface/compiled/agents/index.html +3 -3
- khoj/interface/compiled/agents/index.txt +3 -3
- khoj/interface/compiled/automations/index.html +2 -2
- khoj/interface/compiled/automations/index.txt +4 -4
- khoj/interface/compiled/chat/index.html +3 -3
- khoj/interface/compiled/chat/index.txt +3 -3
- khoj/interface/compiled/index.html +2 -2
- khoj/interface/compiled/index.txt +3 -3
- khoj/interface/compiled/search/index.html +2 -2
- khoj/interface/compiled/search/index.txt +3 -3
- khoj/interface/compiled/settings/index.html +3 -3
- khoj/interface/compiled/settings/index.txt +5 -5
- khoj/interface/compiled/share/chat/index.html +3 -3
- khoj/interface/compiled/share/chat/index.txt +2 -2
- khoj/processor/conversation/anthropic/anthropic_chat.py +6 -3
- khoj/processor/conversation/anthropic/utils.py +48 -13
- khoj/processor/conversation/google/gemini_chat.py +7 -7
- khoj/processor/conversation/google/utils.py +63 -63
- khoj/processor/conversation/utils.py +6 -0
- khoj/routers/helpers.py +5 -0
- khoj/routers/research.py +1 -0
- khoj/utils/constants.py +4 -1
- khoj/utils/helpers.py +1 -1
- {khoj-1.36.7.dev18.dist-info → khoj-1.36.7.dev22.dist-info}/METADATA +6 -6
- {khoj-1.36.7.dev18.dist-info → khoj-1.36.7.dev22.dist-info}/RECORD +55 -51
- khoj/interface/compiled/_next/static/css/017016e6eff88fdc.css +0 -1
- khoj/interface/compiled/_next/static/css/089de1d8526b96e9.css +0 -1
- khoj/interface/compiled/_next/static/css/440ae0f0f650dc35.css +0 -1
- khoj/interface/compiled/_next/static/media/e098aaaecc9cfbb2-s.p.woff2 +0 -0
- /khoj/interface/compiled/_next/static/chunks/{1915-4b7980a58fb630d6.js → 1915-233ac8a122732d6b.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{2117-f99825f0a867a42d.js → 2117-ce1f0a4598f5e4fe.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{4363-ac51bce40b6fc313.js → 4363-9870bda67c2cf031.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{4447-30959771ff58d99d.js → 4447-6e47461d1100c3cc.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{8667-adbe6017a66cef10.js → 8667-8136f74e9a086fca.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{9259-5be50737cfe989bc.js → 9259-fa40e7cf2ca28e04.js} +0 -0
- /khoj/interface/compiled/_next/static/{SaAC14w_LNITF1YZanO67 → w25ObnntxL_4D4MY2j-Yc}/_buildManifest.js +0 -0
- /khoj/interface/compiled/_next/static/{SaAC14w_LNITF1YZanO67 → w25ObnntxL_4D4MY2j-Yc}/_ssgManifest.js +0 -0
- {khoj-1.36.7.dev18.dist-info → khoj-1.36.7.dev22.dist-info}/WHEEL +0 -0
- {khoj-1.36.7.dev18.dist-info → khoj-1.36.7.dev22.dist-info}/entry_points.txt +0 -0
- {khoj-1.36.7.dev18.dist-info → khoj-1.36.7.dev22.dist-info}/licenses/LICENSE +0 -0
khoj/processor/conversation/google/gemini_chat.py
CHANGED
@@ -34,7 +34,7 @@ def extract_questions_gemini(
     model: Optional[str] = "gemini-2.0-flash",
     conversation_log={},
     api_key=None,
-    temperature=0.
+    temperature=0.6,
     max_tokens=None,
     location_data: LocationData = None,
     user: KhojUser = None,
@@ -121,14 +121,14 @@ def gemini_send_message_to_model(
     api_key,
     model,
     response_type="text",
-    temperature=0.
+    temperature=0.6,
     model_kwargs=None,
     tracer={},
 ):
     """
     Send message to model
     """
-
+    messages_for_gemini, system_prompt = format_messages_for_gemini(messages)

     model_kwargs = {}

@@ -138,7 +138,7 @@ def gemini_send_message_to_model(

     # Get Response from Gemini
     return gemini_completion_with_backoff(
-        messages=
+        messages=messages_for_gemini,
         system_prompt=system_prompt,
         model_name=model,
         api_key=api_key,
@@ -156,7 +156,7 @@ def converse_gemini(
     conversation_log={},
     model: Optional[str] = "gemini-2.0-flash",
     api_key: Optional[str] = None,
-    temperature: float = 0.
+    temperature: float = 0.6,
     completion_func=None,
     conversation_commands=[ConversationCommand.Default],
     max_prompt_size=None,
@@ -236,12 +236,12 @@ def converse_gemini(
         program_execution_context=program_execution_context,
     )

-
+    messages_for_gemini, system_prompt = format_messages_for_gemini(messages, system_prompt)
     logger.debug(f"Conversation Context for Gemini: {messages_to_print(messages)}")

     # Get Response from Google AI
     return gemini_chat_completion_with_backoff(
-        messages=
+        messages=messages_for_gemini,
         compiled_references=references,
         online_results=online_results,
         model_name=model,
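Taken together, the gemini_chat.py hunks raise the default sampling temperature to 0.6 and move message formatting out of the completion helpers: callers now run format_messages_for_gemini once and pass the result along. A minimal sketch of the resulting call flow, assuming the helpers are imported from khoj.processor.conversation.google.utils; the sample messages and API key are illustrative:

```python
from langchain.schema import ChatMessage

from khoj.processor.conversation.google.utils import (
    format_messages_for_gemini,
    gemini_completion_with_backoff,
)

# Hypothetical chatml-style input messages
messages = [
    ChatMessage(role="system", content="You are Khoj, a helpful personal assistant."),
    ChatMessage(role="user", content="Summarize my notes on the Gemini SDK migration."),
]

# New flow per this diff: format once, then pass the Gemini-native messages
# and the extracted system prompt to the completion helper.
messages_for_gemini, system_prompt = format_messages_for_gemini(messages)
response = gemini_completion_with_backoff(
    messages=messages_for_gemini,
    system_prompt=system_prompt,
    model_name="gemini-2.0-flash",
    temperature=0.6,  # new default introduced in this diff
    api_key="<GEMINI_API_KEY>",  # placeholder
)
```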
khoj/processor/conversation/google/utils.py
CHANGED
@@ -1,15 +1,11 @@
 import logging
 import random
+from copy import deepcopy
 from threading import Thread

-
-from google.
+from google import genai
+from google.genai import types as gtypes
 from google.generativeai.types.generation_types import StopCandidateException
-from google.generativeai.types.safety_types import (
-    HarmBlockThreshold,
-    HarmCategory,
-    HarmProbability,
-)
 from langchain.schema import ChatMessage
 from tenacity import (
     before_sleep_log,
@@ -24,7 +20,6 @@ from khoj.processor.conversation.utils import (
     commit_conversation_trace,
     get_image_from_url,
 )
-from khoj.utils import state
 from khoj.utils.helpers import (
     get_chat_usage_metrics,
     is_none_or_empty,
@@ -35,6 +30,24 @@ logger = logging.getLogger(__name__)


 MAX_OUTPUT_TOKENS_GEMINI = 8192
+SAFETY_SETTINGS = [
+    gtypes.SafetySetting(
+        category=gtypes.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+        threshold=gtypes.HarmBlockThreshold.BLOCK_ONLY_HIGH,
+    ),
+    gtypes.SafetySetting(
+        category=gtypes.HarmCategory.HARM_CATEGORY_HARASSMENT,
+        threshold=gtypes.HarmBlockThreshold.BLOCK_ONLY_HIGH,
+    ),
+    gtypes.SafetySetting(
+        category=gtypes.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+        threshold=gtypes.HarmBlockThreshold.BLOCK_ONLY_HIGH,
+    ),
+    gtypes.SafetySetting(
+        category=gtypes.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+        threshold=gtypes.HarmBlockThreshold.BLOCK_ONLY_HIGH,
+    ),
+]


 @retry(
@@ -46,30 +59,19 @@ MAX_OUTPUT_TOKENS_GEMINI = 8192
 def gemini_completion_with_backoff(
     messages, system_prompt, model_name, temperature=0, api_key=None, model_kwargs=None, tracer={}
 ) -> str:
-    genai.
-
-    model_kwargs["temperature"] = temperature
-    model_kwargs["max_output_tokens"] = MAX_OUTPUT_TOKENS_GEMINI
-    model = genai.GenerativeModel(
-        model_name,
-        generation_config=model_kwargs,
+    client = genai.Client(api_key=api_key)
+    config = gtypes.GenerateContentConfig(
         system_instruction=system_prompt,
-
-
-
-            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-        },
+        temperature=temperature,
+        max_output_tokens=MAX_OUTPUT_TOKENS_GEMINI,
+        safety_settings=SAFETY_SETTINGS,
     )

-    formatted_messages = [
-
-    # Start chat session. All messages up to the last are considered to be part of the chat history
-    chat_session = model.start_chat(history=formatted_messages[0:-1])
+    formatted_messages = [gtypes.Content(role=message.role, parts=message.content) for message in messages]

     try:
-        # Generate the response
-        response =
+        # Generate the response
+        response = client.models.generate_content(model=model_name, config=config, contents=formatted_messages)
         response_text = response.text
     except StopCandidateException as e:
         response = None
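This hunk swaps the module-level google.generativeai pattern (genai configuration, GenerativeModel, chat sessions) for the google-genai 1.x client API pinned in this release's METADATA. A standalone sketch of the new call pattern, with an illustrative prompt and a placeholder key:

```python
from google import genai
from google.genai import types as gtypes

client = genai.Client(api_key="<GEMINI_API_KEY>")  # placeholder key
config = gtypes.GenerateContentConfig(
    system_instruction="You are a concise assistant.",
    temperature=0.6,
    max_output_tokens=8192,  # MAX_OUTPUT_TOKENS_GEMINI above
)
# Generation parameters, safety settings and the system prompt now all travel
# in one GenerateContentConfig instead of a GenerativeModel constructor.
response = client.models.generate_content(
    model="gemini-2.0-flash",
    config=config,
    contents=[gtypes.Content(role="user", parts=[gtypes.Part.from_text(text="Hello!")])],
)
print(response.text)
```

Note the client is constructed per call with the API key, replacing the global SDK configuration the old library required (hence the dropped khoj.utils.state import above).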
@@ -125,30 +127,21 @@ def gemini_llm_thread(
     g, messages, system_prompt, model_name, temperature, api_key, model_kwargs=None, tracer: dict = {}
 ):
     try:
-        genai.
-
-        model_kwargs["temperature"] = temperature
-        model_kwargs["max_output_tokens"] = MAX_OUTPUT_TOKENS_GEMINI
-        model_kwargs["stop_sequences"] = ["Notes:\n["]
-        model = genai.GenerativeModel(
-            model_name,
-            generation_config=model_kwargs,
+        client = genai.Client(api_key=api_key)
+        config = gtypes.GenerateContentConfig(
             system_instruction=system_prompt,
-
-
-
-
-                HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-            },
+            temperature=temperature,
+            max_output_tokens=MAX_OUTPUT_TOKENS_GEMINI,
+            stop_sequences=["Notes:\n["],
+            safety_settings=SAFETY_SETTINGS,
         )

         aggregated_response = ""
-        formatted_messages = [
+        formatted_messages = [gtypes.Content(role=message.role, parts=message.content) for message in messages]

-
-
-
-        for chunk in chat_session.send_message(formatted_messages[-1]["parts"], stream=True):
+        for chunk in client.models.generate_content_stream(
+            model=model_name, config=config, contents=formatted_messages
+        ):
             message, stopped = handle_gemini_response(chunk.candidates, chunk.prompt_feedback)
             message = message or chunk.text
             aggregated_response += message
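Streaming follows the same shape: generate_content_stream replaces the chat-session send_message(..., stream=True) call, and each streamed chunk exposes the same candidates, prompt_feedback and text surface as a full response. A short sketch, reusing the client and config objects from the previous example:

```python
aggregated_response = ""
for chunk in client.models.generate_content_stream(
    model="gemini-2.0-flash",
    config=config,
    contents=[gtypes.Content(role="user", parts=[gtypes.Part.from_text(text="Tell me a haiku.")])],
):
    # chunk.text can be None for blocked or empty chunks, hence the guard
    aggregated_response += chunk.text or ""
print(aggregated_response)
```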
@@ -177,14 +170,16 @@ def gemini_llm_thread(
     g.close()


-def handle_gemini_response(
+def handle_gemini_response(
+    candidates: list[gtypes.Candidate], prompt_feedback: gtypes.GenerateContentResponsePromptFeedback = None
+):
     """Check if Gemini response was blocked and return an explanatory error message."""
     # Check if the response was blocked due to safety concerns with the prompt
     if len(candidates) == 0 and prompt_feedback:
         message = f"\nI'd prefer to not respond to that due to **{prompt_feedback.block_reason.name}** issues with your query."
         stopped = True
     # Check if the response was blocked due to safety concerns with the generated content
-    elif candidates[0].finish_reason == FinishReason.SAFETY:
+    elif candidates[0].finish_reason == gtypes.FinishReason.SAFETY:
         message = generate_safety_response(candidates[0].safety_ratings)
         stopped = True
     # Check if finish reason is empty, therefore generation is in progress
@@ -192,7 +187,7 @@ def handle_gemini_response(candidates, prompt_feedback=None):
         message = None
         stopped = False
     # Check if the response was stopped due to reaching maximum token limit or other reasons
-    elif candidates[0].finish_reason != FinishReason.STOP:
+    elif candidates[0].finish_reason != gtypes.FinishReason.STOP:
         message = f"\nI can't talk further about that because of **{candidates[0].finish_reason.name} issue.**"
         stopped = True
     # Otherwise, the response is valid and can be used
@@ -202,18 +197,18 @@ def handle_gemini_response(candidates, prompt_feedback=None):
     return message, stopped


-def generate_safety_response(safety_ratings):
+def generate_safety_response(safety_ratings: list[gtypes.SafetyRating]):
     """Generate a conversational response based on the safety ratings of the response."""
     # Get the safety rating with the highest probability
-    max_safety_rating = sorted(safety_ratings, key=lambda x: x.probability, reverse=True)[0]
+    max_safety_rating: gtypes.SafetyRating = sorted(safety_ratings, key=lambda x: x.probability, reverse=True)[0]
     # Remove the "HARM_CATEGORY_" prefix and title case the category name
     max_safety_category = " ".join(max_safety_rating.category.name.split("_")[2:]).title()
     # Add a bit of variety to the discomfort level based on the safety rating probability
     discomfort_level = {
-        HarmProbability.HARM_PROBABILITY_UNSPECIFIED: " ",
-        HarmProbability.LOW: "a bit ",
-        HarmProbability.MEDIUM: "moderately ",
-        HarmProbability.HIGH: random.choice(["very ", "quite ", "fairly "]),
+        gtypes.HarmProbability.HARM_PROBABILITY_UNSPECIFIED: " ",
+        gtypes.HarmProbability.LOW: "a bit ",
+        gtypes.HarmProbability.MEDIUM: "moderately ",
+        gtypes.HarmProbability.HIGH: random.choice(["very ", "quite ", "fairly "]),
     }[max_safety_rating.probability]
     # Generate a response using a random response template
     safety_response_choice = random.choice(
@@ -229,9 +224,12 @@ def generate_safety_response(safety_ratings):
     )


-def format_messages_for_gemini(
+def format_messages_for_gemini(
+    original_messages: list[ChatMessage], system_prompt: str = None
+) -> tuple[list[str], str]:
     # Extract system message
     system_prompt = system_prompt or ""
+    messages = deepcopy(original_messages)
     for message in messages.copy():
         if message.role == "system":
             system_prompt += message.content
@@ -242,14 +240,16 @@ def format_messages_for_gemini(messages: list[ChatMessage], system_prompt: str =
         # Convert message content to string list from chatml dictionary list
         if isinstance(message.content, list):
             # Convert image_urls to PIL.Image and place them at beginning of list (better for Gemini)
-
-
-                if item["type"] == "image_url"
-
-
-
+            message_content = []
+            for item in sorted(message.content, key=lambda x: 0 if x["type"] == "image_url" else 1):
+                if item["type"] == "image_url":
+                    image = get_image_from_url(item["image_url"]["url"], type="bytes")
+                    message_content += [gtypes.Part.from_bytes(data=image.content, mime_type=image.type)]
+                else:
+                    message_content += [gtypes.Part.from_text(text=item.get("text", ""))]
+            message.content = message_content
         elif isinstance(message.content, str):
-            message.content = [message.content]
+            message.content = [gtypes.Part.from_text(text=message.content)]

         if message.role == "assistant":
             message.role = "model"
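For reference, a sketch of what the reworked format_messages_for_gemini produces for a mixed text-and-image chatml conversation; the image URL is a placeholder and the output shapes are inferred from the hunks above:

```python
from langchain.schema import ChatMessage

chatml_messages = [
    ChatMessage(role="system", content="Be brief."),
    ChatMessage(
        role="user",
        content=[
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/photo.png"}},
        ],
    ),
    ChatMessage(role="assistant", content="A cat."),
]

formatted, system_prompt = format_messages_for_gemini(chatml_messages)
# system_prompt == "Be brief."   (system messages are folded into the prompt)
# formatted[0].content == [Part.from_bytes(...), Part.from_text(...)]
#     (image parts are sorted to the front, which the code notes works better for Gemini)
# formatted[1].role == "model"   (assistant messages are renamed for the Gemini API)
```

The deepcopy added at the top keeps these conversions from mutating the caller's message list in place.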
khoj/processor/conversation/utils.py
CHANGED
@@ -61,6 +61,9 @@ model_to_prompt_size = {
     "gemini-1.5-pro": 60000,
     # Anthropic Models
     "claude-3-5-sonnet-20241022": 60000,
+    "claude-3-5-sonnet-latest": 60000,
+    "claude-3-7-sonnet-20250219": 60000,
+    "claude-3-7-sonnet-latest": 60000,
     "claude-3-5-haiku-20241022": 60000,
     # Offline Models
     "bartowski/Qwen2.5-14B-Instruct-GGUF": 20000,
@@ -670,10 +673,13 @@ def get_image_from_url(image_url: str, type="pil"):
     content_type = response.headers.get("content-type") or mimetypes.guess_type(image_url)[0] or "image/webp"

     # Convert image to desired format
+    image_data: Any = None
     if type == "b64":
         image_data = base64.b64encode(response.content).decode("utf-8")
     elif type == "pil":
         image_data = PIL.Image.open(BytesIO(response.content))
+    elif type == "bytes":
+        image_data = response.content
     else:
         raise ValueError(f"Invalid image type: {type}")

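The new type="bytes" branch lets callers fetch raw image bytes plus the detected MIME type without a PIL decode, which is what format_messages_for_gemini now uses to build image parts. A sketch, with the return attributes (.content, .type) inferred from that call site and a placeholder URL:

```python
from google.genai import types as gtypes

from khoj.processor.conversation.utils import get_image_from_url

image = get_image_from_url("https://example.com/photo.png", type="bytes")
part = gtypes.Part.from_bytes(data=image.content, mime_type=image.type)
```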
khoj/routers/helpers.py
CHANGED
@@ -1125,6 +1125,7 @@ async def send_message_to_model_wrapper(
     query: str,
     system_message: str = "",
     response_type: str = "text",
+    deepthought: bool = False,
     user: KhojUser = None,
     query_images: List[str] = None,
     context: str = "",
@@ -1227,6 +1228,7 @@ async def send_message_to_model_wrapper(
             api_key=api_key,
             model=chat_model_name,
             response_type=response_type,
+            deepthought=deepthought,
             tracer=tracer,
         )
     elif model_type == ChatModel.ModelType.GOOGLE:
@@ -1425,11 +1427,13 @@ def generate_chat_response(
     )

     query_to_run = q
+    deepthought = False
     if meta_research:
         query_to_run = f"<query>{q}</query>\n<collected_research>\n{meta_research}\n</collected_research>"
         compiled_references = []
         online_results = {}
         code_results = {}
+        deepthought = True

     chat_model = ConversationAdapters.get_valid_chat_model(user, conversation, is_subscribed)
     vision_available = chat_model.vision_enabled
@@ -1513,6 +1517,7 @@
             generated_files=raw_generated_files,
             generated_asset_results=generated_asset_results,
             program_execution_context=program_execution_context,
+            deepthought=deepthought,
             tracer=tracer,
         )
     elif chat_model.model_type == ChatModel.ModelType.GOOGLE:
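Net effect of these hunks: a deepthought flag is threaded from generate_chat_response down to the Anthropic completion path, and it is enabled only when research mode has collected context for the turn. A condensed view of the new control flow (names from the diff; surrounding bodies elided, and the downstream effect is in the Anthropic helpers changed elsewhere in this release, not shown in this section):

```python
deepthought = False
if meta_research:
    # Research mode gathered context for this turn, so wrap it with the query...
    query_to_run = f"<query>{q}</query>\n<collected_research>\n{meta_research}\n</collected_research>"
    # ...and opt the downstream Anthropic call into deeper reasoning.
    deepthought = True
```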
khoj/routers/research.py
CHANGED
khoj/utils/constants.py
CHANGED
@@ -48,6 +48,9 @@ model_to_cost: Dict[str, Dict[str, float]] = {
     "gemini-1.5-pro-002": {"input": 1.25, "output": 5.00},
     "gemini-2.0-flash": {"input": 0.10, "output": 0.40},
     # Anthropic Pricing: https://www.anthropic.com/pricing#anthropic-api_
-    "claude-3-5-sonnet-20241022": {"input": 3.0, "output": 15.0},
     "claude-3-5-haiku-20241022": {"input": 1.0, "output": 5.0},
+    "claude-3-5-sonnet-20241022": {"input": 3.0, "output": 15.0},
+    "claude-3-5-sonnet-latest": {"input": 3.0, "output": 15.0},
+    "claude-3-7-sonnet-20250219": {"input": 3.0, "output": 15.0},
+    "claude-3-7-sonnet-latest": {"input": 3.0, "output": 15.0},
 }
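The new entries track Anthropic's published prices in USD per million tokens ($3 input / $15 output for the Sonnet models). A hedged sketch of how such a table yields a per-request cost estimate; the helper below is hypothetical, not khoj's own accounting code:

```python
def estimate_cost_usd(model: str, input_tokens: int, output_tokens: int) -> float:
    # model_to_cost values are USD per million tokens
    price = model_to_cost[model]
    return (input_tokens * price["input"] + output_tokens * price["output"]) / 1e6

# e.g. 10k prompt tokens + 1k completion tokens on Claude 3.7 Sonnet:
# 10_000 * 3.0 / 1e6 + 1_000 * 15.0 / 1e6 = 0.045 USD
estimate_cost_usd("claude-3-7-sonnet-20250219", 10_000, 1_000)
```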
khoj/utils/helpers.py
CHANGED
@@ -368,7 +368,7 @@ command_descriptions_for_agent = {
     ConversationCommand.Code: "Agent can run Python code to parse information, run complex calculations, create documents and charts.",
 }

-e2b_tool_description = "To run Python code in a E2B sandbox with no network access. Helpful to parse complex information, run calculations, create text documents and create charts with quantitative data. Only matplotlib, pandas, numpy, scipy, bs4, sympy, einops, biopython, shapely and rdkit external packages are available."
+e2b_tool_description = "To run Python code in a E2B sandbox with no network access. Helpful to parse complex information, run calculations, create text documents and create charts with quantitative data. Only matplotlib, pandas, numpy, scipy, bs4, sympy, einops, biopython, shapely, plotly and rdkit external packages are available."
 terrarium_tool_description = "To run Python code in a Terrarium, Pyodide sandbox with no network access. Helpful to parse complex information, run complex calculations, create plaintext documents and create charts with quantitative data. Only matplotlib, panda, numpy, scipy, bs4 and sympy external packages are available."

 tool_descriptions_for_llm = {
{khoj-1.36.7.dev18.dist-info → khoj-1.36.7.dev22.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: khoj
-Version: 1.36.7.
+Version: 1.36.7.dev22
 Summary: Your Second Brain
 Project-URL: Homepage, https://khoj.dev
 Project-URL: Documentation, https://docs.khoj.dev
@@ -22,8 +22,8 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Scientific/Engineering :: Human Machine Interfaces
 Requires-Python: >=3.10
 Requires-Dist: aiohttp~=3.9.0
-Requires-Dist: anthropic==0.
-Requires-Dist: anyio
+Requires-Dist: anthropic==0.49.0
+Requires-Dist: anyio~=4.8.0
 Requires-Dist: apscheduler~=3.10.0
 Requires-Dist: authlib==1.2.1
 Requires-Dist: beautifulsoup4~=4.12.3
@@ -39,8 +39,8 @@ Requires-Dist: e2b-code-interpreter~=1.0.0
 Requires-Dist: einops==0.8.0
 Requires-Dist: email-validator==2.2.0
 Requires-Dist: fastapi>=0.110.0
-Requires-Dist: google-
-Requires-Dist: httpx==0.
+Requires-Dist: google-genai==1.5.0
+Requires-Dist: httpx==0.28.1
 Requires-Dist: huggingface-hub>=0.22.2
 Requires-Dist: itsdangerous==2.1.2
 Requires-Dist: jinja2==3.1.5
@@ -77,7 +77,7 @@ Requires-Dist: torch==2.2.2
 Requires-Dist: transformers>=4.28.0
 Requires-Dist: tzdata==2023.3
 Requires-Dist: uvicorn==0.30.6
-Requires-Dist: websockets==
+Requires-Dist: websockets==13.0
 Provides-Extra: dev
 Requires-Dist: black>=23.1.0; extra == 'dev'
 Requires-Dist: boto3>=1.34.57; extra == 'dev'