@1mancompany/onemancompany 0.7.67 → 0.7.69
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED
package/pyproject.toml
CHANGED
|
@@ -221,6 +221,11 @@ def make_llm(employee_id: str = "", temperature: float | None = None) -> BaseCha
|
|
|
221
221
|
base_url = settings.openrouter_base_url
|
|
222
222
|
elif api_provider == "custom" or (settings.default_api_base_url and api_provider == settings.default_api_provider):
|
|
223
223
|
base_url = settings.default_api_base_url
|
|
224
|
+
extra_body = None
|
|
225
|
+
if (api_provider or "").lower() == "deepseek":
|
|
226
|
+
# DeepSeek V4 thinking mode currently requires reasoning_content
|
|
227
|
+
# replay across tool calls, which LangChain does not preserve.
|
|
228
|
+
extra_body = {"thinking": {"type": "disabled"}}
|
|
224
229
|
return ChatOpenAI(
|
|
225
230
|
model=model,
|
|
226
231
|
api_key=effective_key,
|
|
@@ -229,6 +234,7 @@ def make_llm(employee_id: str = "", temperature: float | None = None) -> BaseCha
|
|
|
229
234
|
max_retries=3,
|
|
230
235
|
request_timeout=300.0,
|
|
231
236
|
stream_usage=True,
|
|
237
|
+
extra_body=extra_body,
|
|
232
238
|
)
|
|
233
239
|
|
|
234
240
|
# --- Fallback: unknown provider or no key → fall back to openrouter with default model ---
|
|
@@ -958,6 +964,17 @@ class BaseAgentRunner:
|
|
|
958
964
|
now = datetime.now()
|
|
959
965
|
parts.append(f"- Current time: {now.strftime('%Y-%m-%d %H:%M')}")
|
|
960
966
|
|
|
967
|
+
# Runtime model identity. This helps model-agnostic providers answer
|
|
968
|
+
# direct CEO questions about their configured runtime without guessing.
|
|
969
|
+
cfg = employee_configs.get(self.employee_id)
|
|
970
|
+
provider = (cfg.api_provider if cfg and cfg.api_provider else _cfg.settings.default_api_provider) or "unknown"
|
|
971
|
+
model = (cfg.llm_model if cfg and cfg.llm_model else _cfg.settings.default_llm_model) or "unknown"
|
|
972
|
+
parts.append(f"- Runtime LLM: provider={provider}, model={model}")
|
|
973
|
+
parts.append(
|
|
974
|
+
"- If the CEO asks what model/provider you are, answer using Runtime LLM above. "
|
|
975
|
+
"Do not infer or claim a different vendor from your role, tools, or framework."
|
|
976
|
+
)
|
|
977
|
+
|
|
961
978
|
# Team roster summary (compact)
|
|
962
979
|
from onemancompany.core.store import load_all_employees
|
|
963
980
|
all_emps = load_all_employees()
|
|
@@ -1243,4 +1260,3 @@ class EmployeeAgent(BaseAgentRunner):
|
|
|
1243
1260
|
self._set_status(STATUS_IDLE)
|
|
1244
1261
|
await self._publish("agent_done", {"role": self.role, "summary": final[:MAX_SUMMARY_LEN]})
|
|
1245
1262
|
return final
|
|
1246
|
-
|
|
@@ -102,10 +102,18 @@ def _build_conversation_prompt(
|
|
|
102
102
|
conversation: Conversation, messages: list[Message], new_message: Message,
|
|
103
103
|
) -> str:
|
|
104
104
|
"""Build a prompt with conversation history for the executor."""
|
|
105
|
-
from onemancompany.core.config import get_workspace_dir
|
|
105
|
+
from onemancompany.core.config import employee_configs, get_workspace_dir, settings
|
|
106
106
|
|
|
107
107
|
lines = []
|
|
108
108
|
lines.append("You are in a conversation with the CEO.")
|
|
109
|
+
cfg = employee_configs.get(conversation.employee_id)
|
|
110
|
+
provider = (cfg.api_provider if cfg and cfg.api_provider else settings.default_api_provider) or "unknown"
|
|
111
|
+
model = (cfg.llm_model if cfg and cfg.llm_model else settings.default_llm_model) or "unknown"
|
|
112
|
+
lines.append(f"Runtime LLM: provider={provider}, model={model}.")
|
|
113
|
+
lines.append(
|
|
114
|
+
"If the CEO asks what model/provider you are, answer from Runtime LLM above. "
|
|
115
|
+
"Do not infer or claim a different vendor from your role, tools, or framework."
|
|
116
|
+
)
|
|
109
117
|
if conversation.type == ConversationType.ONE_ON_ONE:
|
|
110
118
|
lines.append("This is a 1-on-1 meeting. Be direct and professional.")
|
|
111
119
|
workspace_dir = get_workspace_dir(conversation.employee_id).resolve()
|
|
@@ -96,7 +96,18 @@ def _load_llm(profile: dict):
|
|
|
96
96
|
base_url = prov[0]
|
|
97
97
|
if provider_name == "openrouter":
|
|
98
98
|
base_url = os.environ.get("OPENROUTER_BASE_URL", base_url)
|
|
99
|
-
|
|
99
|
+
extra_body = None
|
|
100
|
+
if (provider_name or "").lower() == "deepseek":
|
|
101
|
+
# DeepSeek V4 thinking mode requires reasoning_content replay
|
|
102
|
+
# across tool calls; LangChain does not preserve it yet.
|
|
103
|
+
extra_body = {"thinking": {"type": "disabled"}}
|
|
104
|
+
return ChatOpenAI(
|
|
105
|
+
model=model,
|
|
106
|
+
api_key=key,
|
|
107
|
+
base_url=base_url,
|
|
108
|
+
temperature=temperature,
|
|
109
|
+
extra_body=extra_body,
|
|
110
|
+
)
|
|
100
111
|
|
|
101
112
|
# Unknown provider — fall back to OpenRouter
|
|
102
113
|
key = api_key or os.environ.get("OPENROUTER_API_KEY", "")
|