mycode-sdk 0.7.5__tar.gz → 0.8.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/PKG-INFO +1 -1
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/pyproject.toml +1 -1
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/agent.py +55 -23
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/models_catalog.json +21 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/session.py +28 -50
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/.gitignore +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/LICENSE +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/README.md +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/__init__.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/hooks.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/messages.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/models.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/providers/__init__.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/providers/anthropic_like.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/providers/base.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/providers/gemini.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/providers/openai_chat.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/providers/openai_responses.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/py.typed +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/tools.py +0 -0
- {mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/utils.py +0 -0
{mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/agent.py

@@ -23,6 +23,7 @@ from mycode.messages import (
     ConversationMessage,
     build_message,
     flatten_message_text,
+    text_block,
     tool_result_block,
     user_text_message,
 )
@@ -30,10 +31,13 @@ from mycode.models import infer_provider_from_model, resolve_model_metadata
 from mycode.providers import get_provider_adapter
 from mycode.providers.base import ProviderAdapter, ProviderRequest, ProviderStreamEvent
 from mycode.session import (
+    COMPACT_ACK,
     COMPACT_SUMMARY_PROMPT,
+    CONTINUATION_FOOTER,
+    CONTINUATION_HEADER,
     DEFAULT_COMPACT_THRESHOLD,
+    TRANSCRIPT_HINT,
     SessionStore,
-    apply_compact,
     build_compact_event,
     should_compact,
 )
@@ -500,7 +504,7 @@ class Agent:
             provider=self.provider,
             model=self.model,
             session_id=self.session_id,
-            messages=self.messages,
+            messages=self._project_for_provider(self.messages),
             system=self.system,
             tools=self.tools.definitions,
             max_tokens=self.max_tokens,
@@ -633,7 +637,12 @@ class Agent:
         try:
             async for event in self._compact(adapter, persist):
                 yield event
-        except
+        except asyncio.CancelledError:
+            yield Event("error", {"message": "cancelled"})
+            return
+        except Exception:
+            # Best-effort: transient failures retry next threshold check;
+            # persistent ones surface from phase 1 of the next turn.
             logger.warning(
                 "Context compaction failed, continuing without compaction",
                 exc_info=True,
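The split except clauses above follow a general pattern for async generators: handle cancellation first and surface it as an event, then fall through to a broad handler that logs and carries on. A minimal self-contained sketch of the pattern (the names here are illustrative, not from mycode):

import asyncio
from typing import AsyncIterator

async def guarded(work: AsyncIterator[str]) -> AsyncIterator[str]:
    # Pattern from the hunk above: report cancellation as an event and stop;
    # swallow other failures so the surrounding loop can retry later.
    try:
        async for item in work:
            yield item
    except asyncio.CancelledError:
        yield "error: cancelled"
        return
    except Exception:
        yield "warning: step failed, continuing"

async def flaky() -> AsyncIterator[str]:
    yield "step 1"
    raise RuntimeError("boom")

async def main() -> None:
    async for event in guarded(flaky()):
        print(event)  # "step 1", then "warning: step failed, continuing"

asyncio.run(main())

Since Python 3.8, asyncio.CancelledError subclasses BaseException rather than Exception, so a broad except Exception alone would never see a cancellation; catching it explicitly is what lets the agent emit the error event before stopping.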
@@ -678,17 +687,52 @@ class Agent:
     # Context compaction
     # ------------------------------------------------------------------
 
+    def _project_for_provider(
+        self,
+        messages: list[ConversationMessage],
+    ) -> list[ConversationMessage]:
+        """Replace pre-compact history with a summary continuation."""
+
+        last_compact = -1
+        for i, message in enumerate(messages):
+            if message.get("role") == "compact":
+                last_compact = i
+
+        if last_compact < 0:
+            return messages
+
+        summary_text = ""
+        for block in messages[last_compact].get("content") or []:
+            if isinstance(block, dict) and block.get("type") == "text":
+                summary_text = str(block.get("text") or "")
+                break
+
+        tail = [m for m in messages[last_compact + 1 :] if m.get("role") != "compact"]
+        # No tail or assistant-led tail = mid-loop; append a resume instruction
+        # and skip the ack. A user-led tail needs the ack to keep alternation.
+        continue_now = not tail or tail[0].get("role") == "assistant"
+
+        parts = [CONTINUATION_HEADER, summary_text]
+        if self._store and self.session_id:
+            parts.append(TRANSCRIPT_HINT.format(path=self._store.messages_path(self.session_id)))
+        if continue_now:
+            parts.append(CONTINUATION_FOOTER)
+
+        projected = [build_message("user", [text_block("\n\n".join(parts))])]
+        if not continue_now:
+            projected.append(build_message("assistant", [text_block(COMPACT_ACK)]))
+        projected.extend(tail)
+        return projected
+
     async def _compact(
         self,
         adapter: ProviderAdapter,
         persist: PersistCallback,
     ) -> AsyncIterator[Event]:
-        """Generate a conversation summary and
-
-        compacted_count = len(self.messages)
+        """Generate a conversation summary and append a compact marker."""
 
         # Ask the same provider for a summary — no tools, just text generation.
-        compact_messages =
+        compact_messages = self._project_for_provider(self.messages) + [user_text_message(COMPACT_SUMMARY_PROMPT)]
         request = ProviderRequest(
             provider=self.provider,
             model=self.model,
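To make the projection concrete, here is a standalone re-creation of _project_for_provider on plain dicts (the transcript hint and the real ConversationMessage helpers are left out; the constants are shortened stand-ins for the session.py strings shown further down):

HEADER = "This session is being continued from a previous conversation..."
FOOTER = "Resume directly from where the work left off..."
ACK = "Acknowledged."

def project(messages: list[dict]) -> list[dict]:
    # Mirrors Agent._project_for_provider, minus the transcript hint.
    last = max((i for i, m in enumerate(messages) if m["role"] == "compact"), default=-1)
    if last < 0:
        return messages
    summary = next(
        (b["text"] for b in messages[last]["content"] if b.get("type") == "text"), ""
    )
    tail = [m for m in messages[last + 1 :] if m["role"] != "compact"]
    continue_now = not tail or tail[0]["role"] == "assistant"
    parts = [HEADER, summary] + ([FOOTER] if continue_now else [])
    out = [{"role": "user", "content": [{"type": "text", "text": "\n\n".join(parts)}]}]
    if not continue_now:
        out.append({"role": "assistant", "content": [{"type": "text", "text": ACK}]})
    return out + tail

history = [
    {"role": "user", "content": [{"type": "text", "text": "old request"}]},
    {"role": "compact", "content": [{"type": "text", "text": "Summary of earlier work."}]},
    {"role": "user", "content": [{"type": "text", "text": "new question"}]},
]
# User-led tail: the ack is inserted to preserve user/assistant alternation.
assert [m["role"] for m in project(history)] == ["user", "assistant", "user"]

With an empty or assistant-led tail the same function instead appends the resume footer to the summary message and skips the ack, matching the continue_now branch in the diff.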
@@ -711,34 +755,22 @@ class Agent:
                 summary_message = msg
 
         if not summary_message:
-
-            return
+            raise ValueError("compaction produced no response")
 
         summary_text = flatten_message_text(summary_message, include_thinking=False)
         if not summary_text:
-
-            return
+            raise ValueError("compaction produced empty summary")
 
         summary_total_tokens = (summary_message.get("meta") or {}).get("total_tokens")
         compact_event = build_compact_event(
             summary_text,
             provider=self.provider,
             model=self.model,
-            compacted_count=compacted_count,
             total_tokens=summary_total_tokens,
         )
 
         # Persist the compact event (append-only — original messages stay in JSONL).
         await persist(compact_event)
-
-        # Rebuild in-memory messages from the compact event.
         self.messages.append(compact_event)
-
-
-        yield Event(
-            "compact",
-            {
-                "message": f"Context compacted ({compacted_count} messages → summary)",
-                "compacted_count": compacted_count,
-            },
-        )
+
+        yield Event("compact", {})
{mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/models_catalog.json

@@ -794,6 +794,13 @@
     "supports_pdf_input": true,
     "supports_reasoning": true
   },
+  "gpt-5.5-pro": {
+    "context_window": 1050000,
+    "max_output_tokens": 128000,
+    "supports_image_input": true,
+    "supports_pdf_input": true,
+    "supports_reasoning": true
+  },
   "gpt-image-1": {
     "context_window": 0,
     "max_output_tokens": 0,
@@ -1545,6 +1552,13 @@
     "supports_pdf_input": false,
     "supports_reasoning": true
   },
+  "nvidia/nemotron-3-nano-omni-30b-a3b-reasoning:free": {
+    "context_window": 256000,
+    "max_output_tokens": 65536,
+    "supports_image_input": true,
+    "supports_pdf_input": false,
+    "supports_reasoning": true
+  },
   "nvidia/nemotron-3-super-120b-a12b": {
     "context_window": 262144,
     "max_output_tokens": 262144,
@@ -1755,6 +1769,13 @@
     "supports_pdf_input": true,
     "supports_reasoning": true
   },
+  "openai/gpt-5.5-pro": {
+    "context_window": 1050000,
+    "max_output_tokens": 128000,
+    "supports_image_input": true,
+    "supports_pdf_input": true,
+    "supports_reasoning": true
+  },
   "openai/gpt-oss-120b": {
     "context_window": 131072,
     "max_output_tokens": 32768,
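The catalog itself is a flat map from model id to capability fields. How the SDK reads it is not shown in this diff; a minimal sketch of a lookup, assuming the JSON ships inside the package (the helper name here is hypothetical, not the SDK's resolve_model_metadata):

import json
from functools import lru_cache
from importlib import resources

@lru_cache(maxsize=1)
def _load_catalog() -> dict:
    # Read the bundled catalog once; entries look like
    # {"context_window": ..., "max_output_tokens": ..., "supports_reasoning": ...}.
    text = resources.files("mycode").joinpath("models_catalog.json").read_text()
    return json.loads(text)

def lookup_model(model: str) -> dict:
    # Hypothetical helper: exact id match, else an empty capability record.
    # The SDK's real resolution may normalize ids or apply provider fallbacks.
    return _load_catalog().get(model, {})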
{mycode_sdk-0.7.5 → mycode_sdk-0.8.0}/src/mycode/session.py

@@ -24,7 +24,7 @@ from mycode.messages import ConversationMessage, build_message, flatten_message_
 # Session format and compacting defaults
 # ---------------------------------------------------------------------
 
-MESSAGE_FORMAT_VERSION =
+MESSAGE_FORMAT_VERSION = 7
 DEFAULT_COMPACT_THRESHOLD = 0.8
 DEFAULT_SESSION_TITLE = "New chat"
 
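should_compact is imported by agent.py but its body is not in this diff. A plausible reading of DEFAULT_COMPACT_THRESHOLD = 0.8 is a fractional context-window trigger; a sketch under that assumption only:

def should_compact(
    total_tokens: int,
    context_window: int,
    threshold: float = 0.8,  # DEFAULT_COMPACT_THRESHOLD
) -> bool:
    # Hypothetical: trigger once token usage crosses the threshold fraction.
    # The real should_compact may also reserve headroom for output tokens.
    return context_window > 0 and total_tokens >= threshold * context_window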
@@ -35,26 +35,38 @@ capture everything needed to continue the work seamlessly.
 
 Include:
 
-1. **
+1. **Task and Intent**: Describe the user's overall goal — what is being \
+built, fixed, or investigated, and why.
+2. **Decisions and Constraints**: List the decisions made, constraints \
+discovered, and approaches chosen or rejected, with the reasoning behind \
+each.
+3. **User Requests**: Every distinct request or instruction the user gave, \
 in chronological order. Preserve the user's original wording for ambiguous \
 or nuanced requests.
-
-
-
-
-
-
-
-
+4. **Files and Changes**: Enumerate every file read, modified, or created \
+— paths, what changed, and any code snippets the next turn will need to \
+reason about, quoted verbatim.
+5. **Errors and Fixes**: List errors encountered with the original message \
+verbatim, the cause if known, and the resolution — or that it remains open.
+6. **Current State**: What is verified working, what is known broken, what \
+is in progress.
+7. **Next Step**: The next step to take, with a direct quote from the most \
+recent conversation showing where the work left off.
 
 Rules:
-- Be specific:
-
+- Be specific: reproduce file paths, function names, error messages, and \
+other identifiers verbatim — never paraphrase them.
 - Do not add suggestions or opinions — only summarize what happened.
 - Keep it concise but complete.\
 """
 
-
+CONTINUATION_HEADER = "This session is being continued from a previous conversation that was compacted to fit the context window. The summary below covers the earlier portion of the conversation."
+
+TRANSCRIPT_HINT = "For verbatim details not captured in this summary (exact code snippets, error messages, or earlier output), read the original conversation log at: {path}"
+
+CONTINUATION_FOOTER = 'Resume directly from where the work left off. Do not acknowledge this summary, do not recap, and do not preface with "I\'ll continue" or similar.'
+
+COMPACT_ACK = "Acknowledged."
 
 
 # ---------------------------------------------------------------------
@@ -88,7 +100,6 @@ def build_compact_event(
     *,
     provider: str,
     model: str,
-    compacted_count: int,
     total_tokens: int | None = None,
 ) -> ConversationMessage:
     """Build the compact event stored in session JSONL."""
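Given build_compact_event below and the role/content checks in _project_for_provider, a compact marker in the session JSONL plausibly looks like the record sketched here (shape inferred from the diff; the concrete values are invented for illustration):

# Shape of a persisted compact marker, inferred from build_compact_event and
# the role/content checks in _project_for_provider; values are invented.
compact_event = {
    "role": "compact",
    "content": [{"type": "text", "text": "1. **Task and Intent**: ..."}],
    "meta": {"provider": "openai", "model": "gpt-5.5-pro", "total_tokens": 1843},
}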
@@ -96,43 +107,12 @@ def build_compact_event(
     meta: dict[str, Any] = {
         "provider": provider,
         "model": model,
-        "compacted_count": compacted_count,
     }
     if total_tokens is not None:
         meta["total_tokens"] = total_tokens
     return build_message("compact", [text_block(summary_text)], meta=meta)
 
 
-def apply_compact(messages: list[ConversationMessage]) -> list[ConversationMessage]:
-    """Replace the latest compact event with a summary + synthetic ack."""
-
-    # Only the newest compact event matters. Older history before it is no
-    # longer visible once the summary replaces that earlier conversation.
-    last_compact_index: int | None = None
-    for index, message in enumerate(messages):
-        if message.get("role") == "compact":
-            last_compact_index = index
-
-    if last_compact_index is None:
-        return messages
-
-    summary_text = ""
-    for block in messages[last_compact_index].get("content") or []:
-        if isinstance(block, dict) and block.get("type") == "text":
-            summary_text = str(block.get("text") or "")
-            break
-
-    return [
-        build_message(
-            "user",
-            [text_block(f"[Conversation Summary]\n\n{summary_text}")],
-            meta={"synthetic": True},
-        ),
-        build_message("assistant", [text_block(_COMPACT_ACK)], meta={"synthetic": True}),
-        *messages[last_compact_index + 1 :],
-    ]
-
-
 def build_rewind_event(rewind_to: int) -> ConversationMessage:
     """Build a rewind marker to append to session JSONL."""
 
@@ -312,13 +292,11 @@ class SessionStore:
         except FileNotFoundError:
             pass
 
-        #
-        #
-        # 2) rewind truncates that visible list by message index
+        # Visible state = raw JSONL minus rewound tails. `compact` markers
+        # stay inline; the agent substitutes them when calling the provider.
         # Orphan tool_use blocks (e.g. left open by a server crash) are
         # closed by the provider adapter at replay time, not here.
-        visible_messages =
-        visible_messages = apply_rewind(visible_messages)
+        visible_messages = apply_rewind(raw_messages)
 
         return {"session": self._summary(session_id, meta), "messages": visible_messages}
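After this change, apply_rewind is the only transform still applied at load time; its body is not in this diff. A hypothetical reconstruction from build_rewind_event(rewind_to: int) and the old comment "rewind truncates that visible list by message index" (the marker role and meta key are guesses):

def apply_rewind(messages: list[dict]) -> list[dict]:
    # Hypothetical reconstruction: each rewind marker truncates the
    # visible list back to the recorded message index.
    visible: list[dict] = []
    for message in messages:
        if message.get("role") == "rewind":
            visible = visible[: (message.get("meta") or {}).get("rewind_to", 0)]
        else:
            visible.append(message)
    return visible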