aient-1.0.66-py3-none-any.whl → aient-1.0.68-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/request.py CHANGED
@@ -604,6 +604,253 @@ async def get_vertex_claude_payload(request, engine, provider, api_key=None):
 
     return url, headers, payload
 
+import hashlib
+import hmac
+import datetime
+import urllib.parse
+from datetime import timezone
+
+def sign(key, msg):
+    return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
+
+def get_signature_key(key, date_stamp, region_name, service_name):
+    k_date = sign(('AWS4' + key).encode('utf-8'), date_stamp)
+    k_region = sign(k_date, region_name)
+    k_service = sign(k_region, service_name)
+    k_signing = sign(k_service, 'aws4_request')
+    return k_signing
+
+def get_signature(request_body, model_id, aws_access_key, aws_secret_key, aws_region, host, content_type, accept_header):
+    request_body = json.dumps(request_body)
+    SERVICE = "bedrock"
+    canonical_querystring = ''
+    method = 'POST'
+    raw_path = f'/model/{model_id}/invoke-with-response-stream'
+    canonical_uri = urllib.parse.quote(raw_path, safe='/-_.~')
+    # Create a date for headers and the credential string
+    t = datetime.datetime.now(timezone.utc)
+    amz_date = t.strftime('%Y%m%dT%H%M%SZ')
+    date_stamp = t.strftime('%Y%m%d') # Date YYYYMMDD
+
+    # --- Task 1: Create a Canonical Request ---
+    payload_hash = hashlib.sha256(request_body.encode('utf-8')).hexdigest()
+
+    canonical_headers = f'accept:{accept_header}\n' \
+                        f'content-type:{content_type}\n' \
+                        f'host:{host}\n' \
+                        f'x-amz-bedrock-accept:{accept_header}\n' \
+                        f'x-amz-content-sha256:{payload_hash}\n' \
+                        f'x-amz-date:{amz_date}\n'
+    # Note: header names must be sorted alphabetically
+
+    signed_headers = 'accept;content-type;host;x-amz-bedrock-accept;x-amz-content-sha256;x-amz-date'  # sorted alphabetically
+
+    canonical_request = f'{method}\n' \
+                        f'{canonical_uri}\n' \
+                        f'{canonical_querystring}\n' \
+                        f'{canonical_headers}\n' \
+                        f'{signed_headers}\n' \
+                        f'{payload_hash}'
+
+    # --- Task 2: Create the String to Sign ---
+    algorithm = 'AWS4-HMAC-SHA256'
+    credential_scope = f'{date_stamp}/{aws_region}/{SERVICE}/aws4_request'
+    string_to_sign = f'{algorithm}\n' \
+                     f'{amz_date}\n' \
+                     f'{credential_scope}\n' \
+                     f'{hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()}'
+
+    # --- Task 3: Calculate the Signature ---
+    signing_key = get_signature_key(aws_secret_key, date_stamp, aws_region, SERVICE)
+    signature = hmac.new(signing_key, string_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
+
+    # --- Task 4: Add Signing Information to the Request ---
+    authorization_header = f'{algorithm} Credential={aws_access_key}/{credential_scope}, SignedHeaders={signed_headers}, Signature={signature}'
+    return amz_date, payload_hash, authorization_header
+
+async def get_aws_payload(request, engine, provider, api_key=None):
+    CONTENT_TYPE = "application/json"
+    # AWS_REGION = "us-east-1"
+    model_dict = get_model_dict(provider)
+    original_model = model_dict[request.model]
+    # MODEL_ID = "anthropic.claude-3-5-sonnet-20240620-v1:0"
+    base_url = provider.get('base_url')
+    AWS_REGION = base_url.split('.')[1]
+    HOST = f"bedrock-runtime.{AWS_REGION}.amazonaws.com"
+    # url = f"{base_url}/model/{original_model}/invoke"
+    url = f"{base_url}/model/{original_model}/invoke-with-response-stream"
+
+    # if "claude-3-5-sonnet" in original_model or "claude-3-7-sonnet" in original_model:
+    #     location = c35s
+    # elif "claude-3-opus" in original_model:
+    #     location = c3o
+    # elif "claude-3-sonnet" in original_model:
+    #     location = c3s
+    # elif "claude-3-haiku" in original_model:
+    #     location = c3h
+
+    # claude_stream = "streamRawPredict"
+    # url = "https://{LOCATION}-aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/anthropic/models/{MODEL}:{stream}".format(
+    #     LOCATION=await location.next(),
+    #     PROJECT_ID=project_id,
+    #     MODEL=original_model,
+    #     stream=claude_stream
+    # )
+
+    messages = []
+    system_prompt = None
+    tool_id = None
+    for msg in request.messages:
+        tool_call_id = None
+        tool_calls = None
+        if isinstance(msg.content, list):
+            content = []
+            for item in msg.content:
+                if item.type == "text":
+                    text_message = await get_text_message(item.text, engine)
+                    content.append(text_message)
+                elif item.type == "image_url" and provider.get("image", True):
+                    image_message = await get_image_message(item.image_url.url, engine)
+                    content.append(image_message)
+        else:
+            content = msg.content
+            tool_calls = msg.tool_calls
+            tool_id = tool_calls[0].id if tool_calls else None or tool_id
+            tool_call_id = msg.tool_call_id
+
+        if tool_calls:
+            tool_calls_list = []
+            tool_call = tool_calls[0]
+            tool_calls_list.append({
+                "type": "tool_use",
+                "id": tool_call.id,
+                "name": tool_call.function.name,
+                "input": json.loads(tool_call.function.arguments),
+            })
+            messages.append({"role": msg.role, "content": tool_calls_list})
+        elif tool_call_id:
+            messages.append({"role": "user", "content": [{
+                "type": "tool_result",
+                "tool_use_id": tool_id,
+                "content": content
+            }]})
+        elif msg.role == "function":
+            messages.append({"role": "assistant", "content": [{
+                "type": "tool_use",
+                "id": "toolu_017r5miPMV6PGSNKmhvHPic4",
+                "name": msg.name,
+                "input": {"prompt": "..."}
+            }]})
+            messages.append({"role": "user", "content": [{
+                "type": "tool_result",
+                "tool_use_id": "toolu_017r5miPMV6PGSNKmhvHPic4",
+                "content": msg.content
+            }]})
+        elif msg.role != "system":
+            messages.append({"role": msg.role, "content": content})
+        elif msg.role == "system":
+            system_prompt = content
+
+    conversation_len = len(messages) - 1
+    message_index = 0
+    while message_index < conversation_len:
+        if messages[message_index]["role"] == messages[message_index + 1]["role"]:
+            if messages[message_index].get("content"):
+                if isinstance(messages[message_index]["content"], list):
+                    messages[message_index]["content"].extend(messages[message_index + 1]["content"])
+                elif isinstance(messages[message_index]["content"], str) and isinstance(messages[message_index + 1]["content"], list):
+                    content_list = [{"type": "text", "text": messages[message_index]["content"]}]
+                    content_list.extend(messages[message_index + 1]["content"])
+                    messages[message_index]["content"] = content_list
+                else:
+                    messages[message_index]["content"] += messages[message_index + 1]["content"]
+            messages.pop(message_index + 1)
+            conversation_len = conversation_len - 1
+        else:
+            message_index = message_index + 1
+
+    # if "claude-3-7-sonnet" in original_model:
+    #     max_tokens = 20000
+    # elif "claude-3-5-sonnet" in original_model:
+    #     max_tokens = 8192
+    # else:
+    #     max_tokens = 4096
+    max_tokens = 4096
+
+    payload = {
+        "messages": messages,
+        "anthropic_version": "bedrock-2023-05-31",
+        "max_tokens": max_tokens,
+    }
+
+    # payload = {
+    #     "anthropic_version": "vertex-2023-10-16",
+    #     "messages": messages,
+    #     "system": system_prompt or "You are Claude, a large language model trained by Anthropic.",
+    #     "max_tokens": max_tokens,
+    # }
+
+    if request.max_tokens:
+        payload["max_tokens"] = int(request.max_tokens)
+
+    miss_fields = [
+        'model',
+        'messages',
+        'presence_penalty',
+        'frequency_penalty',
+        'n',
+        'user',
+        'include_usage',
+        'stream_options',
+        'stream',
+    ]
+
+    for field, value in request.model_dump(exclude_unset=True).items():
+        if field not in miss_fields and value is not None:
+            payload[field] = value
+
+    if request.tools and provider.get("tools"):
+        tools = []
+        for tool in request.tools:
+            json_tool = await gpt2claude_tools_json(tool.dict()["function"])
+            tools.append(json_tool)
+        payload["tools"] = tools
+        if "tool_choice" in payload:
+            if isinstance(payload["tool_choice"], dict):
+                if payload["tool_choice"]["type"] == "function":
+                    payload["tool_choice"] = {
+                        "type": "tool",
+                        "name": payload["tool_choice"]["function"]["name"]
+                    }
+            if isinstance(payload["tool_choice"], str):
+                if payload["tool_choice"] == "auto":
+                    payload["tool_choice"] = {
+                        "type": "auto"
+                    }
+                if payload["tool_choice"] == "none":
+                    payload["tool_choice"] = {
+                        "type": "any"
+                    }
+
+    if provider.get("tools") == False:
+        payload.pop("tools", None)
+        payload.pop("tool_choice", None)
+
+    if provider.get("aws_access_key") and provider.get("aws_secret_key"):
+        ACCEPT_HEADER = "application/vnd.amazon.bedrock.payload+json"  # request the Bedrock streaming payload format
+        amz_date, payload_hash, authorization_header = get_signature(payload, original_model, provider.get("aws_access_key"), provider.get("aws_secret_key"), AWS_REGION, HOST, CONTENT_TYPE, ACCEPT_HEADER)
+        headers = {
+            'Accept': ACCEPT_HEADER,
+            'Content-Type': CONTENT_TYPE,
+            'X-Amz-Date': amz_date,
+            'X-Amz-Bedrock-Accept': ACCEPT_HEADER,  # Bedrock-specific header
+            'X-Amz-Content-Sha256': payload_hash,
+            'Authorization': authorization_header,
+            # Add 'X-Amz-Security-Token': SESSION_TOKEN if using temporary credentials
+        }
+
+    return url, headers, payload
+
 async def get_gpt_payload(request, engine, provider, api_key=None):
     headers = {
         'Content-Type': 'application/json',
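
For orientation, here is a minimal sketch of how the SigV4 helpers added above could be exercised on their own. The credentials, region, model id and request body below are placeholders, and the snippet assumes the `get_signature` definition from this hunk (and the module-level `json` import of `request.py`) is in scope.

```python
# Hedged, standalone illustration; all values are placeholders.
body = {
    "messages": [{"role": "user", "content": "ping"}],
    "anthropic_version": "bedrock-2023-05-31",
    "max_tokens": 16,
}
model_id = "anthropic.claude-3-5-sonnet-20240620-v1:0"   # example Bedrock model id
region = "us-east-1"
host = f"bedrock-runtime.{region}.amazonaws.com"

amz_date, payload_hash, authorization = get_signature(
    body, model_id, "AKIA-EXAMPLE", "example-secret", region, host,
    "application/json", "application/vnd.amazon.bedrock.payload+json",
)

# These three values are exactly what get_aws_payload places into the request
# headers (X-Amz-Date, X-Amz-Content-Sha256, Authorization). Note the signature
# covers sha256(json.dumps(body)); if the HTTP client later serializes the body
# differently, the hash it sends will no longer match the signed one.
print(amz_date, payload_hash[:16], authorization[:60], sep="\n")
```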
@@ -1362,6 +1609,8 @@ async def get_payload(request: RequestModel, engine, provider, api_key=None):
         return await get_gemini_payload(request, engine, provider, api_key)
     elif engine == "vertex-gemini":
         return await get_vertex_gemini_payload(request, engine, provider, api_key)
+    elif engine == "aws":
+        return await get_aws_payload(request, engine, provider, api_key)
     elif engine == "vertex-claude":
         return await get_vertex_claude_payload(request, engine, provider, api_key)
     elif engine == "azure":
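
Putting the dispatch together: once `get_engine` (see utils.py below) maps a Bedrock host to the `aws` engine, `get_payload` hands the request to `get_aws_payload`. A hypothetical provider entry might look like the sketch below; the key names mirror the `provider.get(...)` calls in this diff, while the `model` mapping shape is an assumption.

```python
# Hypothetical provider config (illustration only; field names follow the
# provider.get(...) calls above, the model-list shape is assumed).
provider = {
    "base_url": "https://bedrock-runtime.us-east-1.amazonaws.com",
    "aws_access_key": "AKIA-EXAMPLE",      # placeholder
    "aws_secret_key": "example-secret",    # placeholder
    "model": ["anthropic.claude-3-5-sonnet-20240620-v1:0"],
    "tools": True,
}

# get_aws_payload derives the region from the second dot-separated segment of
# base_url ("us-east-1" here) and targets .../invoke-with-response-stream, so
# base_url should point at the regional bedrock-runtime endpoint:
#     url, headers, payload = await get_payload(request, "aws", provider)
```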
aient/core/response.py CHANGED
@@ -1,6 +1,8 @@
+import re
 import json
 import random
 import string
+import base64
 from datetime import datetime
 
 from .log_config import logger
@@ -446,6 +448,53 @@ async def fetch_claude_response_stream(client, url, headers, payload, model):
             yield sse_string
     yield "data: [DONE]" + end_of_line
 
+async def fetch_aws_response_stream(client, url, headers, payload, model):
+    timestamp = int(datetime.timestamp(datetime.now()))
+    async with client.stream('POST', url, headers=headers, json=payload) as response:
+        error_message = await check_response(response, "fetch_aws_response_stream")
+        if error_message:
+            yield error_message
+            return
+
+        async for line in response.aiter_text():
+            if not line or \
+               line.strip() == "" or \
+               line.strip().startswith(':content-type') or \
+               line.strip().startswith(':event-type'):  # skip lines that are completely empty or whitespace-only
+                continue
+
+            json_match = re.search(r'event{.*?}', line)
+            if not json_match:
+                continue
+            try:
+                chunk_data = json.loads(json_match.group(0).lstrip('event'))
+            except json.JSONDecodeError:
+                logger.error(f"DEBUG json.JSONDecodeError: {json_match.group(0).lstrip('event')!r}")
+                continue
+
+            # --- downstream handling unchanged ---
+            if "bytes" in chunk_data:
+                # decode the Base64-encoded bytes
+                decoded_bytes = base64.b64decode(chunk_data["bytes"])
+                # parse the decoded bytes as JSON again
+                payload_chunk = json.loads(decoded_bytes.decode('utf-8'))
+                # print(f"DEBUG payload_chunk: {payload_chunk!r}")
+
+                text = safe_get(payload_chunk, "delta", "text", default="")
+                if text:
+                    sse_string = await generate_sse_response(timestamp, model, text, None, None)
+                    yield sse_string
+
+                usage = safe_get(payload_chunk, "amazon-bedrock-invocationMetrics", default="")
+                if usage:
+                    input_tokens = usage.get("inputTokenCount", 0)
+                    output_tokens = usage.get("outputTokenCount", 0)
+                    total_tokens = input_tokens + output_tokens
+                    sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, total_tokens, input_tokens, output_tokens)
+                    yield sse_string
+
+    yield "data: [DONE]" + end_of_line
+
 async def fetch_response(client, url, headers, payload, engine, model):
     response = None
     if payload.get("file"):
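
To make the parsing above concrete, here is a small self-contained sketch of the per-chunk decoding that `fetch_aws_response_stream` performs: extract the `event{...}` JSON from a line, read the base64 `bytes` envelope, and decode it back into the Anthropic-style delta. The raw line below is fabricated for illustration; only the field names mirror what the handler reads.

```python
import base64
import json
import re

# Fabricated example of a line as fetch_aws_response_stream would see it.
inner = json.dumps({"type": "content_block_delta",
                    "delta": {"type": "text_delta", "text": "Hello"}})
line = 'event{"bytes": "%s"}' % base64.b64encode(inner.encode("utf-8")).decode()

json_match = re.search(r'event{.*?}', line)            # same pattern as above
chunk_data = json.loads(json_match.group(0).lstrip('event'))

payload_chunk = json.loads(base64.b64decode(chunk_data["bytes"]).decode("utf-8"))
print(payload_chunk["delta"]["text"])                  # -> Hello
```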
@@ -461,7 +510,7 @@ async def fetch_response(client, url, headers, payload, engine, model):
     if engine == "tts":
         yield response.read()
 
-    elif engine == "gemini" or engine == "vertex-gemini":
+    elif engine == "gemini" or engine == "vertex-gemini" or engine == "aws":
         response_json = response.json()
 
         if isinstance(response_json, str):
@@ -542,6 +591,9 @@ async def fetch_response_stream(client, url, headers, payload, engine, model):
     elif engine == "claude" or engine == "vertex-claude":
         async for chunk in fetch_claude_response_stream(client, url, headers, payload, model):
             yield chunk
+    elif engine == "aws":
+        async for chunk in fetch_aws_response_stream(client, url, headers, payload, model):
+            yield chunk
     elif engine == "gpt":
         async for chunk in fetch_gpt_response_stream(client, url, headers, payload):
             yield chunk
aient/core/utils.py CHANGED
@@ -64,7 +64,9 @@ def get_engine(provider, endpoint=None, original_model=""):
     # print("parsed_url", parsed_url)
     engine = None
     stream = None
-    if parsed_url.path.endswith("/v1beta") or parsed_url.path.endswith("/v1") or parsed_url.netloc == 'generativelanguage.googleapis.com':
+    if parsed_url.path.endswith("/v1beta") or \
+       parsed_url.path.endswith("/v1") or \
+       (parsed_url.netloc == 'generativelanguage.googleapis.com' and "openai/chat/completions" not in parsed_url.path):
         engine = "gemini"
     elif parsed_url.netloc.rstrip('/').endswith('aiplatform.googleapis.com') or (parsed_url.netloc.rstrip('/').endswith('gateway.ai.cloudflare.com') and "google-vertex-ai" in parsed_url.path):
         engine = "vertex"
@@ -74,6 +76,8 @@ def get_engine(provider, endpoint=None, original_model=""):
         engine = "cloudflare"
     elif parsed_url.netloc == 'api.anthropic.com' or parsed_url.path.endswith("v1/messages"):
         engine = "claude"
+    elif 'amazonaws.com' in parsed_url.netloc:
+        engine = "aws"
     elif parsed_url.netloc == 'api.cohere.com':
         engine = "cohere"
         stream = True
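
A quick sketch of how the widened `get_engine` branches behave for a few hosts. The helper below only mirrors the URL checks touched in this diff; it is not the real `get_engine`, which also inspects the provider config and model name.

```python
from urllib.parse import urlparse

def guess_engine(base_url: str) -> str:
    # Simplified illustration of the branches changed in this hunk.
    parsed_url = urlparse(base_url)
    if parsed_url.path.endswith("/v1beta") or parsed_url.path.endswith("/v1") or \
            (parsed_url.netloc == 'generativelanguage.googleapis.com'
             and "openai/chat/completions" not in parsed_url.path):
        return "gemini"
    if parsed_url.netloc == 'api.anthropic.com' or parsed_url.path.endswith("v1/messages"):
        return "claude"
    if 'amazonaws.com' in parsed_url.netloc:
        return "aws"
    return "gpt"  # placeholder fallback for everything else

print(guess_engine("https://bedrock-runtime.us-east-1.amazonaws.com"))  # -> aws
# The new exclusion sends the Gemini OpenAI-compatible endpoint away from "gemini":
print(guess_engine("https://generativelanguage.googleapis.com/v1beta/openai/chat/completions"))  # -> gpt
```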
@@ -648,7 +652,7 @@ async def get_image_message(base64_image, engine = None):
                 "url": base64_image,
             }
         }
-    if "claude" == engine or "vertex-claude" == engine:
+    if "claude" == engine or "vertex-claude" == engine or "aws" == engine:
         # if not validate_image(base64_image.split(",")[1], image_type):
         #     raise ValueError(f"Invalid image format. Expected {image_type}")
         return {
@@ -669,7 +673,7 @@ async def get_image_message(base64_image, engine = None):
     raise ValueError("Unknown engine")
 
 async def get_text_message(message, engine = None):
-    if "gpt" == engine or "claude" == engine or "openrouter" == engine or "vertex-claude" == engine or "azure" == engine:
+    if "gpt" == engine or "claude" == engine or "openrouter" == engine or "vertex-claude" == engine or "azure" == engine or "aws" == engine:
         return {"type": "text", "text": message}
     if "gemini" == engine or "vertex-gemini" == engine:
         return {"text": message}
aient/plugins/excute_command.py CHANGED
@@ -5,14 +5,15 @@ from .registry import register_tool
 @register_tool()
 def excute_command(command):
     """
-    Execute a command and return its output.
-    Do not use this to view PDFs; the pdftotext command is forbidden.
+    Execute a command and return its output.
+    Do not use this to view PDFs; the pdftotext command is forbidden.
+    Make sure the generated command string can be run directly in a terminal; special characters (e.g. &&) must be kept as-is, with no HTML encoding or any other form of escaping, e.g. never use &amp;&amp; in place of &&.
 
-    Parameters:
-    command: the command to run, e.g. cloning a repository, installing dependencies, running code, etc.
+    Parameters:
+    command: the command to run, e.g. cloning a repository, installing dependencies, running code, etc.
 
-    Returns:
-    the command's output, or an error message
+    Returns:
+    the command's output, or an error message
     """
     try:
         # use subprocess.run to capture the command output
aient/prompt/agent.py CHANGED
@@ -62,7 +62,7 @@ When making code changes, NEVER output code to the USER, unless requested. Inste
 </calling_external_apis>
 
 <user_info>
-The user's OS version is {os_name} {os_version}. The absolute path of the user's workspace is {workspace_path} which is also the project root directory. Use absolute paths in your instructions. All operations must be based on the working directory; never operate outside it. The user's shell is {shell}.
+The user's OS version is {os_name} {os_version}. The absolute path of the user's workspace is {workspace_path} which is also the project root directory. Use absolute paths in your instructions. All operations must be based on the working directory; never operate outside it. Never assume you are already in the working directory. The user's shell is {shell}.
 </user_info>
 
 <instructions for tool use>
@@ -127,7 +127,7 @@ instruction_system_prompt = """
 
 You must address the worker agent as "you"; instructions must not be phrased as questions and must use the imperative mood.
 All replies must be in Chinese.
-Your working directory is {workspace_path}; use absolute paths in your instructions. All operations must be based on the working directory; never operate outside it.
+Your working directory is {workspace_path}; use absolute paths in your instructions. All operations must be based on the working directory; never operate outside it. Never assume you are already in the working directory.
 
 
 Your output must follow these steps:
aient-1.0.68.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.0.66
+Version: 1.0.68
 Summary: Aient: The Awakening of Agent.
 Description-Content-Type: text/markdown
 License-File: LICENSE
aient-1.0.68.dist-info/RECORD CHANGED
@@ -3,9 +3,9 @@ aient/core/.git,sha256=lrAcW1SxzRBUcUiuKL5tS9ykDmmTXxyLP3YYU-Y-Q-I,45
 aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 aient/core/models.py,sha256=_1wYZg_n9kb2A3C8xCboyqleH2iHc9scwOvtx9DPeok,7582
-aient/core/request.py,sha256=sXAZwek-JtuCYheuaQiLizBSJRij_xZmTdf0-91N0AE,49986
-aient/core/response.py,sha256=6Fq-EIL7ua1zeT6GMc1fgQeRs_MCP6UST_pL0tM3f3I,27879
-aient/core/utils.py,sha256=7R_4X4VhTigiuysJ-JuJuhtorAfkCEM-y-9N8dS5cME,25860
+aient/core/request.py,sha256=025Jih6jUhlCnO_JT0si5ygbxB1h-aMUPw6ZhL8mJhk,60362
+aient/core/response.py,sha256=EYlTrpMOInM9IF0uM954xQ6QDca1u33UVcYHfoMomHE,30307
+aient/core/utils.py,sha256=8rZaTu3PA9UyEqeFd-Oxm7VGBO59CzOzcII_UIzmZJY,26035
 aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
 aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhFkw,2755
@@ -21,7 +21,7 @@ aient/models/vertex.py,sha256=qVD5l1Q538xXUPulxG4nmDjXE1VoV4yuAkTCpIeJVw0,16795
 aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
 aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
 aient/plugins/config.py,sha256=tFN54y9pVgK46e9VAOux9h69XuoKJQeTGTAdFKpPPnc,7813
-aient/plugins/excute_command.py,sha256=3jh0o3K33GfcHvNywCTvhU5axN5CDi20ZPer4SwWZmI,943
+aient/plugins/excute_command.py,sha256=rFvL91vm_xazNd0mU_oeNQ1N2U1Q80MyZO-A-v2xULs,1124
 aient/plugins/get_time.py,sha256=Ih5XIW5SDAIhrZ9W4Qe5Hs1k4ieKPUc_LAd6ySNyqZk,654
 aient/plugins/image.py,sha256=ZElCIaZznE06TN9xW3DrSukS7U3A5_cjk1Jge4NzPxw,2072
 aient/plugins/list_directory.py,sha256=5ubm-mfrj-tanGSDp4M_Tmb6vQb3dx2-XVfQ2yL2G8A,1394
@@ -31,12 +31,12 @@ aient/plugins/run_python.py,sha256=dgcUwBunMuDkaSKR5bToudVzSdrXVewktDDFUz_iIOQ,4
 aient/plugins/websearch.py,sha256=yiBzqXK5X220ibR-zko3VDsn4QOnLu1k6E2YOygCeTQ,15185
 aient/plugins/write_file.py,sha256=qYbNvcDTGs00pWYb0UOSYSeN4kw5_DMDxjaSlxtlAkU,1732
 aient/prompt/__init__.py,sha256=GBtn6-JDT8KHFCcuPpfSNE_aGddg5p4FEyMCy4BfwGs,20
-aient/prompt/agent.py,sha256=h39WOoafv0_lh2coBCiG9k-VWa3Yi9HRZV0hnvsc4gs,23598
+aient/prompt/agent.py,sha256=ssZREegzmkWNW20dxzGO92J8Y5v7yjRTR9znTPDjo5Q,23681
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
 aient/utils/scripts.py,sha256=n0jR5eXCBIK12W4bIx-xU1FVl1hZ4zDC7hq_BWQHYJU,27537
-aient-1.0.66.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
-aient-1.0.66.dist-info/METADATA,sha256=P_IHQlqtE0huTvv0iqY48DM1ZJL8KecLY_lw31UMPok,5000
-aient-1.0.66.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-aient-1.0.66.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
-aient-1.0.66.dist-info/RECORD,,
+aient-1.0.68.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.0.68.dist-info/METADATA,sha256=ybmAd9zAXxJwW78DBy9o9lk4n3ulW52EazBSkt7C7t8,5000
+aient-1.0.68.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+aient-1.0.68.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.0.68.dist-info/RECORD,,