jaf-py 2.4.6-py3-none-any.whl → 2.4.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jaf/core/tracing.py CHANGED
@@ -341,7 +341,7 @@ class LangfuseTraceCollector:
             public_key=public_key,
             secret_key=secret_key,
             host=host,
-            release="jaf-py-v2.1.1"
+            release="jaf-py-v2.4.8"
         )
         self.active_spans: Dict[str, Any] = {}
         self.trace_spans: Dict[TraceId, Any] = {}
@@ -399,15 +399,15 @@ class LangfuseTraceCollector:
             print(f"[LANGFUSE DEBUG] Found user_query from history: {user_query}")
             break
 
-        # Try to extract user_id from token_response
-        if hasattr(context, 'token_response'):
-            token_response = context.token_response
-            print(f"[LANGFUSE DEBUG] Found token_response: {type(token_response)}")
-            if isinstance(token_response, dict):
-                user_id = token_response.get("email") or token_response.get("username")
+        # Try to extract user_id from user_info
+        if hasattr(context, 'user_info'):
+            user_info = context.user_info
+            print(f"[LANGFUSE DEBUG] Found user_info: {type(user_info)}")
+            if isinstance(user_info, dict):
+                user_id = user_info.get("email") or user_info.get("username")
                 print(f"[LANGFUSE DEBUG] Extracted user_id: {user_id}")
-            elif hasattr(token_response, 'email'):
-                user_id = token_response.email
+            elif hasattr(user_info, 'email'):
+                user_id = user_info.email
                 print(f"[LANGFUSE DEBUG] Extracted user_id from attr: {user_id}")
 
         # Extract conversation history and current user query from messages
@@ -524,7 +524,8 @@ class LangfuseTraceCollector:
                 "agent_name": event.data.get("agent_name", "analytics_agent_jaf"),
                 "conversation_history": conversation_history,
                 "tool_calls": [],
-                "tool_results": []
+                "tool_results": [],
+                "user_info": event.data.get("context").user_info if event.data.get("context") and hasattr(event.data.get("context"), 'user_info') else None
             }
         )
         self.trace_spans[trace_id] = trace
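
For context on the tracing change above: the collector now reads user identity from a `user_info` attribute on the run context instead of the old `token_response`, accepting either a dict or an object with an `email` attribute. A minimal sketch of that extraction in isolation (the Ctx class and values below are illustrative, not part of the package):

# Illustrative only: mirrors the user_id extraction logic in the diff above.
from dataclasses import dataclass, field
from typing import Any, Dict

@dataclass
class Ctx:  # hypothetical stand-in for the JAF run context
    user_info: Dict[str, Any] = field(default_factory=dict)

ctx = Ctx(user_info={"email": "jane@example.com", "username": "jane"})

user_id = None
if hasattr(ctx, "user_info"):
    info = ctx.user_info
    if isinstance(info, dict):
        user_id = info.get("email") or info.get("username")
    elif hasattr(info, "email"):
        user_id = info.email

assert user_id == "jane@example.com"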
jaf/providers/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .mcp import (
     create_mcp_sse_tools,
     create_mcp_http_tools,
 )
-from .model import make_litellm_provider
+from .model import make_litellm_provider, make_litellm_sdk_provider
 
 # Back-compat for renamed/removed exports (do not add to __all__)
 import warnings as _warnings
@@ -57,4 +57,5 @@ __all__ = [
     "create_mcp_sse_tools",
     "create_mcp_http_tools",
     "make_litellm_provider",
+    "make_litellm_sdk_provider",
 ]
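
With the export added, the new factory is importable directly from the package. A quick sketch, assuming jaf-py 2.4.8 and litellm are installed (the model string and URL are placeholders):

# Illustrative only: imports the newly exported factory.
from jaf.providers import make_litellm_sdk_provider

provider = make_litellm_sdk_provider(
    model="ollama/llama2",             # any LiteLLM-style model string
    base_url="http://localhost:11434"  # forwarded to litellm as api_base
)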
jaf/providers/model.py CHANGED
@@ -14,6 +14,7 @@ import base64
 
 from openai import OpenAI
 from pydantic import BaseModel
+import litellm
 
 from ..core.types import (
     Agent, ContentRole, Message, ModelProvider, RunConfig, RunState,
@@ -445,9 +446,327 @@ def make_litellm_provider(
 
     return LiteLLMProvider()
 
+def make_litellm_sdk_provider(
+    api_key: Optional[str] = None,
+    model: str = "gpt-3.5-turbo",
+    base_url: Optional[str] = None,
+    default_timeout: Optional[float] = None,
+    **litellm_kwargs: Any
+) -> ModelProvider[Ctx]:
+    """
+    Create a LiteLLM SDK-based model provider with universal provider support.
+
+    LiteLLM automatically detects the provider from the model name and handles
+    API key management through environment variables or direct parameters.
+
+    Args:
+        api_key: API key for the provider (optional, can use env vars)
+        model: Model name (e.g., "gpt-4", "claude-3-sonnet", "gemini-pro", "llama2", etc.)
+        base_url: Optional base URL for custom endpoints
+        default_timeout: Default timeout for model API calls in seconds
+        **litellm_kwargs: Additional arguments passed to litellm.completion()
+            Common examples:
+            - vertex_project: "your-project" (for Google models)
+            - vertex_location: "us-central1" (for Google models)
+            - azure_deployment: "your-deployment" (for Azure OpenAI)
+            - api_base: "https://your-endpoint.com" (custom endpoint)
+            - custom_llm_provider: "custom_provider_name"
+
+    Returns:
+        ModelProvider instance
+
+    Examples:
+        # OpenAI
+        make_litellm_sdk_provider(api_key="sk-...", model="gpt-4")
+
+        # Anthropic Claude
+        make_litellm_sdk_provider(api_key="sk-ant-...", model="claude-3-sonnet-20240229")
+
+        # Google Gemini
+        make_litellm_sdk_provider(model="gemini-pro", vertex_project="my-project")
+
+        # Ollama (local)
+        make_litellm_sdk_provider(model="ollama/llama2", base_url="http://localhost:11434")
+
+        # Azure OpenAI
+        make_litellm_sdk_provider(
+            model="azure/gpt-4",
+            api_key="your-azure-key",
+            azure_deployment="gpt-4-deployment",
+            api_base="https://your-resource.openai.azure.com"
+        )
+
+        # Hugging Face
+        make_litellm_sdk_provider(
+            model="huggingface/microsoft/DialoGPT-medium",
+            api_key="hf_..."
+        )
+
+        # Any custom provider
+        make_litellm_sdk_provider(
+            model="custom_provider/model-name",
+            api_key="your-key",
+            custom_llm_provider="your_provider"
+        )
+    """
+
+    class LiteLLMSDKProvider:
+        def __init__(self):
+            self.api_key = api_key
+            self.model = model
+            self.base_url = base_url
+            self.default_timeout = default_timeout
+            self.litellm_kwargs = litellm_kwargs
+
+        async def get_completion(
+            self,
+            state: RunState[Ctx],
+            agent: Agent[Ctx, Any],
+            config: RunConfig[Ctx]
+        ) -> Dict[str, Any]:
+            """Get completion from the model using LiteLLM SDK."""
+
+            # Determine model to use
+            model_name = config.model_override or self.model
+
+            # Create system message
+            system_message = {
+                "role": "system",
+                "content": agent.instructions(state)
+            }
+
+            # Convert messages to OpenAI format
+            messages = [system_message]
+            for msg in state.messages:
+                converted_msg = await _convert_message(msg)
+                messages.append(converted_msg)
+
+            # Convert tools to OpenAI format
+            tools = None
+            if agent.tools:
+                tools = [
+                    {
+                        "type": "function",
+                        "function": {
+                            "name": tool.schema.name,
+                            "description": tool.schema.description,
+                            "parameters": _pydantic_to_json_schema(tool.schema.parameters),
+                        }
+                    }
+                    for tool in agent.tools
+                ]
+
+            # Prepare request parameters for LiteLLM
+            request_params = {
+                "model": model_name,
+                "messages": messages,
+                **self.litellm_kwargs
+            }
+
+            # Add API key if provided
+            if self.api_key:
+                request_params["api_key"] = self.api_key
+
+            # Add optional parameters
+            if agent.model_config:
+                if agent.model_config.temperature is not None:
+                    request_params["temperature"] = agent.model_config.temperature
+                if agent.model_config.max_tokens is not None:
+                    request_params["max_tokens"] = agent.model_config.max_tokens
+
+            if tools:
+                request_params["tools"] = tools
+                request_params["tool_choice"] = "auto"
+
+            if agent.output_codec:
+                request_params["response_format"] = {"type": "json_object"}
+
+            # LiteLLM will use api_base from kwargs or base_url parameter
+            if self.base_url:
+                request_params["api_base"] = self.base_url
+
+            # Make the API call using litellm
+            response = await litellm.acompletion(**request_params)
+
+            # Return in the format that the engine expects
+            choice = response.choices[0]
+
+            # Convert tool_calls to dict format if present
+            tool_calls = None
+            if choice.message.tool_calls:
+                tool_calls = [
+                    {
+                        'id': tc.id,
+                        'type': tc.type,
+                        'function': {
+                            'name': tc.function.name,
+                            'arguments': tc.function.arguments
+                        }
+                    }
+                    for tc in choice.message.tool_calls
+                ]
+
+            # Extract usage data
+            usage_data = None
+            if response.usage:
+                usage_data = {
+                    "prompt_tokens": response.usage.prompt_tokens,
+                    "completion_tokens": response.usage.completion_tokens,
+                    "total_tokens": response.usage.total_tokens,
+                }
+
+            return {
+                'id': response.id,
+                'created': response.created,
+                'model': response.model,
+                'system_fingerprint': getattr(response, 'system_fingerprint', None),
+                'message': {
+                    'content': choice.message.content,
+                    'tool_calls': tool_calls
+                },
+                'usage': usage_data,
+                'prompt': messages
+            }
+
+        async def get_completion_stream(
+            self,
+            state: RunState[Ctx],
+            agent: Agent[Ctx, Any],
+            config: RunConfig[Ctx]
+        ) -> AsyncIterator[CompletionStreamChunk]:
+            """
+            Stream completion chunks from the model provider using LiteLLM SDK.
+            """
+            # Determine model to use
+            model_name = config.model_override or self.model
+
+            # Create system message
+            system_message = {
+                "role": "system",
+                "content": agent.instructions(state)
+            }
+
+            # Convert messages to OpenAI format
+            messages = [system_message]
+            for msg in state.messages:
+                converted_msg = await _convert_message(msg)
+                messages.append(converted_msg)
+
+            # Convert tools to OpenAI format
+            tools = None
+            if agent.tools:
+                tools = [
+                    {
+                        "type": "function",
+                        "function": {
+                            "name": tool.schema.name,
+                            "description": tool.schema.description,
+                            "parameters": _pydantic_to_json_schema(tool.schema.parameters),
+                        }
+                    }
+                    for tool in agent.tools
+                ]
+
+            # Prepare request parameters for LiteLLM streaming
+            request_params: Dict[str, Any] = {
+                "model": model_name,
+                "messages": messages,
+                "stream": True,
+                **self.litellm_kwargs
+            }
+
+            # Add API key if provided
+            if self.api_key:
+                request_params["api_key"] = self.api_key
+
+            # Add optional parameters
+            if agent.model_config:
+                if agent.model_config.temperature is not None:
+                    request_params["temperature"] = agent.model_config.temperature
+                if agent.model_config.max_tokens is not None:
+                    request_params["max_tokens"] = agent.model_config.max_tokens
+
+            if tools:
+                request_params["tools"] = tools
+                request_params["tool_choice"] = "auto"
+
+            if agent.output_codec:
+                request_params["response_format"] = {"type": "json_object"}
+
+            # LiteLLM will use api_base from kwargs or base_url parameter
+            if self.base_url:
+                request_params["api_base"] = self.base_url
+
+            # Stream using litellm
+            stream = await litellm.acompletion(**request_params)
+
+            async for chunk in stream:
+                try:
+                    # Best-effort extraction of the raw chunk for debugging
+                    try:
+                        raw_obj = chunk.model_dump() if hasattr(chunk, 'model_dump') else None
+                    except Exception:
+                        raw_obj = None
+
+                    choice = None
+                    if getattr(chunk, "choices", None):
+                        choice = chunk.choices[0]
+
+                    if choice is None:
+                        continue
+
+                    delta = getattr(choice, "delta", None)
+                    finish_reason = getattr(choice, "finish_reason", None)
+
+                    # Text content delta
+                    if delta is not None:
+                        content_delta = getattr(delta, "content", None)
+                        if content_delta:
+                            yield CompletionStreamChunk(delta=content_delta, raw=raw_obj)
+
+                        # Tool call deltas
+                        tool_calls = getattr(delta, "tool_calls", None)
+                        if isinstance(tool_calls, list):
+                            for tc in tool_calls:
+                                try:
+                                    idx = getattr(tc, "index", 0) or 0
+                                    tc_id = getattr(tc, "id", None)
+                                    fn = getattr(tc, "function", None)
+                                    fn_name = getattr(fn, "name", None) if fn is not None else None
+                                    args_delta = getattr(fn, "arguments", None) if fn is not None else None
+
+                                    yield CompletionStreamChunk(
+                                        tool_call_delta=ToolCallDelta(
+                                            index=idx,
+                                            id=tc_id,
+                                            type='function',
+                                            function=ToolCallFunctionDelta(
+                                                name=fn_name,
+                                                arguments_delta=args_delta
+                                            )
+                                        ),
+                                        raw=raw_obj
+                                    )
+                                except Exception:
+                                    continue
+
+                    # Completion ended
+                    if finish_reason:
+                        yield CompletionStreamChunk(is_done=True, finish_reason=finish_reason, raw=raw_obj)
+                except Exception:
+                    continue
+
+    return LiteLLMSDKProvider()
+
 async def _convert_message(msg: Message) -> Dict[str, Any]:
-    """Convert JAF Message to OpenAI message format with attachment support."""
-    if msg.role == 'user':
+    """
+    Handles all possible role types (string and enum) and content formats.
+    """
+    # Normalize role to handle both string and enum values
+    role_value = msg.role.value if hasattr(msg.role, 'value') else str(msg.role).lower()
+
+    # Handle user messages
+    if role_value in ('user', ContentRole.USER.value if hasattr(ContentRole, 'USER') else 'user'):
         if isinstance(msg.content, list):
             # Multi-part content
             return {
@@ -457,12 +776,16 @@ async def _convert_message(msg: Message) -> Dict[str, Any]:
         else:
             # Build message with attachments if available
             return await _build_chat_message_with_attachments('user', msg)
-    elif msg.role == 'assistant':
+
+    # Handle assistant messages
+    elif role_value in ('assistant', ContentRole.ASSISTANT.value if hasattr(ContentRole, 'ASSISTANT') else 'assistant'):
         result = {
             "role": "assistant",
-            "content": get_text_content(msg.content),
+            "content": get_text_content(msg.content) or "",  # Ensure content is never None
         }
-        if msg.tool_calls:
+
+        # Add tool calls if present
+        if msg.tool_calls and len(msg.tool_calls) > 0:
             result["tool_calls"] = [
                 {
                     "id": tc.id,
@@ -473,16 +796,48 @@ async def _convert_message(msg: Message) -> Dict[str, Any]:
                 }
             }
             for tc in msg.tool_calls
+            if tc.id and tc.function and tc.function.name  # Validate tool call structure
         ]
+
         return result
-    elif msg.role == ContentRole.TOOL:
+
+    # Handle system messages
+    elif role_value in ('system', ContentRole.SYSTEM.value if hasattr(ContentRole, 'SYSTEM') else 'system'):
+        return {
+            "role": "system",
+            "content": get_text_content(msg.content) or ""
+        }
+
+    # Handle tool messages
+    elif role_value in ('tool', ContentRole.TOOL.value if hasattr(ContentRole, 'TOOL') else 'tool'):
+        if not msg.tool_call_id:
+            raise ValueError(f"Tool message must have tool_call_id. Message: {msg}")
+
         return {
             "role": "tool",
-            "content": get_text_content(msg.content),
+            "content": get_text_content(msg.content) or "",
             "tool_call_id": msg.tool_call_id
         }
+
+    # Handle function messages (legacy support)
+    elif role_value == 'function':
+        if not msg.tool_call_id:
+            raise ValueError(f"Function message must have tool_call_id. Message: {msg}")
+
+        return {
+            "role": "function",
+            "content": get_text_content(msg.content) or "",
+            "name": getattr(msg, 'name', 'unknown_function')
+        }
+
+    # Unknown role - provide helpful error message
     else:
-        raise ValueError(f"Unknown message role: {msg.role}")
+        available_roles = ['user', 'assistant', 'system', 'tool', 'function']
+        raise ValueError(
+            f"Unknown message role: {msg.role} (type: {type(msg.role)}). "
+            f"Supported roles: {available_roles}. "
+            f"Message content: {get_text_content(msg.content)[:100] if msg.content else 'None'}"
+        )
 
 
 def _convert_content_part(part: MessageContentPart) -> Dict[str, Any]:
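
The practical effect of the rewritten _convert_message is that enum and string roles collapse to one canonical value before dispatch. A minimal sketch of that normalization in isolation (the ContentRole stand-in below is illustrative, not imported from the package):

# Illustrative only: mirrors `role_value = msg.role.value if hasattr(msg.role, 'value') else str(msg.role).lower()`.
from enum import Enum

class ContentRole(str, Enum):  # hypothetical stand-in for jaf.core.types.ContentRole
    USER = "user"
    ASSISTANT = "assistant"

def normalize_role(role) -> str:
    return role.value if hasattr(role, "value") else str(role).lower()

assert normalize_role(ContentRole.USER) == "user"
assert normalize_role("USER") == "user"
assert normalize_role("assistant") == normalize_role(ContentRole.ASSISTANT)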
jaf_py-2.4.6.dist-info/METADATA → jaf_py-2.4.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: jaf-py
-Version: 2.4.6
+Version: 2.4.8
 Summary: A purely functional agent framework with immutable state and composable tools - Python implementation
 Author: JAF Contributors
 Maintainer: JAF Contributors
@@ -43,6 +43,7 @@ Requires-Dist: opentelemetry-api>=1.22.0
 Requires-Dist: opentelemetry-sdk>=1.22.0
 Requires-Dist: opentelemetry-exporter-otlp>=1.22.0
 Requires-Dist: langfuse<3.0.0
+Requires-Dist: litellm>=1.76.3
 Provides-Extra: tracing
 Requires-Dist: opentelemetry-api>=1.22.0; extra == "tracing"
 Requires-Dist: opentelemetry-sdk>=1.22.0; extra == "tracing"
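
One way to confirm the new dependency pin after installing, using only the standard library (a sketch; exact output may vary by environment):

# Illustrative only: inspects installed distribution metadata.
from importlib.metadata import requires, version

print(version("jaf-py"))  # expected: 2.4.8
print([r for r in (requires("jaf-py") or []) if r.startswith("litellm")])
# expected to include something like: ['litellm>=1.76.3']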
jaf_py-2.4.6.dist-info/RECORD → jaf_py-2.4.8.dist-info/RECORD CHANGED
@@ -53,7 +53,7 @@ jaf/core/state.py,sha256=NMtYTpUYa64m1Kte6lD8LGnF2bl69HAcdgXH6f-M97c,5650
 jaf/core/streaming.py,sha256=h_lYHQA9ee_D5QsDO9-Vhevgi7rFXPslPzd9605AJGo,17034
 jaf/core/tool_results.py,sha256=-bTOqOX02lMyslp5Z4Dmuhx0cLd5o7kgR88qK2HO_sw,11323
 jaf/core/tools.py,sha256=84N9A7QQ3xxcOs2eUUot3nmCnt5i7iZT9VwkuzuFBxQ,16274
-jaf/core/tracing.py,sha256=iuVgykFUSkoBjem1k6jdVLrhRZzJn-avyxc_6W9BXPI,40159
+jaf/core/tracing.py,sha256=UyAUTBtj_3bThslvtqdmOt7bSMxFkHrTBc-RvItfmvI,40269
 jaf/core/types.py,sha256=FCc9uWTUS6P1iU-_RxJM7k-HNorsHM-0XHqwwaUGLkE,26267
 jaf/core/workflows.py,sha256=Ul-82gzjIXtkhnSMSPv-8igikjkMtW1EBo9yrfodtvI,26294
 jaf/memory/__init__.py,sha256=-L98xlvihurGAzF0DnXtkueDVvO_wV2XxxEwAWdAj50,1400
@@ -70,9 +70,9 @@ jaf/plugins/base.py,sha256=clQohPxT19zj-AjRtDoevE26xS5-cm7qdUOW2VX2InY,12575
 jaf/policies/__init__.py,sha256=tfbUgIPMwMKvZsyDItP_zvbhGXGY5scKSo78LIomgDU,416
 jaf/policies/handoff.py,sha256=KJYYuL9T6v6DECRhnsS2Je6q4Aj9_zC5d_KBnvEnZNE,8318
 jaf/policies/validation.py,sha256=wn-7ynH10E5nk-_r1_kHIYHrBGmLX0EFr-FUTHrsxvc,10903
-jaf/providers/__init__.py,sha256=j_o-Rubr8d9tNYlFWb6fvzkxIBl3JKK_iabj9wTFia0,2114
+jaf/providers/__init__.py,sha256=lIbl1JvGrDhI9CzEk79N8yJNhf7ww_aWD-F40MnG3vY,2174
 jaf/providers/mcp.py,sha256=WxcC8gUFpDBBYyhorMcc1jHq3xMDMBtnwyRPthfL0S0,13074
-jaf/providers/model.py,sha256=NJTa-1k0EruDdLf2HS1ZdDpFJhHXzzfQyXAbJx9kZVc,25468
+jaf/providers/model.py,sha256=bN2Hhr0N3soZzMrCdJ1pJa4rvo80oedCphDPcNgrVMY,39336
 jaf/server/__init__.py,sha256=fMPnLZBRm6t3yQrr7-PnoHAQ8qj9o6Z1AJLM1M6bIS0,392
 jaf/server/main.py,sha256=CTb0ywbPIq9ELfay5MKChVR7BpIQOoEbPjPfpzo2aBQ,2152
 jaf/server/server.py,sha256=K8XKNyadP_YqkCRSK9jCVZh52d2_IbHp_jHkKzBeB9Q,38786
@@ -86,9 +86,9 @@ jaf/visualization/functional_core.py,sha256=zedMDZbvjuOugWwnh6SJ2stvRNQX1Hlkb9Ab
 jaf/visualization/graphviz.py,sha256=WTOM6UP72-lVKwI4_SAr5-GCC3ouckxHv88ypCDQWJ0,12056
 jaf/visualization/imperative_shell.py,sha256=GpMrAlMnLo2IQgyB2nardCz09vMvAzaYI46MyrvJ0i4,2593
 jaf/visualization/types.py,sha256=QQcbVeQJLuAOXk8ynd08DXIS-PVCnv3R-XVE9iAcglw,1389
-jaf_py-2.4.6.dist-info/licenses/LICENSE,sha256=LXUQBJxdyr-7C4bk9cQBwvsF_xwA-UVstDTKabpcjlI,1063
-jaf_py-2.4.6.dist-info/METADATA,sha256=ep-RyxTMs_RhA4h10yXfZ5s4RQOiIZ0A5hRoP4ZV3sg,27712
-jaf_py-2.4.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-jaf_py-2.4.6.dist-info/entry_points.txt,sha256=OtIJeNJpb24kgGrqRx9szGgDx1vL9ayq8uHErmu7U5w,41
-jaf_py-2.4.6.dist-info/top_level.txt,sha256=Xu1RZbGaM4_yQX7bpalo881hg7N_dybaOW282F15ruE,4
-jaf_py-2.4.6.dist-info/RECORD,,
+jaf_py-2.4.8.dist-info/licenses/LICENSE,sha256=LXUQBJxdyr-7C4bk9cQBwvsF_xwA-UVstDTKabpcjlI,1063
+jaf_py-2.4.8.dist-info/METADATA,sha256=e1L0JOAJzW81CBJ5RmOXFsSMFr80JioAQ2PTMGISiT0,27743
+jaf_py-2.4.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+jaf_py-2.4.8.dist-info/entry_points.txt,sha256=OtIJeNJpb24kgGrqRx9szGgDx1vL9ayq8uHErmu7U5w,41
+jaf_py-2.4.8.dist-info/top_level.txt,sha256=Xu1RZbGaM4_yQX7bpalo881hg7N_dybaOW282F15ruE,4
+jaf_py-2.4.8.dist-info/RECORD,,