llmflowstack 1.1.2__tar.gz → 1.1.3__tar.gz

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (26)
  1. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/PKG-INFO +1 -1
  2. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/models/LLaMA4.py +9 -11
  3. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/pyproject.toml +1 -1
  4. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/.github/workflows/python-publish.yml +0 -0
  5. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/.gitignore +0 -0
  6. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/LICENSE +0 -0
  7. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/README.md +0 -0
  8. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/__init__.py +0 -0
  9. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/base/__init__.py +0 -0
  10. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/base/base.py +0 -0
  11. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/callbacks/__init__.py +0 -0
  12. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/callbacks/log_collector.py +0 -0
  13. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/callbacks/stop_on_token.py +0 -0
  14. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/models/GPT_OSS.py +0 -0
  15. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/models/Gemma.py +0 -0
  16. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/models/LLaMA3.py +0 -0
  17. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/models/MedGemma.py +0 -0
  18. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/models/__init__.py +0 -0
  19. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/rag/__iinit__.py +0 -0
  20. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/rag/pipeline.py +0 -0
  21. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/schemas/__init__.py +0 -0
  22. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/schemas/params.py +0 -0
  23. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/utils/__init__.py +0 -0
  24. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/utils/evaluation_methods.py +0 -0
  25. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/utils/exceptions.py +0 -0
  26. {llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/utils/generation_utils.py +0 -0
{llmflowstack-1.1.2 → llmflowstack-1.1.3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llmflowstack
-Version: 1.1.2
+Version: 1.1.3
 Summary: LLMFlowStack is a framework for training and using LLMs (LLaMA, GPT-OSS, Gemma, ...). Supports DAPT, fine-tuning, and distributed inference. Public fork without institution-specific components.
 Author-email: Gustavo Henrique Ferreira Cruz <gustavohferreiracruz@gmail.com>
 License: MIT
{llmflowstack-1.1.2 → llmflowstack-1.1.3}/llmflowstack/models/LLaMA4.py
@@ -21,7 +21,6 @@ class LLaMA4Input(TypedDict):
     input_text: str
     expected_answer: str | None
     system_message: str | None
-    image_paths: list[str] | None
 
 class LLaMA4(BaseModel):
     model: Llama4ForCausalLM | None = None
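
The only change here is the removal of image_paths, which makes the input schema text-only. For orientation, a minimal sketch of the TypedDict as it stands in 1.1.3 (the typing import is assumed; the field order follows the diff):

from typing import TypedDict

class LLaMA4Input(TypedDict):
    # 1.1.3 drops image_paths, leaving a text-only input schema
    input_text: str
    expected_answer: str | None
    system_message: str | None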
@@ -82,15 +81,16 @@ class LLaMA4(BaseModel):
             system_message = ""
 
         if system_message:
-            system_message = f"{system_message}\n"
+            system_message = f"<|header_start|>system<|header_end|>\n\n{system_message}<|eot|>"
 
         expected_answer = data.get("expected_answer")
-        answer = f"{expected_answer}<end_of_turn>" if expected_answer else ""
-
+        answer = f"<|header_start|>assistant<|header_end|>\n\n{expected_answer}<|eot|>" if expected_answer else ""
+
         return (
-            f"<start_of_turn>user"
-            f"{system_message}\n{data["input_text"]}<end_of_turn>\n"
-            f"<start_of_turn>model\n"
+            "<|begin_of_text|>"
+            f"{system_message}"
+            "<|header_start|>user<|header_end|>\n\n"
+            f"{data["input_text"]}<|eot|>"
             f"{answer}"
         )
 
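This hunk replaces the Gemma-style <start_of_turn>/<end_of_turn> markers with LLaMA-4-style special tokens (<|begin_of_text|>, <|header_start|>...<|header_end|>, <|eot|>) and moves the system message into its own header block instead of prepending it to the user turn. Below is a self-contained sketch of what the new code emits; build_prompt and the sample strings are illustrative, not part of the package API:

def build_prompt(
    input_text: str,
    system_message: str | None = None,
    expected_answer: str | None = None,
) -> str:
    # Re-creates the 1.1.3 template from the hunk above.
    system = (
        f"<|header_start|>system<|header_end|>\n\n{system_message}<|eot|>"
        if system_message else ""
    )
    answer = (
        f"<|header_start|>assistant<|header_end|>\n\n{expected_answer}<|eot|>"
        if expected_answer else ""
    )
    return (
        "<|begin_of_text|>"
        f"{system}"
        "<|header_start|>user<|header_end|>\n\n"
        f"{input_text}<|eot|>"
        f"{answer}"
    )

print(build_prompt("Hello!", system_message="Be concise."))
# <|begin_of_text|><|header_start|>system<|header_end|>
#
# Be concise.<|eot|><|header_start|>user<|header_end|>
#
# Hello!<|eot|>

When expected_answer is supplied (training), the assistant turn is written out and closed with <|eot|>; when it is absent (inference), the prompt ends after the user turn.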
@@ -98,8 +98,7 @@ class LLaMA4(BaseModel):
         self,
         input_text: str,
         system_message: str | None = None,
-        expected_answer: str | None = None,
-        image_paths: list[str] | None = None
+        expected_answer: str | None = None
     ) -> LLaMA4Input:
         if not self.tokenizer:
             raise MissingEssentialProp("Could not find tokenizer.")
@@ -107,8 +106,7 @@ class LLaMA4(BaseModel):
         return {
             "input_text": input_text,
             "system_message": system_message,
-            "expected_answer": expected_answer,
-            "image_paths": image_paths
+            "expected_answer": expected_answer
         }
 
     def dapt(
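
Together, the two hunks above reduce the input builder to three text fields. A standalone sketch of the equivalent behavior; build_input as a free function is an assumption (the diff context omits the real method name), and MissingEssentialProp stands in for the class from llmflowstack/utils/exceptions.py:

class MissingEssentialProp(Exception):
    # Stand-in for the package's exception type.
    pass

def build_input(
    tokenizer: object,
    input_text: str,
    system_message: str | None = None,
    expected_answer: str | None = None,
) -> dict:
    # Mirrors 1.1.3: guard on the tokenizer, then return the three-field
    # payload. There is no image_paths parameter or key any more.
    if not tokenizer:
        raise MissingEssentialProp("Could not find tokenizer.")
    return {
        "input_text": input_text,
        "system_message": system_message,
        "expected_answer": expected_answer,
    }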
{llmflowstack-1.1.2 → llmflowstack-1.1.3}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "llmflowstack"
-version = "1.1.2"
+version = "1.1.3"
 authors = [
     { name = "Gustavo Henrique Ferreira Cruz", email = "gustavohferreiracruz@gmail.com" }
 ]