flock-core 0.4.0b46__py3-none-any.whl → 0.4.0b48__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core has been flagged as potentially problematic; see the registry's advisory page for details.

Files changed (31)
  1. flock/core/flock.py +105 -61
  2. flock/core/flock_registry.py +45 -38
  3. flock/core/util/spliter.py +4 -0
  4. flock/evaluators/__init__.py +1 -0
  5. flock/evaluators/declarative/__init__.py +1 -0
  6. flock/modules/__init__.py +1 -0
  7. flock/modules/assertion/__init__.py +1 -0
  8. flock/modules/callback/__init__.py +1 -0
  9. flock/modules/memory/__init__.py +1 -0
  10. flock/modules/output/__init__.py +1 -0
  11. flock/modules/performance/__init__.py +1 -0
  12. flock/modules/zep/__init__.py +1 -0
  13. flock/tools/__init__.py +188 -0
  14. flock/{core/tools → tools}/azure_tools.py +284 -0
  15. flock/tools/code_tools.py +56 -0
  16. flock/tools/file_tools.py +140 -0
  17. flock/{core/tools/dev_tools/github.py → tools/github_tools.py} +3 -3
  18. flock/{core/tools → tools}/markdown_tools.py +14 -4
  19. flock/tools/system_tools.py +9 -0
  20. flock/{core/tools/llm_tools.py → tools/text_tools.py} +47 -25
  21. flock/tools/web_tools.py +90 -0
  22. flock/{core/tools → tools}/zendesk_tools.py +6 -6
  23. flock/webapp/app/config.py +1 -1
  24. flock/webapp/app/main.py +109 -16
  25. flock/workflow/activities.py +1 -0
  26. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b48.dist-info}/METADATA +24 -13
  27. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b48.dist-info}/RECORD +30 -17
  28. flock/core/tools/basic_tools.py +0 -317
  29. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b48.dist-info}/WHEEL +0 -0
  30. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b48.dist-info}/entry_points.txt +0 -0
  31. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b48.dist-info}/licenses/LICENSE +0 -0
@@ -21,12 +21,12 @@ except LookupError:
21
21
 
22
22
 
23
23
  @traced_and_logged
24
- def split_by_sentences(text: str) -> list[str]:
24
+ def text_split_by_sentences(text: str) -> list[str]:
25
25
  return nltk.sent_tokenize(text)
26
26
 
27
27
 
28
28
  @traced_and_logged
29
- def split_by_characters(
29
+ def text_split_by_characters(
30
30
  text: str, chunk_size: int = 4000, overlap: int = 200
31
31
  ) -> list[str]:
32
32
  if chunk_size <= 0:
@@ -71,7 +71,7 @@ def split_by_characters(
71
71
 
72
72
 
73
73
  @traced_and_logged
74
- def split_by_tokens(
74
+ def text_split_by_tokens(
75
75
  text: str,
76
76
  tokenizer: Callable[[str], list[str]],
77
77
  max_tokens: int = 1024,
@@ -90,7 +90,7 @@ def split_by_tokens(
90
90
 
91
91
 
92
92
  @traced_and_logged
93
- def split_by_separator(text: str, separator: str = "\n\n") -> list[str]:
93
+ def text_split_by_separator(text: str, separator: str = "\n\n") -> list[str]:
94
94
  if not text:
95
95
  return []
96
96
 
@@ -99,7 +99,7 @@ def split_by_separator(text: str, separator: str = "\n\n") -> list[str]:
99
99
 
100
100
 
101
101
  @traced_and_logged
102
- def recursive_text_splitter(
102
+ def text_recursive_splitter(
103
103
  text: str,
104
104
  chunk_size: int = 4000,
105
105
  separators: list[str] = ["\n\n", "\n", ". ", ", ", " ", ""],
@@ -114,7 +114,7 @@ def recursive_text_splitter(
114
114
  if not separators:
115
115
  return [
116
116
  text[:chunk_size],
117
- *recursive_text_splitter(text[chunk_size:], chunk_size, separators),
117
+ *text_recursive_splitter(text[chunk_size:], chunk_size, separators),
118
118
  ]
119
119
 
120
120
  separator = separators[0]
@@ -122,7 +122,7 @@ def recursive_text_splitter(
122
122
 
123
123
  if separator == "":
124
124
  # If we're at the character level, just split by characters
125
- return split_by_characters(text, chunk_size=chunk_size, overlap=0)
125
+ return text_split_by_characters(text, chunk_size=chunk_size, overlap=0)
126
126
 
127
127
  splits = text.split(separator)
128
128
  separator_len = len(separator) if keep_separator else 0
@@ -147,7 +147,7 @@ def recursive_text_splitter(
147
147
  current_length = 0
148
148
 
149
149
  # Recursively split this large piece
150
- smaller_chunks = recursive_text_splitter(
150
+ smaller_chunks = text_recursive_splitter(
151
151
  split, chunk_size, new_separators, keep_separator
152
152
  )
153
153
  result.extend(smaller_chunks)
@@ -169,10 +169,10 @@ def recursive_text_splitter(
169
169
 
170
170
 
171
171
  @traced_and_logged
172
- def chunk_text_for_embedding(
172
+ def text_chunking_for_embedding(
173
173
  text: str, file_name: str, chunk_size: int = 1000, overlap: int = 100
174
174
  ) -> list[dict[str, Any]]:
175
- chunks = split_by_characters(text, chunk_size=chunk_size, overlap=overlap)
175
+ chunks = text_split_by_characters(text, chunk_size=chunk_size, overlap=overlap)
176
176
 
177
177
  # Create metadata for each chunk
178
178
  result = []
@@ -190,7 +190,7 @@ def chunk_text_for_embedding(
190
190
 
191
191
 
192
192
  @traced_and_logged
193
- def split_code_by_functions(code: str) -> list[dict[str, Any]]:
193
+ def text_split_code_by_functions(code: str) -> list[dict[str, Any]]:
194
194
  if not code:
195
195
  return []
196
196
 
@@ -238,7 +238,7 @@ def split_code_by_functions(code: str) -> list[dict[str, Any]]:
238
238
 
239
239
 
240
240
  @traced_and_logged
241
- def count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
241
+ def text_count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
242
242
  """Count tokens using tiktoken."""
243
243
  if not text:
244
244
  return 0
@@ -272,11 +272,11 @@ def count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
272
272
 
273
273
  except ImportError:
274
274
  # Fallback to character-based estimation if tiktoken is not installed
275
- return count_tokens_estimate(text, model)
275
+ return text_count_tokens_estimate(text, model)
276
276
 
277
277
 
278
278
  @traced_and_logged
279
- def count_tokens_estimate(text: str, model: str = "gpt-3.5-turbo") -> int:
279
+ def text_count_tokens_estimate(text: str, model: str = "gpt-3.5-turbo") -> int:
280
280
  """Estimate token count for different models."""
281
281
  if not text:
282
282
  return 0
@@ -297,7 +297,7 @@ def count_tokens_estimate(text: str, model: str = "gpt-3.5-turbo") -> int:
297
297
 
298
298
 
299
299
  @traced_and_logged
300
- def truncate_to_token_limit(
300
+ def text_truncate_to_token_limit(
301
301
  text: str, max_tokens: int = 4000, model: str = "gpt-3.5-turbo"
302
302
  ) -> str:
303
303
  if not text:
@@ -327,7 +327,7 @@ def truncate_to_token_limit(
327
327
 
328
328
  except ImportError:
329
329
  # Fallback to the character-based method if tiktoken is not available
330
- estimated_tokens = count_tokens_estimate(text, model)
330
+ estimated_tokens = text_count_tokens_estimate(text, model)
331
331
 
332
332
  if estimated_tokens <= max_tokens:
333
333
  return text
@@ -353,7 +353,7 @@ def truncate_to_token_limit(
353
353
 
354
354
 
355
355
  @traced_and_logged
356
- def extract_keywords(text: str, top_n: int = 10) -> list[str]:
356
+ def text_extract_keywords(text: str, top_n: int = 10) -> list[str]:
357
357
  if not text:
358
358
  return []
359
359
 
@@ -489,7 +489,7 @@ def extract_keywords(text: str, top_n: int = 10) -> list[str]:
489
489
 
490
490
 
491
491
  @traced_and_logged
492
- def clean_text(
492
+ def text_clean_text(
493
493
  text: str,
494
494
  remove_urls: bool = True,
495
495
  remove_html: bool = True,
@@ -518,7 +518,7 @@ def clean_text(
518
518
 
519
519
 
520
520
  @traced_and_logged
521
- def format_chat_history(
521
+ def text_format_chat_history(
522
522
  messages: list[dict[str, str]],
523
523
  format_type: str = "text",
524
524
  system_prefix: str = "System: ",
@@ -567,7 +567,7 @@ def format_chat_history(
567
567
 
568
568
 
569
569
  @traced_and_logged
570
- def extract_json_from_text(text: str) -> dict[str, Any] | None:
570
+ def text_extract_json_from_text(text: str) -> dict[str, Any] | None:
571
571
  if not text:
572
572
  return None
573
573
 
@@ -599,7 +599,7 @@ def extract_json_from_text(text: str) -> dict[str, Any] | None:
599
599
 
600
600
 
601
601
  @traced_and_logged
602
- def calculate_text_hash(text: str, algorithm: str = "sha256") -> str:
602
+ def text_calculate_hash(text: str, algorithm: str = "sha256") -> str:
603
603
  if not text:
604
604
  return ""
605
605
 
@@ -614,7 +614,7 @@ def calculate_text_hash(text: str, algorithm: str = "sha256") -> str:
614
614
 
615
615
 
616
616
  @traced_and_logged
617
- def format_table_from_dicts(data: list[dict[str, Any]]) -> str:
617
+ def text_format_table_from_dicts(data: list[dict[str, Any]]) -> str:
618
618
  if not data:
619
619
  return ""
620
620
 
@@ -649,7 +649,7 @@ def format_table_from_dicts(data: list[dict[str, Any]]) -> str:
649
649
 
650
650
 
651
651
  @traced_and_logged
652
- def detect_language(text: str) -> str:
652
+ def text_detect_language(text: str) -> str:
653
653
  """Simple language detection"""
654
654
  if not text or len(text.strip()) < 10:
655
655
  return "unknown"
@@ -734,7 +734,7 @@ def detect_language(text: str) -> str:
734
734
 
735
735
 
736
736
  @traced_and_logged
737
- def tiktoken_split(
737
+ def text_tiktoken_split(
738
738
  text: str,
739
739
  model: str = "gpt-3.5-turbo",
740
740
  chunk_size: int = 1000,
@@ -783,6 +783,28 @@ def tiktoken_split(
783
783
  return chunks
784
784
  except ImportError:
785
785
  # Fallback to character-based chunking if tiktoken is not available
786
- return split_by_characters(
786
+ return text_split_by_characters(
787
787
  text, chunk_size=chunk_size * 4, overlap=overlap * 4
788
788
  )
789
+
790
+
791
+ @traced_and_logged
792
+ def text_count_words(text: str) -> int:
793
+ if not text:
794
+ return 0
795
+ return len(text.split())
796
+
797
+
798
+ @traced_and_logged
799
+ def text_extract_urls(text: str) -> list[str]:
800
+ if not text:
801
+ return []
802
+ # A more robust regex might be needed for complex cases
803
+ return re.findall(r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", text)
804
+
805
+
806
+ @traced_and_logged
807
+ def text_extract_numbers(text: str) -> list[float]:
808
+ if not text:
809
+ return []
810
+ return [float(num) for num in re.findall(r"[-+]?\d*\.?\d+", text)]
@@ -0,0 +1,90 @@
1
+
2
+ import importlib
3
+ import os
4
+ from typing import Literal
5
+
6
+ from flock.core.logging.trace_and_logged import traced_and_logged
7
+
8
+
9
+ @traced_and_logged
10
+ def web_search_tavily(query: str):
11
+ if importlib.util.find_spec("tavily") is not None:
12
+ from tavily import TavilyClient
13
+
14
+ client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
15
+ try:
16
+ response = client.search(query, include_answer=True) # type: ignore
17
+ return response
18
+ except Exception:
19
+ raise
20
+ else:
21
+ raise ImportError(
22
+ "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
23
+ )
24
+
25
+
26
+ @traced_and_logged
27
+ def web_search_duckduckgo(
28
+ keywords: str, search_type: Literal["news", "web"] = "web"
29
+ ):
30
+ try:
31
+ if importlib.util.find_spec("duckduckgo_search") is not None:
32
+ from duckduckgo_search import DDGS
33
+
34
+ if search_type == "news":
35
+ response = DDGS().news(keywords)
36
+ else:
37
+ response = DDGS().text(keywords)
38
+
39
+ return response
40
+ else:
41
+ raise ImportError(
42
+ "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
43
+ )
44
+ except Exception:
45
+ raise
46
+
47
+
48
+ @traced_and_logged
49
+ def web_search_bing(keywords: str):
50
+ try:
51
+ import httpx
52
+
53
+ subscription_key = os.environ["BING_SEARCH_V7_SUBSCRIPTION_KEY"]
54
+ endpoint = "https://api.bing.microsoft.com/v7.0/search"
55
+
56
+ # Query term(s) to search for.
57
+ query = keywords
58
+
59
+ # Construct a request
60
+ mkt = "en-US"
61
+ params = {"q": query, "mkt": mkt}
62
+ headers = {"Ocp-Apim-Subscription-Key": subscription_key}
63
+
64
+ response = httpx.get(endpoint, headers=headers, params=params)
65
+ response.raise_for_status()
66
+ search_results = response.json()
67
+ return search_results["webPages"]
68
+ except Exception:
69
+ raise
70
+
71
+ @traced_and_logged
72
+ def web_content_as_markdown(url: str) -> str:
73
+ if (
74
+ importlib.util.find_spec("httpx") is not None
75
+ and importlib.util.find_spec("markdownify") is not None
76
+ ):
77
+ import httpx
78
+ from markdownify import markdownify as md
79
+
80
+ try:
81
+ response = httpx.get(url)
82
+ response.raise_for_status()
83
+ markdown = md(response.text)
84
+ return markdown
85
+ except Exception:
86
+ raise
87
+ else:
88
+ raise ImportError(
89
+ "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
90
+ )
@@ -12,7 +12,7 @@ HEADERS = {
12
12
  }
13
13
 
14
14
 
15
- def get_tickets(number_of_tickets: int = 10) -> list[dict]:
15
+ def zendesk_get_tickets(number_of_tickets: int = 10) -> list[dict]:
16
16
  """Get all tickets."""
17
17
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_TICKET")
18
18
  BASE_URL = f"https://{ZENDESK_SUBDOMAIN}.zendesk.com"
@@ -31,7 +31,7 @@ def get_tickets(number_of_tickets: int = 10) -> list[dict]:
31
31
  return all_tickets
32
32
 
33
33
 
34
- def get_ticket_by_id(ticket_id: str) -> dict:
34
+ def zendesk_get_ticket_by_id(ticket_id: str) -> dict:
35
35
  """Get a ticket by ID."""
36
36
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_TICKET")
37
37
  BASE_URL = f"https://{ZENDESK_SUBDOMAIN}.zendesk.com"
@@ -42,7 +42,7 @@ def get_ticket_by_id(ticket_id: str) -> dict:
42
42
  return response.json()["ticket"]
43
43
 
44
44
 
45
- def get_comments_by_ticket_id(ticket_id: str) -> list[dict]:
45
+ def zendesk_get_comments_by_ticket_id(ticket_id: str) -> list[dict]:
46
46
  """Get all comments for a ticket."""
47
47
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_TICKET")
48
48
  BASE_URL = f"https://{ZENDESK_SUBDOMAIN}.zendesk.com"
@@ -53,7 +53,7 @@ def get_comments_by_ticket_id(ticket_id: str) -> list[dict]:
53
53
  return response.json()["comments"]
54
54
 
55
55
 
56
- def get_article_by_id(article_id: str) -> dict:
56
+ def zendesk_get_article_by_id(article_id: str) -> dict:
57
57
  """Get an article by ID."""
58
58
  ZENDESK_LOCALE = os.getenv("ZENDESK_ARTICLE_LOCALE")
59
59
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_ARTICLE")
@@ -67,7 +67,7 @@ def get_article_by_id(article_id: str) -> dict:
67
67
  return response.json()["article"]
68
68
 
69
69
 
70
- def get_articles() -> list[dict]:
70
+ def zendesk_get_articles() -> list[dict]:
71
71
  """Get all articles."""
72
72
  ZENDESK_LOCALE = os.getenv("ZENDESK_ARTICLE_LOCALE")
73
73
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_ARTICLE")
@@ -79,7 +79,7 @@ def get_articles() -> list[dict]:
79
79
  return response.json()["articles"]
80
80
 
81
81
 
82
- def search_articles(query: str) -> list[dict]:
82
+ def zendesk_search_articles(query: str) -> list[dict]:
83
83
  """Search Zendesk Help Center articles using a query string."""
84
84
  ZENDESK_LOCALE = os.getenv("ZENDESK_ARTICLE_LOCALE") # e.g., "en-us"
85
85
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_ARTICLE")
@@ -70,7 +70,7 @@ def set_current_theme_name(theme_name: str | None):
70
70
  else:
71
71
  print(f"Warning: Theme 'random' specified, but no themes found in {THEMES_DIR}. Using default: {DEFAULT_THEME_NAME}")
72
72
  # resolved_theme remains DEFAULT_THEME_NAME
73
- elif theme_name in [t.value for t in OutputTheme] or theme_name in list_available_themes():
73
+ elif theme_name in list_available_themes():
74
74
  resolved_theme = theme_name
75
75
  else:
76
76
  print(f"Warning: Invalid theme name provided ('{theme_name}'). Using default: {DEFAULT_THEME_NAME}")
flock/webapp/app/main.py CHANGED
@@ -149,18 +149,70 @@ app.include_router(registry_viewer.router, prefix="/ui/api/registry", tags=["UI
149
149
 
150
150
  def generate_theme_css_web(theme_name: str | None) -> str:
151
151
  if not THEME_LOADER_AVAILABLE or THEMES_DIR is None: return ""
152
- active_theme_name = theme_name or get_current_theme_name() or DEFAULT_THEME_NAME
153
- theme_filename = f"{active_theme_name}.toml"
154
- theme_path = THEMES_DIR / theme_filename
155
- if not theme_path.exists():
156
- logger.warning(f"Theme file not found: {theme_path}. Using default: {DEFAULT_THEME_NAME}.toml")
157
- theme_path = THEMES_DIR / f"{DEFAULT_THEME_NAME}.toml"
158
- active_theme_name = DEFAULT_THEME_NAME
152
+
153
+ chosen_theme_name_input = theme_name or get_current_theme_name() or DEFAULT_THEME_NAME
154
+
155
+ # Sanitize the input to get only the filename component
156
+ sanitized_name_part = Path(chosen_theme_name_input).name
157
+ # Ensure we have a stem
158
+ theme_stem_candidate = sanitized_name_part
159
+ if theme_stem_candidate.endswith(".toml"):
160
+ theme_stem_candidate = theme_stem_candidate[:-5]
161
+
162
+ effective_theme_filename = f"{theme_stem_candidate}.toml"
163
+ _theme_to_load_stem = theme_stem_candidate # This will be the name of the theme we attempt to load
164
+
165
+ try:
166
+ resolved_themes_dir = THEMES_DIR.resolve(strict=True) # Ensure THEMES_DIR itself is valid
167
+ prospective_theme_path = resolved_themes_dir / effective_theme_filename
168
+
169
+ # Resolve the prospective path
170
+ resolved_theme_path = prospective_theme_path.resolve()
171
+
172
+ # Validate:
173
+ # 1. Path is still within the resolved THEMES_DIR
174
+ # 2. The final filename component of the resolved path matches the intended filename
175
+ # (guards against symlinks or normalization changing the name unexpectedly)
176
+ # 3. The file exists
177
+ if (
178
+ str(resolved_theme_path).startswith(str(resolved_themes_dir)) and
179
+ resolved_theme_path.name == effective_theme_filename and
180
+ resolved_theme_path.is_file() # is_file checks existence too
181
+ ):
182
+ theme_path = resolved_theme_path
183
+ else:
184
+ logger.warning(
185
+ f"Validation failed or theme '{effective_theme_filename}' not found in '{resolved_themes_dir}'. "
186
+ f"Attempted path: '{prospective_theme_path}'. Resolved to: '{resolved_theme_path}'. "
187
+ f"Falling back to default theme: {DEFAULT_THEME_NAME}.toml"
188
+ )
189
+ _theme_to_load_stem = DEFAULT_THEME_NAME
190
+ theme_path = resolved_themes_dir / f"{DEFAULT_THEME_NAME}.toml"
191
+ if not theme_path.is_file():
192
+ logger.error(f"Default theme file '{theme_path}' not found. No theme CSS will be generated.")
193
+ return ""
194
+ except FileNotFoundError: # THEMES_DIR does not exist
195
+ logger.error(f"Themes directory '{THEMES_DIR}' not found. Falling back to default theme.")
196
+ _theme_to_load_stem = DEFAULT_THEME_NAME
197
+ # Attempt to use a conceptual default path if THEMES_DIR was bogus, though it's unlikely to succeed
198
+ theme_path = Path(f"{DEFAULT_THEME_NAME}.toml") # This won't be in THEMES_DIR if THEMES_DIR is bad
199
+ if not theme_path.exists(): # Check existence without assuming a base directory
200
+ logger.error(f"Default theme file '{DEFAULT_THEME_NAME}.toml' not found at root or THEMES_DIR is inaccessible. No theme CSS.")
201
+ return ""
202
+ except Exception as e:
203
+ logger.error(f"Error during theme path resolution for '{effective_theme_filename}': {e}. Falling back to default.")
204
+ _theme_to_load_stem = DEFAULT_THEME_NAME
205
+ theme_path = THEMES_DIR / f"{DEFAULT_THEME_NAME}.toml" if THEMES_DIR else Path(f"{DEFAULT_THEME_NAME}.toml")
159
206
  if not theme_path.exists():
160
- logger.warning(f"Default theme file not found: {theme_path}. No theme CSS.")
207
+ logger.error(f"Default theme file '{theme_path}' not found after error. No theme CSS.")
161
208
  return ""
162
- try: theme_dict = load_theme_from_file(str(theme_path))
163
- except Exception as e: logger.error(f"Error loading theme {theme_path}: {e}"); return ""
209
+
210
+ try:
211
+ theme_dict = load_theme_from_file(str(theme_path))
212
+ logger.debug(f"Successfully loaded theme '{_theme_to_load_stem}' from '{theme_path}'")
213
+ except Exception as e:
214
+ logger.error(f"Error loading theme file '{theme_path}' (intended: '{_theme_to_load_stem}.toml'): {e}")
215
+ return ""
164
216
 
165
217
  pico_vars = alacritty_to_pico(theme_dict)
166
218
  if not pico_vars: return ""
@@ -429,16 +481,57 @@ async def htmx_env_add(request: Request, var_name: str = Form(...), var_value: s
429
481
 
430
482
  @app.get("/ui/htmx/theme-preview", response_class=HTMLResponse, tags=["UI HTMX Partials"])
431
483
  async def htmx_theme_preview(request: Request, theme: str = Query(None)):
432
- theme_name = theme or get_current_theme_name() or DEFAULT_THEME_NAME
484
+ if not THEME_LOADER_AVAILABLE:
485
+ return HTMLResponse("<p>Theme loading functionality is not available.</p>", status_code=500)
486
+ if THEMES_DIR is None or not THEMES_DIR.exists():
487
+ return HTMLResponse("<p>Themes directory is not configured or does not exist.</p>", status_code=500)
488
+
489
+ chosen_theme_name_input = theme or get_current_theme_name() or DEFAULT_THEME_NAME
490
+
491
+ # Sanitize the input to get only the filename component
492
+ sanitized_name_part = Path(chosen_theme_name_input).name
493
+ # Ensure we have a stem
494
+ theme_stem_from_input = sanitized_name_part
495
+ if theme_stem_from_input.endswith(".toml"):
496
+ theme_stem_from_input = theme_stem_from_input[:-5]
497
+
498
+ theme_filename_to_load = f"{theme_stem_from_input}.toml"
499
+ theme_name_for_display = theme_stem_from_input # Use the sanitized stem for display/logging
500
+
433
501
  try:
434
- theme_path = THEMES_DIR / f"{theme_name}.toml" if THEMES_DIR else None
435
- if not (theme_path and theme_path.exists()): return HTMLResponse("<p>Theme not found.</p>")
502
+ resolved_themes_dir = THEMES_DIR.resolve(strict=True)
503
+ theme_path_candidate = resolved_themes_dir / theme_filename_to_load
504
+ resolved_theme_path = theme_path_candidate.resolve()
505
+
506
+ if not str(resolved_theme_path).startswith(str(resolved_themes_dir)) or \
507
+ resolved_theme_path.name != theme_filename_to_load:
508
+ logger.warning(f"Invalid theme path access attempt for '{theme_name_for_display}'. "
509
+ f"Original input: '{chosen_theme_name_input}', Sanitized filename: '{theme_filename_to_load}', "
510
+ f"Attempted path: '{theme_path_candidate}', Resolved to: '{resolved_theme_path}'")
511
+ return HTMLResponse(f"<p>Invalid theme name or path for '{theme_name_for_display}'.</p>", status_code=400)
512
+
513
+ if not resolved_theme_path.is_file():
514
+ logger.info(f"Theme preview: Theme file '{theme_filename_to_load}' not found at '{resolved_theme_path}'.")
515
+ return HTMLResponse(f"<p>Theme '{theme_name_for_display}' not found.</p>", status_code=404)
516
+
517
+ theme_path = resolved_theme_path
436
518
  theme_data = load_theme_from_file(str(theme_path))
437
- except Exception as e: return HTMLResponse(f"<p>Error loading theme: {e}</p>")
519
+ logger.debug(f"Successfully loaded theme '{theme_name_for_display}' for preview from '{theme_path}'")
520
+
521
+ except FileNotFoundError: # For THEMES_DIR.resolve(strict=True)
522
+ logger.error(f"Themes directory '{THEMES_DIR}' not found during preview for '{theme_name_for_display}'.")
523
+ return HTMLResponse("<p>Themes directory not found.</p>", status_code=500)
524
+ except Exception as e:
525
+ logger.error(f"Error loading theme '{theme_name_for_display}' for preview (path: '{theme_path_candidate if 'theme_path_candidate' in locals() else 'unknown'}'): {e}")
526
+ return HTMLResponse(f"<p>Error loading theme '{theme_name_for_display}': {e}</p>", status_code=500)
527
+
438
528
  css_vars = alacritty_to_pico(theme_data)
439
- css_vars_str = ":root {\n" + "\n".join([f" {k}: {v};" for k, v in css_vars.items()]) + "\n}"
529
+ if not css_vars:
530
+ return HTMLResponse(f"<p>Could not convert theme '{theme_name_for_display}' to CSS variables.</p>")
531
+
532
+ css_vars_str = ":root {\n" + "\\n".join([f" {k}: {v};" for k, v in css_vars.items()]) + "\\n}"
440
533
  main_colors = [("Background", css_vars.get("--pico-background-color")), ("Text", css_vars.get("--pico-color")), ("Primary", css_vars.get("--pico-primary")), ("Secondary", css_vars.get("--pico-secondary")), ("Muted", css_vars.get("--pico-muted-color"))]
441
- return templates.TemplateResponse("partials/_theme_preview.html", {"request": request, "theme_name": theme_name, "css_vars_str": css_vars_str, "main_colors": main_colors})
534
+ return templates.TemplateResponse("partials/_theme_preview.html", {"request": request, "theme_name": theme_name_for_display, "css_vars_str": css_vars_str, "main_colors": main_colors})
442
535
 
443
536
  @app.post("/ui/apply-theme", tags=["UI Actions"])
444
537
  async def apply_theme(request: Request, theme: str = Form(...)):
@@ -27,6 +27,7 @@ async def run_agent(context: FlockContext) -> dict:
27
27
  # Start a top-level span for the entire run_agent activity.
28
28
  with tracer.start_as_current_span("run_agent") as span:
29
29
  registry = get_registry()
30
+
30
31
  previous_agent_name = ""
31
32
  if isinstance(context, dict):
32
33
  context = FlockContext.from_dict(context)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: flock-core
3
- Version: 0.4.0b46
3
+ Version: 0.4.0b48
4
4
  Summary: Declarative LLM Orchestration at Scale
5
5
  Author-email: Andre Ratzenberger <andre.ratzenberger@whiteduck.de>
6
6
  License-File: LICENSE
@@ -8,22 +8,20 @@ Classifier: License :: OSI Approved :: MIT License
8
8
  Classifier: Operating System :: OS Independent
9
9
  Classifier: Programming Language :: Python :: 3
10
10
  Requires-Python: >=3.10
11
- Requires-Dist: azure-search-documents>=11.5.2
12
11
  Requires-Dist: chromadb>=0.6.3
13
12
  Requires-Dist: cloudpickle>=3.1.1
14
13
  Requires-Dist: datasets>=3.2.0
15
14
  Requires-Dist: devtools>=0.12.2
16
- Requires-Dist: dspy==2.6.16
17
- Requires-Dist: duckduckgo-search>=7.3.2
15
+ Requires-Dist: dspy==2.6.23
18
16
  Requires-Dist: fastapi>=0.115.8
19
17
  Requires-Dist: httpx>=0.28.1
20
18
  Requires-Dist: inspect-ai>=0.3.88
21
- Requires-Dist: litellm==1.63.7
19
+ Requires-Dist: litellm==1.69.3
22
20
  Requires-Dist: loguru>=0.7.3
23
21
  Requires-Dist: matplotlib>=3.10.0
24
22
  Requires-Dist: msgpack>=1.1.0
25
- Requires-Dist: nltk>=3.9.1
26
23
  Requires-Dist: notion-client>=2.3.0
24
+ Requires-Dist: openai==1.75.0
27
25
  Requires-Dist: opentelemetry-api>=1.30.0
28
26
  Requires-Dist: opentelemetry-exporter-jaeger-proto-grpc>=1.21.0
29
27
  Requires-Dist: opentelemetry-exporter-jaeger>=1.21.0
@@ -50,13 +48,26 @@ Requires-Dist: toml>=0.10.2
50
48
  Requires-Dist: tqdm>=4.67.1
51
49
  Requires-Dist: uvicorn>=0.34.0
52
50
  Requires-Dist: zep-python>=2.0.2
53
- Provides-Extra: all
54
- Requires-Dist: docling>=2.18.0; extra == 'all'
55
- Requires-Dist: markdownify>=0.14.1; extra == 'all'
56
- Requires-Dist: tavily-python>=0.5.0; extra == 'all'
57
- Provides-Extra: tools
58
- Requires-Dist: markdownify>=0.14.1; extra == 'tools'
59
- Requires-Dist: tavily-python>=0.5.0; extra == 'tools'
51
+ Provides-Extra: all-tools
52
+ Requires-Dist: azure-identity>=1.23.0; extra == 'all-tools'
53
+ Requires-Dist: azure-search-documents>=11.5.2; extra == 'all-tools'
54
+ Requires-Dist: azure-storage-blob>=12.25.1; extra == 'all-tools'
55
+ Requires-Dist: docling>=2.18.0; extra == 'all-tools'
56
+ Requires-Dist: duckduckgo-search>=7.3.2; extra == 'all-tools'
57
+ Requires-Dist: markdownify>=0.14.1; extra == 'all-tools'
58
+ Requires-Dist: nltk>=3.9.1; extra == 'all-tools'
59
+ Requires-Dist: tavily-python>=0.5.0; extra == 'all-tools'
60
+ Provides-Extra: azure-tools
61
+ Requires-Dist: azure-identity>=1.23.0; extra == 'azure-tools'
62
+ Requires-Dist: azure-search-documents>=11.5.2; extra == 'azure-tools'
63
+ Requires-Dist: azure-storage-blob>=12.25.1; extra == 'azure-tools'
64
+ Provides-Extra: basic-tools
65
+ Requires-Dist: docling>=2.18.0; extra == 'basic-tools'
66
+ Requires-Dist: duckduckgo-search>=7.3.2; extra == 'basic-tools'
67
+ Requires-Dist: markdownify>=0.14.1; extra == 'basic-tools'
68
+ Requires-Dist: tavily-python>=0.5.0; extra == 'basic-tools'
69
+ Provides-Extra: llm-tools
70
+ Requires-Dist: nltk>=3.9.1; extra == 'llm-tools'
60
71
  Description-Content-Type: text/markdown
61
72
 
62
73
  <p align="center">