flock-core 0.4.0b46__py3-none-any.whl → 0.4.0b49__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic. Click here for more details.

Files changed (50) hide show
  1. flock/__init__.py +45 -3
  2. flock/core/flock.py +105 -61
  3. flock/core/flock_registry.py +45 -38
  4. flock/core/util/spliter.py +4 -0
  5. flock/evaluators/__init__.py +1 -0
  6. flock/evaluators/declarative/__init__.py +1 -0
  7. flock/modules/__init__.py +1 -0
  8. flock/modules/assertion/__init__.py +1 -0
  9. flock/modules/callback/__init__.py +1 -0
  10. flock/modules/mem0/__init__.py +1 -0
  11. flock/modules/mem0/mem0_module.py +63 -0
  12. flock/modules/mem0graph/__init__.py +1 -0
  13. flock/modules/mem0graph/mem0_graph_module.py +63 -0
  14. flock/modules/memory/__init__.py +1 -0
  15. flock/modules/output/__init__.py +1 -0
  16. flock/modules/performance/__init__.py +1 -0
  17. flock/tools/__init__.py +188 -0
  18. flock/{core/tools → tools}/azure_tools.py +284 -0
  19. flock/tools/code_tools.py +56 -0
  20. flock/tools/file_tools.py +140 -0
  21. flock/{core/tools/dev_tools/github.py → tools/github_tools.py} +3 -3
  22. flock/{core/tools → tools}/markdown_tools.py +14 -4
  23. flock/tools/system_tools.py +9 -0
  24. flock/{core/tools/llm_tools.py → tools/text_tools.py} +47 -25
  25. flock/tools/web_tools.py +90 -0
  26. flock/{core/tools → tools}/zendesk_tools.py +6 -6
  27. flock/webapp/app/api/execution.py +130 -30
  28. flock/webapp/app/chat.py +303 -16
  29. flock/webapp/app/config.py +15 -1
  30. flock/webapp/app/dependencies.py +22 -0
  31. flock/webapp/app/main.py +509 -18
  32. flock/webapp/app/services/flock_service.py +38 -13
  33. flock/webapp/app/services/sharing_models.py +43 -0
  34. flock/webapp/app/services/sharing_store.py +156 -0
  35. flock/webapp/static/css/chat.css +57 -0
  36. flock/webapp/templates/chat.html +29 -4
  37. flock/webapp/templates/partials/_chat_messages.html +1 -1
  38. flock/webapp/templates/partials/_chat_settings_form.html +22 -0
  39. flock/webapp/templates/partials/_execution_form.html +28 -1
  40. flock/webapp/templates/partials/_share_chat_link_snippet.html +11 -0
  41. flock/webapp/templates/partials/_share_link_snippet.html +35 -0
  42. flock/webapp/templates/shared_run_page.html +116 -0
  43. flock/workflow/activities.py +1 -0
  44. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b49.dist-info}/METADATA +27 -14
  45. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b49.dist-info}/RECORD +48 -28
  46. flock/core/tools/basic_tools.py +0 -317
  47. flock/modules/zep/zep_module.py +0 -187
  48. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b49.dist-info}/WHEEL +0 -0
  49. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b49.dist-info}/entry_points.txt +0 -0
  50. {flock_core-0.4.0b46.dist-info → flock_core-0.4.0b49.dist-info}/licenses/LICENSE +0 -0
@@ -9,7 +9,7 @@ from flock.core.logging.trace_and_logged import traced_and_logged
9
9
 
10
10
 
11
11
  @traced_and_logged
12
- def create_user_stories_as_github_issue(title: str, body: str) -> str:
12
+ def github_create_user_stories_as_github_issue(title: str, body: str) -> str:
13
13
  github_pat = os.getenv("GITHUB_PAT")
14
14
  github_repo = os.getenv("GITHUB_REPO")
15
15
 
@@ -31,7 +31,7 @@ def create_user_stories_as_github_issue(title: str, body: str) -> str:
31
31
 
32
32
 
33
33
  @traced_and_logged
34
- def upload_readme(content: str):
34
+ def github_upload_readme(content: str):
35
35
  GITHUB_USERNAME = os.getenv("GITHUB_USERNAME")
36
36
  REPO_NAME = os.getenv("GITHUB_REPO")
37
37
  GITHUB_TOKEN = os.getenv("GITHUB_PAT")
@@ -82,7 +82,7 @@ def upload_readme(content: str):
82
82
 
83
83
 
84
84
  @traced_and_logged
85
- def create_files(file_paths) -> str:
85
+ def github_create_files(file_paths) -> str:
86
86
  """Create multiple files in a GitHub repository with a predefined content.
87
87
 
88
88
  This function iterates over a list of file paths (relative to the repository root) and creates
@@ -5,7 +5,7 @@ from flock.core.logging.trace_and_logged import traced_and_logged
5
5
 
6
6
 
7
7
  @traced_and_logged
8
- def split_markdown_by_headers(
8
+ def markdown_split_by_headers(
9
9
  markdown_text: str, min_header_level: int = 1, max_header_level: int = 2
10
10
  ) -> list[dict[str, Any]]:
11
11
  if not markdown_text:
@@ -58,7 +58,7 @@ def split_markdown_by_headers(
58
58
 
59
59
 
60
60
  @traced_and_logged
61
- def extract_code_blocks(
61
+ def markdown_extract_code_blocks(
62
62
  markdown_text: str, language: str = None
63
63
  ) -> list[dict[str, str]]:
64
64
  if not markdown_text:
@@ -92,7 +92,7 @@ def extract_code_blocks(
92
92
 
93
93
 
94
94
  @traced_and_logged
95
- def extract_links(markdown_text: str) -> list[dict[str, str]]:
95
+ def markdown_extract_links(markdown_text: str) -> list[dict[str, str]]:
96
96
  if not markdown_text:
97
97
  return []
98
98
 
@@ -104,7 +104,7 @@ def extract_links(markdown_text: str) -> list[dict[str, str]]:
104
104
 
105
105
 
106
106
  @traced_and_logged
107
- def extract_tables(markdown_text: str) -> list[dict[str, Any]]:
107
+ def markdown_extract_tables(markdown_text: str) -> list[dict[str, Any]]:
108
108
  if not markdown_text:
109
109
  return []
110
110
 
@@ -193,3 +193,13 @@ def markdown_to_plain_text(markdown_text: str) -> str:
193
193
  text = re.sub(r"\n{3,}", "\n\n", text)
194
194
 
195
195
  return text.strip()
196
+
197
+
198
+
199
@traced_and_logged
def extract_links_from_markdown(markdown: str, url: str) -> list:
    """Extract inline-link targets from markdown, resolved against a base URL.

    Args:
        markdown: Markdown text to scan for inline links ``[text](target)``.
        url: Base URL prepended (by simple concatenation) to relative targets.

    Returns:
        A list of link-target strings. Targets that are already absolute
        (``http://`` / ``https://``) are returned unchanged; all other
        targets are prefixed with ``url``.
    """
    # Inline markdown links look like [text](target).
    link_pattern = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
    links = link_pattern.findall(markdown)
    # Bug fix: previously every target was prefixed with the base URL,
    # which corrupted already-absolute links ("https://basehttps://other").
    return [
        target if target.startswith(("http://", "https://")) else url + target
        for _text, target in links
    ]
205
+
@@ -0,0 +1,9 @@
1
+ from flock.core.logging.trace_and_logged import traced_and_logged
2
+
3
+
4
@traced_and_logged
def get_current_time() -> str:
    """Return the current local date and time as an ISO-8601 string."""
    from datetime import datetime

    return datetime.now().isoformat()
@@ -21,12 +21,12 @@ except LookupError:
21
21
 
22
22
 
23
23
  @traced_and_logged
24
- def split_by_sentences(text: str) -> list[str]:
24
+ def text_split_by_sentences(text: str) -> list[str]:
25
25
  return nltk.sent_tokenize(text)
26
26
 
27
27
 
28
28
  @traced_and_logged
29
- def split_by_characters(
29
+ def text_split_by_characters(
30
30
  text: str, chunk_size: int = 4000, overlap: int = 200
31
31
  ) -> list[str]:
32
32
  if chunk_size <= 0:
@@ -71,7 +71,7 @@ def split_by_characters(
71
71
 
72
72
 
73
73
  @traced_and_logged
74
- def split_by_tokens(
74
+ def text_split_by_tokens(
75
75
  text: str,
76
76
  tokenizer: Callable[[str], list[str]],
77
77
  max_tokens: int = 1024,
@@ -90,7 +90,7 @@ def split_by_tokens(
90
90
 
91
91
 
92
92
  @traced_and_logged
93
- def split_by_separator(text: str, separator: str = "\n\n") -> list[str]:
93
+ def text_split_by_separator(text: str, separator: str = "\n\n") -> list[str]:
94
94
  if not text:
95
95
  return []
96
96
 
@@ -99,7 +99,7 @@ def split_by_separator(text: str, separator: str = "\n\n") -> list[str]:
99
99
 
100
100
 
101
101
  @traced_and_logged
102
- def recursive_text_splitter(
102
+ def text_recursive_splitter(
103
103
  text: str,
104
104
  chunk_size: int = 4000,
105
105
  separators: list[str] = ["\n\n", "\n", ". ", ", ", " ", ""],
@@ -114,7 +114,7 @@ def recursive_text_splitter(
114
114
  if not separators:
115
115
  return [
116
116
  text[:chunk_size],
117
- *recursive_text_splitter(text[chunk_size:], chunk_size, separators),
117
+ *text_recursive_splitter(text[chunk_size:], chunk_size, separators),
118
118
  ]
119
119
 
120
120
  separator = separators[0]
@@ -122,7 +122,7 @@ def recursive_text_splitter(
122
122
 
123
123
  if separator == "":
124
124
  # If we're at the character level, just split by characters
125
- return split_by_characters(text, chunk_size=chunk_size, overlap=0)
125
+ return text_split_by_characters(text, chunk_size=chunk_size, overlap=0)
126
126
 
127
127
  splits = text.split(separator)
128
128
  separator_len = len(separator) if keep_separator else 0
@@ -147,7 +147,7 @@ def recursive_text_splitter(
147
147
  current_length = 0
148
148
 
149
149
  # Recursively split this large piece
150
- smaller_chunks = recursive_text_splitter(
150
+ smaller_chunks = text_recursive_splitter(
151
151
  split, chunk_size, new_separators, keep_separator
152
152
  )
153
153
  result.extend(smaller_chunks)
@@ -169,10 +169,10 @@ def recursive_text_splitter(
169
169
 
170
170
 
171
171
  @traced_and_logged
172
- def chunk_text_for_embedding(
172
+ def text_chunking_for_embedding(
173
173
  text: str, file_name: str, chunk_size: int = 1000, overlap: int = 100
174
174
  ) -> list[dict[str, Any]]:
175
- chunks = split_by_characters(text, chunk_size=chunk_size, overlap=overlap)
175
+ chunks = text_split_by_characters(text, chunk_size=chunk_size, overlap=overlap)
176
176
 
177
177
  # Create metadata for each chunk
178
178
  result = []
@@ -190,7 +190,7 @@ def chunk_text_for_embedding(
190
190
 
191
191
 
192
192
  @traced_and_logged
193
- def split_code_by_functions(code: str) -> list[dict[str, Any]]:
193
+ def text_split_code_by_functions(code: str) -> list[dict[str, Any]]:
194
194
  if not code:
195
195
  return []
196
196
 
@@ -238,7 +238,7 @@ def split_code_by_functions(code: str) -> list[dict[str, Any]]:
238
238
 
239
239
 
240
240
  @traced_and_logged
241
- def count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
241
+ def text_count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
242
242
  """Count tokens using tiktoken."""
243
243
  if not text:
244
244
  return 0
@@ -272,11 +272,11 @@ def count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
272
272
 
273
273
  except ImportError:
274
274
  # Fallback to character-based estimation if tiktoken is not installed
275
- return count_tokens_estimate(text, model)
275
+ return text_count_tokens_estimate(text, model)
276
276
 
277
277
 
278
278
  @traced_and_logged
279
- def count_tokens_estimate(text: str, model: str = "gpt-3.5-turbo") -> int:
279
+ def text_count_tokens_estimate(text: str, model: str = "gpt-3.5-turbo") -> int:
280
280
  """Estimate token count for different models."""
281
281
  if not text:
282
282
  return 0
@@ -297,7 +297,7 @@ def count_tokens_estimate(text: str, model: str = "gpt-3.5-turbo") -> int:
297
297
 
298
298
 
299
299
  @traced_and_logged
300
- def truncate_to_token_limit(
300
+ def text_truncate_to_token_limit(
301
301
  text: str, max_tokens: int = 4000, model: str = "gpt-3.5-turbo"
302
302
  ) -> str:
303
303
  if not text:
@@ -327,7 +327,7 @@ def truncate_to_token_limit(
327
327
 
328
328
  except ImportError:
329
329
  # Fallback to the character-based method if tiktoken is not available
330
- estimated_tokens = count_tokens_estimate(text, model)
330
+ estimated_tokens = text_count_tokens_estimate(text, model)
331
331
 
332
332
  if estimated_tokens <= max_tokens:
333
333
  return text
@@ -353,7 +353,7 @@ def truncate_to_token_limit(
353
353
 
354
354
 
355
355
  @traced_and_logged
356
- def extract_keywords(text: str, top_n: int = 10) -> list[str]:
356
+ def text_extract_keywords(text: str, top_n: int = 10) -> list[str]:
357
357
  if not text:
358
358
  return []
359
359
 
@@ -489,7 +489,7 @@ def extract_keywords(text: str, top_n: int = 10) -> list[str]:
489
489
 
490
490
 
491
491
  @traced_and_logged
492
- def clean_text(
492
+ def text_clean_text(
493
493
  text: str,
494
494
  remove_urls: bool = True,
495
495
  remove_html: bool = True,
@@ -518,7 +518,7 @@ def clean_text(
518
518
 
519
519
 
520
520
  @traced_and_logged
521
- def format_chat_history(
521
+ def text_format_chat_history(
522
522
  messages: list[dict[str, str]],
523
523
  format_type: str = "text",
524
524
  system_prefix: str = "System: ",
@@ -567,7 +567,7 @@ def format_chat_history(
567
567
 
568
568
 
569
569
  @traced_and_logged
570
- def extract_json_from_text(text: str) -> dict[str, Any] | None:
570
+ def text_extract_json_from_text(text: str) -> dict[str, Any] | None:
571
571
  if not text:
572
572
  return None
573
573
 
@@ -599,7 +599,7 @@ def extract_json_from_text(text: str) -> dict[str, Any] | None:
599
599
 
600
600
 
601
601
  @traced_and_logged
602
- def calculate_text_hash(text: str, algorithm: str = "sha256") -> str:
602
+ def text_calculate_hash(text: str, algorithm: str = "sha256") -> str:
603
603
  if not text:
604
604
  return ""
605
605
 
@@ -614,7 +614,7 @@ def calculate_text_hash(text: str, algorithm: str = "sha256") -> str:
614
614
 
615
615
 
616
616
  @traced_and_logged
617
- def format_table_from_dicts(data: list[dict[str, Any]]) -> str:
617
+ def text_format_table_from_dicts(data: list[dict[str, Any]]) -> str:
618
618
  if not data:
619
619
  return ""
620
620
 
@@ -649,7 +649,7 @@ def format_table_from_dicts(data: list[dict[str, Any]]) -> str:
649
649
 
650
650
 
651
651
  @traced_and_logged
652
- def detect_language(text: str) -> str:
652
+ def text_detect_language(text: str) -> str:
653
653
  """Simple language detection"""
654
654
  if not text or len(text.strip()) < 10:
655
655
  return "unknown"
@@ -734,7 +734,7 @@ def detect_language(text: str) -> str:
734
734
 
735
735
 
736
736
  @traced_and_logged
737
- def tiktoken_split(
737
+ def text_tiktoken_split(
738
738
  text: str,
739
739
  model: str = "gpt-3.5-turbo",
740
740
  chunk_size: int = 1000,
@@ -783,6 +783,28 @@ def tiktoken_split(
783
783
  return chunks
784
784
  except ImportError:
785
785
  # Fallback to character-based chunking if tiktoken is not available
786
- return split_by_characters(
786
+ return text_split_by_characters(
787
787
  text, chunk_size=chunk_size * 4, overlap=overlap * 4
788
788
  )
789
+
790
+
791
@traced_and_logged
def text_count_words(text: str) -> int:
    """Count whitespace-separated words in *text*; empty input yields 0."""
    return len(text.split()) if text else 0
796
+
797
+
798
@traced_and_logged
def text_extract_urls(text: str) -> list[str]:
    """Return every http/https URL substring found in *text*.

    Empty or falsy input yields an empty list.
    """
    if not text:
        return []
    # NOTE: intentionally simple pattern; may miss exotic but valid URLs.
    url_pattern = re.compile(
        r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
    )
    return url_pattern.findall(text)
804
+
805
+
806
@traced_and_logged
def text_extract_numbers(text: str) -> list[float]:
    """Extract all signed integer/decimal literals from *text* as floats.

    Empty or falsy input yields an empty list.
    """
    if not text:
        return []
    matches = re.findall(r"[-+]?\d*\.?\d+", text)
    return list(map(float, matches))
@@ -0,0 +1,90 @@
1
+
2
+ import importlib
3
+ import os
4
+ from typing import Literal
5
+
6
+ from flock.core.logging.trace_and_logged import traced_and_logged
7
+
8
+
9
@traced_and_logged
def web_search_tavily(query: str):
    """Search the web via the Tavily API.

    Args:
        query: Search query string. Reads the TAVILY_API_KEY env var.

    Returns:
        The raw Tavily search response (includes a generated answer).

    Raises:
        ImportError: If the optional ``tavily`` dependency is not installed.
    """
    # `import importlib` alone does not guarantee the `util` submodule is
    # loaded; import it explicitly before using find_spec().
    import importlib.util

    if importlib.util.find_spec("tavily") is None:
        raise ImportError(
            "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
        )
    from tavily import TavilyClient

    client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
    # The original `try: ... except Exception: raise` was a no-op; removed.
    return client.search(query, include_answer=True)  # type: ignore
24
+
25
+
26
@traced_and_logged
def web_search_duckduckgo(
    keywords: str, search_type: Literal["news", "web"] = "web"
):
    """Search DuckDuckGo for *keywords*.

    Args:
        keywords: Query string.
        search_type: ``"news"`` for news results; anything else searches the web.

    Returns:
        The result list returned by ``duckduckgo_search``.

    Raises:
        ImportError: If the optional ``duckduckgo_search`` dependency is missing.
    """
    # Explicitly import the submodule; `import importlib` alone does not
    # make `importlib.util` available.
    import importlib.util

    if importlib.util.find_spec("duckduckgo_search") is None:
        raise ImportError(
            "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
        )
    from duckduckgo_search import DDGS

    # The original outer `try: ... except Exception: raise` was a no-op; removed.
    if search_type == "news":
        return DDGS().news(keywords)
    return DDGS().text(keywords)
46
+
47
+
48
@traced_and_logged
def web_search_bing(keywords: str):
    """Search with the Bing Web Search v7 API.

    Reads the BING_SEARCH_V7_SUBSCRIPTION_KEY environment variable.

    Args:
        keywords: Query term(s) to search for.

    Returns:
        The ``"webPages"`` section of the Bing search response.

    Raises:
        KeyError: If the subscription-key env var or ``"webPages"`` is absent.
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    import httpx

    subscription_key = os.environ["BING_SEARCH_V7_SUBSCRIPTION_KEY"]
    endpoint = "https://api.bing.microsoft.com/v7.0/search"

    # Construct the request (en-US market, matching the original behavior).
    params = {"q": keywords, "mkt": "en-US"}
    headers = {"Ocp-Apim-Subscription-Key": subscription_key}

    response = httpx.get(endpoint, headers=headers, params=params)
    response.raise_for_status()
    search_results = response.json()
    # The original wrapped everything in `try: ... except Exception: raise`,
    # which is a no-op and has been removed; exceptions propagate unchanged.
    return search_results["webPages"]
70
+
71
@traced_and_logged
def web_content_as_markdown(url: str) -> str:
    """Fetch *url* over HTTP and convert the response body to markdown.

    Args:
        url: The page to fetch.

    Returns:
        The page content converted to markdown via ``markdownify``.

    Raises:
        ImportError: If the optional ``httpx``/``markdownify`` deps are missing.
        httpx.HTTPStatusError: If the request returns an error status.
    """
    # Explicitly import the submodule; `import importlib` alone does not
    # make `importlib.util` available.
    import importlib.util

    if (
        importlib.util.find_spec("httpx") is None
        or importlib.util.find_spec("markdownify") is None
    ):
        raise ImportError(
            "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
        )
    import httpx
    from markdownify import markdownify as md

    # The original `try: ... except Exception: raise` was a no-op; removed.
    response = httpx.get(url)
    response.raise_for_status()
    return md(response.text)
@@ -12,7 +12,7 @@ HEADERS = {
12
12
  }
13
13
 
14
14
 
15
- def get_tickets(number_of_tickets: int = 10) -> list[dict]:
15
+ def zendesk_get_tickets(number_of_tickets: int = 10) -> list[dict]:
16
16
  """Get all tickets."""
17
17
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_TICKET")
18
18
  BASE_URL = f"https://{ZENDESK_SUBDOMAIN}.zendesk.com"
@@ -31,7 +31,7 @@ def get_tickets(number_of_tickets: int = 10) -> list[dict]:
31
31
  return all_tickets
32
32
 
33
33
 
34
- def get_ticket_by_id(ticket_id: str) -> dict:
34
+ def zendesk_get_ticket_by_id(ticket_id: str) -> dict:
35
35
  """Get a ticket by ID."""
36
36
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_TICKET")
37
37
  BASE_URL = f"https://{ZENDESK_SUBDOMAIN}.zendesk.com"
@@ -42,7 +42,7 @@ def get_ticket_by_id(ticket_id: str) -> dict:
42
42
  return response.json()["ticket"]
43
43
 
44
44
 
45
- def get_comments_by_ticket_id(ticket_id: str) -> list[dict]:
45
+ def zendesk_get_comments_by_ticket_id(ticket_id: str) -> list[dict]:
46
46
  """Get all comments for a ticket."""
47
47
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_TICKET")
48
48
  BASE_URL = f"https://{ZENDESK_SUBDOMAIN}.zendesk.com"
@@ -53,7 +53,7 @@ def get_comments_by_ticket_id(ticket_id: str) -> list[dict]:
53
53
  return response.json()["comments"]
54
54
 
55
55
 
56
- def get_article_by_id(article_id: str) -> dict:
56
+ def zendesk_get_article_by_id(article_id: str) -> dict:
57
57
  """Get an article by ID."""
58
58
  ZENDESK_LOCALE = os.getenv("ZENDESK_ARTICLE_LOCALE")
59
59
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_ARTICLE")
@@ -67,7 +67,7 @@ def get_article_by_id(article_id: str) -> dict:
67
67
  return response.json()["article"]
68
68
 
69
69
 
70
- def get_articles() -> list[dict]:
70
+ def zendesk_get_articles() -> list[dict]:
71
71
  """Get all articles."""
72
72
  ZENDESK_LOCALE = os.getenv("ZENDESK_ARTICLE_LOCALE")
73
73
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_ARTICLE")
@@ -79,7 +79,7 @@ def get_articles() -> list[dict]:
79
79
  return response.json()["articles"]
80
80
 
81
81
 
82
- def search_articles(query: str) -> list[dict]:
82
+ def zendesk_search_articles(query: str) -> list[dict]:
83
83
  """Search Zendesk Help Center articles using a query string."""
84
84
  ZENDESK_LOCALE = os.getenv("ZENDESK_ARTICLE_LOCALE") # e.g., "en-us"
85
85
  ZENDESK_SUBDOMAIN = os.getenv("ZENDESK_SUBDOMAIN_ARTICLE")
@@ -1,9 +1,14 @@
1
1
  # src/flock/webapp/app/api/execution.py
2
2
  import json
3
3
  from pathlib import Path
4
- from typing import TYPE_CHECKING
4
+ from typing import TYPE_CHECKING, Any
5
5
 
6
- from fastapi import APIRouter, Depends, Request # Added Depends
6
+ from fastapi import ( # Ensure Form and HTTPException are imported
7
+ APIRouter,
8
+ Depends,
9
+ Form,
10
+ Request,
11
+ )
7
12
  from fastapi.responses import HTMLResponse
8
13
  from fastapi.templating import Jinja2Templates
9
14
 
@@ -11,6 +16,12 @@ if TYPE_CHECKING:
11
16
  from flock.core.flock import Flock
12
17
 
13
18
 
19
+ from flock.core.flock import (
20
+ Flock as ConcreteFlock, # For creating Flock instance
21
+ )
22
+ from flock.core.logging.logging import (
23
+ get_logger as get_flock_logger, # For logging within the new endpoint
24
+ )
14
25
  from flock.core.util.spliter import parse_schema
15
26
 
16
27
  # Import the dependency to get the current Flock instance
@@ -97,19 +108,23 @@ async def htmx_run_flock(
97
108
  # We retrieve current_flock from app_state inside the service or before calling if needed for validation here
98
109
 
99
110
  # It's better to get flock from app_state here to validate before calling service
100
- current_flock: Flock | None = getattr(request.app.state, 'flock_instance', None)
111
+ current_flock_from_state: Flock | None = getattr(request.app.state, 'flock_instance', None)
112
+ logger = get_flock_logger("webapp.execution.regular_run") # Standard logger
101
113
 
102
- if not current_flock:
114
+ if not current_flock_from_state:
115
+ logger.error("HTMX Run (Regular): No Flock loaded in app_state.")
103
116
  return HTMLResponse("<p class='error'>No Flock loaded to run.</p>")
104
117
 
105
118
  form_data = await request.form()
106
119
  start_agent_name = form_data.get("start_agent_name")
107
120
 
108
121
  if not start_agent_name:
122
+ logger.warning("HTMX Run (Regular): Starting agent not selected.")
109
123
  return HTMLResponse("<p class='error'>Starting agent not selected.</p>")
110
124
 
111
- agent = current_flock.agents.get(start_agent_name)
125
+ agent = current_flock_from_state.agents.get(start_agent_name)
112
126
  if not agent:
127
+ logger.error(f"HTMX Run (Regular): Agent '{start_agent_name}' not found in Flock '{current_flock_from_state.name}'.")
113
128
  return HTMLResponse(
114
129
  f"<p class='error'>Agent '{start_agent_name}' not found in the current Flock.</p>"
115
130
  )
@@ -119,46 +134,131 @@ async def htmx_run_flock(
119
134
  try:
120
135
  parsed_spec = parse_schema(agent.input)
121
136
  for name, type_str, _ in parsed_spec:
122
- form_field_name = f"agent_input_{name}" # Matches the name in _dynamic_input_form_content.html
137
+ form_field_name = f"agent_input_{name}"
123
138
  raw_value = form_data.get(form_field_name)
124
-
125
139
  if raw_value is None and "bool" in type_str.lower(): inputs[name] = False; continue
126
140
  if raw_value is None: inputs[name] = None; continue
127
-
128
- if "int" in type_str.lower():
129
- try: inputs[name] = int(raw_value)
130
- except ValueError: return HTMLResponse(f"<p class='error'>Invalid integer for '{name}'.</p>")
131
- elif "float" in type_str.lower():
132
- try: inputs[name] = float(raw_value)
133
- except ValueError: return HTMLResponse(f"<p class='error'>Invalid float for '{name}'.</p>")
134
- elif "bool" in type_str.lower():
135
- inputs[name] = raw_value.lower() in ["true", "on", "1", "yes"]
136
- elif "list" in type_str.lower() or "dict" in type_str.lower():
137
- try: inputs[name] = json.loads(raw_value)
138
- except json.JSONDecodeError: return HTMLResponse(f"<p class='error'>Invalid JSON for '{name}'.</p>")
141
+ if "int" in type_str.lower(): inputs[name] = int(raw_value)
142
+ elif "float" in type_str.lower(): inputs[name] = float(raw_value)
143
+ elif "bool" in type_str.lower(): inputs[name] = raw_value.lower() in ["true", "on", "1", "yes"]
144
+ elif "list" in type_str.lower() or "dict" in type_str.lower(): inputs[name] = json.loads(raw_value)
139
145
  else: inputs[name] = raw_value
140
- except Exception as e:
141
- return HTMLResponse(f"<p class='error'>Error processing inputs for {start_agent_name}: {e}</p>")
146
+ except ValueError as ve:
147
+ logger.error(f"HTMX Run (Regular): Input parsing error for agent '{start_agent_name}': {ve}", exc_info=True)
148
+ return HTMLResponse(f"<p class='error'>Invalid input format: {ve!s}</p>")
149
+ except Exception as e_parse:
150
+ logger.error(f"HTMX Run (Regular): Error processing inputs for '{start_agent_name}': {e_parse}", exc_info=True)
151
+ return HTMLResponse(f"<p class='error'>Error processing inputs for {start_agent_name}: {e_parse}</p>")
142
152
 
143
153
  try:
144
- # Pass request.app.state to the service function
154
+ logger.info(f"HTMX Run (Regular): Executing agent '{start_agent_name}' from Flock '{current_flock_from_state.name}' via service.")
145
155
  result_data = await run_current_flock_service(start_agent_name, inputs, request.app.state)
156
+ except Exception as e_run:
157
+ logger.error(f"HTMX Run (Regular): Error during service execution for '{start_agent_name}': {e_run}", exc_info=True)
158
+ return templates.TemplateResponse(
159
+ "partials/_results_display.html",
160
+ {"request": request, "result_data": {"error": f"Error during execution: {e_run!s}"}},
161
+ )
162
+
163
+ # Process and serialize result for template
164
+ try:
165
+ processed_result = pydantic_to_dict(result_data)
166
+ try: json.dumps(processed_result)
167
+ except (TypeError, ValueError) as ser_e:
168
+ processed_result = f"Error: Result contains non-serializable data: {ser_e!s}\nOriginal result preview: {str(result_data)[:500]}..."
169
+ logger.warning(f"HTMX Run (Regular): Serialization issue: {processed_result}")
170
+ except Exception as proc_e:
171
+ processed_result = f"Error: Failed to process result data: {proc_e!s}"
172
+ logger.error(f"HTMX Run (Regular): Result processing error: {processed_result}", exc_info=True)
173
+
174
+ return templates.TemplateResponse(
175
+ "partials/_results_display.html",
176
+ {"request": request, "result_data": processed_result},
177
+ )
178
+
179
+
180
+ # --- NEW ENDPOINT FOR SHARED RUNS ---
181
+ @router.post("/htmx/run-shared", response_class=HTMLResponse)
182
+ async def htmx_run_shared_flock(
183
+ request: Request,
184
+ share_id: str = Form(...), # Expect share_id from the form
185
+ # flock_definition_str: str = Form(...), # No longer needed
186
+ # start_agent_name from form is still required implicitly via form_data.get()
187
+ # No DI for current_flock, as we are using the one from app.state via share_id
188
+ ):
189
+ shared_logger = get_flock_logger("webapp.execution.shared_run_stateful")
190
+ form_data = await request.form()
191
+ start_agent_name = form_data.get("start_agent_name")
146
192
 
193
+ if not start_agent_name:
194
+ shared_logger.warning("HTMX Run Shared (Stateful): Starting agent name not provided in form.")
195
+ return HTMLResponse("<p class='error'>Critical error: Starting agent name missing from shared run form.</p>")
196
+
197
+ shared_logger.info(f"HTMX Run Shared (Stateful): Attempting to execute agent '{start_agent_name}' using pre-loaded Flock for share_id '{share_id}'.")
198
+
199
+ inputs = {}
200
+ result_data: Any = None
201
+ temp_flock: ConcreteFlock | None = None
202
+
203
+ try:
204
+ # Retrieve the pre-loaded Flock instance from app.state
205
+ shared_flocks_store = getattr(request.app.state, 'shared_flocks', {})
206
+ temp_flock = shared_flocks_store.get(share_id)
207
+
208
+ if not temp_flock:
209
+ shared_logger.error(f"HTMX Run Shared: Flock instance for share_id '{share_id}' not found in app.state.")
210
+ return HTMLResponse(f"<p class='error'>Shared session not found or expired. Please try accessing the shared link again.</p>")
211
+
212
+ shared_logger.info(f"HTMX Run Shared: Successfully retrieved pre-loaded Flock '{temp_flock.name}' for agent '{start_agent_name}' (share_id: {share_id}).")
213
+
214
+ agent = temp_flock.agents.get(start_agent_name)
215
+ if not agent:
216
+ shared_logger.error(f"HTMX Run Shared: Agent '{start_agent_name}' not found in shared Flock '{temp_flock.name}'.")
217
+ return HTMLResponse(f"<p class='error'>Agent '{start_agent_name}' not found in the provided shared Flock definition.</p>")
218
+
219
+ # Parse inputs for the agent from the temporary flock
220
+ if agent.input and isinstance(agent.input, str):
221
+ parsed_spec = parse_schema(agent.input)
222
+ for name, type_str, _ in parsed_spec:
223
+ form_field_name = f"agent_input_{name}" # Name used in shared_run_page.html
224
+ raw_value = form_data.get(form_field_name)
225
+
226
+ # Input type conversion (consistent with the other endpoint)
227
+ if raw_value is None and "bool" in type_str.lower(): inputs[name] = False; continue
228
+ if raw_value is None: inputs[name] = None; continue
229
+ if "int" in type_str.lower(): inputs[name] = int(raw_value)
230
+ elif "float" in type_str.lower(): inputs[name] = float(raw_value)
231
+ elif "bool" in type_str.lower(): inputs[name] = raw_value.lower() in ["true", "on", "1", "yes"]
232
+ elif "list" in type_str.lower() or "dict" in type_str.lower(): inputs[name] = json.loads(raw_value)
233
+ else: inputs[name] = raw_value
234
+
235
+ shared_logger.info(f"HTMX Run Shared: Executing agent '{start_agent_name}' in pre-loaded Flock '{temp_flock.name}'. Inputs: {list(inputs.keys())}")
236
+ result_data = await temp_flock.run_async(start_agent=start_agent_name, input=inputs, box_result=False)
237
+ shared_logger.info(f"HTMX Run Shared: Agent '{start_agent_name}' executed. Result keys: {list(result_data.keys()) if isinstance(result_data, dict) else 'N/A'}")
238
+
239
+ # Process and serialize result for template (same logic as other endpoint)
147
240
  try:
148
- result_data = pydantic_to_dict(result_data)
149
- try: json.dumps(result_data)
241
+ processed_result = pydantic_to_dict(result_data)
242
+ try: json.dumps(processed_result)
150
243
  except (TypeError, ValueError) as ser_e:
151
- result_data = f"Error: Result contains non-serializable data: {ser_e!s}\nOriginal result: {result_data!s}"
244
+ processed_result = f"Error: Result contains non-serializable data: {ser_e!s}\nOriginal result preview: {str(result_data)[:500]}..."
245
+ shared_logger.warning(f"HTMX Run Shared: Serialization issue: {processed_result}")
152
246
  except Exception as proc_e:
153
- result_data = f"Error: Failed to process result data: {proc_e!s}"
247
+ processed_result = f"Error: Failed to process result data: {proc_e!s}"
248
+ shared_logger.error(f"HTMX Run Shared: Result processing error: {processed_result}", exc_info=True)
154
249
 
155
250
  return templates.TemplateResponse(
156
251
  "partials/_results_display.html",
157
- {"request": request, "result_data": result_data},
252
+ {"request": request, "result_data": processed_result},
158
253
  )
159
- except Exception as e:
160
- error_message = f"Error during execution: {e!s}"
254
+
255
+ except ValueError as ve: # Catch specific input parsing errors
256
+ shared_logger.error(f"HTMX Run Shared: Input parsing error for agent '{start_agent_name}': {ve}", exc_info=True)
257
+ return HTMLResponse(f"<p class='error'>Invalid input format: {ve!s}</p>")
258
+ except Exception as e_general:
259
+ error_message = f"An unexpected error occurred during shared execution: {e_general!s}"
260
+ shared_logger.error(f"HTMX Run Shared: General error for agent '{start_agent_name}': {e_general}", exc_info=True)
161
261
  return templates.TemplateResponse(
162
262
  "partials/_results_display.html",
163
- {"request": request, "result_data": error_message}, # Display error in the same partial
263
+ {"request": request, "result_data": {"error": error_message } },
164
264
  )