gofannon 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,144 @@
+ import json
+ import logging
+ from typing import List, Dict, Any
+
+ from ..base import WorkflowContext, ToolResult
+ from ..config import FunctionRegistry
+
+ logger = logging.getLogger(__name__)
+
+
+ class FunctionOrchestrator:
+     def __init__(self, llm_client, tool_configs=None):
+         self.logger = logging.getLogger(f"{__name__}.FunctionOrchestrator")
+         self.llm = llm_client
+         self.available_functions = FunctionRegistry.get_tools()
+         self.tool_configs = tool_configs or {}
+         self.function_map = self._build_function_map()
+         self.logger.debug("Available functions in orchestrator: " + ', '.join(
+             [f['function']['name'] for f in self.available_functions]))
+
+     def _build_function_map(self):
+         return {
+             func_def['function']['name']: (
+                 FunctionRegistry._tools[func_def['function']['name']],
+                 self.tool_configs.get(func_def['function']['name'], {})
+             ) for func_def in self.available_functions
+         }
+
+     def _instantiate_tool(self, function_name):
+         tool_class, config = self.function_map[function_name]
+         return tool_class(**config)
+
+     def execute_workflow(self, user_query: str, model_name: str, max_steps=5):
+         self.logger.debug("Starting workflow execution with query: %s", user_query)
+         messages = [{"role": "user", "content": user_query}]
+         final_answer = None
+
+         for _ in range(max_steps):
+             # Get LLM response
+             response = self.llm.chat.completions.create(
+                 model=model_name,
+                 messages=messages,
+                 tools=self.available_functions
+             )
+             msg = response.choices[0].message
+             messages.append(msg)
+
+             # Check for a direct answer first
+             if msg.content and not msg.tool_calls:
+                 final_answer = msg.content
+                 break
+
+             # Process tool calls, if any
+             if msg.tool_calls:
+                 for tool_call in msg.tool_calls:
+                     function_name = tool_call.function.name
+                     function_args = json.loads(tool_call.function.arguments)
+
+                     # Instantiate the tool with its configuration and execute it
+                     tool = self._instantiate_tool(function_name)
+                     result = tool.fn(**function_args)
+
+                     # Store the result in the conversation
+                     messages.append({
+                         "role": "tool",
+                         "tool_call_id": tool_call.id,
+                         "content": str(result),
+                     })
+             else:
+                 break  # Exit if no tools were called and no content was returned
+
+         # Final synthesis step
+         if not final_answer:
+             synthesis_prompt = '''Based on the tool outputs above,
+ provide a complete natural language answer with the final numerical result
+ in bold. Follow this format:
+
+ **Final Answer**: [result in bold]
+
+ With supporting calculations shown.'''
+
+             messages.append({"role": "user", "content": synthesis_prompt})
+
+             response = self.llm.chat.completions.create(
+                 model=model_name,
+                 messages=messages
+             )
+             final_answer = response.choices[0].message.content
+
+         return {
+             "conversation": messages,
+             "final_answer": final_answer
+         }
+
+
+ class ToolChain:
+     def __init__(self, tools: List[Any], context: WorkflowContext):
+         self.tools = tools
+         self.context = context
+
+     def _resolve_input(self, input_template: str) -> Any:
+         if not input_template:
+             return None
+
+         if input_template.startswith('{{') and input_template.endswith('}}'):
+             key = input_template[2:-2].strip()
+             return self.context.data.get(key)
+         return input_template
+
+     def execute(self, initial_input: Dict[str, Any] = None) -> ToolResult:
+         self.context.data.update(initial_input or {})
+
+         for tool in self.tools:
+             tool_name = tool.__class__.__name__
+
+             # Resolve inputs from context
+             # NOTE: this assumes each tool's 'parameters' maps argument names to
+             # template strings such as '{{key}}'
+             resolved_inputs = {
+                 k: self._resolve_input(v)
+                 for k, v in tool.definition.get('function', {}).get('parameters', {}).items()
+             }
+
+             # Execute tool
+             result = tool.execute(self.context, **resolved_inputs)
+
+             if not result.success:
+                 return result
+
+             # Store output in context
+             output_key = f"{tool_name}_output"
+             self.context.data[output_key] = result.output
+
+             # Save checkpoint
+             self.context.save_checkpoint(f"after_{tool_name}")
+
+         return ToolResult(
+             success=True,
+             output=self.context.data
+         )
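
For reference, a minimal driving script for `FunctionOrchestrator` might look like the following. This is a sketch, not part of the package: it assumes tools have already been registered with `FunctionRegistry`, and the endpoint, API key, and model name are placeholders taken from the sample configuration elsewhere in this release.

```python
# Hypothetical usage sketch for FunctionOrchestrator (not shipped with the package).
# Assumes the endpoint speaks the OpenAI chat-completions protocol.
from openai import OpenAI

llm = OpenAI(api_key="YOUR_API_KEY_HERE",
             base_url="https://api.deepinfra.com/v1/openai")
orchestrator = FunctionOrchestrator(llm)

result = orchestrator.execute_workflow(
    user_query="What is 17 * 23, minus 100?",
    model_name="Qwen/Qwen2.5-72B-Instruct",
    max_steps=5,
)
print(result["final_answer"])
```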
@@ -0,0 +1,42 @@
+ from typing import Optional
+
+ import firebase_admin
+ from firebase_admin import credentials, firestore
+
+ from . import WorkflowContext
+
+
+ class FirebaseWrapper:
+     _initialized = False
+
+     @classmethod
+     def initialize(cls, config_path: Optional[str] = None):
+         if not cls._initialized:
+             if config_path:
+                 cred = credentials.Certificate(config_path)
+             else:
+                 cred = credentials.ApplicationDefault()
+
+             firebase_admin.initialize_app(cred)
+             cls._initialized = True
+
+     @classmethod
+     def get_context(cls, doc_id: str) -> WorkflowContext:
+         db = firestore.client()
+         doc_ref = db.collection('workflows').document(doc_id)
+         doc = doc_ref.get()
+
+         if doc.exists:
+             data = doc.to_dict()
+             context = WorkflowContext(firebase_config=True)
+             context.data = data.get('data', {})
+             context.execution_log = data.get('execution_log', [])
+             return context
+         return WorkflowContext(firebase_config=True)
+
+     @classmethod
+     def save_context(cls, doc_id: str, context: WorkflowContext):
+         db = firestore.client()
+         doc_ref = db.collection('workflows').document(doc_id)
+         doc_ref.set({
+             'data': context.data,
+             'execution_log': context.execution_log,
+             'timestamp': firestore.SERVER_TIMESTAMP
+         })
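
A typical round trip through `FirebaseWrapper` might look like this sketch. The credentials path and document ID are placeholders; `get_context` returns a fresh `WorkflowContext` when the document does not exist yet.

```python
# Hypothetical round trip (sketch; paths and IDs are placeholders).
FirebaseWrapper.initialize(config_path="service-account.json")  # omit to use ADC

context = FirebaseWrapper.get_context("workflow-123")  # new context if doc is absent
context.data["user_query"] = "example input"
FirebaseWrapper.save_context("workflow-123", context)  # persists data + execution_log
```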
@@ -0,0 +1,3 @@
+ from .sequential_cot import SequentialCoT
+ from .hierarchical_cot import HierarchicalCoT
+ from .tree_of_thought import TreeOfThought
@@ -0,0 +1,47 @@
+ from abc import ABC, abstractmethod
+
+ from openai import OpenAI
+
+ from gofannon.base import BaseTool
+
+ sample_depth_chart = [
+     {'model_name': "Qwen/Qwen2.5-72B-Instruct",
+      'base_url': "https://api.deepinfra.com/v1/openai",
+      'api_key': "YOUR_API_KEY_HERE",
+      'temperature': 0.3,
+      'prompt_appendix': '\n\nThink carefully.'
+      }
+ ]
+
+
+ class ReasoningTool(BaseTool, ABC):
+     def __init__(self, depth_chart=None):
+         super().__init__()
+         # Avoid a mutable default argument; fall back to the sample chart
+         self.depth_chart = depth_chart if depth_chart is not None else sample_depth_chart
+         self.error_context = []
+         self.jsonify_prompt_s = "Your output should be a properly formatted JSON only. No preamble, explanations, or markdown ticks (```)."
+
+     @abstractmethod
+     def fn(self, *args, **kwargs):
+         pass
+
+     def create_openai_like_client(self, level: int):
+         return OpenAI(
+             api_key=self.depth_chart[level]['api_key'],
+             base_url=self.depth_chart[level]['base_url']
+         )
+
+     def get_response(self, level: int, messages):
+         return self.create_openai_like_client(level).chat.completions.create(
+             model=self.depth_chart[level]['model_name'],
+             messages=messages,
+             temperature=self.depth_chart[level]['temperature']
+         )
+
+     def get_debug_info(self):
+         """Get current debugging information."""
+         return {
+             "error_context": self.error_context,
+             "depth_chart_config": self.depth_chart
+         }
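
The depth chart is a list indexed by reasoning level: `create_openai_like_client(level)` and `get_response(level, ...)` read the model, endpoint, and temperature from that index. A two-level configuration might look like the sketch below; the model names, URL, and key are illustrative placeholders following the shape of `sample_depth_chart`.

```python
# Sketch of a two-level depth chart: level 0 drafts, level 1 refines.
# All values are illustrative placeholders.
two_level_chart = [
    {'model_name': "Qwen/Qwen2.5-72B-Instruct",
     'base_url': "https://api.deepinfra.com/v1/openai",
     'api_key': "YOUR_API_KEY_HERE",
     'temperature': 0.3,
     'prompt_appendix': '\n\nThink carefully.'},
    {'model_name': "Qwen/Qwen2.5-7B-Instruct",
     'base_url': "https://api.deepinfra.com/v1/openai",
     'api_key': "YOUR_API_KEY_HERE",
     'temperature': 0.0,
     'prompt_appendix': '\n\nBe concise.'},
]
```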
@@ -0,0 +1,272 @@
+ import json
+ import logging
+
+ from openai import APIError
+
+ from .base import ReasoningTool
+ from ..config import FunctionRegistry
+
+ logger = logging.getLogger(__name__)
+
+
+ @FunctionRegistry.register
+ class HierarchicalCoT(ReasoningTool):
+     def __init__(self, depth_chart=None):
+         super().__init__(depth_chart=depth_chart)
+         self.name = "hierarchical_cot"
+         self.depth_chart = depth_chart or []
+         self.error_context = []  # Track error locations
+
+     @property
+     def definition(self):
+         return {
+             "type": "function",
+             "function": {
+                 "name": self.name,
+                 "description": "Hierarchical Chain-of-Thought reasoning with outline generation and section expansion",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "prompt": {
+                             "type": "string",
+                             "description": "The problem prompt to process"
+                         },
+                         "depth": {
+                             "type": "integer",
+                             "description": "Depth of hierarchy (default: 2)",
+                             "default": 2
+                         }
+                     },
+                     "required": ["prompt"]
+                 }
+             }
+         }
+
+     def fn(self, prompt, depth=2):
+         self.error_context = []  # Reset error tracking
+         if depth > len(self.depth_chart):
+             return {"error": f"Requested depth {depth} exceeds configured model levels {len(self.depth_chart)}"}
+
+         try:
+             if depth < 1:
+                 raise ValueError("Depth must be at least 1")
+
+             outline = self._generate_outline(prompt, depth)
+             if 'error' in outline:
+                 return outline
+
+             return self._expand_sections(outline, current_depth=1, max_depth=depth)
+
+         except Exception as e:
+             logger.error(f"Critical failure: {str(e)}")
+             return {
+                 "error": "HierarchicalCoT processing failed",
+                 "context": self.error_context,
+                 "exception": str(e)
+             }
+
+     def _generate_outline(self, prompt, depth):
+         try:
+             outline_prompt = f"""Organize this problem into a {depth}-level hierarchical structure:
+ {prompt}
+
+ Your output should be a properly formatted JSON only. No preamble, explanations, or markdown ticks (```).
+ Return JSON format with keys 'title' and 'sections' (array of section objects).
+ """
+             if depth > 1:
+                 outline_prompt += "Each section should have 'title' and 'sections'. "
+
+             messages = [{"role": "user", "content": outline_prompt}]
+             response = self.get_response(level=0, messages=messages)
+
+             try:
+                 structure = json.loads(response.choices[0].message.content)
+             except json.JSONDecodeError as e:
+                 self.error_context.append({
+                     "stage": "outline_parsing",
+                     "response": response.choices[0].message.content,
+                     "error": str(e)
+                 })
+                 return {"error": "Invalid JSON structure in outline"}
+
+             # Validate outline structure
+             if not isinstance(structure, dict):
+                 self.error_context.append({
+                     "stage": "outline_validation",
+                     "structure_type": type(structure).__name__,
+                     "expected_type": "dict"
+                 })
+                 return {"error": "Outline structure is not a dictionary"}
+
+             required_keys = {'title', 'sections'}
+             if not required_keys.issubset(structure.keys()):
+                 self.error_context.append({
+                     "stage": "outline_validation",
+                     "missing_keys": list(required_keys - structure.keys())
+                 })
+                 return {"error": f"Outline missing required keys: {required_keys}"}
+
+             return structure
+
+         except APIError as e:
+             self.error_context.append({
+                 "stage": "outline_generation",
+                 "error_type": "APIError",
+                 "status_code": e.status_code,
+                 "message": e.message
+             })
+             return {"error": "API failure during outline generation"}
+
+         except Exception as e:
+             self.error_context.append({
+                 "stage": "outline_generation",
+                 "error_type": type(e).__name__,
+                 "message": str(e)
+             })
+             return {"error": "Unexpected error during outline generation"}
+
+     def _expand_sections(self, node, current_depth, max_depth, path=None):
+         if path is None:
+             path = []
+
+         current_path = path + [node.get('title', 'Untitled Section')]
+
+         if current_depth >= max_depth:
+             return node
+
+         if current_depth >= len(self.depth_chart):
+             self.error_context.append({
+                 "stage": "depth_validation",
+                 "current_depth": current_depth,
+                 "max_configured_depth": len(self.depth_chart) - 1
+             })
+             raise ValueError("Current depth exceeds configured model depth chart")
+
+         try:
+             client = self.create_openai_like_client(current_depth)
+             expanded = node.copy()
+
+             if 'sections' in node:
+                 for i, section in enumerate(node['sections']):
+                     logger.debug(f"Expanding section {i + 1}/{len(node['sections'])} at depth {current_depth}")
+
+                     next_depth = current_depth + 1
+                     is_final_depth = next_depth == max_depth
+
+                     # Generate the appropriate prompt for this depth
+                     if is_final_depth:
+                         expansion_prompt = f"""Expand this section within the context of: {" -> ".join(current_path)}
+
+ Section to expand: {section['title']}
+ Current depth: {current_depth}/{max_depth}
+
+ Provide detailed content for this section. The content should be a concise explanation.
+ Your output should be a properly formatted JSON only with a 'content' field.
+ No preamble, explanations, or markdown ticks (```). """
+                     else:
+                         expansion_prompt = f"""Expand this section within the context of: {" -> ".join(current_path)}
+
+ Section to expand: {section['title']}
+ Current depth: {current_depth}/{max_depth}
+
+ Provide detailed sub-sections in JSON format with 'title' and 'sections'.
+ Your output should be a properly formatted JSON only. No preamble, explanations, or markdown ticks (```). """
+
+                     try:
+                         response = client.chat.completions.create(
+                             model=self.depth_chart[current_depth]['model_name'],
+                             messages=[{"role": "user", "content": expansion_prompt}],
+                             temperature=self.depth_chart[current_depth]['temperature']
+                         )
+                     except APIError as e:
+                         self.error_context.append({
+                             "stage": f"section_expansion_depth_{current_depth}",
+                             "section_index": i,
+                             "section_title": section.get('title'),
+                             "error_type": "APIError",
+                             "status_code": e.status_code,
+                             "message": e.message
+                         })
+                         continue
+
+                     if not response.choices:
+                         self.error_context.append({
+                             "stage": f"section_expansion_depth_{current_depth}",
+                             "section_index": i,
+                             "error": "Empty API response"
+                         })
+                         continue
+
+                     try:
+                         expanded_section = json.loads(response.choices[0].message.content)
+                     except json.JSONDecodeError as e:
+                         self.error_context.append({
+                             "stage": f"section_parsing_depth_{current_depth}",
+                             "section_index": i,
+                             "response": response.choices[0].message.content,
+                             "error": str(e)
+                         })
+                         continue
+
+                     # Handle content generation for the final depth
+                     if is_final_depth:
+                         if 'content' not in expanded_section:
+                             self.error_context.append({
+                                 "stage": f"content_validation_depth_{current_depth}",
+                                 "section_index": i,
+                                 "response": expanded_section
+                             })
+                             continue
+
+                         # Update the section with content and remove subsections
+                         expanded['sections'][i]['content'] = expanded_section['content']
+                         if 'sections' in expanded['sections'][i]:
+                             del expanded['sections'][i]['sections']
+                     else:
+                         # Validate the section structure and recursively expand
+                         if not isinstance(expanded_section, dict) or 'title' not in expanded_section:
+                             self.error_context.append({
+                                 "stage": f"section_validation_depth_{current_depth}",
+                                 "section_index": i,
+                                 "response_structure": type(expanded_section).__name__
+                             })
+                             continue
+
+                         expanded['sections'][i] = self._expand_sections(
+                             expanded_section,
+                             next_depth,
+                             max_depth,
+                             current_path
+                         )
+
+             return expanded
+
+         except Exception as e:
+             self.error_context.append({
+                 "stage": f"section_expansion_depth_{current_depth}",
+                 "error_type": type(e).__name__,
+                 "message": str(e),
+                 "node": (node.get('title') or str(node))[:100]
+             })
+             raise
+
+     def get_debug_info(self):
+         return {
+             "error_context": self.error_context,
+             "depth_chart_config": self.depth_chart
+         }
+
+     def to_markdown(self, output):
+         def _to_markdown(node, level=1):
+             markdown = ""
+
+             if 'title' in node:
+                 markdown += f"{'#' * level} {node['title']}\n\n"
+
+             if 'content' in node:
+                 markdown += f"{node['content']}\n\n"
+
+             if 'sections' in node:
+                 for section in node['sections']:
+                     markdown += _to_markdown(section, level + 1)
+
+             return markdown
+         return _to_markdown(output)
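
A usage sketch for `HierarchicalCoT` follows. Note that `fn()` rejects any `depth` greater than `len(depth_chart)`, so a depth-2 expansion needs at least two configured levels; `two_level_chart` refers to the hypothetical chart sketched earlier.

```python
# Hypothetical usage sketch: depth must not exceed the number of chart levels.
cot = HierarchicalCoT(depth_chart=two_level_chart)
result = cot.fn(prompt="Design a caching layer for a read-heavy API", depth=2)

if "error" in result:
    print(cot.get_debug_info())     # error_context pinpoints the failing stage
else:
    print(cot.to_markdown(result))  # nested titles become #/## headings
```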
@@ -0,0 +1,77 @@
+ import json
+ import logging
+
+ from gofannon.reasoning.base import ReasoningTool
+ from ..config import FunctionRegistry
+
+ logger = logging.getLogger(__name__)
+
+
+ @FunctionRegistry.register
+ class SequentialCoT(ReasoningTool):
+     def __init__(self, depth_chart=None, steps=5):
+         super().__init__(depth_chart=depth_chart)
+         self.name = "sequential_cot"
+         self.depth_chart = depth_chart or []
+         self.steps = steps
+
+     @property
+     def definition(self):
+         return {
+             "type": "function",
+             "function": {
+                 "name": self.name,
+                 "description": "Generate a series of steps required to solve a problem using Chain-of-Thought reasoning.",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "prompt": {
+                             "type": "string",
+                             "description": "The prompt to generate steps for."
+                         },
+                         "steps": {
+                             "type": "integer",
+                             "description": "How many steps to take."
+                         }
+                     },
+                     "required": ["prompt", "steps"]
+                 }
+             }
+         }
+
+     def fn(self, prompt, steps):
+         logger.debug(f"Starting SequentialCoT with {steps} steps")
+         modified_prompt = prompt + f"""
+
+ Given the prompt above, return a series of {steps} steps required to arrive at an answer.
+ Do not attempt to compute the answer now, only return the series of steps
+ required to solve the problem, as a series of prompts to future LLM calls. Your
+ response should be a properly formatted JSON with one field `steps` which contains
+ an array of strings, where each string is a step. Do not include any explanations
+ or ticks to indicate it is a markdown code block."""
+
+         messages = [{"role": "user", "content": modified_prompt}]
+
+         response = self.get_response(level=0, messages=messages)
+         messages = [
+             {'role': 'user', 'content': prompt},
+             {'role': 'assistant', 'content': response.choices[0].message.content}
+         ]
+
+         try:
+             # Use a distinct name so the `steps` argument is not shadowed
+             step_list = json.loads(response.choices[0].message.content)["steps"]
+             step_output = []
+             for i, step in enumerate(step_list):
+                 logger.debug(f"Executing step {i + 1}/{len(step_list)}: '{step}'...")
+                 messages.append({'role': 'user', 'content': step})
+
+                 response = self.get_response(level=1, messages=messages)
+                 step_output.append(response.choices[0].message.content)
+                 messages.append({'role': 'assistant', 'content': response.choices[0].message.content})
+             messages.append({'role': 'user', 'content': self.depth_chart[2]['prompt_appendix']})
+             logger.debug('Synthesizing response')
+             response = self.get_response(level=2, messages=messages)
+             return response
+         except json.JSONDecodeError:
+             return {"error": "Failed to decode the response as JSON."}
+
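
A closing sketch for `SequentialCoT`: `fn()` plans at level 0, executes each step at level 1, and synthesizes at level 2 (it also reads `depth_chart[2]['prompt_appendix']`), so the chart needs at least three entries. `three_level_chart` is a hypothetical chart in the same shape as the two-level example above.

```python
# Hypothetical usage sketch: the depth chart needs at least three entries,
# since planning, step execution, and synthesis use levels 0, 1, and 2.
cot = SequentialCoT(depth_chart=three_level_chart)
response = cot.fn(prompt="Estimate the annual energy use of a small bakery", steps=4)
print(response.choices[0].message.content)  # synthesized final answer
```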