mfcli 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mfcli/.env.example +72 -0
- mfcli/__init__.py +0 -0
- mfcli/agents/__init__.py +0 -0
- mfcli/agents/controller/__init__.py +0 -0
- mfcli/agents/controller/agent.py +19 -0
- mfcli/agents/controller/config.yaml +27 -0
- mfcli/agents/controller/tools.py +42 -0
- mfcli/agents/tools/general.py +118 -0
- mfcli/alembic/env.py +61 -0
- mfcli/alembic/script.py.mako +28 -0
- mfcli/alembic/versions/6ccc0c7c397c_added_fields_to_pdf_parts_model.py +39 -0
- mfcli/alembic/versions/769019ef4870_added_gemini_file_path_to_pdf_part_model.py +33 -0
- mfcli/alembic/versions/7a2e3a779fdc_added_functional_block_and_component_.py +54 -0
- mfcli/alembic/versions/7d5adb2a47a7_added_pdf_parts_model.py +41 -0
- mfcli/alembic/versions/7fcb7d6a5836_init.py +167 -0
- mfcli/alembic/versions/e0f2b5765c72_added_cascade_delete_for_models_that_.py +32 -0
- mfcli/alembic.ini +147 -0
- mfcli/cli/__init__.py +0 -0
- mfcli/cli/dependencies.py +59 -0
- mfcli/cli/main.py +200 -0
- mfcli/client/__init__.py +0 -0
- mfcli/client/chroma_db.py +184 -0
- mfcli/client/docling.py +44 -0
- mfcli/client/gemini.py +252 -0
- mfcli/client/llama_parse.py +38 -0
- mfcli/client/vector_db.py +93 -0
- mfcli/constants/__init__.py +0 -0
- mfcli/constants/base_enum.py +18 -0
- mfcli/constants/directory_names.py +1 -0
- mfcli/constants/file_types.py +189 -0
- mfcli/constants/gemini.py +1 -0
- mfcli/constants/openai.py +6 -0
- mfcli/constants/pipeline_run_status.py +3 -0
- mfcli/crud/__init__.py +0 -0
- mfcli/crud/file.py +42 -0
- mfcli/crud/functional_blocks.py +26 -0
- mfcli/crud/netlist.py +18 -0
- mfcli/crud/pipeline_run.py +17 -0
- mfcli/crud/project.py +144 -0
- mfcli/digikey/__init__.py +0 -0
- mfcli/digikey/digikey.py +105 -0
- mfcli/main.py +5 -0
- mfcli/mcp/__init__.py +0 -0
- mfcli/mcp/configs/cline_mcp_settings.json +11 -0
- mfcli/mcp/configs/mfcli.mcp.json +7 -0
- mfcli/mcp/mcp_instance.py +6 -0
- mfcli/mcp/server.py +37 -0
- mfcli/mcp/state_manager.py +51 -0
- mfcli/mcp/tools/__init__.py +0 -0
- mfcli/mcp/tools/query_knowledgebase.py +108 -0
- mfcli/models/__init__.py +10 -0
- mfcli/models/base.py +10 -0
- mfcli/models/bom.py +71 -0
- mfcli/models/datasheet.py +10 -0
- mfcli/models/debug_setup.py +64 -0
- mfcli/models/file.py +43 -0
- mfcli/models/file_docket.py +94 -0
- mfcli/models/file_metadata.py +19 -0
- mfcli/models/functional_blocks.py +94 -0
- mfcli/models/llm_response.py +5 -0
- mfcli/models/mcu.py +97 -0
- mfcli/models/mcu_errata.py +26 -0
- mfcli/models/netlist.py +59 -0
- mfcli/models/pdf_parts.py +25 -0
- mfcli/models/pipeline_run.py +34 -0
- mfcli/models/project.py +27 -0
- mfcli/models/project_metadata.py +15 -0
- mfcli/pipeline/__init__.py +0 -0
- mfcli/pipeline/analysis/__init__.py +0 -0
- mfcli/pipeline/analysis/bom_netlist_mapper.py +28 -0
- mfcli/pipeline/analysis/generators/__init__.py +0 -0
- mfcli/pipeline/analysis/generators/bom/__init__.py +0 -0
- mfcli/pipeline/analysis/generators/bom/bom.py +74 -0
- mfcli/pipeline/analysis/generators/debug_setup/__init__.py +0 -0
- mfcli/pipeline/analysis/generators/debug_setup/debug_setup.py +71 -0
- mfcli/pipeline/analysis/generators/debug_setup/instructions.py +150 -0
- mfcli/pipeline/analysis/generators/functional_blocks/__init__.py +0 -0
- mfcli/pipeline/analysis/generators/functional_blocks/functional_blocks.py +93 -0
- mfcli/pipeline/analysis/generators/functional_blocks/instructions.py +34 -0
- mfcli/pipeline/analysis/generators/functional_blocks/validator.py +94 -0
- mfcli/pipeline/analysis/generators/generator.py +258 -0
- mfcli/pipeline/analysis/generators/generator_base.py +18 -0
- mfcli/pipeline/analysis/generators/mcu/__init__.py +0 -0
- mfcli/pipeline/analysis/generators/mcu/instructions.py +156 -0
- mfcli/pipeline/analysis/generators/mcu/mcu.py +84 -0
- mfcli/pipeline/analysis/generators/mcu_errata/__init__.py +1 -0
- mfcli/pipeline/analysis/generators/mcu_errata/instructions.py +77 -0
- mfcli/pipeline/analysis/generators/mcu_errata/mcu_errata.py +95 -0
- mfcli/pipeline/analysis/generators/summary/__init__.py +0 -0
- mfcli/pipeline/analysis/generators/summary/summary.py +47 -0
- mfcli/pipeline/classifier.py +93 -0
- mfcli/pipeline/data_enricher.py +15 -0
- mfcli/pipeline/extractor.py +34 -0
- mfcli/pipeline/extractors/__init__.py +0 -0
- mfcli/pipeline/extractors/pdf.py +12 -0
- mfcli/pipeline/parser.py +120 -0
- mfcli/pipeline/parsers/__init__.py +0 -0
- mfcli/pipeline/parsers/netlist/__init__.py +0 -0
- mfcli/pipeline/parsers/netlist/edif.py +93 -0
- mfcli/pipeline/parsers/netlist/kicad_legacy_net.py +326 -0
- mfcli/pipeline/parsers/netlist/kicad_spice.py +135 -0
- mfcli/pipeline/parsers/netlist/pads.py +185 -0
- mfcli/pipeline/parsers/netlist/protel.py +166 -0
- mfcli/pipeline/parsers/netlist/protel_detector.py +29 -0
- mfcli/pipeline/pipeline.py +470 -0
- mfcli/pipeline/preprocessors/__init__.py +0 -0
- mfcli/pipeline/preprocessors/user_guide.py +127 -0
- mfcli/pipeline/run_context.py +32 -0
- mfcli/pipeline/schema_mapper.py +89 -0
- mfcli/pipeline/sub_classifier.py +115 -0
- mfcli/utils/__init__.py +0 -0
- mfcli/utils/cline_rules.py +256 -0
- mfcli/utils/config.py +33 -0
- mfcli/utils/configurator.py +324 -0
- mfcli/utils/data_cleaner.py +114 -0
- mfcli/utils/datasheet_vectorizer.py +283 -0
- mfcli/utils/directory_manager.py +116 -0
- mfcli/utils/file_upload.py +298 -0
- mfcli/utils/files.py +16 -0
- mfcli/utils/http_requests.py +54 -0
- mfcli/utils/kb_lister.py +89 -0
- mfcli/utils/kb_remover.py +173 -0
- mfcli/utils/logger.py +28 -0
- mfcli/utils/mcp_configurator.py +394 -0
- mfcli/utils/migrations.py +18 -0
- mfcli/utils/orm.py +43 -0
- mfcli/utils/pdf_splitter.py +63 -0
- mfcli/utils/pre_uninstall.py +167 -0
- mfcli/utils/query_service.py +22 -0
- mfcli/utils/system_check.py +306 -0
- mfcli/utils/tools.py +98 -0
- mfcli/utils/vectorizer.py +28 -0
- mfcli-0.2.1.dist-info/METADATA +956 -0
- mfcli-0.2.1.dist-info/RECORD +138 -0
- mfcli-0.2.1.dist-info/WHEEL +5 -0
- mfcli-0.2.1.dist-info/entry_points.txt +4 -0
- mfcli-0.2.1.dist-info/licenses/LICENSE +21 -0
- mfcli-0.2.1.dist-info/top_level.txt +1 -0
mfcli/.env.example
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
# Multifactor ADK Backend Configuration
|
|
2
|
+
#
|
|
3
|
+
# This file contains the configuration settings for mfcli.
|
|
4
|
+
# Copy this file to ~/Multifactor/.env and fill in your API keys.
|
|
5
|
+
#
|
|
6
|
+
# Windows: C:\Users\<username>\Multifactor\.env
|
|
7
|
+
# macOS/Linux: ~/Multifactor/.env
|
|
8
|
+
#
|
|
9
|
+
# Or run: mfcli configure
|
|
10
|
+
# for an interactive setup wizard.
|
|
11
|
+
|
|
12
|
+
# ============================================================
|
|
13
|
+
# REQUIRED API KEYS
|
|
14
|
+
# ============================================================
|
|
15
|
+
|
|
16
|
+
# Google Gemini API Key
|
|
17
|
+
# Used for AI-powered document analysis and processing
|
|
18
|
+
# Get your key from: https://aistudio.google.com/app/apikey
|
|
19
|
+
google_api_key=your_google_api_key_here
|
|
20
|
+
|
|
21
|
+
# OpenAI API Key
|
|
22
|
+
# Used for generating embeddings for RAG (Retrieval-Augmented Generation)
|
|
23
|
+
# Get your key from: https://platform.openai.com/api-keys
|
|
24
|
+
# Note: Requires billing enabled on your OpenAI account
|
|
25
|
+
openai_api_key=your_openai_api_key_here
|
|
26
|
+
|
|
27
|
+
# LlamaParse API Key
|
|
28
|
+
# Used for advanced PDF parsing and text extraction
|
|
29
|
+
# Get your key from: https://cloud.llamaindex.ai/
|
|
30
|
+
llama_cloud_api_key=your_llamaparse_api_key_here
|
|
31
|
+
|
|
32
|
+
# DigiKey API Credentials
|
|
33
|
+
# Used for automatic datasheet downloads from DigiKey
|
|
34
|
+
# Get your credentials from: https://developer.digikey.com/
|
|
35
|
+
digikey_client_id=your_digikey_client_id_here
|
|
36
|
+
digikey_client_secret=your_digikey_client_secret_here
|
|
37
|
+
|
|
38
|
+
# ============================================================
|
|
39
|
+
# VECTOR DATABASE CONFIGURATION
|
|
40
|
+
# ============================================================
|
|
41
|
+
|
|
42
|
+
# Chunk size for document splitting (number of characters)
|
|
43
|
+
# Larger chunks provide more context but may dilute relevance
|
|
44
|
+
# Default: 1000
|
|
45
|
+
chunk_size=1000
|
|
46
|
+
|
|
47
|
+
# Overlap between chunks (number of characters)
|
|
48
|
+
# Helps maintain context across chunk boundaries
|
|
49
|
+
# Default: 200
|
|
50
|
+
chunk_overlap=200
|
|
51
|
+
|
|
52
|
+
# OpenAI embedding model to use
|
|
53
|
+
# Available models: text-embedding-3-small, text-embedding-3-large, text-embedding-ada-002
|
|
54
|
+
# Default: text-embedding-3-small (best balance of cost and quality)
|
|
55
|
+
embedding_model=text-embedding-3-small
|
|
56
|
+
|
|
57
|
+
# Embedding dimensions
|
|
58
|
+
# text-embedding-3-small: 1536
|
|
59
|
+
# text-embedding-3-large: 3072
|
|
60
|
+
# text-embedding-ada-002: 1536
|
|
61
|
+
embedding_dimensions=1536
|
|
62
|
+
|
|
63
|
+
# ============================================================
|
|
64
|
+
# OPTIONAL: AWS CONFIGURATION
|
|
65
|
+
# ============================================================
|
|
66
|
+
|
|
67
|
+
# AWS credentials for S3 storage (optional)
|
|
68
|
+
# Only needed if you want to store files in AWS S3
|
|
69
|
+
# aws_access_key_id=your_aws_access_key
|
|
70
|
+
# aws_secret_access_key=your_aws_secret_key
|
|
71
|
+
# aws_region=us-east-1
|
|
72
|
+
# s3_bucket_name=your_bucket_name
|
mfcli/__init__.py
ADDED
|
File without changes
|
mfcli/agents/__init__.py
ADDED
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from pathlib import Path

from google.adk.agents import Agent

from mfcli.agents.controller.tools import query_knowledgebase, list_projects
from mfcli.agents.tools.general import load_agent_config
from mfcli.pipeline.pipeline import run_pipeline

# Agent settings (name, model, description, instructions) live in the
# config.yaml that ships next to this module; load_agent_config caches it.
config_path = Path(__file__).parent / "config.yaml"
config = load_agent_config(config_path)

# Root controller agent: per its instructions it routes user requests either
# to the document-processing pipeline (run_pipeline) or to knowledgebase RAG
# queries (query_knowledgebase), using list_projects for project selection.
root_agent = Agent(
    name=config.name,
    model=config.model,
    description=config.description,
    instruction=config.instructions,
    tools=[run_pipeline, query_knowledgebase, list_projects],
    output_key="pipeline_run_output"
)
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
name: controller_agent
|
|
2
|
+
description: "Controller agent starts the document processing pipeline"
|
|
3
|
+
model: gemini-2.5-flash
|
|
4
|
+
instructions: |
|
|
5
|
+
You are the controller agent for an engineering document processing pipeline.
|
|
6
|
+
You have two main workflows:
|
|
7
|
+
|
|
8
|
+
1. DOCUMENT ANALYSIS WORKFLOW:
|
|
9
|
+
- The user asks you to analyze documents in a directory
|
|
10
|
+
- If the user does not specify the name of the project to analyze, run the list_projects tool and ask the user to choose it from the returned list first
|
|
11
|
+
- Display the list of projects neatly as a bulleted list
|
|
12
|
+
- Once the user has chosen a project, you MUST then run the run_pipeline tool to start the pipeline
|
|
13
|
+
- Respond to the user with the output from the pipeline
|
|
14
|
+
- You will receive a summary from the completed pipeline about the number of files which were successfully processed, the number that failed and the total
|
|
15
|
+
- You will also receive a list of errors, if any
|
|
16
|
+
- If there are any error messages analyze and format them for the user, along with file counts and file names
|
|
17
|
+
|
|
18
|
+
2. QUERY WORKFLOW:
|
|
19
|
+
- The user may ask you general engineering questions
|
|
20
|
+
- The user may also ask you questions about files which have already been processed
|
|
21
|
+
- If they ask these types of general questions you will use the query_knowledgebase tool to perform RAG and respond to the user.
|
|
22
|
+
- You will need the name of the project; if the user has not provided one, run the list_projects tool and ask the user to choose it from the returned list first
|
|
23
|
+
- Display the list of projects neatly as a bulleted list
|
|
24
|
+
|
|
25
|
+
When to use each workflow:
|
|
26
|
+
- Use ANALYSIS workflow for: "analyze these files", "process this directory"
|
|
27
|
+
- Use QUERY workflow for: when the user asks you an engineering-related question or a question about components or files
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from typing import List
|
|
2
|
+
|
|
3
|
+
from mfcli.agents.tools.general import format_error_for_llm
|
|
4
|
+
from mfcli.client.chroma_db import ChromaClient
|
|
5
|
+
from mfcli.crud.project import get_project_by_name
|
|
6
|
+
from mfcli.models.project import Project
|
|
7
|
+
from mfcli.utils.logger import get_logger
|
|
8
|
+
from mfcli.utils.orm import Session
|
|
9
|
+
|
|
10
|
+
logger = get_logger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def list_projects() -> List[str]:
    """
    Return the names of all known projects.

    The agent calls this tool when the user has not supplied a project
    name, so the user can be prompted to pick one from the returned list.

    :return: list of project names
    """
    with Session() as db:
        return [p.name for p in db.query(Project).all()]
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def query_knowledgebase(project_name: str, query: str) -> str:
    """
    Perform RAG (query the knowledgebase) for a project.

    The user must supply a project_name to select the right knowledgebase.

    :param project_name: name of the project whose vector index is queried
    :param query: free-text query to run against the vector store
    :return: stringified list of matching chunks with their metadata, or a
        JSON-formatted error payload on failure
    """
    logger.debug(f"Querying knowledge base for query: {query}")
    try:
        with Session() as db:
            project = get_project_by_name(db, project_name)
        # Guard against an unknown project name: previously this raised an
        # uncaught AttributeError on project.index_id because the lookup ran
        # outside the try block. (Assumes get_project_by_name returns None
        # when missing — if it raises instead, the except below covers it.)
        if project is None:
            return f"Project not found: {project_name}. Run list_projects and pick a valid name."
        chunks = ChromaClient(project.index_id).query(query)
        return str([{
            "chunk": chunk.document,
            "metadata": chunk.metadata
        } for chunk in chunks])
    except Exception as e:
        return format_error_for_llm(e)
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import traceback
|
|
3
|
+
from functools import lru_cache
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from textwrap import dedent
|
|
6
|
+
from typing import Optional, Type
|
|
7
|
+
|
|
8
|
+
import yaml
|
|
9
|
+
from pydantic import BaseModel
|
|
10
|
+
from typing_extensions import TypeVar
|
|
11
|
+
|
|
12
|
+
from mfcli.utils.logger import get_logger
|
|
13
|
+
|
|
14
|
+
logger = get_logger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class AgentConfig(BaseModel):
    """Validated agent settings loaded from a config.yaml file."""
    # Agent identifier (e.g. "controller_agent").
    name: str
    # Short human-readable description of the agent's role.
    description: str
    # LLM model identifier (e.g. "gemini-2.5-flash").
    model: str
    # System prompt / instruction block passed to the agent.
    instructions: str
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@lru_cache
def load_agent_config(path: str | Path) -> AgentConfig:
    """
    Load and validate an agent configuration from a YAML file.

    Results are memoized per path (lru_cache), so edits to the file are
    not picked up within the same process.

    :param path: path to the YAML config file
    :return: validated AgentConfig
    :raises KeyError: if a required key is missing from the YAML
    """
    # Explicit encoding: YAML configs may contain non-ASCII text and the
    # platform default encoding differs (notably on Windows).
    with open(path, "r", encoding="utf-8") as f:
        config = yaml.safe_load(f)
    return AgentConfig(
        name=config["name"],
        description=config["description"],
        model=config["model"],
        instructions=config["instructions"]
    )
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def format_instructions(instructions: str) -> str:
    """Remove common leading indentation and surrounding whitespace."""
    normalized = dedent(instructions)
    return normalized.strip()
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def validation_response(validated: bool, e: Exception | str | None = None) -> dict[str:bool | str]:
|
|
41
|
+
return {"validated": validated, "errors": str(e)}
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def format_error_for_llm(e: Exception, msg: str | None = None) -> str:
|
|
45
|
+
return json.dumps({"error": msg or str(e), "stack_trace": traceback.format_exc()})
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# Any pydantic model type; lets validate_schema return the concrete class
# the caller passed in, not just BaseModel.
T = TypeVar('T', bound=BaseModel)


def validate_schema(model: Type[T], response: str) -> T:
    """
    Parse and validate a JSON string against a pydantic model.

    :param model: pydantic model class to validate against
    :param response: raw JSON string (typically an LLM response)
    :return: validated instance of ``model``
    :raises pydantic.ValidationError: if the JSON does not match the schema
    """
    return model.model_validate_json(response)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def head(file_path: str) -> str:
    """
    Get the head of any text file. The agent will run this to determine file subtype.

    :param file_path: path of the file.
    :return: first 50 lines of the file (stringified list), or a JSON error
        payload on failure.
    """
    try:
        logger.debug(f"Reading: {file_path}")
        # errors="ignore": the file subtype is unknown, so tolerate bytes
        # that do not decode as UTF-8 rather than failing the probe.
        with open(file_path, encoding="utf-8", errors="ignore") as f:
            return str([line for _, line in zip(range(50), f)])
    except Exception as e:
        logger.exception(e)
        # Bug fix: the formatted error was built but never returned, so the
        # caller (the agent) previously received None on any failure.
        return format_error_for_llm(e)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# TODO: vectorize_generated_file needs to be updated with project name or chromadb client and placed elsewhere
def vectorize_generated_file(
    file_path: str,
    purpose: str,
    agent_name: str,
    project_id: Optional[str] = None
) -> str:
    """
    Vectorize a generated file into the vector database for future RAG queries.

    This shared tool can be used by any agent to vectorize their output files
    (PDFs, CSVs, JSON, etc.) into the vector database for context retrieval.

    Args:
        file_path: Path to the file to vectorize
        purpose: Purpose/category of the file (e.g., 'bom', 'errata', 'functional_blocks', 'datasheet')
        agent_name: Name of the agent generating the file
        project_id: Optional project ID for metadata

    Returns:
        Success message or error details
    """
    try:
        # Deferred import: avoids a circular dependency at module load time.
        from mfcli.utils.datasheet_vectorizer import DatasheetVectorizer

        # Tag vectors so queries can tell agent-generated content apart.
        additional_metadata = {"agent": agent_name, "generated": True}
        if project_id:
            additional_metadata["project_id"] = project_id

        DatasheetVectorizer().vectorize_local_file(
            file_path=file_path,
            purpose=purpose,
            additional_metadata=additional_metadata
        )

        logger.info(f"Successfully vectorized file: {file_path} (agent: {agent_name}, purpose: {purpose})")
        return f"Successfully vectorized file: {file_path} with purpose '{purpose}'"

    except Exception as e:
        logger.error(f"Error vectorizing file {file_path}: {e}")
        return format_error_for_llm(e)
|
mfcli/alembic/env.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool

from alembic import context

# import all models to register them

from mfcli.models.base import Base, target_metadata

from mfcli.utils.config import get_config
from mfcli.utils.orm import get_db_url

# Load Alembic config
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
# if config.config_file_name is not None:
# fileConfig(config.config_file_name)

# Dynamically get DB URL (from .env or Pydantic Settings)
db_url = get_db_url()

# Override the alembic.ini value at runtime
config.set_main_option("sqlalchemy.url", db_url)


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Emits migration SQL (with literal binds) instead of executing against
    a live connection.
    """
    context.configure(
        url=db_url,
        target_metadata=target_metadata,
        literal_binds=True,
        compare_type=True,  # detect column type changes during autogenerate
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    Builds an Engine from the alembic config section and runs migrations
    over a real database connection.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,  # one-shot migration run: no pooling needed
    )

    with connectable.connect() as connection:
        context.configure(connection=connection, target_metadata=target_metadata, compare_type=True)

        with context.begin_transaction():
            context.run_migrations()


# Alembic invokes this module as a script; dispatch on the requested mode.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""${message}
|
|
2
|
+
|
|
3
|
+
Revision ID: ${up_revision}
|
|
4
|
+
Revises: ${down_revision | comma,n}
|
|
5
|
+
Create Date: ${create_date}
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
from typing import Sequence, Union
|
|
9
|
+
|
|
10
|
+
from alembic import op
|
|
11
|
+
import sqlalchemy as sa
|
|
12
|
+
${imports if imports else ""}
|
|
13
|
+
|
|
14
|
+
# revision identifiers, used by Alembic.
|
|
15
|
+
revision: str = ${repr(up_revision)}
|
|
16
|
+
down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)}
|
|
17
|
+
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
|
|
18
|
+
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def upgrade() -> None:
|
|
22
|
+
"""Upgrade schema."""
|
|
23
|
+
${upgrades if upgrades else "pass"}
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def downgrade() -> None:
|
|
27
|
+
"""Downgrade schema."""
|
|
28
|
+
${downgrades if downgrades else "pass"}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
"""Added fields to pdf parts model
|
|
2
|
+
|
|
3
|
+
Revision ID: 6ccc0c7c397c
|
|
4
|
+
Revises: 769019ef4870
|
|
5
|
+
Create Date: 2025-12-15 22:36:48.423361
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
from typing import Sequence, Union
|
|
9
|
+
|
|
10
|
+
import sqlmodel
|
|
11
|
+
from alembic import op
|
|
12
|
+
import sqlalchemy as sa
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# revision identifiers, used by Alembic.
|
|
16
|
+
revision: str = '6ccc0c7c397c'
|
|
17
|
+
down_revision: Union[str, Sequence[str], None] = '769019ef4870'
|
|
18
|
+
branch_labels: Union[str, Sequence[str], None] = None
|
|
19
|
+
depends_on: Union[str, Sequence[str], None] = None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def upgrade() -> None:
|
|
23
|
+
"""Upgrade schema."""
|
|
24
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
25
|
+
op.add_column('pdf_parts', sa.Column('start_page', sa.Integer(), nullable=True))
|
|
26
|
+
op.add_column('pdf_parts', sa.Column('end_page', sa.Integer(), nullable=True))
|
|
27
|
+
op.add_column('pdf_parts', sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
|
|
28
|
+
op.add_column('pdf_parts', sa.Column('section_no', sa.Integer(), nullable=True))
|
|
29
|
+
# ### end Alembic commands ###
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def downgrade() -> None:
|
|
33
|
+
"""Downgrade schema."""
|
|
34
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
35
|
+
op.drop_column('pdf_parts', 'section_no')
|
|
36
|
+
op.drop_column('pdf_parts', 'title')
|
|
37
|
+
op.drop_column('pdf_parts', 'end_page')
|
|
38
|
+
op.drop_column('pdf_parts', 'start_page')
|
|
39
|
+
# ### end Alembic commands ###
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Added gemini file path to pdf part model
|
|
2
|
+
|
|
3
|
+
Revision ID: 769019ef4870
|
|
4
|
+
Revises: 7d5adb2a47a7
|
|
5
|
+
Create Date: 2025-12-13 01:12:10.436014
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
from typing import Sequence, Union
|
|
9
|
+
|
|
10
|
+
import sqlmodel
|
|
11
|
+
from alembic import op
|
|
12
|
+
import sqlalchemy as sa
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# revision identifiers, used by Alembic.
|
|
16
|
+
revision: str = '769019ef4870'
|
|
17
|
+
down_revision: Union[str, Sequence[str], None] = '7d5adb2a47a7'
|
|
18
|
+
branch_labels: Union[str, Sequence[str], None] = None
|
|
19
|
+
depends_on: Union[str, Sequence[str], None] = None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def upgrade() -> None:
|
|
23
|
+
"""Upgrade schema."""
|
|
24
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
25
|
+
op.add_column('pdf_parts', sa.Column('gemini_file_id', sqlmodel.sql.sqltypes.AutoString(length=40), nullable=True))
|
|
26
|
+
# ### end Alembic commands ###
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def downgrade() -> None:
|
|
30
|
+
"""Downgrade schema."""
|
|
31
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
32
|
+
op.drop_column('pdf_parts', 'gemini_file_id')
|
|
33
|
+
# ### end Alembic commands ###
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"""added functional block and component models
|
|
2
|
+
|
|
3
|
+
Revision ID: 7a2e3a779fdc
|
|
4
|
+
Revises: 4c0509f2a4cf
|
|
5
|
+
Create Date: 2025-12-10 01:09:06.904237
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
from typing import Sequence, Union
|
|
9
|
+
|
|
10
|
+
import sqlmodel
|
|
11
|
+
from alembic import op
|
|
12
|
+
import sqlalchemy as sa
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# revision identifiers, used by Alembic.
|
|
16
|
+
revision: str = '7a2e3a779fdc'
|
|
17
|
+
down_revision: Union[str, Sequence[str], None] = '7fcb7d6a5836'
|
|
18
|
+
branch_labels: Union[str, Sequence[str], None] = None
|
|
19
|
+
depends_on: Union[str, Sequence[str], None] = None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def upgrade() -> None:
|
|
23
|
+
"""Upgrade schema."""
|
|
24
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
25
|
+
op.create_table('functional_blocks',
|
|
26
|
+
sa.Column('id', sa.Integer(), nullable=False),
|
|
27
|
+
sa.Column('pipeline_run_id', sa.Integer(), nullable=False),
|
|
28
|
+
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(length=255), nullable=False),
|
|
29
|
+
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
|
30
|
+
sa.ForeignKeyConstraint(['pipeline_run_id'], ['pipeline_runs.id'], ondelete='CASCADE'),
|
|
31
|
+
sa.PrimaryKeyConstraint('id')
|
|
32
|
+
)
|
|
33
|
+
op.create_index(op.f('ix_functional_blocks_name'), 'functional_blocks', ['name'], unique=False)
|
|
34
|
+
op.create_index(op.f('ix_functional_blocks_pipeline_run_id'), 'functional_blocks', ['pipeline_run_id'], unique=False)
|
|
35
|
+
op.create_table('functional_block_components',
|
|
36
|
+
sa.Column('id', sa.Integer(), nullable=False),
|
|
37
|
+
sa.Column('functional_block_id', sa.Integer(), nullable=False),
|
|
38
|
+
sa.Column('ref', sqlmodel.sql.sqltypes.AutoString(length=45), nullable=False),
|
|
39
|
+
sa.ForeignKeyConstraint(['functional_block_id'], ['functional_blocks.id'], ondelete='CASCADE'),
|
|
40
|
+
sa.PrimaryKeyConstraint('id')
|
|
41
|
+
)
|
|
42
|
+
op.create_index(op.f('ix_functional_block_components_functional_block_id'), 'functional_block_components', ['functional_block_id'], unique=False)
|
|
43
|
+
# ### end Alembic commands ###
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def downgrade() -> None:
|
|
47
|
+
"""Downgrade schema."""
|
|
48
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
49
|
+
op.drop_index(op.f('ix_functional_block_components_functional_block_id'), table_name='functional_block_components')
|
|
50
|
+
op.drop_table('functional_block_components')
|
|
51
|
+
op.drop_index(op.f('ix_functional_blocks_pipeline_run_id'), table_name='functional_blocks')
|
|
52
|
+
op.drop_index(op.f('ix_functional_blocks_name'), table_name='functional_blocks')
|
|
53
|
+
op.drop_table('functional_blocks')
|
|
54
|
+
# ### end Alembic commands ###
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"""Added PDF parts model
|
|
2
|
+
|
|
3
|
+
Revision ID: 7d5adb2a47a7
|
|
4
|
+
Revises: 7a2e3a779fdc
|
|
5
|
+
Create Date: 2025-12-12 23:27:28.310305
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
from typing import Sequence, Union
|
|
9
|
+
|
|
10
|
+
import sqlmodel
|
|
11
|
+
from alembic import op
|
|
12
|
+
import sqlalchemy as sa
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# revision identifiers, used by Alembic.
|
|
16
|
+
revision: str = '7d5adb2a47a7'
|
|
17
|
+
down_revision: Union[str, Sequence[str], None] = '7a2e3a779fdc'
|
|
18
|
+
branch_labels: Union[str, Sequence[str], None] = None
|
|
19
|
+
depends_on: Union[str, Sequence[str], None] = None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def upgrade() -> None:
|
|
23
|
+
"""Upgrade schema."""
|
|
24
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
25
|
+
op.create_table('pdf_parts',
|
|
26
|
+
sa.Column('id', sa.Integer(), nullable=False),
|
|
27
|
+
sa.Column('file_id', sa.Integer(), nullable=False),
|
|
28
|
+
sa.Column('path', sqlmodel.sql.sqltypes.AutoString(length=600), nullable=False),
|
|
29
|
+
sa.ForeignKeyConstraint(['file_id'], ['files.id'], ondelete='CASCADE'),
|
|
30
|
+
sa.PrimaryKeyConstraint('id')
|
|
31
|
+
)
|
|
32
|
+
op.create_index(op.f('ix_pdf_parts_file_id'), 'pdf_parts', ['file_id'], unique=False)
|
|
33
|
+
# ### end Alembic commands ###
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def downgrade() -> None:
|
|
37
|
+
"""Downgrade schema."""
|
|
38
|
+
# ### commands auto generated by Alembic - please adjust! ###
|
|
39
|
+
op.drop_index(op.f('ix_pdf_parts_file_id'), table_name='pdf_parts')
|
|
40
|
+
op.drop_table('pdf_parts')
|
|
41
|
+
# ### end Alembic commands ###
|