spotify-analytics-core 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. spotify_analytics_core-0.1.0/.gitignore +224 -0
  2. spotify_analytics_core-0.1.0/LICENSE +21 -0
  3. spotify_analytics_core-0.1.0/PKG-INFO +35 -0
  4. spotify_analytics_core-0.1.0/README.md +3 -0
  5. spotify_analytics_core-0.1.0/__init__.py +0 -0
  6. spotify_analytics_core-0.1.0/pyproject.toml +47 -0
  7. spotify_analytics_core-0.1.0/spotify_core/__init__.py +0 -0
  8. spotify_analytics_core-0.1.0/spotify_core/agent/__init__.py +0 -0
  9. spotify_analytics_core-0.1.0/spotify_core/agent/graph.py +26 -0
  10. spotify_analytics_core-0.1.0/spotify_core/agent/nodes.py +131 -0
  11. spotify_analytics_core-0.1.0/spotify_core/agent/playback_tools.py +22 -0
  12. spotify_analytics_core-0.1.0/spotify_core/agent/prompts.py +33 -0
  13. spotify_analytics_core-0.1.0/spotify_core/agent/resources.py +27 -0
  14. spotify_analytics_core-0.1.0/spotify_core/agent/schemas.py +57 -0
  15. spotify_analytics_core-0.1.0/spotify_core/agent/state.py +14 -0
  16. spotify_analytics_core-0.1.0/spotify_core/agent/tools.py +311 -0
  17. spotify_analytics_core-0.1.0/spotify_core/config.py +65 -0
  18. spotify_analytics_core-0.1.0/spotify_core/db/__init__.py +1 -0
  19. spotify_analytics_core-0.1.0/spotify_core/db/errors.py +18 -0
  20. spotify_analytics_core-0.1.0/spotify_core/db/migrations.py +137 -0
  21. spotify_analytics_core-0.1.0/spotify_core/db/pipeline.py +426 -0
  22. spotify_analytics_core-0.1.0/spotify_core/db/queries.py +480 -0
  23. spotify_analytics_core-0.1.0/spotify_core/db/schema.py +48 -0
  24. spotify_analytics_core-0.1.0/spotify_core/env_file.py +86 -0
  25. spotify_analytics_core-0.1.0/spotify_core/logging.py +80 -0
  26. spotify_analytics_core-0.1.0/spotify_core/memory/__init__.py +4 -0
  27. spotify_analytics_core-0.1.0/spotify_core/memory/checkpointer.py +18 -0
  28. spotify_analytics_core-0.1.0/spotify_core/memory/store.py +37 -0
  29. spotify_analytics_core-0.1.0/spotify_core/paths.py +59 -0
  30. spotify_analytics_core-0.1.0/spotify_core/setup.py +117 -0
  31. spotify_analytics_core-0.1.0/spotify_core/spotify_client/__init__.py +13 -0
  32. spotify_analytics_core-0.1.0/spotify_core/spotify_client/auth.py +178 -0
  33. spotify_analytics_core-0.1.0/spotify_core/spotify_client/client.py +548 -0
  34. spotify_analytics_core-0.1.0/spotify_core/spotify_client/errors.py +25 -0
  35. spotify_analytics_core-0.1.0/spotify_core/spotify_client/pkce.py +46 -0
  36. spotify_analytics_core-0.1.0/spotify_core/spotify_client/token_store.py +187 -0
  37. spotify_analytics_core-0.1.0/spotify_core/spotify_utils/__init__.py +3 -0
  38. spotify_analytics_core-0.1.0/spotify_core/spotify_utils/spotify_facade.py +232 -0
@@ -0,0 +1,224 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
+
+ # Git worktrees
+ .worktrees/
+ .claude/worktrees/
+
+ # Project specific
+ # Vector database and user data
+ data/vectordb/
+ # sqlite databases and wal, shm files - ignore all but keep directory
+ data/*.db*
+ # Exclude user's actual Spotify data, but keep sample data
+ data/spotify_history/*
+ !data/spotify_history/sample_history.json
+ # logs
+ logs/
+ # Internal planning docs (superpowers/Claude Code session artifacts)
+ docs/superpowers/
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 WC Chang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,35 @@
+ Metadata-Version: 2.4
+ Name: spotify-analytics-core
+ Version: 0.1.0
+ Summary: Core analytics, agent, and Spotify client library for the Spotify AI Analytics project.
+ Project-URL: Homepage, https://github.com/wcnoname5/spotify-ai-analytics
+ Project-URL: Repository, https://github.com/wcnoname5/spotify-ai-analytics
+ Project-URL: Issues, https://github.com/wcnoname5/spotify-ai-analytics/issues
+ Author: WC Chang
+ License: MIT
+ License-File: LICENSE
+ Keywords: analytics,listening-history,mcp,spotify
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: >=3.12
+ Requires-Dist: cryptography>=42.0
+ Requires-Dist: httpx>=0.27
+ Requires-Dist: langgraph-checkpoint-sqlite>=3.0.3
+ Requires-Dist: platformdirs>=4.0
+ Requires-Dist: pydantic-settings>=2.0
+ Requires-Dist: pydantic>=2.0
+ Requires-Dist: python-dotenv>=1.0
+ Provides-Extra: agent
+ Requires-Dist: langchain-core>=1.0; extra == 'agent'
+ Requires-Dist: langchain-google-genai>=4.0; extra == 'agent'
+ Requires-Dist: langchain-openai>=1.0; extra == 'agent'
+ Requires-Dist: langgraph>=1.0; extra == 'agent'
+ Description-Content-Type: text/markdown
+
+ # spotify-analytics-core
+
+ Core analytics, agent, database, and Spotify client library for Spotify AI Analytics.
@@ -0,0 +1,3 @@
+ # spotify-analytics-core
+
+ Core analytics, agent, database, and Spotify client library for Spotify AI Analytics.
File without changes
@@ -0,0 +1,47 @@
+ [project]
+ name = "spotify-analytics-core"
+ version = "0.1.0"
+ description = "Core analytics, agent, and Spotify client library for the Spotify AI Analytics project."
+ readme = "README.md"
+ license = { text = "MIT" }
+ license-files = ["LICENSE"]
+ authors = [{ name = "WC Chang" }]
+ requires-python = ">=3.12"
+ keywords = ["spotify", "analytics", "mcp", "listening-history"]
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.12",
+     "Programming Language :: Python :: 3.13",
+     "License :: OSI Approved :: MIT License",
+     "Operating System :: OS Independent",
+     "Intended Audience :: Developers",
+ ]
+ dependencies = [
+     "pydantic>=2.0",
+     "pydantic-settings>=2.0",
+     "cryptography>=42.0",
+     "httpx>=0.27",
+     "python-dotenv>=1.0",
+     "langgraph-checkpoint-sqlite>=3.0.3",
+     "platformdirs>=4.0",
+ ]
+
+ [project.optional-dependencies]
+ agent = [
+     "langgraph>=1.0",
+     "langchain-core>=1.0",
+     "langchain-openai>=1.0",
+     "langchain-google-genai>=4.0",
+ ]
+
+ [project.urls]
+ Homepage = "https://github.com/wcnoname5/spotify-ai-analytics"
+ Repository = "https://github.com/wcnoname5/spotify-ai-analytics"
+ Issues = "https://github.com/wcnoname5/spotify-ai-analytics/issues"
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["spotify_core"]
File without changes
@@ -0,0 +1,26 @@
+ from typing import Optional
+ from langgraph.graph import StateGraph, END
+ from .state import AgentState
+ from .nodes import make_nodes
+
+ def build_app(llm, tools_list: list, checkpointer=None, store=None):
+     """Build and compile the LangGraph workflow.
+
+     Args:
+         llm: LangChain LLM instance (ChatOpenAI or ChatGoogleGenerativeAI)
+         tools_list: List of LangChain tools from initialize_tools()
+         checkpointer: Optional SqliteSaver for short-term per-thread memory.
+         store: Optional SqliteStore for long-term cross-session memory.
+     """
+     tool_executor = {t.name: t for t in tools_list}
+     intent_parser, data_fetch, analyst_node, should_continue = make_nodes(llm, tools_list, tool_executor)
+
+     workflow = StateGraph(AgentState)
+     workflow.add_node("IntentParser", intent_parser)
+     workflow.add_node("ToolExecute", data_fetch)
+     workflow.add_node("Analyst", analyst_node)
+     workflow.set_entry_point("IntentParser")
+     workflow.add_conditional_edges("IntentParser", should_continue, {"continue": "ToolExecute", "end": "Analyst"})
+     workflow.add_edge("ToolExecute", "Analyst")
+     workflow.add_edge("Analyst", END)
+     return workflow.compile(checkpointer=checkpointer, store=store)
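Usage sketch (not part of the package): wiring the compiled graph end to end, assuming this package's `build_llm()` and `initialize_tools()` helpers; the provider string, API key, and database path are placeholders.

```python
from langgraph.checkpoint.sqlite import SqliteSaver  # from the langgraph-checkpoint-sqlite dependency

from spotify_core.agent.graph import build_app
from spotify_core.agent.resources import build_llm
from spotify_core.agent.tools import initialize_tools

llm = build_llm("openai", api_key="<YOUR_KEY>")  # placeholder key
tools = initialize_tools()

# Optional checkpointer enables the short-term per-thread memory described in the docstring above.
with SqliteSaver.from_conn_string("checkpoints.db") as saver:
    app = build_app(llm, tools, checkpointer=saver)
    out = app.invoke(
        {"input": "What were my top artists last month?", "messages": []},
        config={"configurable": {"thread_id": "demo"}},
    )
    print(out["final_response"])
```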
@@ -0,0 +1,131 @@
+ import json
+ import logging
+ from datetime import datetime
+ from typing import Dict, Any, List
+ from langchain_core.messages import ToolMessage, AIMessage
+
+ from .state import AgentState
+ from .schemas import IntentPlan, ToolPlan
+ from .prompts import INTENT_PARSER_SYSTEM_PROMPT
+
+ def make_nodes(llm, tools_list: list, tool_executor: dict):
+     """Factory that returns graph node functions bound to the given resources via closure."""
+
+     def intent_parser(state: AgentState) -> Dict[str, Any]:
+         """Parse the user's intent and generate a strategic execution plan."""
+         logger = logging.getLogger(f'{__name__}.intent_parser')
+         logger.info(f"Planning for input: {state['input']}")
+
+         if llm is None:
+             error_msg = "AI model not initialized. Please check your API key."
+             return {"intent": "other", "messages": [AIMessage(content=error_msg)], "final_response": error_msg}
+
+         now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         full_system_prompt = INTENT_PARSER_SYSTEM_PROMPT + f"\n\nCurrent date and time: {now}."
+         plan = llm.with_structured_output(IntentPlan).invoke([
+             {"role": "system", "content": full_system_prompt},
+             {"role": "user", "content": state["input"]}
+         ])
+
+         if not plan:
+             raise ValueError("Failed to parse intent plan.")
+
+         logger.info(f"Intent parsed: {plan.intent_type} | Tools: {[tp.tool_name for tp in plan.tool_plan]}")
+         return {"intent": plan.intent_type, "plan": plan, "messages": [AIMessage(content=f"Strategy: {plan.reasoning}")]}
+
+     def data_fetch(state: AgentState) -> Dict[str, Any]:
+         """Execute tools based on the plan."""
+         logger = logging.getLogger(f'{__name__}.data_fetch')
+         plan = state.get("plan")
+         if not plan or not plan.tool_plan:
+             return {"messages": []}
+
+         planned_tool_names = [tp.tool_name for tp in plan.tool_plan]
+         available_tools = [t for t in tools_list if t.name in planned_tool_names]
+         if not available_tools:
+             return {"messages": [AIMessage(content="Planned tools are unavailable.")]}
+
+         llm_with_tools = llm.bind_tools(available_tools)
+         now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         response = llm_with_tools.invoke([
+             {"role": "system", "content": f"You are a Spotify Tool Specialist. Context: {plan.reasoning}. Time: {now}. Only call provided tools."},
+             {"role": "user", "content": state["input"]}
+         ])
+
+         results, tool_messages = [], []
+         if hasattr(response, 'tool_calls') and response.tool_calls:
+             tool_messages.append(response)
+             for i, tool_call in enumerate(response.tool_calls):
+                 tool_name = tool_call["name"]
+                 if tool_name not in planned_tool_names:
+                     logger.warning(f"Hallucinated tool call: {tool_name}")
+                     continue
+                 tool_obj = tool_executor.get(tool_name)
+                 if tool_obj is None:
+                     logger.error(f"Tool '{tool_name}' in plan but not in executor. Skipping.")
+                     continue
+                 call_id = tool_call.get("id", f"call_{i}")
+                 retrieved_data, last_error = None, None
+                 for attempt in range(3):
+                     try:
+                         retrieved_data, last_error = tool_obj.invoke(tool_call["args"]), None  # clear last_error so the success log is accurate after a retried success
+                         break
+                     except Exception as e:
+                         last_error = str(e)
+                         if attempt == 2:
+                             retrieved_data = f"Error after 3 attempts: {last_error}"
+                 obs_str = str(retrieved_data)
+                 if len(obs_str) > 1000:
+                     obs_str = obs_str[:1000] + "... [truncated]"
+                 tool_messages.append(ToolMessage(content=obs_str, name=tool_name, tool_call_id=call_id))
+                 results.append(retrieved_data)
+                 logger.info(f"Tool {tool_name}: success={last_error is None}")
+         return {"messages": tool_messages, "tool_results": results}
+
+     def analyst_node(state: AgentState) -> Dict[str, Any]:
+         """Synthesize the final response."""
+         logger = logging.getLogger(f'{__name__}.analyst_node')
+         if state.get("final_response"):
+             return {"final_response": state["final_response"]}
+
+         intent = state.get("intent", "other")
+         plan = state.get("plan")
+
+         system_prompts = {
+             "factual_query": "You are a Spotify analytics assistant. Be direct, use bullet points.",
+             "insight_analysis": f"You are a Spotify music critic. Tell a story. Focus: {plan.analysis_focus if plan else ''}",
+             "recommendation": "You are a Spotify recommendation expert. Suggest music based on listening history.",
+         }
+         system_prompt = system_prompts.get(intent, "You are a Spotify analytics assistant.")
+
+         tool_messages = [m for m in state.get("messages", []) if isinstance(m, ToolMessage)]
+
+         if intent == "other" and (not plan or not plan.tool_plan):
+             response_content = plan.reasoning if plan else "I'm a Spotify analytics assistant and can't help with that."
+         else:
+             data_str = ""
+             if tool_messages:
+                 tool_results = state.get("tool_results", [])
+                 for i, msg in enumerate(tool_messages):
+                     result = tool_results[i] if i < len(tool_results) else msg.content
+                     formatted = json.dumps(result, indent=2) if isinstance(result, (list, dict)) else str(result)
+                     data_str += f"### Tool: {getattr(msg, 'name', f'Tool_{i}')}\n{formatted}\n\n"
+
+             messages = [{"role": "system", "content": system_prompt}]
+             if data_str:
+                 messages.append({"role": "user", "content": f"Spotify data:\n<data>\n{data_str}\n</data>"})
+             messages.append({"role": "user", "content": f"Address my request: {state['input']}"})
+             response_content = llm.invoke(messages).content
+
+         logger.info(f"[Monitoring] Node: analyst_node | Intent: {intent} | Tools Used: {len(tool_messages) > 0}")
+         logger.info(f"Final response length: {len(response_content)}")
+         return {"final_response": response_content}
+
+     def should_continue(state: AgentState) -> str:
+         """Route: continue to ToolExecute or go to Analyst."""
+         plan = state.get("plan")
+         if plan and plan.tool_plan:
+             return "continue"
+         return "end"
+
+     return intent_parser, data_fetch, analyst_node, should_continue
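One design note: tool failures in `data_fetch` never propagate out of the graph; after three attempts the error string itself becomes the observation. The inline retry loop, extracted as a standalone sketch:

```python
def invoke_with_retries(tool_obj, args: dict, attempts: int = 3):
    """Equivalent to the inline loop above (sketch only, not a package API)."""
    last_error = None
    for _ in range(attempts):
        try:
            return tool_obj.invoke(args)
        except Exception as e:  # deliberately broad: errors become observations
            last_error = str(e)
    return f"Error after {attempts} attempts: {last_error}"
```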
@@ -0,0 +1,22 @@
+ """LangChain-wrapped playback tools for agent integration."""
+ from langchain_core.tools import tool
+
+ from ..spotify_utils.spotify_facade import SpotifyToolFacade
+
+
+ class AgentPlaybackTools(SpotifyToolFacade):
+     """SpotifyPlaybackTools with LangChain tool wrapping for agent integration."""
+
+     def get_tools(self) -> list:
+         """Return all playback/sync methods as LangChain tools."""
+         return [
+             tool(self.get_now_playing),
+             tool(self.get_devices),
+             tool(self.play_track),
+             tool(self.pause),
+             tool(self.skip),
+             tool(self.set_volume),
+             tool(self.add_to_queue),
+             tool(self.create_playlist),
+             tool(self.sync_recent_history),
+         ]
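Note: `langchain_core.tools.tool` accepts bound methods directly, so each entry takes its tool name from the method name and its argument schema and description from the method signature and docstring; `get_tools()` therefore yields ready-to-bind tools without separate wrapper functions.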
@@ -0,0 +1,33 @@
+ INTENT_PARSER_SYSTEM_PROMPT = """
+ You are a Spotify Data Assistant Orchestrator.
+ Your job is to:
+ 1. Understand the user's request.
+ 2. Select the most appropriate tools from the list below to fetch necessary data.
+ 3. Classify the user's intent to guide the downstream analyst.
+
+ ### Available Tools:
+ - **get_summary_stats**: Get overall listening summary (total records, time, date range).
+ - **get_top_artists**: Get top artists by listening time. Supports date range and limit.
+ - **get_top_tracks**: Get top tracks by play count. Supports artist filter, date range, and limit.
+ - **free_query**: Execute complex filtering on raw data (e.g., filter by specific day, hour, or multiple fields).
+ - **free_aggregate**: Perform custom aggregations (e.g., grouping by month, track, or artist with custom metrics).
+
+ ### Tool Selection Guidelines:
+ - **ALWAYS prioritize specific tools** (`get_top_artists`, `get_summary_stats`) over generic ones.
+ - Use `free_query` or `free_aggregate` ONLY when the request involves complex filtering that standard tools cannot handle.
+ - If the user asks for "Recommendations", you usually need to fetch their `get_top_artists` and `get_top_tracks` first as a baseline.
+ - You only need to provide the `tool_name` and `reasoning`. Downstream execution logic will handle the specific arguments.
+
+ ### Intent Classification Guidelines:
+ - **factual_query**: The user wants raw numbers, lists, or specific facts. (e.g., "Count of my tracks", "Top 5 songs")
+ - **insight_analysis**: The user asks about habits, trends, or "Why/How". (e.g., "Am I listening to sadder music?", "Compare my listening between 2023 and 2024")
+ - **recommendation**: The user explicitly asks for new music suggestions.
+ - **other**: Greetings or questions unrelated to Spotify data.
+
+ ### Output Format:
+ You must call the relevant tools (if any) AND return the intent classification structure.
+ """
+
+ # TODO: Implement dynamic injection of tool docstrings into INTENT_PARSER_SYSTEM_PROMPT
+ # for better maintainability and accuracy as tool schemas evolve.
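A minimal sketch of that TODO, assuming LangChain tools with the standard `.name`/`.description` attributes (the helper name is hypothetical):

```python
def render_tool_section(tools) -> str:
    """Render the Available Tools bullets from live tool metadata (hypothetical helper)."""
    bullets = "\n".join(f"- **{t.name}**: {t.description}" for t in tools)
    return f"### Available Tools:\n{bullets}"
```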
@@ -0,0 +1,27 @@
+ """Pure resource factory. No Streamlit dependency."""
+ import logging
+ from typing import Optional, Tuple
+ from langchain_openai import ChatOpenAI
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from spotify_dataloader import SpotifyDataLoader
+ from .tools import initialize_tools
+ from ..config import settings
+
+ logger = logging.getLogger(__name__)
+
+ def build_llm(provider: str, api_key: str):
+     """Build LLM instance from provider name and API key."""
+     if provider.lower() == "gemini":
+         return ChatGoogleGenerativeAI(model=settings.gemini_model, google_api_key=api_key, temperature=0)
+     return ChatOpenAI(model=settings.openai_model, api_key=api_key, temperature=0)
+
+ def build_resources(provider: str, api_key: str, loader: Optional[SpotifyDataLoader] = None):
+     """Build LLM, tools list, and tool executor dict.
+
+     Returns:
+         (llm, tools_list, tool_executor) tuple
+     """
+     llm = build_llm(provider, api_key)
+     tools_list = initialize_tools(loader=loader)
+     tool_executor = {t.name: t for t in tools_list}
+     return llm, tools_list, tool_executor
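Usage sketch (the key is a placeholder): swapping LLM providers is a one-argument change, and the returned triple feeds directly into `build_app()` from `graph.py`:

```python
llm, tools, executor = build_resources("gemini", api_key="<YOUR_KEY>")
# llm and tools go to build_app(llm, tools); executor mirrors {t.name: t for t in tools}
```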
@@ -0,0 +1,57 @@
+ import pydantic
+ from typing import List, Optional, Any, Dict, Literal
+ from pydantic import Field
+
+ class ToolPlan(pydantic.BaseModel):
+     """Plan for a single tool execution."""
+     tool_name: Literal[
+         "get_summary_stats",
+         "get_top_artists",
+         "get_top_tracks",
+         "free_query",
+         "free_aggregate"
+     ] = Field(
+         ...,
+         description="Name of the tool to be executed"
+     )
+     reasoning: str = Field(
+         default="",
+         description="Reasoning for why this tool is selected"
+     )
+     args: Optional[Dict[str, Any]] = Field(
+         default_factory=dict,
+         description="Arguments for the tool (Optional, will be refined by downstream node)"
+     )
+
+ class IntentPlan(pydantic.BaseModel):
+     """Execution plan generated by the Planner node."""
+     intent_type: Literal["factual_query", "insight_analysis", "recommendation", "other"] = Field(
+         ...,
+         description="The category of the user's request."
+     )
+     reasoning: str = Field(..., description="Brief reason why this intent was chosen.")
+     analysis_focus: str = Field(
+         default="",
+         description="If intent is analysis, what specific patterns should be looked at?"
+     )
+     tool_plan: List[ToolPlan] = Field(
+         default_factory=list,
+         description="Planned tools to execute in order"
+     )
+
+ class ToolFreeQueryArgs(pydantic.BaseModel):
+     """Arguments for the free_query tool."""
+     where: Optional[str] = Field(None, description="Polars filter expression as a string, e.g., \"pl.col('artist') == 'Taylor Swift'\"")
+     select: Optional[List[str]] = Field(None, description="List of column names to include")
+     limit: Optional[int] = Field(None, description="Maximum number of rows to return")
+     sort_by: Optional[str] = Field(None, description="Column name to sort by")
+     descending: bool = Field(True, description="Whether to sort in descending order")
+
+ class ToolFreeAggrgateArgs(pydantic.BaseModel):
+     """Arguments for the free_aggregate tool."""
+     group_by: List[str] = Field(..., description="List of column names to group by")
+     metrics: Dict[str, Any] = Field(..., description="Dictionary mapping columns to aggregation functions (sum, mean, count, n_unique). Value can be a string or a (function, alias) tuple.")
+     where: Optional[str] = Field(None, description="Polars filter expression as a string")
+     sort_by: Optional[str] = Field(None, description="Column name to sort by")
+     descending: bool = Field(True, description="Whether to sort in descending order")
+     limit: Optional[int] = Field(None, description="Maximum number of rows to return")
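Quick sketch of how a planner payload round-trips through these models (values are illustrative; pydantic v2 API per the declared `pydantic>=2.0` dependency):

```python
plan = IntentPlan.model_validate({
    "intent_type": "factual_query",
    "reasoning": "User wants a ranked list of artists.",
    "tool_plan": [{"tool_name": "get_top_artists", "reasoning": "Direct match."}],
})
assert plan.tool_plan[0].args == {}  # args default to an empty dict
assert plan.analysis_focus == ""     # only populated for insight_analysis requests
```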
@@ -0,0 +1,14 @@
+ import operator
+ from typing import TypedDict, List, Literal, Annotated, Sequence, Optional, Any
+ from langchain_core.messages import BaseMessage
+ from .schemas import IntentPlan
+
+ class AgentState(TypedDict):
+     input: str
+     messages: Annotated[Sequence[BaseMessage], operator.add]
+     intent: Optional[Literal['factual_query', 'insight_analysis', 'recommendation', 'other',
+                              'playback_control', 'playlist_create', 'sync_history']]
+     plan: Optional[IntentPlan]
+     tool_results: List[Optional[Any]]  # e.g., the queries and their results
+     final_response: Optional[str]
+     retry_count: Optional[int]
+ retry_count: Optional[int]