ws-bom-robot-app 0.0.79__tar.gz → 0.0.81__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. {ws_bom_robot_app-0.0.79/ws_bom_robot_app.egg-info → ws_bom_robot_app-0.0.81}/PKG-INFO +51 -62
  2. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/README.md +50 -61
  3. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/setup.py +1 -1
  4. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/config.py +1 -1
  5. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/agent_description.py +123 -123
  6. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/agent_handler.py +166 -166
  7. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/agent_lcel.py +50 -50
  8. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/defaut_prompt.py +15 -15
  9. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -66
  10. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/main.py +158 -158
  11. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/models/api.py +33 -0
  12. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/models/feedback.py +30 -30
  13. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/nebuly_handler.py +185 -185
  14. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/tools/tool_builder.py +65 -65
  15. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/tools/tool_manager.py +330 -330
  16. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/tools/utils.py +41 -41
  17. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/agent.py +34 -34
  18. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/cms.py +114 -114
  19. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/download.py +185 -185
  20. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/print.py +29 -29
  21. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/generator.py +137 -137
  22. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/manager.py +2 -0
  23. ws_bom_robot_app-0.0.81/ws_bom_robot_app/llm/vector_store/integration/shopify.py +144 -0
  24. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/thron.py +103 -103
  25. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
  26. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81/ws_bom_robot_app.egg-info}/PKG-INFO +51 -62
  27. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app.egg-info/SOURCES.txt +1 -0
  28. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/MANIFEST.in +0 -0
  29. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/pyproject.toml +0 -0
  30. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/requirements.txt +0 -0
  31. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/setup.cfg +0 -0
  32. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/__init__.py +0 -0
  33. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/auth.py +0 -0
  34. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/cron_manager.py +0 -0
  35. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/__init__.py +0 -0
  36. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/agent_context.py +0 -0
  37. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/api.py +0 -0
  38. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/feedbacks/__init__.py +0 -0
  39. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/models/__init__.py +0 -0
  40. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/models/base.py +0 -0
  41. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/models/kb.py +0 -0
  42. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/providers/__init__.py +0 -0
  43. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/providers/llm_manager.py +0 -0
  44. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/tools/__init__.py +0 -0
  45. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/tools/models/__init__.py +0 -0
  46. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/tools/models/main.py +0 -0
  47. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/__init__.py +0 -0
  48. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/chunker.py +0 -0
  49. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/cleanup.py +0 -0
  50. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/secrets.py +0 -0
  51. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/utils/webhooks.py +0 -0
  52. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/__init__.py +0 -0
  53. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/db/__init__.py +0 -0
  54. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/db/base.py +0 -0
  55. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/db/chroma.py +0 -0
  56. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/db/faiss.py +0 -0
  57. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/db/manager.py +0 -0
  58. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/db/qdrant.py +0 -0
  59. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/__init__.py +0 -0
  60. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/azure.py +0 -0
  61. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/base.py +0 -0
  62. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/confluence.py +0 -0
  63. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/dropbox.py +0 -0
  64. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/gcs.py +0 -0
  65. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/github.py +0 -0
  66. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/googledrive.py +0 -0
  67. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/jira.py +0 -0
  68. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/s3.py +0 -0
  69. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/sftp.py +0 -0
  70. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/sharepoint.py +0 -0
  71. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/sitemap.py +0 -0
  72. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/integration/slack.py +0 -0
  73. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/loader/__init__.py +0 -0
  74. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/loader/base.py +0 -0
  75. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/llm/vector_store/loader/docling.py +0 -0
  76. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/main.py +0 -0
  77. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/task_manager.py +0 -0
  78. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app/util.py +0 -0
  79. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app.egg-info/dependency_links.txt +0 -0
  80. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app.egg-info/requires.txt +0 -0
  81. {ws_bom_robot_app-0.0.79 → ws_bom_robot_app-0.0.81}/ws_bom_robot_app.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ws_bom_robot_app
3
- Version: 0.0.79
3
+ Version: 0.0.81
4
4
  Summary: A FastAPI application serving ws bom/robot/llm platform ai.
5
5
  Home-page: https://github.com/websolutespa/bom
6
6
  Author: Websolute Spa
@@ -83,18 +83,30 @@ from ws_bom_robot_app import main
83
83
  app = main.app
84
84
  ```
85
85
 
86
- FIll `.env` with the following code:
87
-
88
- ```env
89
- #robot_env=local/development/production
90
- robot_env=local
91
- robot_user='[user]'
92
- robot_password='[pwd]'
93
- robot_data_folder='./.data'
94
- robot_cms_auth='[auth]'
95
- robot_cms_host='https://[DOMAIN]'
96
- robot_cms_db_folder=llmVectorDb
97
- robot_cms_files_folder=llmKbFile
86
+ Create a `.env` file in the root directory with the following configuration:
87
+
88
+ ```properties
89
+ # robot configuration
90
+ robot_env=development
91
+ robot_user=your_username
92
+ USER_AGENT=ws-bom-robot-app
93
+
94
+ # cms (bowl) configuration
95
+ robot_cms_host='http://localhost:4000'
96
+ robot_cms_auth='users API-Key your-api-key-here'
97
+
98
+ # llm providers: fill one or more of these with your API keys
99
+ DEEPSEEK_API_KEY="your-deepseek-api-key"
100
+ OPENAI_API_KEY="your-openai-api-key"
101
+ GOOGLE_API_KEY="your-google-api-key"
102
+ ANTHROPIC_API_KEY="your-anthropic-api-key"
103
+ GROQ_API_KEY="your-groq-api-key"
104
+ # ibm
105
+ WATSONX_URL="https://eu-gb.ml.cloud.ibm.com"
106
+ WATSONX_APIKEY="your-watsonx-api-key"
107
+ WATSONX_PROJECTID="your-watsonx-project-id"
108
+ # gvertex: ensure to mount the file in docker
109
+ GOOGLE_APPLICATION_CREDENTIALS="./.data/secrets/google-credentials.json"
98
110
  ```
99
111
 
100
112
  ## 🚀 Run the app
@@ -125,38 +137,39 @@ robot_cms_files_folder=llmKbFile
125
137
  - [swagger](http://localhost:6001/docs)
126
138
  - [redoc](http://localhost:6001/redoc)
127
139
 
128
- ### 💬 multimodal chat
140
+ ---
129
141
 
130
- The multimodal message allows users to interact with the application using both text and media files.
131
- `robot` accept multimodal input in a uniform way, regarding the llm provider used. Can also be used the llm/model specific input format.
142
+ ## 🐳 Docker
132
143
 
133
- - simple message
144
+ dockerize base image
134
145
 
135
- ```json
136
- {
137
- "role": "user",
138
- "content": "What is the capital of France?"
139
- }
146
+ ```pwsh
147
+ <# cpu #>
148
+ docker build -f Dockerfile-robot-base-cpu -t ghcr.io/websolutespa/ws-bom-robot-base:cpu .
149
+ docker push ghcr.io/websolutespa/ws-bom-robot-base:cpu
150
+ <# gpu #>
151
+ docker build -f Dockerfile-robot-base-gpu -t ghcr.io/websolutespa/ws-bom-robot-base:gpu .
152
+ docker push ghcr.io/websolutespa/ws-bom-robot-base:gpu
140
153
  ```
141
154
 
142
- - multimodal message
143
-
144
- ```json
145
- {
146
- "role": "user",
147
- "content": [
148
- {"type": "text", "text": "Read carefully all the attachments, analize the content and provide a summary for each one:"},
149
- {"type": "image", "url": "https://www.example.com/image/foo.jpg"},
150
- {"type": "file", "url": "https://www.example.com/pdf/bar.pdf"},
151
- {"type": "file", "url": "data:plain/text;base64,CiAgICAgIF9fX19fCiAgICAgLyAgIC..."}, # base64 encoded file
152
- {"type": "media", "mime_type": "plain/text", "data": "CiAgICAgIF9fX19fCiAgICAgLyAgIC..."} # google/gemini specific input format
153
- ]
154
- }
155
+ dockerize app
156
+
157
+ ```pwsh
158
+ docker build -f Dockerfile -t ws-bom-robot-app .
159
+ docker run --rm --name ws-bom-robot-app -d --env-file .env -p 6001:6001 ws-bom-robot-app
160
+ ```
161
+
162
+ docker run mounted to src (dev mode)
163
+
164
+ ```pwsh
165
+ docker run --rm --name ws-bom-robot-app-src -d --env-file .env -v "$(pwd)/ws_bom_robot_app:/app/ws_bom_robot_app" -v "$(pwd)/.data:/app/.data" -v "$(pwd)/tmp:/tmp" -p 6001:6001 ws-bom-robot-app fastapi dev ./ws_bom_robot_app/main.py --host 0.0.0.0 --port 6001
155
166
  ```
156
167
 
157
168
  ---
158
169
 
159
- ## 🔖 Windows requirements
170
+ ## 🔖 Windows requirements (for RAG functionality only)
171
+
172
+ > ⚠️ While it's strongly recommended to use a docker container for development, you can run the app on Windows with the following requirements
160
173
 
161
174
  ### libmagic (mandatory)
162
175
 
@@ -267,7 +280,7 @@ prospector ./ws_bom_robot_app -t dodgy -t bandit
267
280
  prospector ./ws_bom_robot_app -t pyroma
268
281
  ```
269
282
 
270
- lauch pytest
283
+ #### 🧪 run tests
271
284
 
272
285
  ```pwsh
273
286
  !py -m pip install -U pytest pytest-asyncio pytest-mock pytest-cov pyclean
@@ -278,36 +291,12 @@ pytest --cov=ws_bom_robot_app --log-cli-level=info
278
291
  # pytest --cov=ws_bom_robot_app --log-cli-level=info ./tests/app/llm/vector_store/db
279
292
  ```
280
293
 
281
- launch debugger
294
+ #### 🐞 start debugger
282
295
 
283
296
  ```pwsh
284
297
  streamlit run debugger.py --server.port 8051
285
298
  ```
286
299
 
287
- dockerize base image
288
-
289
- ```pwsh
290
- <# cpu #>
291
- docker build -f Dockerfile-robot-base-cpu -t ghcr.io/websolutespa/ws-bom-robot-base:cpu .
292
- docker push ghcr.io/websolutespa/ws-bom-robot-base:cpu
293
- <# gpu #>
294
- docker build -f Dockerfile-robot-base-gpu -t ghcr.io/websolutespa/ws-bom-robot-base:gpu .
295
- docker push ghcr.io/websolutespa/ws-bom-robot-base:gpu
296
- ```
297
-
298
- dockerize app
299
-
300
- ```pwsh
301
- docker build -f Dockerfile -t ws-bom-robot-app .
302
- docker run --rm --name ws-bom-robot-app -d -p 6001:6001 ws-bom-robot-app
303
- ```
304
-
305
- docker run mounted to src
306
-
307
- ```pwsh
308
- docker run --rm --name ws-bom-robot-app-src -d -v "$(pwd)/ws_bom_robot_app:/app/ws_bom_robot_app" -v "$(pwd)/.data:/app/.data" -v "$(pwd)/tmp:/tmp" -p 6001:6001 ws-bom-robot-app
309
- ```
310
-
311
300
  ### ✈️ publish
312
301
 
313
302
  - [testpypi](https://test.pypi.org/project/ws-bom-robot-app/)
@@ -17,18 +17,30 @@ from ws_bom_robot_app import main
17
17
  app = main.app
18
18
  ```
19
19
 
20
- FIll `.env` with the following code:
21
-
22
- ```env
23
- #robot_env=local/development/production
24
- robot_env=local
25
- robot_user='[user]'
26
- robot_password='[pwd]'
27
- robot_data_folder='./.data'
28
- robot_cms_auth='[auth]'
29
- robot_cms_host='https://[DOMAIN]'
30
- robot_cms_db_folder=llmVectorDb
31
- robot_cms_files_folder=llmKbFile
20
+ Create a `.env` file in the root directory with the following configuration:
21
+
22
+ ```properties
23
+ # robot configuration
24
+ robot_env=development
25
+ robot_user=your_username
26
+ USER_AGENT=ws-bom-robot-app
27
+
28
+ # cms (bowl) configuration
29
+ robot_cms_host='http://localhost:4000'
30
+ robot_cms_auth='users API-Key your-api-key-here'
31
+
32
+ # llm providers: fill one or more of these with your API keys
33
+ DEEPSEEK_API_KEY="your-deepseek-api-key"
34
+ OPENAI_API_KEY="your-openai-api-key"
35
+ GOOGLE_API_KEY="your-google-api-key"
36
+ ANTHROPIC_API_KEY="your-anthropic-api-key"
37
+ GROQ_API_KEY="your-groq-api-key"
38
+ # ibm
39
+ WATSONX_URL="https://eu-gb.ml.cloud.ibm.com"
40
+ WATSONX_APIKEY="your-watsonx-api-key"
41
+ WATSONX_PROJECTID="your-watsonx-project-id"
42
+ # gvertex: ensure to mount the file in docker
43
+ GOOGLE_APPLICATION_CREDENTIALS="./.data/secrets/google-credentials.json"
32
44
  ```
33
45
 
34
46
  ## 🚀 Run the app
@@ -59,38 +71,39 @@ robot_cms_files_folder=llmKbFile
59
71
  - [swagger](http://localhost:6001/docs)
60
72
  - [redoc](http://localhost:6001/redoc)
61
73
 
62
- ### 💬 multimodal chat
74
+ ---
63
75
 
64
- The multimodal message allows users to interact with the application using both text and media files.
65
- `robot` accept multimodal input in a uniform way, regarding the llm provider used. Can also be used the llm/model specific input format.
76
+ ## 🐳 Docker
66
77
 
67
- - simple message
78
+ dockerize base image
68
79
 
69
- ```json
70
- {
71
- "role": "user",
72
- "content": "What is the capital of France?"
73
- }
80
+ ```pwsh
81
+ <# cpu #>
82
+ docker build -f Dockerfile-robot-base-cpu -t ghcr.io/websolutespa/ws-bom-robot-base:cpu .
83
+ docker push ghcr.io/websolutespa/ws-bom-robot-base:cpu
84
+ <# gpu #>
85
+ docker build -f Dockerfile-robot-base-gpu -t ghcr.io/websolutespa/ws-bom-robot-base:gpu .
86
+ docker push ghcr.io/websolutespa/ws-bom-robot-base:gpu
74
87
  ```
75
88
 
76
- - multimodal message
77
-
78
- ```json
79
- {
80
- "role": "user",
81
- "content": [
82
- {"type": "text", "text": "Read carefully all the attachments, analize the content and provide a summary for each one:"},
83
- {"type": "image", "url": "https://www.example.com/image/foo.jpg"},
84
- {"type": "file", "url": "https://www.example.com/pdf/bar.pdf"},
85
- {"type": "file", "url": "data:plain/text;base64,CiAgICAgIF9fX19fCiAgICAgLyAgIC..."}, # base64 encoded file
86
- {"type": "media", "mime_type": "plain/text", "data": "CiAgICAgIF9fX19fCiAgICAgLyAgIC..."} # google/gemini specific input format
87
- ]
88
- }
89
+ dockerize app
90
+
91
+ ```pwsh
92
+ docker build -f Dockerfile -t ws-bom-robot-app .
93
+ docker run --rm --name ws-bom-robot-app -d --env-file .env -p 6001:6001 ws-bom-robot-app
94
+ ```
95
+
96
+ docker run mounted to src (dev mode)
97
+
98
+ ```pwsh
99
+ docker run --rm --name ws-bom-robot-app-src -d --env-file .env -v "$(pwd)/ws_bom_robot_app:/app/ws_bom_robot_app" -v "$(pwd)/.data:/app/.data" -v "$(pwd)/tmp:/tmp" -p 6001:6001 ws-bom-robot-app fastapi dev ./ws_bom_robot_app/main.py --host 0.0.0.0 --port 6001
89
100
  ```
90
101
 
91
102
  ---
92
103
 
93
- ## 🔖 Windows requirements
104
+ ## 🔖 Windows requirements (for RAG functionality only)
105
+
106
+ > ⚠️ While it's strongly recommended to use a docker container for development, you can run the app on Windows with the following requirements
94
107
 
95
108
  ### libmagic (mandatory)
96
109
 
@@ -201,7 +214,7 @@ prospector ./ws_bom_robot_app -t dodgy -t bandit
201
214
  prospector ./ws_bom_robot_app -t pyroma
202
215
  ```
203
216
 
204
- lauch pytest
217
+ #### 🧪 run tests
205
218
 
206
219
  ```pwsh
207
220
  !py -m pip install -U pytest pytest-asyncio pytest-mock pytest-cov pyclean
@@ -212,36 +225,12 @@ pytest --cov=ws_bom_robot_app --log-cli-level=info
212
225
  # pytest --cov=ws_bom_robot_app --log-cli-level=info ./tests/app/llm/vector_store/db
213
226
  ```
214
227
 
215
- launch debugger
228
+ #### 🐞 start debugger
216
229
 
217
230
  ```pwsh
218
231
  streamlit run debugger.py --server.port 8051
219
232
  ```
220
233
 
221
- dockerize base image
222
-
223
- ```pwsh
224
- <# cpu #>
225
- docker build -f Dockerfile-robot-base-cpu -t ghcr.io/websolutespa/ws-bom-robot-base:cpu .
226
- docker push ghcr.io/websolutespa/ws-bom-robot-base:cpu
227
- <# gpu #>
228
- docker build -f Dockerfile-robot-base-gpu -t ghcr.io/websolutespa/ws-bom-robot-base:gpu .
229
- docker push ghcr.io/websolutespa/ws-bom-robot-base:gpu
230
- ```
231
-
232
- dockerize app
233
-
234
- ```pwsh
235
- docker build -f Dockerfile -t ws-bom-robot-app .
236
- docker run --rm --name ws-bom-robot-app -d -p 6001:6001 ws-bom-robot-app
237
- ```
238
-
239
- docker run mounted to src
240
-
241
- ```pwsh
242
- docker run --rm --name ws-bom-robot-app-src -d -v "$(pwd)/ws_bom_robot_app:/app/ws_bom_robot_app" -v "$(pwd)/.data:/app/.data" -v "$(pwd)/tmp:/tmp" -p 6001:6001 ws-bom-robot-app
243
- ```
244
-
245
234
  ### ✈️ publish
246
235
 
247
236
  - [testpypi](https://test.pypi.org/project/ws-bom-robot-app/)
@@ -4,7 +4,7 @@ _requirements = [line.split('#')[0].strip() for line in open("requirements.txt")
4
4
 
5
5
  setup(
6
6
  name="ws_bom_robot_app",
7
- version="0.0.79",
7
+ version="0.0.81",
8
8
  description="A FastAPI application serving ws bom/robot/llm platform ai.",
9
9
  long_description=open("README.md", encoding='utf-8').read(),
10
10
  long_description_content_type="text/markdown",
@@ -32,7 +32,7 @@ class Settings(BaseSettings):
32
32
  WATSONX_URL: str = ''
33
33
  WATSONX_APIKEY: str = ''
34
34
  WATSONX_PROJECTID: str = ''
35
- NEBULY_API_URL: str =''
35
+ NEBULY_API_URL: str ='https://backend.nebuly.com/'
36
36
  GOOGLE_APPLICATION_CREDENTIALS: str = '' # path to google credentials iam file, e.d. ./.secrets/google-credentials.json
37
37
  model_config = ConfigDict(
38
38
  env_file='./.env',
@@ -1,123 +1,123 @@
1
- import json, requests, re
2
- from typing import Any
3
- from abc import ABC, abstractmethod
4
- from langchain_core.prompts import ChatPromptTemplate
5
- from langchain_core.messages import AIMessage
6
- from langchain_core.runnables import RunnableSerializable
7
- from langchain_core.runnables import RunnableLambda
8
- from bs4 import BeautifulSoup
9
- from ws_bom_robot_app.llm.models.api import LlmRules
10
- from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
11
- from ws_bom_robot_app.llm.utils.agent import get_rules
12
-
13
- # SafeDict helper class
14
- class SafeDict(dict):
15
- def __missing__(self, key):
16
- return ''
17
-
18
- # Strategy Interface
19
- class AgentDescriptorStrategy(ABC):
20
- @abstractmethod
21
- def enrich_prompt(self, prompt: str, input: dict) -> str:
22
- pass
23
-
24
- @abstractmethod
25
- def rule_input(self, input: dict) -> str:
26
- pass
27
-
28
- # Concrete Strategy for Default Agent
29
- class DefaultAgentDescriptor(AgentDescriptorStrategy):
30
- def enrich_prompt(self, prompt: str, input: dict) -> str:
31
- # Default enrichment logic (could be minimal or no-op)
32
- return prompt.format_map(SafeDict(input))
33
-
34
- def rule_input(self, input: dict) -> str:
35
- return input.get('content', "")
36
-
37
- # Concrete Strategy for URL2Text Agent
38
- class URL2TextAgentDescriptor(AgentDescriptorStrategy):
39
- def enrich_prompt(self, prompt: str, input: dict) -> str:
40
- input["context"] = self._get_page_text(input)
41
- return prompt.format_map(SafeDict(input))
42
-
43
- def rule_input(self, input: dict) -> str:
44
- return input.get('context', "")
45
-
46
- def _get_page_text(self, input: dict) -> str:
47
- url = input.get("content", "")
48
- exclusions = input.get("exclude", {})
49
- response = requests.get(url)
50
- response.raise_for_status()
51
- soup = BeautifulSoup(response.content, 'html5lib')
52
- classes_to_exclude = exclusions.get("classes", [])
53
- ids_to_exclude = exclusions.get("ids", [])
54
- for class_name in classes_to_exclude:
55
- for element in soup.find_all(class_=class_name):
56
- element.extract()
57
- for id_name in ids_to_exclude:
58
- for element in soup.find_all(id=id_name):
59
- element.extract()
60
- for script in soup(["script", "noscript", "style", "head", "footer", "iframe"]):
61
- script.extract()
62
- return re.sub(' +', ' ', soup.get_text())
63
-
64
-
65
- class AgentDescriptor:
66
- # Dictionary to hold all agent strategies
67
- _list: dict[str,AgentDescriptorStrategy] = {
68
- "default": DefaultAgentDescriptor(),
69
- "url2text": URL2TextAgentDescriptor(),
70
- }
71
-
72
- # Functions to manage strategies
73
- @staticmethod
74
- def add_strategy(name: str, strategy: AgentDescriptorStrategy):
75
- """_summary_
76
- add a new strategy to the dictionary
77
- Args:
78
- name (str): name of the strategy, in lowercase
79
- strategy (AgentDescriptorStrategy): class implementing the strategy
80
- Examples:
81
- AgentDescriptor.add_strategy("custom_agent_descriptor", CustomAgentDescriptor())
82
- """
83
- AgentDescriptor._list[name.lower()] = strategy
84
-
85
- @staticmethod
86
- def get_strategy(name: str) -> AgentDescriptorStrategy:
87
- return AgentDescriptor._list.get(name.lower(), DefaultAgentDescriptor())
88
-
89
- def __init__(self, llm: LlmInterface, prompt: str, mode: str, rules: LlmRules = None):
90
- self.__prompt = prompt
91
- self.__llm = llm
92
- self.rules= rules
93
- self.strategy = self.get_strategy(mode) # Selects the strategy from the dictionary
94
-
95
- async def __create_prompt(self, input_dict: dict):
96
- input_data = json.loads(input_dict.get("input", {}))
97
- system = self.strategy.enrich_prompt(self.__prompt, input_data)
98
- if self.rules:
99
- rule_input = self.strategy.rule_input(input_data)
100
- rules_prompt = await get_rules(self.__llm.get_embeddings(), self.rules, rule_input)
101
- system += rules_prompt
102
- return ChatPromptTemplate.from_messages(
103
- [
104
- ("system", system),
105
- ("user", input_data.get("content", ""))
106
- ]
107
- )
108
-
109
- def __create_agent_descriptor(self, content) -> RunnableSerializable[Any, Any]:
110
- content = json.loads(content)
111
- agent = (
112
- {
113
- "input": lambda x: x["input"],
114
- }
115
- | RunnableLambda(self.__create_prompt)
116
- | self.__llm.get_llm()
117
- )
118
- return agent
119
-
120
- async def run_agent(self, content) -> Any:
121
- agent_descriptor = self.__create_agent_descriptor(content)
122
- response: AIMessage = await agent_descriptor.ainvoke({"input": content})
123
- return response
1
+ import json, requests, re
2
+ from typing import Any
3
+ from abc import ABC, abstractmethod
4
+ from langchain_core.prompts import ChatPromptTemplate
5
+ from langchain_core.messages import AIMessage
6
+ from langchain_core.runnables import RunnableSerializable
7
+ from langchain_core.runnables import RunnableLambda
8
+ from bs4 import BeautifulSoup
9
+ from ws_bom_robot_app.llm.models.api import LlmRules
10
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
11
+ from ws_bom_robot_app.llm.utils.agent import get_rules
12
+
13
+ # SafeDict helper class
14
+ class SafeDict(dict):
15
+ def __missing__(self, key):
16
+ return ''
17
+
18
+ # Strategy Interface
19
+ class AgentDescriptorStrategy(ABC):
20
+ @abstractmethod
21
+ def enrich_prompt(self, prompt: str, input: dict) -> str:
22
+ pass
23
+
24
+ @abstractmethod
25
+ def rule_input(self, input: dict) -> str:
26
+ pass
27
+
28
+ # Concrete Strategy for Default Agent
29
+ class DefaultAgentDescriptor(AgentDescriptorStrategy):
30
+ def enrich_prompt(self, prompt: str, input: dict) -> str:
31
+ # Default enrichment logic (could be minimal or no-op)
32
+ return prompt.format_map(SafeDict(input))
33
+
34
+ def rule_input(self, input: dict) -> str:
35
+ return input.get('content', "")
36
+
37
+ # Concrete Strategy for URL2Text Agent
38
+ class URL2TextAgentDescriptor(AgentDescriptorStrategy):
39
+ def enrich_prompt(self, prompt: str, input: dict) -> str:
40
+ input["context"] = self._get_page_text(input)
41
+ return prompt.format_map(SafeDict(input))
42
+
43
+ def rule_input(self, input: dict) -> str:
44
+ return input.get('context', "")
45
+
46
+ def _get_page_text(self, input: dict) -> str:
47
+ url = input.get("content", "")
48
+ exclusions = input.get("exclude", {})
49
+ response = requests.get(url)
50
+ response.raise_for_status()
51
+ soup = BeautifulSoup(response.content, 'html5lib')
52
+ classes_to_exclude = exclusions.get("classes", [])
53
+ ids_to_exclude = exclusions.get("ids", [])
54
+ for class_name in classes_to_exclude:
55
+ for element in soup.find_all(class_=class_name):
56
+ element.extract()
57
+ for id_name in ids_to_exclude:
58
+ for element in soup.find_all(id=id_name):
59
+ element.extract()
60
+ for script in soup(["script", "noscript", "style", "head", "footer", "iframe"]):
61
+ script.extract()
62
+ return re.sub(' +', ' ', soup.get_text())
63
+
64
+
65
+ class AgentDescriptor:
66
+ # Dictionary to hold all agent strategies
67
+ _list: dict[str,AgentDescriptorStrategy] = {
68
+ "default": DefaultAgentDescriptor(),
69
+ "url2text": URL2TextAgentDescriptor(),
70
+ }
71
+
72
+ # Functions to manage strategies
73
+ @staticmethod
74
+ def add_strategy(name: str, strategy: AgentDescriptorStrategy):
75
+ """_summary_
76
+ add a new strategy to the dictionary
77
+ Args:
78
+ name (str): name of the strategy, in lowercase
79
+ strategy (AgentDescriptorStrategy): class implementing the strategy
80
+ Examples:
81
+ AgentDescriptor.add_strategy("custom_agent_descriptor", CustomAgentDescriptor())
82
+ """
83
+ AgentDescriptor._list[name.lower()] = strategy
84
+
85
+ @staticmethod
86
+ def get_strategy(name: str) -> AgentDescriptorStrategy:
87
+ return AgentDescriptor._list.get(name.lower(), DefaultAgentDescriptor())
88
+
89
+ def __init__(self, llm: LlmInterface, prompt: str, mode: str, rules: LlmRules = None):
90
+ self.__prompt = prompt
91
+ self.__llm = llm
92
+ self.rules= rules
93
+ self.strategy = self.get_strategy(mode) # Selects the strategy from the dictionary
94
+
95
+ async def __create_prompt(self, input_dict: dict):
96
+ input_data = json.loads(input_dict.get("input", {}))
97
+ system = self.strategy.enrich_prompt(self.__prompt, input_data)
98
+ if self.rules:
99
+ rule_input = self.strategy.rule_input(input_data)
100
+ rules_prompt = await get_rules(self.__llm.get_embeddings(), self.rules, rule_input)
101
+ system += rules_prompt
102
+ return ChatPromptTemplate.from_messages(
103
+ [
104
+ ("system", system),
105
+ ("user", input_data.get("content", ""))
106
+ ]
107
+ )
108
+
109
+ def __create_agent_descriptor(self, content) -> RunnableSerializable[Any, Any]:
110
+ content = json.loads(content)
111
+ agent = (
112
+ {
113
+ "input": lambda x: x["input"],
114
+ }
115
+ | RunnableLambda(self.__create_prompt)
116
+ | self.__llm.get_llm()
117
+ )
118
+ return agent
119
+
120
+ async def run_agent(self, content) -> Any:
121
+ agent_descriptor = self.__create_agent_descriptor(content)
122
+ response: AIMessage = await agent_descriptor.ainvoke({"input": content})
123
+ return response